| index (int64) | repo_id (string, 26-205 chars) | file_path (string, 51-246 chars) | content (string, 8-433k chars) | __index_level_0__ (int64, 0-10k) |
|---|---|---|---|---|
0 |
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile
|
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/formatter/PrefixFormatter.java
|
package com.netflix.suro.sink.remotefile.formatter;
public interface PrefixFormatter {
String format();
}
| 1,300 |
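PrefixFormatter declares a single format() method that yields the prefix used by the remote-file sink. Below is a minimal, hypothetical implementation sketch; the class name DateStampPrefixFormatter and the yyyyMMdd pattern are illustrative and not taken from the Suro sources.

package com.netflix.suro.sink.remotefile.formatter;
import java.text.SimpleDateFormat;
import java.util.Date;
// Hypothetical example implementation; not part of the Suro code base.
public class DateStampPrefixFormatter implements PrefixFormatter {
    private final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyyMMdd");
    @Override
    public String format() {
        // Produce a prefix such as "20130101/" so uploads are grouped by day.
        return dateFormat.format(new Date()) + "/";
    }
}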
0 |
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/aws/PropertyAWSCredentialsProvider.java
|
package com.netflix.suro.aws;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.netflix.governator.annotations.Configuration;
/**
* An {@link AWSCredentialsProvider} implementation backed by Java properties. It is up to the wired
* {@link com.netflix.governator.configuration.ConfigurationProvider} to set the property values
* for the access key and secret key. If {@link com.netflix.suro.SuroServer} is used, these properties
* can be passed in via {@link com.netflix.suro.SuroServer}'s command-line parameters. The properties are
* set at server initialization time and are not refreshed afterwards.
*
* If you want to integrate with the profile-based credential provider, use Amazon's <a href="http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/InstanceProfileCredentialsProvider.html">InstanceProfileCredentialsProvider</a>
*
* @author jbae
* @author elandau
*/
public class PropertyAWSCredentialsProvider implements AWSCredentialsProvider {
@Configuration("SuroServer.AWSAccessKey")
private String accessKey;
@Configuration("SuroServer.AWSSecretKey")
private String secretKey;
@Override
public AWSCredentials getCredentials() {
if (accessKey != null && secretKey != null) {
return new AWSCredentials() {
@Override
public String getAWSAccessKeyId() {
return accessKey;
}
@Override
public String getAWSSecretKey() {
return secretKey;
}
};
}
else {
return null;
}
}
@Override
public void refresh() {
}
}
| 1,301 |
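As the Javadoc above notes, the access key and secret key are supplied as plain properties. The following is a minimal, hypothetical wiring sketch showing one way those two properties could be provided through Governator's PropertiesConfigurationProvider, mirroring the bootstrap pattern used by the test classes later in this dataset; the placeholder values and the assumption that the provider is lifecycle-managed are illustrative, not taken from the Suro sources.

import java.util.Properties;
import com.google.inject.Injector;
import com.netflix.governator.configuration.PropertiesConfigurationProvider;
import com.netflix.governator.guice.BootstrapBinder;
import com.netflix.governator.guice.BootstrapModule;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.lifecycle.LifecycleManager;
// Hypothetical wiring sketch; the property keys match the @Configuration annotations above,
// the values are placeholders.
public class PropertyCredentialsWiringExample {
    public static void main(String[] args) throws Exception {
        final Properties props = new Properties();
        props.setProperty("SuroServer.AWSAccessKey", "my-access-key");
        props.setProperty("SuroServer.AWSSecretKey", "my-secret-key");
        Injector injector = LifecycleInjector.builder()
                .withBootstrapModule(new BootstrapModule() {
                    @Override
                    public void configure(BootstrapBinder binder) {
                        binder.bindConfigurationProvider()
                                .toInstance(new PropertiesConfigurationProvider(props));
                    }
                })
                .build().createInjector();
        // Starting the lifecycle lets Governator map the properties onto @Configuration fields.
        injector.getInstance(LifecycleManager.class).start();
    }
}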
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/SuroServer4Test.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.MessageSetReader;
import com.netflix.suro.thrift.*;
import org.apache.thrift.TException;
import org.apache.thrift.server.THsHaServer;
import org.apache.thrift.transport.TNonblockingServerSocket;
import java.lang.reflect.Field;
import java.net.ServerSocket;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;
public class SuroServer4Test implements SuroServer.Iface {
private long uptime = System.currentTimeMillis();
private int port = 0;
private THsHaServer server = null;
private TNonblockingServerSocket transport = null;
private SuroServer.Processor processor = null;
private Map<String, AtomicLong> counters = new HashMap<String, AtomicLong>();
private List<TMessageSet> messageSetList = new LinkedList<TMessageSet>();
public SuroServer4Test() {
counters.put("messageSetCount", new AtomicLong(0L));
counters.put("messageCount", new AtomicLong(0L));
}
private boolean tryLater = false;
public void setTryLater() {
tryLater = true;
}
public void cancelTryLater() {
tryLater = false;
}
private boolean holdConnection = false;
public void setHoldConnection() {
holdConnection = true;
}
public void cancelHoldConnection() {
holdConnection = false;
latch.countDown();
}
public int getPort() {
return port;
}
private CountDownLatch latch = new CountDownLatch(1);
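// When tryLater is set, process() replies QUEUE_FULL; when holdConnection is set, process() blocks on this latch until cancelHoldConnection() releases it.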
public Result process(TMessageSet messageSet) throws TException {
Result result = new Result();
if (tryLater) {
result.setResultCode(ResultCode.QUEUE_FULL);
} else {
if (!holdConnection) {
handleMessages(messageSet, result);
} else {
try {
latch.await();
handleMessages(messageSet, result);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
return result;
}
private void handleMessages(TMessageSet messageSet, Result result) {
System.out.println(this + "=====================>>>>>>>>>>>>>>>>>>>> getting a new messageSet" + counters.get("messageSetCount").getAndIncrement());
// for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
// System.out.println(e);
// }
messageSetList.add(messageSet);
int count = 0;
for (Message m : new MessageSetReader(messageSet)) {
counters.get("messageCount").incrementAndGet();
++count;
}
System.out.println(this + "=====================>>>>>>>>>>>>>>>>>>>> getting a new messageSet: " + count);
result.setMessage("my message");
result.setResultCode(ResultCode.OK);
}
public TMessageSet getMessageSet(int index) {
return messageSetList.get(index);
}
public long getCounter(String key) throws TException {
return counters.get(key).get();
}
public String getName() throws TException {
return "Test Server";
}
public ServiceStatus getStatus() throws TException {
return ServiceStatus.ALIVE;
}
public long getUptime() throws TException {
return uptime;
}
public String getVersion() throws TException {
return "Test Server - V0";
}
public void start() throws Exception {
transport = new TNonblockingServerSocket(port);
processor = new SuroServer.Processor(this);
THsHaServer.Args serverArgs = new THsHaServer.Args(transport);
serverArgs.processor(processor);
serverArgs.workerThreads(2);
server = new THsHaServer(serverArgs);
Thread t = new Thread() {
@Override
public void run() {
server.serve();
}
};
t.start();
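// The socket was bound to port 0 (ephemeral), so recover the actual port via reflection on the private serverSocket_ field.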
Field serverSocketField = TNonblockingServerSocket.class.getDeclaredField("serverSocket_");
serverSocketField.setAccessible(true);
ServerSocket serverSocket = (ServerSocket) serverSocketField.get(transport);
port = serverSocket.getLocalPort();
System.out.println("Server started on port: " + port);
}
public long getMessageSetCount() {
return counters.get("messageSetCount").get();
}
public long getMessageCount() {
return counters.get("messageCount").get();
}
public long shutdown() {
server.stop();
transport.close();
System.out.println("shutdown STC");
try {Thread.sleep(1000);} catch (Exception e) { e.printStackTrace(); }
return 0;
}
}
| 1,302 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/connection/TestConnectionOutPool.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.connection;
import com.google.inject.AbstractModule;
import com.google.inject.Injector;
import com.netflix.governator.configuration.PropertiesConfigurationProvider;
import com.netflix.governator.guice.BootstrapBinder;
import com.netflix.governator.guice.BootstrapModule;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.loadbalancer.ILoadBalancer;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.SuroServer4Test;
import com.netflix.suro.thrift.ResultCode;
import com.netflix.suro.thrift.TMessageSet;
import org.apache.thrift.TException;
import org.junit.After;
import org.junit.Test;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.*;
public class TestConnectionOutPool {
private Injector injector;
private List<SuroServer4Test> servers;
private Properties props = new Properties();
private ConnectionPool pool;
@Test
public void testOutPool() throws Exception {
props.put(ClientConfig.ENABLE_OUTPOOL, "true");
setup();
ExecutorService executors = Executors.newFixedThreadPool(10);
for (int i = 0; i < 10; ++i) {
executors.execute(new Runnable() {
@Override
public void run() {
for (int i = 0; i < 5; ++i) {
try {
ConnectionPool.SuroConnection client = pool.chooseConnection();
assertEquals(client.send(TestConnectionPool.createMessageSet(100)).getResultCode(), ResultCode.OK);
pool.endConnection(client);
} catch (TException e) {
fail(e.getMessage());
}
}
}
});
}
executors.shutdown();
executors.awaitTermination(10, TimeUnit.SECONDS);
assertTrue(pool.getOutPoolSize() > 0);
try { Thread.sleep(1000); } catch (Exception e) { e.printStackTrace(); }
TestConnectionPool.checkMessageSetCount(servers, 50, false);
}
public void setup() throws Exception {
servers = TestConnectionPool.startServers(1);
props.put(ClientConfig.LB_SERVER, TestConnectionPool.createConnectionString(servers));
props.put(ClientConfig.CONNECTION_TIMEOUT, Integer.toString(Integer.MAX_VALUE));
injector = LifecycleInjector.builder()
.withBootstrapModule(new BootstrapModule() {
@Override
public void configure(BootstrapBinder binder) {
binder.bindConfigurationProvider().toInstance(new PropertiesConfigurationProvider(props));
}
})
.withAdditionalModules(new AbstractModule() {
@Override
protected void configure() {
bind(ILoadBalancer.class).to(StaticLoadBalancer.class);
}
})
.build().createInjector();
injector.getInstance(LifecycleManager.class).start();
pool = injector.getInstance(ConnectionPool.class);
assertEquals(pool.getPoolSize(), 1);
}
@After
public void after() {
TestConnectionPool.shutdownServers(servers);
}
@Test
public void shouldConnectionOutPoolStopGrowing() throws Exception {
setup();
for (SuroServer4Test server : servers) {
server.setHoldConnection();
}
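// Every server now holds incoming sends until cancelHoldConnection(), so the sender threads below block in send() and the out pool should stay empty.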
int numThreads = 10;
final CountDownLatch latch = new CountDownLatch(numThreads);
ExecutorService executors = Executors.newFixedThreadPool(numThreads);
for (int i = 0; i < numThreads; ++i) {
executors.execute(new Runnable() {
@Override
public void run() {
TMessageSet messageSet = TestConnectionPool.createMessageSet(100);
boolean set = true;
while (true) {
ConnectionPool.SuroConnection connection = pool.chooseConnection();
if (set) {
latch.countDown();
set = false;
}
if (connection == null) {
continue;
}
try {
ResultCode result = connection.send(messageSet).getResultCode();
System.out.println("result code: " + result);
if (result == ResultCode.OK) {
break;
}
} catch (Exception e) {
} finally {
pool.endConnection(connection);
try {
Thread.sleep(100);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
System.out.println("finished");
}
});
}
latch.await();
assertEquals(pool.getOutPoolSize(), 0);
for (SuroServer4Test server : servers) {
server.cancelHoldConnection();
}
executors.shutdown();
executors.awaitTermination(60, TimeUnit.SECONDS);
try { Thread.sleep(1000); } catch (Exception e) { e.printStackTrace(); }
TestConnectionPool.checkMessageSetCount(servers, numThreads, false);
}
}
| 1,303 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/connection/TestSuroPing.java
|
package com.netflix.suro.connection;
import com.netflix.loadbalancer.Server;
import com.netflix.suro.SuroServer4Test;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
* @author thinker0
*/
public class TestSuroPing {
@Test
public void pingTest() throws Exception {
final SuroServer4Test server4Test = new SuroServer4Test();
server4Test.start();
SuroPing ping = new SuroPing();
Server server = new Server("localhost", server4Test.getPort());
assertEquals(true, ping.isAlive(server));
server4Test.shutdown();
}
@Test
public void pingFailTest() throws Exception {
SuroPing ping = new SuroPing();
Server server = new Server("localhost", 7901);
assertEquals(false, ping.isAlive(server));
}
}
| 1,304 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/connection/TestConnectionPool.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.connection;
import com.google.common.base.Joiner;
import com.google.inject.Injector;
import com.netflix.governator.configuration.PropertiesConfigurationProvider;
import com.netflix.governator.guice.BootstrapBinder;
import com.netflix.governator.guice.BootstrapModule;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.loadbalancer.ILoadBalancer;
import com.netflix.loadbalancer.Server;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.SuroServer4Test;
import com.netflix.suro.message.Compression;
import com.netflix.suro.message.MessageSetBuilder;
import com.netflix.suro.thrift.TMessageSet;
import org.apache.thrift.TException;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import static junit.framework.Assert.assertEquals;
import static junit.framework.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestConnectionPool {
private Injector injector;
private List<SuroServer4Test> servers;
private Properties props = new Properties();
@Before
public void setup() throws Exception {
servers = startServers(3);
}
private void createInjector() throws Exception {
props.put(ClientConfig.LB_SERVER, createConnectionString(servers));
injector = LifecycleInjector.builder()
.withBootstrapModule(new BootstrapModule() {
@Override
public void configure(BootstrapBinder binder) {
binder.bindConfigurationProvider().toInstance(new PropertiesConfigurationProvider(props));
binder.bind(ILoadBalancer.class).to(StaticLoadBalancer.class);
}
}).build().createInjector();
injector.getInstance(LifecycleManager.class).start();
}
@After
public void tearDown() throws Exception {
shutdownServers(servers);
injector.getInstance(LifecycleManager.class).close();
props.clear();
}
public static List<SuroServer4Test> startServers(int count) throws Exception {
List<SuroServer4Test> collectors = new LinkedList<SuroServer4Test>();
for (int i = 0; i < count; ++i) {
SuroServer4Test c = new SuroServer4Test();
c.start();
collectors.add(c);
}
return collectors;
}
public static String createConnectionString(List<SuroServer4Test> servers) {
List<String> addrList = new ArrayList<String>();
for (SuroServer4Test c : servers) {
addrList.add("localhost:" + c.getPort());
}
return Joiner.on(',').join(addrList);
}
public static void shutdownServers(List<SuroServer4Test> servers) {
for (SuroServer4Test c : servers) {
c.shutdown();
}
}
public static TMessageSet createMessageSet(int messageCount) {
MessageSetBuilder builder = new MessageSetBuilder(new ClientConfig())
.withCompression(Compression.LZF);
for(int i = 0; i < messageCount; ++i) {
builder.withMessage(
"routingKey",
("testMessage" +i).getBytes());
}
return builder.build();
}
@Test
public void testPool() throws Exception {
createInjector();
final ConnectionPool pool = injector.getInstance(ConnectionPool.class);
ExecutorService executors = Executors.newFixedThreadPool(3);
for (int i = 0; i < 3; ++i) {
executors.execute(new Runnable() {
@Override
public void run() {
for (int i = 0; i < 5; ++i) {
try {
ConnectionPool.SuroConnection client = pool.chooseConnection();
long prevTime = System.currentTimeMillis();
int prevCount = client.getSentCount();
client.send(createMessageSet(100));
assertEquals(client.getSentCount() - prevCount, 1);
if (client.getSentCount() == 1) {
assertTrue(
client.getTimeUsed() <= System.currentTimeMillis() &&
client.getTimeUsed() >= prevTime);
}
pool.endConnection(client);
} catch (TException e) {
fail(e.getMessage());
}
}
}
});
}
executors.shutdown();
executors.awaitTermination(10, TimeUnit.SECONDS);
checkMessageSetCount(servers, 15, true);
}
public static void checkMessageSetCount(List<SuroServer4Test> servers, int count, boolean unbalanceCheck) {
int messagesetCount = 0;
for (SuroServer4Test c : servers) {
messagesetCount += c.getMessageSetCount();
if (c.getMessageSetCount() == 0 && unbalanceCheck) {
fail("unbalanced");
}
}
assertEquals(messagesetCount, count);
}
public static void checkMessageCount(List<SuroServer4Test> servers, int count) {
int messageCount = 0;
for (SuroServer4Test c : servers) {
messageCount += c.getMessageCount();
}
assertEquals(messageCount, count);
}
@Test
public void testServerDown() throws Exception {
props.setProperty(ClientConfig.MINIMUM_RECONNECT_TIME_INTERVAL, "0");
props.setProperty(ClientConfig.RECONNECT_INTERVAL, "0");
props.setProperty(ClientConfig.RECONNECT_TIME_INTERVAL, "0");
createInjector();
final ConnectionPool pool = injector.getInstance(ConnectionPool.class);
final CountDownLatch waitLatch = new CountDownLatch(2);
final CountDownLatch goLatch = new CountDownLatch(1);
ExecutorService executors = Executors.newFixedThreadPool(2);
for (int i = 0; i < 2; ++i) {
executors.execute(new Runnable() {
@Override
public void run() {
for (int i = 0; i < 5; ++i) {
try {
ConnectionPool.SuroConnection client = pool.chooseConnection();
long prevTime = System.currentTimeMillis();
int prevCount = client.getSentCount();
client.send(createMessageSet(100));
assertEquals(client.getSentCount() - prevCount, 1);
if (client.getSentCount() == 1) {
assertTrue(
client.getTimeUsed() <= System.currentTimeMillis() &&
client.getTimeUsed() >= prevTime);
}
pool.endConnection(client);
if (i == 1) {
waitLatch.countDown();
goLatch.await();
}
} catch (TException e) {
fail(e.getMessage());
} catch (InterruptedException e) {
fail(e.getMessage());
}
}
}
});
}
executors.shutdown();
waitLatch.await();
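// Both workers have sent twice; mark the first server down and verify it receives at most one more message set afterwards.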
Server downServer = new Server("localhost", servers.get(0).getPort());
downServer.setAlive(true);
ConnectionPool.SuroConnection downConnection = new ConnectionPool.SuroConnection(
downServer,
injector.getInstance(ClientConfig.class),
true);
pool.markServerDown(downConnection);
long prevCount = servers.get(0).getMessageSetCount();
goLatch.countDown();
executors.awaitTermination(10, TimeUnit.SECONDS);
int messageSetCount = 0;
for (SuroServer4Test c : servers) {
messageSetCount += c.getMessageSetCount();
}
assertEquals(messageSetCount, 10);
assertTrue(servers.get(0).getMessageSetCount() - prevCount <= 1);
}
@Test
public void testReconnectInterval() throws Exception {
props.setProperty(ClientConfig.MINIMUM_RECONNECT_TIME_INTERVAL, "0");
props.setProperty(ClientConfig.RECONNECT_INTERVAL, "2");
props.setProperty(ClientConfig.RECONNECT_TIME_INTERVAL, "10000");
createInjector();
ConnectionPool pool = injector.getInstance(ConnectionPool.class);
for (int i = 0; i < 2; ++i) {
ConnectionPool.SuroConnection connection = pool.chooseConnection();
connection.send(TestConnectionPool.createMessageSet(100));
pool.endConnection(connection);
}
ConnectionPool.SuroConnection connection = pool.chooseConnection();
assertEquals(connection.getSentCount(), 0);
}
@Test
public void testReconnectTime() throws Exception {
props.setProperty(ClientConfig.MINIMUM_RECONNECT_TIME_INTERVAL, "0");
props.setProperty(ClientConfig.RECONNECT_INTERVAL, "1000");
props.setProperty(ClientConfig.RECONNECT_TIME_INTERVAL, "0");
createInjector();
ConnectionPool pool = injector.getInstance(ConnectionPool.class);
ConnectionPool.SuroConnection connection = pool.chooseConnection();
connection.send(TestConnectionPool.createMessageSet(100));
pool.endConnection(connection);
connection = pool.chooseConnection();
assertEquals(connection.getSentCount(), 0);
}
@Test
public void shouldBePopulatedWithNumberOfServersOnLessSenderThreads() throws Exception {
props.setProperty(ClientConfig.ASYNC_SENDER_THREADS, "1");
createInjector();
ILoadBalancer lb = mock(ILoadBalancer.class);
List<Server> servers = new LinkedList<Server>();
for (SuroServer4Test suroServer4Test : this.servers) {
servers.add(new Server("localhost", suroServer4Test.getPort()));
}
when(lb.getServerList(true)).thenReturn(servers);
ConnectionPool pool = new ConnectionPool(injector.getInstance(ClientConfig.class), lb);
assertTrue(pool.getPoolSize() >= 1);
for (int i = 0; i < 10; ++i) {
if (pool.getPoolSize() != 3) {
Thread.sleep(1000);
}
}
assertEquals(pool.getPoolSize(), 3);
}
@Test
public void shouldBePopulatedWithNumberOfServersOnMoreSenderThreads() throws Exception {
props.setProperty(ClientConfig.ASYNC_SENDER_THREADS, "10");
createInjector();
ILoadBalancer lb = mock(ILoadBalancer.class);
List<Server> servers = new LinkedList<Server>();
for (SuroServer4Test suroServer4Test : this.servers) {
servers.add(new Server("localhost", suroServer4Test.getPort()));
}
when(lb.getServerList(true)).thenReturn(servers);
ConnectionPool pool = new ConnectionPool(injector.getInstance(ClientConfig.class), lb);
assertEquals(pool.getPoolSize(), 3);
}
@Test
public void shouldPopulationFinishedOnTimeout() throws Exception {
shutdownServers(servers);
createInjector();
final ILoadBalancer lb = mock(ILoadBalancer.class);
List<Server> servers = new LinkedList<Server>();
for (SuroServer4Test suroServer4Test : this.servers) {
servers.add(new Server("localhost", suroServer4Test.getPort()));
}
when(lb.getServerList(true)).thenReturn(servers);
final AtomicBoolean passed = new AtomicBoolean(false);
Thread t = new Thread(new Runnable() {
@Override
public void run() {
ConnectionPool pool = new ConnectionPool(injector.getInstance(ClientConfig.class), lb);
assertEquals(pool.getPoolSize(), 0);
passed.set(true);
}
});
t.start();
t.join((servers.size() + 1) * injector.getInstance(ClientConfig.class).getConnectionTimeout());
assertTrue(passed.get());
}
}
| 1,305 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/input/TestLog4JAppenderWithLog4JConfig.java
|
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.input;
import com.netflix.suro.SuroServer4Test;
import com.netflix.suro.TagKey;
import com.netflix.suro.connection.TestConnectionPool;
import org.apache.log4j.Logger;
import org.apache.log4j.PropertyConfigurator;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import static org.junit.Assert.assertEquals;
/**
*
*/
public class TestLog4JAppenderWithLog4JConfig {
private final static Logger LOG = Logger.getLogger(TestLog4JAppenderWithLog4JConfig.class);
public static final int DEFAULT_WAIT_INTERVAL = 20;
private List<SuroServer4Test> servers;
@Before
public void setup() throws Exception {
servers = TestConnectionPool.startServers(1);
Properties props = new Properties();
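// Programmatic log4j configuration: route WARN logs from com.netflix.suro.input to the SURO appender, pointed at the in-process test collector via the static load balancer, using the synchronous client with compression off.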
props.setProperty("log4j.logger.com.netflix.suro.input","WARN,SURO");
props.setProperty("log4j.appender.stdout.layout.ConversionPattern", "%5p [%t] (%F:%L) - %m%n");
props.setProperty("log4j.appender.SURO", "com.netflix.suro.input.Log4jAppender");
props.setProperty("log4j.appender.SURO.app", "ajjainApp");
props.setProperty("log4j.appender.SURO.routingKey", "ajjainroutingkey");
props.setProperty("log4j.appender.SURO.loadBalancerType", "static");
props.setProperty("log4j.appender.SURO.loadBalancerServer", TestConnectionPool.createConnectionString(servers));
props.setProperty("log4j.appender.SURO.compression", "0");
props.setProperty("log4j.appender.SURO.clientType", "sync");
PropertyConfigurator.configure(props);
}
@After
public void tearDown() throws Exception {
TestConnectionPool.shutdownServers(servers);
}
@Test
public void testAllMessagesWillBeDeliveredInSeparatedMessageSet() {
Map<String, String> message = new HashMap<String, String>();
message.put(TagKey.ROUTING_KEY, "routing_key_1");
message.put("data", "test");
final int messageCount = 20;
for(int i = 0; i < messageCount; ++i) {
LOG.warn(message);
}
waitAndVerify(5000, new Runnable() {
@Override
public void run() {
assertEquals(messageCount, servers.get(0).getMessageSetCount());
assertEquals(messageCount, servers.get(0).getMessageCount());
}
});
}
private void waitAndVerify(long millis, Runnable assertion) {
waitAndVerify(millis, assertion, DEFAULT_WAIT_INTERVAL);
}
private void sleepThrough(long millis) {
long remaining = millis;
while( remaining > 0 ) {
long start = System.currentTimeMillis();
try{
Thread.sleep(remaining);
} catch (InterruptedException e){ }
remaining -= (System.currentTimeMillis() - start);
}
}
private void waitAndVerify(long millis, Runnable assertion, long waitInterval) {
long remaining = millis;
while(remaining > 0) {
try{
assertion.run();
// Assertion is successful, so we don't need to wait any more
return;
} catch(Throwable t) {
sleepThrough(waitInterval);
remaining -= waitInterval;
}
}
// Last attempt after timeout, so we will get assertion failure if
// there is one.
assertion.run();
}
}
| 1,306 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/input/TestLog4jAppender.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.input;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.SuroServer4Test;
import com.netflix.suro.client.SuroClient;
import com.netflix.suro.connection.TestConnectionPool;
import com.netflix.suro.message.Message;
import org.apache.log4j.Level;
import org.apache.log4j.spi.LoggingEvent;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.ArgumentCaptor;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.*;
public class TestLog4jAppender {
@Rule
public TemporaryFolder tempDir = new TemporaryFolder();
public static final int DEFAULT_WAIT_INTERVAL = 20;
private Log4jAppender appender = new Log4jAppender();
private List<SuroServer4Test> servers;
@Before
public void setup() throws Exception {
servers = TestConnectionPool.startServers(1);
}
@After
public void clean() {
TestConnectionPool.shutdownServers(servers);
}
private void sleepThrough(long millis) {
long remaining = millis;
while( remaining > 0 ) {
long start = System.currentTimeMillis();
try{
Thread.sleep(remaining);
} catch (InterruptedException e){ }
remaining -= (System.currentTimeMillis() - start);
}
}
private void waitAndVerify(long millis, Runnable assertion, long waitInterval) {
long remaining = millis;
while(remaining > 0) {
try{
assertion.run();
// Assertion is successful, so we don't need to wait any more
return;
} catch(Throwable t) {
sleepThrough(waitInterval);
remaining -= waitInterval;
}
}
// Last attempt after timeout, so we will get assertion failure if
// there is one.
assertion.run();
}
private void waitAndVerify(long millis, Runnable assertion) {
waitAndVerify(millis, assertion, DEFAULT_WAIT_INTERVAL);
}
@Test
public void testMemory() throws Exception {
appender.setLoadBalancerType("static");
appender.setLoadBalancerServer(TestConnectionPool.createConnectionString(servers));
appender.activateOptions();
LoggingEvent event = mock(LoggingEvent.class);
when(event.getMessage()).thenReturn(createEventMap());
when(event.getLevel()).thenReturn(Level.INFO);
appender.append(event);
// Make sure client has enough time to drain the intermediary message queue
waitAndVerify(5000, new Runnable(){
@Override
public void run() {
assertEquals(appender.getSentMessageCount(), 1); // it should be successful
}
});
appender.close();
}
@Test
public void testFile() throws Exception {
appender.setAsyncQueueType("file");
appender.setAsyncFileQueuePath(tempDir.newFolder().getAbsolutePath());
appender.setLoadBalancerType("static");
appender.setLoadBalancerServer(TestConnectionPool.createConnectionString(servers));
appender.activateOptions();
LoggingEvent event = mock(LoggingEvent.class);
when(event.getMessage()).thenReturn(createEventMap());
when(event.getLevel()).thenReturn(Level.INFO);
appender.append(event);
// Make sure client has enough time to drain the intermediary message queue
waitAndVerify(15000, new Runnable() {
public void run() {
assertEquals(appender.getSentMessageCount(), 1);
}
});
appender.close();
}
@Test
public void testLog4jFormatter() {
appender.setFormatterClass("com.netflix.suro.input.StaticLog4jFormatter");
appender.setLoadBalancerType("static");
appender.setLoadBalancerServer(TestConnectionPool.createConnectionString(servers));
appender.setClientType("sync");
appender.setRoutingKey("testRoutingKey");
appender.activateOptions();
appender.client = mock(SuroClient.class);
doNothing().when(appender.client).send(any(Message.class));
LoggingEvent event = mock(LoggingEvent.class);
when(event.getMessage()).thenReturn("string log");
when(event.getLevel()).thenReturn(Level.INFO);
appender.append(event);
ArgumentCaptor<Message> argument = ArgumentCaptor.forClass(Message.class);
verify(appender.client).send(argument.capture());
assertEquals(argument.getValue().getRoutingKey(), "testRoutingKey");
String[] v0 = new String(argument.getValue().getPayload()).split("\t");
String[] v1 = new StaticLog4jFormatter(new ClientConfig()).format(event).split("\t");
assertEquals(v0.length, v1.length);
for (int i = 0; i < v0.length; ++i) {
if (i == 0) {
assertEquals(v0[0].split(":")[0], v1[0].split(":")[0]);
} else {
assertEquals(v0[i], v1[i]);
}
}
}
private Map<String, String> createEventMap() {
Map<String, String> map = new HashMap<String, String>();
map.put("one", "1");
map.put("two", "2");
map.put("three", "3");
map.put("routingKey", "routingKey");
return map;
}
}
| 1,307 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/input/TestLog4jFormatter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.input;
import com.fasterxml.jackson.core.type.TypeReference;
import com.google.common.collect.Maps;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.jackson.DefaultObjectMapper;
import org.apache.log4j.Level;
import org.apache.log4j.spi.LoggingEvent;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.junit.Test;
import java.io.IOException;
import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestLog4jFormatter {
@Test
public void testString() {
ClientConfig config = new ClientConfig();
StringLog4jFormatter formatter = new StringLog4jFormatter(config);
LoggingEvent event = mock(LoggingEvent.class);
when(event.getLevel()).thenReturn(Level.INFO);
when(event.getLoggerName()).thenReturn("TestLogger");
when(event.getMessage()).thenReturn("TestMessage");
when(event.getThrowableStrRep()).thenReturn(new String[]{"StackTrace0", "StackTrace1"});
String[] messages = formatter.format(event).split("\035");
// The exact datetime cannot be compared because of the millisecond component,
// so compare only up to seconds (the part before the comma).
DateTime now = new DateTime();
DateTimeFormatter fmt = DateTimeFormat.forPattern(config.getLog4jDateTimeFormat());
String nowStr = fmt.print(now);
assertEquals(nowStr.split(",")[0], messages[0].split(",")[0]);
String[] answers = new String[]{"", "INFO", "TestLogger", "TestMessage", "Exception\002StackTrace0\nStackTrace1"};
for (int i = 1; i < messages.length; ++i) {
assertEquals(messages[i], answers[i]);
}
}
@Test
public void testJson() throws IOException {
ClientConfig config = new ClientConfig();
JsonLog4jFormatter formatter = new JsonLog4jFormatter(config);
Map<String, Object> logEvent = Maps.newHashMap();
logEvent.put("field1", "value1");
logEvent.put("field2", 100);
LoggingEvent event = mock(LoggingEvent.class);
when(event.getLevel()).thenReturn(Level.INFO);
when(event.getLoggerName()).thenReturn("TestLogger");
when(event.getMessage()).thenReturn(logEvent);
when(event.getThrowableStrRep()).thenReturn(new String[]{"StackTrace0", "StackTrace1"});
Map<String, Object> formattedEvent = new DefaultObjectMapper().readValue(
formatter.format(event),
new TypeReference<Map<String, Object>>(){});
assertEquals(formattedEvent.get("field1"), "value1");
assertEquals(formattedEvent.get("field2"), 100);
assertEquals(formattedEvent.get("logLevel"), "INFO");
assertEquals(formattedEvent.get("class"), "TestLogger");
assertEquals(formattedEvent.get("Exception").toString(), "[StackTrace0, StackTrace1]");
// The exact datetime cannot be compared because of the millisecond component,
// so compare only up to minutes (the first two colon-separated fields).
DateTime now = new DateTime();
DateTimeFormatter fmt = DateTimeFormat.forPattern(config.getLog4jDateTimeFormat());
String nowStr = fmt.print(now);
assertEquals(nowStr.split(":")[0]+nowStr.split(":")[1],
((String)formattedEvent.get("datetime")).split(":")[0] + ((String)formattedEvent.get("datetime")).split(":")[1]);
}
}
| 1,308 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/sink/TestClientSuroSink.java
|
package com.netflix.suro.sink;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.suro.SuroPlugin;
import com.netflix.suro.SuroServer4Test;
import com.netflix.suro.connection.TestConnectionPool;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.StringMessage;
import com.netflix.suro.sink.kafka.KafkaSink;
import com.netflix.suro.sink.notice.NoNotice;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.util.List;
import static org.junit.Assert.assertEquals;
public class TestClientSuroSink {
private List<SuroServer4Test> servers;
@Before
public void setup() throws Exception {
servers = TestConnectionPool.startServers(3);
}
@After
public void tearDown() {
TestConnectionPool.shutdownServers(servers);
}
@Test
public void test() throws IOException {
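// JSON sink descriptor: a "suro" sink configured with a static load balancer pointing at the local test collectors and a synchronous client; Jackson deserializes it into a Sink below.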
String desc = "{\n" +
" \"type\":\"suro\",\n" +
" \"properties\": {\n" +
" \"SuroClient.loadBalancerServer\":\"" + TestConnectionPool.createConnectionString(servers) + "\",\n" +
" \"SuroClient.loadBalancerType\":\"static\",\n" +
" \"SuroClient.clientType\":\"sync\"\n" +
" }\n" +
"}";
Injector injector = Guice.createInjector(
new SuroPlugin() {
@Override
protected void configure() {
this.addSinkType(SuroSink.TYPE, SuroSink.class);
this.addSinkType(KafkaSink.TYPE, KafkaSink.class);
this.addNoticeType(NoNotice.TYPE, NoNotice.class);
}
},
new AbstractModule() {
@Override
protected void configure() {
bind(ObjectMapper.class).to(DefaultObjectMapper.class);
}
}
);
ObjectMapper jsonMapper = injector.getInstance(DefaultObjectMapper.class);
Sink sink = jsonMapper.readValue(desc, new TypeReference<Sink>(){});
sink.open();
sink.writeTo(new StringMessage("routingKey", "testMessage"));
assertEquals(sink.getStat(), "sent: 1" + "\n" + "lost: 0");
TestConnectionPool.checkMessageCount(servers, 1);
sink.close();
}
}
| 1,309 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/message/TestCompression.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.message;
import org.junit.Test;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Random;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertTrue;
public class TestCompression {
@Test
public void testNoCompression() {
Compression compression = Compression.NO;
assertEquals(compression.getId(), 0);
String testString = "teststring";
ByteBuffer buffer = ByteBuffer.allocate(testString.length());
buffer.put(testString.getBytes());
assertEquals(compression.compress(buffer.array()), buffer.array());
}
@Test
public void testLZF() {
Compression compression = Compression.LZF;
assertEquals(compression.getId(), 1);
Random rand = new Random();
StringBuilder sb = new StringBuilder();
for (int i = 0; i < 4096; ++i) {
sb.append((char) (rand.nextInt(95) + 32));
}
byte[] compressed = compression.compress(sb.toString().getBytes());
assertNotSame(compressed, sb.toString().getBytes());
byte[] decompressed = compression.decompress(compressed);
assertTrue(Arrays.equals(decompressed, sb.toString().getBytes()));
}
}
| 1,310 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/message/TestStringSerDe.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.message;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class TestStringSerDe {
@Test
public void test() {
SerDe serde = new StringSerDe();
String testString = "teststring";
byte[] serialized = serde.serialize(testString);
byte[] original = testString.getBytes();
assertEquals(serialized.length, original.length);
for (int i = 0; i < original.length; ++i) {
assertEquals(serialized[i], original[i]);
}
assertEquals(serde.deserialize(testString.getBytes()), testString);
}
}
| 1,311 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/message/TestMessageSerDe.java
|
package com.netflix.suro.message;
import com.netflix.suro.sink.kafka.SuroKeyedMessage;
import org.junit.Test;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import static org.junit.Assert.assertEquals;
public class TestMessageSerDe {
@Test
public void testMessage() {
MessageSerDe serde = new MessageSerDe();
for (int k = 0; k < 10; ++k) {
Message msg = new Message("routingKey",
"payload".getBytes());
byte[] bytes = serde.serialize(msg);
assertEquals(msg, serde.deserialize(bytes));
}
}
@Test
public void testSuroKeyedMessage() {
MessageSerDe serde = new MessageSerDe();
for (int k = 0; k < 10; ++k) {
Message msg = new SuroKeyedMessage(
k,
new Message("routingKey", "payload".getBytes()));
byte[] bytes = serde.serialize(msg);
SuroKeyedMessage suroKeyedMessage = (SuroKeyedMessage) serde.deserialize(bytes);
assertEquals(msg, suroKeyedMessage);
assertEquals(suroKeyedMessage.getKey(), k);
}
}
@Test
public void testMultiThreaded() throws InterruptedException {
final MessageSerDe serde = new MessageSerDe();
int threadCount = 10;
ExecutorService executors = Executors.newFixedThreadPool(threadCount);
final CountDownLatch latch = new CountDownLatch(threadCount);
final CountDownLatch startLatch = new CountDownLatch(1);
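// All workers wait on startLatch so serialization and deserialization hit the shared MessageSerDe concurrently.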
for (int i = 0; i < threadCount; ++i) {
executors.execute(new Runnable() {
@Override
public void run() {
try {
startLatch.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
for (int j = 0; j < 10000; ++j) {
String str = generateRandomString();
Message msg = new Message("testRoutingKey", str.getBytes());
byte[] bytes = serde.serialize(msg);
Message bmsg = serde.deserialize(bytes);
assertEquals(bmsg.getRoutingKey(), "testRoutingKey");
assertEquals(new String(bmsg.getPayload()), str);
}
latch.countDown();
}
});
}
startLatch.countDown();
latch.await();
}
public String generateRandomString() {
Random rand = new Random();
StringBuilder sb = new StringBuilder();
for (int i = 0; i < 4096; ++i) {
sb.append((char) (rand.nextInt(95) + 32));
}
return sb.toString();
}
}
| 1,312 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/message/TestJsonSerDe.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.message;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.serde.JsonSerDe;
import org.junit.Test;
import java.io.IOException;
import java.util.Map;
import static org.junit.Assert.assertEquals;
public class TestJsonSerDe {
@Test
public void test() throws IOException {
String obj = "{\n" +
" \"field1\": \"value1\",\n" +
" \"field2\": 3,\n" +
" \"field3\": {\n" +
" \"field3_1\": \"value3_1\",\n" +
" \"field3_2\": \"value3_2\"\n" +
" }\n" +
"}";
ObjectMapper jsonMapper = new DefaultObjectMapper();
TypeReference<Map<String, Object>> type = new TypeReference<Map<String, Object>>(){};
Map<String, Object> map = jsonMapper.readValue(obj, type);
assertEquals(map.get("field2"), 3); // just check whether the object is created properly
JsonSerDe<Map<String, Object>> serde = new JsonSerDe<Map<String, Object>>();
byte[] bytes = serde.serialize(map);
Map<String, Object> map2 = jsonMapper.readValue(bytes, type);
assertEquals(map, map2);
map2 = serde.deserialize(bytes);
assertEquals(map, map2);
assertEquals(obj.replaceAll("[\\t\\r\\n ]", ""), serde.toString(bytes));
}
}
| 1,313 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/message/TestMessageSet.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.message;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.thrift.TMessageSet;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
public class TestMessageSet {
public static List<Message> createMessageList(List<String> routingKeyList, List<String> payloadList) {
List<Message> messageList = new LinkedList<Message>();
for (int i = 0; i < routingKeyList.size(); ++i) {
Message message = new Message(
routingKeyList.get(i),
payloadList.get(i).getBytes());
messageList.add(message);
}
return messageList;
}
private List<String> payloadList;
private List<String> routingKeyList;
private List<Message> messageList;
@Before
public void generateMessages() {
payloadList = new LinkedList<String>();
routingKeyList = new LinkedList<String>();
for (int i = 0; i < 10; ++i) {
routingKeyList.add("routingKey" + i);
payloadList.add("payload" + i);
}
messageList = createMessageList(routingKeyList, payloadList);
}
@Test
public void testEmptyBuilder() {
TMessageSet messageSet = new MessageSetBuilder(new ClientConfig()).build();
assertEquals(messageSet.getNumMessages(), 0);
}
@Test
public void testBuilder() throws IOException {
TMessageSet messageSet = buildMessageSet();
assertEquals(messageSet.getCompression(), 1);
byte[] bytePayload = messageSet.getMessages();
byte[] payload = MessageSetBuilder.createPayload(messageList, Compression.LZF);
assertEquals(bytePayload.length, payload.length);
for (int i = 0; i < bytePayload.length; ++i) {
assertEquals(bytePayload[i], payload[i]);
}
}
private TMessageSet buildMessageSet() {
MessageSetBuilder builder = new MessageSetBuilder(new ClientConfig());
for (Message message : messageList) {
builder.withMessage(message.getRoutingKey(), message.getPayload());
}
return builder.build();
}
@Test
public void testReader() throws Exception {
TMessageSet messageSet = buildMessageSet();
MessageSetReader reader = new MessageSetReader(messageSet);
assertTrue(reader.checkCRC());
SerDe<String> serde = new StringSerDe();
int i = 0;
for (Message message : reader) {
assertEquals(message.getRoutingKey(), "routingKey" + i);
assertEquals(serde.deserialize(message.getPayload()), "payload" + i);
++i;
}
assertEquals(i, 10);
}
@Test
public void testReaderException() throws IOException {
MessageSetBuilder builder = new MessageSetBuilder(new ClientConfig()).withCompression(Compression.NO);
for (Message message : messageList) {
builder.withMessage(message.getRoutingKey(), message.getPayload());
}
TMessageSet messageSet = builder.build();
byte[] b = messageSet.getMessages();
// corrupt the first four bytes so the reader cannot decode any message
b[0] = (byte) 0xff;
b[1] = (byte) 0xff;
b[2] = (byte) 0xff;
b[3] = (byte) 0xff;
MessageSetReader reader = new MessageSetReader(messageSet);
//assertTrue(reader.checkCRC());
SerDe<String> serde = new StringSerDe();
int i = 0;
for (Message message : reader) {
assertNull(message);
}
assertEquals(i, 0);
}
}
| 1,314 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/client/Test.java
|
package com.netflix.suro.client;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.message.Message;
import java.util.Properties;
public class Test {
public static void main(String[] args) throws Exception {
Properties prop = new Properties();
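// Standalone example: a synchronous client sending uncompressed messages to a collector on localhost:7101; the commented properties show the asynchronous, file-queue-backed alternative.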
prop.setProperty(ClientConfig.LB_TYPE, "static");
prop.setProperty(ClientConfig.LB_SERVER, "localhost:7101");
prop.setProperty(ClientConfig.CLIENT_TYPE, "sync");
prop.setProperty(ClientConfig.COMPRESSION, "0");
// prop.setProperty(ClientConfig.CLIENT_TYPE, "async");
// prop.setProperty(ClientConfig.ASYNC_SENDER_THREADS, "3");
// prop.setProperty(ClientConfig.ASYNC_BATCH_SIZE, "2");
// prop.setProperty(ClientConfig.ASYNC_TIMEOUT, "1000");
// prop.setProperty(ClientConfig.ASYNC_QUEUE_TYPE, "file");
// prop.setProperty(ClientConfig.ASYNC_FILEQUEUE_PATH, "c:/tmp/queue");
SuroClient client = new SuroClient(prop);
int num = 1000000;
for (int i = 1; i <= num; i++) {
byte[] bytes = ("message #" + i).getBytes();
client.send(new Message("document-routing", bytes));
}
System.out.println("1.______________lost:" + client.getLostMessageCount() + " pending:" + client.getNumOfPendingMessages() + " sent:" + client.getSentMessageCount());
Thread.sleep(10000);
client.shutdown();
System.out.println("2.______________lost:" + client.getLostMessageCount() + " pending:" + client.getNumOfPendingMessages() + " sent:" + client.getSentMessageCount());
}
}
| 1,315 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/client/TestSyncSuroClient.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.client;
import com.google.inject.Injector;
import com.netflix.governator.configuration.PropertiesConfigurationProvider;
import com.netflix.governator.guice.BootstrapBinder;
import com.netflix.governator.guice.BootstrapModule;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.loadbalancer.ILoadBalancer;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.SuroServer4Test;
import com.netflix.suro.connection.StaticLoadBalancer;
import com.netflix.suro.connection.TestConnectionPool;
import com.netflix.suro.message.Message;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.util.List;
import java.util.Properties;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestSyncSuroClient {
private Injector injector;
private List<SuroServer4Test> servers;
@Before
public void setup() throws Exception {
servers = TestConnectionPool.startServers(3);
final Properties props = new Properties();
props.setProperty(ClientConfig.LB_SERVER, TestConnectionPool.createConnectionString(servers));
props.setProperty(ClientConfig.MINIMUM_RECONNECT_TIME_INTERVAL, "1");
props.setProperty(ClientConfig.RECONNECT_INTERVAL, "1");
props.setProperty(ClientConfig.RECONNECT_TIME_INTERVAL, "1");
props.setProperty(ClientConfig.APP, "app");
injector = LifecycleInjector.builder()
.withBootstrapModule(new BootstrapModule() {
@Override
public void configure(BootstrapBinder binder) {
binder.bindConfigurationProvider().toInstance(new PropertiesConfigurationProvider(props));
binder.bind(ILoadBalancer.class).to(StaticLoadBalancer.class);
}
}).build().createInjector();
injector.getInstance(LifecycleManager.class).start();
}
@After
public void tearDown() throws Exception {
TestConnectionPool.shutdownServers(servers);
injector.getInstance(LifecycleManager.class).close();
}
@Test
public void test() {
SyncSuroClient client = injector.getInstance(SyncSuroClient.class);
client.send(TestConnectionPool.createMessageSet(100));
client.send(new Message("routingKey", "testMessage".getBytes()));
assertEquals(client.getSentMessageCount(), 101);
TestConnectionPool.checkMessageSetCount(servers, 2, false);
TestConnectionPool.checkMessageCount(servers, 101);
}
@Test
public void testRetry() throws InterruptedException {
servers.get(0).setTryLater();
SyncSuroClient client = injector.getInstance(SyncSuroClient.class);
for (int i = 0; i < 10; ++i) {
client.send(TestConnectionPool.createMessageSet(100));
}
TestConnectionPool.checkMessageSetCount(servers, 10, false);
assertEquals(client.getSentMessageCount(), 1000);
assertTrue(client.getRetriedCount() > 0);
}
@Test
public void testLost() throws InterruptedException {
SyncSuroClient client = injector.getInstance(SyncSuroClient.class);
for (SuroServer4Test c : servers) {
c.setTryLater();
}
for (int i = 0; i < 10; ++i) {
client.send(TestConnectionPool.createMessageSet(100));
}
assertEquals(client.getSentMessageCount(), 0);
assertEquals(client.getLostMessageCount(), 1000);
}
}
| 1,316 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/client
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/client/async/TestAsyncSuroSender.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.client.async;
import com.google.inject.Injector;
import com.netflix.governator.configuration.PropertiesConfigurationProvider;
import com.netflix.governator.guice.BootstrapBinder;
import com.netflix.governator.guice.BootstrapModule;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.loadbalancer.ILoadBalancer;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.SuroServer4Test;
import com.netflix.suro.connection.ConnectionPool;
import com.netflix.suro.connection.StaticLoadBalancer;
import com.netflix.suro.connection.TestConnectionPool;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestAsyncSuroSender {
private Injector injector;
private List<SuroServer4Test> servers;
@Before
public void setup() throws Exception {
servers = TestConnectionPool.startServers(3);
final Properties props = new Properties();
props.setProperty(ClientConfig.LB_SERVER, TestConnectionPool.createConnectionString(servers));
props.setProperty(ClientConfig.MINIMUM_RECONNECT_TIME_INTERVAL, "1");
props.setProperty(ClientConfig.RECONNECT_INTERVAL, "1");
props.setProperty(ClientConfig.RECONNECT_TIME_INTERVAL, "1");
props.setProperty(ClientConfig.APP, "app");
injector = LifecycleInjector.builder()
.withBootstrapModule(new BootstrapModule() {
@Override
public void configure(BootstrapBinder binder) {
binder.bindConfigurationProvider().toInstance(new PropertiesConfigurationProvider(props));
binder.bind(ILoadBalancer.class).to(StaticLoadBalancer.class);
}
}).build().createInjector();
injector.getInstance(LifecycleManager.class).start();
}
@After
public void tearDown() throws Exception {
TestConnectionPool.shutdownServers(servers);
injector.getInstance(LifecycleManager.class).close();
}
@Test
public void test() {
AsyncSuroClient client = injector.getInstance(AsyncSuroClient.class);
AsyncSuroSender sender = new AsyncSuroSender(
TestConnectionPool.createMessageSet(100),
client,
injector.getInstance(ClientConfig.class));
sender.run();
assertEquals(client.getSentMessageCount(), 100);
TestConnectionPool.checkMessageSetCount(servers, 1, false);
}
@Test
public void testMultithread() throws InterruptedException {
AsyncSuroClient client = injector.getInstance(AsyncSuroClient.class);
ExecutorService executors = Executors.newFixedThreadPool(3);
for (int i = 0; i < 10; ++i) {
executors.submit(new AsyncSuroSender(
TestConnectionPool.createMessageSet(100),
client,
injector.getInstance(ClientConfig.class)));
}
executors.shutdown();
executors.awaitTermination(10, TimeUnit.SECONDS);
TestConnectionPool.checkMessageSetCount(servers, 10, true);
assertEquals(client.getSentMessageCount(), 1000);
}
@Test
public void testRetry() throws InterruptedException {
AsyncSuroClient client = injector.getInstance(AsyncSuroClient.class);
ExecutorService executors = Executors.newFixedThreadPool(3);
for (int i = 0; i < 3; ++i) {
executors.submit(new AsyncSuroSender(
TestConnectionPool.createMessageSet(100),
client,
injector.getInstance(ClientConfig.class)));
}
executors.shutdown();
executors.awaitTermination(10, TimeUnit.SECONDS);
TestConnectionPool.checkMessageSetCount(servers, 3, false);
servers.get(0).setTryLater();
executors = Executors.newFixedThreadPool(3);
for (int i = 0; i < 7; ++i) {
executors.submit(new AsyncSuroSender(
TestConnectionPool.createMessageSet(100),
client,
injector.getInstance(ClientConfig.class)));
}
executors.shutdown();
executors.awaitTermination(10, TimeUnit.SECONDS);
TestConnectionPool.checkMessageSetCount(servers, 10, false);
assertEquals(client.getSentMessageCount(), 1000);
assertTrue(client.getRetriedCount() > 0);
}
@Test
public void testRestore() throws InterruptedException {
AsyncSuroClient client = injector.getInstance(AsyncSuroClient.class);
for (SuroServer4Test c : servers) {
c.setTryLater();
}
AsyncSuroSender sender = new AsyncSuroSender(
TestConnectionPool.createMessageSet(600),
client,
injector.getInstance(ClientConfig.class));
sender.run();
assertEquals(client.getSentMessageCount(), 0);
assertTrue(client.getRestoredMessageCount() >= 600);
for (SuroServer4Test c : servers) {
c.cancelTryLater();
}
injector.getInstance(ConnectionPool.class).populateClients();
// wait until client restored
while (client.getSentMessageCount() < 600) {
System.out.println("sent: " + client.getSentMessageCount());
Thread.sleep(1000);
}
client.shutdown();
TestConnectionPool.checkMessageCount(servers, 600);
}
}
| 1,317 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/client
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/client/async/TestAsyncSuroClientWithNonExistentFilePath.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.client.async;
import com.google.inject.Injector;
import com.google.inject.ProvisionException;
import com.netflix.governator.configuration.PropertiesConfigurationProvider;
import com.netflix.governator.guice.BootstrapBinder;
import com.netflix.governator.guice.BootstrapModule;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.loadbalancer.ILoadBalancer;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.SuroServer4Test;
import com.netflix.suro.connection.StaticLoadBalancer;
import com.netflix.suro.connection.TestConnectionPool;
import com.netflix.suro.message.Message;
import org.apache.commons.io.FileUtils;
import org.junit.After;
import org.junit.Test;
import java.io.File;
import java.util.List;
import java.util.Properties;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public class TestAsyncSuroClientWithNonExistentFilePath {
private Injector injector;
private List<SuroServer4Test> servers;
private final static String NON_EXISTENT_PATH = "/tmp/should_not_existing_at_all_"+System.currentTimeMillis();
private final static String TEMP_FILE = "/tmp/tempFile";
private void setupFile(final Properties props, String filePath) throws Exception {
servers = TestConnectionPool.startServers(3);
props.put(ClientConfig.LB_SERVER, TestConnectionPool.createConnectionString(servers));
props.put(ClientConfig.ASYNC_FILEQUEUE_PATH, filePath);
props.put(ClientConfig.ASYNC_QUEUE_TYPE, "file");
injector = LifecycleInjector.builder()
.withBootstrapModule(new BootstrapModule() {
@Override
public void configure(BootstrapBinder binder) {
binder.bindConfigurationProvider().toInstance(new PropertiesConfigurationProvider(props));
binder.bind(ILoadBalancer.class).to(StaticLoadBalancer.class);
}
}).build().createInjector();
injector.getInstance(LifecycleManager.class).start();
}
@After
public void tearDown() throws Exception {
FileUtils.deleteDirectory(new File(NON_EXISTENT_PATH));
FileUtils.deleteQuietly(new File(TEMP_FILE));
TestConnectionPool.shutdownServers(servers);
injector.getInstance(LifecycleManager.class).close();
assertFalse(String.format("The directory %s should be deleted", NON_EXISTENT_PATH), new File(NON_EXISTENT_PATH).exists());
}
@Test
public void testFileBaseQueueShouldCreateNonExistentFile() throws Exception {
assertFalse(String.format("The file %s shouldn't exist", NON_EXISTENT_PATH), new File(NON_EXISTENT_PATH).exists());
setupFile(new Properties(), NON_EXISTENT_PATH);
AsyncSuroClient client = injector.getInstance(AsyncSuroClient.class);
assertTrue(String.format("The path %s should be created", NON_EXISTENT_PATH), new File(NON_EXISTENT_PATH).exists());
assertTrue(String.format("The path %s should be a directory", NON_EXISTENT_PATH), new File(NON_EXISTENT_PATH).isDirectory());
for (int i = 0; i < 3000; ++i) {
client.send(new Message("routingKey", "testMessage".getBytes()));
}
client.shutdown();
TestConnectionPool.checkMessageCount(servers, 3000);
assertEquals(client.getSentMessageCount(), 3000);
}
@Test
public void testExistingFileWillResultInFailure() throws Exception {
File tempFile = new File(TEMP_FILE);
tempFile.createNewFile();
assertTrue(String.format("The temp file %s should be created", tempFile), tempFile.exists() && tempFile.isFile());
setupFile(new Properties(), TEMP_FILE);
try {
injector.getInstance(AsyncSuroClient.class);
fail("The creation of Suro client should fail due to invalid path");
} catch (ProvisionException e) {
assertTrue("The unexpected error message: "+e.getMessage(), e.getMessage().contains("IllegalStateException"));
}
}
}
| 1,318 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/client
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/client/async/TestAsyncSuroClient.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.client.async;
import com.google.inject.Injector;
import com.netflix.governator.configuration.PropertiesConfigurationProvider;
import com.netflix.governator.guice.BootstrapBinder;
import com.netflix.governator.guice.BootstrapModule;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.loadbalancer.ILoadBalancer;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.SuroServer4Test;
import com.netflix.suro.connection.ConnectionPool;
import com.netflix.suro.connection.StaticLoadBalancer;
import com.netflix.suro.connection.TestConnectionPool;
import com.netflix.suro.message.Message;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
public class TestAsyncSuroClient {
@Rule
public TemporaryFolder tempDir = new TemporaryFolder();
private Injector injector;
private List<SuroServer4Test> servers;
@Before
public void setup() throws Exception {
servers = TestConnectionPool.startServers(3);
}
private void setupMemory(final Properties props) throws Exception {
injector = LifecycleInjector.builder()
.withBootstrapModule(new BootstrapModule() {
@Override
public void configure(BootstrapBinder binder) {
binder.bindConfigurationProvider().toInstance(new PropertiesConfigurationProvider(props));
binder.bind(ILoadBalancer.class).to(StaticLoadBalancer.class);
}
}).build().createInjector();
injector.getInstance(LifecycleManager.class).start();
}
private void setupFile(final Properties props) throws Exception {
props.put(ClientConfig.LB_SERVER, TestConnectionPool.createConnectionString(servers));
props.put(ClientConfig.ASYNC_FILEQUEUE_PATH, tempDir.newFolder().getAbsolutePath());
props.put(ClientConfig.ASYNC_QUEUE_TYPE, "file");
injector = LifecycleInjector.builder()
.withBootstrapModule(new BootstrapModule() {
@Override
public void configure(BootstrapBinder binder) {
binder.bindConfigurationProvider().toInstance(new PropertiesConfigurationProvider(props));
binder.bind(ILoadBalancer.class).to(StaticLoadBalancer.class);
}
}).build().createInjector();
injector.getInstance(LifecycleManager.class).start();
}
@After
public void tearDown() throws Exception {
TestConnectionPool.shutdownServers(servers);
injector.getInstance(LifecycleManager.class).close();
}
@Test
public void testMemory() throws Exception {
Properties props = new Properties();
props.put(ClientConfig.LB_SERVER, TestConnectionPool.createConnectionString(servers));
setupMemory(props);
AsyncSuroClient client = injector.getInstance(AsyncSuroClient.class);
int messageCount = 10;
for (int i = 0; i < messageCount; ++i) {
client.send(new Message("routingKey", "testMessage".getBytes()));
}
client.shutdown();
TestConnectionPool.checkMessageCount(servers, messageCount);
assertEquals(client.getSentMessageCount(), messageCount);
}
@Test
public void testFile() throws Exception {
setupFile(new Properties());
AsyncSuroClient client = injector.getInstance(AsyncSuroClient.class);
int messageCount = 10;
for (int i = 0; i < messageCount; ++i) {
client.send(new Message("routingKey", "testMessage".getBytes()));
}
client.shutdown();
TestConnectionPool.checkMessageCount(servers, messageCount);
assertEquals(client.getSentMessageCount(), messageCount);
}
@Test
public void testRestore() throws Exception {
Properties props = new Properties();
props.setProperty(ClientConfig.RETRY_COUNT, "1");
props.setProperty(ClientConfig.ASYNC_TIMEOUT, "1");
setupFile(props);
int messageCount = 3;
AsyncSuroClient client = injector.getInstance(AsyncSuroClient.class);
final CountDownLatch restoreLatch = new CountDownLatch(messageCount / 3);
final CountDownLatch sentLatch = new CountDownLatch(messageCount);
client.addListener(new AsyncSuroClient.Listener() {
@Override
public void sentCallback(int count) {
for (int i = 0; i < count; ++i) {
sentLatch.countDown();
}
}
@Override
public void restoredCallback() {
restoreLatch.countDown();
}
@Override
public void lostCallback(int count) {
fail("should not be lost");
}
@Override
public void retriedCallback() {
}
});
for (SuroServer4Test c : servers) {
c.setTryLater();
}
for (int i = 0; i < messageCount; ++i) {
client.send(new Message("routingKey", "testMessage".getBytes()));
}
restoreLatch.await(10, TimeUnit.SECONDS);
assertEquals(restoreLatch.getCount(), 0);
for (SuroServer4Test c : servers) {
c.cancelTryLater();
}
injector.getInstance(ConnectionPool.class).populateClients();
sentLatch.await(60, TimeUnit.SECONDS);
assertEquals(client.getSentMessageCount(), messageCount);
assertEquals(client.getLostMessageCount(), 0);
client.shutdown();
TestConnectionPool.checkMessageCount(servers, messageCount);
}
@Test
public void shouldBeBlockedOnJobQueueFull() throws Exception {
for (SuroServer4Test c : servers) {
c.setHoldConnection();
}
Properties props = new Properties();
props.setProperty(ClientConfig.ASYNC_JOBQUEUE_CAPACITY, "1");
props.setProperty(ClientConfig.ASYNC_SENDER_THREADS, "1");
props.setProperty(ClientConfig.CONNECTION_TIMEOUT, Integer.toString(Integer.MAX_VALUE));
setupFile(props);
AsyncSuroClient client = injector.getInstance(AsyncSuroClient.class);
for (int i = 0; i < 3000; ++i) {
client.send(new Message("routingKey", "testMessage".getBytes()));
}
client.shutdown();
assertEquals(client.queuedMessageSetCount, 2);
}
}
| 1,319 |
0 |
Create_ds/suro/suro-client/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/test/java/com/netflix/suro/jackson/TestJackson.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.jackson;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.BeanProperty;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.InjectableValues;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.Maps;
import org.junit.Test;
import java.io.IOException;
import java.util.Map;
import static junit.framework.TestCase.assertEquals;
public class TestJackson {
@Test
public void test() throws IOException {
String spec = "{\"a\":\"aaa\", \"b\":\"bbb\"}";
ObjectMapper mapper = new DefaultObjectMapper();
final Map<String, Object> injectables = Maps.newHashMap();
injectables.put("test", "test");
injectables.put("b", "binjected");
mapper.setInjectableValues(new InjectableValues() {
@Override
public Object findInjectableValue(
Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance
) {
return injectables.get(valueId);
}
});
TestClass test = mapper.readValue(spec, new TypeReference<TestClass>(){});
assertEquals(test.getTest(), "test");
assertEquals(test.getB(), "bbb");
}
public static class TestClass {
@JacksonInject("test")
private String test;
private String a;
private String b;
@JsonCreator
public TestClass(
@JsonProperty("a") String a,
@JsonProperty("b") @JacksonInject("b") String b) {
this.a = a;
this.b = b;
}
public String getTest() { return test; }
public String getA() { return a; }
public String getB() { return b; }
}
}
| 1,320 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/ClientConfig.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro;
import com.netflix.governator.annotations.Configuration;
import com.netflix.suro.input.JsonLog4jFormatter;
import com.netflix.suro.message.serde.JsonSerDe;
import com.netflix.suro.message.SerDe;
import com.netflix.suro.message.serde.SerDeFactory;
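/**
* Client-side configuration. Every field is backed by a Governator {@link Configuration} property,
* so values can be supplied through the Properties object used to create the client.
*
* A minimal sketch of setting a few of these keys (the server address and app name below are
* placeholders; the Properties-based SuroClient constructor is the one used elsewhere in this module):
* <pre>{@code
* Properties props = new Properties();
* props.setProperty(ClientConfig.LB_SERVER, "localhost:7101");
* props.setProperty(ClientConfig.APP, "myApp");
* SuroClient client = new SuroClient(props);
* }</pre>
*/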
public class ClientConfig {
public static final String CONNECTION_TIMEOUT = "SuroClient.connectionTimeout";
@Configuration(CONNECTION_TIMEOUT)
private int connectionTimeout = 5000; // millisecond
public int getConnectionTimeout() {
return connectionTimeout;
}
public static final String ENABLE_OUTPOOL = "SuroClient.enableOutPool";
@Configuration(ENABLE_OUTPOOL)
private boolean enableOutPool = false;
public boolean getEnableOutPool() { return enableOutPool; }
public static final String CONNECTION_SWEEP_INTERVAL = "SuroClient.connectionSweepInterval";
@Configuration(CONNECTION_SWEEP_INTERVAL)
private int connectionSweepInterval = 3600; // second
public int getConnectionSweepInterval() {
return connectionSweepInterval;
}
public static final String LOG4J_FORMATTER = "SuroClient.log4jFormatter";
@Configuration(LOG4J_FORMATTER)
private String log4jFormatter = JsonLog4jFormatter.class.toString();
public String getLog4jFormatter() {
return log4jFormatter;
}
public static final String LOG4J_DATETIMEFORMAT = "SuroClient.log4jDateTimeFormat";
@Configuration(LOG4J_DATETIMEFORMAT)
private String log4jDateTimeFormat = "yyyy-MM-dd'T'HH:mm:ss,SSS";
public String getLog4jDateTimeFormat() {
return log4jDateTimeFormat;
}
public static final String LOG4J_ROUTING_KEY = "SuroClient.log4jRoutingKey";
@Configuration(LOG4J_ROUTING_KEY)
private String log4jRoutingKey = "";
public String getLog4jRoutingKey() {
return log4jRoutingKey;
}
public static final String APP = "SuroClient.app";
@Configuration(APP)
private String app = "defaultApp";
public String getApp() {
return app;
}
public static final String RETRY_COUNT = "SuroClient.retryCount";
@Configuration(RETRY_COUNT)
private int retryCount = 5;
public int getRetryCount() {
return retryCount;
}
public static final String SERDE = "SuroClient.serDe";
@Configuration(SERDE)
private String serde = JsonSerDe.class.getCanonicalName();
public SerDe getSerDe() {
return SerDeFactory.create(serde);
}
public static final String COMPRESSION = "SuroClient.compression";
@Configuration(COMPRESSION)
private int compression = 1;
public int getCompression() {
return compression;
}
public static final String CLIENT_TYPE = "SuroClient.clientType";
@Configuration(CLIENT_TYPE)
private String clientType = "async";
public String getClientType() {
return clientType;
}
public static final String ASYNC_SENDER_THREADS = "SuroClient.asyncSenderThreads";
@Configuration(ASYNC_SENDER_THREADS)
private int senderThreads = 3;
public int getAsyncSenderThreads() {
return senderThreads;
}
public static final String ASYNC_BATCH_SIZE = "SuroClient.asyncBatchSize";
@Configuration(ASYNC_BATCH_SIZE)
private int asyncBatchSize = 200;
public int getAsyncBatchSize() {
return asyncBatchSize;
}
public static final String ASYNC_TIMEOUT = "SuroClient.asyncTimeout";
@Configuration(ASYNC_TIMEOUT)
private int asyncTimeout = 5000;
public int getAsyncTimeout() {
return asyncTimeout;
}
public static final String ASYNC_QUEUE_TYPE = "SuroClient.asyncQueueType";
@Configuration(ASYNC_QUEUE_TYPE)
private String asyncQueueType = "memory";
public String getAsyncQueueType() {
return asyncQueueType;
}
public static final String ASYNC_MEMORYQUEUE_CAPACITY = "SuroClient.asyncMemoryQueueCapacity";
@Configuration(ASYNC_MEMORYQUEUE_CAPACITY)
private int asyncMemoryQueueCapacity = 10000;
public int getAsyncMemoryQueueCapacity() {
return asyncMemoryQueueCapacity;
}
public static final String ASYNC_JOBQUEUE_CAPACITY = "SuroClient.asyncJobQueueCapacity";
@Configuration(ASYNC_JOBQUEUE_CAPACITY)
private int asyncJobQueueCapacity = 0;
public int getAsyncJobQueueCapacity() {
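// 0 (the default) means the capacity is derived as asyncMemoryQueueCapacity / asyncBatchSize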
if (asyncJobQueueCapacity == 0) {
return asyncMemoryQueueCapacity / asyncBatchSize;
} else {
return asyncJobQueueCapacity;
}
}
public static final String ASYNC_FILEQUEUE_PATH = "SuroClient.asyncFileQueuePath";
@Configuration(ASYNC_FILEQUEUE_PATH)
private String asyncFileQueuePath = "/logs/suroclient";
public String getAsyncFileQueuePath() {
return asyncFileQueuePath;
}
public static final String ASYNC_FILEQUEUE_NAME = "SuroClient.asyncFileQueueName";
@Configuration(ASYNC_FILEQUEUE_NAME)
private String asyncFileQueueName = "default";
public String getAsyncFileQueueName() {
return asyncFileQueueName;
}
public static final String FILEQUEUE_GC_PERIOD = "SuroClient.asyncFileQueueGCPeriod";
@Configuration(FILEQUEUE_GC_PERIOD)
private String fileQueueGCPeriod = "PT1m";
public String getAsyncFileQueueGCPeriod() {
return fileQueueGCPeriod;
}
public static final String FILEQUEUE_SIZELIMIT = "SuroClient.fileQueueSizeLimit";
@Configuration(FILEQUEUE_SIZELIMIT)
private long fileQueueSizeLimit = 10L * 1024L * 1024L * 1024L; // 10 GB
public long getFileQueueSizeLimit() { return fileQueueSizeLimit; }
public static final String LB_TYPE = "SuroClient.loadBalancerType";
@Configuration(LB_TYPE)
private String loadBalancerType;
public String getLoadBalancerType() {
return loadBalancerType;
}
public static final String LB_SERVER = "SuroClient.loadBalancerServer";
@Configuration(LB_SERVER)
private String loadBalancerServer;
public String getLoadBalancerServer() {
return loadBalancerServer;
}
public static final String MINIMUM_RECONNECT_TIME_INTERVAL = "SuroClient.minimum.reconnect.timeInterval";
@Configuration(MINIMUM_RECONNECT_TIME_INTERVAL)
private int minimumReconnectTimeInterval = 90000;
public int getMinimumReconnectTimeInterval() { return minimumReconnectTimeInterval; }
public static final String RECONNECT_INTERVAL = "SuroClient.reconnect.interval";
@Configuration(RECONNECT_INTERVAL)
private int reconnectInterval = 240;
public int getReconnectInterval() { return reconnectInterval; }
public static final String RECONNECT_TIME_INTERVAL = "SuroClient.reconnect.timeInterval";
@Configuration(RECONNECT_TIME_INTERVAL)
private int reconnectTimeInterval = 300000;
public int getReconnectTimeInterval() { return reconnectTimeInterval; }
}
| 1,321 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/connection/EurekaLoadBalancer.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.connection;
import com.google.inject.Inject;
import com.netflix.client.config.CommonClientConfigKey;
import com.netflix.client.config.DefaultClientConfigImpl;
import com.netflix.client.config.IClientConfig;
import com.netflix.governator.guice.lazy.LazySingleton;
import com.netflix.loadbalancer.DynamicServerListLoadBalancer;
import com.netflix.loadbalancer.Server;
import com.netflix.niws.loadbalancer.DiscoveryEnabledNIWSServerList;
import com.netflix.suro.ClientConfig;
import javax.annotation.PreDestroy;
import java.util.List;
/**
* A load balancer that depends on <a href="https://github.com/Netflix/eureka">Netflix Eureka</a> to find
* all the registered server instances.
*
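* For example, the load balancer server setting is a single vipAddress:port pair; the vip name and
* port below are placeholders, and the property key comes from {@code ClientConfig.LB_SERVER}:
* <pre>{@code
* SuroClient.loadBalancerServer=suro-server-vip:7101
* }</pre>
*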
* @author jbae
*/
@LazySingleton
public class EurekaLoadBalancer extends DynamicServerListLoadBalancer {
private final int port;
/**
* @param config client configuration whose load balancer server setting contains the vipAddress:port pair
*/
@Inject
public EurekaLoadBalancer(ClientConfig config) {
String[] vipAddress_port = config.getLoadBalancerServer().split(":");
if (vipAddress_port.length != 2) {
throw new IllegalArgumentException(String.format(
"EurekaLoadBalancer server should be formatted vipAddress:port ('%s')",
config.getLoadBalancerServer()));
}
this.port = Integer.parseInt(vipAddress_port[1]);
IClientConfig loadBalancerConfig = new DefaultClientConfigImpl();
loadBalancerConfig.loadProperties("suroClient");
loadBalancerConfig.setProperty(CommonClientConfigKey.DeploymentContextBasedVipAddresses, vipAddress_port[0]);
loadBalancerConfig.setProperty(CommonClientConfigKey.NIWSServerListClassName, DiscoveryEnabledNIWSServerList.class.getName());
super.initWithNiwsConfig(loadBalancerConfig);
}
/**
* Called from {@link ConnectionPool} to retrieve the server to communicate with.
*
* @param key load balancer key; may be null
* @return the chosen server with the configured port applied, or null if no server is available
*/
@Override
public Server chooseServer(Object key) {
Server server = super.chooseServer(key);
if (server == null) {
return null;
}
server.setPort(port);
return server;
}
@Override
public List<Server> getServerList(boolean availableOnly) {
List<Server> serverList = super.getServerList(availableOnly);
for (Server s : serverList) {
s.setPort(port);
}
return serverList;
}
@PreDestroy
public void clear() {
cancelPingTask();
}
}
| 1,322 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/connection/ConnectionPool.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.connection;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.inject.Inject;
import com.netflix.governator.guice.lazy.LazySingleton;
import com.netflix.loadbalancer.ILoadBalancer;
import com.netflix.loadbalancer.Server;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.thrift.Result;
import com.netflix.suro.thrift.ServiceStatus;
import com.netflix.suro.thrift.SuroServer;
import com.netflix.suro.thrift.TMessageSet;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.PreDestroy;
import java.util.*;
import java.util.concurrent.*;
/**
* Pool of Thrift connections to suro-server.
* After creating connections to all suro-servers discovered by {@link ILoadBalancer}, a {@code ConnectionPool} returns
* a pooled connection whenever the client requests one. When no pooled connection is available and out-of-pool
* connections are enabled, {@code ConnectionPool} creates a new connection immediately; such a connection is called an
* OutPool connection.
*
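* A minimal usage sketch (illustrative only; the pool reference, the {@code TMessageSet} construction
* and full error handling are elided):
* <pre>{@code
* ConnectionPool.SuroConnection connection = pool.chooseConnection();
* try {
*     connection.send(messageSet);
*     pool.endConnection(connection);
* } catch (TException e) {
*     pool.markServerDown(connection);
* }
* }</pre>
*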
* @author jbae
*/
@LazySingleton
public class ConnectionPool {
private static final Logger logger = LoggerFactory.getLogger(ConnectionPool.class);
private Map<Server, SuroConnection> connectionPool = new ConcurrentHashMap<Server, SuroConnection>();
private Set<Server> serverSet = Collections.newSetFromMap(new ConcurrentHashMap<Server, Boolean>());
private List<SuroConnection> connectionList = Collections.synchronizedList(new LinkedList<SuroConnection>());
private final ClientConfig config;
private final ILoadBalancer lb;
private ScheduledExecutorService connectionSweeper;
private ExecutorService newConnectionBuilder;
private BlockingQueue<SuroConnection> connectionQueue = new LinkedBlockingQueue<SuroConnection>();
private CountDownLatch populationLatch;
/**
*
* @param config Client configuration
* @param lb LoadBalancer implementation
*/
@Inject
public ConnectionPool(ClientConfig config, ILoadBalancer lb) {
this.config = config;
this.lb = lb;
connectionSweeper = Executors.newScheduledThreadPool(1);
newConnectionBuilder = Executors.newFixedThreadPool(1);
Monitors.registerObject(this);
populationLatch = new CountDownLatch(Math.min(lb.getServerList(true).size(), config.getAsyncSenderThreads()));
Executors.newSingleThreadExecutor().submit(new Runnable() {
@Override
public void run() {
populateClients();
}
});
try {
populationLatch.await(populationLatch.getCount() * config.getConnectionTimeout(), TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
logger.error("Exception on CountDownLatch awaiting: " + e.getMessage(), e);
}
logger.info("ConnectionPool population finished with the size: " + getPoolSize()
+ ", will continue up to: " + lb.getServerList(true).size());
}
@PreDestroy
public void shutdown() {
serverSet.clear();
connectionPool.clear();
connectionQueue.clear();
for (SuroConnection conn : connectionList) {
conn.disconnect();
}
connectionSweeper.shutdownNow();
newConnectionBuilder.shutdownNow();
}
/**
* @return number of connections in the pool
*/
@Monitor(name = "PoolSize", type = DataSourceType.GAUGE)
public int getPoolSize() {
return connectionList.size();
}
@Monitor(name = "OutPoolSize", type = DataSourceType.GAUGE)
private int outPoolSize = 0;
/**
* @return number of connections created out of the pool
*/
public int getOutPoolSize() {
return outPoolSize;
}
public void populateClients() {
for (Server server : lb.getServerList(true)) {
SuroConnection connection = new SuroConnection(server, config, true);
try {
connection.connect();
addConnection(server, connection, true);
logger.info(connection + " is added to SuroClientPool");
} catch (Exception e) {
logger.error("Error in connecting to " + connection + " message: " + e.getMessage(), e);
lb.markServerDown(server);
}
}
connectionSweeper.scheduleAtFixedRate(
new Runnable() {
@Override
public void run() {
removeConnection(Sets.difference(serverSet, new HashSet<Server>(lb.getServerList(true))));
}
},
config.getConnectionSweepInterval(),
config.getConnectionSweepInterval(),
TimeUnit.SECONDS);
}
@VisibleForTesting
protected void addConnection(Server server, SuroConnection connection, boolean inPool) {
if (inPool) {
connectionPool.put(server, connection);
if (populationLatch.getCount() > 0) {
populationLatch.countDown();
}
}
serverSet.add(server);
connectionList.add(connection);
}
private synchronized void removeConnection(Set<Server> removedServers) {
for (Server s : removedServers) {
serverSet.remove(s);
connectionPool.remove(s);
}
Iterator<SuroConnection> i = connectionQueue.iterator();
while (i.hasNext()) {
if (!serverSet.contains(i.next().getServer())) {
i.remove();
logger.info("connection was removed from the queue");
}
}
i = connectionList.iterator();
while (i.hasNext()) {
SuroConnection c = i.next();
if (!serverSet.contains(c.getServer())) {
c.disconnect();
i.remove();
}
}
}
/**
* Returns a connection for the client to use: first one returned to the queue, then one taken from the pool,
* and finally, if out-of-pool connections are enabled, a newly created one.
* @return a connection, or null if none could be obtained
*/
public SuroConnection chooseConnection() {
SuroConnection connection = connectionQueue.poll();
if (connection == null) {
connection = chooseFromPool();
}
if (config.getEnableOutPool()) {
synchronized (this) {
for (int i = 0; i < config.getRetryCount() && connection == null; ++i) {
Server server = lb.chooseServer(null);
if (server != null) {
connection = new SuroConnection(server, config, false);
try {
connection.connect();
++outPoolSize;
logger.info(connection + " is created out of the pool");
break;
} catch (Exception e) {
logger.error("Error in connecting to " + connection + " message: " + e.getMessage(), e);
lb.markServerDown(server);
}
}
}
}
}
if (connection == null) {
logger.error("No valid connection exists after " + config.getRetryCount() + " retries");
}
return connection;
}
private SuroConnection chooseFromPool() {
SuroConnection connection = null;
int count = 0;
while (connection == null) {
Server server = lb.chooseServer(null);
if (server != null) {
if (!serverSet.contains(server)) {
newConnectionBuilder.execute(createNewConnection(server, true));
} else {
connection = connectionPool.remove(server);
}
} else {
break;
}
++count;
if (count >= 10) {
logger.error("no connection available selected in 10 retries");
break;
}
}
return connection;
}
private Runnable createNewConnection(final Server server, final boolean inPool) {
return new Runnable() {
@Override
public void run() {
if (connectionPool.get(server) == null) {
SuroConnection connection = new SuroConnection(server, config, inPool);
try {
connection.connect();
addConnection(server, connection, inPool);
logger.info(connection + " is added to ConnectionPool");
} catch (Exception e) {
logger.error("Error in connecting to " + connection + " message: " + e.getMessage(), e);
lb.markServerDown(server);
}
}
}
};
}
/**
* When the client finishes communicating with the server, this method
* should be called to release the connection and return it to the pool.
* @param connection the connection to release; it may be swapped for a fresh one if it has expired
*/
public void endConnection(SuroConnection connection) {
if (connection != null && shouldChangeConnection(connection)) {
connection.initStat();
connectionPool.put(connection.getServer(), connection);
connection = chooseFromPool();
}
if (connection != null) {
connectionQueue.offer(connection);
}
}
/**
* Marks the server associated with the given connection as down.
* When the client fails to communicate over a connection, this method
* should be called to remove that server from the pool.
* @param connection the failed connection
*/
public void markServerDown(SuroConnection connection) {
if (connection != null) {
lb.markServerDown(connection.getServer());
removeConnection(new ImmutableSet.Builder<Server>().add(connection.getServer()).build());
}
}
private boolean shouldChangeConnection(SuroConnection connection) {
if (!connection.isInPool()) {
return false;
}
long now = System.currentTimeMillis();
long minimumTimeSpan = connection.getTimeUsed() + config.getMinimumReconnectTimeInterval();
return connectionExpired(connection, now, minimumTimeSpan);
}
private boolean connectionExpired(SuroConnection connection, long now, long minimumTimeSpan) {
return minimumTimeSpan <= now &&
(connection.getSentCount() >= config.getReconnectInterval() ||
connection.getTimeUsed() + config.getReconnectTimeInterval() <= now);
}
/**
* Thrift socket connection wrapper with configuration
*/
public static class SuroConnection {
private TTransport transport;
private SuroServer.Client client;
private final Server server;
private final ClientConfig config;
private final boolean inPool;
private int sentCount = 0;
private long timeUsed = 0;
/**
* @param server hostname and port information
* @param config properties including timeout, etc
* @param inPool whether this connection is in the pool or out of it
*/
public SuroConnection(Server server, ClientConfig config, boolean inPool) {
this.server = server;
this.config = config;
this.inPool = inPool;
}
public void connect() throws Exception {
TSocket socket = new TSocket(server.getHost(), server.getPort(), config.getConnectionTimeout());
socket.getSocket().setTcpNoDelay(true);
socket.getSocket().setKeepAlive(true);
socket.getSocket().setSoLinger(true, 0);
transport = new TFramedTransport(socket);
transport.open();
TProtocol protocol = new TBinaryProtocol(transport);
client = new SuroServer.Client(protocol);
ServiceStatus status = client.getStatus();
if (status != ServiceStatus.ALIVE) {
transport.close();
throw new RuntimeException(server + " IS NOT ALIVE!!!");
}
}
public void disconnect() {
try {
transport.flush();
} catch (TTransportException e) {
logger.error("Exception on disconnect: " + e.getMessage(), e);
} finally {
transport.close();
}
}
public Result send(TMessageSet messageSet) throws TException {
++sentCount;
if (sentCount == 1) {
timeUsed = System.currentTimeMillis();
}
return client.process(messageSet);
}
public Server getServer() { return server; }
/**
* @return how many times send() has been called since the last stat reset
*/
public int getSentCount() {
return sentCount;
}
/**
* Used for connection retention control.
* @return the timestamp (in milliseconds) of the first send() since the last stat reset, or 0 if unused
*/
public long getTimeUsed() {
return timeUsed;
}
public boolean isInPool() {
return inPool;
}
public void initStat() {
sentCount = 0;
timeUsed = 0;
}
@Override
public String toString() {
return server.getHostPort();
}
@Override
public boolean equals(Object o) {
if (o instanceof Server) {
return server.equals(o);
} else if (o instanceof SuroConnection) {
return server.equals(((SuroConnection) o).server);
} else {
return false;
}
}
@Override
public int hashCode() {
return server.hashCode();
}
}
}
| 1,323 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/connection/StaticLoadBalancer.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.connection;
import com.google.inject.Inject;
import com.netflix.client.config.DefaultClientConfigImpl;
import com.netflix.client.config.CommonClientConfigKey;
import com.netflix.client.config.IClientConfig;
import com.netflix.governator.guice.lazy.LazySingleton;
import com.netflix.loadbalancer.BaseLoadBalancer;
import com.netflix.loadbalancer.Server;
import com.netflix.suro.ClientConfig;
import javax.annotation.PreDestroy;
import java.util.ArrayList;
import java.util.List;
/**
* A load balancer that works on a static list of servers as defined in client configuration.
*
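* For example (host names and ports below are placeholders), the server list is read from
* {@code ClientConfig.LB_SERVER} ("SuroClient.loadBalancerServer") as a comma-separated list of
* hostname:port entries:
* <pre>{@code
* SuroClient.loadBalancerServer=host1:7101,host2:7101
* }</pre>
*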
* @author jbae
*/
@LazySingleton
public class StaticLoadBalancer extends BaseLoadBalancer {
/**
* @param config contains the server list, comma separated with the format
* hostname:port
*/
@Inject
public StaticLoadBalancer(ClientConfig config) {
List<Server> serverList = new ArrayList<Server>();
for (String s : config.getLoadBalancerServer().split(",")) {
String[] host_port = s.split(":");
serverList.add(new Server(host_port[0], Integer.parseInt(host_port[1])));
}
if (serverList.isEmpty()) {
throw new IllegalArgumentException("empty server list");
}
IClientConfig loadBalancerConfig = new DefaultClientConfigImpl();
loadBalancerConfig.loadProperties("suroClient");
loadBalancerConfig.setProperty(CommonClientConfigKey.NFLoadBalancerPingClassName, "com.netflix.suro.connection.SuroPing");
super.initWithNiwsConfig(loadBalancerConfig);
addServers(serverList);
}
@PreDestroy
public void clear() {
cancelPingTask();
setServersList(new ArrayList<Server>());
}
}
| 1,324 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/connection/SuroPing.java
|
package com.netflix.suro.connection;
import com.netflix.client.config.IClientConfig;
import com.netflix.loadbalancer.AbstractLoadBalancerPing;
import com.netflix.loadbalancer.Server;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import java.net.SocketException;
/**
* Simple implementation for checking if a single server is alive
*
* @author zdexter
*/
public class SuroPing extends AbstractLoadBalancerPing {
private static Logger logger = LoggerFactory.getLogger(SuroPing.class);
public SuroPing() {
}
private void close(@Nullable TTransport transport) {
if (transport == null) {
return;
}
transport.close();
}
public boolean isAlive(Server server) {
TSocket socket = null;
TFramedTransport transport = null;
try {
socket = new TSocket(server.getHost(), server.getPort(), 2000);
socket.getSocket().setTcpNoDelay(true);
socket.getSocket().setKeepAlive(true);
socket.getSocket().setSoLinger(true, 0);
transport = new TFramedTransport(socket);
transport.open();
return true;
} catch (TTransportException e) {
logger.warn("Ping {}", e.getMessage());
return false;
} catch (SocketException e) {
logger.warn("Ping {}", e.getMessage());
return false;
} finally {
close(transport);
close(socket);
}
}
@Override
public void initWithNiwsConfig(IClientConfig clientConfig) {
}
}
| 1,325 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/input/Log4jFormatter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.input;
import org.apache.log4j.spi.LoggingEvent;
public interface Log4jFormatter {
String format(LoggingEvent event);
String getRoutingKey();
}
| 1,326 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/input/Log4jAppender.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.input;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.client.SuroClient;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.SerDe;
import com.netflix.suro.message.StringSerDe;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Properties;
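/**
* Log4j appender that forwards logging events to a Suro server through a {@link SuroClient}.
* Each appender property maps to the corresponding {@link ClientConfig} key via the setters below.
*
* A minimal log4j.properties sketch (the appender name, app name, routing key, vip and port are
* placeholders):
* <pre>{@code
* log4j.appender.SURO=com.netflix.suro.input.Log4jAppender
* log4j.appender.SURO.app=myApp
* log4j.appender.SURO.routingKey=myRoutingKey
* log4j.appender.SURO.loadBalancerType=eureka
* log4j.appender.SURO.loadBalancerServer=suro-server-vip:7101
* log4j.rootLogger=INFO, SURO
* }</pre>
*/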
public class Log4jAppender extends AppenderSkeleton {
protected static String localHostAddr = null;
static {
try {
localHostAddr = InetAddress.getLocalHost().getHostName();
} catch (UnknownHostException e) {
localHostAddr = "N/A";
}
}
private String formatterClass = JsonLog4jFormatter.class.getName();
public void setFormatterClass(String formatterClass) {
this.formatterClass = formatterClass;
}
public String getFormatterClass() {
return formatterClass;
}
private String datetimeFormat = "yyyy-MM-dd'T'HH:mm:ss,SSS";
public void setDatetimeFormat(String datetimeFormat) {
this.datetimeFormat = datetimeFormat;
}
public String getDatetimeFormat() {
return datetimeFormat;
}
private String routingKey = "";
public void setRoutingKey(String routingKey) {
this.routingKey = routingKey;
}
public String getRoutingKey() { return routingKey; }
private String app = "defaultApp";
public void setApp(String app) {
this.app = app;
}
public String getApp() {
return app;
}
private String compression = "1";
public void setCompression(String compression) {
this.compression = compression;
}
public String getCompression() {
return compression;
}
private String loadBalancerType = "eureka";
public void setLoadBalancerType(String loadBalancerType) {
this.loadBalancerType = loadBalancerType;
}
private String getLoadBalancerType() {
return loadBalancerType;
}
private String loadBalancerServer;
public void setLoadBalancerServer(String loadBalancerServer) {
this.loadBalancerServer = loadBalancerServer;
}
private String getLoadBalancerServer() {
return loadBalancerServer;
}
private String asyncQueueType = "memory";
public void setAsyncQueueType(String asyncQueueType) {
this.asyncQueueType = asyncQueueType;
}
public String getAsyncQueueType() {
return asyncQueueType;
}
private String asyncMemoryQueueCapacity = "10000";
public void setAsyncMemoryQueueCapacity(String memoryQueueCapacity) {
this.asyncMemoryQueueCapacity = memoryQueueCapacity;
}
public String getAsyncMemoryQueueCapacity() {
return asyncMemoryQueueCapacity;
}
private String asyncFileQueuePath = "/logs/suroClient";
public String getAsyncFileQueuePath() {
return asyncFileQueuePath;
}
public void setAsyncFileQueuePath(String fileQueuePath) {
this.asyncFileQueuePath = fileQueuePath;
}
private String clientType = "async";
public void setClientType(String clientType) {
this.clientType = clientType;
}
public String getClientType() {
return clientType;
}
private Log4jFormatter formatter;
@VisibleForTesting
protected SuroClient client;
@Override
public void activateOptions() {
client = new SuroClient(createProperties());
try {
formatter = (Log4jFormatter)
Class.forName(formatterClass).getDeclaredConstructor(ClientConfig.class)
.newInstance(client.getConfig());
} catch (Exception e) {
formatter = new JsonLog4jFormatter(client.getConfig());
}
}
private Properties createProperties() {
Properties properties = new Properties();
properties.setProperty(ClientConfig.LOG4J_FORMATTER, formatterClass);
properties.setProperty(ClientConfig.LOG4J_DATETIMEFORMAT, datetimeFormat);
properties.setProperty(ClientConfig.LOG4J_ROUTING_KEY, routingKey);
properties.setProperty(ClientConfig.APP, app);
properties.setProperty(ClientConfig.COMPRESSION, compression);
properties.setProperty(ClientConfig.LB_TYPE, loadBalancerType);
properties.setProperty(ClientConfig.LB_SERVER, loadBalancerServer);
properties.setProperty(ClientConfig.ASYNC_MEMORYQUEUE_CAPACITY, asyncMemoryQueueCapacity);
properties.setProperty(ClientConfig.ASYNC_QUEUE_TYPE, asyncQueueType);
properties.setProperty(ClientConfig.ASYNC_FILEQUEUE_PATH, asyncFileQueuePath);
properties.setProperty(ClientConfig.CLIENT_TYPE, clientType);
return properties;
}
@Override
public void doAppend(LoggingEvent event) {
this.append(event);
}
private SerDe<String> serDe = new StringSerDe();
@Override
protected void append(LoggingEvent event) {
String result = formatter.format(event);
client.send(new Message(
formatter.getRoutingKey(),
result.getBytes()));
}
@Override
public void close() {
client.shutdown();
}
@Override
public boolean requiresLayout() {
return false;
}
public long getSentMessageCount() {
return client.getSentMessageCount();
}
}
| 1,327 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/input/StringLog4jFormatter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.input;
import com.google.inject.Inject;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.TagKey;
import org.apache.log4j.spi.LoggingEvent;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import java.util.Iterator;
import java.util.Map;
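/**
* Log4j formatter that renders an event as a single delimiter-separated line: timestamp, level and
* logger name joined by {@code fieldDelim} ('\035'), followed either by the message string or, when
* the message is a Map, by key/value pairs joined by {@code fieldEqual} ('\002'). If a Map entry is
* keyed by {@link TagKey#ROUTING_KEY}, its value becomes the routing key returned by {@link #getRoutingKey()}.
*/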
public class StringLog4jFormatter implements Log4jFormatter {
public static final char fieldDelim = '\035';
public static final char fieldEqual = '\002';
private final ClientConfig config;
private final DateTimeFormatter fmt;
private String routingKey;
@Inject
public StringLog4jFormatter(ClientConfig config) {
this.config = config;
fmt = DateTimeFormat.forPattern(config.getLog4jDateTimeFormat());
}
@Override
public String format(LoggingEvent event) {
StringBuilder sb = new StringBuilder();
sb.append(fmt.print(new DateTime())).append(fieldDelim);
sb.append(event.getLevel()).append(fieldDelim).append(event.getLoggerName());
Object obj = event.getMessage();
routingKey = null;
// time UTC^]Level^]Map
if (obj instanceof Map) {
Map map = (Map) event.getMessage();
Iterator it = map.keySet().iterator();
String key = null;
while (it.hasNext()) {
key = (String) it.next();
sb.append(fieldDelim).append(key).append(fieldEqual).append(map.get(key));
if (key.equalsIgnoreCase(TagKey.ROUTING_KEY)) {
routingKey = (String) map.get(key);
}
}
} else {
// time UTC^]Level^]String
sb.append(fieldDelim).append(obj.toString());
}
// Extract exceptions
String[] s = event.getThrowableStrRep();
if (s != null && s.length > 0) {
sb.append(fieldDelim).append("Exception").append(fieldEqual).append(s[0]);
for (int i = 1; i < s.length; i++) {
sb.append('\n').append(s[i]);
}
}
return sb.toString();
}
@Override
public String getRoutingKey() {
return routingKey;
}
}
| 1,328 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/input/StaticLog4jFormatter.java
|
package com.netflix.suro.input;
import com.google.inject.Inject;
import com.netflix.suro.ClientConfig;
import org.apache.log4j.spi.LoggingEvent;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
public class StaticLog4jFormatter implements Log4jFormatter {
private final DateTimeFormatter fmt;
private final ClientConfig config;
@Inject
public StaticLog4jFormatter(ClientConfig config) {
this.config = config;
fmt = DateTimeFormat.forPattern(config.getLog4jDateTimeFormat());
}
@Override
public String format(LoggingEvent event) {
StringBuilder sb = new StringBuilder();
sb.append(fmt.print(new DateTime())).append('\t');
sb.append(event.getLevel()).append('\t').append(event.getLoggerName());
Object obj = event.getMessage();
sb.append('\t').append(obj.toString());
// Extract exceptions
String[] s = event.getThrowableStrRep();
if (s != null && s.length > 0) {
sb.append('\n').append(s[0]);
for (int i = 1; i < s.length; i++) {
sb.append('\n').append(s[i]);
}
}
return sb.toString();
}
@Override
public String getRoutingKey() {
return config.getLog4jRoutingKey();
}
}
| 1,329 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/input/JsonLog4jFormatter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.input;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.Inject;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.TagKey;
import com.netflix.suro.jackson.DefaultObjectMapper;
import org.apache.log4j.spi.LoggingEvent;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
public class JsonLog4jFormatter implements Log4jFormatter {
private final ClientConfig config;
private final DateTimeFormatter fmt;
private final ObjectMapper jsonMapper;
private final StringLog4jFormatter stringFormatter;
private String routingKey;
@Monitor(name = "jsonParsingError", type = DataSourceType.COUNTER)
private AtomicLong jsonParsingError = new AtomicLong(0);
public JsonLog4jFormatter(ClientConfig config) {
this(config, null);
}
@Inject
public JsonLog4jFormatter(ClientConfig config, ObjectMapper jsonMapper) {
this.config = config;
if (jsonMapper == null)
this.jsonMapper = new DefaultObjectMapper();
else
this.jsonMapper = jsonMapper;
fmt = DateTimeFormat.forPattern(config.getLog4jDateTimeFormat());
stringFormatter = new StringLog4jFormatter(config);
Monitors.registerObject(this);
}
@SuppressWarnings("unchecked")
@Override
public String format(LoggingEvent event) {
Object obj = event.getMessage();
routingKey = null;
if (obj instanceof Map) {
Map map = (Map) event.getMessage();
DateTime now = new DateTime();
map.put("ts", now.getMillis());
map.put("datetime", fmt.print(now));
map.put("logLevel", event.getLevel().toString());
map.put("class", event.getLoggerName());
routingKey = (String) map.get(TagKey.ROUTING_KEY);
// Extract exceptions
String[] s = event.getThrowableStrRep();
if (s != null && s.length > 0) {
map.put("Exception", s);
}
try {
return jsonMapper.writeValueAsString(map);
} catch (JsonProcessingException e) {
jsonParsingError.incrementAndGet();
return stringFormatter.format(event);
}
} else {
jsonParsingError.incrementAndGet();
return stringFormatter.format(event);
}
}
@Override
public String getRoutingKey() {
return routingKey;
}
}
| 1,330 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/sink/SuroSink.java
|
package com.netflix.suro.sink;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.suro.client.SuroClient;
import com.netflix.suro.message.MessageContainer;
import java.util.Properties;
/**
* Sink implementation that wraps a {@link SuroClient} to forward messages to a suro server.
*
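* A sketch of a JSON sink definition, assuming this sink is registered under the type name "suro"
* (the client property values below are placeholders):
* <pre>{@code
* {
*     "type": "suro",
*     "properties": {
*         "SuroClient.loadBalancerType": "static",
*         "SuroClient.loadBalancerServer": "localhost:7101"
*     }
* }
* }</pre>
*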
* @author jbae
*/
public class SuroSink implements Sink {
public static final String TYPE = "suro";
private SuroClient client;
private final Properties props;
@JsonCreator
public SuroSink(@JsonProperty("properties") Properties props) {
this.props = props;
}
@Override
public void writeTo(MessageContainer message) {
client.send(message.getMessage());
}
@Override
public void open() {
client = new SuroClient(props);
}
@Override
public void close() {
client.shutdown();
}
@Override
public String recvNotice() {
return "";
}
@Override
public String getStat() {
return "sent: " + client.getSentMessageCount() + '\n' + "lost: " + client.getLostMessageCount();
}
@Override
public long getNumOfPendingMessages() {
return client.getNumOfPendingMessages();
}
@Override
public long checkPause() {
return 0;
}
}
| 1,331 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/message/Compression.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.message;
import com.ning.compress.lzf.LZFDecoder;
import com.ning.compress.lzf.LZFEncoder;
import java.io.IOException;
/**
* Suro message payload compression.
*
* The method {@link #compress(byte[])} receives a byte[] and returns the compressed byte[];
* the method {@link #decompress(byte[])} receives a compressed byte[] and returns the uncompressed one.
*
* 0, NO no compression
* 1, LZF LZF compression
*
* @author jbae
*/
public enum Compression {
NO(0) {
byte[] compress(byte[] buffer) {
return buffer;
}
byte[] decompress(byte[] buffer) {
return buffer;
}
},
LZF(1) {
byte[] compress(byte[] buffer) {
try {
return LZFEncoder.encode(buffer);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
byte[] decompress(byte[] buffer) {
try {
return LZFDecoder.decode(buffer);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
};
private final int id;
Compression(int id) { this.id = id; }
public int getId() { return id; }
public static Compression create(int id) {
for (Compression compression : values()) {
if (id == compression.getId()) {
return compression;
}
}
throw new IllegalArgumentException("invalid compression id: " + id);
}
abstract byte[] compress(byte[] buffer);
abstract byte[] decompress(byte[] buffer);
}
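/*
 * Illustrative round-trip sketch (not part of the original source). compress() and decompress()
 * are package-private, so code like this would live in the com.netflix.suro.message package.
 *
 *   byte[] original = "hello suro".getBytes();
 *   Compression lzf = Compression.create(1);   // same instance as Compression.LZF
 *   byte[] packed = lzf.compress(original);
 *   byte[] unpacked = lzf.decompress(packed);
 *   // Arrays.equals(original, unpacked) == true
 */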
| 1,332 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/message/MessageSetBuilder.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.message;
import com.google.common.io.ByteArrayDataOutput;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.client.async.Queue4Client;
import com.netflix.suro.thrift.TMessageSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.zip.CRC32;
/**
* The payload for Suro's Thrift communication is {@link TMessageSet}, a Thrift representation of
* a set of messages. This builder makes it easy to create {@link TMessageSet} instances.
*
* @author jbae
*/
public class MessageSetBuilder {
private static final Logger log = LoggerFactory.getLogger(MessageSetBuilder.class);
private final ClientConfig config;
private List<Message> messageList;
private Compression compression = Compression.LZF;
/**
* @param config client configuration, including the application name attached to each TMessageSet
*/
public MessageSetBuilder(ClientConfig config) {
this.config = config;
messageList = new ArrayList<Message>();
}
public MessageSetBuilder withMessage(String routingKey, byte[] payload) {
this.messageList.add(new Message(routingKey, payload));
return this;
}
public MessageSetBuilder withCompression(Compression compression) {
this.compression = compression;
return this;
}
public TMessageSet build() {
try {
byte[] buffer = createPayload(messageList, compression);
long crc = getCRC(buffer);
return new TMessageSet(
config.getApp(),
messageList.size(),
(byte) compression.getId(),
crc,
ByteBuffer.wrap(buffer));
} catch (IOException e) {
log.error("Exception on building TMessageSet: " + e.getMessage(), e);
return null;
} finally {
messageList.clear();
}
}
/**
* @return number of messages in MessageSet
*/
public int size() {
return messageList.size();
}
/**
* Creates a compressed byte[] from the list of messages. Each message carries its body
* as byte[], so this method simply concatenates the serialized form of every message in
* messageList and compresses the result.
*
* @param messageList a list of messages for the payload
* @param compression compression method to be applied to the payload
* @return a byte array that encodes the built payload
* @throws IOException
*/
public static byte[] createPayload(List<Message> messageList, Compression compression) throws IOException {
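// ByteArrayDataOutputStream is assumed to be a same-package helper that adapts the reusable
// thread-local ByteArrayOutputStream below to Guava's ByteArrayDataOutput interface.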
ByteArrayDataOutput out = new ByteArrayDataOutputStream(outputStream.get());
for (Message message : messageList) {
message.write(out);
}
return compression.compress(out.toByteArray());
}
private static ThreadLocal<ByteArrayOutputStream> outputStream =
new ThreadLocal<ByteArrayOutputStream>() {
@Override
protected ByteArrayOutputStream initialValue() {
return new ByteArrayOutputStream();
}
@Override
public ByteArrayOutputStream get() {
ByteArrayOutputStream b = super.get();
b.reset();
return b;
}
};
/**
* Compute CRC32 value for byte[]
*
* @param buffer all the bytes in the buffer will be used for CRC32 calculation
*
* @return a CRC32 value for the given byte array
*/
public static long getCRC(byte[] buffer) {
CRC32 crc = new CRC32();
crc.update(buffer);
return crc.getValue();
}
/**
* Drains the given number of messages from the given queue. This method can be used
* instead of calling {@link #withMessage(String, byte[])} for each message; it is the
* reverse of JDK's BlockingQueue.drainTo.
*
* @param queue the queue to drain messages from
* @param size the number of messages to drain from the given queue
*/
public void drainFrom(Queue4Client queue, int size) {
queue.drain(size, messageList);
}
}
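/*
 * Illustrative usage sketch (not part of the original source), assuming ClientConfig defaults
 * are sufficient:
 *
 *   MessageSetBuilder builder = new MessageSetBuilder(new ClientConfig())
 *       .withCompression(Compression.LZF)
 *       .withMessage("routing_key", "payload".getBytes());
 *
 *   TMessageSet messageSet = builder.build();  // the builder's message list is cleared afterwards
 *   // messageSet.getNumMessages() == 1; messageSet.getCrc() is computed over the compressed payload
 */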
| 1,333 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/message/MessageSetReader.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.message;
import com.google.common.io.ByteArrayDataInput;
import com.google.common.io.ByteStreams;
import com.netflix.servo.monitor.DynamicCounter;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.suro.TagKey;
import com.netflix.suro.thrift.TMessageSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Iterator;
/**
* This class implements an {@link Iterable} reader for {@link TMessageSet} so callers can
* iterate over each message the set contains.
*
* @author jbae
*/
public class MessageSetReader implements Iterable<Message> {
private static final Logger log = LoggerFactory.getLogger(MessageSetReader.class);
private final TMessageSet messageSet;
public MessageSetReader(TMessageSet messageSet) {
this.messageSet = messageSet;
}
public boolean checkCRC() {
long crcReceived = messageSet.getCrc();
long crc = MessageSetBuilder.getCRC(messageSet.getMessages());
return crcReceived == crc;
}
@Override
public Iterator<Message> iterator() {
try {
final ByteArrayDataInput input = ByteStreams.newDataInput(
Compression.create(messageSet.getCompression()).decompress(messageSet.getMessages()));
return new Iterator<Message>() {
private int messageCount = messageSet.getNumMessages();
@Override
public boolean hasNext() {
return messageCount > 0;
}
@Override
public Message next() {
Message m = new Message();
try {
m.readFields(input);
--messageCount;
return m;
} catch (Exception e) {
log.error("Exception while iterating MessageSet:" + e.getMessage(), e);
DynamicCounter.increment(
MonitorConfig.builder(TagKey.DROPPED_COUNT)
.withTag("reason", "MessageSetReaderError").build(),
messageCount);
messageCount = 0; // discard further messages
return null;
}
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove is not supported");
}
};
} catch (Exception e) {
log.error("Exception while reading: " + e.getMessage(), e);
return new Iterator<Message>() {
@Override
public boolean hasNext() {
return false;
}
@Override
public Message next() {
return null;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove is not supported");
}
};
}
}
}
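/*
 * Illustrative usage sketch (not part of the original source): reading back a TMessageSet
 * produced by MessageSetBuilder.
 *
 *   MessageSetReader reader = new MessageSetReader(messageSet);
 *   if (reader.checkCRC()) {
 *       for (Message message : reader) {
 *           if (message == null) {
 *               break; // an iteration error occurred and the remaining messages were dropped
 *           }
 *           // use message.getRoutingKey() and message.getPayload()
 *       }
 *   }
 */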
| 1,334 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/message
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/message/serde/SerDeFactory.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.message.serde;
import com.netflix.suro.message.SerDe;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
* Factory that creates {@link SerDe} instances by class name via reflection and caches
* them so that subsequent calls with the same class name return the same instance.
*/
public class SerDeFactory {
private static final Logger log = LoggerFactory.getLogger(SerDeFactory.class);
private static ConcurrentMap<String, SerDe> map = new ConcurrentHashMap<String, SerDe>();
public static SerDe create(String clazz) {
SerDe serDe = map.get(clazz);
if (serDe == null) {
try {
serDe = (SerDe) Class.forName(clazz).newInstance();
map.putIfAbsent(clazz, serDe);
} catch (Exception e) {
throw new RuntimeException("Exception on creating SerDe using reflection: " + e.getMessage(), e);
}
}
return serDe;
}
}
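/*
 * Illustrative usage sketch (not part of the original source):
 *
 *   SerDe serDe = SerDeFactory.create("com.netflix.suro.message.serde.JsonSerDe");
 *   // The instance is cached, so another create() with the same class name returns the same object.
 */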
| 1,335 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/message
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/message/serde/JsonSerDe.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.message.serde;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.SerDe;
import org.apache.log4j.Logger;
/**
* {@link SerDe} implementation that serializes and deserializes payloads as JSON using Jackson.
*
* @param <T> the payload type
*/
public class JsonSerDe<T> implements SerDe<T> {
private static final Logger log = Logger.getLogger(JsonSerDe.class);
private final ObjectMapper jsonMapper = new DefaultObjectMapper();
private final TypeReference<T> typeReference = new TypeReference<T>(){};
@Override
public T deserialize(byte[] payload) {
try {
return jsonMapper.readValue(payload, typeReference);
} catch (Exception e) {
log.error("deserialize error in JsonSerDe: " + e.getMessage(), e);
return null;
}
}
@Override
public byte[] serialize(T payload) {
try {
return jsonMapper.writeValueAsBytes(payload);
} catch (Exception e) {
log.error("serialize error in JsonSerDe: " + e.getMessage(), e);
return new byte[]{};
}
}
@Override
public String toString(byte[] payload) {
return new String(payload); // json payload is string
}
}
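/*
 * Illustrative round-trip sketch (not part of the original source). Because the anonymous
 * TypeReference above is created with an unbound T, Jackson falls back to its default bindings
 * on deserialize (for example, a JSON object becomes a Map).
 *
 *   JsonSerDe<Map<String, Object>> serDe = new JsonSerDe<Map<String, Object>>();
 *   Map<String, Object> original = new HashMap<String, Object>();
 *   original.put("key", "value");
 *
 *   byte[] bytes = serDe.serialize(original);
 *   Map<String, Object> restored = serDe.deserialize(bytes);
 *   String asString = serDe.toString(bytes);   // "{\"key\":\"value\"}"
 */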
| 1,336 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/thrift/TMessageSet.java
|
/**
* Autogenerated by Thrift Compiler (0.9.1)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package com.netflix.suro.thrift;
import org.apache.thrift.EncodingUtils;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;
import org.apache.thrift.scheme.TupleScheme;
import java.nio.ByteBuffer;
import java.util.*;
public class TMessageSet implements org.apache.thrift.TBase<TMessageSet, TMessageSet._Fields>, java.io.Serializable, Cloneable, Comparable<TMessageSet> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TMessageSet");
private static final org.apache.thrift.protocol.TField APP_FIELD_DESC = new org.apache.thrift.protocol.TField("app", org.apache.thrift.protocol.TType.STRING, (short)1);
private static final org.apache.thrift.protocol.TField NUM_MESSAGES_FIELD_DESC = new org.apache.thrift.protocol.TField("numMessages", org.apache.thrift.protocol.TType.I32, (short)2);
private static final org.apache.thrift.protocol.TField COMPRESSION_FIELD_DESC = new org.apache.thrift.protocol.TField("compression", org.apache.thrift.protocol.TType.BYTE, (short)3);
private static final org.apache.thrift.protocol.TField CRC_FIELD_DESC = new org.apache.thrift.protocol.TField("crc", org.apache.thrift.protocol.TType.I64, (short)4);
private static final org.apache.thrift.protocol.TField MESSAGES_FIELD_DESC = new org.apache.thrift.protocol.TField("messages", org.apache.thrift.protocol.TType.STRING, (short)5);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new TMessageSetStandardSchemeFactory());
schemes.put(TupleScheme.class, new TMessageSetTupleSchemeFactory());
}
public String app; // required
public int numMessages; // required
public byte compression; // required
public long crc; // required
public ByteBuffer messages; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
APP((short)1, "app"),
NUM_MESSAGES((short)2, "numMessages"),
COMPRESSION((short)3, "compression"),
CRC((short)4, "crc"),
MESSAGES((short)5, "messages");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // APP
return APP;
case 2: // NUM_MESSAGES
return NUM_MESSAGES;
case 3: // COMPRESSION
return COMPRESSION;
case 4: // CRC
return CRC;
case 5: // MESSAGES
return MESSAGES;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
private static final int __NUMMESSAGES_ISSET_ID = 0;
private static final int __COMPRESSION_ISSET_ID = 1;
private static final int __CRC_ISSET_ID = 2;
private byte __isset_bitfield = 0;
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.APP, new org.apache.thrift.meta_data.FieldMetaData("app", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.NUM_MESSAGES, new org.apache.thrift.meta_data.FieldMetaData("numMessages", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
tmpMap.put(_Fields.COMPRESSION, new org.apache.thrift.meta_data.FieldMetaData("compression", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BYTE)));
tmpMap.put(_Fields.CRC, new org.apache.thrift.meta_data.FieldMetaData("crc", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
tmpMap.put(_Fields.MESSAGES, new org.apache.thrift.meta_data.FieldMetaData("messages", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TMessageSet.class, metaDataMap);
}
public TMessageSet() {
}
public TMessageSet(
String app,
int numMessages,
byte compression,
long crc,
ByteBuffer messages)
{
this();
this.app = app;
this.numMessages = numMessages;
setNumMessagesIsSet(true);
this.compression = compression;
setCompressionIsSet(true);
this.crc = crc;
setCrcIsSet(true);
this.messages = messages;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public TMessageSet(TMessageSet other) {
__isset_bitfield = other.__isset_bitfield;
if (other.isSetApp()) {
this.app = other.app;
}
this.numMessages = other.numMessages;
this.compression = other.compression;
this.crc = other.crc;
if (other.isSetMessages()) {
this.messages = org.apache.thrift.TBaseHelper.copyBinary(other.messages);
;
}
}
public TMessageSet deepCopy() {
return new TMessageSet(this);
}
@Override
public void clear() {
this.app = null;
setNumMessagesIsSet(false);
this.numMessages = 0;
setCompressionIsSet(false);
this.compression = 0;
setCrcIsSet(false);
this.crc = 0;
this.messages = null;
}
public String getApp() {
return this.app;
}
public TMessageSet setApp(String app) {
this.app = app;
return this;
}
public void unsetApp() {
this.app = null;
}
/** Returns true if field app is set (has been assigned a value) and false otherwise */
public boolean isSetApp() {
return this.app != null;
}
public void setAppIsSet(boolean value) {
if (!value) {
this.app = null;
}
}
public int getNumMessages() {
return this.numMessages;
}
public TMessageSet setNumMessages(int numMessages) {
this.numMessages = numMessages;
setNumMessagesIsSet(true);
return this;
}
public void unsetNumMessages() {
__isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMMESSAGES_ISSET_ID);
}
/** Returns true if field numMessages is set (has been assigned a value) and false otherwise */
public boolean isSetNumMessages() {
return EncodingUtils.testBit(__isset_bitfield, __NUMMESSAGES_ISSET_ID);
}
public void setNumMessagesIsSet(boolean value) {
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMMESSAGES_ISSET_ID, value);
}
public byte getCompression() {
return this.compression;
}
public TMessageSet setCompression(byte compression) {
this.compression = compression;
setCompressionIsSet(true);
return this;
}
public void unsetCompression() {
__isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __COMPRESSION_ISSET_ID);
}
/** Returns true if field compression is set (has been assigned a value) and false otherwise */
public boolean isSetCompression() {
return EncodingUtils.testBit(__isset_bitfield, __COMPRESSION_ISSET_ID);
}
public void setCompressionIsSet(boolean value) {
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __COMPRESSION_ISSET_ID, value);
}
public long getCrc() {
return this.crc;
}
public TMessageSet setCrc(long crc) {
this.crc = crc;
setCrcIsSet(true);
return this;
}
public void unsetCrc() {
__isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CRC_ISSET_ID);
}
/** Returns true if field crc is set (has been assigned a value) and false otherwise */
public boolean isSetCrc() {
return EncodingUtils.testBit(__isset_bitfield, __CRC_ISSET_ID);
}
public void setCrcIsSet(boolean value) {
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CRC_ISSET_ID, value);
}
public byte[] getMessages() {
setMessages(org.apache.thrift.TBaseHelper.rightSize(messages));
return messages == null ? null : messages.array();
}
public ByteBuffer bufferForMessages() {
return messages;
}
public TMessageSet setMessages(byte[] messages) {
setMessages(messages == null ? (ByteBuffer)null : ByteBuffer.wrap(messages));
return this;
}
public TMessageSet setMessages(ByteBuffer messages) {
this.messages = messages;
return this;
}
public void unsetMessages() {
this.messages = null;
}
/** Returns true if field messages is set (has been assigned a value) and false otherwise */
public boolean isSetMessages() {
return this.messages != null;
}
public void setMessagesIsSet(boolean value) {
if (!value) {
this.messages = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case APP:
if (value == null) {
unsetApp();
} else {
setApp((String)value);
}
break;
case NUM_MESSAGES:
if (value == null) {
unsetNumMessages();
} else {
setNumMessages((Integer)value);
}
break;
case COMPRESSION:
if (value == null) {
unsetCompression();
} else {
setCompression((Byte)value);
}
break;
case CRC:
if (value == null) {
unsetCrc();
} else {
setCrc((Long)value);
}
break;
case MESSAGES:
if (value == null) {
unsetMessages();
} else {
setMessages((ByteBuffer)value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case APP:
return getApp();
case NUM_MESSAGES:
return Integer.valueOf(getNumMessages());
case COMPRESSION:
return Byte.valueOf(getCompression());
case CRC:
return Long.valueOf(getCrc());
case MESSAGES:
return getMessages();
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case APP:
return isSetApp();
case NUM_MESSAGES:
return isSetNumMessages();
case COMPRESSION:
return isSetCompression();
case CRC:
return isSetCrc();
case MESSAGES:
return isSetMessages();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof TMessageSet)
return this.equals((TMessageSet)that);
return false;
}
public boolean equals(TMessageSet that) {
if (that == null)
return false;
boolean this_present_app = true && this.isSetApp();
boolean that_present_app = true && that.isSetApp();
if (this_present_app || that_present_app) {
if (!(this_present_app && that_present_app))
return false;
if (!this.app.equals(that.app))
return false;
}
boolean this_present_numMessages = true;
boolean that_present_numMessages = true;
if (this_present_numMessages || that_present_numMessages) {
if (!(this_present_numMessages && that_present_numMessages))
return false;
if (this.numMessages != that.numMessages)
return false;
}
boolean this_present_compression = true;
boolean that_present_compression = true;
if (this_present_compression || that_present_compression) {
if (!(this_present_compression && that_present_compression))
return false;
if (this.compression != that.compression)
return false;
}
boolean this_present_crc = true;
boolean that_present_crc = true;
if (this_present_crc || that_present_crc) {
if (!(this_present_crc && that_present_crc))
return false;
if (this.crc != that.crc)
return false;
}
boolean this_present_messages = true && this.isSetMessages();
boolean that_present_messages = true && that.isSetMessages();
if (this_present_messages || that_present_messages) {
if (!(this_present_messages && that_present_messages))
return false;
if (!this.messages.equals(that.messages))
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(TMessageSet other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetApp()).compareTo(other.isSetApp());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetApp()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.app, other.app);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetNumMessages()).compareTo(other.isSetNumMessages());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetNumMessages()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numMessages, other.numMessages);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetCompression()).compareTo(other.isSetCompression());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetCompression()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.compression, other.compression);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetCrc()).compareTo(other.isSetCrc());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetCrc()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.crc, other.crc);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetMessages()).compareTo(other.isSetMessages());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetMessages()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.messages, other.messages);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("TMessageSet(");
boolean first = true;
sb.append("app:");
if (this.app == null) {
sb.append("null");
} else {
sb.append(this.app);
}
first = false;
if (!first) sb.append(", ");
sb.append("numMessages:");
sb.append(this.numMessages);
first = false;
if (!first) sb.append(", ");
sb.append("compression:");
sb.append(this.compression);
first = false;
if (!first) sb.append(", ");
sb.append("crc:");
sb.append(this.crc);
first = false;
if (!first) sb.append(", ");
sb.append("messages:");
if (this.messages == null) {
sb.append("null");
} else {
org.apache.thrift.TBaseHelper.toString(this.messages, sb);
}
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
// it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
__isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private static class TMessageSetStandardSchemeFactory implements SchemeFactory {
public TMessageSetStandardScheme getScheme() {
return new TMessageSetStandardScheme();
}
}
private static class TMessageSetStandardScheme extends StandardScheme<TMessageSet> {
public void read(org.apache.thrift.protocol.TProtocol iprot, TMessageSet struct) throws TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 1: // APP
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.app = iprot.readString();
struct.setAppIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 2: // NUM_MESSAGES
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.numMessages = iprot.readI32();
struct.setNumMessagesIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 3: // COMPRESSION
if (schemeField.type == org.apache.thrift.protocol.TType.BYTE) {
struct.compression = iprot.readByte();
struct.setCompressionIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 4: // CRC
if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
struct.crc = iprot.readI64();
struct.setCrcIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 5: // MESSAGES
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.messages = iprot.readBinary();
struct.setMessagesIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, TMessageSet struct) throws TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
if (struct.app != null) {
oprot.writeFieldBegin(APP_FIELD_DESC);
oprot.writeString(struct.app);
oprot.writeFieldEnd();
}
oprot.writeFieldBegin(NUM_MESSAGES_FIELD_DESC);
oprot.writeI32(struct.numMessages);
oprot.writeFieldEnd();
oprot.writeFieldBegin(COMPRESSION_FIELD_DESC);
oprot.writeByte(struct.compression);
oprot.writeFieldEnd();
oprot.writeFieldBegin(CRC_FIELD_DESC);
oprot.writeI64(struct.crc);
oprot.writeFieldEnd();
if (struct.messages != null) {
oprot.writeFieldBegin(MESSAGES_FIELD_DESC);
oprot.writeBinary(struct.messages);
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class TMessageSetTupleSchemeFactory implements SchemeFactory {
public TMessageSetTupleScheme getScheme() {
return new TMessageSetTupleScheme();
}
}
private static class TMessageSetTupleScheme extends TupleScheme<TMessageSet> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, TMessageSet struct) throws TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
if (struct.isSetApp()) {
optionals.set(0);
}
if (struct.isSetNumMessages()) {
optionals.set(1);
}
if (struct.isSetCompression()) {
optionals.set(2);
}
if (struct.isSetCrc()) {
optionals.set(3);
}
if (struct.isSetMessages()) {
optionals.set(4);
}
oprot.writeBitSet(optionals, 5);
if (struct.isSetApp()) {
oprot.writeString(struct.app);
}
if (struct.isSetNumMessages()) {
oprot.writeI32(struct.numMessages);
}
if (struct.isSetCompression()) {
oprot.writeByte(struct.compression);
}
if (struct.isSetCrc()) {
oprot.writeI64(struct.crc);
}
if (struct.isSetMessages()) {
oprot.writeBinary(struct.messages);
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, TMessageSet struct) throws TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(5);
if (incoming.get(0)) {
struct.app = iprot.readString();
struct.setAppIsSet(true);
}
if (incoming.get(1)) {
struct.numMessages = iprot.readI32();
struct.setNumMessagesIsSet(true);
}
if (incoming.get(2)) {
struct.compression = iprot.readByte();
struct.setCompressionIsSet(true);
}
if (incoming.get(3)) {
struct.crc = iprot.readI64();
struct.setCrcIsSet(true);
}
if (incoming.get(4)) {
struct.messages = iprot.readBinary();
struct.setMessagesIsSet(true);
}
}
}
}
| 1,337 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/thrift/SuroService.java
|
/**
* Autogenerated by Thrift Compiler (0.9.1)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package com.netflix.suro.thrift;
import org.apache.thrift.EncodingUtils;
import org.apache.thrift.TException;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;
import org.apache.thrift.scheme.TupleScheme;
import org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
public class SuroService {
public interface Iface {
public long shutdown() throws TException;
public String getName() throws TException;
public ServiceStatus getStatus() throws TException;
public String getVersion() throws TException;
}
public interface AsyncIface {
public void shutdown(AsyncMethodCallback resultHandler) throws TException;
public void getName(AsyncMethodCallback resultHandler) throws TException;
public void getStatus(AsyncMethodCallback resultHandler) throws TException;
public void getVersion(AsyncMethodCallback resultHandler) throws TException;
}
public static class Client extends org.apache.thrift.TServiceClient implements Iface {
public static class Factory implements org.apache.thrift.TServiceClientFactory<Client> {
public Factory() {}
public Client getClient(org.apache.thrift.protocol.TProtocol prot) {
return new Client(prot);
}
public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
return new Client(iprot, oprot);
}
}
public Client(org.apache.thrift.protocol.TProtocol prot)
{
super(prot, prot);
}
public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
super(iprot, oprot);
}
public long shutdown() throws TException
{
send_shutdown();
return recv_shutdown();
}
public void send_shutdown() throws TException
{
shutdown_args args = new shutdown_args();
sendBase("shutdown", args);
}
public long recv_shutdown() throws TException
{
shutdown_result result = new shutdown_result();
receiveBase(result, "shutdown");
if (result.isSetSuccess()) {
return result.success;
}
throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "shutdown failed: unknown result");
}
public String getName() throws TException
{
send_getName();
return recv_getName();
}
public void send_getName() throws TException
{
getName_args args = new getName_args();
sendBase("getName", args);
}
public String recv_getName() throws TException
{
getName_result result = new getName_result();
receiveBase(result, "getName");
if (result.isSetSuccess()) {
return result.success;
}
throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getName failed: unknown result");
}
public ServiceStatus getStatus() throws TException
{
send_getStatus();
return recv_getStatus();
}
public void send_getStatus() throws TException
{
getStatus_args args = new getStatus_args();
sendBase("getStatus", args);
}
public ServiceStatus recv_getStatus() throws TException
{
getStatus_result result = new getStatus_result();
receiveBase(result, "getStatus");
if (result.isSetSuccess()) {
return result.success;
}
throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getStatus failed: unknown result");
}
public String getVersion() throws TException
{
send_getVersion();
return recv_getVersion();
}
public void send_getVersion() throws TException
{
getVersion_args args = new getVersion_args();
sendBase("getVersion", args);
}
public String recv_getVersion() throws TException
{
getVersion_result result = new getVersion_result();
receiveBase(result, "getVersion");
if (result.isSetSuccess()) {
return result.success;
}
throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getVersion failed: unknown result");
}
}
public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface {
public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
private org.apache.thrift.async.TAsyncClientManager clientManager;
private org.apache.thrift.protocol.TProtocolFactory protocolFactory;
public Factory(org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.protocol.TProtocolFactory protocolFactory) {
this.clientManager = clientManager;
this.protocolFactory = protocolFactory;
}
public AsyncClient getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) {
return new AsyncClient(protocolFactory, clientManager, transport);
}
}
public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.transport.TNonblockingTransport transport) {
super(protocolFactory, clientManager, transport);
}
public void shutdown(AsyncMethodCallback resultHandler) throws TException {
checkReady();
shutdown_call method_call = new shutdown_call(resultHandler, this, ___protocolFactory, ___transport);
this.___currentMethod = method_call;
___manager.call(method_call);
}
public static class shutdown_call extends org.apache.thrift.async.TAsyncMethodCall {
public shutdown_call(AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
super(client, protocolFactory, transport, resultHandler, false);
}
public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("shutdown", org.apache.thrift.protocol.TMessageType.CALL, 0));
shutdown_args args = new shutdown_args();
args.write(prot);
prot.writeMessageEnd();
}
public long getResult() throws TException {
if (getState() != State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
return (new Client(prot)).recv_shutdown();
}
}
public void getName(AsyncMethodCallback resultHandler) throws TException {
checkReady();
getName_call method_call = new getName_call(resultHandler, this, ___protocolFactory, ___transport);
this.___currentMethod = method_call;
___manager.call(method_call);
}
public static class getName_call extends org.apache.thrift.async.TAsyncMethodCall {
public getName_call(AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
super(client, protocolFactory, transport, resultHandler, false);
}
public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getName", org.apache.thrift.protocol.TMessageType.CALL, 0));
getName_args args = new getName_args();
args.write(prot);
prot.writeMessageEnd();
}
public String getResult() throws TException {
if (getState() != State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
return (new Client(prot)).recv_getName();
}
}
public void getStatus(AsyncMethodCallback resultHandler) throws TException {
checkReady();
getStatus_call method_call = new getStatus_call(resultHandler, this, ___protocolFactory, ___transport);
this.___currentMethod = method_call;
___manager.call(method_call);
}
public static class getStatus_call extends org.apache.thrift.async.TAsyncMethodCall {
public getStatus_call(AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
super(client, protocolFactory, transport, resultHandler, false);
}
public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getStatus", org.apache.thrift.protocol.TMessageType.CALL, 0));
getStatus_args args = new getStatus_args();
args.write(prot);
prot.writeMessageEnd();
}
public ServiceStatus getResult() throws TException {
if (getState() != State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
return (new Client(prot)).recv_getStatus();
}
}
public void getVersion(AsyncMethodCallback resultHandler) throws TException {
checkReady();
getVersion_call method_call = new getVersion_call(resultHandler, this, ___protocolFactory, ___transport);
this.___currentMethod = method_call;
___manager.call(method_call);
}
public static class getVersion_call extends org.apache.thrift.async.TAsyncMethodCall {
public getVersion_call(AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
super(client, protocolFactory, transport, resultHandler, false);
}
public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getVersion", org.apache.thrift.protocol.TMessageType.CALL, 0));
getVersion_args args = new getVersion_args();
args.write(prot);
prot.writeMessageEnd();
}
public String getResult() throws TException {
if (getState() != State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
return (new Client(prot)).recv_getVersion();
}
}
}
public static class Processor<I extends Iface> extends org.apache.thrift.TBaseProcessor<I> implements org.apache.thrift.TProcessor {
private static final Logger LOGGER = LoggerFactory.getLogger(Processor.class.getName());
public Processor(I iface) {
super(iface, getProcessMap(new HashMap<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>>()));
}
protected Processor(I iface, Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
super(iface, getProcessMap(processMap));
}
private static <I extends Iface> Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> getProcessMap(Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
processMap.put("shutdown", new shutdown());
processMap.put("getName", new getName());
processMap.put("getStatus", new getStatus());
processMap.put("getVersion", new getVersion());
return processMap;
}
public static class shutdown<I extends Iface> extends org.apache.thrift.ProcessFunction<I, shutdown_args> {
public shutdown() {
super("shutdown");
}
public shutdown_args getEmptyArgsInstance() {
return new shutdown_args();
}
protected boolean isOneway() {
return false;
}
public shutdown_result getResult(I iface, shutdown_args args) throws TException {
shutdown_result result = new shutdown_result();
result.success = iface.shutdown();
result.setSuccessIsSet(true);
return result;
}
}
public static class getName<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getName_args> {
public getName() {
super("getName");
}
public getName_args getEmptyArgsInstance() {
return new getName_args();
}
protected boolean isOneway() {
return false;
}
public getName_result getResult(I iface, getName_args args) throws TException {
getName_result result = new getName_result();
result.success = iface.getName();
return result;
}
}
public static class getStatus<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getStatus_args> {
public getStatus() {
super("getStatus");
}
public getStatus_args getEmptyArgsInstance() {
return new getStatus_args();
}
protected boolean isOneway() {
return false;
}
public getStatus_result getResult(I iface, getStatus_args args) throws TException {
getStatus_result result = new getStatus_result();
result.success = iface.getStatus();
return result;
}
}
public static class getVersion<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getVersion_args> {
public getVersion() {
super("getVersion");
}
public getVersion_args getEmptyArgsInstance() {
return new getVersion_args();
}
protected boolean isOneway() {
return false;
}
public getVersion_result getResult(I iface, getVersion_args args) throws TException {
getVersion_result result = new getVersion_result();
result.success = iface.getVersion();
return result;
}
}
}
public static class AsyncProcessor<I extends AsyncIface> extends org.apache.thrift.TBaseAsyncProcessor<I> {
private static final Logger LOGGER = LoggerFactory.getLogger(AsyncProcessor.class.getName());
public AsyncProcessor(I iface) {
super(iface, getProcessMap(new HashMap<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>>()));
}
protected AsyncProcessor(I iface, Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
super(iface, getProcessMap(processMap));
}
private static <I extends AsyncIface> Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase,?>> getProcessMap(Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
processMap.put("shutdown", new shutdown());
processMap.put("getName", new getName());
processMap.put("getStatus", new getStatus());
processMap.put("getVersion", new getVersion());
return processMap;
}
public static class shutdown<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, shutdown_args, Long> {
public shutdown() {
super("shutdown");
}
public shutdown_args getEmptyArgsInstance() {
return new shutdown_args();
}
public AsyncMethodCallback<Long> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
return new AsyncMethodCallback<Long>() {
public void onComplete(Long o) {
shutdown_result result = new shutdown_result();
result.success = o;
result.setSuccessIsSet(true);
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
} catch (Exception e) {
LOGGER.error("Exception writing to internal frame buffer", e);
}
fb.close();
}
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
shutdown_result result = new shutdown_result();
{
msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
}
try {
fcall.sendResponse(fb,msg,msgType,seqid);
return;
} catch (Exception ex) {
LOGGER.error("Exception writing to internal frame buffer", ex);
}
fb.close();
}
};
}
protected boolean isOneway() {
return false;
}
public void start(I iface, shutdown_args args, AsyncMethodCallback<Long> resultHandler) throws TException {
iface.shutdown(resultHandler);
}
}
public static class getName<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getName_args, String> {
public getName() {
super("getName");
}
public getName_args getEmptyArgsInstance() {
return new getName_args();
}
public AsyncMethodCallback<String> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
return new AsyncMethodCallback<String>() {
public void onComplete(String o) {
getName_result result = new getName_result();
result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
} catch (Exception e) {
LOGGER.error("Exception writing to internal frame buffer", e);
}
fb.close();
}
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
getName_result result = new getName_result();
{
msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
}
try {
fcall.sendResponse(fb,msg,msgType,seqid);
return;
} catch (Exception ex) {
LOGGER.error("Exception writing to internal frame buffer", ex);
}
fb.close();
}
};
}
protected boolean isOneway() {
return false;
}
public void start(I iface, getName_args args, AsyncMethodCallback<String> resultHandler) throws TException {
iface.getName(resultHandler);
}
}
public static class getStatus<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getStatus_args, ServiceStatus> {
public getStatus() {
super("getStatus");
}
public getStatus_args getEmptyArgsInstance() {
return new getStatus_args();
}
public AsyncMethodCallback<ServiceStatus> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
return new AsyncMethodCallback<ServiceStatus>() {
public void onComplete(ServiceStatus o) {
getStatus_result result = new getStatus_result();
result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
} catch (Exception e) {
LOGGER.error("Exception writing to internal frame buffer", e);
}
fb.close();
}
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
getStatus_result result = new getStatus_result();
{
msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
}
try {
fcall.sendResponse(fb,msg,msgType,seqid);
return;
} catch (Exception ex) {
LOGGER.error("Exception writing to internal frame buffer", ex);
}
fb.close();
}
};
}
protected boolean isOneway() {
return false;
}
public void start(I iface, getStatus_args args, AsyncMethodCallback<ServiceStatus> resultHandler) throws TException {
iface.getStatus(resultHandler);
}
}
public static class getVersion<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getVersion_args, String> {
public getVersion() {
super("getVersion");
}
public getVersion_args getEmptyArgsInstance() {
return new getVersion_args();
}
public AsyncMethodCallback<String> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
return new AsyncMethodCallback<String>() {
public void onComplete(String o) {
getVersion_result result = new getVersion_result();
result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
} catch (Exception e) {
LOGGER.error("Exception writing to internal frame buffer", e);
}
fb.close();
}
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
getVersion_result result = new getVersion_result();
{
msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
}
try {
fcall.sendResponse(fb,msg,msgType,seqid);
return;
} catch (Exception ex) {
LOGGER.error("Exception writing to internal frame buffer", ex);
}
fb.close();
}
};
}
protected boolean isOneway() {
return false;
}
public void start(I iface, getVersion_args args, AsyncMethodCallback<String> resultHandler) throws TException {
iface.getVersion(resultHandler);
}
}
}
public static class shutdown_args implements org.apache.thrift.TBase<shutdown_args, shutdown_args._Fields>, java.io.Serializable, Cloneable, Comparable<shutdown_args> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("shutdown_args");
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new shutdown_argsStandardSchemeFactory());
schemes.put(TupleScheme.class, new shutdown_argsTupleSchemeFactory());
}
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
;
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(shutdown_args.class, metaDataMap);
}
public shutdown_args() {
}
/**
* Performs a deep copy on <i>other</i>.
*/
public shutdown_args(shutdown_args other) {
}
public shutdown_args deepCopy() {
return new shutdown_args(this);
}
@Override
public void clear() {
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof shutdown_args)
return this.equals((shutdown_args)that);
return false;
}
public boolean equals(shutdown_args that) {
if (that == null)
return false;
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(shutdown_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("shutdown_args(");
boolean first = true;
sb.append(")");
return sb.toString();
}
public void validate() throws TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private static class shutdown_argsStandardSchemeFactory implements SchemeFactory {
public shutdown_argsStandardScheme getScheme() {
return new shutdown_argsStandardScheme();
}
}
private static class shutdown_argsStandardScheme extends StandardScheme<shutdown_args> {
public void read(org.apache.thrift.protocol.TProtocol iprot, shutdown_args struct) throws TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, shutdown_args struct) throws TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class shutdown_argsTupleSchemeFactory implements SchemeFactory {
public shutdown_argsTupleScheme getScheme() {
return new shutdown_argsTupleScheme();
}
}
private static class shutdown_argsTupleScheme extends TupleScheme<shutdown_args> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, shutdown_args struct) throws TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, shutdown_args struct) throws TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
}
}
}
public static class shutdown_result implements org.apache.thrift.TBase<shutdown_result, shutdown_result._Fields>, java.io.Serializable, Cloneable, Comparable<shutdown_result> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("shutdown_result");
private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I64, (short)0);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new shutdown_resultStandardSchemeFactory());
schemes.put(TupleScheme.class, new shutdown_resultTupleSchemeFactory());
}
public long success; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
SUCCESS((short)0, "success");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if it's not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 0: // SUCCESS
return SUCCESS;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if it's not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
private static final int __SUCCESS_ISSET_ID = 0;
private byte __isset_bitfield = 0;
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(shutdown_result.class, metaDataMap);
}
public shutdown_result() {
}
public shutdown_result(
long success)
{
this();
this.success = success;
setSuccessIsSet(true);
}
/**
* Performs a deep copy on <i>other</i>.
*/
public shutdown_result(shutdown_result other) {
__isset_bitfield = other.__isset_bitfield;
this.success = other.success;
}
public shutdown_result deepCopy() {
return new shutdown_result(this);
}
@Override
public void clear() {
setSuccessIsSet(false);
this.success = 0;
}
public long getSuccess() {
return this.success;
}
public shutdown_result setSuccess(long success) {
this.success = success;
setSuccessIsSet(true);
return this;
}
public void unsetSuccess() {
__isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
}
/** Returns true if field success is set (has been assigned a value) and false otherwise */
public boolean isSetSuccess() {
return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID);
}
public void setSuccessIsSet(boolean value) {
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case SUCCESS:
if (value == null) {
unsetSuccess();
} else {
setSuccess((Long)value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case SUCCESS:
return Long.valueOf(getSuccess());
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case SUCCESS:
return isSetSuccess();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof shutdown_result)
return this.equals((shutdown_result)that);
return false;
}
public boolean equals(shutdown_result that) {
if (that == null)
return false;
boolean this_present_success = true;
boolean that_present_success = true;
if (this_present_success || that_present_success) {
if (!(this_present_success && that_present_success))
return false;
if (this.success != that.success)
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(shutdown_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetSuccess()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("shutdown_result(");
boolean first = true;
sb.append("success:");
sb.append(this.success);
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
// it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
__isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private static class shutdown_resultStandardSchemeFactory implements SchemeFactory {
public shutdown_resultStandardScheme getScheme() {
return new shutdown_resultStandardScheme();
}
}
private static class shutdown_resultStandardScheme extends StandardScheme<shutdown_result> {
public void read(org.apache.thrift.protocol.TProtocol iprot, shutdown_result struct) throws TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
struct.success = iprot.readI64();
struct.setSuccessIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, shutdown_result struct) throws TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
if (struct.isSetSuccess()) {
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
oprot.writeI64(struct.success);
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class shutdown_resultTupleSchemeFactory implements SchemeFactory {
public shutdown_resultTupleScheme getScheme() {
return new shutdown_resultTupleScheme();
}
}
private static class shutdown_resultTupleScheme extends TupleScheme<shutdown_result> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, shutdown_result struct) throws TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
if (struct.isSetSuccess()) {
optionals.set(0);
}
oprot.writeBitSet(optionals, 1);
if (struct.isSetSuccess()) {
oprot.writeI64(struct.success);
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, shutdown_result struct) throws TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(1);
if (incoming.get(0)) {
struct.success = iprot.readI64();
struct.setSuccessIsSet(true);
}
}
}
}
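// Editorial sketch (not part of the generated code): one way to exercise the
// same compact-protocol round trip used by writeObject/readObject above is
// libthrift's TSerializer / TDeserializer helpers. The class and factory names
// come from org.apache.thrift; the sample value 0L is arbitrary.
//
//   TSerializer serializer = new TSerializer(new TCompactProtocol.Factory());
//   byte[] bytes = serializer.serialize(new shutdown_result(0L));
//   shutdown_result copy = new shutdown_result();
//   new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);
//   // copy.getSuccess() == 0L and copy.isSetSuccess() == true after the read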
public static class getName_args implements org.apache.thrift.TBase<getName_args, getName_args._Fields>, java.io.Serializable, Cloneable, Comparable<getName_args> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getName_args");
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getName_argsStandardSchemeFactory());
schemes.put(TupleScheme.class, new getName_argsTupleSchemeFactory());
}
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
;
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if it's not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if it's not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getName_args.class, metaDataMap);
}
public getName_args() {
}
/**
* Performs a deep copy on <i>other</i>.
*/
public getName_args(getName_args other) {
}
public getName_args deepCopy() {
return new getName_args(this);
}
@Override
public void clear() {
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof getName_args)
return this.equals((getName_args)that);
return false;
}
public boolean equals(getName_args that) {
if (that == null)
return false;
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(getName_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("getName_args(");
boolean first = true;
sb.append(")");
return sb.toString();
}
public void validate() throws TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private static class getName_argsStandardSchemeFactory implements SchemeFactory {
public getName_argsStandardScheme getScheme() {
return new getName_argsStandardScheme();
}
}
private static class getName_argsStandardScheme extends StandardScheme<getName_args> {
public void read(org.apache.thrift.protocol.TProtocol iprot, getName_args struct) throws TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, getName_args struct) throws TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class getName_argsTupleSchemeFactory implements SchemeFactory {
public getName_argsTupleScheme getScheme() {
return new getName_argsTupleScheme();
}
}
private static class getName_argsTupleScheme extends TupleScheme<getName_args> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, getName_args struct) throws TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, getName_args struct) throws TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
}
}
}
public static class getName_result implements org.apache.thrift.TBase<getName_result, getName_result._Fields>, java.io.Serializable, Cloneable, Comparable<getName_result> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getName_result");
private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getName_resultStandardSchemeFactory());
schemes.put(TupleScheme.class, new getName_resultTupleSchemeFactory());
}
public String success; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
SUCCESS((short)0, "success");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if it's not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 0: // SUCCESS
return SUCCESS;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if it's not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getName_result.class, metaDataMap);
}
public getName_result() {
}
public getName_result(
String success)
{
this();
this.success = success;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public getName_result(getName_result other) {
if (other.isSetSuccess()) {
this.success = other.success;
}
}
public getName_result deepCopy() {
return new getName_result(this);
}
@Override
public void clear() {
this.success = null;
}
public String getSuccess() {
return this.success;
}
public getName_result setSuccess(String success) {
this.success = success;
return this;
}
public void unsetSuccess() {
this.success = null;
}
/** Returns true if field success is set (has been assigned a value) and false otherwise */
public boolean isSetSuccess() {
return this.success != null;
}
public void setSuccessIsSet(boolean value) {
if (!value) {
this.success = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case SUCCESS:
if (value == null) {
unsetSuccess();
} else {
setSuccess((String)value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case SUCCESS:
return getSuccess();
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case SUCCESS:
return isSetSuccess();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof getName_result)
return this.equals((getName_result)that);
return false;
}
public boolean equals(getName_result that) {
if (that == null)
return false;
boolean this_present_success = true && this.isSetSuccess();
boolean that_present_success = true && that.isSetSuccess();
if (this_present_success || that_present_success) {
if (!(this_present_success && that_present_success))
return false;
if (!this.success.equals(that.success))
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(getName_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetSuccess()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("getName_result(");
boolean first = true;
sb.append("success:");
if (this.success == null) {
sb.append("null");
} else {
sb.append(this.success);
}
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private static class getName_resultStandardSchemeFactory implements SchemeFactory {
public getName_resultStandardScheme getScheme() {
return new getName_resultStandardScheme();
}
}
private static class getName_resultStandardScheme extends StandardScheme<getName_result> {
public void read(org.apache.thrift.protocol.TProtocol iprot, getName_result struct) throws TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.success = iprot.readString();
struct.setSuccessIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, getName_result struct) throws TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
if (struct.success != null) {
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
oprot.writeString(struct.success);
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class getName_resultTupleSchemeFactory implements SchemeFactory {
public getName_resultTupleScheme getScheme() {
return new getName_resultTupleScheme();
}
}
private static class getName_resultTupleScheme extends TupleScheme<getName_result> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, getName_result struct) throws TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
if (struct.isSetSuccess()) {
optionals.set(0);
}
oprot.writeBitSet(optionals, 1);
if (struct.isSetSuccess()) {
oprot.writeString(struct.success);
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, getName_result struct) throws TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(1);
if (incoming.get(0)) {
struct.success = iprot.readString();
struct.setSuccessIsSet(true);
}
}
}
}
public static class getStatus_args implements org.apache.thrift.TBase<getStatus_args, getStatus_args._Fields>, java.io.Serializable, Cloneable, Comparable<getStatus_args> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getStatus_args");
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getStatus_argsStandardSchemeFactory());
schemes.put(TupleScheme.class, new getStatus_argsTupleSchemeFactory());
}
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
;
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if it's not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if it's not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getStatus_args.class, metaDataMap);
}
public getStatus_args() {
}
/**
* Performs a deep copy on <i>other</i>.
*/
public getStatus_args(getStatus_args other) {
}
public getStatus_args deepCopy() {
return new getStatus_args(this);
}
@Override
public void clear() {
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof getStatus_args)
return this.equals((getStatus_args)that);
return false;
}
public boolean equals(getStatus_args that) {
if (that == null)
return false;
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(getStatus_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("getStatus_args(");
boolean first = true;
sb.append(")");
return sb.toString();
}
public void validate() throws TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private static class getStatus_argsStandardSchemeFactory implements SchemeFactory {
public getStatus_argsStandardScheme getScheme() {
return new getStatus_argsStandardScheme();
}
}
private static class getStatus_argsStandardScheme extends StandardScheme<getStatus_args> {
public void read(org.apache.thrift.protocol.TProtocol iprot, getStatus_args struct) throws TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, getStatus_args struct) throws TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class getStatus_argsTupleSchemeFactory implements SchemeFactory {
public getStatus_argsTupleScheme getScheme() {
return new getStatus_argsTupleScheme();
}
}
private static class getStatus_argsTupleScheme extends TupleScheme<getStatus_args> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, getStatus_args struct) throws TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, getStatus_args struct) throws TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
}
}
}
public static class getStatus_result implements org.apache.thrift.TBase<getStatus_result, getStatus_result._Fields>, java.io.Serializable, Cloneable, Comparable<getStatus_result> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getStatus_result");
private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getStatus_resultStandardSchemeFactory());
schemes.put(TupleScheme.class, new getStatus_resultTupleSchemeFactory());
}
/**
*
* @see com.netflix.suro.thrift.ServiceStatus
*/
public ServiceStatus success; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
/**
*
* @see com.netflix.suro.thrift.ServiceStatus
*/
SUCCESS((short)0, "success");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if it's not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 0: // SUCCESS
return SUCCESS;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if it's not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ServiceStatus.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getStatus_result.class, metaDataMap);
}
public getStatus_result() {
}
public getStatus_result(
ServiceStatus success)
{
this();
this.success = success;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public getStatus_result(getStatus_result other) {
if (other.isSetSuccess()) {
this.success = other.success;
}
}
public getStatus_result deepCopy() {
return new getStatus_result(this);
}
@Override
public void clear() {
this.success = null;
}
/**
*
* @see com.netflix.suro.thrift.ServiceStatus
*/
public ServiceStatus getSuccess() {
return this.success;
}
/**
*
* @see com.netflix.suro.thrift.ServiceStatus
*/
public getStatus_result setSuccess(ServiceStatus success) {
this.success = success;
return this;
}
public void unsetSuccess() {
this.success = null;
}
/** Returns true if field success is set (has been assigned a value) and false otherwise */
public boolean isSetSuccess() {
return this.success != null;
}
public void setSuccessIsSet(boolean value) {
if (!value) {
this.success = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case SUCCESS:
if (value == null) {
unsetSuccess();
} else {
setSuccess((ServiceStatus)value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case SUCCESS:
return getSuccess();
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case SUCCESS:
return isSetSuccess();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof getStatus_result)
return this.equals((getStatus_result)that);
return false;
}
public boolean equals(getStatus_result that) {
if (that == null)
return false;
boolean this_present_success = true && this.isSetSuccess();
boolean that_present_success = true && that.isSetSuccess();
if (this_present_success || that_present_success) {
if (!(this_present_success && that_present_success))
return false;
if (!this.success.equals(that.success))
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(getStatus_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetSuccess()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("getStatus_result(");
boolean first = true;
sb.append("success:");
if (this.success == null) {
sb.append("null");
} else {
sb.append(this.success);
}
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private static class getStatus_resultStandardSchemeFactory implements SchemeFactory {
public getStatus_resultStandardScheme getScheme() {
return new getStatus_resultStandardScheme();
}
}
private static class getStatus_resultStandardScheme extends StandardScheme<getStatus_result> {
public void read(org.apache.thrift.protocol.TProtocol iprot, getStatus_result struct) throws TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.success = ServiceStatus.findByValue(iprot.readI32());
struct.setSuccessIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, getStatus_result struct) throws TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
if (struct.success != null) {
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
oprot.writeI32(struct.success.getValue());
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class getStatus_resultTupleSchemeFactory implements SchemeFactory {
public getStatus_resultTupleScheme getScheme() {
return new getStatus_resultTupleScheme();
}
}
private static class getStatus_resultTupleScheme extends TupleScheme<getStatus_result> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, getStatus_result struct) throws TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
if (struct.isSetSuccess()) {
optionals.set(0);
}
oprot.writeBitSet(optionals, 1);
if (struct.isSetSuccess()) {
oprot.writeI32(struct.success.getValue());
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, getStatus_result struct) throws TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(1);
if (incoming.get(0)) {
struct.success = ServiceStatus.findByValue(iprot.readI32());
struct.setSuccessIsSet(true);
}
}
}
}
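// Editorial note: the enum-typed success field above is carried as a plain i32
// on the wire (oprot.writeI32(struct.success.getValue())) and decoded with
// ServiceStatus.findByValue(iprot.readI32()), so a status code this client does
// not recognize deserializes to null rather than raising an error.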
public static class getVersion_args implements org.apache.thrift.TBase<getVersion_args, getVersion_args._Fields>, java.io.Serializable, Cloneable, Comparable<getVersion_args> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getVersion_args");
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getVersion_argsStandardSchemeFactory());
schemes.put(TupleScheme.class, new getVersion_argsTupleSchemeFactory());
}
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
;
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if it's not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if it's not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getVersion_args.class, metaDataMap);
}
public getVersion_args() {
}
/**
* Performs a deep copy on <i>other</i>.
*/
public getVersion_args(getVersion_args other) {
}
public getVersion_args deepCopy() {
return new getVersion_args(this);
}
@Override
public void clear() {
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof getVersion_args)
return this.equals((getVersion_args)that);
return false;
}
public boolean equals(getVersion_args that) {
if (that == null)
return false;
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(getVersion_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("getVersion_args(");
boolean first = true;
sb.append(")");
return sb.toString();
}
public void validate() throws TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private static class getVersion_argsStandardSchemeFactory implements SchemeFactory {
public getVersion_argsStandardScheme getScheme() {
return new getVersion_argsStandardScheme();
}
}
private static class getVersion_argsStandardScheme extends StandardScheme<getVersion_args> {
public void read(org.apache.thrift.protocol.TProtocol iprot, getVersion_args struct) throws TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, getVersion_args struct) throws TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class getVersion_argsTupleSchemeFactory implements SchemeFactory {
public getVersion_argsTupleScheme getScheme() {
return new getVersion_argsTupleScheme();
}
}
private static class getVersion_argsTupleScheme extends TupleScheme<getVersion_args> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, getVersion_args struct) throws TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, getVersion_args struct) throws TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
}
}
}
public static class getVersion_result implements org.apache.thrift.TBase<getVersion_result, getVersion_result._Fields>, java.io.Serializable, Cloneable, Comparable<getVersion_result> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getVersion_result");
private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getVersion_resultStandardSchemeFactory());
schemes.put(TupleScheme.class, new getVersion_resultTupleSchemeFactory());
}
public String success; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
SUCCESS((short)0, "success");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if it's not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 0: // SUCCESS
return SUCCESS;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if it's not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getVersion_result.class, metaDataMap);
}
public getVersion_result() {
}
public getVersion_result(
String success)
{
this();
this.success = success;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public getVersion_result(getVersion_result other) {
if (other.isSetSuccess()) {
this.success = other.success;
}
}
public getVersion_result deepCopy() {
return new getVersion_result(this);
}
@Override
public void clear() {
this.success = null;
}
public String getSuccess() {
return this.success;
}
public getVersion_result setSuccess(String success) {
this.success = success;
return this;
}
public void unsetSuccess() {
this.success = null;
}
/** Returns true if field success is set (has been assigned a value) and false otherwise */
public boolean isSetSuccess() {
return this.success != null;
}
public void setSuccessIsSet(boolean value) {
if (!value) {
this.success = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case SUCCESS:
if (value == null) {
unsetSuccess();
} else {
setSuccess((String)value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case SUCCESS:
return getSuccess();
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case SUCCESS:
return isSetSuccess();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof getVersion_result)
return this.equals((getVersion_result)that);
return false;
}
public boolean equals(getVersion_result that) {
if (that == null)
return false;
boolean this_present_success = true && this.isSetSuccess();
boolean that_present_success = true && that.isSetSuccess();
if (this_present_success || that_present_success) {
if (!(this_present_success && that_present_success))
return false;
if (!this.success.equals(that.success))
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(getVersion_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetSuccess()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("getVersion_result(");
boolean first = true;
sb.append("success:");
if (this.success == null) {
sb.append("null");
} else {
sb.append(this.success);
}
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private static class getVersion_resultStandardSchemeFactory implements SchemeFactory {
public getVersion_resultStandardScheme getScheme() {
return new getVersion_resultStandardScheme();
}
}
private static class getVersion_resultStandardScheme extends StandardScheme<getVersion_result> {
public void read(org.apache.thrift.protocol.TProtocol iprot, getVersion_result struct) throws TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.success = iprot.readString();
struct.setSuccessIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, getVersion_result struct) throws TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
if (struct.success != null) {
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
oprot.writeString(struct.success);
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class getVersion_resultTupleSchemeFactory implements SchemeFactory {
public getVersion_resultTupleScheme getScheme() {
return new getVersion_resultTupleScheme();
}
}
private static class getVersion_resultTupleScheme extends TupleScheme<getVersion_result> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, getVersion_result struct) throws TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
if (struct.isSetSuccess()) {
optionals.set(0);
}
oprot.writeBitSet(optionals, 1);
if (struct.isSetSuccess()) {
oprot.writeString(struct.success);
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, getVersion_result struct) throws TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(1);
if (incoming.get(0)) {
struct.success = iprot.readString();
struct.setSuccessIsSet(true);
}
}
}
}
}
| 1,338 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/thrift/ServiceStatus.java
|
/**
* Autogenerated by Thrift Compiler (0.9.1)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package com.netflix.suro.thrift;
import java.util.Map;
import java.util.HashMap;
import org.apache.thrift.TEnum;
public enum ServiceStatus implements TEnum {
DEAD(0),
STARTING(1),
ALIVE(2),
STOPPING(3),
STOPPED(4),
WARNING(5);
private final int value;
private ServiceStatus(int value) {
this.value = value;
}
/**
* Get the integer value of this enum value, as defined in the Thrift IDL.
*/
public int getValue() {
return value;
}
/**
* Find the enum type by its integer value, as defined in the Thrift IDL.
* @return null if the value is not found.
*/
public static ServiceStatus findByValue(int value) {
switch (value) {
case 0:
return DEAD;
case 1:
return STARTING;
case 2:
return ALIVE;
case 3:
return STOPPING;
case 4:
return STOPPED;
case 5:
return WARNING;
default:
return null;
}
}
}
| 1,339 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/thrift/ResultCode.java
|
/**
* Autogenerated by Thrift Compiler (0.9.1)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package com.netflix.suro.thrift;
import java.util.Map;
import java.util.HashMap;
import org.apache.thrift.TEnum;
public enum ResultCode implements TEnum {
OK(0),
STOPPED(1),
CRC_CORRUPTED(2),
QUEUE_FULL(3),
OTHER_ERROR(4);
private final int value;
private ResultCode(int value) {
this.value = value;
}
/**
* Get the integer value of this enum value, as defined in the Thrift IDL.
*/
public int getValue() {
return value;
}
/**
* Find the enum type by its integer value, as defined in the Thrift IDL.
* @return null if the value is not found.
*/
public static ResultCode findByValue(int value) {
switch (value) {
case 0:
return OK;
case 1:
return STOPPED;
case 2:
return CRC_CORRUPTED;
case 3:
return QUEUE_FULL;
case 4:
return OTHER_ERROR;
default:
return null;
}
}
}
| 1,340 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/thrift/SuroServer.java
|
/**
* Autogenerated by Thrift Compiler (0.9.1)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package com.netflix.suro.thrift;
import org.apache.thrift.TException;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;
import org.apache.thrift.scheme.TupleScheme;
import org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
public class SuroServer {
public interface Iface extends SuroService.Iface {
public Result process(TMessageSet messageSet) throws TException;
}
public interface AsyncIface extends SuroService.AsyncIface {
public void process(TMessageSet messageSet, AsyncMethodCallback resultHandler) throws TException;
}
public static class Client extends SuroService.Client implements Iface {
public static class Factory implements org.apache.thrift.TServiceClientFactory<Client> {
public Factory() {}
public Client getClient(org.apache.thrift.protocol.TProtocol prot) {
return new Client(prot);
}
public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
return new Client(iprot, oprot);
}
}
public Client(org.apache.thrift.protocol.TProtocol prot)
{
super(prot, prot);
}
public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
super(iprot, oprot);
}
public Result process(TMessageSet messageSet) throws TException
{
send_process(messageSet);
return recv_process();
}
public void send_process(TMessageSet messageSet) throws TException
{
process_args args = new process_args();
args.setMessageSet(messageSet);
sendBase("process", args);
}
public Result recv_process() throws TException
{
process_result result = new process_result();
receiveBase(result, "process");
if (result.isSetSuccess()) {
return result.success;
}
throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "process failed: unknown result");
}
}
public static class AsyncClient extends SuroService.AsyncClient implements AsyncIface {
public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
private org.apache.thrift.async.TAsyncClientManager clientManager;
private org.apache.thrift.protocol.TProtocolFactory protocolFactory;
public Factory(org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.protocol.TProtocolFactory protocolFactory) {
this.clientManager = clientManager;
this.protocolFactory = protocolFactory;
}
public AsyncClient getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) {
return new AsyncClient(protocolFactory, clientManager, transport);
}
}
public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.transport.TNonblockingTransport transport) {
super(protocolFactory, clientManager, transport);
}
public void process(TMessageSet messageSet, AsyncMethodCallback resultHandler) throws TException {
checkReady();
process_call method_call = new process_call(messageSet, resultHandler, this, ___protocolFactory, ___transport);
this.___currentMethod = method_call;
___manager.call(method_call);
}
public static class process_call extends org.apache.thrift.async.TAsyncMethodCall {
private TMessageSet messageSet;
public process_call(TMessageSet messageSet, AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
super(client, protocolFactory, transport, resultHandler, false);
this.messageSet = messageSet;
}
public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("process", org.apache.thrift.protocol.TMessageType.CALL, 0));
process_args args = new process_args();
args.setMessageSet(messageSet);
args.write(prot);
prot.writeMessageEnd();
}
public Result getResult() throws TException {
if (getState() != State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
return (new Client(prot)).recv_process();
}
}
}
public static class Processor<I extends Iface> extends SuroService.Processor<I> implements org.apache.thrift.TProcessor {
private static final Logger LOGGER = LoggerFactory.getLogger(Processor.class.getName());
public Processor(I iface) {
super(iface, getProcessMap(new HashMap<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>>()));
}
protected Processor(I iface, Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
super(iface, getProcessMap(processMap));
}
private static <I extends Iface> Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> getProcessMap(Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
processMap.put("process", new process());
return processMap;
}
public static class process<I extends Iface> extends org.apache.thrift.ProcessFunction<I, process_args> {
public process() {
super("process");
}
public process_args getEmptyArgsInstance() {
return new process_args();
}
protected boolean isOneway() {
return false;
}
public process_result getResult(I iface, process_args args) throws TException {
process_result result = new process_result();
result.success = iface.process(args.messageSet);
return result;
}
}
}
public static class AsyncProcessor<I extends AsyncIface> extends SuroService.AsyncProcessor<I> {
private static final Logger LOGGER = LoggerFactory.getLogger(AsyncProcessor.class.getName());
public AsyncProcessor(I iface) {
super(iface, getProcessMap(new HashMap<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>>()));
}
protected AsyncProcessor(I iface, Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
super(iface, getProcessMap(processMap));
}
private static <I extends AsyncIface> Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase,?>> getProcessMap(Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
processMap.put("process", new process());
return processMap;
}
public static class process<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, process_args, Result> {
public process() {
super("process");
}
public process_args getEmptyArgsInstance() {
return new process_args();
}
public AsyncMethodCallback<Result> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
return new AsyncMethodCallback<Result>() {
public void onComplete(Result o) {
process_result result = new process_result();
result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
} catch (Exception e) {
LOGGER.error("Exception writing to internal frame buffer", e);
}
fb.close();
}
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
process_result result = new process_result();
{
msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
}
try {
fcall.sendResponse(fb,msg,msgType,seqid);
return;
} catch (Exception ex) {
LOGGER.error("Exception writing to internal frame buffer", ex);
}
fb.close();
}
};
}
protected boolean isOneway() {
return false;
}
public void start(I iface, process_args args, AsyncMethodCallback<Result> resultHandler) throws TException {
iface.process(args.messageSet,resultHandler);
}
}
}
public static class process_args implements org.apache.thrift.TBase<process_args, process_args._Fields>, java.io.Serializable, Cloneable, Comparable<process_args> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("process_args");
private static final org.apache.thrift.protocol.TField MESSAGE_SET_FIELD_DESC = new org.apache.thrift.protocol.TField("messageSet", org.apache.thrift.protocol.TType.STRUCT, (short)1);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new process_argsStandardSchemeFactory());
schemes.put(TupleScheme.class, new process_argsTupleSchemeFactory());
}
public TMessageSet messageSet; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
MESSAGE_SET((short)1, "messageSet");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if it is not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // MESSAGE_SET
return MESSAGE_SET;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if it is not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.MESSAGE_SET, new org.apache.thrift.meta_data.FieldMetaData("messageSet", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TMessageSet.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(process_args.class, metaDataMap);
}
public process_args() {
}
public process_args(
TMessageSet messageSet)
{
this();
this.messageSet = messageSet;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public process_args(process_args other) {
if (other.isSetMessageSet()) {
this.messageSet = new TMessageSet(other.messageSet);
}
}
public process_args deepCopy() {
return new process_args(this);
}
@Override
public void clear() {
this.messageSet = null;
}
public TMessageSet getMessageSet() {
return this.messageSet;
}
public process_args setMessageSet(TMessageSet messageSet) {
this.messageSet = messageSet;
return this;
}
public void unsetMessageSet() {
this.messageSet = null;
}
/** Returns true if field messageSet is set (has been assigned a value) and false otherwise */
public boolean isSetMessageSet() {
return this.messageSet != null;
}
public void setMessageSetIsSet(boolean value) {
if (!value) {
this.messageSet = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case MESSAGE_SET:
if (value == null) {
unsetMessageSet();
} else {
setMessageSet((TMessageSet)value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case MESSAGE_SET:
return getMessageSet();
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case MESSAGE_SET:
return isSetMessageSet();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof process_args)
return this.equals((process_args)that);
return false;
}
public boolean equals(process_args that) {
if (that == null)
return false;
boolean this_present_messageSet = true && this.isSetMessageSet();
boolean that_present_messageSet = true && that.isSetMessageSet();
if (this_present_messageSet || that_present_messageSet) {
if (!(this_present_messageSet && that_present_messageSet))
return false;
if (!this.messageSet.equals(that.messageSet))
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(process_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetMessageSet()).compareTo(other.isSetMessageSet());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetMessageSet()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.messageSet, other.messageSet);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("process_args(");
boolean first = true;
sb.append("messageSet:");
if (this.messageSet == null) {
sb.append("null");
} else {
sb.append(this.messageSet);
}
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws TException {
// check for required fields
// check for sub-struct validity
if (messageSet != null) {
messageSet.validate();
}
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private static class process_argsStandardSchemeFactory implements SchemeFactory {
public process_argsStandardScheme getScheme() {
return new process_argsStandardScheme();
}
}
private static class process_argsStandardScheme extends StandardScheme<process_args> {
public void read(org.apache.thrift.protocol.TProtocol iprot, process_args struct) throws TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 1: // MESSAGE_SET
if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
struct.messageSet = new TMessageSet();
struct.messageSet.read(iprot);
struct.setMessageSetIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, process_args struct) throws TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
if (struct.messageSet != null) {
oprot.writeFieldBegin(MESSAGE_SET_FIELD_DESC);
struct.messageSet.write(oprot);
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class process_argsTupleSchemeFactory implements SchemeFactory {
public process_argsTupleScheme getScheme() {
return new process_argsTupleScheme();
}
}
private static class process_argsTupleScheme extends TupleScheme<process_args> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, process_args struct) throws TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
if (struct.isSetMessageSet()) {
optionals.set(0);
}
oprot.writeBitSet(optionals, 1);
if (struct.isSetMessageSet()) {
struct.messageSet.write(oprot);
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, process_args struct) throws TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(1);
if (incoming.get(0)) {
struct.messageSet = new TMessageSet();
struct.messageSet.read(iprot);
struct.setMessageSetIsSet(true);
}
}
}
}
public static class process_result implements org.apache.thrift.TBase<process_result, process_result._Fields>, java.io.Serializable, Cloneable, Comparable<process_result> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("process_result");
private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new process_resultStandardSchemeFactory());
schemes.put(TupleScheme.class, new process_resultTupleSchemeFactory());
}
public Result success; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
SUCCESS((short)0, "success");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if it is not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 0: // SUCCESS
return SUCCESS;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if it is not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Result.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(process_result.class, metaDataMap);
}
public process_result() {
}
public process_result(
Result success)
{
this();
this.success = success;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public process_result(process_result other) {
if (other.isSetSuccess()) {
this.success = new Result(other.success);
}
}
public process_result deepCopy() {
return new process_result(this);
}
@Override
public void clear() {
this.success = null;
}
public Result getSuccess() {
return this.success;
}
public process_result setSuccess(Result success) {
this.success = success;
return this;
}
public void unsetSuccess() {
this.success = null;
}
/** Returns true if field success is set (has been assigned a value) and false otherwise */
public boolean isSetSuccess() {
return this.success != null;
}
public void setSuccessIsSet(boolean value) {
if (!value) {
this.success = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case SUCCESS:
if (value == null) {
unsetSuccess();
} else {
setSuccess((Result)value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case SUCCESS:
return getSuccess();
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case SUCCESS:
return isSetSuccess();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof process_result)
return this.equals((process_result)that);
return false;
}
public boolean equals(process_result that) {
if (that == null)
return false;
boolean this_present_success = true && this.isSetSuccess();
boolean that_present_success = true && that.isSetSuccess();
if (this_present_success || that_present_success) {
if (!(this_present_success && that_present_success))
return false;
if (!this.success.equals(that.success))
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(process_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetSuccess()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("process_result(");
boolean first = true;
sb.append("success:");
if (this.success == null) {
sb.append("null");
} else {
sb.append(this.success);
}
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws TException {
// check for required fields
// check for sub-struct validity
if (success != null) {
success.validate();
}
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private static class process_resultStandardSchemeFactory implements SchemeFactory {
public process_resultStandardScheme getScheme() {
return new process_resultStandardScheme();
}
}
private static class process_resultStandardScheme extends StandardScheme<process_result> {
public void read(org.apache.thrift.protocol.TProtocol iprot, process_result struct) throws TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
struct.success = new Result();
struct.success.read(iprot);
struct.setSuccessIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, process_result struct) throws TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
if (struct.success != null) {
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
struct.success.write(oprot);
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class process_resultTupleSchemeFactory implements SchemeFactory {
public process_resultTupleScheme getScheme() {
return new process_resultTupleScheme();
}
}
private static class process_resultTupleScheme extends TupleScheme<process_result> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, process_result struct) throws TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
if (struct.isSetSuccess()) {
optionals.set(0);
}
oprot.writeBitSet(optionals, 1);
if (struct.isSetSuccess()) {
struct.success.write(oprot);
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, process_result struct) throws TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(1);
if (incoming.get(0)) {
struct.success = new Result();
struct.success.read(iprot);
struct.setSuccessIsSet(true);
}
}
}
}
}
| 1,341 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/thrift/Result.java
|
/**
* Autogenerated by Thrift Compiler (0.9.1)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package com.netflix.suro.thrift;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;
import org.apache.thrift.scheme.TupleScheme;
import java.util.*;
public class Result implements org.apache.thrift.TBase<Result, Result._Fields>, java.io.Serializable, Cloneable, Comparable<Result> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Result");
private static final org.apache.thrift.protocol.TField RESULT_CODE_FIELD_DESC = new org.apache.thrift.protocol.TField("resultCode", org.apache.thrift.protocol.TType.I32, (short)1);
private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)2);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new ResultStandardSchemeFactory());
schemes.put(TupleScheme.class, new ResultTupleSchemeFactory());
}
/**
*
* @see com.netflix.suro.thrift.ResultCode
*/
public ResultCode resultCode; // required
public String message; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
/**
*
* @see com.netflix.suro.thrift.ResultCode
*/
RESULT_CODE((short)1, "resultCode"),
MESSAGE((short)2, "message");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if it is not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // RESULT_CODE
return RESULT_CODE;
case 2: // MESSAGE
return MESSAGE;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if it is not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.RESULT_CODE, new org.apache.thrift.meta_data.FieldMetaData("resultCode", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ResultCode.class)));
tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Result.class, metaDataMap);
}
public Result() {
}
public Result(
ResultCode resultCode,
String message)
{
this();
this.resultCode = resultCode;
this.message = message;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public Result(Result other) {
if (other.isSetResultCode()) {
this.resultCode = other.resultCode;
}
if (other.isSetMessage()) {
this.message = other.message;
}
}
public Result deepCopy() {
return new Result(this);
}
@Override
public void clear() {
this.resultCode = null;
this.message = null;
}
/**
*
* @see com.netflix.suro.thrift.ResultCode
*/
public ResultCode getResultCode() {
return this.resultCode;
}
/**
*
* @see com.netflix.suro.thrift.ResultCode
*/
public Result setResultCode(ResultCode resultCode) {
this.resultCode = resultCode;
return this;
}
public void unsetResultCode() {
this.resultCode = null;
}
/** Returns true if field resultCode is set (has been assigned a value) and false otherwise */
public boolean isSetResultCode() {
return this.resultCode != null;
}
public void setResultCodeIsSet(boolean value) {
if (!value) {
this.resultCode = null;
}
}
public String getMessage() {
return this.message;
}
public Result setMessage(String message) {
this.message = message;
return this;
}
public void unsetMessage() {
this.message = null;
}
/** Returns true if field message is set (has been assigned a value) and false otherwise */
public boolean isSetMessage() {
return this.message != null;
}
public void setMessageIsSet(boolean value) {
if (!value) {
this.message = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case RESULT_CODE:
if (value == null) {
unsetResultCode();
} else {
setResultCode((ResultCode)value);
}
break;
case MESSAGE:
if (value == null) {
unsetMessage();
} else {
setMessage((String)value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case RESULT_CODE:
return getResultCode();
case MESSAGE:
return getMessage();
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case RESULT_CODE:
return isSetResultCode();
case MESSAGE:
return isSetMessage();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof Result)
return this.equals((Result)that);
return false;
}
public boolean equals(Result that) {
if (that == null)
return false;
boolean this_present_resultCode = true && this.isSetResultCode();
boolean that_present_resultCode = true && that.isSetResultCode();
if (this_present_resultCode || that_present_resultCode) {
if (!(this_present_resultCode && that_present_resultCode))
return false;
if (!this.resultCode.equals(that.resultCode))
return false;
}
boolean this_present_message = true && this.isSetMessage();
boolean that_present_message = true && that.isSetMessage();
if (this_present_message || that_present_message) {
if (!(this_present_message && that_present_message))
return false;
if (!this.message.equals(that.message))
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(Result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetResultCode()).compareTo(other.isSetResultCode());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetResultCode()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.resultCode, other.resultCode);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetMessage()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("Result(");
boolean first = true;
sb.append("resultCode:");
if (this.resultCode == null) {
sb.append("null");
} else {
sb.append(this.resultCode);
}
first = false;
if (!first) sb.append(", ");
sb.append("message:");
if (this.message == null) {
sb.append("null");
} else {
sb.append(this.message);
}
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (TException te) {
throw new java.io.IOException(te);
}
}
private static class ResultStandardSchemeFactory implements SchemeFactory {
public ResultStandardScheme getScheme() {
return new ResultStandardScheme();
}
}
private static class ResultStandardScheme extends StandardScheme<Result> {
public void read(org.apache.thrift.protocol.TProtocol iprot, Result struct) throws TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 1: // RESULT_CODE
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.resultCode = ResultCode.findByValue(iprot.readI32());
struct.setResultCodeIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 2: // MESSAGE
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.message = iprot.readString();
struct.setMessageIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, Result struct) throws TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
if (struct.resultCode != null) {
oprot.writeFieldBegin(RESULT_CODE_FIELD_DESC);
oprot.writeI32(struct.resultCode.getValue());
oprot.writeFieldEnd();
}
if (struct.message != null) {
oprot.writeFieldBegin(MESSAGE_FIELD_DESC);
oprot.writeString(struct.message);
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class ResultTupleSchemeFactory implements SchemeFactory {
public ResultTupleScheme getScheme() {
return new ResultTupleScheme();
}
}
private static class ResultTupleScheme extends TupleScheme<Result> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, Result struct) throws TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
if (struct.isSetResultCode()) {
optionals.set(0);
}
if (struct.isSetMessage()) {
optionals.set(1);
}
oprot.writeBitSet(optionals, 2);
if (struct.isSetResultCode()) {
oprot.writeI32(struct.resultCode.getValue());
}
if (struct.isSetMessage()) {
oprot.writeString(struct.message);
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, Result struct) throws TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
struct.resultCode = ResultCode.findByValue(iprot.readI32());
struct.setResultCodeIsSet(true);
}
if (incoming.get(1)) {
struct.message = iprot.readString();
struct.setMessageIsSet(true);
}
}
}
}
| 1,342 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client/SuroClient.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.client;
import com.google.inject.Injector;
import com.netflix.governator.configuration.PropertiesConfigurationProvider;
import com.netflix.governator.guice.BootstrapBinder;
import com.netflix.governator.guice.BootstrapModule;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.client.async.AsyncSuroClient;
import com.netflix.suro.message.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Properties;
/**
* Wrapper implementation of {@link ISuroClient}.
* Depending on its configuration, it delegates to either {@link SyncSuroClient}
* or {@link AsyncSuroClient}.
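* <p>
* A minimal usage sketch; the collector address {@code "localhost:7101"} is a placeholder,
* and the property keys are the constants defined on {@link ClientConfig}:
* <pre>{@code
* Properties props = new Properties();
* props.setProperty(ClientConfig.LB_TYPE, "static");            // pick the static load balancer
* props.setProperty(ClientConfig.LB_SERVER, "localhost:7101");  // placeholder collector address
* props.setProperty(ClientConfig.CLIENT_TYPE, "async");         // or "sync"
*
* SuroClient client = new SuroClient(props);
* client.send(new Message("routingKey", "payload".getBytes()));
* client.shutdown();
* }</pre>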
* @author jbae
*/
public class SuroClient implements ISuroClient {
private static final Logger log = LoggerFactory.getLogger(SuroClient.class);
private final ISuroClient client;
public SuroClient(Properties properties) {
createInjector(properties);
client = injector.getInstance(ISuroClient.class);
}
public void shutdown() {
injector.getInstance(LifecycleManager.class).close();
}
private Injector injector;
private Injector createInjector(final Properties properties) {
injector = LifecycleInjector
.builder()
.withBootstrapModule(
new BootstrapModule() {
@Override
public void configure(BootstrapBinder binder) {
binder.bindConfigurationProvider().toInstance(
new PropertiesConfigurationProvider(properties));
}
}
)
.withModules(new SuroClientModule())
.build().createInjector();
LifecycleManager manager = injector.getInstance(LifecycleManager.class);
try {
manager.start();
} catch (Exception e) {
throw new RuntimeException("LifecycleManager cannot start with an exception: " + e.getMessage(), e);
}
return injector;
}
@Override
public void send(Message message) {
client.send(message);
}
@Override
public long getSentMessageCount() {
return client.getSentMessageCount();
}
@Override
public long getLostMessageCount() {
return client.getLostMessageCount();
}
@Override
public long getNumOfPendingMessages() {
return client.getNumOfPendingMessages();
}
public ClientConfig getConfig() {
return injector.getInstance(ClientConfig.class);
}
}
| 1,343 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client/SyncSuroClient.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.client;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.DynamicCounter;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.servo.monitor.Monitors;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.TagKey;
import com.netflix.suro.client.async.AsyncSuroClient;
import com.netflix.suro.connection.ConnectionPool;
import com.netflix.suro.message.Compression;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.MessageSetBuilder;
import com.netflix.suro.message.MessageSetReader;
import com.netflix.suro.thrift.Result;
import com.netflix.suro.thrift.ResultCode;
import com.netflix.suro.thrift.TMessageSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
/**
* Synchronous {@link ISuroClient} implementation.
* Sends each message or {@link TMessageSet} one by one in the calling thread.
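* <p>
* It is normally obtained through {@link SuroClient} with {@code ClientConfig.CLIENT_TYPE}
* set to {@code "sync"} (the binding registered in {@link SuroClientModule}). A direct
* construction sketch, assuming an already wired {@link ClientConfig} and
* {@link ConnectionPool} instance:
* <pre>{@code
* SyncSuroClient client = new SyncSuroClient(config, connectionPool);
* boolean delivered = client.send(new MessageSetBuilder(config)
*         .withMessage("routingKey", "payload".getBytes())
*         .build());
* }</pre>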
* @author jbae
*/
public class SyncSuroClient implements ISuroClient {
private static final Logger log = LoggerFactory.getLogger(SyncSuroClient.class);
private final ClientConfig config;
private final ConnectionPool connectionPool;
private final Compression compression;
@Inject
public SyncSuroClient(ClientConfig config, ConnectionPool connectionPool) {
this.config = config;
this.connectionPool = connectionPool;
this.compression = Compression.create(config.getCompression());
Monitors.registerObject(this);
}
private AtomicLong sentMessageCount = new AtomicLong(0);
@Override
public long getSentMessageCount() {
return sentMessageCount.get();
}
private AtomicLong lostMessageCount = new AtomicLong(0);
@Override
public long getLostMessageCount() {
return lostMessageCount.get();
}
@Override
public long getNumOfPendingMessages() {
return 0;
}
private AtomicLong retriedCount = new AtomicLong(0);
public long getRetriedCount() {
return retriedCount.get();
}
@Monitor(name = "senderExceptionCount", type = DataSourceType.COUNTER)
private AtomicLong senderExceptionCount = new AtomicLong(0);
@Override
public void send(Message message) {
send(new MessageSetBuilder(config)
.withCompression(compression)
.withMessage(message.getRoutingKey(), message.getPayload()).build());
}
private List<AsyncSuroClient.Listener> emptyList = Lists.newArrayList();
public boolean send(TMessageSet messageSet) {
if (messageSet == null) {
return false;
}
boolean sent = false;
boolean retried = false;
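// Try up to getRetryCount() collectors: on success the connection is returned to the pool,
// a bad result marks the server down and backs off briefly before the next attempt, and an
// exception from send() also marks the server down before retrying.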
for (int i = 0; i < config.getRetryCount(); ++i) {
ConnectionPool.SuroConnection connection = connectionPool.chooseConnection();
if (connection == null) {
continue;
}
try {
Result result = connection.send(messageSet);
if (result != null && result.getResultCode() == ResultCode.OK && result.isSetMessage()) {
sent = true;
connectionPool.endConnection(connection);
retried = i > 0;
break;
} else {
log.error("Server is not stable: " + connection.getServer().toString());
connectionPool.markServerDown(connection);
try { Thread.sleep(Math.min(i + 1, 5) * 100); } catch (InterruptedException e) {} // brief back-off between retries; interruption is intentionally ignored
}
} catch (Exception e) {
log.error("Exception in send: " + e.getMessage(), e);
connectionPool.markServerDown(connection);
}
}
MessageSetReader reader = new MessageSetReader(messageSet);
if (sent) {
sentMessageCount.addAndGet(incrementMessageCount(TagKey.SENT_COUNT, config.getApp(), reader, emptyList));
if (retried) {
retriedCount.incrementAndGet();
}
} else {
lostMessageCount.addAndGet(incrementMessageCount(TagKey.LOST_COUNT, config.getApp(), reader, emptyList));
}
return sent;
}
public static int incrementMessageCount(String counterName, String app, Iterable<Message> messages, List<AsyncSuroClient.Listener> listeners) {
int count = 0;
for (Message message : messages) {
DynamicCounter.increment(
MonitorConfig.builder(counterName)
.withTag(TagKey.APP, app)
.withTag(TagKey.DATA_SOURCE, message.getRoutingKey())
.build());
++count;
}
for (AsyncSuroClient.Listener listener : listeners) {
listener.sentCallback(count);
}
return count;
}
}
| 1,344 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client/ConfigBasedSuroClientProvider.java
|
package com.netflix.suro.client;
import java.util.Map;
import javax.inject.Inject;
import com.google.inject.Provider;
import com.netflix.governator.guice.lazy.LazySingleton;
import com.netflix.suro.ClientConfig;
/**
* Configuration-based {@link ISuroClient} provider that creates an {@link ISuroClient}
* implementation based on the value of {@link ClientConfig#getClientType()}.
*
* To add a new client type implementation see {@link SuroClientModule}
*
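* <p>
* A sketch of registering an additional client type the same way the built-in
* {@code "sync"} and {@code "async"} bindings are registered; the name
* {@code "myclient"} and the {@code MyCustomSuroClient} class are hypothetical:
* <pre>{@code
* MapBinder<String, ISuroClient> clientImpls =
*         MapBinder.newMapBinder(binder(), String.class, ISuroClient.class);
* clientImpls.addBinding("myclient").to(MyCustomSuroClient.class).in(LazySingletonScope.get());
* }</pre>
*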
* @author elandau
*
*/
@LazySingleton
public class ConfigBasedSuroClientProvider implements Provider<ISuroClient> {
private final Provider<ClientConfig> configProvider;
private final Map<String, Provider<ISuroClient>> clientImpls;
private static final String DEFAULT_CLIENT_TYPE = "sync";
@Inject
public ConfigBasedSuroClientProvider(Provider<ClientConfig> configProvider, Map<String, Provider<ISuroClient>> clientImpls) {
this.configProvider = configProvider;
this.clientImpls = clientImpls;
}
@Override
public ISuroClient get() {
// Load the singleton ClientConfig lazily
ClientConfig config = configProvider.get();
if (config.getClientType() != null) {
if (!clientImpls.containsKey(config.getClientType())) {
throw new RuntimeException(
String.format("Unknown client type '%s'. Expecting one of %s",
config.getClientType(),
clientImpls.keySet()));
}
return clientImpls.get(config.getClientType()).get();
}
else {
return clientImpls.get(DEFAULT_CLIENT_TYPE).get();
}
}
}
| 1,345 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client/ConfigBasedLoadBalancerProvider.java
|
package com.netflix.suro.client;
import java.util.Map;
import javax.inject.Inject;
import com.google.inject.Provider;
import com.netflix.governator.guice.lazy.LazySingleton;
import com.netflix.loadbalancer.ILoadBalancer;
import com.netflix.suro.ClientConfig;
/**
* Configuration-based {@link ILoadBalancer} provider that creates an {@link ILoadBalancer}
* implementation based on the value of {@link ClientConfig#getLoadBalancerType()}.
*
* To add a new ILoadBalancer type implementation see {@link SuroClientModule}
*
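* <p>
* The built-in bindings registered in {@link SuroClientModule} are {@code "eureka"}
* ({@link com.netflix.suro.connection.EurekaLoadBalancer}) and {@code "static"}
* ({@link com.netflix.suro.connection.StaticLoadBalancer}); a sketch of adding another
* type (the {@code "custom"} name and the {@code MyLoadBalancer} class are hypothetical):
* <pre>{@code
* MapBinder<String, ILoadBalancer> loadBalancers =
*         MapBinder.newMapBinder(binder(), String.class, ILoadBalancer.class);
* loadBalancers.addBinding("custom").to(MyLoadBalancer.class);
* }</pre>
*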
* @author elandau
*
*/
@LazySingleton
public class ConfigBasedLoadBalancerProvider implements Provider<ILoadBalancer> {
private final Provider<ClientConfig> config;
private final Map<String, Provider<ILoadBalancer>> impls;
private static final String DEFAULT_LOAD_BALANCER_TYPE = "static";
@Inject
public ConfigBasedLoadBalancerProvider(Provider<ClientConfig> configProvider, Map<String, Provider<ILoadBalancer>> impls) {
this.config = configProvider;
this.impls = impls;
}
@Override
public ILoadBalancer get() {
// Load the singleton ClientConfig lazily
ClientConfig config = this.config.get();
if (config.getLoadBalancerType() != null) {
if (!impls.containsKey(config.getLoadBalancerType())) {
throw new RuntimeException(
String.format("Unknown load balancer type '%s'. Expecting one of %s",
config.getLoadBalancerType(),
impls.keySet()));
}
return impls.get(config.getLoadBalancerType()).get();
}
else {
return impls.get(DEFAULT_LOAD_BALANCER_TYPE).get();
}
}
}
| 1,346 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client/ISuroClient.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.client;
import com.netflix.suro.message.Message;
/**
* Interface for SuroClient
* @author jbae
*/
public interface ISuroClient {
void send(Message message);
long getSentMessageCount();
long getLostMessageCount();
long getNumOfPendingMessages();
}
| 1,347 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client/SuroClientModule.java
|
package com.netflix.suro.client;
import javax.inject.Singleton;
import com.google.inject.AbstractModule;
import com.google.inject.multibindings.MapBinder;
import com.netflix.governator.guice.lazy.LazySingletonScope;
import com.netflix.loadbalancer.ILoadBalancer;
import com.netflix.suro.client.async.AsyncSuroClient;
import com.netflix.suro.connection.EurekaLoadBalancer;
import com.netflix.suro.connection.StaticLoadBalancer;
@Singleton
public class SuroClientModule extends AbstractModule {
@Override
protected void configure() {
MapBinder<String, ILoadBalancer> loadBalancers = MapBinder.newMapBinder(binder(), String.class, ILoadBalancer.class);
loadBalancers.addBinding("eureka").to(EurekaLoadBalancer.class);
loadBalancers.addBinding("static").to(StaticLoadBalancer.class);
MapBinder<String, ISuroClient> clientImpls = MapBinder.newMapBinder(binder(), String.class, ISuroClient.class);
clientImpls.addBinding("async").to(AsyncSuroClient.class).in(LazySingletonScope.get());
clientImpls.addBinding("sync").to(SyncSuroClient.class).in(LazySingletonScope.get());
bind(ISuroClient.class).toProvider(ConfigBasedSuroClientProvider.class);
bind(ILoadBalancer.class).toProvider(ConfigBasedLoadBalancerProvider.class);
}
}
| 1,348 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client/example/SuroClient4Test.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.client.example;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.client.SuroClient;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.Message;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
public class SuroClient4Test {
public static void main(String[] args) throws JsonProcessingException, InterruptedException {
// ip num_of_messages message_size sleep num_of_iterations
String ip = args[0];
int numMessages = Integer.parseInt(args[1]);
int messageSize = Integer.parseInt(args[2]);
int sleep = Integer.parseInt(args[3]);
int numIterations = Integer.parseInt(args[4]);
Properties props = new Properties();
props.setProperty(ClientConfig.LB_TYPE, "static");
props.setProperty(ClientConfig.LB_SERVER, ip);
SuroClient client = new SuroClient(props);
byte[] payload = createMessagePayload(messageSize);
for (int n = 0; n < numIterations; ++n) {
for (int i = 0; i < numMessages; ++i) {
client.send(new Message(i % 2 == 0 ? "request_trace" : "nf_errors_log", payload));
}
Thread.sleep(sleep);
}
client.shutdown();
}
public static byte[] createMessagePayload(int length) throws JsonProcessingException {
Map<String, Object> map = new HashMap<String, Object>();
int currentLength = 0;
int index = 0;
while (currentLength < length) {
String key = "f" + index;
String value = "v" + index;
map.put(key, value);
currentLength += key.length() + value.length();
++index;
}
return new DefaultObjectMapper().writeValueAsBytes(map);
}
}
| 1,349 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client/example/ExampleClient.java
|
package com.netflix.suro.client.example;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.client.SuroClient;
import com.netflix.suro.message.Message;
import java.util.Properties;
public class ExampleClient {
public static void main(String[] args) {
// create the client
final Properties clientProperties = new Properties();
clientProperties.setProperty(ClientConfig.LB_TYPE, "static");
clientProperties.setProperty(ClientConfig.LB_SERVER, args[0]);
clientProperties.setProperty(ClientConfig.CLIENT_TYPE, args[1]);
SuroClient client = new SuroClient(clientProperties);
// send the message
for (int i = 0; i < Integer.parseInt(args[2]); ++i) {
client.send(new Message("routingKey", "testMessage".getBytes()));
}
client.shutdown();
}
}
| 1,350 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client/async/AsyncSuroClient.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.client.async;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.inject.Inject;
import com.netflix.config.DynamicIntProperty;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.DynamicCounter;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.servo.monitor.Monitors;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.TagKey;
import com.netflix.suro.client.ISuroClient;
import com.netflix.suro.client.SyncSuroClient;
import com.netflix.suro.connection.ConnectionPool;
import com.netflix.suro.message.Compression;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.MessageSetBuilder;
import com.netflix.suro.message.MessageSetReader;
import com.netflix.suro.thrift.TMessageSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.PreDestroy;
import java.util.List;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;
/**
* Asynchronous {@link ISuroClient} implementation. Messages are first put into a queue;
* a background thread then drains the queue and sends them out in batches asynchronously.
*/
public class AsyncSuroClient implements ISuroClient {
private static final Logger log = LoggerFactory.getLogger(AsyncSuroClient.class);
private final ClientConfig config;
private final ConnectionPool connectionPool;
private final Queue4Client messageQueue;
private final BlockingQueue<Runnable> jobQueue;
private final ThreadPoolExecutor senders;
private final MessageSetBuilder builder;
@Monitor(name = TagKey.LOST_COUNT, type = DataSourceType.COUNTER)
private AtomicLong lostMessages = new AtomicLong(0);
@Override
public long getLostMessageCount() {
return lostMessages.get();
}
@Monitor(name = "MessageQueueSize", type = DataSourceType.GAUGE)
@Override
public long getNumOfPendingMessages() {
return messageQueue.size();
}
private AtomicLong sentMessages = new AtomicLong(0);
@Override
public long getSentMessageCount() {
return sentMessages.get();
}
@Monitor(name = TagKey.RESTORED_COUNT, type = DataSourceType.COUNTER)
private AtomicLong restoredMessages = new AtomicLong(0);
public long getRestoredMessageCount() {
return restoredMessages.get();
}
@Monitor(name = TagKey.RETRIED_COUNT, type = DataSourceType.COUNTER)
private AtomicLong retriedCount = new AtomicLong(0);
public long getRetriedCount() {
return retriedCount.get();
}
private ExecutorService poller = Executors.newSingleThreadExecutor(
new ThreadFactoryBuilder().setNameFormat("AsyncSuroClientPoller-%d").build());
public static final String asyncRateLimitConfig = "SuroClient.asyncRateLimit";
private final DynamicIntProperty rateLimitMsgPerSec = new DynamicIntProperty(asyncRateLimitConfig, Integer.MAX_VALUE) {
@Override
protected void propertyChanged() {
rateLimiter.setMsgPerSec(get());
}
};
private final RateLimiter rateLimiter;
@Inject
public AsyncSuroClient(
ClientConfig config,
Queue4Client messageQueue,
ConnectionPool connectionPool) {
this.config = config;
this.messageQueue = messageQueue;
this.connectionPool = connectionPool;
this.builder = new MessageSetBuilder(config)
.withCompression(Compression.create(config.getCompression()));
poller.execute(createPoller());
jobQueue = new ArrayBlockingQueue<Runnable>(config.getAsyncJobQueueCapacity())
{
@Override
public boolean offer(Runnable runnable) {
try {
put(runnable); // block instead of rejecting the task, slowing the producer down
} catch (InterruptedException e) {
// do nothing
}
return true;
}
};
senders = new ThreadPoolExecutor(
config.getAsyncSenderThreads(), config.getAsyncSenderThreads(),
10, TimeUnit.SECONDS,
jobQueue,
new RejectedExecutionHandler() {
@Override
public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
TMessageSet messageSet = ((AsyncSuroSender) r).getMessageSet();
for (Message m : new MessageSetReader(messageSet)) {
restore(m);
}
}
});
rateLimiter = new RateLimiter(rateLimitMsgPerSec.get());
Monitors.registerObject(this);
}
@Override
public void send(Message message) {
if (!messageQueue.offer(message)) {
lostMessages.incrementAndGet();
DynamicCounter.increment(
MonitorConfig.builder(TagKey.LOST_COUNT)
.withTag(TagKey.APP, config.getApp())
.withTag(TagKey.DATA_SOURCE, message.getRoutingKey())
.build());
for (Listener listener : listeners) {
listener.lostCallback(1);
}
}
}
public void restore(Message message) {
restoredMessages.incrementAndGet();
DynamicCounter.increment(
MonitorConfig.builder(TagKey.RESTORED_COUNT)
.withTag(TagKey.APP, config.getApp())
.withTag(TagKey.DATA_SOURCE, message.getRoutingKey())
.build());
for (Listener listener : listeners) {
listener.restoredCallback();
}
send(message);
}
@VisibleForTesting
protected long queuedMessageSetCount = 0;
private boolean running;
private long lastBatch;
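// The poller drains messages from messageQueue into the MessageSetBuilder and hands a
// batch to the sender pool whenever the batch is full (config.getAsyncBatchSize()) or
// config.getAsyncTimeout() has elapsed since the last flush; once running is set to
// false it drains and flushes whatever remains in the queue.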
private Runnable createPoller() {
running = true;
final AsyncSuroClient client = this;
return new Runnable() {
@Override
public void run() {
while (running || !messageQueue.isEmpty()) {
try {
Message msg = messageQueue.poll(
Math.max(0, lastBatch + config.getAsyncTimeout() - System.currentTimeMillis()),
TimeUnit.MILLISECONDS);
boolean expired = (msg == null);
if (!expired) {
builder.withMessage(msg.getRoutingKey(), msg.getPayload());
builder.drainFrom(messageQueue, config.getAsyncBatchSize() - builder.size());
}
boolean full = (builder.size() >= config.getAsyncBatchSize());
if ((expired || full) && builder.size() > 0) {
lastBatch = System.currentTimeMillis();
rateLimiter.pause(builder.size());
senders.execute(new AsyncSuroSender(builder.build(), client, config));
++queuedMessageSetCount;
} else if (builder.size() == 0) {
Thread.sleep(config.getAsyncTimeout());
}
} catch (Exception e) {
log.error("MessageConsumer poller exception: " + e.getMessage(), e);
}
}
builder.drainFrom(messageQueue, (int) messageQueue.size());
if (builder.size() > 0) {
senders.execute(new AsyncSuroSender(builder.build(), client, config));
++queuedMessageSetCount;
}
}
};
}
@PreDestroy
public void shutdown() {
running = false;
poller.shutdown();
try {
poller.awaitTermination(5000 + config.getAsyncTimeout(), TimeUnit.MILLISECONDS);
if (!poller.isTerminated()) {
log.error("AsyncSuroClient.poller didn't terminate gracefully within {} seconds", (5 + config.getAsyncTimeout()/1000));
}
senders.shutdown();
senders.awaitTermination(5000 + config.getAsyncTimeout(), TimeUnit.MILLISECONDS);
if (!senders.isTerminated()) {
log.error("AsyncSuroClient.senders didn't terminate gracefully within {} seconds", (5 + config.getAsyncTimeout()/1000));
}
} catch (InterruptedException e) {
// ignore exceptions while shutting down
}
}
@Monitor(name = "JobQueueSize", type = DataSourceType.GAUGE)
private int getJobQueueSize() {
return jobQueue.size();
}
@Monitor(name = "sendTime", type = DataSourceType.GAUGE)
private long sendTime;
public void updateSendTime(long sendTime) {
this.sendTime = sendTime;
}
public void updateSentDataStats(TMessageSet messageSet, boolean retried) {
sentMessages.addAndGet(
SyncSuroClient.incrementMessageCount(
TagKey.SENT_COUNT,
config.getApp(),
new MessageSetReader(messageSet),
listeners));
if (retried) {
retriedCount.incrementAndGet();
for (Listener listener : listeners) {
listener.retriedCallback();
}
}
}
public ConnectionPool getConnectionPool() {
return connectionPool;
}
@Monitor(name = "senderExceptionCount", type = DataSourceType.COUNTER)
private AtomicLong senderExceptionCount = new AtomicLong(0);
public void updateSenderException() {
senderExceptionCount.incrementAndGet();
}
public static interface Listener {
void sentCallback(int count);
void restoredCallback();
void lostCallback(int count);
void retriedCallback();
}
private List<Listener> listeners = new CopyOnWriteArrayList<>();
public void addListener(Listener listener) {
listeners.add(listener);
}
}
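// Illustrative sketch (not part of the Suro sources): a minimal Listener that logs the
// callbacks defined above. It assumes an already constructed AsyncSuroClient instance
// (e.g. obtained via injection) and only shows how a listener would be registered.
class LoggingListenerSketch {
    static void register(AsyncSuroClient client) {
        client.addListener(new AsyncSuroClient.Listener() {
            @Override
            public void sentCallback(int count) {
                System.out.println("sent " + count + " message(s)");
            }
            @Override
            public void restoredCallback() {
                System.out.println("restored a message after a failed send");
            }
            @Override
            public void lostCallback(int count) {
                System.out.println("lost " + count + " message(s)");
            }
            @Override
            public void retriedCallback() {
                System.out.println("a batch was sent after retrying");
            }
        });
    }
}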
| 1,351 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client/async/RateLimiter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.client.async;
/**
* A simple rate limiter based on <a href="http://lucene.apache.org/">Lucene</a>'s RateLimiter.SimpleRateLimiter.
*/
public class RateLimiter {
private volatile int msgPerSec;
private volatile double nsPerMsg;
private volatile long lastNS;
public RateLimiter(int msgPerSec) {
setMsgPerSec(msgPerSec);
}
public void setMsgPerSec(int msgPerSec) {
this.msgPerSec = msgPerSec;
nsPerMsg = 1000000000. / msgPerSec;
}
public int getMsgPerSec() {
return this.msgPerSec;
}
/** Pauses, if necessary, to keep the instantaneous message
* rate at or below the target. NOTE: multiple threads
* may safely use this; the implementation is not perfectly
* thread safe, but in practice that is likely harmless
* (it just means the rate may occasionally exceed the target).
* It's best to call this with a sizable message count,
* not one message at a time.
* @return the pause time in nanoseconds
*/
public long pause(int msgs) throws InterruptedException {
if (msgs == 1) {
return 0;
}
// TODO: this is purely instantaneous rate; maybe we
// should also offer decayed recent history one?
final long targetNS = lastNS = lastNS + ((long) (msgs * nsPerMsg));
final long startNS;
long curNS = startNS = System.nanoTime();
if (lastNS < curNS) {
lastNS = curNS;
}
// While loop because Thread.sleep doesn't always sleep
// enough:
while(true) {
final long pauseNS = targetNS - curNS;
if (pauseNS > 0) {
Thread.sleep((int) (pauseNS/1000000), (int) (pauseNS % 1000000));
curNS = System.nanoTime();
continue;
}
break;
}
return curNS - startNS;
}
}
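// Illustrative sketch (not part of the Suro sources): exercises the pause() contract
// above from a hypothetical producer loop. The 500 msg/sec limit and the batch size of
// 100 are arbitrary example values.
class RateLimiterSketch {
    public static void main(String[] args) throws InterruptedException {
        RateLimiter limiter = new RateLimiter(500); // target: at most 500 messages per second
        for (int batch = 0; batch < 10; ++batch) {
            long pausedNs = limiter.pause(100);     // account for a batch of 100 messages
            System.out.println("batch " + batch + " paused " + pausedNs + " ns");
        }
    }
}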
| 1,352 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client/async/AsyncSuroSender.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.client.async;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.connection.ConnectionPool;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.MessageSetReader;
import com.netflix.suro.thrift.Result;
import com.netflix.suro.thrift.ResultCode;
import com.netflix.suro.thrift.TMessageSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The sender that actually sends out messages. It can be scheduled by an {@link java.util.concurrent.Executor}
* and therefore be used to send out messages asynchronously. It also retries if a message failed to be sent.
*/
public class AsyncSuroSender implements Runnable {
private static final Logger log = LoggerFactory.getLogger(AsyncSuroSender.class);
private final AsyncSuroClient client;
private final TMessageSet messageSet;
private final ConnectionPool connectionPool;
private final ClientConfig config;
public AsyncSuroSender(
TMessageSet messageSet,
AsyncSuroClient client,
ClientConfig config) {
this.messageSet = messageSet;
this.client = client;
this.connectionPool = client.getConnectionPool();
this.config = config;
}
public void run() {
boolean sent = false;
boolean retried = false;
long startTS = System.currentTimeMillis();
for (int i = 0; i < config.getRetryCount(); ++i) {
ConnectionPool.SuroConnection connection = connectionPool.chooseConnection();
if (connection == null) {
continue;
}
try {
Result result = connection.send(messageSet);
if (result != null && result.getResultCode() == ResultCode.OK && result.isSetMessage()) {
sent = true;
connectionPool.endConnection(connection);
retried = i > 0;
break;
} else {
log.error("Server is not stable: " + connection.getServer().toString());
connectionPool.markServerDown(connection);
try { Thread.sleep(Math.min(i + 1, 5) * 100); } catch (InterruptedException e) {} // back off linearly (capped at 500 ms) before retrying; interruption is ignored
}
} catch (Exception e) {
log.error("Exception in send: " + e.getMessage(), e);
connectionPool.markServerDown(connection);
client.updateSenderException();
}
}
if (sent) {
client.updateSendTime(System.currentTimeMillis() - startTS);
client.updateSentDataStats(messageSet, retried);
} else {
for (Message m : new MessageSetReader(messageSet)) {
client.restore(m);
}
}
}
public TMessageSet getMessageSet() {
return messageSet;
}
}
| 1,353 |
0 |
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client
|
Create_ds/suro/suro-client/src/main/java/com/netflix/suro/client/async/Queue4Client.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.client.async;
import com.google.inject.Inject;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.message.Message;
import com.netflix.suro.queue.FileQueue4Sink;
import com.netflix.suro.queue.MemoryQueue4Sink;
import com.netflix.suro.queue.MessageQueue4Sink;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
/**
* A simple proxy of the queues used by {@link AsyncSuroClient}. It determines whether to use
* an in-memory queue or a file-backed queue based on user configuration.
*/
public class Queue4Client {
private static final Logger logger = LoggerFactory.getLogger(Queue4Client.class);
private MessageQueue4Sink queue;
@Inject
public Queue4Client(ClientConfig config) {
if (config.getAsyncQueueType().equals("memory")) {
queue = new MemoryQueue4Sink(config.getAsyncMemoryQueueCapacity());
} else {
try {
createQueuePathIfNeeded(config.getAsyncFileQueuePath());
queue = new FileQueue4Sink(
config.getAsyncFileQueuePath(),
config.getAsyncFileQueueName(),
config.getAsyncFileQueueGCPeriod(),
config.getFileQueueSizeLimit());
} catch (IOException e) {
throw new IllegalStateException("Exception on initializing Queue4Client: " + e.getMessage(), e);
}
}
}
private void createQueuePathIfNeeded(String queueDir) {
File f = new File(queueDir);
if (f.exists() && f.isDirectory()) {
return;
}
if (f.exists() && f.isFile()) {
throw new IllegalStateException(String.format("The given file queue location %s is not a directory. ", queueDir));
}
boolean created = f.mkdirs();
if (!created) {
throw new IllegalStateException("Failed to create the queue dir " + queueDir);
} else {
logger.info("The queue directory {} did not exist and has been created", queueDir);
}
}
public boolean offer(Message msg) {
return queue.offer(msg);
}
public Message poll(long timeout, TimeUnit timeUnit) throws InterruptedException {
return queue.poll(timeout, timeUnit);
}
public int drain(int batchSize, List<Message> msgList) {
return queue.drain(batchSize, msgList);
}
public void close() {
queue.close();
}
public boolean isEmpty() {
return queue.isEmpty();
}
public long size() {
return queue.size();
}
}
| 1,354 |
0 |
Create_ds/suro/suro-elasticsearch/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-elasticsearch/src/test/java/com/netflix/suro/sink/elasticsearch/TestIndexSuffixFormatter.java
|
package com.netflix.suro.sink.elasticsearch;
import org.joda.time.DateTime;
import org.junit.Test;
import java.util.Properties;
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
public class TestIndexSuffixFormatter {
@Test
public void shouldNullTypeReturnsEmptyString() {
IndexSuffixFormatter formatter = new IndexSuffixFormatter(null, null);
assertEquals(formatter.format(any(IndexInfo.class)), "");
}
@Test
public void shouldDateTypeReturnsCorrectOne() {
System.setProperty("user.timezone", "GMT");
Properties props = new Properties();
props.put("dateFormat", "YYYYMMdd");
DateTime dt = new DateTime("2014-10-12T00:00:00.000Z");
IndexSuffixFormatter formatter = new IndexSuffixFormatter("date", props);
IndexInfo info = mock(IndexInfo.class);
doReturn(dt.getMillis()).when(info).getTimestamp();
assertEquals(formatter.format(info), "20141012");
}
@Test(expected=RuntimeException.class)
public void shouldThrowExceptionOnUnsupportedType() {
IndexSuffixFormatter formatter = new IndexSuffixFormatter("invalid", null);
}
@Test
public void testWeeklyRepresentation() {
System.setProperty("user.timezone", "GMT");
Properties props = new Properties();
props.put("dateFormat", "YYYYMM_ww");
DateTime dt = new DateTime("2014-10-12T00:00:00.000Z");
IndexSuffixFormatter formatter = new IndexSuffixFormatter("date", props);
IndexInfo info = mock(IndexInfo.class);
doReturn(dt.getMillis()).when(info).getTimestamp();
assertEquals(formatter.format(info), "201410_41");
}
}
| 1,355 |
0 |
Create_ds/suro/suro-elasticsearch/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-elasticsearch/src/test/java/com/netflix/suro/sink/elasticsearch/TestElasticSearchSink.java
|
package com.netflix.suro.sink.elasticsearch;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.BeanProperty;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.InjectableValues;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.jsontype.NamedType;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.netflix.client.config.CommonClientConfigKey;
import com.netflix.client.config.IClientConfig;
import com.netflix.loadbalancer.BaseLoadBalancer;
import com.netflix.niws.client.http.RestClient;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.DefaultMessageContainer;
import com.netflix.suro.message.Message;
import com.netflix.suro.sink.Sink;
import org.elasticsearch.action.count.CountRequest;
import org.elasticsearch.action.count.CountResponse;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.joda.time.DateTime;
import org.junit.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numNodes = 1)
public class TestElasticSearchSink extends ElasticsearchIntegrationTest {
protected String getPort() {
return "9200";
}
@Override
protected Settings nodeSettings(int nodeOrdinal) {
return ImmutableSettings.settingsBuilder()
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 1)
.put(super.nodeSettings(nodeOrdinal)).build();
}
@Test
public void testDefaultArgument() throws IOException {
String index = "topic";
createDefaultESSink(index);
refresh();
CountResponse countResponse = client().count(new CountRequest(index)).actionGet();
assertEquals(countResponse.getCount(), 100);
}
private ElasticSearchSink createDefaultESSink(String index) throws JsonProcessingException {
ObjectMapper jsonMapper = new DefaultObjectMapper();
ElasticSearchSink sink = new ElasticSearchSink(
index,
null,
10,
1000,
Lists.newArrayList("localhost:" + getPort()),
null,
0,0,0,0,1000,
null,
false,
jsonMapper,
null
);
sink.open();
DateTime dt = new DateTime("2014-10-12T12:12:12.000Z");
Map<String, Object> msg = new ImmutableMap.Builder<String, Object>()
.put("f1", "v1")
.put("f2", "v2")
.put("f3", "v3")
.put("ts", dt.getMillis())
.build();
for (int i = 0; i < 100; ++i) {
sink.writeTo(new DefaultMessageContainer(new Message(index, jsonMapper.writeValueAsBytes(msg)), jsonMapper));
}
sink.close();
return sink;
}
@Test
public void testIndexInfoBuilder() throws IOException {
ObjectMapper jsonMapper = new DefaultObjectMapper();
Properties props = new Properties();
props.setProperty("dateFormat", "YYYYMMdd");
ElasticSearchSink sink = new ElasticSearchSink(
"testIndexInfoBuilder",
null,
1,
1000,
Lists.newArrayList("localhost:" + getPort()),
new DefaultIndexInfoBuilder(
null,
null,
new TimestampField("ts", null),
new IndexSuffixFormatter("date", props),
null,
jsonMapper),
0,0,0,0,0,
null,
false,
jsonMapper,
null
);
sink.open();
DateTime dt = new DateTime("2014-10-12T12:12:12.000Z");
Map<String, Object> msg = new ImmutableMap.Builder<String, Object>()
.put("f1", "v1")
.put("f2", "v2")
.put("f3", "v3")
.put("ts", dt.getMillis())
.build();
String routingKey = "topic";
String index = "topic20141012";
for (int i = 0; i < 100; ++i) {
sink.writeTo(new DefaultMessageContainer(new Message(routingKey, jsonMapper.writeValueAsBytes(msg)), jsonMapper));
}
sink.close();
refresh();
CountResponse countResponse = client().count(new CountRequest(index)).actionGet();
assertEquals(countResponse.getCount(), 100);
}
@Test
public void testCreate() throws IOException {
String desc = " {\n" +
" \"type\": \"elasticsearch\",\n" +
" \"queue4Sink\":{\"type\": \"memory\", \"capacity\": 0 },\n" +
" \"batchSize\": 100,\n" +
" \"batchTimeout\": 1000,\n" +
" \"clientName\": \"es_test\",\n" +
" \"cluster.name\": \"es_test\",\n" +
" \"addressList\": [\"http://host1:8080\", \"http://host2:8080\"],\n" +
" \"indexInfo\":{\n" +
" \"type\": \"default\",\n" +
" \"indexTypeMap\":{\"routingkey1\":\"index1:type1\", \"routingkey2\":\"index2:type2\"},\n" +
" \"idFields\":{\"index\":[\"f1\", \"f2\"]},\n" +
" \"timestamp\": {\"field\":\"ts\"},\n" +
" \"indexSuffixFormatter\":{\"type\": \"date\", \"properties\":{\"dateFormat\":\"YYYYMMdd\"}}\n" +
" }\n" +
" }";
final ObjectMapper jsonMapper = new DefaultObjectMapper();
jsonMapper.registerSubtypes(new NamedType(ElasticSearchSink.class, "elasticsearch"));
jsonMapper.setInjectableValues(new InjectableValues() {
@Override
public Object findInjectableValue(
Object valueId,
DeserializationContext ctxt,
BeanProperty forProperty,
Object beanInstance
) {
if (valueId.equals(ObjectMapper.class.getCanonicalName())) {
return jsonMapper;
} else {
return null;
}
}
});
Sink sink = jsonMapper.readValue(desc, new TypeReference<Sink>(){});
assertTrue(sink instanceof ElasticSearchSink);
ElasticSearchSink esSink = (ElasticSearchSink) sink;
esSink.createClient();
RestClient client = esSink.getClient();
IClientConfig config = ((BaseLoadBalancer) client.getLoadBalancer()).getClientConfig();
assertTrue(config.get(CommonClientConfigKey.OkToRetryOnAllOperations));
assertEquals(2, config.get(CommonClientConfigKey.MaxAutoRetriesNextServer).intValue());
assertEquals(0, esSink.getSleepOverClientException());
assertFalse(esSink.getReenqueueOnException());
}
@Test
public void testRecover() throws Exception {
ObjectMapper jsonMapper = new DefaultObjectMapper();
ElasticSearchSink sink = new ElasticSearchSink(
"default",
null,
10,
1000,
Lists.newArrayList("localhost:" + getPort()),
null,
0,0,0,0,
0,
null,
false,
jsonMapper,
null
);
sink.open();
DateTime dt = new DateTime("2014-10-12T12:12:12.000Z");
Map<String, Object> msg = new ImmutableMap.Builder<String, Object>()
.put("f1", "v1")
.put("f2", "v2")
.put("f3", "v3")
.put("ts", dt.getMillis())
.build();
String routingKey = "topicrecover";
String index = "topicrecover";
List<Message> msgList = new ArrayList<>();
int msgCount = 100;
for (int i = 0; i < msgCount; ++i) {
msgList.add(new Message(routingKey, jsonMapper.writeValueAsBytes(msg)));
}
for (Message m : msgList) {
sink.recover(m);
}
refresh();
CountResponse countResponse = client().count(new CountRequest(index)).actionGet();
assertEquals(countResponse.getCount(), 100);
}
private ObjectMapper jsonMapper = new DefaultObjectMapper();
// @Test
// public void testStat() throws JsonProcessingException, InterruptedException {
// final long ts = System.currentTimeMillis() - 1;
//
// IndexInfoBuilder indexInfo = mock(IndexInfoBuilder.class);
// doAnswer(new Answer() {
// @Override
// public Object answer(InvocationOnMock invocation) throws Throwable {
// final Message m = (Message) invocation.getArguments()[0];
// if (m.getRoutingKey().startsWith("parsing_failed")) {
// return null;
// } else {
// return new IndexInfo() {
// @Override
// public String getIndex() {
// return m.getRoutingKey();
// }
//
// @Override
// public String getType() {
// return "type";
// }
//
// @Override
// public Object getSource() {
// if (m.getRoutingKey().startsWith("rejected")) {
// return m.getPayload();
// } else {
// return new String(m.getPayload());
// }
// }
//
// @Override
// public String getId() {
// return null;
// }
//
// @Override
// public long getTimestamp() {
// return ts;
// }
// };
// }
// }
// }).when(indexInfo).create(any(Message.class));
//
// ElasticSearchSink sink = new ElasticSearchSink(
// "testStat",
// null, // by default it will be memory queue
// 1000,
// 5000,
// Lists.newArrayList("localhost:" + getPort()),
// indexInfo,
// 0,0,0,0,0,
// null,
// jsonMapper,
// null);
// sink.open();
//
// for (int i = 0; i < 3; ++i) {
// for (int j = 0; j < 3; ++j) {
// sink.writeTo(new DefaultMessageContainer(new Message("parsing_failed_topic" + i, getAnyMessage()), jsonMapper));
// }
// for (int j = 0; j < 3; ++j) {
// sink.writeTo(new DefaultMessageContainer(new Message("indexed" + i, getAnyMessage()), jsonMapper));
// }
// for (int j = 0; j < 3; ++j) {
// sink.writeTo(new DefaultMessageContainer(new Message("rejected" + i, getAnyMessage()), jsonMapper));
// }
// }
//
// sink.close();
// String stat = sink.getStat();
// System.out.println(stat);
// int count = 0;
// for (int i = 0; i < 3; ++i) {
// for (int j = 0; j < 3; ++j) {
// if (stat.contains("parsing_failed_topic" + i + ":3")) {
// ++count;
// }
// }
// for (int j = 0; j < 3; ++j) {
// if (stat.contains("indexed" + i + ":3")) {
// ++count;
// }
// }
// for (int j = 0; j < 3; ++j) {
// if (stat.contains("rejected" + i + ":3")) {
// ++count;
// }
// }
// }
// assertEquals(count, 27);
//
// // check indexDelay section
// ArrayIterator iterator = new ArrayIterator(stat.split("\n"));
// while (iterator.hasNext() && !iterator.next().equals("indexDelay"));
// Set<String> stringSet = new HashSet<>();
// for (int i = 0; i < 6; ++i) {
// String s = (String) iterator.next();
// assertTrue(Long.parseLong(s.split(":")[1]) > 0);
// stringSet.add(s.split(":")[0]);
// }
// assertEquals(stringSet.size(), 6);
// }
private byte[] getAnyMessage() throws JsonProcessingException {
return jsonMapper.writeValueAsBytes(new ImmutableMap.Builder<String, Object>().put("f1", "v1").build());
}
}
| 1,356 |
0 |
Create_ds/suro/suro-elasticsearch/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-elasticsearch/src/test/java/com/netflix/suro/sink/elasticsearch/TestTimestampField.java
|
package com.netflix.suro.sink.elasticsearch;
import com.google.common.collect.ImmutableMap;
import org.joda.time.DateTime;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class TestTimestampField {
@Test
public void shouldNullFormatReturnsLongTS() {
TimestampField field = new TimestampField("ts", null);
long ts = System.currentTimeMillis();
assertEquals(
field.get(
new ImmutableMap.Builder<String, Object>()
.put("ts", ts)
.put("field1", "value1").build()),
ts);
assertEquals(
field.get(
new ImmutableMap.Builder<String, Object>()
.put("ts", "2014-04-05T00:00:00.000Z")
.put("field1", "value1").build()),
new DateTime("2014-04-05T00:00:00.000Z").getMillis());
}
@Test(expected=IllegalArgumentException.class)
public void shouldNonNullFormatThrowsException() {
TimestampField field = new TimestampField("ts", "YYYY-MM-DD");
long ts = System.currentTimeMillis();
assertEquals(
field.get(
new ImmutableMap.Builder<String, Object>()
.put("ts", ts)
.put("field1", "value1").build()),
ts);
field.get(
new ImmutableMap.Builder<String, Object>()
.put("ts", "2014-04-05T00:00:00.000Z")
.put("field1", "value1").build());
}
@Test
public void testFormat() {
TimestampField field = new TimestampField("ts", "EEE MMM dd HH:mm:ss zzz YYYY");
assertEquals(
field.get(new ImmutableMap.Builder<String, Object>()
.put("ts", "Fri Oct 03 18:25:08 GMT 2014")
.put("field1", "value1").build()),
new DateTime("2014-10-03T18:25:08.000Z").getMillis());
}
@Test
public void testFormat2() {
TimestampField field = new TimestampField("ts", "YYYY-MM-dd HH:mm:ss.SSS");
assertEquals(
field.get(new ImmutableMap.Builder<String, Object>()
.put("ts", "2014-10-17 19:53:26.001")
.put("field1", "value1").build()),
new DateTime("2014-10-17T19:53:26.001Z").getMillis());
}
}
| 1,357 |
0 |
Create_ds/suro/suro-elasticsearch/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-elasticsearch/src/test/java/com/netflix/suro/sink/elasticsearch/TestDefaultIndexInfoBuilder.java
|
package com.netflix.suro.sink.elasticsearch;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.BeanProperty;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.InjectableValues;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.Message;
import com.netflix.suro.sink.DataConverter;
import org.joda.time.DateTime;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
public class TestDefaultIndexInfoBuilder {
private ObjectMapper jsonMapper = new DefaultObjectMapper();
@Before
public void setup() {
System.setProperty("user.timezone", "GMT");
}
@Test
public void shouldReturnNullOnParsingFailure() {
DefaultIndexInfoBuilder builder = new DefaultIndexInfoBuilder(
null,
null,
null,
null,
null,
jsonMapper);
assertNull(builder.create(new Message("routingkey", "message".getBytes())));
}
@Test
public void shouldNullOrEmptyIndexTypeMapReturnRoutingKey() throws JsonProcessingException {
DefaultIndexInfoBuilder builder = new DefaultIndexInfoBuilder(
null,
null,
null,
null,
null,
jsonMapper);
Map<String, Object> msg = new ImmutableMap.Builder<String, Object>().put("f1", "v1").build();
IndexInfo info = builder.create(new Message("routingkey", jsonMapper.writeValueAsBytes(msg)));
assertEquals(info.getIndex(), "routingkey");
assertEquals(info.getType(), "default");
}
@Test
public void shouldIndexTypeMapReturnSetting() throws JsonProcessingException {
DefaultIndexInfoBuilder builder = new DefaultIndexInfoBuilder(
new ImmutableMap.Builder<String, String>()
.put("routingkey1", "index1:type1")
.put("routingkey2", "index2").build(),
null,
null,
null,
null,
jsonMapper);
Map<String, Object> msg = new ImmutableMap.Builder<String, Object>().put("f1", "v1").build();
IndexInfo info = builder.create(new Message("routingkey1", jsonMapper.writeValueAsBytes(msg)));
assertEquals(info.getIndex(), "index1");
assertEquals(info.getType(), "type1");
info = builder.create(new Message("routingkey2", jsonMapper.writeValueAsBytes(msg)));
assertEquals(info.getIndex(), "index2");
assertEquals(info.getType(), "default");
}
@Test
public void shouldIndexFormatterWorkWithTimestampField() throws JsonProcessingException {
Properties props = new Properties();
props.put("dateFormat", "YYYYMMdd");
DefaultIndexInfoBuilder builder = new DefaultIndexInfoBuilder(
new ImmutableMap.Builder<String, String>()
.put("routingkey1", "index1:type1")
.put("routingkey2", "index2").build(),
null,
new TimestampField("ts", null),
new IndexSuffixFormatter("date", props),
null,
jsonMapper);
DateTime dt = new DateTime("2014-10-12T00:00:00.000Z");
Map<String, Object> msg = new ImmutableMap.Builder<String, Object>()
.put("ts", dt.getMillis())
.put("f1", "v1").build();
IndexInfo info = builder.create(new Message("routingkey1", jsonMapper.writeValueAsBytes(msg)));
assertEquals(info.getIndex(), "index120141012");
assertEquals(info.getType(), "type1");
info = builder.create(new Message("routingkey2", jsonMapper.writeValueAsBytes(msg)));
assertEquals(info.getIndex(), "index220141012");
assertEquals(info.getType(), "default");
}
@Test
public void shouldSourceConvertedOrNot() throws IOException {
DataConverter converter = new DataConverter() {
@Override
public Map<String, Object> convert(Map<String, Object> msg) {
msg.put("app", "app");
return msg;
}
};
DefaultIndexInfoBuilder builder = new DefaultIndexInfoBuilder(
null,
null,
null,
null,
converter,
jsonMapper);
Map<String, Object> msg = new ImmutableMap.Builder<String, Object>()
.put("f1", "v1").build();
IndexInfo info = builder.create(new Message("routingkey", jsonMapper.writeValueAsBytes(msg)));
Map<String, Object> source = (Map<String, Object>) info.getSource();
assertEquals(source.get("app"), "app");
assertEquals(source.get("f1"), "v1");
builder = new DefaultIndexInfoBuilder(
null,
null,
null,
null,
null,
jsonMapper);
msg = new ImmutableMap.Builder<String, Object>()
.put("f1", "v1").build();
info = builder.create(new Message("routingkey", jsonMapper.writeValueAsBytes(msg)));
source = jsonMapper.readValue((String)info.getSource(), new TypeReference<Map<String, Object>>() {});
assertNull(source.get("app"));
assertEquals(source.get("f1"), "v1");
}
@Test
public void shouldGetIdReturnNullOnEmptyList() throws JsonProcessingException {
DefaultIndexInfoBuilder builder = new DefaultIndexInfoBuilder(
null,
null,
null,
null,
null,
jsonMapper);
Map<String, Object> msg = new ImmutableMap.Builder<String, Object>()
.put("f1", "v1").build();
IndexInfo info = builder.create(new Message("routingkey", jsonMapper.writeValueAsBytes(msg)));
assertNull(info.getId());
builder = new DefaultIndexInfoBuilder(
null,
new ImmutableMap.Builder<String, List<String>>().build(),
null,
null,
null,
jsonMapper);
info = builder.create(new Message("routingkey", jsonMapper.writeValueAsBytes(msg)));
assertNull(info.getId());
}
@Test
public void shouldGetIdReturnsConcatenatedStr() throws JsonProcessingException {
Map<String, Object> msg = new ImmutableMap.Builder<String, Object>()
.put("f1", "v1")
.put("f2", "v2")
.put("f3", "v3")
.build();
DefaultIndexInfoBuilder builder = new DefaultIndexInfoBuilder(
null,
new ImmutableMap.Builder<String, List<String>>().put("routingkey", Lists.newArrayList("f1", "f2")).build(),
null,
null,
null,
jsonMapper);
IndexInfo info = builder.create(new Message("routingkey", jsonMapper.writeValueAsBytes(msg)));
assertEquals(info.getId(), "v1v2");
}
@Test
public void shouldGetIdReturnsConcatedStrWithTimeslice() throws JsonProcessingException {
DateTime dt = new DateTime("2014-10-12T12:12:12.000Z");
Map<String, Object> msg = new ImmutableMap.Builder<String, Object>()
.put("f1", "v1")
.put("f2", "v2")
.put("f3", "v3")
.put("ts", dt.getMillis())
.build();
DefaultIndexInfoBuilder builder = new DefaultIndexInfoBuilder(
null,
new ImmutableMap.Builder<String, List<String>>().put("routingkey", Lists.newArrayList("f1", "f2", "ts_minute")).build(),
new TimestampField("ts", null),
null,
null,
jsonMapper);
IndexInfo info = builder.create(new Message("routingkey", jsonMapper.writeValueAsBytes(msg)));
assertEquals(info.getId(), ("v1v2" + dt.getMillis() / 60000));
}
@Test
public void testCreation() throws IOException {
String desc = "{\n" +
" \"type\": \"default\",\n" +
" \"indexTypeMap\":{\"routingkey1\":\"index1:type1\", \"routingkey2\":\"index2:type2\"},\n" +
" \"idFields\":{\"routingkey\": [\"f1\", \"f2\", \"ts_minute\"]},\n" +
" \"timestamp\": {\"field\":\"ts\"},\n" +
" \"indexSuffixFormatter\":{\"type\": \"date\", \"properties\":{\"dateFormat\":\"YYYYMMdd\"}}\n" +
"}";
jsonMapper.setInjectableValues(new InjectableValues() {
@Override
public Object findInjectableValue(
Object valueId,
DeserializationContext ctxt,
BeanProperty forProperty,
Object beanInstance
) {
if (valueId.equals(ObjectMapper.class.getCanonicalName())) {
return jsonMapper;
} else {
return null;
}
}
});
DateTime dt = new DateTime("2014-10-12T12:12:12.000Z");
Map<String, Object> msg = new ImmutableMap.Builder<String, Object>()
.put("f1", "v1")
.put("f2", "v2")
.put("f3", "v3")
.put("ts", dt.getMillis())
.build();
IndexInfoBuilder builder = jsonMapper.readValue(desc, new TypeReference<IndexInfoBuilder>(){});
IndexInfo info = builder.create(new Message("routingkey", jsonMapper.writeValueAsBytes(msg)));
assertEquals(info.getId(), ("v1v2" + dt.getMillis() / 60000));
}
}
| 1,358 |
0 |
Create_ds/suro/suro-elasticsearch/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-elasticsearch/src/main/java/com/netflix/suro/sink/elasticsearch/ElasticSearchSink.java
|
package com.netflix.suro.sink.elasticsearch;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Strings;
import com.netflix.client.ClientFactory;
import com.netflix.client.config.CommonClientConfigKey;
import com.netflix.client.http.HttpRequest;
import com.netflix.client.http.HttpResponse;
import com.netflix.config.ConfigurationManager;
import com.netflix.niws.client.http.RestClient;
import com.netflix.servo.DefaultMonitorRegistry;
import com.netflix.servo.monitor.*;
import com.netflix.suro.TagKey;
import com.netflix.suro.message.DefaultMessageContainer;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.MessageContainer;
import com.netflix.suro.queue.MemoryQueue4Sink;
import com.netflix.suro.queue.MessageQueue4Sink;
import com.netflix.suro.servo.Servo;
import com.netflix.suro.sink.Sink;
import com.netflix.suro.sink.ThreadPoolQueuedSink;
import com.netflix.util.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
public class ElasticSearchSink extends ThreadPoolQueuedSink implements Sink {
private static Logger log = LoggerFactory.getLogger(ElasticSearchSink.class);
public static final String TYPE = "elasticsearch";
private static final String INDEXED_ROW = "indexedRow";
private static final String REJECTED_ROW = "rejectedRow";
private static final String PARSING_FAILED = "parsingFailedRow";
private static final String INDEX_DELAY = "indexDelay";
private static final String SINK_ID = "sinkId";
private static final String ABANDONED_MESSAGES_ON_EXCEPTION = "abandonedMessagesOnException";
private RestClient client;
private final List<String> addressList;
private final Properties ribbonEtc;
private final IndexInfoBuilder indexInfo;
private final String clientName;
private final ObjectMapper jsonMapper;
private final Timer timer;
private final int sleepOverClientException;
private final boolean reEnqueueOnException;
public static final class BatchProcessingException extends Exception {
public BatchProcessingException(String message) {
super(message);
}
}
public ElasticSearchSink(
@JsonProperty("clientName") String clientName,
@JsonProperty("queue4Sink") MessageQueue4Sink queue4Sink,
@JsonProperty("batchSize") int batchSize,
@JsonProperty("batchTimeout") int batchTimeout,
@JsonProperty("addressList") List<String> addressList,
@JsonProperty("indexInfo") @JacksonInject IndexInfoBuilder indexInfo,
@JsonProperty("jobQueueSize") int jobQueueSize,
@JsonProperty("corePoolSize") int corePoolSize,
@JsonProperty("maxPoolSize") int maxPoolSize,
@JsonProperty("jobTimeout") long jobTimeout,
@JsonProperty("sleepOverClientException") int sleepOverClientException,
@JsonProperty("ribbon.etc") Properties ribbonEtc,
@JsonProperty("reEnqueueOnException") boolean reEnqueueOnException,
@JacksonInject ObjectMapper jsonMapper,
@JacksonInject RestClient client) {
super(jobQueueSize, corePoolSize, maxPoolSize, jobTimeout, clientName);
this.indexInfo =
indexInfo == null ? new DefaultIndexInfoBuilder(null, null, null, null, null, jsonMapper) : indexInfo;
initialize(
clientName,
queue4Sink == null ? new MemoryQueue4Sink(10000) : queue4Sink,
batchSize,
batchTimeout,
true);
this.jsonMapper = jsonMapper;
this.addressList = addressList;
this.ribbonEtc = ribbonEtc == null ? new Properties() : ribbonEtc;
this.clientName = clientName;
this.client = client;
this.timer = Servo.getTimer(clientName + "_latency");
this.sleepOverClientException = sleepOverClientException;
this.reEnqueueOnException = reEnqueueOnException;
}
@Override
public void writeTo(MessageContainer message) {
enqueue(message.getMessage());
}
@Override
public void open() {
Monitors.registerObject(clientName, this);
if (client == null) {
createClient();
}
setName(clientName);
start();
}
@Override
public String recvNotice() { return null; }
@Override
public String getStat() {
StringBuilder sb = new StringBuilder();
StringBuilder indexDelay = new StringBuilder();
StringBuilder indexed = new StringBuilder();
StringBuilder rejected = new StringBuilder();
StringBuilder parsingFailed = new StringBuilder();
for (Monitor<?> m : DefaultMonitorRegistry.getInstance().getRegisteredMonitors()) {
if (m instanceof BasicCounter) {
BasicCounter counter = (BasicCounter) m;
String sinkId = counter.getConfig().getTags().getValue(SINK_ID);
if (!Strings.isNullOrEmpty(sinkId) && sinkId.equals(getSinkId())) {
if (counter.getConfig().getName().equals(INDEXED_ROW)) {
indexed.append(counter.getConfig().getTags().getValue(TagKey.ROUTING_KEY))
.append(":")
.append(counter.getValue()).append('\n');
} else if (counter.getConfig().getName().equals(REJECTED_ROW)) {
rejected.append(counter.getConfig().getTags().getValue(TagKey.ROUTING_KEY))
.append(":")
.append(counter.getValue()).append('\n');
} else if (counter.getConfig().getName().equals(PARSING_FAILED)) {
parsingFailed.append(counter.getConfig().getTags().getValue(TagKey.ROUTING_KEY))
.append(":")
.append(counter.getValue()).append('\n');
}
}
} else if (m instanceof NumberGauge) {
NumberGauge gauge = (NumberGauge) m;
String sinkId = gauge.getConfig().getTags().getValue(SINK_ID);
if (!Strings.isNullOrEmpty(sinkId) && sinkId.equals(getSinkId())) {
if (gauge.getConfig().getName().equals(INDEX_DELAY)) {
indexDelay.append(gauge.getConfig().getTags().getValue(TagKey.ROUTING_KEY))
.append(":")
.append(gauge.getValue()).append('\n');
}
}
}
}
sb.append('\n').append(INDEX_DELAY).append('\n').append(indexDelay.toString());
sb.append('\n').append(INDEXED_ROW).append('\n').append(indexed.toString());
sb.append('\n').append(REJECTED_ROW).append('\n').append(rejected.toString());
sb.append('\n').append(PARSING_FAILED).append('\n').append(parsingFailed.toString());
return sb.toString();
}
@Override
protected void beforePolling() throws IOException {}
@VisibleForTesting
void createClient() {
if (ribbonEtc.containsKey("eureka")) {
ribbonEtc.setProperty(
clientName + ".ribbon.AppName", clientName);
ribbonEtc.setProperty(
clientName + ".ribbon.NIWSServerListClassName",
"com.netflix.niws.loadbalancer.DiscoveryEnabledNIWSServerList");
String[] host_port = addressList.get(0).split(":");
ribbonEtc.setProperty(
clientName + ".ribbon.DeploymentContextBasedVipAddresses",
host_port[0]);
ribbonEtc.setProperty(
clientName + ".ribbon.Port",
host_port[1]);
} else {
ribbonEtc.setProperty(clientName + ".ribbon.listOfServers", Joiner.on(",").join(addressList));
}
ribbonEtc.setProperty(
clientName + ".ribbon.EnablePrimeConnections",
"true");
String retryPropertyName = clientName + ".ribbon." + CommonClientConfigKey.OkToRetryOnAllOperations;
if (ribbonEtc.getProperty(retryPropertyName) == null) {
// by default, enable retrying POST operations upon read timeout
ribbonEtc.setProperty(retryPropertyName, "true");
}
String maxRetryProperty = clientName + ".ribbon." + CommonClientConfigKey.MaxAutoRetriesNextServer;
if (ribbonEtc.getProperty(maxRetryProperty) == null) {
// by default retry two different servers upon exception
ribbonEtc.setProperty(maxRetryProperty, "2");
}
ConfigurationManager.loadProperties(ribbonEtc);
client = (RestClient) ClientFactory.getNamedClient(clientName);
}
private String createIndexRequest(Message m) {
IndexInfo info = indexInfo.create(m);
if (info == null) {
Servo.getCounter(
MonitorConfig.builder(PARSING_FAILED)
.withTag(SINK_ID, getSinkId())
.withTag(TagKey.ROUTING_KEY, m.getRoutingKey())
.build()).increment();
return null;
} else {
Servo.getLongGauge(
MonitorConfig.builder(INDEX_DELAY)
.withTag(SINK_ID, getSinkId())
.withTag(TagKey.ROUTING_KEY, m.getRoutingKey())
.build()).set(System.currentTimeMillis() - info.getTimestamp());
try {
StringBuilder sb = new StringBuilder();
sb.append(indexInfo.getActionMetadata(info));
sb.append('\n');
sb.append(indexInfo.getSource(info));
sb.append('\n');
return sb.toString();
} catch (Exception e) {
Servo.getCounter(
MonitorConfig.builder(PARSING_FAILED)
.withTag(SINK_ID, getSinkId())
.withTag(TagKey.ROUTING_KEY, m.getRoutingKey())
.build()).increment();
return null;
}
}
}
@Override
protected void write(List<Message> msgList) throws IOException {
senders.execute(createRunnable(createBulkRequest(msgList)));
}
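// The bulk request body built below follows the Elasticsearch _bulk convention already
// used by createIndexRequest(): one newline-terminated action-metadata line (from
// IndexInfoBuilder.getActionMetadata) followed by one newline-terminated source line per
// message; the whole payload is POSTed to /_bulk.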
@VisibleForTesting
protected Pair<HttpRequest, List<Message>> createBulkRequest(List<Message> msgList) {
List<Message> msgListPayload = new LinkedList<>();
StringBuilder sb = new StringBuilder();
for (Message m : msgList) {
String indexRequest = createIndexRequest(m);
if (indexRequest != null) {
sb.append(indexRequest);
msgListPayload.add(m);
}
}
return new Pair<>(
HttpRequest.newBuilder()
.verb(HttpRequest.Verb.POST)
.uri("/_bulk")
.setRetriable(true)
.entity(sb.toString()).build(),
msgListPayload);
}
private Runnable createRunnable(final Pair<HttpRequest, List<Message>> request) {
return new Runnable() {
@Override
public void run() {
Stopwatch stopwatch = timer.start();
HttpResponse response = null;
List items = null;
try {
response = client.executeWithLoadBalancer(request.first());
stopwatch.stop();
if (response.getStatus() / 100 == 2) {
Map<String, Object> result = jsonMapper.readValue(
response.getInputStream(),
new TypeReference<Map<String, Object>>() {
});
log.debug("Response from ES: {}", result);
items = (List) result.get("items");
if (items == null || items.size() == 0) {
throw new BatchProcessingException("No items in the response");
}
throughput.increment(items.size());
} else {
throw new BatchProcessingException("Status is " + response.getStatus());
}
} catch (Exception e) {
// Handling the exception on the batch request here
log.error("Exception on bulk execute: " + e.getMessage(), e);
Servo.getCounter("bulkException").increment();
if (reEnqueueOnException) {
for (Message m : request.second()) {
writeTo(new DefaultMessageContainer(m, jsonMapper));
}
if (sleepOverClientException > 0) {
// sleep after an exception to avoid putting more stress on the cluster
try {
Thread.sleep(sleepOverClientException);
} catch (InterruptedException e1) {
// do nothing
}
}
} else {
for (Message message: request.second()) {
recover(message);
}
}
} finally {
if (response != null) {
response.close();
}
}
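// Per-item handling of the bulk response: each item is keyed by indexInfo.getCommand();
// an item that is missing or failed (non-2xx status) for any reason other than
// DocumentAlreadyExistsException is counted as rejected and re-sent via recover(),
// otherwise it is counted as indexed.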
if (items != null) {
for (int i = 0; i < items.size(); ++i) {
String routingKey = request.second().get(i).getRoutingKey();
Map<String, Object> resPerMessage = null;
try {
resPerMessage = (Map) ((Map) (items.get(i))).get(indexInfo.getCommand());
} catch (Exception e) {
// could be NPE or cast exception in case the response is unexpected
log.error("Unexpected exception", e);
}
if (resPerMessage == null ||
(isFailed(resPerMessage) && !getErrorMessage(resPerMessage).contains("DocumentAlreadyExistsException"))) {
if (resPerMessage != null) {
log.error("Failed indexing event " + routingKey + " with error message: " + resPerMessage.get("error"));
} else {
log.error("Response for event " + routingKey + " is null. Request is " + request.second().get(i));
}
Servo.getCounter(
MonitorConfig.builder(REJECTED_ROW)
.withTag(SINK_ID, getSinkId())
.withTag(TagKey.ROUTING_KEY, routingKey)
.build()).increment();
recover(request.second().get(i));
} else {
Servo.getCounter(
MonitorConfig.builder(INDEXED_ROW)
.withTag(SINK_ID, getSinkId())
.withTag(TagKey.ROUTING_KEY, routingKey)
.build()).increment();
}
}
}
}
};
}
private String getErrorMessage(Map<String, Object> resPerMessage) {
return (String) resPerMessage.get("error");
}
private boolean isFailed(Map<String, Object> resPerMessage) {
if (resPerMessage != null) {
return (int) resPerMessage.get("status") / 100 != 2;
} else {
return true;
}
}
public void recover(Message message) {
IndexInfo info = indexInfo.create(message);
HttpResponse response = null;
try {
response = client.executeWithLoadBalancer(
HttpRequest.newBuilder()
.verb(HttpRequest.Verb.POST)
.setRetriable(true)
.uri(indexInfo.getIndexUri(info))
.entity(indexInfo.getSource(info))
.build());
if (response.getStatus() / 100 != 2) {
Servo.getCounter(
MonitorConfig.builder("unrecoverableRow")
.withTag(SINK_ID, getSinkId())
.withTag(TagKey.ROUTING_KEY, message.getRoutingKey())
.build()).increment();
}
} catch (Exception e) {
log.error("Exception while recover: " + e.getMessage(), e);
Servo.getCounter("recoverException").increment();
if (reEnqueueOnException) {
writeTo(new DefaultMessageContainer(message, jsonMapper));
} else {
Servo.getCounter(
MonitorConfig.builder("unrecoverableRow")
.withTag(SINK_ID, getSinkId())
.withTag(TagKey.ROUTING_KEY, message.getRoutingKey())
.build()).increment();
}
} finally {
if (response != null) {
response.close();
}
}
}
@Override
protected void innerClose() {
super.innerClose();
client.shutdown();
}
@VisibleForTesting
RestClient getClient() {
return client;
}
@VisibleForTesting
int getSleepOverClientException() {
return sleepOverClientException;
}
@VisibleForTesting
boolean getReenqueueOnException() {
return reEnqueueOnException;
}
}
| 1,359 |
0 |
Create_ds/suro/suro-elasticsearch/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-elasticsearch/src/main/java/com/netflix/suro/sink/elasticsearch/IndexInfoBuilder.java
|
package com.netflix.suro.sink.elasticsearch;
import com.fasterxml.jackson.annotation.JsonSubTypes;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.netflix.suro.message.Message;
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
@JsonSubTypes(value = {
@JsonSubTypes.Type(name = "default", value = DefaultIndexInfoBuilder.class)
})
public interface IndexInfoBuilder {
IndexInfo create(Message msg);
String getActionMetadata(IndexInfo info);
String getSource(IndexInfo info) throws Exception;
String getIndexUri(IndexInfo info);
String getCommand();
}
| 1,360 |
0 |
Create_ds/suro/suro-elasticsearch/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-elasticsearch/src/main/java/com/netflix/suro/sink/elasticsearch/TimestampSlice.java
|
package com.netflix.suro.sink.elasticsearch;
public enum TimestampSlice {
ts_millisecond {
public String get(long ts) {
return Long.toString(ts);
}
},
ts_second {
public String get(long ts) {
return Long.toString(ts / 1000);
}
},
ts_minute {
public String get(long ts) {
return Long.toString(ts / 60000);
}
};
public abstract String get(long ts);
}
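// Illustrative sketch (not part of the Suro sources): shows how the enum above buckets
// an epoch-millisecond timestamp; the timestamp literal is an arbitrary example value.
class TimestampSliceSketch {
    public static void main(String[] args) {
        long ts = 1413115932000L;
        System.out.println(TimestampSlice.ts_millisecond.get(ts)); // 1413115932000
        System.out.println(TimestampSlice.ts_second.get(ts));      // 1413115932
        System.out.println(TimestampSlice.ts_minute.get(ts));      // 23551932
    }
}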
| 1,361 |
0 |
Create_ds/suro/suro-elasticsearch/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-elasticsearch/src/main/java/com/netflix/suro/sink/elasticsearch/IndexSuffixFormatter.java
|
package com.netflix.suro.sink.elasticsearch;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Function;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import javax.annotation.Nullable;
import java.util.Properties;
public class IndexSuffixFormatter {
private final Function<IndexInfo, String> formatter;
@JsonCreator
public IndexSuffixFormatter(
@JsonProperty("type") String type,
@JsonProperty("properties") Properties props) {
if (type == null) {
formatter = new Function<IndexInfo, String>() {
@Nullable
@Override
public String apply(@Nullable IndexInfo input) {
return "";
}
};
} else if (type.equals("date")) {
final DateTimeFormatter dateTimeFormatter = DateTimeFormat.forPattern(props.getProperty("dateFormat"));
formatter = new Function<IndexInfo, String>() {
@Nullable
@Override
public String apply(@Nullable IndexInfo input) {
return dateTimeFormatter.print(input.getTimestamp());
}
};
} else {
throw new RuntimeException("unsupported type: " + type);
}
}
public String format(IndexInfo info) {
return formatter.apply(info);
}
}
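// Illustrative sketch, not part of the original source: a "date" suffix formatter
// driven by the "dateFormat" property, applied to a fixed timestamp. The exact
// output depends on the JVM's default time zone, since no zone is set on the
// underlying DateTimeFormatter.
class IndexSuffixFormatterExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("dateFormat", "yyyyMMdd");
        IndexSuffixFormatter suffixFormatter = new IndexSuffixFormatter("date", props);
        IndexInfo info = new IndexInfo() {
            public String getIndex() { return "logs"; }
            public String getType() { return "default"; }
            public Object getSource() { return "{}"; }
            public String getId() { return null; }
            public long getTimestamp() { return 1384387200000L; } // 2013-11-14T00:00:00Z
        };
        System.out.println("logs" + suffixFormatter.format(info)); // e.g. logs20131114
    }
}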
| 1,362 |
0 |
Create_ds/suro/suro-elasticsearch/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-elasticsearch/src/main/java/com/netflix/suro/sink/elasticsearch/TimestampField.java
|
package com.netflix.suro.sink.elasticsearch;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Strings;
import org.joda.time.*;
import org.joda.time.field.DividedDateTimeField;
import org.joda.time.field.OffsetDateTimeField;
import org.joda.time.field.ScaledDurationField;
import org.joda.time.format.*;
import java.util.Locale;
import java.util.Map;
public class TimestampField {
private final String fieldName;
private final FormatDateTimeFormatter formatter;
@JsonCreator
public TimestampField(@JsonProperty("field") String fieldName, @JsonProperty("format") String format) {
if (format == null) {
format = "dateOptionalTime";
}
this.fieldName = fieldName;
this.formatter = Joda.forPattern(format);
}
public long get(Map<String, Object> msg) {
Object value = msg.get(fieldName);
if (value instanceof Number) {
return ((Number) value).longValue();
}
return formatter.parser().parseMillis(value.toString());
}
public static class Joda {
public static FormatDateTimeFormatter forPattern(String input) {
return forPattern(input, Locale.ROOT);
}
/**
* Parses a joda based pattern, including some named ones (similar to the built in Joda ISO ones).
*/
public static FormatDateTimeFormatter forPattern(String input, Locale locale) {
if (!Strings.isNullOrEmpty(input)) {
input = input.trim();
}
if (input == null || input.length() == 0) {
throw new IllegalArgumentException("No date pattern provided");
}
DateTimeFormatter formatter;
if ("basicDate".equals(input) || "basic_date".equals(input)) {
formatter = ISODateTimeFormat.basicDate();
} else if ("basicDateTime".equals(input) || "basic_date_time".equals(input)) {
formatter = ISODateTimeFormat.basicDateTime();
} else if ("basicDateTimeNoMillis".equals(input) || "basic_date_time_no_millis".equals(input)) {
formatter = ISODateTimeFormat.basicDateTimeNoMillis();
} else if ("basicOrdinalDate".equals(input) || "basic_ordinal_date".equals(input)) {
formatter = ISODateTimeFormat.basicOrdinalDate();
} else if ("basicOrdinalDateTime".equals(input) || "basic_ordinal_date_time".equals(input)) {
formatter = ISODateTimeFormat.basicOrdinalDateTime();
} else if ("basicOrdinalDateTimeNoMillis".equals(input) || "basic_ordinal_date_time_no_millis".equals(input)) {
formatter = ISODateTimeFormat.basicOrdinalDateTimeNoMillis();
} else if ("basicTime".equals(input) || "basic_time".equals(input)) {
formatter = ISODateTimeFormat.basicTime();
} else if ("basicTimeNoMillis".equals(input) || "basic_time_no_millis".equals(input)) {
formatter = ISODateTimeFormat.basicTimeNoMillis();
} else if ("basicTTime".equals(input) || "basic_t_Time".equals(input)) {
formatter = ISODateTimeFormat.basicTTime();
} else if ("basicTTimeNoMillis".equals(input) || "basic_t_time_no_millis".equals(input)) {
formatter = ISODateTimeFormat.basicTTimeNoMillis();
} else if ("basicWeekDate".equals(input) || "basic_week_date".equals(input)) {
formatter = ISODateTimeFormat.basicWeekDate();
} else if ("basicWeekDateTime".equals(input) || "basic_week_date_time".equals(input)) {
formatter = ISODateTimeFormat.basicWeekDateTime();
} else if ("basicWeekDateTimeNoMillis".equals(input) || "basic_week_date_time_no_millis".equals(input)) {
formatter = ISODateTimeFormat.basicWeekDateTimeNoMillis();
} else if ("date".equals(input)) {
formatter = ISODateTimeFormat.date();
} else if ("dateHour".equals(input) || "date_hour".equals(input)) {
formatter = ISODateTimeFormat.dateHour();
} else if ("dateHourMinute".equals(input) || "date_hour_minute".equals(input)) {
formatter = ISODateTimeFormat.dateHourMinute();
} else if ("dateHourMinuteSecond".equals(input) || "date_hour_minute_second".equals(input)) {
formatter = ISODateTimeFormat.dateHourMinuteSecond();
} else if ("dateHourMinuteSecondFraction".equals(input) || "date_hour_minute_second_fraction".equals(input)) {
formatter = ISODateTimeFormat.dateHourMinuteSecondFraction();
} else if ("dateHourMinuteSecondMillis".equals(input) || "date_hour_minute_second_millis".equals(input)) {
formatter = ISODateTimeFormat.dateHourMinuteSecondMillis();
} else if ("dateOptionalTime".equals(input) || "date_optional_time".equals(input)) {
// in this case, we have a separate parser and printer since the dateOptionalTimeParser can't print
// ideally the root locale would be used by default here instead of depending on the node
return new FormatDateTimeFormatter(input,
ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC),
ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC), locale);
} else if ("dateTime".equals(input) || "date_time".equals(input)) {
formatter = ISODateTimeFormat.dateTime();
} else if ("dateTimeNoMillis".equals(input) || "date_time_no_millis".equals(input)) {
formatter = ISODateTimeFormat.dateTimeNoMillis();
} else if ("hour".equals(input)) {
formatter = ISODateTimeFormat.hour();
} else if ("hourMinute".equals(input) || "hour_minute".equals(input)) {
formatter = ISODateTimeFormat.hourMinute();
} else if ("hourMinuteSecond".equals(input) || "hour_minute_second".equals(input)) {
formatter = ISODateTimeFormat.hourMinuteSecond();
} else if ("hourMinuteSecondFraction".equals(input) || "hour_minute_second_fraction".equals(input)) {
formatter = ISODateTimeFormat.hourMinuteSecondFraction();
} else if ("hourMinuteSecondMillis".equals(input) || "hour_minute_second_millis".equals(input)) {
formatter = ISODateTimeFormat.hourMinuteSecondMillis();
} else if ("ordinalDate".equals(input) || "ordinal_date".equals(input)) {
formatter = ISODateTimeFormat.ordinalDate();
} else if ("ordinalDateTime".equals(input) || "ordinal_date_time".equals(input)) {
formatter = ISODateTimeFormat.ordinalDateTime();
} else if ("ordinalDateTimeNoMillis".equals(input) || "ordinal_date_time_no_millis".equals(input)) {
formatter = ISODateTimeFormat.ordinalDateTimeNoMillis();
} else if ("time".equals(input)) {
formatter = ISODateTimeFormat.time();
} else if ("tTime".equals(input) || "t_time".equals(input)) {
formatter = ISODateTimeFormat.tTime();
} else if ("tTimeNoMillis".equals(input) || "t_time_no_millis".equals(input)) {
formatter = ISODateTimeFormat.tTimeNoMillis();
} else if ("weekDate".equals(input) || "week_date".equals(input)) {
formatter = ISODateTimeFormat.weekDate();
} else if ("weekDateTime".equals(input) || "week_date_time".equals(input)) {
formatter = ISODateTimeFormat.weekDateTime();
} else if ("weekyear".equals(input) || "week_year".equals(input)) {
formatter = ISODateTimeFormat.weekyear();
} else if ("weekyearWeek".equals(input)) {
formatter = ISODateTimeFormat.weekyearWeek();
} else if ("year".equals(input)) {
formatter = ISODateTimeFormat.year();
} else if ("yearMonth".equals(input) || "year_month".equals(input)) {
formatter = ISODateTimeFormat.yearMonth();
} else if ("yearMonthDay".equals(input) || "year_month_day".equals(input)) {
formatter = ISODateTimeFormat.yearMonthDay();
} else if (!Strings.isNullOrEmpty(input) && input.contains("||")) {
String[] formats = input.split("\\|\\|");
DateTimeParser[] parsers = new DateTimeParser[formats.length];
if (formats.length == 1) {
formatter = forPattern(input, locale).parser();
} else {
DateTimeFormatter dateTimeFormatter = null;
for (int i = 0; i < formats.length; i++) {
FormatDateTimeFormatter currentFormatter = forPattern(formats[i], locale);
DateTimeFormatter currentParser = currentFormatter.parser();
if (dateTimeFormatter == null) {
dateTimeFormatter = currentFormatter.printer();
}
parsers[i] = currentParser.getParser();
}
DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder().append(dateTimeFormatter.withZone(DateTimeZone.UTC).getPrinter(), parsers);
formatter = builder.toFormatter();
}
} else {
try {
formatter = DateTimeFormat.forPattern(input);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Invalid format: [" + input + "]: " + e.getMessage(), e);
}
}
return new FormatDateTimeFormatter(input, formatter.withZone(DateTimeZone.UTC), locale);
}
public static final DurationFieldType Quarters = new DurationFieldType("quarters") {
private static final long serialVersionUID = -8167713675442491871L;
public DurationField getField(Chronology chronology) {
return new ScaledDurationField(chronology.months(), Quarters, 3);
}
};
public static final DateTimeFieldType QuarterOfYear = new DateTimeFieldType("quarterOfYear") {
private static final long serialVersionUID = -5677872459807379123L;
public DurationFieldType getDurationType() {
return Quarters;
}
public DurationFieldType getRangeDurationType() {
return DurationFieldType.years();
}
public DateTimeField getField(Chronology chronology) {
return new OffsetDateTimeField(new DividedDateTimeField(new OffsetDateTimeField(chronology.monthOfYear(), -1), QuarterOfYear, 3), 1);
}
};
}
public static class FormatDateTimeFormatter {
private final String format;
private final DateTimeFormatter parser;
private final DateTimeFormatter printer;
private final Locale locale;
public FormatDateTimeFormatter(String format, DateTimeFormatter parser, Locale locale) {
this(format, parser, parser, locale);
}
public FormatDateTimeFormatter(String format, DateTimeFormatter parser, DateTimeFormatter printer, Locale locale) {
this.format = format;
this.locale = locale;
this.printer = locale == null ? printer.withDefaultYear(1970) : printer.withLocale(locale).withDefaultYear(1970);
this.parser = locale == null ? parser.withDefaultYear(1970) : parser.withLocale(locale).withDefaultYear(1970);
}
public String format() {
return format;
}
public DateTimeFormatter parser() {
return parser;
}
public DateTimeFormatter printer() {
return this.printer;
}
public Locale locale() {
return locale;
}
}
}
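// Illustrative sketch, not part of the original source: TimestampField accepts
// either a numeric epoch-millisecond value or a formatted string, parsed with the
// named Joda pattern given in the constructor ("dateOptionalTime" when format is null).
class TimestampFieldExample {
    public static void main(String[] args) {
        TimestampField field = new TimestampField("ts", null);
        Map<String, Object> numeric = java.util.Collections.<String, Object>singletonMap("ts", 1384387200000L);
        Map<String, Object> formatted = java.util.Collections.<String, Object>singletonMap("ts", "2013-11-14T00:00:00Z");
        System.out.println(field.get(numeric));   // 1384387200000
        System.out.println(field.get(formatted)); // 1384387200000
    }
}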
| 1,363 |
0 |
Create_ds/suro/suro-elasticsearch/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-elasticsearch/src/main/java/com/netflix/suro/sink/elasticsearch/DefaultIndexInfoBuilder.java
|
package com.netflix.suro.sink.elasticsearch;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Strings;
import com.google.common.collect.Maps;
import com.netflix.suro.message.Message;
import com.netflix.suro.sink.DataConverter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.Map;
public class DefaultIndexInfoBuilder implements IndexInfoBuilder {
private static final Logger log = LoggerFactory.getLogger(DefaultIndexInfoBuilder.class);
private final static TypeReference<Map<String, Object>> type = new TypeReference<Map<String, Object>>() {};
private final ObjectMapper jsonMapper;
private final Map<String, String> indexMap;
private final Map<String, String> typeMap;
private final Map<String, List<String>> idFieldsMap;
private final TimestampField timestampField;
private final IndexSuffixFormatter indexSuffixFormatter;
private final DataConverter dataConverter;
@JsonCreator
public DefaultIndexInfoBuilder(
@JsonProperty("indexTypeMap") Map<String, String> indexTypeMap,
@JsonProperty("idFields") Map<String, List<String>> idFieldsMap,
@JsonProperty("timestamp") TimestampField timestampField,
@JsonProperty("indexSuffixFormatter") IndexSuffixFormatter indexSuffixFormatter,
@JacksonInject DataConverter dataConverter,
@JacksonInject ObjectMapper jsonMapper
) {
if (indexTypeMap != null) {
indexMap = Maps.newHashMap();
typeMap = Maps.newHashMap();
for (Map.Entry<String, String> entry : indexTypeMap.entrySet()) {
String[] index_type = entry.getValue().split(":");
indexMap.put(entry.getKey(), index_type[0]);
if (index_type.length > 1) {
typeMap.put(entry.getKey(), index_type[1]);
}
}
} else {
this.indexMap = Maps.newHashMap();
this.typeMap = Maps.newHashMap();
}
this.idFieldsMap = idFieldsMap;
this.indexSuffixFormatter =
indexSuffixFormatter == null ? new IndexSuffixFormatter(null, null) : indexSuffixFormatter;
this.jsonMapper = jsonMapper;
this.timestampField = timestampField;
this.dataConverter = dataConverter;
}
@Override
public IndexInfo create(final Message msg) {
try {
final Map<String, Object> msgMap;
if (dataConverter != null) {
msgMap = dataConverter.convert((Map<String, Object>) jsonMapper.readValue(msg.getPayload(), type));
} else {
msgMap = jsonMapper.readValue(msg.getPayload(), type);
}
return new IndexInfo() {
private long ts = 0; //timestamp caching
@Override
public String getIndex() {
String index = indexMap.get(msg.getRoutingKey());
if (index == null) {
index = msg.getRoutingKey();
}
return index + indexSuffixFormatter.format(this);
}
@Override
public String getType() {
String type = typeMap.get(msg.getRoutingKey());
return type == null ? "default" : type;
}
@Override
public Object getSource() {
if (dataConverter != null) {
return msgMap;
} else {
return new String(msg.getPayload());
}
}
@Override
public String getId() {
if (idFieldsMap == null || !idFieldsMap.containsKey(msg.getRoutingKey())) {
return null;
} else {
StringBuilder sb = new StringBuilder();
for (String id : idFieldsMap.get(msg.getRoutingKey())) {
if (id.startsWith("ts_")) {
sb.append(TimestampSlice.valueOf(id).get(getTimestamp()));
} else {
sb.append(msgMap.get(id));
}
}
return sb.toString();
}
}
@Override
public long getTimestamp() {
if (ts == 0 && timestampField != null) {
ts = timestampField.get(msgMap);
}
return ts;
}
};
} catch (Exception e) {
log.error("Exception on parsing message", e);
return null;
}
}
@Override
public String getActionMetadata(IndexInfo info) {
if (!Strings.isNullOrEmpty(info.getId())) {
return String.format(
"{ \"create\" : { \"_index\" : \"%s\", \"_type\" : \"%s\", \"_id\" : \"%s\" } }",
info.getIndex(), info.getType(), info.getId());
} else {
return String.format(
"{ \"create\" : { \"_index\" : \"%s\", \"_type\" : \"%s\"} }",
info.getIndex(), info.getType());
}
}
@Override
public String getSource(IndexInfo info) throws JsonProcessingException {
if (info.getSource() instanceof Map) {
return jsonMapper.writeValueAsString(info.getSource());
} else {
return info.getSource().toString();
}
}
@Override
public String getIndexUri(IndexInfo info) {
return info.getId() != null ?
String.format(
"/%s/%s/%s",
info.getIndex(),
info.getType(),
info.getId()) :
String.format(
"/%s/%s/",
info.getIndex(),
info.getType());
}
@Override
public String getCommand() {
return "create";
}
}
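// Illustrative sketch, not part of the original source: with no maps, timestamp
// field or converter configured, the builder falls back to the routing key as the
// index name, "default" as the type, and the raw payload as the document source.
class DefaultIndexInfoBuilderExample {
    public static void main(String[] args) throws Exception {
        DefaultIndexInfoBuilder builder =
                new DefaultIndexInfoBuilder(null, null, null, null, null, new ObjectMapper());
        IndexInfo info = builder.create(new Message("logs", "{\"f\":1}".getBytes()));
        System.out.println(builder.getActionMetadata(info)); // { "create" : { "_index" : "logs", "_type" : "default"} }
        System.out.println(builder.getSource(info));         // {"f":1}
        System.out.println(builder.getIndexUri(info));       // /logs/default/
    }
}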
| 1,364 |
0 |
Create_ds/suro/suro-elasticsearch/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-elasticsearch/src/main/java/com/netflix/suro/sink/elasticsearch/IndexInfo.java
|
package com.netflix.suro.sink.elasticsearch;
public interface IndexInfo {
String getIndex();
String getType();
Object getSource();
String getId();
long getTimestamp();
}
| 1,365 |
0 |
Create_ds/suro/suro-localfile/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-localfile/src/test/java/com/netflix/suro/sink/localfile/SuroSinkPlugin.java
|
package com.netflix.suro.sink.localfile;
import com.netflix.suro.SuroPlugin;
import com.netflix.suro.sink.SuroSink;
import com.netflix.suro.sink.notice.NoNotice;
import com.netflix.suro.sink.notice.QueueNotice;
public class SuroSinkPlugin extends SuroPlugin {
@Override
protected void configure() {
this.addSinkType(LocalFileSink.TYPE, LocalFileSink.class);
this.addSinkType(SuroSink.TYPE, SuroSink.class);
this.addNoticeType(NoNotice.TYPE, NoNotice.class);
this.addNoticeType(QueueNotice.TYPE, QueueNotice.class);
}
}
| 1,366 |
0 |
Create_ds/suro/suro-localfile/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-localfile/src/test/java/com/netflix/suro/sink/localfile/TestSequenceFileWriter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.localfile;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.StringSerDe;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.File;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
public class TestSequenceFileWriter {
@Rule
public TemporaryFolder tempDir = new TemporaryFolder();
private static Injector injector = Guice.createInjector(
new SuroSinkPlugin(),
new AbstractModule() {
@Override
protected void configure() {
bind(ObjectMapper.class).to(DefaultObjectMapper.class);
}
}
);
@Test
public void test() throws IOException {
String dir = tempDir.newFolder().getAbsolutePath();
String spec = "{\n" +
" \"type\": \"sequence\"\n" +
"}";
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
FileWriter writer = mapper.readValue(spec, new TypeReference<FileWriter>() {});
writer.open(dir);
assertEquals(writer.getLength(), 0);
writer.rotate(dir + "testfile0.suro");
for (int i = 0; i < 10; ++i) {
writer.writeTo(
new Message("routingKey", ("message0" + i).getBytes()));
}
System.out.println("length: " + writer.getLength());
assertEquals(writer.getLength(), 540);
writer.rotate(dir + "testfile1.suro");
assertEquals(writer.getLength(), 100); // empty sequence file length
assertEquals(checkFileContents(dir + "testfile0.suro", "message0"), 10);
writer.setDone(dir + "testfile0.suro", dir + "testfile0.done");
assertFalse(new File(dir + "testfile0.suro").exists());
checkFileContents(dir + "testfile0.done", "message0");
for (int i = 0; i < 10; ++i) {
writer.writeTo(
new Message("routingKey", ("message1" + i).getBytes()));
}
writer.close();
assertEquals(checkFileContents(dir + "testfile1.suro", "message1"), 10);
}
private int checkFileContents(String filePath, String message) throws IOException {
SequenceFile.Reader r = new SequenceFile.Reader(
FileSystem.get(new Configuration()),
new Path(filePath),
new Configuration());
Text routingKey = new Text();
MessageWritable value = new MessageWritable();
StringSerDe serde = new StringSerDe();
int i = 0;
while (r.next(routingKey, value)) {
assertEquals(routingKey.toString(), "routingKey");
assertEquals(serde.deserialize(value.getMessage().getPayload()), message + i);
++i;
}
r.close();
return i;
}
}
| 1,367 |
0 |
Create_ds/suro/suro-localfile/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-localfile/src/test/java/com/netflix/suro/sink/localfile/TestTextFileWriter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.localfile;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.Message;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
public class TestTextFileWriter {
@Rule
public TemporaryFolder tempDir = new TemporaryFolder();
private static Injector injector = Guice.createInjector(
new SuroSinkPlugin(),
new AbstractModule() {
@Override
protected void configure() {
bind(ObjectMapper.class).to(DefaultObjectMapper.class);
}
}
);
@Test
public void test() throws IOException {
String dir = tempDir.newFolder().getAbsolutePath();
String spec = "{\n" +
" \"type\": \"text\"\n" +
"}";
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
FileWriter writer = mapper.readValue(spec, new TypeReference<FileWriter>() {});
writer.open(dir);
assertEquals(writer.getLength(), 0);
writer.rotate(dir + "testfile0.suro");
for (int i = 0; i < 10; ++i) {
writer.writeTo(
new Message("routingKey", ("message0" + i).getBytes()));
}
System.out.println("length: " + writer.getLength());
assertEquals(writer.getLength(), 100);
writer.rotate(dir + "testfile1.suro");
assertEquals(writer.getLength(), 0);
assertEquals(checkFileContents(dir + "testfile0.suro", "message0"), 10);
writer.setDone(dir + "testfile0.suro", dir + "testfile0.done");
assertFalse(new File(dir + "testfile0.suro").exists());
checkFileContents(dir + "testfile0.done", "message0");
for (int i = 0; i < 10; ++i) {
writer.writeTo(
new Message("routingKey", ("message1" + i).getBytes()));
}
writer.close();
assertEquals(checkFileContents(dir + "testfile1.suro", "message1"), 10);
}
@Test
public void testWithCodec() throws IOException, ClassNotFoundException {
String dir = tempDir.newFolder().getAbsolutePath();
String spec = "{\n" +
" \"type\": \"text\",\n" +
" \"codec\": \"org.apache.hadoop.io.compress.GzipCodec\"\n" +
"}";
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
FileWriter writer = mapper.readValue(spec, new TypeReference<FileWriter>() {});
writer.open(dir);
assertEquals(writer.getLength(), 0); // no files
writer.rotate(dir + "testfile0.suro");
for (int i = 0; i < 100000; ++i) {
writer.writeTo(
new Message("routingKey", ("message0" + i).getBytes()));
}
System.out.println("length: " + writer.getLength());
assertEquals(writer.getLength(), 232456); // compressed one
writer.rotate(dir + "testfile1.suro");
assertEquals(writer.getLength(), 10); // gzip compressed initial size
assertEquals(checkFileContentsWithGzip(dir + "testfile0.suro", "message0"), 100000);
writer.setDone(dir + "testfile0.suro", dir + "testfile0.done");
assertFalse(new File(dir + "testfile0.suro").exists());
assertEquals(checkFileContentsWithGzip(dir + "testfile0.done", "message0"), 100000);
for (int i = 0; i < 100000; ++i) {
writer.writeTo(
new Message("routingKey", ("message1" + i).getBytes()));
}
writer.close();
assertEquals(checkFileContentsWithGzip(dir + "testfile1.suro", "message1"), 100000);
}
private int checkFileContents(String filePath, String message) throws IOException {
BufferedReader br = new BufferedReader(new FileReader(filePath));
String line = null;
int i = 0;
while ((line = br.readLine()) != null) {
assertEquals(line, message + i);
++i;
}
br.close();
return i;
}
private int checkFileContentsWithGzip(String filePath, String message) throws IOException, ClassNotFoundException {
FileSystem fs = FileSystem.get(new Configuration());
FSDataInputStream input = fs.open(new Path(filePath));
CompressionCodec codec = FileWriterBase.createCodecInstance("org.apache.hadoop.io.compress.GzipCodec");
BufferedReader br = new BufferedReader(new InputStreamReader(codec.createInputStream(input)));
String line = null;
int i = 0;
while ((line = br.readLine()) != null) {
assertEquals(line, message + i);
++i;
}
br.close();
return i;
}
}
| 1,368 |
0 |
Create_ds/suro/suro-localfile/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-localfile/src/test/java/com/netflix/suro/sink/localfile/TestLocalFileSink.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.localfile;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.suro.connection.TestConnectionPool;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.MessageSetReader;
import com.netflix.suro.message.StringMessage;
import com.netflix.suro.sink.Sink;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.*;
import java.util.HashSet;
import java.util.Set;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestLocalFileSink {
@Rule
public TemporaryFolder tempDir = new TemporaryFolder();
private static Injector injector = Guice.createInjector(
new SuroSinkPlugin(),
new AbstractModule() {
@Override
protected void configure() {
bind(ObjectMapper.class).to(DefaultObjectMapper.class);
}
}
);
@Test
public void testDefaultParameters() throws IOException {
String testdir = tempDir.newFolder().getAbsolutePath();
final String localFileSinkSpec = "{\n" +
" \"type\": \"" + LocalFileSink.TYPE + "\",\n" +
" \"outputDir\": \"" + testdir + "\"\n" +
" }\n" +
"}";
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
Sink sink = mapper.readValue(localFileSinkSpec, new TypeReference<Sink>(){});
sink.open();
assertNull(sink.recvNotice());
for (Message m : new MessageSetReader(TestConnectionPool.createMessageSet(10000))) {
sink.writeTo(new StringMessage(m));
}
sink.close();
System.out.println(sink.getStat());
int count = 0;
File dir = new File(testdir);
File[] files = dir.listFiles();
for (File file : files) {
assertTrue(file.getName().contains(".done"));
if (!file.getName().contains("crc")) {
BufferedReader br = new BufferedReader(new FileReader(file));
String line = null;
while ((line = br.readLine()) != null) {
assertTrue(line.contains("testMessage"));
++count;
}
br.close();
}
}
assertEquals(count, 10000);
}
@Test
public void testWithPeriodRotation() throws IOException, InterruptedException {
String testdir = tempDir.newFolder().getAbsolutePath();
final String localFileSinkSpec = "{\n" +
" \"type\": \"" + LocalFileSink.TYPE + "\",\n" +
" \"outputDir\": \"" + testdir + "\",\n" +
" \"writer\": {\n" +
" \"type\": \"text\"\n" +
" },\n" +
" \"maxFileSize\": 100000000,\n" +
" \"minPercentFreeDisk\": 50,\n" +
" \"rotationPeriod\": \"PT5s\",\n" +
" \"notice\": {\n" +
" \"type\": \"queue\"\n" +
" }\n" +
"}";
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
Sink sink = mapper.readValue(localFileSinkSpec, new TypeReference<Sink>(){});
sink.open();
assertNull(sink.recvNotice());
for (Message m : new MessageSetReader(TestConnectionPool.createMessageSet(100))) {
sink.writeTo(new StringMessage(m));
Thread.sleep(100);
}
sink.close();
int count = 0;
int fileCount = 0;
File dir = new File(testdir);
File[] files = dir.listFiles();
for (File file : files) {
assertTrue(file.getName().contains(".done"));
if (!file.getName().contains("crc")) {
++fileCount;
BufferedReader br = new BufferedReader(new FileReader(file));
String line = null;
while ((line = br.readLine()) != null) {
assertTrue(line.contains("testMessage"));
++count;
}
br.close();
}
}
assertEquals(count, 100);
assertTrue(fileCount > 1);
}
@Test
public void testSpaceChecker() throws Exception {
String testdir = tempDir.newFolder().getAbsolutePath();
final String localFileSinkSpec = "{\n" +
" \"type\": \"" + LocalFileSink.TYPE + "\",\n" +
" \"outputDir\": \"" + testdir + "\",\n" +
" \"writer\": {\n" +
" \"type\": \"text\"\n" +
" },\n" +
" \"maxFileSize\": 10240,\n" +
" \"minPercentFreeDisk\": 50,\n" +
" \"rotationPeriod\": \"PT1m\",\n" +
" \"notice\": {\n" +
" \"type\": \"queue\"\n" +
" }\n" +
"}";
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
final LocalFileSink.SpaceChecker spaceChecker = mock(LocalFileSink.SpaceChecker.class);
when(spaceChecker.hasEnoughSpace()).thenReturn(false);
Sink sink = mapper.readValue(localFileSinkSpec, new TypeReference<Sink>(){});
sink.open();
Thread.sleep(1000); // wait until thread starts
assertNull(sink.recvNotice());
when(spaceChecker.hasEnoughSpace()).thenReturn(true);
for (Message m : new MessageSetReader(TestConnectionPool.createMessageSet(10000))) {
sink.writeTo(new StringMessage(m));
}
sink.close();
int count = 0;
File dir = new File(testdir);
File[] files = dir.listFiles();
for (File file : files) {
assertTrue(file.getName().contains(".done"));
if (!file.getName().contains("crc")) {
BufferedReader br = new BufferedReader(new FileReader(file));
String line = null;
while ((line = br.readLine()) != null) {
assertTrue(line.contains("testMessage"));
++count;
}
br.close();
}
}
assertEquals(count, 10000);
}
@Test
public void testWithSizeRotation() throws IOException {
String testdir = tempDir.newFolder().getAbsolutePath();
final String localFileSinkSpec = "{\n" +
" \"type\": \"" + LocalFileSink.TYPE + "\",\n" +
" \"outputDir\": \"" + testdir + "\",\n" +
" \"writer\": {\n" +
" \"type\": \"text\"\n" +
" },\n" +
" \"maxFileSize\": 10240,\n" +
" \"minPercentFreeDisk\": 50,\n" +
" \"rotationPeriod\": \"PT10m\",\n" +
" \"batchSize\": 1,\n" +
" \"notice\": {\n" +
" \"type\": \"queue\"\n" +
" }\n" +
"}";
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
Sink sink = mapper.readValue(localFileSinkSpec, new TypeReference<Sink>(){});
sink.open();
assertNull(sink.recvNotice());
for (Message m : new MessageSetReader(TestConnectionPool.createMessageSet(10000))) {
sink.writeTo(new StringMessage(m));
}
sink.close();
int count = 0;
int errorCount = 0;
File dir = new File(testdir);
File[] files = dir.listFiles();
for (File file : files) {
System.out.println(file.getName());
assertTrue(file.getName().contains(".done"));
if (!file.getName().contains("crc")) {
if (file.length() > 12000) {
++errorCount;
// last file can be bigger due to flushing
}
BufferedReader br = new BufferedReader(new FileReader(file));
String line = null;
while ((line = br.readLine()) != null) {
assertTrue(line.contains("testMessage"));
++count;
}
br.close();
}
}
assertEquals(count, 10000);
assertTrue(errorCount <= 1);
}
@Test
public void rotateEmptyFile() throws IOException, InterruptedException {
String testdir = tempDir.newFolder().getAbsolutePath();
final String localFileSinkSpec = "{\n" +
" \"type\": \"" + LocalFileSink.TYPE + "\",\n" +
" \"outputDir\": \"" + testdir + "\",\n" +
" \"writer\": {\n" +
" \"type\": \"text\"\n" +
" },\n" +
" \"maxFileSize\": 100000000,\n" +
" \"minPercentFreeDisk\": 50,\n" +
" \"rotationPeriod\": \"PT2s\",\n" +
" \"notice\": {\n" +
" \"type\": \"queue\"\n" +
" }\n" +
"}";
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
Sink sink = mapper.readValue(localFileSinkSpec, new TypeReference<Sink>(){});
sink.open();
assertNull(sink.recvNotice());
for (Message m : new MessageSetReader(TestConnectionPool.createMessageSet(100))) {
sink.writeTo(new StringMessage(m));
Thread.sleep(100);
}
Thread.sleep(3000);
sink.close();
int count = 0;
int fileCount = 0;
File dir = new File(testdir);
File[] files = dir.listFiles();
for (File file : files) {
assertTrue(file.getName().contains(".done"));
if (!file.getName().contains("crc")) {
++fileCount;
BufferedReader br = new BufferedReader(new FileReader(file));
String line = null;
while ((line = br.readLine()) != null) {
assertTrue(line.contains("testMessage"));
++count;
}
br.close();
assertTrue(count > 0); // the rotated file should not be empty
}
}
assertEquals(count, 100);
assertTrue(fileCount > 1);
}
@Test
public void testCleanUp() throws IOException, InterruptedException {
String testdir = tempDir.newFolder().getAbsolutePath();
// create files
final int numFiles = 5;
Set<String> filePathSet = new HashSet<String>();
for (int i = 0; i < numFiles; ++i) {
String fileName = "testFile" + i + (i == numFiles - 1 ? LocalFileSink.suffix : LocalFileSink.done);
File f = new File(testdir, fileName);
f.createNewFile();
FileOutputStream o = new FileOutputStream(f);
o.write(100 /*any data*/);
o.close();
if (i != numFiles - 1) {
filePathSet.add(f.getAbsolutePath());
}
}
final String localFileSinkSpec = "{\n" +
" \"type\": \"" + LocalFileSink.TYPE + "\",\n" +
" \"rotationPeriod\": \"PT1m\",\n" +
" \"outputDir\": \"" + testdir + "\"\n" +
" }\n" +
"}";
Thread.sleep(3000); // wait until .suro file is expired
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
LocalFileSink sink = (LocalFileSink)mapper.readValue(
localFileSinkSpec,
new TypeReference<Sink>(){});
assertEquals(sink.cleanUp(testdir, false), numFiles - 1); // the remaining .suro file is not cleaned up
Set<String> filePathSetResult = new HashSet<String>();
for (int i = 0; i < numFiles - 1; ++i) {
filePathSetResult.add(sink.recvNotice());
}
assertEquals(filePathSet, filePathSetResult);
assertEquals(sink.cleanUp(testdir, true), numFiles);
}
@Test
public void testGetFileExt() {
assertEquals(LocalFileSink.getFileExt("abc.done"), ".done");
assertNull(LocalFileSink.getFileExt("abcdone"));
assertNull(LocalFileSink.getFileExt("abcdone."));
}
}
| 1,369 |
0 |
Create_ds/suro/suro-localfile/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-localfile/src/test/java/com/netflix/suro/sink/localfile/TestFileNameFormatter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.localfile;
import org.junit.Test;
import static junit.framework.TestCase.assertEquals;
public class TestFileNameFormatter {
@Test
public void test() {
String name = FileNameFormatter.get("/dir/");
System.out.println(name);
assertEquals(name.indexOf("-"), -1);
assertEquals(name.indexOf(":"), -1);
}
}
| 1,370 |
0 |
Create_ds/suro/suro-localfile/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-localfile/src/main/java/com/netflix/suro/sink/localfile/FileWriterBase.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.localfile;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import java.io.DataOutputStream;
import java.io.IOException;
/**
* The base class for both {@link SequenceFileWriter} and {@link TextFileWriter}.
*
* @author jbae
*/
public class FileWriterBase {
private final FileSystem fs;
private final CompressionCodec codec;
private final Configuration conf;
public FileWriterBase(String codecClass, Logger log, Configuration conf) {
this.conf = conf;
try {
fs = FileSystem.getLocal(conf);
fs.setVerifyChecksum(false);
if (codecClass != null) {
codec = createCodecInstance(codecClass);
log.info("Codec:" + codec.getDefaultExtension());
} else {
codec = null;
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Implementation of {@link FileWriter#setDone(String, String)}
*
* @param oldName
* @param newName
* @throws java.io.IOException
*/
public void setDone(String oldName, String newName) throws IOException {
Path oldPath = new Path(oldName);
fs.rename(oldPath, new Path(newName));
}
public FileSystem getFS() {
return fs;
}
public void createOutputDir(String outputDir) throws IOException {
Path pLocalOutputDir = new Path(outputDir);
if (!fs.exists(pLocalOutputDir)) {
boolean exist = fs.mkdirs(pLocalOutputDir);
if (!exist) {
throw new RuntimeException("Cannot create local dataSink dir: " + outputDir);
}
} else {
FileStatus fsLocalOutputDir = fs.getFileStatus(pLocalOutputDir);
if (!fsLocalOutputDir.isDir()) {
throw new RuntimeException("local dataSink dir is not a directory: " + outputDir);
}
}
}
/**
*
* @param codecClass fully qualified codec class name
* @return a new CompressionCodec instance of the given class
* @throws ClassNotFoundException
*/
public static CompressionCodec createCodecInstance(String codecClass) throws ClassNotFoundException {
Class<?> classDefinition = Class.forName(codecClass);
return (CompressionCodec) ReflectionUtils.newInstance(classDefinition, new Configuration());
}
/**
* Create a new sequence file
*
* @param newPath
* @return
* @throws java.io.IOException
*/
public SequenceFile.Writer createSequenceFile(String newPath) throws IOException {
if (codec != null) {
return SequenceFile.createWriter(
fs, conf, new Path(newPath),
Text.class, MessageWritable.class,
SequenceFile.CompressionType.BLOCK, codec);
} else {
return SequenceFile.createWriter(
fs, conf, new Path(newPath),
Text.class, MessageWritable.class,
SequenceFile.CompressionType.NONE, codec);
}
}
/**
* Create a new FSDataOutputStream from path
*
* @param path
* @return
* @throws java.io.IOException
*/
public FSDataOutputStream createFSDataOutputStream(String path) throws IOException {
return fs.create(new Path(path), false);
}
/**
* Creates a DataOutputStream from an FSDataOutputStream. If a codec is configured,
* it returns a compressed DataOutputStream wrapping the stream; otherwise it returns the stream unchanged.
*
* @param outputStream
* @return
* @throws java.io.IOException
*/
public DataOutputStream createDataOutputStream(FSDataOutputStream outputStream) throws IOException {
if (codec != null) {
return new FSDataOutputStream(codec.createOutputStream(outputStream), null);
} else {
return outputStream;
}
}
}
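// Illustrative sketch, not part of the original source: how the concrete writers
// combine the pieces of this base class -- construct it with an optional codec
// class name, then derive a (possibly compressed) output stream for a file path.
// The directory and file paths below are hypothetical.
class FileWriterBaseExample {
    public static void main(String[] args) throws Exception {
        FileWriterBase base = new FileWriterBase(
                "org.apache.hadoop.io.compress.GzipCodec",
                org.slf4j.LoggerFactory.getLogger(FileWriterBaseExample.class),
                new Configuration());
        base.createOutputDir("/tmp/suro-example/");
        FSDataOutputStream fsOutput = base.createFSDataOutputStream("/tmp/suro-example/example.suro");
        DataOutputStream output = base.createDataOutputStream(fsOutput); // gzip-wrapped stream
        output.write("hello\n".getBytes());
        output.close(); // closing the wrapper also closes the underlying stream
    }
}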
| 1,371 |
0 |
Create_ds/suro/suro-localfile/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-localfile/src/main/java/com/netflix/suro/sink/localfile/SequenceFileWriter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.localfile;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.suro.message.Message;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
public class SequenceFileWriter implements FileWriter {
public static final String TYPE = "sequence";
static Logger log = LoggerFactory.getLogger(SequenceFileWriter.class);
private final FileWriterBase base;
private SequenceFile.Writer seqFileWriter;
private Text routingKey = new Text();
@JsonCreator
public SequenceFileWriter(@JsonProperty("codec") String codec) {
base = new FileWriterBase(codec, log, new Configuration());
}
@Override
public void open(String outputDir) throws IOException {
base.createOutputDir(outputDir);
}
@Override
public long getLength() {
if (seqFileWriter != null) {
try {
return seqFileWriter.getLength();
} catch (IOException e) {
log.error("IOException while getLength: " + e.getMessage());
return -1;
}
} else {
return 0;
}
}
@Override
public void writeTo(Message message) throws IOException {
routingKey.set(message.getRoutingKey());
seqFileWriter.append(routingKey, new MessageWritable(message));
}
@Override
public void rotate(String newPath) throws IOException {
if (seqFileWriter != null) {
seqFileWriter.close();
}
seqFileWriter = base.createSequenceFile(newPath);
}
@Override
public FileSystem getFS() {
return base.getFS();
}
@Override
public void close() throws IOException {
if (seqFileWriter != null) {
seqFileWriter.close();
}
}
@Override
public void setDone(String oldName, String newName) throws IOException {
base.setDone(oldName, newName);
}
@Override
public void sync() throws IOException {
seqFileWriter.sync();
}
}
| 1,372 |
0 |
Create_ds/suro/suro-localfile/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-localfile/src/main/java/com/netflix/suro/sink/localfile/FileWriter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.localfile;
import com.fasterxml.jackson.annotation.JsonSubTypes;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.netflix.suro.message.Message;
import org.apache.hadoop.fs.FileSystem;
import java.io.IOException;
/**
* {@link LocalFileSink} uses the Hadoop file IO module. For text files it uses
* <a href=http://hadoop.apache.org/docs/r1.0.4/api/org/apache/hadoop/fs/FSDataOutputStream.html>FSDataOutputStream</a>,
* and for binary-formatted files it uses
* <a href=http://hadoop.apache.org/docs/r1.0.4/api/org/apache/hadoop/io/SequenceFile.html>SequenceFile</a>.
*
* @author jbae
*/
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
@JsonSubTypes(value = {
@JsonSubTypes.Type(name = SequenceFileWriter.TYPE, value = SequenceFileWriter.class),
@JsonSubTypes.Type(name = TextFileWriter.TYPE, value = TextFileWriter.class)
})
public interface FileWriter {
/**
* Open the file under outputDir with the file name formatted by
* {@link FileNameFormatter}
*
* @param outputDir the directory where the file is located
* @throws java.io.IOException
*/
void open(String outputDir) throws IOException;
/**
* @return the file length
* @throws java.io.IOException
*/
long getLength() throws IOException;
void writeTo(Message message) throws IOException;
/**
* Flush all data to the disk
*
* @throws java.io.IOException
*/
void sync() throws IOException;
/**
* Close the current file, create and open the new file.
*
* @param newPath The path that points to the newly rotated file.
* @throws java.io.IOException
*/
void rotate(String newPath) throws IOException;
FileSystem getFS();
void close() throws IOException;
/**
* Marks the file as done. Once the file is marked as done,
* it can be processed further, for example by uploading it to S3.
*
* @param oldName The name of the file before it is marked as done.
* @param newName The new name of the file after it is marked as done.
* @throws java.io.IOException
*/
void setDone(String oldName, String newName) throws IOException;
}
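// Illustrative sketch, not part of the original source: the writer lifecycle as
// driven by LocalFileSink -- open the output directory, rotate onto a concrete
// file, append and sync, rotate again, then mark the finished file as done.
// The directory and file names below are hypothetical.
class FileWriterLifecycleExample {
    public static void main(String[] args) throws IOException {
        FileWriter writer = new TextFileWriter(null); // no compression codec
        writer.open("/tmp/suro-example/");
        writer.rotate("/tmp/suro-example/part0.suro");
        writer.writeTo(new Message("routingKey", "hello".getBytes()));
        writer.sync();
        writer.rotate("/tmp/suro-example/part1.suro"); // closes part0 and opens part1
        writer.setDone("/tmp/suro-example/part0.suro", "/tmp/suro-example/part0.done");
        writer.close();
    }
}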
| 1,373 |
0 |
Create_ds/suro/suro-localfile/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-localfile/src/main/java/com/netflix/suro/sink/localfile/TextFileWriter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.localfile;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.SerDe;
import com.netflix.suro.message.StringSerDe;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
public class TextFileWriter implements FileWriter {
static Logger log = LoggerFactory.getLogger(TextFileWriter.class);
public static final String TYPE = "text";
private static final byte[] newline;
private static final String utf8 = "UTF-8";
static {
try {
newline = "\n".getBytes(utf8);
} catch (UnsupportedEncodingException uee) {
throw new IllegalArgumentException("can't find " + utf8 + " encoding");
}
}
private final FileWriterBase base;
private FSDataOutputStream fsOutputStream;
private DataOutputStream outputStream;
private final SerDe<String> serde = new StringSerDe();
@JsonCreator
public TextFileWriter(@JsonProperty("codec") String codecClass) {
base = new FileWriterBase(codecClass, log, new Configuration());
}
@Override
public void open(String outputDir) throws IOException {
base.createOutputDir(outputDir);
}
@Override
public long getLength() throws IOException {
if (fsOutputStream != null) {
return fsOutputStream.getPos();
} else {
return 0;
}
}
@Override
public void writeTo(Message message) throws IOException {
String strMessage = serde.deserialize(message.getPayload());
outputStream.write(strMessage.getBytes());
outputStream.write(newline);
}
@Override
public void rotate(String newPath) throws IOException {
close();
fsOutputStream = base.createFSDataOutputStream(newPath);
outputStream = base.createDataOutputStream(fsOutputStream);
}
@Override
public FileSystem getFS() {
return base.getFS();
}
@Override
public void close() throws IOException {
if (outputStream != null) {
outputStream.close();
fsOutputStream.close();
}
}
@Override
public void setDone(String oldName, String newName) throws IOException {
base.setDone(oldName, newName);
}
@Override
public void sync() throws IOException {
outputStream.flush();
fsOutputStream.sync();
}
}
| 1,374 |
0 |
Create_ds/suro/suro-localfile/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-localfile/src/main/java/com/netflix/suro/sink/localfile/SequenceFileViewer.java
|
package com.netflix.suro.sink.localfile;
import com.netflix.suro.message.serde.SerDeFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import java.io.IOException;
/**
* Simple tool for viewing a sequence file.
*
* @author jbae
*/
public class SequenceFileViewer {
public static void main(String[] args) throws IOException {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
SequenceFile.Reader r = new SequenceFile.Reader(fs, new Path(args[0]), conf);
Text routingKey = new Text();
MessageWritable message = new MessageWritable();
while (r.next(routingKey, message)) {
System.out.println("###routing key: " + routingKey);
System.out.println(SerDeFactory.create(args[1]).deserialize(message.getMessage().getPayload()));
}
r.close();
}
}
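// Illustrative usage, not part of the original source. Argument meanings are taken
// from main() above: args[0] is the sequence file path and args[1] is the SerDe
// identifier passed to SerDeFactory.create (the accepted identifiers are an
// assumption here):
//
//   java com.netflix.suro.sink.localfile.SequenceFileViewer /logs/suro/part0.done string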
| 1,375 |
0 |
Create_ds/suro/suro-localfile/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-localfile/src/main/java/com/netflix/suro/sink/localfile/LocalFileSink.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.localfile;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Preconditions;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.DynamicCounter;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.servo.monitor.Monitors;
import com.netflix.suro.TagKey;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.MessageContainer;
import com.netflix.suro.queue.MemoryQueue4Sink;
import com.netflix.suro.queue.MessageQueue4Sink;
import com.netflix.suro.sink.QueuedSink;
import com.netflix.suro.sink.Sink;
import com.netflix.suro.sink.notice.Notice;
import com.netflix.suro.sink.notice.QueueNotice;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTime;
import org.joda.time.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
/**
* LocalFileSink appends messages to a file in the local file system and rotates
* the file when the file size reaches the threshold or when the rotation period
* elapses, whichever comes first. When {@link com.netflix.suro.sink.localfile.LocalFileSink.SpaceChecker} detects
* that there is not enough disk space, the sink pauses so it no longer takes traffic.
*
* @author jbae
*/
public class LocalFileSink extends QueuedSink implements Sink {
private static final Logger log = LoggerFactory.getLogger(LocalFileSink.class);
public static final String EMPTY_ROUTING_KEY_REPLACEMENT = "_empty_routing_key";
public static final String TYPE = "local";
public static final String suffix = ".suro";
public static final String done = ".done";
private final String outputDir;
private final FileWriter writer;
private final long maxFileSize;
private final Period rotationPeriod;
private final int minPercentFreeDisk;
private final Notice<String> notice;
private SpaceChecker spaceChecker;
private String filePath;
private long nextRotation;
private long writtenMessages;
private long writtenBytes;
private long errorClosedFiles;
private long emptyRoutingKeyCount;
private boolean messageWrittenInRotation = false;
@JsonCreator
public LocalFileSink(
@JsonProperty("outputDir") String outputDir,
@JsonProperty("writer") FileWriter writer,
@JsonProperty("notice") Notice notice,
@JsonProperty("maxFileSize") long maxFileSize,
@JsonProperty("rotationPeriod") String rotationPeriod,
@JsonProperty("minPercentFreeDisk") int minPercentFreeDisk,
@JsonProperty("queue4Sink") MessageQueue4Sink queue4Sink,
@JsonProperty("batchSize") int batchSize,
@JsonProperty("batchTimeout") int batchTimeout,
@JsonProperty("pauseOnLongQueue") boolean pauseOnLongQueue,
@JacksonInject SpaceChecker spaceChecker) {
if (!outputDir.endsWith("/")) {
outputDir += "/";
}
Preconditions.checkNotNull(outputDir, "outputDir is needed");
this.outputDir = outputDir;
this.writer = writer == null ? new TextFileWriter(null) : writer;
this.maxFileSize = maxFileSize == 0 ? 200 * 1024 * 1024 : maxFileSize;
this.rotationPeriod = new Period(rotationPeriod == null ? "PT2m" : rotationPeriod);
this.minPercentFreeDisk = minPercentFreeDisk == 0 ? 15 : minPercentFreeDisk;
this.notice = notice == null ? new QueueNotice<String>() : notice;
this.spaceChecker = spaceChecker;
Monitors.registerObject(outputDir.replace('/', '_'), this);
initialize("localfile_" + outputDir.replace('/', '_'),
queue4Sink == null ? new MemoryQueue4Sink(10000) : queue4Sink,
batchSize,
batchTimeout,
pauseOnLongQueue);
}
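// Illustrative configuration sketch, not part of the original source, based on
// the @JsonProperty names bound by the constructor above; all values are examples:
//
//   {
//       "type": "local",
//       "outputDir": "/logs/suro/",
//       "writer": { "type": "text" },
//       "maxFileSize": 104857600,
//       "rotationPeriod": "PT2m",
//       "minPercentFreeDisk": 15,
//       "notice": { "type": "queue" }
//   }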
public String getOutputDir() {
return outputDir;
}
@Override
public void open() {
try {
if (spaceChecker == null) {
spaceChecker = new SpaceChecker(minPercentFreeDisk, outputDir);
}
notice.init();
writer.open(outputDir);
setName(LocalFileSink.class.getSimpleName() + "-" + outputDir);
start();
} catch (Throwable e) {
throw new RuntimeException(e);
}
}
@Override
public void writeTo(MessageContainer message) {
enqueue(message.getMessage());
}
public static class SpaceChecker {
private final int minPercentFreeDisk;
private final File outputDir;
@Monitor(name = "freeSpace", type = DataSourceType.GAUGE)
private long freeSpace;
/**
* When the percentage of free disk space falls below minPercentFreeDisk,
* the sink should stop taking traffic.
*
* @param minPercentFreeDisk minimum percentage of free space
* @param outputDir
*/
public SpaceChecker(int minPercentFreeDisk, String outputDir) {
this.minPercentFreeDisk = minPercentFreeDisk;
this.outputDir = new File(outputDir);
Monitors.registerObject(this);
}
/**
*
* @return true when the local disk backing the output directory has enough free space, otherwise false
*/
public boolean hasEnoughSpace() {
long totalSpace = outputDir.getTotalSpace();
freeSpace = outputDir.getFreeSpace();
long minFreeAvailable = (totalSpace * minPercentFreeDisk) / 100;
return freeSpace >= minFreeAvailable;
}
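// Worked example, illustrative only: with the default minPercentFreeDisk of 15
// and a 500 GB volume, hasEnoughSpace() returns false once free space drops
// below 500 GB * 15 / 100 = 75 GB, which pauses the sink at the next rotation.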
}
private long pause;
private void rotate() throws IOException {
String newName = FileNameFormatter.get(outputDir) + suffix;
writer.rotate(newName);
renameAndNotify(filePath);
filePath = newName;
nextRotation = new DateTime().plus(rotationPeriod).getMillis();
if (!spaceChecker.hasEnoughSpace()) {
pause = rotationPeriod.toStandardDuration().getMillis();
} else {
pause = 0;
}
}
@Override
public long checkPause() {
return super.checkPause() + pause;
}
/**
* Before polling messages from the queue, check whether the file should be
* rotated and, if so, start writing to a new file.
*
* @throws java.io.IOException
*/
@Override
protected void beforePolling() throws IOException {
// Don't rotate if we are not running
if (isRunning &&
(writer.getLength() > maxFileSize ||
System.currentTimeMillis() > nextRotation)) {
rotate();
}
}
/**
* Write all messages in msgList to file writer, sync the file,
* commit the queue and clear messages
*
* @param msgList
* @throws java.io.IOException
*/
@Override
protected void write(List<Message> msgList) throws IOException {
for (Message msg : msgList) {
writer.writeTo(msg);
String routingKey = normalizeRoutingKey(msg);
DynamicCounter.increment(
MonitorConfig.builder("writtenMessages")
.withTag(TagKey.DATA_SOURCE, routingKey)
.build());
++writtenMessages;
DynamicCounter.increment(
MonitorConfig.builder("writtenBytes")
.withTag(TagKey.DATA_SOURCE, routingKey)
.build(), msg.getPayload().length);
writtenBytes += msg.getPayload().length;
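// remember that the current file received at least one message so rotate() publishes it instead of deleting it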
messageWrittenInRotation = true;
}
writer.sync();
throughput.increment(msgList.size());
}
private String normalizeRoutingKey(Message msg) {
String routingKey = msg.getRoutingKey();
if(routingKey == null || routingKey.trim().length() == 0) {
emptyRoutingKeyCount += 1;
DynamicCounter.increment("emptyRoutingKeyCount");
if(log.isDebugEnabled()) {
log.debug("Message {} with empty routing key", Arrays.asList(msg.getPayload()));
}
return EMPTY_ROUTING_KEY_REPLACEMENT;
}
return routingKey;
}
@Override
protected void innerClose() throws IOException {
writer.close();
renameAndNotify(filePath);
}
@Override
public String recvNotice() {
return notice.recv();
}
private void renameAndNotify(String filePath) throws IOException {
if (filePath != null) {
if (messageWrittenInRotation) {
// if we have the previous file
String doneFile = filePath.replace(suffix, done);
writer.setDone(filePath, doneFile);
notice.send(doneFile);
} else {
// delete it
deleteFile(filePath);
}
}
messageWrittenInRotation = false;
}
/**
* Calls {@link #cleanUp(String, boolean)} with outputDir as the directory.
*
* @param fetchAll whether to also count live files that are not yet marked as done
* @return the number of files found in the directory
*/
public int cleanUp(boolean fetchAll) {
return cleanUp(outputDir, fetchAll);
}
/**
* Lists all files under the directory. For every file already marked as done,
* a notice is sent. A file that still has the live suffix but has not been
* modified for two rotation periods is assumed to have been closed improperly;
* it is marked as done and a notice is sent, although reading such a file may
* end with an EOFException.
*
* @param dir directory to scan
* @param fetchAll whether to also count live files that are not yet marked as done
* @return the number of files found in the directory
*/
public int cleanUp(String dir, boolean fetchAll) {
if (!dir.endsWith("/")) {
dir += "/";
}
int count = 0;
try {
FileSystem fs = writer.getFS();
FileStatus[] files = fs.listStatus(new Path(dir));
for (FileStatus file: files) {
if (file.getLen() > 0) {
String fileName = file.getPath().getName();
String fileExt = getFileExt(fileName);
if (fileExt != null && fileExt.equals(done)) {
notice.send(dir + fileName);
++count;
} else if (fileExt != null) {
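// a live (non-done) file untouched for two rotation periods is assumed to be left over from an improper shutdown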
long lastPeriod =
new DateTime().minus(rotationPeriod).minus(rotationPeriod).getMillis();
if (file.getModificationTime() < lastPeriod) {
++errorClosedFiles;
DynamicCounter.increment("closedFileError");
log.error(dir + fileName + " is not closed properly!!!");
String doneFile = fileName.replace(fileExt, done);
writer.setDone(dir + fileName, dir + doneFile);
notice.send(dir + doneFile);
++count;
} else if (fetchAll) {
++count;
}
}
}
}
} catch (Exception e) {
log.error("Exception while cleaning up: " + e.getMessage(), e);
return Integer.MAX_VALUE; // return non-zero value
}
return count;
}
/**
* Returns the file extension, including the leading dot, from the file name.
*
* @param fileName file name to inspect
* @return the extension such as ".done", or null when the name has no extension
*/
public static String getFileExt(String fileName) {
int dotPos = fileName.lastIndexOf('.');
if (dotPos != -1 && dotPos != fileName.length() - 1) {
return fileName.substring(dotPos);
} else {
return null;
}
}
@Override
public String getStat() {
return String.format(
"%d msgs, %s written, %s have empty routing key. %s failures of closing files",
writtenMessages,
FileUtils.byteCountToDisplaySize(writtenBytes),
emptyRoutingKeyCount,
errorClosedFiles
);
}
private static final int deleteFileRetryCount = 5;
/**
* With AWS EBS, deletions were sometimes observed to fail silently without any
* IOException. To prevent surplus files from piling up, the deletion is retried,
* by default up to five times.
*
* @param filePath path of the file to delete
*/
public void deleteFile(String filePath) {
int retryCount = 1;
while (retryCount <= deleteFileRetryCount) {
try {
if (writer.getFS().exists(new Path(filePath))) {
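// back off linearly (1s, 2s, ...) before each delete attempt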
Thread.sleep(1000 * retryCount);
writer.getFS().delete(new Path(filePath), false);
++retryCount;
} else {
break;
}
} catch (Exception e) {
log.warn("Exception while deleting the file: " + e.getMessage(), e);
++retryCount; // count failed attempts too, otherwise a persistent exception would loop forever
}
}
}
}
| 1,376 |
0 |
Create_ds/suro/suro-localfile/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-localfile/src/main/java/com/netflix/suro/sink/localfile/FileNameFormatter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.localfile;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.rmi.server.UID;
/**
* File name formatter used in LocalFileSink.
* File names are formatted as [PyyyyMMddTHHmmss][hostname][random UID],
* with '-' and ':' characters stripped from the result.
*
* @author jbae
*/
public class FileNameFormatter {
public static String localHostAddr;
static {
try {
localHostAddr = InetAddress.getLocalHost().getHostName();
} catch (UnknownHostException e) {
localHostAddr = "UNKNOWN_HOST";
}
}
private static DateTimeFormatter fmt = DateTimeFormat.forPattern("'P'yyyyMMdd'T'HHmmss");
/**
*
* @param dir directory path where the files are written
* @return full file path with the directory suffixed
*/
public static String get(String dir) {
StringBuilder sb = new StringBuilder(dir);
if (!dir.endsWith("/")) {
sb.append('/');
}
sb.append(fmt.print(new DateTime()))
.append(localHostAddr)
.append(new UID().toString());
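// UID.toString() contains ':' and '-' characters; they are stripped below so the name stays filesystem friendly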
return sb.toString().replaceAll("[-:]", "");
}
}
| 1,377 |
0 |
Create_ds/suro/suro-localfile/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-localfile/src/main/java/com/netflix/suro/sink/localfile/MessageWritable.java
|
package com.netflix.suro.sink.localfile;
import com.netflix.suro.message.Message;
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class MessageWritable implements Writable {
private final Message m;
public MessageWritable(Message m) {
this.m = m;
}
public MessageWritable() {
m = new Message();
}
@Override
public void write(DataOutput out) throws IOException {
m.write(out);
}
@Override
public void readFields(DataInput in) throws IOException {
m.readFields(in);
}
public Message getMessage() { return m; }
}
| 1,378 |
0 |
Create_ds/suro/suro-server/src/test/java/com/netflix
|
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/TestUtils.java
|
package com.netflix.suro;
import java.io.IOException;
import java.net.ServerSocket;
public class TestUtils {
public static int pickPort() throws IOException {
ServerSocket socket = new ServerSocket(0);
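// binding to port 0 lets the OS pick a free ephemeral port; the socket is closed right away and the number handed to the test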
int port = socket.getLocalPort();
socket.close();
return port;
}
}
| 1,379 |
0 |
Create_ds/suro/suro-server/src/test/java/com/netflix
|
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/TestSuroService.java
|
package com.netflix.suro;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.google.inject.Injector;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.suro.input.DynamicPropertyInputConfigurator;
import com.netflix.suro.input.SuroInput;
import com.netflix.suro.message.MessageContainer;
import com.netflix.suro.routing.DynamicPropertyRoutingMapConfigurator;
import com.netflix.suro.sink.DynamicPropertySinkConfigurator;
import com.netflix.suro.sink.Sink;
import org.junit.Test;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import static org.junit.Assert.assertTrue;
public class TestSuroService {
private static final AtomicBoolean sinkOpened = new AtomicBoolean(false);
private static final AtomicBoolean inputOpened = new AtomicBoolean(false);
private static final CountDownLatch latch = new CountDownLatch(2);
public static class TestSuroServiceSink implements Sink {
@JsonCreator
public TestSuroServiceSink() {}
@Override
public void writeTo(MessageContainer message) {
}
@Override
public void open() {
latch.countDown();
sinkOpened.set(true);
}
@Override
public void close() {
}
@Override
public String recvNotice() {
return null;
}
@Override
public String getStat() {
return null;
}
@Override
public long getNumOfPendingMessages() {
return 0;
}
@Override
public long checkPause() {
return 0;
}
}
public static class TestSuroServiceInput implements SuroInput {
@JsonCreator
public TestSuroServiceInput() {}
@Override
public String getId() {
return "testsuroservice";
}
@Override
public void start() throws Exception {
latch.countDown();
assertTrue(sinkOpened.get());
inputOpened.set(true);
}
@Override
public void shutdown() {
}
@Override
public void setPause(long ms) {
}
}
@Test
public void test() throws Exception {
AtomicReference<Injector> injector = new AtomicReference<Injector>();
Properties properties = new Properties();
properties.setProperty(DynamicPropertyRoutingMapConfigurator.ROUTING_MAP_PROPERTY, "{}");
properties.setProperty(DynamicPropertySinkConfigurator.SINK_PROPERTY,
"{\n" +
"    \"default\": {\n" +
"        \"type\": \"testsuroservice\"\n" +
"    }\n" +
"}");
properties.setProperty(DynamicPropertyInputConfigurator.INPUT_CONFIG_PROPERTY,
"[\n" +
" {\n" +
" \"type\": \"testsuroservice\"\n" +
" }\n" +
"]");
SuroServer.create(injector, properties, new SuroPlugin() {
@Override
protected void configure() {
addInputType("testsuroservice", TestSuroServiceInput.class);
addSinkType("testsuroservice", TestSuroServiceSink.class);
}
});
injector.get().getInstance(LifecycleManager.class).start();
latch.await(5000, TimeUnit.MILLISECONDS);
assertTrue(inputOpened.get());
assertTrue(sinkOpened.get());
}
}
| 1,380 |
0 |
Create_ds/suro/suro-server/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/input/TestInputManager.java
|
package com.netflix.suro.input;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.AbstractModule;
import com.google.inject.Injector;
import com.netflix.governator.configuration.PropertiesConfigurationProvider;
import com.netflix.governator.guice.BootstrapBinder;
import com.netflix.governator.guice.BootstrapModule;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.suro.TestUtils;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.sink.kafka.KafkaServerExternalResource;
import com.netflix.suro.sink.kafka.ZkExternalResource;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
import java.util.List;
import java.util.Properties;
import static org.junit.Assert.assertNotNull;
public class TestInputManager {
public static ZkExternalResource zk = new ZkExternalResource();
public static KafkaServerExternalResource kafkaServer = new KafkaServerExternalResource(zk);
@ClassRule
public static TestRule chain = RuleChain
.outerRule(zk)
.around(kafkaServer);
private String inputConfig = "[\n" +
" {\n" +
" \"type\": \"thrift\"\n" +
" },\n" +
" {\n" +
" \"type\": \"kafka\",\n" +
" \"topic\": \"kafka_topic\",\n" +
" \"consumerProps\": {\n" +
" \"group.id\": \"kafka1\",\n" +
" \"zookeeper.connect\":\"" + zk.getConnectionString() + "\",\n" +
" \"consumer.timeout.ms\": \"1000\"\n" +
" }\n" +
" }\n" +
"]";
private String addInputConfig = "[\n" +
" {\n" +
" \"type\": \"thrift\"\n" +
" },\n" +
" {\n" +
" \"type\": \"kafka\",\n" +
" \"topic\": \"kafka_topic\",\n" +
" \"consumerProps\": {\n" +
" \"group.id\": \"kafka1\",\n" +
" \"zookeeper.connect\":\"" + zk.getConnectionString() + "\",\n" +
" \"consumer.timeout.ms\": \"1000\"\n" +
" }\n" +
" },\n" +
" {\n" +
" \"type\": \"kafka\",\n" +
" \"topic\": \"kafka_topic\",\n" +
" \"consumerProps\": {\n" +
" \"group.id\": \"kafka2\",\n" +
" \"zookeeper.connect\":\"" + zk.getConnectionString() + "\",\n" +
" \"consumer.timeout.ms\": \"1000\"\n" +
" }\n" +
" }\n" +
"]";
@Test
public void test() throws Exception {
int statusPort = TestUtils.pickPort();
int serverPort = TestUtils.pickPort();
final Properties props = new Properties();
props.put("SuroServer.statusServerPort", Integer.toString(statusPort));
props.put("SuroServer.port", Integer.toString(serverPort));
Injector injector = LifecycleInjector.builder().withBootstrapModule(new BootstrapModule() {
@Override
public void configure(BootstrapBinder binder) {
binder.bindConfigurationProvider().toInstance(new PropertiesConfigurationProvider(props));
}
}).withModules(
new SuroInputPlugin(),
new AbstractModule() {
@Override
protected void configure() {
bind(ObjectMapper.class).to(DefaultObjectMapper.class);
}
}
).build().createInjector();
LifecycleManager lifecycleManager = injector.getInstance(LifecycleManager.class);
lifecycleManager.start();
InputManager inputManager = new InputManager();
List<SuroInput> inputList = injector.getInstance(ObjectMapper.class).readValue(
inputConfig,
new TypeReference<List<SuroInput>>() {
});
inputManager.set(inputList);
assertNotNull(inputManager.getInput("thrift"));
assertNotNull(inputManager.getInput("kafka_topic-kafka1"));
inputList = injector.getInstance(ObjectMapper.class).readValue(
addInputConfig,
new TypeReference<List<SuroInput>>() {
});
inputManager.set(inputList);
assertNotNull(inputManager.getInput("thrift"));
assertNotNull(inputManager.getInput("kafka_topic-kafka1"));
assertNotNull(inputManager.getInput("kafka_topic-kafka2"));
inputList = injector.getInstance(ObjectMapper.class).readValue(
inputConfig,
new TypeReference<List<SuroInput>>() {
});
inputManager.set(inputList);
assertNotNull(inputManager.getInput("thrift"));
assertNotNull(inputManager.getInput("kafka_topic-kafka1"));
}
}
| 1,381 |
0 |
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/input
|
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/input/thrift/TestMessageSetProcessor.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.input.thrift;
import com.google.inject.Injector;
import com.netflix.governator.configuration.PropertiesConfigurationProvider;
import com.netflix.governator.guice.BootstrapBinder;
import com.netflix.governator.guice.BootstrapModule;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.suro.connection.TestConnectionPool;
import com.netflix.suro.input.thrift.MessageSetProcessor;
import com.netflix.suro.input.thrift.ServerConfig;
import com.netflix.suro.thrift.ResultCode;
import com.netflix.suro.thrift.ServiceStatus;
import com.netflix.suro.thrift.TMessageSet;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
public class TestMessageSetProcessor {
@Rule
public TemporaryFolder tempDir = new TemporaryFolder();
private Injector injector;
@Test
public void testMemoryQueue() throws Exception {
final Properties props = new Properties();
testQueue(props);
}
@Test
public void testFileQueue() throws Exception {
final Properties props = new Properties();
props.setProperty(ServerConfig.QUEUE_TYPE, "file");
props.setProperty(ServerConfig.FILEQUEUE_PATH, tempDir.newFolder().getAbsolutePath());
testQueue(props);
}
private void testQueue(final Properties props) throws Exception {
injector = LifecycleInjector.builder()
.withBootstrapModule(new BootstrapModule() {
@Override
public void configure(BootstrapBinder binder) {
binder.bindConfigurationProvider().toInstance(new PropertiesConfigurationProvider(props));
}
}).build().createInjector();
injector.getInstance(LifecycleManager.class).start();
MessageSetProcessor queue = injector.getInstance(MessageSetProcessor.class);
assertEquals(queue.getQueueSize(), 0);
assertEquals(queue.getStatus(), ServiceStatus.ALIVE);
TMessageSet messageSet = TestConnectionPool.createMessageSet(100);
assertEquals(queue.process(messageSet).getResultCode(), ResultCode.OK);
assertEquals(queue.getQueueSize(), 1);
assertEquals(queue.poll(1, TimeUnit.MILLISECONDS), messageSet);
assertEquals(queue.getQueueSize(), 0);
queue.stopTakingTraffic();
assertEquals(queue.process(messageSet).getResultCode(), ResultCode.OTHER_ERROR);
queue.startTakingTraffic();
assertEquals(queue.getStatus(), ServiceStatus.ALIVE);
assertEquals(queue.process(messageSet).getResultCode(), ResultCode.OK);
injector.getInstance(LifecycleManager.class).close();
}
}
| 1,382 |
0 |
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/input
|
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/input/thrift/TestMessageSetSerDe.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.input.thrift;
import com.netflix.suro.connection.TestConnectionPool;
import com.netflix.suro.input.thrift.MessageSetSerDe;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.MessageSetReader;
import com.netflix.suro.thrift.TMessageSet;
import org.junit.Test;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestMessageSetSerDe {
@Test
public void test() {
TMessageSet messageSet = TestConnectionPool.createMessageSet(100);
MessageSetSerDe serde = new MessageSetSerDe();
byte[] payload = serde.serialize(messageSet);
TMessageSet d = serde.deserialize(payload);
assertTrue(Arrays.equals(d.getMessages(), messageSet.getMessages()));
List<Message> messageList = new LinkedList<Message>();
for (Message m : new MessageSetReader(messageSet)) {
messageList.add(m);
}
List<Message> dMessageList = new LinkedList<Message>();
for (Message m : new MessageSetReader(d)) {
dMessageList.add(m);
}
assertEquals(messageList, dMessageList);
}
}
| 1,383 |
0 |
Create_ds/suro/suro-server/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/sink/TestSinkManager.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.AbstractModule;
import com.google.inject.Injector;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.suro.SuroPlugin;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.MessageContainer;
import org.junit.Test;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import static org.junit.Assert.*;
public class TestSinkManager {
private Map<String, Sink> getSinkMap(ObjectMapper mapper, String desc) throws Exception {
return mapper.<Map<String, Sink>>readValue(
desc,
new TypeReference<Map<String, Sink>>() {});
}
public static class TestSink implements Sink {
public static final String TYPE = "TestSink";
private final String message;
private String status;
private static AtomicInteger numOfSink = new AtomicInteger();
public static int getNumOfSinks() {
return numOfSink.get();
}
@JsonCreator
public TestSink(@JsonProperty("message") String message) {
this.message = message;
}
@Override
public void writeTo(MessageContainer message) {}
@Override
public void open() {
status = "open";
numOfSink.incrementAndGet();
}
@Override
public void close() {
status = "closed";
numOfSink.decrementAndGet();
}
@Override
public String recvNotice() {
return null;
}
@Override
public String getStat() {
return message + " " + status;
}
@Override
public long getNumOfPendingMessages() {
return 0;
}
@Override
public long checkPause() {
return 0;
}
}
@Test
public void test() throws Exception {
Injector injector = LifecycleInjector.builder().withModules(
new SuroPlugin() {
@Override
protected void configure() {
this.addSinkType("TestSink", TestSink.class);
}
},
new AbstractModule() {
@Override
protected void configure() {
bind(ObjectMapper.class).to(DefaultObjectMapper.class);
}
}
).build().createInjector();
LifecycleManager lifecycleManager = injector.getInstance(LifecycleManager.class);
lifecycleManager.start();
String desc = "{\n" +
" \"default\": {\n" +
" \"type\": \"TestSink\",\n" +
" \"message\": \"defaultTestSink\"\n" +
" },\n" +
" \"topic1\": {\n" +
" \"type\": \"TestSink\",\n" +
" \"message\": \"topic1TestSink\"\n" +
" }\n" +
"}";
SinkManager sinkManager = injector.getInstance(SinkManager.class);
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
sinkManager.set(getSinkMap(mapper, desc));
assertEquals(sinkManager.getSink("topic1").getStat(), "topic1TestSink open");
assertEquals(sinkManager.getSink("default").getStat(), "defaultTestSink open");
assertEquals(sinkManager.getSink("topic7").getStat(), "defaultTestSink open");
assertTrue(
sinkManager.reportSinkStat().equals("default:defaultTestSink open\n\ntopic1:topic1TestSink open\n\n") ||
sinkManager.reportSinkStat().equals("topic1:topic1TestSink open\n\ndefault:defaultTestSink open\n\n"));
assertEquals(TestSink.getNumOfSinks(), 2);
// change desc - test removal
desc = "{\n" +
" \"default\": {\n" +
" \"type\": \"TestSink\",\n" +
" \"message\": \"defaultTestSink\"\n" +
" }\n" +
"}";
sinkManager.set(getSinkMap(mapper, desc));
assertEquals(sinkManager.getSink("topic1").getStat(), "defaultTestSink open");
assertEquals(sinkManager.getSink("default").getStat(), "defaultTestSink open");
assertEquals(sinkManager.reportSinkStat(),
String.format("default:defaultTestSink open\n\n"));
assertEquals(TestSink.getNumOfSinks(), 1);
// change desc - test addition
desc = "{\n" +
" \"default\": {\n" +
" \"type\": \"TestSink\",\n" +
" \"message\": \"defaultTestSink\"\n" +
" },\n" +
" \"topic2\": {\n" +
" \"type\": \"TestSink\",\n" +
" \"message\": \"topic2TestSink\"\n" +
" }\n" +
"}";
sinkManager.set(getSinkMap(mapper, desc));
assertEquals(sinkManager.getSink("topic1").getStat(), "defaultTestSink open");
assertEquals(sinkManager.getSink("default").getStat(), "defaultTestSink open");
assertEquals(sinkManager.getSink("topic2").getStat(), "topic2TestSink open");
assertTrue(
sinkManager.reportSinkStat().equals("default:defaultTestSink open\n\ntopic2:topic2TestSink open\n\n") ||
sinkManager.reportSinkStat().equals("topic2:topic2TestSink open\n\ndefault:defaultTestSink open\n\n")
);
assertEquals(TestSink.getNumOfSinks(), 2);
// test exception - nothing changed
desc = "{\n" +
" \"default\": {\n" +
" \"type\": \"TestSink\",\n" +
" \"message\": \"defaultTestSink\"\n" +
" },\n" +
" \"topic2\": {\n" +
" \"type\": \"TestSink\",\n" +
" \"message\": \"topic2TestSink\"\n" +
" }\n" +
"},";
sinkManager.set(getSinkMap(mapper, desc));
assertEquals(sinkManager.getSink("topic1").getStat(), "defaultTestSink open");
assertEquals(sinkManager.getSink("default").getStat(), "defaultTestSink open");
assertEquals(sinkManager.getSink("topic2").getStat(), "topic2TestSink open");
assertTrue(
sinkManager.reportSinkStat().equals("default:defaultTestSink open\n\ntopic2:topic2TestSink open\n\n") ||
sinkManager.reportSinkStat().equals("topic2:topic2TestSink open\n\ndefault:defaultTestSink open\n\n")
);
assertEquals(TestSink.getNumOfSinks(), 2);
// test destroy
lifecycleManager.close();
assertEquals(TestSink.getNumOfSinks(), 0);
assertNull(sinkManager.getSink("any"));
}
}
| 1,384 |
0 |
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/sink/notice/TestNotice.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.notice;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.sqs.AmazonSQSClient;
import com.amazonaws.services.sqs.model.*;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Charsets;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Provider;
import com.netflix.suro.SuroPlugin;
import com.netflix.suro.aws.PropertyAWSCredentialsProvider;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.sink.TestSinkManager;
import org.apache.commons.codec.binary.Base64;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import java.io.IOException;
import java.util.Arrays;
import static junit.framework.Assert.assertNull;
import static junit.framework.TestCase.assertEquals;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.*;
public class TestNotice {
private Injector injector;
@Before
public void setup() {
injector = Guice.createInjector(
new SuroPlugin() {
@Override
protected void configure() {
this.addSinkType("TestSink", TestSinkManager.TestSink.class);
this.addNoticeType(NoNotice.TYPE, NoNotice.class);
this.addNoticeType(QueueNotice.TYPE, QueueNotice.class);
this.addNoticeType(SQSNotice.TYPE, SQSNotice.class);
}
},
new AbstractModule() {
@Override
protected void configure() {
bind(ObjectMapper.class).to(DefaultObjectMapper.class);
bind(AWSCredentialsProvider.class).to(PropertyAWSCredentialsProvider.class);
bind(AmazonSQSClient.class).toProvider(AmazonSQSClientProvider.class).asEagerSingleton();
}
}
);
}
public static class AmazonSQSClientProvider implements Provider<AmazonSQSClient> {
@Override
public AmazonSQSClient get() {
AmazonSQSClient client = mock(AmazonSQSClient.class);
doReturn(new SendMessageResult()).when(client).sendMessage(any(SendMessageRequest.class));
ReceiveMessageResult result = new ReceiveMessageResult();
result.setMessages(Arrays.asList(new Message[]{new Message().withBody("receivedMessage")}));
doReturn(result).when(client).receiveMessage(any(ReceiveMessageRequest.class));
doReturn(new GetQueueUrlResult().withQueueUrl("queueURL")).when(client).getQueueUrl(any(GetQueueUrlRequest.class));
return client;
}
}
@Test
public void testQueue() throws IOException {
String desc = "{\n" +
" \"type\": \"queue\" \n" +
"}";
ObjectMapper mapper = injector.getInstance(DefaultObjectMapper.class);
Notice queueNotice = mapper.readValue(desc, new TypeReference<Notice>(){});
queueNotice.init();
queueNotice.send("message");
assertEquals(queueNotice.recv(), "message");
assertNull(queueNotice.recv());
}
@Test
public void testSQS() throws IOException {
String desc = "{\n" +
" \"type\": \"sqs\",\n" +
" \"queues\": [\n" +
" \"queue1\"\n" +
" ],\n" +
" \"region\": \"us-east-1\",\n" +
" \"connectionTimeout\": 3000,\n" +
" \"maxConnections\": 3,\n" +
" \"socketTimeout\": 1000,\n" +
" \"maxRetries\": 3\n" +
"}";
SqsTest sqsTest = new SqsTest(desc).invoke();
ArgumentCaptor<SendMessageRequest> captor = sqsTest.getCaptor();
Notice queueNotice = sqsTest.getQueueNotice();
assertEquals(captor.getValue().getMessageBody(), "message");
assertEquals(captor.getValue().getQueueUrl(), "queueURL");
assertEquals(queueNotice.recv(), "receivedMessage");
}
@Test
public void testSQSBase64() throws IOException {
String desc = "{\n" +
" \"type\": \"sqs\",\n" +
" \"queues\": [\n" +
" \"queue1\"\n" +
" ],\n" +
" \"enableBase64Encoding\": true,\n" +
" \"region\": \"us-east-1\",\n" +
" \"connectionTimeout\": 3000,\n" +
" \"maxConnections\": 3,\n" +
" \"socketTimeout\": 1000,\n" +
" \"maxRetries\": 3\n" +
"}";
SqsTest sqsTest = new SqsTest(desc).invoke();
ArgumentCaptor<SendMessageRequest> captor = sqsTest.getCaptor();
Notice queueNotice = sqsTest.getQueueNotice();
assertEquals(captor.getValue().getMessageBody(), new String(Base64.encodeBase64("message".getBytes()),
Charsets.UTF_8));
assertEquals(captor.getValue().getQueueUrl(), "queueURL");
assertEquals(queueNotice.recv(), new String(Base64.decodeBase64("receivedMessage".getBytes()), Charsets.UTF_8));
}
private class SqsTest {
private String desc;
private Notice queueNotice;
private ArgumentCaptor<SendMessageRequest> captor;
public SqsTest(String desc) {
this.desc = desc;
}
public Notice getQueueNotice() {
return queueNotice;
}
public ArgumentCaptor<SendMessageRequest> getCaptor() {
return captor;
}
public SqsTest invoke() throws IOException {
ObjectMapper mapper = injector.getInstance(DefaultObjectMapper.class);
AmazonSQSClient client = injector.getInstance(AmazonSQSClient.class);
queueNotice = mapper.readValue(desc, new TypeReference<Notice>() {});
queueNotice.init();
queueNotice.send("message");
captor = ArgumentCaptor.forClass(SendMessageRequest.class);
verify(client).sendMessage(captor.capture());
return this;
}
}
}
| 1,385 |
0 |
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/sink/queue/TestMemoryQueue.java
|
package com.netflix.suro.sink.queue;
import com.netflix.suro.message.Message;
import com.netflix.suro.queue.MemoryQueue4Sink;
import org.junit.Test;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import static org.junit.Assert.assertEquals;
public class TestMemoryQueue {
@Test
public void test() throws IOException {
MemoryQueue4Sink queue = new MemoryQueue4Sink(100);
assertEquals(queue.size(), 0);
assertEquals(queue.isEmpty(), true);
assertEquals(queue.drain(100, new LinkedList<Message>()), 0);
for (int i = 0; i < 100; ++i) {
queue.offer(new Message("routingkey" + i, ("value" + i).getBytes()));
}
assertEquals(queue.size(), 100);
assertEquals(queue.isEmpty(), false);
List<Message> msgList = new LinkedList<Message>();
assertEquals(queue.drain(100, msgList), 100);
int i = 0;
for (Message m : msgList) {
assertEquals(m.getRoutingKey(), "routingkey" + i);
assertEquals(new String(m.getPayload()), "value" + i);
++i;
}
assertEquals(i, 100);
assertEquals(queue.size(), 0);
assertEquals(queue.isEmpty(), true);
}
}
| 1,386 |
0 |
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/sink/queue/TestFileQueue.java
|
package com.netflix.suro.sink.queue;
import com.netflix.suro.message.Message;
import com.netflix.suro.queue.FileQueue4Sink;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import static org.junit.Assert.assertEquals;
public class TestFileQueue {
@Rule
public TemporaryFolder folder = new TemporaryFolder();
@Test
public void test() throws IOException {
FileQueue4Sink queue = new FileQueue4Sink(folder.newFolder().getAbsolutePath(), "testqueue", "PT1m", 1024 * 1024 * 1024);
assertEquals(queue.size(), 0);
assertEquals(queue.isEmpty(), true);
assertEquals(queue.drain(100, new LinkedList<Message>()), 0);
for (int i = 0; i < 100; ++i) {
queue.offer(new Message("routingkey" + i, ("value" + i).getBytes()));
}
assertEquals(queue.size(), 100);
assertEquals(queue.isEmpty(), false);
List<Message> msgList = new LinkedList<Message>();
assertEquals(queue.drain(100, msgList), 100);
int i = 0;
for (Message m : msgList) {
assertEquals(m.getRoutingKey(), "routingkey" + i);
assertEquals(new String(m.getPayload()), "value" + i);
++i;
}
assertEquals(i, 100);
assertEquals(queue.size(), 0);
assertEquals(queue.isEmpty(), true);
}
}
| 1,387 |
0 |
Create_ds/suro/suro-server/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/server/TestStatusServer.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.server;
import com.netflix.suro.input.InputManager;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;
import org.junit.ClassRule;
import org.junit.FixMethodOrder;
import org.junit.Test;
import org.junit.runners.MethodSorters;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.Arrays;
import java.util.HashSet;
import static org.junit.Assert.assertEquals;
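// tests run in name order because _2_* shuts down the thrift input, which would break the earlier health check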
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class TestStatusServer {
@ClassRule
public static SuroServerExternalResource suroServer = new SuroServerExternalResource();
@Test
public void _2_connectionFailureShouldBeDetected() throws Exception {
suroServer.getInjector().getInstance(InputManager.class).getInput("thrift").shutdown();
HttpResponse response = runQuery("surohealthcheck");
assertEquals(500, response.getStatusLine().getStatusCode());
}
private HttpResponse runQuery(String path) throws IOException {
DefaultHttpClient client = new DefaultHttpClient();
HttpGet httpget = new HttpGet(String.format("http://localhost:%d/%s", suroServer.getStatusPort(), path));
try{
return client.execute(httpget);
} finally{
client.getConnectionManager().shutdown();
}
}
@Test
public void _0_healthcheckShouldPassForHealthyServer() throws Exception {
HttpResponse response = runQuery("surohealthcheck");
assertEquals(200, response.getStatusLine().getStatusCode());
}
@Test
public void _1_testSinkStat() throws IOException {
HttpResponse response = runQuery("surosinkstat");
InputStream data = response.getEntity().getContent();
BufferedReader br = new BufferedReader(new InputStreamReader(data));
String line = null;
StringBuilder sb = new StringBuilder();
try {
while ((line = br.readLine()) != null) {
sb.append(line).append('\n');
}
} catch (Exception e) {
e.printStackTrace();
}
// The order of output lines depends on the underlying Map implementation, so compare them as sets instead of relying on order
assertEquals(new HashSet<>(Arrays.asList(sb.toString().trim().split("\\n+"))), new HashSet<>(Arrays.asList("sink1:sink1 open\n\ndefault:default open\n\n".trim().split("\\n+"))));
}
}
| 1,388 |
0 |
Create_ds/suro/suro-server/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/server/TestSuroControl.java
|
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.server;
import com.netflix.suro.SuroControl;
import org.junit.Before;
import org.junit.Test;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.junit.Assert.*;
/**
*
*/
public class TestSuroControl {
private SuroControl server;
@Before
public void setup() {
server = new SuroControl();
}
@Test
public void testOnlyExitCommandWorks() throws Exception {
final CountDownLatch serverDone = new CountDownLatch(1);
final AtomicBoolean serverExited = new AtomicBoolean(false);
final AtomicBoolean serverCrashed = new AtomicBoolean(false);
final int port = findAvailablePort(2000, Short.MAX_VALUE);
new Thread(new Runnable() {
@Override
public void run() {
try {
server.start(port);
serverExited.set(true);
} catch (IOException e) {
serverCrashed.set(true);
} finally {
serverDone.countDown();
}
}
}).start();
Socket client = null;
int testRetry = 100;
for (int i = 0; i < testRetry; ++i) {
try {
client = new Socket("127.0.0.1", port);
break;
} catch (Exception e) {
Thread.sleep(50);
if (i == testRetry - 1) {
throw new RuntimeException("failed to create the client socket: " + e.getMessage(), e);
}
}
}
PrintWriter out = new PrintWriter(client.getOutputStream(), true);
BufferedReader reader = new BufferedReader(new InputStreamReader(client.getInputStream()));
try {
String unknownCmd = "testtest";
out.append(unknownCmd);
out.append("\n");
out.flush();
String response = reader.readLine();
assertTrue(String.format("Expected '%s' in the response, but the response was %s", unknownCmd, response), response.contains(String.format("'%s'", unknownCmd)));
assertFalse(serverExited.get());
assertFalse(serverCrashed.get());
out.append(" exit \n");
out.flush();
response = reader.readLine();
assertEquals("ok", response);
serverDone.await(5, TimeUnit.SECONDS);
assertTrue(serverExited.get());
assertFalse(serverCrashed.get());
} finally {
out.close();
reader.close();
client.close();
}
}
private int findAvailablePort(int low, int high) {
for (int i = low; i < high; ++i) {
if (isAvailable(i)) {
return i;
}
}
throw new IllegalStateException(String.format("Can't find any available port between %d and %d", low, high));
}
private boolean isAvailable(int port) {
ServerSocket socket = null;
try {
socket = new ServerSocket(port);
socket.setReuseAddress(true);
return true;
} catch (IOException e) {
return false;
} finally {
if (socket != null) {
try {
socket.close();
} catch (IOException e) {
//ignore
}
}
}
}
}
| 1,389 |
0 |
Create_ds/suro/suro-server/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/server/SuroServerExternalResource.java
|
package com.netflix.suro.server;
import com.google.inject.Injector;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.suro.SuroPlugin;
import com.netflix.suro.SuroServer;
import com.netflix.suro.TestUtils;
import com.netflix.suro.input.DynamicPropertyInputConfigurator;
import com.netflix.suro.routing.DynamicPropertyRoutingMapConfigurator;
import com.netflix.suro.routing.TestMessageRouter;
import com.netflix.suro.sink.DynamicPropertySinkConfigurator;
import org.junit.rules.ExternalResource;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicReference;
public class SuroServerExternalResource extends ExternalResource {
private int statusPort;
private int serverPort;
private AtomicReference<Injector> injector = new AtomicReference<Injector>();
private String sinkDesc = "{\n" +
" \"default\": {\n" +
" \"type\": \"TestSink\",\n" +
" \"message\": \"default\"\n" +
" },\n" +
" \"sink1\": {\n" +
" \"type\": \"TestSink\",\n" +
" \"message\": \"sink1\"\n" +
" }\n" +
"}";
private String mapDesc="{}";
private String inputConfig = "[\n" +
" {\n" +
" \"type\": \"thrift\"\n" +
" }\n" +
"]";
public SuroServerExternalResource() {}
public SuroServerExternalResource(String inputConfig, String sinkDesc, String mapDesc) {
this.inputConfig = inputConfig;
this.sinkDesc = sinkDesc;
this.mapDesc = mapDesc;
}
@Override
protected void before() throws Exception {
statusPort = TestUtils.pickPort();
serverPort = TestUtils.pickPort();
Properties props = new Properties();
props.put("SuroServer.statusServerPort", Integer.toString(statusPort));
props.put("SuroServer.port", Integer.toString(serverPort));
props.put(DynamicPropertySinkConfigurator.SINK_PROPERTY, sinkDesc);
props.put(DynamicPropertyInputConfigurator.INPUT_CONFIG_PROPERTY, inputConfig);
if (mapDesc != null) {
props.put(DynamicPropertyRoutingMapConfigurator.ROUTING_MAP_PROPERTY, mapDesc);
}
SuroServer.create(injector, props, new SuroPlugin() {
@Override
protected void configure() {
this.addSinkType("TestSink", TestMessageRouter.TestMessageRouterSink.class);
}
});
injector.get().getInstance(LifecycleManager.class).start();
injector.get().getInstance(StatusServer.class).waitUntilStarted();
}
@Override
protected void after() {
injector.get().getInstance(LifecycleManager.class).close();
}
public int getServerPort() {
return serverPort;
}
public int getStatusPort() { return statusPort; }
public Injector getInjector() { return injector.get(); }
}
| 1,390 |
0 |
Create_ds/suro/suro-server/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/server/TestHealthCheck.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.server;
import com.netflix.suro.input.InputManager;
import com.netflix.suro.input.SuroInput;
import com.netflix.suro.input.thrift.ServerConfig;
import org.apache.thrift.transport.TTransportException;
import org.junit.Rule;
import org.junit.Test;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
public class TestHealthCheck {
@Rule
public SuroServerExternalResource suroServer = new SuroServerExternalResource();
@Test
public void test() throws TTransportException, IOException {
InputManager inputManager = mock(InputManager.class);
doReturn(mock(SuroInput.class)).when(inputManager).getInput("thrift");
HealthCheck healthCheck = new HealthCheck(new ServerConfig() {
@Override
public int getPort() {
return suroServer.getServerPort();
}
}, inputManager);
healthCheck.get();
suroServer.getInjector().getInstance(InputManager.class).getInput("thrift").shutdown();
try {
healthCheck.get();
fail("exception should be thrown");
} catch (RuntimeException e) {
assertEquals(e.getMessage(), "NOT ALIVE!!!");
}
}
}
| 1,391 |
0 |
Create_ds/suro/suro-server/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/routing/TestRoutingMap.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.routing;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Function;
import com.google.common.collect.*;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.suro.SuroPlugin;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.MessageContainer;
import com.netflix.suro.routing.RoutingMap.Route;
import com.netflix.suro.routing.RoutingMap.RoutingInfo;
import com.netflix.suro.sink.TestSinkManager.TestSink;
import org.junit.Test;
import org.mockito.Mockito;
import javax.annotation.Nullable;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import static org.junit.Assert.*;
public class TestRoutingMap {
private static Injector injector = Guice.createInjector(
new SuroPlugin() {
@Override
protected void configure() {
this.addSinkType("TestSink", TestSink.class);
}
},
new AbstractModule() {
@Override
protected void configure() {
bind(ObjectMapper.class).to(DefaultObjectMapper.class);
}
},
new RoutingPlugin()
);
private Map<String, RoutingInfo> getRoutingMap(String desc) throws Exception {
return injector.getInstance(ObjectMapper.class).<Map<String, RoutingInfo>>readValue(
desc,
new TypeReference<Map<String, RoutingInfo>>() {});
}
@Test
public void generateRoutingInfo() throws Exception {
RoutingInfo route1 = new RoutingInfo(
ImmutableList.<Route>of(
new Route(
"sink2",
new XPathFilter("xpath(\"//customerInfo/country\") =~ \"(?i)^US\"", new JsonMapConverter()),
null
),
new Route(
"sink3",
new XPathFilter("xpath(\"//responseInfo/status\") >= 400", new JsonMapConverter()),
null
)
),
null
);
System.out.println(new DefaultObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(route1));
}
@Test
public void testRoutingMapWithXPathFilter () throws Exception {
String mapDesc = "{\n" +
" \"request_trace\" : {\n" +
" \"where\" : [ {\n" +
" \"sink\" : \"sink1\",\n" +
" \"filter\" : {\n" +
" \"type\" : \"xpath\",\n" +
" \"filter\" : \"xpath(\\\"//foo/bar\\\") =~ \\\"(?i)test\\\"\",\n" +
" \"converter\" : {\n" +
" \"type\" : \"jsonmap\"\n" +
" }\n" +
" }\n" +
" } ],\n" +
" \"filter\" : {\n" +
" \"type\" : \"regex\",\n" +
" \"regex\" : \"[a-b]+\"\n" +
" }\n" +
" }\n" +
"}";
Map<String, RoutingInfo> map = getRoutingMap(mapDesc);
String routingKey = "request_trace";
assertEquals("There should be one and only one key", 1, map.size());
assertTrue("The only key is " + routingKey, map.containsKey(routingKey));
RoutingInfo info = map.get(routingKey);
List<Route> routes = info.getWhere();
assertEquals("There should be only one sink", 1, routes.size());
Route route = routes.get(0);
assertEquals("sink1", route.getSink());
Map<String, Map<String, String>> obj = Maps.newHashMap();
obj.put("foo", ImmutableMap.of("bar", "tESt"));
MessageContainer container = Mockito.mock(MessageContainer.class);
Mockito.when(container.getEntity(Map.class)).thenReturn(obj);
assertEquals(true, route.getFilter().doFilter(container));
}
@Test
public void test() throws Exception {
String mapDesc = "{\n" +
" \"request_trace\": {\n" +
" \"where\": [\n" +
" {\"sink\":\"sink1\"},\n" +
" {\"sink\":\"sink2\"},\n" +
" {\"sink\":\"sink3\"}\n" +
" ]\n" +
" },\n" +
" \"nf_errors_log\": {\n" +
" \"where\": [\n" +
" {\"sink\":\"sink3\"},\n" +
" {\"sink\":\"sink4\"}\n" +
" ]\n" +
" }\n" +
"}";
RoutingMap routingMap = new RoutingMap();
routingMap.set(getRoutingMap(mapDesc));
assertTrue(
Arrays.equals(
getSinkNames(routingMap.getRoutingInfo("request_trace").getWhere()),
new String[]{"sink1", "sink2", "sink3"}));
assertTrue(
Arrays.equals(
getSinkNames(routingMap.getRoutingInfo("nf_errors_log").getWhere()),
new String[]{"sink3", "sink4"}));
assertNull(routingMap.getRoutingInfo("streaming"));
// test error
// map description changed with json syntax error
// nothing should be changed
mapDesc = "{\n" +
" \"request_trace\": {\n" +
" \"where\": [\n" +
" {\"sink\":\"sink1\"},\n" +
" {\"sink\":\"sink2\"},\n" +
" {\"sink\":\"sink3\"}\n" +
" ]\n" +
" },\n" +
" \"nf_errors_log\": {\n" +
" \"where\": [\n" +
" {\"sink\":\"sink3\"},\n" +
" {\"sink\":\"sink4\"}\n" +
" ]\n" +
" }\n" +
"}";
routingMap.set(getRoutingMap(mapDesc));
assertTrue(
Arrays.equals(
getSinkNames(routingMap.getRoutingInfo("request_trace").getWhere()),
new String[]{"sink1", "sink2", "sink3"}));
assertTrue(
Arrays.equals(
getSinkNames(routingMap.getRoutingInfo("nf_errors_log").getWhere()),
new String[]{"sink3", "sink4"}));
assertNull(routingMap.getRoutingInfo("streaming"));
// description changed
mapDesc = "{\n" +
" \"request_trace\": {\n" +
" \"where\": [\n" +
" {\"sink\":\"sink1\"},\n" +
" {\"sink\":\"sink2\"},\n" +
" {\"sink\":\"sink3\"}\n" +
" ]\n" +
" }\n" +
"}";
routingMap.set(getRoutingMap(mapDesc));
assertTrue(
Arrays.equals(
getSinkNames(routingMap.getRoutingInfo("request_trace").getWhere()),
new String[]{"sink1", "sink2", "sink3"}));
assertNull(routingMap.getRoutingInfo("nf_errors_log"));
assertNull(routingMap.getRoutingInfo("streaming"));
}
private Object[] getSinkNames(List<Route> routes) {
return Lists.newArrayList(Collections2.transform(routes, new Function<Route, String>() {
@Override
@Nullable
public String apply(@Nullable Route input) {
return input.getSink();
}
})).toArray();
}
@Test
public void testDefaultRoutingIsOptional() throws Exception {
// description changed
String mapDesc = "{\n" +
" \"request_trace\": {\n" +
" \"where\": [\n" +
" {\"sink\":\"sink1\"},\n" +
" {\"sink\":\"sink2\"},\n" +
" {\"sink\":\"sink3\"}\n" +
" ]\n" +
" }\n" +
"}";
RoutingMap routingMap = new RoutingMap();
routingMap.set(getRoutingMap(mapDesc));
assertTrue(
Arrays.equals(
getSinkNames(routingMap.getRoutingInfo("request_trace").getWhere()),
new String[]{"sink1", "sink2", "sink3"}));
// Verify that routing keys without an explicit mapping have no routing info when no default route is configured
assertNull(routingMap.getRoutingInfo("nf_errors_log"));
assertNull(routingMap.getRoutingInfo("streaming"));
}
@Test
public void testDefaultRouting() throws Exception {
String mapDesc = "{\n" +
" \"__default__\": {\n" +
" \"where\": [\n" +
" {\"sink\": \"sinkD1\"},\n" +
" {\"sink\": \"sinkD2\"}\n" +
" ]\n" +
" },\n" +
" \"request_trace\": {\n" +
" \"where\": [\n" +
" {\"sink\":\"sink1\"},\n" +
" {\"sink\":\"sink2\"},\n" +
" {\"sink\":\"sink3\"}\n" +
" ]\n" +
" },\n" +
" \"nf_errors_log\": {\n" +
" \"where\": [\n" +
" {\"sink\":\"sink3\"},\n" +
" {\"sink\":\"sink4\"}\n" +
" ]\n" +
" }\n" +
"}";
RoutingMap routingMap = new RoutingMap();
routingMap.set(getRoutingMap(mapDesc));
// Verify that default routes are in place
assertArrayEquals(getSinkNames(routingMap.getRoutingInfo("some non existing key").getWhere()), new String[]{"sinkD1", "sinkD2"});
// Ensure that overrides still work
assertTrue(
Arrays.equals(
getSinkNames(routingMap.getRoutingInfo("request_trace").getWhere()),
new String[]{"sink1", "sink2", "sink3"}));
assertTrue(
Arrays.equals(
getSinkNames(routingMap.getRoutingInfo("nf_errors_log").getWhere()),
new String[]{"sink3", "sink4"}));
}
}
| 1,392 |
0 |
Create_ds/suro/suro-server/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/routing/TestFilter.java
|
package com.netflix.suro.routing;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.suro.SuroPlugin;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.StringMessage;
import com.netflix.suro.sink.TestSinkManager.TestSink;
import org.junit.Test;
import java.io.IOException;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class TestFilter {
private static Injector injector = Guice.createInjector(
new SuroPlugin() {
@Override
protected void configure() {
this.addSinkType("TestSink", TestSink.class);
this.addFilterType("regex", RegexFilter.class);
}
},
new AbstractModule() {
@Override
protected void configure() {
bind(ObjectMapper.class).to(DefaultObjectMapper.class);
}
}
);
@Test
public void testRegexFilter() throws Exception {
String desc = "{\n" +
" \"type\":\"regex\",\n" +
" \"regex\":\"abcd\"\n" +
"}";
ObjectMapper jsonMapper = injector.getInstance(ObjectMapper.class);
Filter filter = jsonMapper.readValue(desc, new TypeReference<Filter>(){});
assertTrue(filter.doFilter(StringMessage.from(null, "abcdefg")));
assertFalse(filter.doFilter(StringMessage.from(null, "zcb")));
assertTrue(filter.doFilter(StringMessage.from("routingKey", "abcdefg")));
assertFalse(filter.doFilter(StringMessage.from("routingKey", "zcb")));
}
}
| 1,393 |
0 |
Create_ds/suro/suro-server/src/test/java/com/netflix/suro
|
Create_ds/suro/suro-server/src/test/java/com/netflix/suro/routing/TestMessageRouter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.routing;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.Injector;
import com.netflix.governator.configuration.PropertiesConfigurationProvider;
import com.netflix.governator.guice.BootstrapBinder;
import com.netflix.governator.guice.BootstrapModule;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.SuroPlugin;
import com.netflix.suro.input.SuroInput;
import com.netflix.suro.input.thrift.MessageSetProcessor;
import com.netflix.suro.input.thrift.ServerConfig;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.MessageContainer;
import com.netflix.suro.message.MessageSetBuilder;
import com.netflix.suro.message.SerDe;
import com.netflix.suro.message.StringSerDe;
import com.netflix.suro.routing.RoutingMap.RoutingInfo;
import com.netflix.suro.sink.Sink;
import com.netflix.suro.sink.SinkManager;
import org.junit.Assert;
import org.junit.Test;
import java.util.*;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
public class TestMessageRouter {
public static Map<String, Integer> messageCount = new HashMap<String, Integer>();
public static class TestMessageRouterSink implements Sink {
private final String message;
private String status;
private final List<String> messageList;
private SerDe<String> serde = new StringSerDe();
@JsonCreator
public TestMessageRouterSink(@JsonProperty("message") String message) {
this.message = message;
this.messageList = new LinkedList<String>();
}
@Override
public synchronized void writeTo(MessageContainer message) {
try {
System.out.println("message: " + this.message + " msg: " + message.getEntity(String.class));
} catch (Exception e1) {
// TODO Auto-generated catch block
e1.printStackTrace();
}
Integer count = messageCount.get(this.message);
if (count == null) {
messageCount.put(this.message, 1);
} else {
messageCount.put(this.message, count + 1);
}
try {
messageList.add(serde.deserialize(message.getEntity(byte[].class)));
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
@Override
public void open() {
status = "open";
}
@Override
public void close() {
status = "closed";
}
@Override
public String recvNotice() {
return null;
}
@Override
public String getStat() {
return message + " " + status;
}
@Override
public long getNumOfPendingMessages() {
return 0;
}
@Override
public long checkPause() {
return 0;
}
public List<String> getMessageList() {
return messageList;
}
}
private static Map<String, Sink> getSinkMap(ObjectMapper jsonMapper, String desc) throws Exception {
return jsonMapper.readValue(
desc,
new TypeReference<Map<String, Sink>>() {
});
}
private static Map<String, RoutingInfo> getRoutingMap(ObjectMapper jsonMapper, String desc) throws Exception {
return jsonMapper.readValue(
desc,
new TypeReference<Map<String, RoutingInfo>>() {
});
}
@Test
public void test() throws Exception {
final Properties properties = new Properties();
properties.setProperty(ServerConfig.MESSAGE_ROUTER_THREADS, "1");
Injector injector = LifecycleInjector.builder()
.withModules(
new SuroPlugin() {
@Override
protected void configure() {
bind(ObjectMapper.class).to(DefaultObjectMapper.class);
this.addSinkType("TestSink", TestMessageRouterSink.class);
}
},
new RoutingPlugin()
)
.withBootstrapModule(new BootstrapModule() {
@Override
public void configure(BootstrapBinder binder) {
binder.bindConfigurationProvider().toInstance(new PropertiesConfigurationProvider(properties));
}
}).build().createInjector();
SinkManager sinkManager = startSinkMakager(injector);
startMessageRouter(injector);
MessageSetProcessor queue = injector.getInstance(MessageSetProcessor.class);
queue.setInput(mock(SuroInput.class));
queue.start();
MessageSetBuilder builder = new MessageSetBuilder(new ClientConfig());
for (int i = 0; i < 10; ++i) {
builder.withMessage("topic1", Integer.toString(i).getBytes());
}
// sink1: 10
// default: 10
queue.process(builder.build());
for (int i = 0; i < 5; ++i) {
builder.withMessage("topic2", Integer.toString(i).getBytes());
}
// sink1: 5
queue.process(builder.build());
for (int i = 0; i < 15; ++i) {
builder.withMessage("topic3", Integer.toString(i).getBytes());
}
queue.process(builder.build());
// sink3: 15 with topic3_alias
for (int i = 0; i < 20; ++i) {
builder.withMessage("topic4", Integer.toString(i).getBytes());
}
// default: 20
queue.process(builder.build());
// total sink1: 15, default: 30
int count = 10;
while (!answer() && count > 0) {
Thread.sleep(1000);
--count;
}
assertTrue(count > 0);
queue.shutdown();
sinkManager.shutdown();
}
public static MessageRouter startMessageRouter(Injector injector) throws Exception {
String mapDesc = "{\n" +
" \"topic1\": {\n" +
" \"where\": [\n" +
" {\n" +
" \"sink\": \"sink1\"\n" +
" },\n" +
" {\n" +
" \"sink\": \"default\"\n" +
" }\n" +
" ]\n" +
" },\n" +
" \"topic2\": {\n" +
" \"where\": [\n" +
" {\n" +
" \"sink\": \"sink1\"\n" +
" },\n" +
" {\n" +
" \"sink\": \"sink2\",\n" +
" \"filter\": {\n" +
" \"type\": \"regex\",\n" +
" \"regex\": \"1\"\n" +
" }\n" +
" }\n" +
" ]\n" +
" },\n" +
" \"topic3\": {\n" +
" \"where\": [\n" +
" {\n" +
" \"sink\": \"sink3\",\n" +
" \"alias\": \"topic3_alias\"\n" +
" }\n" +
" ]\n" +
" }\n" +
"}";
RoutingMap routingMap = injector.getInstance(RoutingMap.class);
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
routingMap.set(getRoutingMap(mapper, mapDesc));
return injector.getInstance(MessageRouter.class);
}
public static SinkManager startSinkMakager(Injector injector) throws Exception {
String sinkDesc = "{\n" +
" \"default\": {\n" +
" \"type\": \"TestSink\",\n" +
" \"message\": \"default\"\n" +
" },\n" +
" \"sink1\": {\n" +
" \"type\": \"TestSink\",\n" +
" \"message\": \"sink1\"\n" +
" },\n" +
" \"sink2\": {\n" +
" \"type\": \"TestSink\",\n" +
" \"message\": \"sink2\"\n" +
" },\n" +
" \"sink3\": {\n" +
" \"type\": \"TestSink\",\n" +
" \"message\": \"sink3\"\n" +
" }\n" +
"}";
SinkManager sinkManager = injector.getInstance(SinkManager.class);
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
sinkManager.set(getSinkMap(mapper, sinkDesc));
return sinkManager;
}
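    // Expected totals, derived from the batches and the routing map above:
    // topic1 (10 msgs) goes to sink1 and default; topic2 (5 msgs) goes to sink1,
    // plus one message ("1") to sink2 via the regex filter; topic3 (15 msgs) goes
    // to sink3 under the alias topic3_alias; topic4 (20 msgs) has no route and
    // falls back to default. Hence sink1 = 15, sink2 = 1, sink3 = 15, default = 30.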
private boolean answer() {
Integer sink1 = messageCount.get("sink1");
Integer sink2 = messageCount.get("sink2");
Integer sink3 = messageCount.get("sink3");
Integer defaultV = messageCount.get("default");
        return sink1 != null && sink1 == 15
                && defaultV != null && defaultV == 30
                && sink2 != null && sink2 == 1
                && sink3 != null && sink3 == 15;
}
}
| 1,394 |
0 |
Create_ds/suro/suro-server/src/main/java/com/netflix
|
Create_ds/suro/suro-server/src/main/java/com/netflix/suro/SuroControl.java
|
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro;
import com.google.common.io.Closeables;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.net.ServerSocket;
import java.net.Socket;
/**
 * A simple blocking, socket-based control server that processes user-sent commands.
 * Currently only the line-based "exit" command (case insensitive) is supported.
 */
public class SuroControl {
private static final Logger log = LoggerFactory.getLogger(SuroControl.class);
public void start(int port) throws IOException {
ServerSocket serverSocket;
try {
serverSocket = new ServerSocket(port);
log.info("Suro control service started at port " + port);
} catch (IOException e) {
throw new IOException(String.format("Can't start server socket at port %d for Suro's control service: %s", port, e.getMessage()), e);
}
try{
while (true) {
Socket clientSocket = null;
try{
clientSocket = listen(port, serverSocket);
Command cmd = processCommand(clientSocket);
if(cmd == Command.EXIT) {
return;
}
}finally {
closeAndIgnoreError(clientSocket);
}
}
}finally {
closeAndIgnoreError(serverSocket);
log.info("Suro's control service exited");
}
}
private Socket listen(int port, ServerSocket serverSocket) throws IOException {
try {
return serverSocket.accept();
}catch(IOException e) {
throw new IOException(String.format("Error when Suro control was accepting user connection at port %d: %s", port, e.getMessage()), e);
}
}
    /**
     * Processes user commands. For now it supports only "exit", case insensitive.
     * @param clientSocket the client socket after a connection is established between a client and this server
     *
     * @return the processed command, or {@code null} if the connection ended without a recognized command
     */
private Command processCommand(Socket clientSocket) {
BufferedReader in = null;
BufferedWriter out = null;
try{
in = new BufferedReader(new InputStreamReader(clientSocket.getInputStream()));
out = new BufferedWriter(new OutputStreamWriter(clientSocket.getOutputStream()));
String s;
while ((s = in.readLine()) != null) {
if("exit".equalsIgnoreCase(s.trim())) {
respond(out, "ok");
return Command.EXIT;
}else {
respond(out, String.format("Unknown command '%s'", s));
}
}
}catch (IOException e) {
log.warn(String.format("Failed to accept user command at port %d: %s", clientSocket.getPort(), e.getMessage()), e);
}finally {
try{
Closeables.close(in, true);
Closeables.close(out, true);
} catch(IOException ignored) {
}
}
return null;
}
// Implemented this method for both Socket and ServerSocket because we want to
// make Suro compilable and runnable under Java 6.
private static void closeAndIgnoreError(Socket socket) {
if(socket == null) return;
try{
socket.close();
}catch(IOException e) {
log.warn(String.format("Failed to close the client socket on port %d: %s. Exception ignored.", socket.getPort(), e.getMessage()), e);
}
}
private static void closeAndIgnoreError(ServerSocket socket) {
if(socket == null) return;
try{
socket.close();
}catch(IOException e) {
log.warn(String.format("Failed to close the server socket on port %d: %s. Exception ignored.", socket.getLocalPort(), e.getMessage()), e);
}
}
    /**
     * Writes a line-based response.
     * @param out the channel used to write back the response
     * @param response the response text; a trailing newline is appended before flushing
     * @throws IOException if writing to the client fails
     */
private void respond(BufferedWriter out, String response) throws IOException {
out.append(response);
out.append("\n");
out.flush();
}
private static enum Command {
EXIT("exit")
;
private final String name;
private Command(String name) {
this.name = name;
}
public String getName() {
return name;
}
}
public static void main(String[] args) throws Exception {
SuroControl control = new SuroControl();
control.start(8080);
}
}
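// --- Illustrative sketch (not part of the original source) -------------------
// A minimal client for the line-based control protocol implemented above:
// connect to the control port, send "exit", and read the single-line response
// ("ok" on success, "Unknown command '...'" otherwise). The host "localhost",
// the port 9090 (SuroServer's default control port) and the class name are
// assumptions for illustration; use whatever port was actually passed to start().
class SuroControlClientSketch {
    public static void main(String[] args) throws IOException {
        Socket socket = new Socket("localhost", 9090);
        try {
            BufferedWriter out = new BufferedWriter(new OutputStreamWriter(socket.getOutputStream()));
            BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream()));
            out.append("exit\n");   // the only command SuroControl currently understands
            out.flush();
            System.out.println("control server replied: " + in.readLine());
        } finally {
            socket.close();
        }
    }
}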
| 1,395 |
0 |
Create_ds/suro/suro-server/src/main/java/com/netflix
|
Create_ds/suro/suro-server/src/main/java/com/netflix/suro/SuroModule.java
|
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.AbstractModule;
import com.netflix.suro.aws.PropertyAWSCredentialsProvider;
import com.netflix.suro.input.thrift.MessageSetProcessor;
import com.netflix.suro.input.thrift.ServerConfig;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.routing.RoutingMap;
import com.netflix.suro.server.StatusServer;
import com.netflix.suro.sink.SinkManager;
/**
* Guice module for binding {@link AWSCredentialsProvider},
* Jackson {@link ObjectMapper}, {@link SinkManager}, {@link RoutingMap},
* {@link SuroService}, {@link StatusServer}
*
* @author elandau
*/
public class SuroModule extends AbstractModule {
@Override
protected void configure() {
bind(ObjectMapper.class).to(DefaultObjectMapper.class).asEagerSingleton();
bind(AWSCredentialsProvider.class).to(PropertyAWSCredentialsProvider.class);
bind(SuroService.class).asEagerSingleton();
bind(StatusServer.class).asEagerSingleton();
bind(ServerConfig.class).asEagerSingleton();
bind(MessageSetProcessor.class).asEagerSingleton();
}
}
| 1,396 |
0 |
Create_ds/suro/suro-server/src/main/java/com/netflix
|
Create_ds/suro/suro-server/src/main/java/com/netflix/suro/SuroService.java
|
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro;
import com.google.common.base.Throwables;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.suro.input.InputManager;
import com.netflix.suro.input.thrift.MessageSetProcessor;
import com.netflix.suro.server.StatusServer;
import com.netflix.suro.input.thrift.ThriftServer;
import com.netflix.suro.sink.SinkManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
/**
* Main service for suro to control all subsystems including
* {@link StatusServer}, {@link ThriftServer}, {@link MessageSetProcessor}, and
* {@link SinkManager}
*
* @author elandau
*/
@Singleton
public class SuroService {
    private static final Logger log = LoggerFactory.getLogger(SuroService.class);
private final StatusServer statusServer;
private final InputManager inputManager;
private final SinkManager sinkManager;
@Inject
private SuroService(StatusServer statusServer, InputManager inputManager, SinkManager sinkManager) {
this.statusServer = statusServer;
this.inputManager = inputManager;
this.sinkManager = sinkManager;
}
@PostConstruct
public void start() {
try {
statusServer.start();
sinkManager.initialStart();
inputManager.initialStart();
} catch (Exception e) {
log.error("Exception while starting up server: " + e.getMessage(), e);
Throwables.propagate(e);
}
}
@PreDestroy
public void shutdown() {
try {
inputManager.shutdown();
statusServer.shutdown();
            sinkManager.shutdown();
} catch (Exception e) {
            // ignore exceptions while shutting down, but log them for debugging
log.error("Exception while shutting down SuroServer: " + e.getMessage(), e);
}
}
}
| 1,397 |
0 |
Create_ds/suro/suro-server/src/main/java/com/netflix
|
Create_ds/suro/suro-server/src/main/java/com/netflix/suro/SuroDynamicPropertyModule.java
|
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro;
import com.google.inject.AbstractModule;
import com.netflix.suro.input.DynamicPropertyInputConfigurator;
import com.netflix.suro.routing.DynamicPropertyRoutingMapConfigurator;
import com.netflix.suro.sink.DynamicPropertySinkConfigurator;
/**
* Guice module for binding DynamicProperty based configuration of sink and routing map
*
* @author elandau
*/
public class SuroDynamicPropertyModule extends AbstractModule {
@Override
protected void configure() {
bind(DynamicPropertyInputConfigurator.class).asEagerSingleton();
bind(DynamicPropertySinkConfigurator.class).asEagerSingleton();
bind(DynamicPropertyRoutingMapConfigurator.class).asEagerSingleton();
}
}
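// --- Illustrative sketch (not part of the original source) -------------------
// The configurators bound above are named after Netflix dynamic properties, and
// SuroServer seeds the same keys (e.g. DynamicPropertySinkConfigurator.SINK_PROPERTY)
// from files at startup. Assuming they also subscribe to Archaius property changes
// (suggested by the naming, not shown in this file), an operator could swap the
// sink configuration at runtime as sketched below. The JSON argument is a
// placeholder and ConfigurationManager comes from archaius-core; treat this as a
// hedged sketch, not the project's documented API.
class DynamicSinkReconfigurationSketch {
    static void replaceSinks(String sinkJson) {
        // e.g. sinkJson = "{\"default\":{\"type\":\"TestSink\",\"message\":\"default\"}}"
        com.netflix.config.ConfigurationManager.getConfigInstance()
                .setProperty(DynamicPropertySinkConfigurator.SINK_PROPERTY, sinkJson);
    }
}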
| 1,398 |
0 |
Create_ds/suro/suro-server/src/main/java/com/netflix
|
Create_ds/suro/suro-server/src/main/java/com/netflix/suro/SuroServer.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.google.common.io.Closeables;
import com.google.inject.Injector;
import com.google.inject.Module;
import com.netflix.governator.configuration.PropertiesConfigurationProvider;
import com.netflix.governator.guice.BootstrapBinder;
import com.netflix.governator.guice.BootstrapModule;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.suro.input.DynamicPropertyInputConfigurator;
import com.netflix.suro.input.SuroInputPlugin;
import com.netflix.suro.routing.DynamicPropertyRoutingMapConfigurator;
import com.netflix.suro.routing.RoutingPlugin;
import com.netflix.suro.server.StatusServer;
import com.netflix.suro.sink.DynamicPropertySinkConfigurator;
import com.netflix.suro.sink.ServerSinkPlugin;
import org.apache.commons.cli.*;
import org.apache.commons.io.FileUtils;
import javax.annotation.Nullable;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicReference;
/**
* Command line driver for Suro
*
* @author jbae
* @author elandau
*/
public class SuroServer {
private static final String PROP_PREFIX = "SuroServer.";
private static final int DEFAULT_CONTROL_PORT = 9090;
public static final String OPT_CONTROL_PORT = "controlPort";
public static void main(String[] args) throws IOException {
final AtomicReference<Injector> injector = new AtomicReference<Injector>();
try {
// Parse the command line
Options options = createOptions();
final CommandLine line = new BasicParser().parse(options, args);
// Load the properties file
final Properties properties = new Properties();
if (line.hasOption('p')) {
properties.load(new FileInputStream(line.getOptionValue('p')));
}
// Bind all command line options to the properties with prefix "SuroServer."
for (Option opt : line.getOptions()) {
String name = opt.getOpt();
String value = line.getOptionValue(name);
String propName = PROP_PREFIX + opt.getArgName();
if (propName.equals(DynamicPropertyRoutingMapConfigurator.ROUTING_MAP_PROPERTY)) {
properties.setProperty(DynamicPropertyRoutingMapConfigurator.ROUTING_MAP_PROPERTY,
FileUtils.readFileToString(new File(value)));
} else if (propName.equals(DynamicPropertySinkConfigurator.SINK_PROPERTY)) {
properties.setProperty(DynamicPropertySinkConfigurator.SINK_PROPERTY,
FileUtils.readFileToString(new File(value)));
} else if (propName.equals(DynamicPropertyInputConfigurator.INPUT_CONFIG_PROPERTY)) {
properties.setProperty(DynamicPropertyInputConfigurator.INPUT_CONFIG_PROPERTY,
FileUtils.readFileToString(new File(value)));
} else {
properties.setProperty(propName, value);
}
}
List<Module> extensionModules = null;
if (line.hasOption('x')) {
String moduleFile = line.getOptionValue('x');
List<String> extensionModuleClasses = new ObjectMapper().readValue(
FileUtils.readFileToString(new File(moduleFile)),
new TypeReference<List<String>>(){});
if(extensionModuleClasses != null){
extensionModules = Lists.transform(extensionModuleClasses, new Function<String, Module>() {
@Nullable
@Override
public Module apply(String input) {
try {
return (Module)Class.forName(input).newInstance();
} catch (Throwable e) {
throw new RuntimeException(String.format("Unable to load module class %s", input), e);
}
}
});
}
}
if(extensionModules == null) { //catch-all for either no configuration or empty configuration file
extensionModules = Lists.newArrayList();
}
create(injector, properties, extensionModules.toArray(new Module[extensionModules.size()]));
injector.get().getInstance(LifecycleManager.class).start();
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
try {
Closeables.close(injector.get().getInstance(LifecycleManager.class), true);
} catch (IOException e) {
// do nothing because Closeables.close will swallow IOException
}
}
});
waitForShutdown(getControlPort(options));
} catch (Throwable e) {
            System.err.println("SuroServer startup failed: " + e.getMessage());
            e.printStackTrace(System.err);
            System.exit(-1);
        } finally {
            // injector may be unset if startup failed before create() completed
            if (injector.get() != null) {
                Closeables.close(injector.get().getInstance(LifecycleManager.class), true);
            }
        }
}
public static void create(AtomicReference<Injector> injector, final Properties properties, Module... modules) throws Exception {
// Create the injector
injector.set(LifecycleInjector.builder()
.withBootstrapModule(
new BootstrapModule() {
@Override
public void configure(BootstrapBinder binder) {
binder.bindConfigurationProvider().toInstance(
new PropertiesConfigurationProvider(properties));
}
}
)
.withModules(
new RoutingPlugin(),
new ServerSinkPlugin(),
new SuroInputPlugin(),
new SuroDynamicPropertyModule(),
new SuroModule(),
StatusServer.createJerseyServletModule()
)
.withAdditionalModules(modules)
.build().createInjector());
}
private static void waitForShutdown(int port) throws IOException {
new SuroControl().start(port);
}
private static int getControlPort(Options options) {
Option opt = options.getOption("c");
String value = opt.getValue();
if(value == null) {
return DEFAULT_CONTROL_PORT;
}
return Integer.parseInt(value);
}
@SuppressWarnings("static-access")
private static Options createOptions() {
Option propertyFile = OptionBuilder.withArgName("serverProperty")
.hasArg()
.withDescription("server property file path")
.create('p');
Option mapFile = OptionBuilder.withArgName("routingMap")
.hasArg()
.isRequired(true)
.withDescription("message routing map file path")
.create('m');
Option sinkFile = OptionBuilder.withArgName("sinkConfig" )
.hasArg()
.isRequired(true)
.withDescription("sink")
.create('s');
Option inputFile = OptionBuilder.withArgName("inputConfig" )
.hasArg()
.isRequired(true)
.withDescription("input")
.create('i');
Option accessKey = OptionBuilder.withArgName("AWSAccessKey" )
.hasArg()
.isRequired(false)
.withDescription("AWSAccessKey")
.create('a');
Option secretKey = OptionBuilder.withArgName("AWSSecretKey" )
.hasArg()
.isRequired(false)
.withDescription("AWSSecretKey")
.create('k');
Option controlPort = OptionBuilder.withArgName(OPT_CONTROL_PORT)
.hasArg()
.isRequired(false)
.withDescription("The port used to send command to this server")
.create('c');
Option extensions = OptionBuilder.withArgName("extensions")
.hasArg()
.isRequired(false)
.withDescription("extension module list configuration file")
.create('x');
Options options = new Options();
options.addOption(propertyFile);
options.addOption(mapFile);
options.addOption(sinkFile);
options.addOption(inputFile);
options.addOption(accessKey);
options.addOption(secretKey);
options.addOption(controlPort);
options.addOption(extensions);
return options;
}
}
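// --- Illustrative sketch (not part of the original source) -------------------
// Programmatic startup as an alternative to the command-line path above: seed
// the same property keys that main() fills from the -m/-s/-i files, call
// create(), and start the governator lifecycle. The class and method names and
// the three JSON strings are placeholders for illustration; this is a hedged
// sketch of how the existing create()/LifecycleManager pieces fit together, not
// an officially documented entry point.
class EmbeddedSuroServerSketch {
    static Injector startEmbedded(String routingMapJson, String sinkJson, String inputJson) throws Exception {
        Properties properties = new Properties();
        properties.setProperty(DynamicPropertyRoutingMapConfigurator.ROUTING_MAP_PROPERTY, routingMapJson);
        properties.setProperty(DynamicPropertySinkConfigurator.SINK_PROPERTY, sinkJson);
        properties.setProperty(DynamicPropertyInputConfigurator.INPUT_CONFIG_PROPERTY, inputJson);
        AtomicReference<Injector> injector = new AtomicReference<Injector>();
        SuroServer.create(injector, properties);   // no extension modules
        injector.get().getInstance(LifecycleManager.class).start();
        return injector.get();
    }
}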
| 1,399 |