index
int64 0
0
| repo_id
stringlengths 9
205
| file_path
stringlengths 31
246
| content
stringlengths 1
12.2M
| __index_level_0__
int64 0
10k
|
---|---|---|---|---|
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/participant/MockHelixTaskExecutor.java
|
package org.apache.helix.mock.participant;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.concurrent.Future;
import org.apache.helix.messaging.handling.HelixTaskExecutor;
import org.apache.helix.messaging.handling.HelixTaskResult;
import org.apache.helix.messaging.handling.MessageTask;
/**
 * Mock task executor used by tests to observe whether the completion path
 * ({@link #finishTask}) was exercised and to probe task completion by id.
 */
public class MockHelixTaskExecutor extends HelixTaskExecutor {
  // Flipped to true the first time finishTask() is invoked; read by tests.
  boolean completionInvoked = false;

  @Override
  public void finishTask(MessageTask task) {
    System.out.println("Mocks.MockCMTaskExecutor.finishTask()");
    completionInvoked = true;
  }

  /**
   * Returns whether the task identified by {@code taskId} has completed.
   *
   * BUGFIX: the original dereferenced {@code _taskMap.get(taskId)} without a
   * null check, throwing NPE for an unknown or not-yet-scheduled task id.
   * Such ids now report {@code false}, matching the "no future yet" case.
   *
   * @param taskId id of the task to query
   * @return true iff the task exists, has a future, and that future is done
   */
  public boolean isDone(String taskId) {
    if (!_taskMap.containsKey(taskId)) {
      return false;
    }
    Future<HelixTaskResult> future = _taskMap.get(taskId).getFuture();
    return future != null && future.isDone();
  }
}
| 9,700 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/participant/StoreAccessOneNodeTransition.java
|
package org.apache.helix.mock.participant;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.AccessOption;
import org.apache.helix.HelixManager;
import org.apache.helix.NotificationContext;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.model.Message;
import org.apache.helix.store.zk.ZkHelixPropertyStore;
import org.apache.helix.zookeeper.zkclient.DataUpdater;
import org.apache.helix.zookeeper.zkclient.exception.ZkNoNodeException;
// Mock transition that exercises the Helix property store: on every transition
// it does one get/set round trip and one atomic update against two fixed znodes.
public class StoreAccessOneNodeTransition extends MockTransition {
  @Override
  public void doTransition(Message message, NotificationContext context) {
    ZkHelixPropertyStore<ZNRecord> propertyStore =
        context.getManager().getHelixPropertyStore();
    final String setPath = "/TEST_PERF/set";
    final String updatePath = "/TEST_PERF/update";
    final String partitionKey = message.getPartitionName();
    try {
      // Read the shared znode, creating a fresh record if it does not exist yet,
      // then stamp it and write it back.
      ZNRecord setRecord;
      try {
        setRecord = propertyStore.get(setPath, null, 0);
      } catch (ZkNoNodeException e) {
        setRecord = new ZNRecord(setPath);
      }
      setRecord.setSimpleField("setTimestamp", "" + System.currentTimeMillis());
      propertyStore.set(setPath, setRecord, AccessOption.PERSISTENT);

      // Atomic read-modify-write: record a per-partition timestamp under the
      // shared update znode.
      propertyStore.update(updatePath, new DataUpdater<ZNRecord>() {
        @Override
        public ZNRecord update(ZNRecord existing) {
          ZNRecord result = (existing == null) ? new ZNRecord(updatePath) : existing;
          result.setSimpleField(partitionKey, "" + System.currentTimeMillis());
          return result;
        }
      }, AccessOption.PERSISTENT);
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
}
| 9,701 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/participant/ErrTransition.java
|
package org.apache.helix.mock.participant;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.helix.NotificationContext;
import org.apache.helix.model.Message;
// Simulates an error transition: throws for configured (transition, partition) pairs.
public class ErrTransition extends MockTransition {
  // Upper-cased "FROM-TO" transition name -> partitions that must fail that transition.
  private final Map<String, Set<String>> _errPartitions;

  /**
   * @param errPartitions map from transition name ("fromState-toState", any case)
   *          to the set of partitions that should throw during that transition;
   *          null means no transition ever fails
   */
  public ErrTransition(Map<String, Set<String>> errPartitions) {
    if (errPartitions != null) {
      // Normalize keys to upper case. Iterate entrySet() instead of
      // keySet() + get() to avoid a second lookup per key.
      _errPartitions = new HashMap<String, Set<String>>();
      for (Map.Entry<String, Set<String>> entry : errPartitions.entrySet()) {
        _errPartitions.put(entry.getKey().toUpperCase(), entry.getValue());
      }
    } else {
      _errPartitions = Collections.emptyMap();
    }
  }

  @Override
  public void doTransition(Message message, NotificationContext context) {
    String fromState = message.getFromState();
    String toState = message.getToState();
    String partition = message.getPartitionName();
    String key = (fromState + "-" + toState).toUpperCase();
    // Single get() replaces containsKey() + get() (one lookup instead of two).
    Set<String> badPartitions = _errPartitions.get(key);
    if (badPartitions != null && badPartitions.contains(partition)) {
      String errMsg =
          "IGNORABLE: test throw exception in msgId: " + message.getId() + " for " + partition
              + " transit from " + fromState + " to " + toState;
      throw new RuntimeException(errMsg);
    }
  }
}
| 9,702 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/participant/MockBootstrapStateModel.java
|
package org.apache.helix.mock.participant;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.NotificationContext;
import org.apache.helix.model.Message;
import org.apache.helix.participant.statemachine.StateModel;
import org.apache.helix.participant.statemachine.StateModelInfo;
import org.apache.helix.participant.statemachine.Transition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// mock Bootstrap state model
@StateModelInfo(initialState = "OFFLINE", states = {
    "ONLINE", "BOOTSTRAP", "OFFLINE", "IDLE"
})
public class MockBootstrapStateModel extends StateModel {
  private static Logger LOG = LoggerFactory.getLogger(MockBootstrapStateModel.class);

  // Overwrite the default value of initial state: this model starts in IDLE,
  // not in the OFFLINE state declared by @StateModelInfo.
  MockBootstrapStateModel() {
    _currentState = "IDLE";
  }

  @Transition(to = "OFFLINE", from = "IDLE")
  public void onBecomeOfflineFromIdle(Message message, NotificationContext context) {
    LOG.info("Become OFFLINE from IDLE");
  }

  @Transition(to = "BOOTSTRAP", from = "OFFLINE")
  public void onBecomeBootstrapFromOffline(Message message, NotificationContext context) {
    LOG.info("Become BOOTSTRAP from OFFLINE");
  }

  // BUGFIX: the source state was misspelled "BOOSTRAP", which does not match the
  // "BOOTSTRAP" state declared in @StateModelInfo, so this handler could never be
  // matched against a BOOTSTRAP->ONLINE transition.
  @Transition(to = "ONLINE", from = "BOOTSTRAP")
  public void onBecomeOnlineFromBootstrap(Message message, NotificationContext context) {
    LOG.info("Become ONLINE from BOOTSTRAP");
  }

  @Transition(to = "OFFLINE", from = "ONLINE")
  public void onBecomeOfflineFromOnline(Message message, NotificationContext context) {
    LOG.info("Become OFFLINE from ONLINE");
  }
}
| 9,703 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/participant/MockSchemataStateModel.java
|
package org.apache.helix.mock.participant;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.NotificationContext;
import org.apache.helix.model.Message;
import org.apache.helix.participant.statemachine.StateModel;
import org.apache.helix.participant.statemachine.StateModelInfo;
import org.apache.helix.participant.statemachine.Transition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// Mock state model for STORAGE_DEFAULT_SM_SCHEMATA: every transition handler
// only logs; there is no real state to manage in this test double.
@StateModelInfo(initialState = "OFFLINE", states = {
    "MASTER", "DROPPED", "ERROR"
})
public class MockSchemataStateModel extends StateModel {
  private static final Logger LOG = LoggerFactory.getLogger(MockSchemataStateModel.class);

  /** OFFLINE -> MASTER: log only. */
  @Transition(to = "MASTER", from = "OFFLINE")
  public void onBecomeMasterFromOffline(Message message, NotificationContext context) {
    LOG.info("Become MASTER from OFFLINE");
  }

  /** MASTER -> OFFLINE: log only. */
  @Transition(to = "OFFLINE", from = "MASTER")
  public void onBecomeOfflineFromMaster(Message message, NotificationContext context) {
    LOG.info("Become OFFLINE from MASTER");
  }

  /** OFFLINE -> DROPPED: log only. */
  @Transition(to = "DROPPED", from = "OFFLINE")
  public void onBecomeDroppedFromOffline(Message message, NotificationContext context) {
    LOG.info("Become DROPPED from OFFLINE");
  }

  /** ERROR -> OFFLINE recovery path: log only. */
  @Transition(to = "OFFLINE", from = "ERROR")
  public void onBecomeOfflineFromError(Message message, NotificationContext context) {
    LOG.info("Become OFFLINE from ERROR");
  }
}
| 9,704 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/participant/DummyProcess.java
|
package org.apache.helix.mock.participant;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.OptionGroup;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.NotificationContext;
import org.apache.helix.model.Message;
import org.apache.helix.participant.StateMachineEngine;
import org.apache.helix.participant.statemachine.StateModel;
import org.apache.helix.participant.statemachine.StateModelFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Standalone dummy Helix participant used by tests and scripts. It connects to a
 * ZooKeeper-based cluster manager, registers no-op (log-only) state model factories
 * for MasterSlave, LeaderStandby and OnlineOffline, and blocks until interrupted.
 */
public class DummyProcess {
  private static final Logger logger = LoggerFactory.getLogger(DummyProcess.class);

  // Command-line option names.
  public static final String zkServer = "zkSvr";
  public static final String cluster = "cluster";
  public static final String hostAddress = "host";
  public static final String hostPort = "port";
  public static final String relayCluster = "relayCluster";
  public static final String help = "help";
  public static final String transDelay = "transDelay";
  public static final String helixManagerType = "helixManagerType";

  private final String _zkConnectString;
  private final String _clusterName;
  private final String _instanceName;
  private DummyMasterSlaveStateModelFactory stateModelFactory;
  // Per-transition artificial delay; 0 means no delay.
  private int _transDelayInMs = 0;
  private final String _clusterMangerType;

  /**
   * @param zkConnectString ZooKeeper connect string, e.g. "localhost:2181"
   * @param clusterName Helix cluster to join
   * @param instanceName participant instance name, typically "host_port"
   * @param clusterMangerType cluster manager type; only "zk" is supported
   * @param delay per-transition sleep in ms; values &lt;= 0 mean no delay
   */
  public DummyProcess(String zkConnectString, String clusterName, String instanceName,
      String clusterMangerType, int delay) {
    _zkConnectString = zkConnectString;
    _clusterName = clusterName;
    _instanceName = instanceName;
    _clusterMangerType = clusterMangerType;
    _transDelayInMs = delay > 0 ? delay : 0;
  }

  /** Sleeps for {@code transDelay} ms if positive; restores the interrupt flag if interrupted. */
  static void sleep(long transDelay) {
    try {
      if (transDelay > 0) {
        Thread.sleep(transDelay);
      }
    } catch (InterruptedException e) {
      // FIX: restore the interrupt status instead of swallowing it with printStackTrace().
      Thread.currentThread().interrupt();
    }
  }

  /**
   * Creates the ZK-based Helix manager, registers the dummy state model factories,
   * connects, and returns the connected manager. Caller is responsible for disconnect.
   *
   * @throws IllegalArgumentException if the configured manager type is not "zk"
   */
  public HelixManager start() throws Exception {
    HelixManager manager = null;
    // zk cluster manager
    if (_clusterMangerType.equalsIgnoreCase("zk")) {
      manager =
          HelixManagerFactory.getZKHelixManager(_clusterName, _instanceName,
              InstanceType.PARTICIPANT, _zkConnectString);
    } else {
      throw new IllegalArgumentException("Unsupported cluster manager type:" + _clusterMangerType);
    }
    stateModelFactory = new DummyMasterSlaveStateModelFactory(_transDelayInMs);
    DummyLeaderStandbyStateModelFactory stateModelFactory1 =
        new DummyLeaderStandbyStateModelFactory(_transDelayInMs);
    DummyOnlineOfflineStateModelFactory stateModelFactory2 =
        new DummyOnlineOfflineStateModelFactory(_transDelayInMs);
    StateMachineEngine stateMach = manager.getStateMachineEngine();
    stateMach.registerStateModelFactory("MasterSlave", stateModelFactory);
    stateMach.registerStateModelFactory("LeaderStandby", stateModelFactory1);
    stateMach.registerStateModelFactory("OnlineOffline", stateModelFactory2);
    manager.connect();
    return manager;
  }

  /** Factory producing delay-aware MasterSlave dummy state models. */
  public static class DummyMasterSlaveStateModelFactory extends StateModelFactory<DummyMasterSlaveStateModel> {
    int _delay;

    public DummyMasterSlaveStateModelFactory(int delay) {
      _delay = delay;
    }

    @Override
    public DummyMasterSlaveStateModel createNewStateModel(String resourceName, String stateUnitKey) {
      DummyMasterSlaveStateModel model = new DummyMasterSlaveStateModel();
      model.setDelay(_delay);
      return model;
    }
  }

  /** Factory producing delay-aware LeaderStandby dummy state models. */
  public static class DummyLeaderStandbyStateModelFactory extends
      StateModelFactory<DummyLeaderStandbyStateModel> {
    int _delay;

    public DummyLeaderStandbyStateModelFactory(int delay) {
      _delay = delay;
    }

    @Override
    public DummyLeaderStandbyStateModel createNewStateModel(String resourceName, String stateUnitKey) {
      DummyLeaderStandbyStateModel model = new DummyLeaderStandbyStateModel();
      model.setDelay(_delay);
      return model;
    }
  }

  /** Factory producing delay-aware OnlineOffline dummy state models. */
  public static class DummyOnlineOfflineStateModelFactory extends
      StateModelFactory<DummyOnlineOfflineStateModel> {
    int _delay;

    public DummyOnlineOfflineStateModelFactory(int delay) {
      _delay = delay;
    }

    @Override
    public DummyOnlineOfflineStateModel createNewStateModel(String resourceName, String stateUnitKey) {
      DummyOnlineOfflineStateModel model = new DummyOnlineOfflineStateModel();
      model.setDelay(_delay);
      return model;
    }
  }

  /** MasterSlave state model that sleeps for the configured delay and logs each transition. */
  public static class DummyMasterSlaveStateModel extends StateModel {
    int _transDelay = 0;

    public void setDelay(int delay) {
      _transDelay = delay > 0 ? delay : 0;
    }

    public void onBecomeSlaveFromOffline(Message message, NotificationContext context) {
      String db = message.getPartitionName();
      String instanceName = context.getManager().getInstanceName();
      DummyProcess.sleep(_transDelay);
      logger.info("DummyStateModel.onBecomeSlaveFromOffline(), instance:" + instanceName + ", db:"
          + db);
    }

    public void onBecomeSlaveFromMaster(Message message, NotificationContext context) {
      DummyProcess.sleep(_transDelay);
      logger.info("DummyStateModel.onBecomeSlaveFromMaster()");
    }

    public void onBecomeMasterFromSlave(Message message, NotificationContext context) {
      DummyProcess.sleep(_transDelay);
      logger.info("DummyStateModel.onBecomeMasterFromSlave()");
    }

    public void onBecomeOfflineFromSlave(Message message, NotificationContext context) {
      DummyProcess.sleep(_transDelay);
      logger.info("DummyStateModel.onBecomeOfflineFromSlave()");
    }

    public void onBecomeDroppedFromOffline(Message message, NotificationContext context) {
      DummyProcess.sleep(_transDelay);
      logger.info("DummyStateModel.onBecomeDroppedFromOffline()");
    }
  }

  /** OnlineOffline state model that sleeps for the configured delay and logs each transition. */
  public static class DummyOnlineOfflineStateModel extends StateModel {
    int _transDelay = 0;

    public void setDelay(int delay) {
      _transDelay = delay > 0 ? delay : 0;
    }

    public void onBecomeOnlineFromOffline(Message message, NotificationContext context) {
      String db = message.getPartitionName();
      String instanceName = context.getManager().getInstanceName();
      DummyProcess.sleep(_transDelay);
      logger.info("DummyStateModel.onBecomeOnlineFromOffline(), instance:" + instanceName + ", db:"
          + db);
    }

    public void onBecomeOfflineFromOnline(Message message, NotificationContext context) {
      DummyProcess.sleep(_transDelay);
      logger.info("DummyStateModel.onBecomeOfflineFromOnline()");
    }

    public void onBecomeDroppedFromOffline(Message message, NotificationContext context) {
      DummyProcess.sleep(_transDelay);
      logger.info("DummyStateModel.onBecomeDroppedFromOffline()");
    }
  }

  /** LeaderStandby state model that sleeps for the configured delay and logs each transition. */
  public static class DummyLeaderStandbyStateModel extends StateModel {
    int _transDelay = 0;

    public void setDelay(int delay) {
      _transDelay = delay > 0 ? delay : 0;
    }

    public void onBecomeLeaderFromStandby(Message message, NotificationContext context) {
      String db = message.getPartitionName();
      String instanceName = context.getManager().getInstanceName();
      DummyProcess.sleep(_transDelay);
      logger.info("DummyLeaderStandbyStateModel.onBecomeLeaderFromStandby(), instance:"
          + instanceName + ", db:" + db);
    }

    public void onBecomeStandbyFromLeader(Message message, NotificationContext context) {
      DummyProcess.sleep(_transDelay);
      logger.info("DummyLeaderStandbyStateModel.onBecomeStandbyFromLeader()");
    }

    public void onBecomeDroppedFromOffline(Message message, NotificationContext context) {
      DummyProcess.sleep(_transDelay);
      logger.info("DummyLeaderStandbyStateModel.onBecomeDroppedFromOffline()");
    }

    public void onBecomeStandbyFromOffline(Message message, NotificationContext context) {
      DummyProcess.sleep(_transDelay);
      logger.info("DummyLeaderStandbyStateModel.onBecomeStandbyFromOffline()");
    }

    public void onBecomeOfflineFromStandby(Message message, NotificationContext context) {
      DummyProcess.sleep(_transDelay);
      logger.info("DummyLeaderStandbyStateModel.onBecomeOfflineFromStandby()");
    }
  }

  // TODO hack OptionBuilder is not thread safe
  @SuppressWarnings("static-access")
  synchronized private static Options constructCommandLineOptions() {
    Option helpOption =
        OptionBuilder.withLongOpt(help).withDescription("Prints command-line options info")
            .create();
    Option clusterOption =
        OptionBuilder.withLongOpt(cluster).withDescription("Provide cluster name").create();
    clusterOption.setArgs(1);
    clusterOption.setRequired(true);
    clusterOption.setArgName("Cluster name (Required)");
    Option hostOption =
        OptionBuilder.withLongOpt(hostAddress).withDescription("Provide host name").create();
    hostOption.setArgs(1);
    hostOption.setRequired(true);
    hostOption.setArgName("Host name (Required)");
    Option portOption =
        OptionBuilder.withLongOpt(hostPort).withDescription("Provide host port").create();
    portOption.setArgs(1);
    portOption.setRequired(true);
    portOption.setArgName("Host port (Required)");
    Option cmTypeOption =
        OptionBuilder
            .withLongOpt(helixManagerType)
            .withDescription(
                "Provide cluster manager type (e.g. 'zk', 'static-file', or 'dynamic-file'")
            .create();
    cmTypeOption.setArgs(1);
    cmTypeOption.setRequired(true);
    // FIX: corrected user-facing typo "Clsuter" -> "Cluster" in the help text.
    cmTypeOption
        .setArgName("Cluster manager type (e.g. 'zk', 'static-file', or 'dynamic-file') (Required)");
    Option zkServerOption =
        OptionBuilder.withLongOpt(zkServer).withDescription("Provide zookeeper address").create();
    zkServerOption.setArgs(1);
    zkServerOption.setRequired(true);
    zkServerOption.setArgName("ZookeeperServerAddress(Required for zk-based cluster manager)");
    Option transDelayOption =
        OptionBuilder.withLongOpt(transDelay).withDescription("Provide state trans delay").create();
    transDelayOption.setArgs(1);
    transDelayOption.setRequired(false);
    transDelayOption.setArgName("Delay time in state transition, in MS");
    OptionGroup optionGroup = new OptionGroup();
    optionGroup.addOption(zkServerOption);
    Options options = new Options();
    options.addOption(helpOption);
    options.addOption(clusterOption);
    options.addOption(hostOption);
    options.addOption(portOption);
    options.addOption(transDelayOption);
    options.addOption(cmTypeOption);
    options.addOptionGroup(optionGroup);
    return options;
  }

  /** Prints command-line usage for this process. */
  public static void printUsage(Options cliOptions) {
    HelpFormatter helpFormatter = new HelpFormatter();
    helpFormatter.printHelp("java " + DummyProcess.class.getName(), cliOptions);
  }

  /**
   * Parses command-line args; on parse failure prints usage and exits the JVM
   * with status 1 (the trailing {@code return null} is unreachable in practice).
   */
  public static CommandLine processCommandLineArgs(String[] cliArgs) throws Exception {
    CommandLineParser cliParser = new GnuParser();
    Options cliOptions = constructCommandLineOptions();
    try {
      return cliParser.parse(cliOptions, cliArgs);
    } catch (ParseException pe) {
      System.err.println("CommandLineClient: failed to parse command-line options: "
          + pe.toString());
      printUsage(cliOptions);
      System.exit(1);
    }
    return null;
  }

  public static void main(String[] args) throws Exception {
    // Defaults used when no command-line arguments are given.
    String cmType = "zk";
    String zkConnectString = "localhost:2181";
    String clusterName = "testCluster";
    String instanceName = "localhost_8900";
    int delay = 0;
    if (args.length > 0) {
      CommandLine cmd = processCommandLineArgs(args);
      zkConnectString = cmd.getOptionValue(zkServer);
      clusterName = cmd.getOptionValue(cluster);
      String host = cmd.getOptionValue(hostAddress);
      String portString = cmd.getOptionValue(hostPort);
      int port = Integer.parseInt(portString);
      instanceName = host + "_" + port;
      cmType = cmd.getOptionValue(helixManagerType);
      if (cmd.hasOption(transDelay)) {
        try {
          delay = Integer.parseInt(cmd.getOptionValue(transDelay));
          if (delay < 0) {
            // FIX: 0 is accepted, so the requirement is non-negativity.
            throw new Exception("delay must be non-negative");
          }
        } catch (Exception e) {
          // Best effort: a bad delay falls back to no delay rather than aborting.
          e.printStackTrace();
          delay = 0;
        }
      }
    }
    // Espresso_driver.py will consume this
    logger.info("Dummy process started, instanceName:" + instanceName);
    DummyProcess process =
        new DummyProcess(zkConnectString, clusterName, instanceName, cmType, delay);
    HelixManager manager = process.start();
    try {
      // Block forever; the process is torn down by interrupting this thread.
      Thread.currentThread().join();
    } catch (InterruptedException e) {
      logger.info("participant:" + instanceName + ", " + Thread.currentThread().getName()
          + " interrupted");
      // FIX: restore the interrupt status for any outer handlers.
      Thread.currentThread().interrupt();
    } finally {
      if (manager != null) {
        manager.disconnect();
      }
    }
  }
}
| 9,705 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/participant/MockDelayMSStateModelFactory.java
|
package org.apache.helix.mock.participant;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.participant.statemachine.StateModelFactory;
/**
 * Factory for {@link MockDelayMSStateModel}; each created model shares the
 * factory's configured transition delay.
 */
public class MockDelayMSStateModelFactory extends StateModelFactory<MockDelayMSStateModel> {
  // Delay (ms) passed to every state model this factory creates.
  private long _delay;

  @Override
  public MockDelayMSStateModel createNewStateModel(String resourceName, String partitionKey) {
    return new MockDelayMSStateModel(_delay);
  }

  /** Sets the per-model delay; returns this factory for call chaining. */
  public MockDelayMSStateModelFactory setDelay(long delay) {
    _delay = delay;
    return this;
  }
}
| 9,706 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/participant/StoreAccessDiffNodeTransition.java
|
package org.apache.helix.mock.participant;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.AccessOption;
import org.apache.helix.HelixManager;
import org.apache.helix.NotificationContext;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.model.Message;
import org.apache.helix.store.zk.ZkHelixPropertyStore;
import org.apache.helix.zookeeper.zkclient.DataUpdater;
import org.apache.helix.zookeeper.zkclient.exception.ZkNoNodeException;
// Mock transition that exercises the Helix property store against per-partition
// znodes: one get/set round trip plus one atomic update per transition.
public class StoreAccessDiffNodeTransition extends MockTransition {
  @Override
  public void doTransition(Message message, NotificationContext context) {
    ZkHelixPropertyStore<ZNRecord> propertyStore =
        context.getManager().getHelixPropertyStore();
    final String setPath = "/TEST_PERF/set/" + message.getPartitionName();
    final String updatePath = "/TEST_PERF/update/" + message.getPartitionName();
    try {
      // Read this partition's znode; treat a missing node the same as a null read.
      ZNRecord setRecord = null;
      try {
        setRecord = propertyStore.get(setPath, null, 0);
      } catch (ZkNoNodeException e) {
        // setRecord stays null and is created below
      }
      if (setRecord == null) {
        setRecord = new ZNRecord(setPath);
      }
      setRecord.setSimpleField("setTimestamp", "" + System.currentTimeMillis());
      propertyStore.set(setPath, setRecord, AccessOption.PERSISTENT);

      // Atomic read-modify-write on this partition's update znode.
      propertyStore.update(updatePath, new DataUpdater<ZNRecord>() {
        @Override
        public ZNRecord update(ZNRecord existing) {
          ZNRecord result = (existing == null) ? new ZNRecord(updatePath) : existing;
          result.setSimpleField("updateTimestamp", "" + System.currentTimeMillis());
          return result;
        }
      }, AccessOption.PERSISTENT);
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
}
| 9,707 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/participant/MockDelayMSStateModel.java
|
package org.apache.helix.mock.participant;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.NotificationContext;
import org.apache.helix.model.Message;
import org.apache.helix.participant.statemachine.StateModel;
import org.apache.helix.participant.statemachine.StateModelInfo;
import org.apache.helix.participant.statemachine.Transition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// Mock master-slave state model with a configurable delay:
// a positive delay sleeps in OFFLINE->SLAVE, a negative delay sleeps |delay| ms
// in SLAVE->MASTER.
@StateModelInfo(initialState = "OFFLINE", states = {
    "MASTER", "SLAVE", "ERROR"
})
public class MockDelayMSStateModel extends StateModel {
  private static Logger LOG = LoggerFactory.getLogger(MockDelayMSStateModel.class);
  // Set once at construction; FIX: made final since it is never reassigned.
  private final long _delay;

  public MockDelayMSStateModel(long delay) {
    _delay = delay;
    _cancelled = false;
  }

  @Transition(to = "SLAVE", from = "OFFLINE")
  public void onBecomeSlaveFromOffline(Message message, NotificationContext context) {
    if (_delay > 0) {
      try {
        Thread.sleep(_delay);
      } catch (InterruptedException e) {
        // FIX: restore the interrupt status and preserve the cause in the log
        // instead of silently swallowing the interruption.
        Thread.currentThread().interrupt();
        LOG.error("Failed to sleep for " + _delay, e);
      }
    }
    LOG.info("Become SLAVE from OFFLINE");
  }

  @Transition(to = "MASTER", from = "SLAVE")
  public void onBecomeMasterFromSlave(Message message, NotificationContext context)
      throws InterruptedException {
    if (_delay < 0) {
      Thread.sleep(Math.abs(_delay));
    }
    // NOTE(review): logged at error level although it is not a failure — looks
    // intentional (to stand out in test logs); confirm before changing the level.
    LOG.error("Become MASTER from SLAVE");
  }

  @Transition(to = "OFFLINE", from = "SLAVE")
  public void onBecomeOfflineFromSlave(Message message, NotificationContext context) {
    LOG.info("Become OFFLINE from SLAVE");
  }
}
| 9,708 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/participant/MockBootstrapModelFactory.java
|
package org.apache.helix.mock.participant;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.participant.statemachine.StateModelFactory;
/** Factory producing {@link MockBootstrapStateModel} instances for the mock Bootstrap model. */
public class MockBootstrapModelFactory extends StateModelFactory<MockBootstrapStateModel> {
  @Override
  public MockBootstrapStateModel createNewStateModel(String resourceName, String partitionKey) {
    // The model is stateless, so no per-partition configuration is needed.
    return new MockBootstrapStateModel();
  }
}
| 9,709 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/participant/SleepTransition.java
|
package org.apache.helix.mock.participant;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.NotificationContext;
import org.apache.helix.model.Message;
/** Mock transition that simulates a long-running state transition by sleeping. */
public class SleepTransition extends MockTransition {
  // Sleep duration in milliseconds; negative inputs are clamped to zero.
  private final long _delay;

  public SleepTransition(long delay) {
    _delay = Math.max(delay, 0);
  }

  @Override
  public void doTransition(Message message, NotificationContext context)
      throws InterruptedException {
    Thread.sleep(_delay);
  }
}
| 9,710 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/statemodel/MockStateModelAnnotated.java
|
package org.apache.helix.mock.statemodel;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.NotificationContext;
import org.apache.helix.model.Message;
import org.apache.helix.participant.statemachine.StateModel;
import org.apache.helix.participant.statemachine.StateModelInfo;
import org.apache.helix.participant.statemachine.Transition;
// Annotated state model used to verify that transition methods declared via
// @Transition annotations are discovered and invoked.
// NOTE(review): initialState is spelled "OFFINE" (missing 'L'). Likely a typo for
// "OFFLINE" — left untouched in case a test depends on the exact string; confirm.
@StateModelInfo(states = "{'OFFLINE','SLAVE','MASTER'}", initialState = "OFFINE")
public class MockStateModelAnnotated
    extends StateModel {
  // Flipped to true once any annotated transition method runs.
  public boolean stateModelInvoked = false;

  @Transition(from = "SLAVE", to = "MASTER") public void slaveToMaster(Message msg,
      NotificationContext context) {
    stateModelInvoked = true;
  }

  @Transition(from = "OFFLINE", to = "SLAVE") public void offlineToSlave(Message msg,
      NotificationContext context) {
    stateModelInvoked = true;
  }
}
| 9,711 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/statemodel/MockMasterSlaveStateModel.java
|
package org.apache.helix.mock.statemodel;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.NotificationContext;
import org.apache.helix.model.Message;
import org.apache.helix.participant.statemachine.StateModel;
/**
 * Bare-bones master-slave state model that only records whether any transition
 * callback was invoked.
 */
public class MockMasterSlaveStateModel extends StateModel {
  // Set to true by either transition callback.
  public boolean stateModelInvoked = false;

  public void onBecomeSlaveFromOffline(Message msg, NotificationContext context) {
    stateModelInvoked = true;
  }

  public void onBecomeMasterFromSlave(Message msg, NotificationContext context) {
    stateModelInvoked = true;
  }
}
| 9,712 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/statemodel/MockTaskStateModelFactory.java
|
package org.apache.helix.mock.statemodel;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import com.google.common.annotations.VisibleForTesting;
import org.apache.helix.HelixManager;
import org.apache.helix.participant.statemachine.StateModelFactory;
import org.apache.helix.task.TaskFactory;
/**
 * Factory for {@link MockTaskStateModel} instances; all models share one scheduled
 * executor so tests can control and observe its shutdown.
 */
public class MockTaskStateModelFactory extends StateModelFactory<MockTaskStateModel> {
  private final static int TASK_THREADPOOL_SIZE = 40;

  private final HelixManager _manager;
  private final Map<String, TaskFactory> _taskFactoryRegistry;
  private final ScheduledExecutorService _taskExecutor;

  public MockTaskStateModelFactory(HelixManager manager, Map<String, TaskFactory> taskFactoryRegistry) {
    _manager = manager;
    _taskFactoryRegistry = taskFactoryRegistry;
    // Lambda in place of the anonymous ThreadFactory; the thread name is unchanged.
    _taskExecutor = Executors.newScheduledThreadPool(TASK_THREADPOOL_SIZE,
        runnable -> new Thread(runnable, "Mock-TaskStateModel-thread-pool"));
  }

  @Override
  public MockTaskStateModel createNewStateModel(String resourceName, String partitionKey) {
    return new MockTaskStateModel(_manager, _taskFactoryRegistry, _taskExecutor);
  }

  /** Graceful shutdown: lets already-submitted tasks complete. */
  public void shutdown() {
    _taskExecutor.shutdown();
  }

  public boolean isShutdown() {
    return _taskExecutor.isShutdown();
  }

  public boolean isTerminated() {
    return _taskExecutor.isTerminated();
  }

  /** Hard stop: interrupts running tasks. */
  @VisibleForTesting
  public void shutdownNow() {
    _taskExecutor.shutdownNow();
  }
}
| 9,713 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/statemodel/MockTaskStateModel.java
|
package org.apache.helix.mock.statemodel;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Map;
import java.util.concurrent.ScheduledExecutorService;
import org.apache.helix.HelixManager;
import org.apache.helix.NotificationContext;
import org.apache.helix.model.Message;
import org.apache.helix.task.TaskFactory;
import org.apache.helix.task.TaskStateModel;
/**
 * TaskStateModel variant whose INIT->RUNNING transition blocks for a very long time,
 * simulating a long-running task until the executing thread is interrupted
 * (e.g. by the factory's shutdownNow()).
 */
public class MockTaskStateModel extends TaskStateModel {
  public MockTaskStateModel(HelixManager manager, Map<String, TaskFactory> taskFactoryRegistry,
      ScheduledExecutorService taskExecutor) {
    super(manager, taskFactoryRegistry, taskExecutor);
  }

  @Override
  public void onBecomeRunningFromInit(Message msg, NotificationContext context) {
    try {
      // ~27.7 hours; in practice this sleeps until the test interrupts the thread.
      Thread.sleep(99999999);
    } catch (InterruptedException e) {
      // NOTE(review): the interrupt status is swallowed here before delegating to
      // the real transition; presumably intentional so super still runs — confirm.
    }
    super.onBecomeRunningFromInit(msg, context);
  }
}
| 9,714 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/controller/MockController.java
|
package org.apache.helix.mock.controller;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.TreeMap;
import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState.IdealStateProperty;
import org.apache.helix.model.LiveInstance.LiveInstanceProperty;
import org.apache.helix.model.Message;
import org.apache.helix.model.Message.MessageState;
import org.apache.helix.model.Message.MessageType;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.impl.factory.DedicatedZkClientFactory;
/**
 * Minimal controller stand-in: writes state-transition messages and external views
 * directly into ZooKeeper, bypassing the real Helix controller pipeline.
 */
public class MockController {
  private final HelixZkClient client;
  private final String srcName;
  private final String clusterName;

  public MockController(String src, String zkServer, String cluster) {
    srcName = src;
    clusterName = cluster;
    client = DedicatedZkClientFactory.getInstance()
        .buildZkClient(new HelixZkClient.ZkConnectionConfig(zkServer));
    client.setZkSerializer(new ZNRecordSerializer());
  }

  /**
   * Writes a STATE_TRANSITION message for the given instance/partition directly to ZK.
   * Blocks ~10s between deleting any stale message node and re-creating it so the
   * participant has time to observe the deletion.
   */
  void sendMessage(String msgId, String instanceName, String fromState, String toState,
      String partitionKey, int partitionId) throws InterruptedException, JsonGenerationException,
      JsonMappingException, IOException {
    Message message = new Message(MessageType.STATE_TRANSITION, msgId);
    message.setMsgId(msgId);
    message.setSrcName(srcName);
    message.setTgtName(instanceName);
    message.setMsgState(MessageState.NEW);
    message.setFromState(fromState);
    message.setToState(toState);
    // message.setPartitionId(partitionId);
    message.setPartitionName(partitionKey);
    String path = PropertyPathBuilder.instanceMessage(clusterName, instanceName, message.getId());
    ObjectMapper mapper = new ObjectMapper();
    StringWriter sw = new StringWriter();
    mapper.writeValue(sw, message);
    System.out.println(sw.toString());
    client.delete(path);
    Thread.sleep(10000);
    // Target the live session of the instance so the participant accepts the message.
    ZNRecord record = client.readData(PropertyPathBuilder.liveInstance(clusterName, instanceName));
    message.setTgtSessionId(record.getSimpleField(LiveInstanceProperty.SESSION_ID.toString())
        .toString());
    client.createPersistent(path, message);
  }

  /**
   * Publishes a synthetic external view for {@code dbName}, computed by
   * {@link #computeRoutingTable}.
   */
  public void createExternalView(List<String> instanceNames, int partitions, int replicas,
      String dbName, long randomSeed) {
    // Typed accessor (the original used a raw ZkBaseDataAccessor and carried an
    // unused local TreeMap).
    ZKHelixDataAccessor accessor =
        new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(client));
    Builder keyBuilder = accessor.keyBuilder();
    ExternalView externalView =
        new ExternalView(computeRoutingTable(instanceNames, partitions, replicas, dbName,
            randomSeed));
    accessor.setProperty(keyBuilder.externalView(dbName), externalView);
  }

  /**
   * Computes a deterministic (for a given seed) MASTER/SLAVE assignment. The replicas
   * of a partition land on the nodes following its master in sorted order.
   * NOTE: sorts {@code instanceNames} in place, mutating the caller's list.
   */
  public ZNRecord computeRoutingTable(List<String> instanceNames, int partitions, int replicas,
      String dbName, long randomSeed) {
    assert (instanceNames.size() > replicas);
    Collections.sort(instanceNames);
    ZNRecord result = new ZNRecord(dbName);
    List<Integer> partitionList = new ArrayList<Integer>(partitions);
    for (int i = 0; i < partitions; i++) {
      // Integer.valueOf instead of the deprecated new Integer(int) constructor.
      partitionList.add(Integer.valueOf(i));
    }
    Random rand = new Random(randomSeed);
    // Shuffle the partition list
    Collections.shuffle(partitionList, rand);
    for (int i = 0; i < partitionList.size(); i++) {
      int partitionId = partitionList.get(i);
      Map<String, String> partitionAssignment = new TreeMap<String, String>();
      int masterNode = i % instanceNames.size();
      // the first in the list is the node that contains the master
      partitionAssignment.put(instanceNames.get(masterNode), "MASTER");
      // for the jth replica, we put it on (masterNode + j) % nodes-th node
      for (int j = 1; j <= replicas; j++) {
        partitionAssignment
            .put(instanceNames.get((masterNode + j) % instanceNames.size()), "SLAVE");
      }
      String partitionName = dbName + ".partition-" + partitionId;
      result.setMapField(partitionName, partitionAssignment);
    }
    result.setSimpleField(IdealStateProperty.NUM_PARTITIONS.toString(), "" + partitions);
    return result;
  }
}
| 9,715 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/controller/MockControllerProcess.java
|
package org.apache.helix.mock.controller;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.databind.JsonMappingException;
public class MockControllerProcess {
  /**
   * Drives a scripted sequence of state transitions against a locally running
   * ZooKeeper (localhost:2181) for manual testing.
   * @throws JsonGenerationException
   * @throws JsonMappingException
   * @throws InterruptedException
   * @throws IOException
   */
  public static void main(String[] args) throws JsonGenerationException, JsonMappingException,
      InterruptedException, IOException {
    MockController storageController =
        new MockController("cm-instance-0", "localhost:2181", "storage-cluster");
    MockController relayController =
        new MockController("cm-instance-0", "localhost:2181", "relay-cluster");
    // relay0 .. relay4
    ArrayList<String> instanceNames = new ArrayList<String>();
    for (int i = 0; i < 5; i++) {
      instanceNames.add("relay" + i);
    }
    relayController.createExternalView(instanceNames, 10, 2, "EspressoDB", 0);
    // Walk one partition through offline -> slave -> master -> slave.
    storageController.sendMessage("TestMessageId1", "localhost_8900", "Offline", "Slave",
        "EspressoDB.partition-0", 0);
    Thread.sleep(10000);
    storageController.sendMessage("TestMessageId2", "localhost_8900", "Slave", "Master",
        "EspressoDB.partition-0", 0);
    Thread.sleep(10000);
    storageController.sendMessage("TestMessageId3", "localhost_8900", "Master", "Slave",
        "EspressoDB.partition-0", 0);
    Thread.sleep(10000);
    // Publish a reshuffled external view so the consumer re-picks a relay, then
    // take the partition offline.
    relayController.createExternalView(instanceNames, 10, 2, "EspressoDB", 10);
    storageController.sendMessage("TestMessageId4", "localhost_8900", "Slave", "Offline",
        "EspressoDB.partition-0", 0);
    Thread.sleep(10000);
  }
}
| 9,716 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/mock/spectator/MockSpectatorProcess.java
|
package org.apache.helix.mock.spectator;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.spectator.RoutingTableProvider;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.zookeeper.zkclient.IDefaultNameSpace;
import org.apache.helix.zookeeper.zkclient.ZkClient;
import org.apache.helix.zookeeper.zkclient.ZkServer;
/**
 * A MockSpectatorProcess to demonstrate the integration with cluster manager.
 * This uses Zookeeper in local mode and runs at port 2188.
 */
public class MockSpectatorProcess {
  private static final int port = 2188;
  static long runId = System.currentTimeMillis();
  private static final String dataDir = "/tmp/zkDataDir-" + runId;
  private static final String logDir = "/tmp/zkLogDir-" + runId;
  static String clusterName = "mock-cluster-" + runId;
  static String zkConnectString = "localhost:2188";
  private final RoutingTableProvider _routingTableProvider;
  private static ZkServer zkServer;

  public MockSpectatorProcess() {
    _routingTableProvider = new RoutingTableProvider();
  }

  public static void main(String[] args) throws Exception {
    setup();
    zkServer.getZkClient().setZkSerializer(new ZNRecordSerializer());
    // Reuse the ideal state record as a fake external view payload.
    ZNRecord record =
        zkServer.getZkClient().readData(PropertyPathBuilder.idealState(clusterName, "TestDB"));
    String externalViewPath = PropertyPathBuilder.externalView(clusterName, "TestDB");
    MockSpectatorProcess process = new MockSpectatorProcess();
    process.start();
    // try to route, there is no master or slave available
    process.routeRequest("TestDB", "TestDB_1");
    // update the externalview on zookeeper
    zkServer.getZkClient().createPersistent(externalViewPath, record);
    // sleep for sometime so that the ZK Callback is received.
    Thread.sleep(1000);
    process.routeRequest("TestDB", "TestDB_1");
    // NOTE(review): exits with status 1 even on success — presumably to force the
    // embedded ZkServer's non-daemon threads down; confirm before changing to 0.
    System.exit(1);
  }

  /** Starts an embedded ZkServer and lays out a fresh test cluster for this run. */
  private static void setup() {
    IDefaultNameSpace defaultNameSpace = new IDefaultNameSpace() {
      @Override
      public void createDefaultNameSpace(ZkClient client) {
        // Start from a clean slate for this run's cluster.
        client.deleteRecursive("/" + clusterName);
      }
    };
    zkServer = new ZkServer(dataDir, logDir, defaultNameSpace, port);
    zkServer.start();
    ClusterSetup clusterSetup = new ClusterSetup(zkConnectString);
    clusterSetup.setupTestCluster(clusterName);
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Preserve the interrupt status instead of silently discarding it.
      Thread.currentThread().interrupt();
      e.printStackTrace();
    }
  }

  /**
   * Prints the MASTER and SLAVE instances currently routable for the given
   * database partition.
   */
  public void routeRequest(String database, String partition) {
    printInstances("masters", _routingTableProvider.getInstances(database, partition, "MASTER"));
    printInstances("slaves", _routingTableProvider.getInstances(database, partition, "SLAVE"));
  }

  // Shared printer for routeRequest; the output format is identical for both roles
  // (the original duplicated this logic for masters and slaves).
  private static void printInstances(String role, List<InstanceConfig> instances) {
    if (instances != null && !instances.isEmpty()) {
      System.out.println("Available " + role + " to route request");
      for (InstanceConfig config : instances) {
        System.out.println("HostName:" + config.getHostName() + " Port:" + config.getPort());
      }
    } else {
      System.out.println("No " + role + " available to route request");
    }
  }

  /** Connects as a SPECTATOR and registers the routing table for external-view updates. */
  public void start() {
    try {
      HelixManager manager =
          HelixManagerFactory.getZKHelixManager(clusterName, null, InstanceType.SPECTATOR,
              zkConnectString);
      manager.connect();
      manager.addExternalViewChangeListener(_routingTableProvider);
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
}
| 9,717 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/healthcheck/TestParticipantHealthReportCollectorImpl.java
|
package org.apache.helix.healthcheck;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Map;
import org.apache.helix.HelixManager;
import org.apache.helix.mock.MockManager;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
public class TestParticipantHealthReportCollectorImpl {
  protected ParticipantHealthReportCollectorImpl _providerImpl;
  protected ParticipantHealthReportTask _providerTask;
  protected HelixManager _manager;
  protected MockHealthReportProvider _mockProvider;

  /** No-op health report provider used only to exercise add/remove bookkeeping. */
  public static class MockHealthReportProvider extends HealthReportProvider {
    @Override
    public Map<String, String> getRecentHealthReport() {
      return null; // no report data needed for these tests
    }

    @Override
    public void resetStats() {
      // nothing to reset
    }
  }

  @BeforeMethod()
  public void setup() {
    _providerImpl = new ParticipantHealthReportCollectorImpl(new MockManager(), "instance_123");
    _providerTask = new ParticipantHealthReportTask(_providerImpl);
    _mockProvider = new MockHealthReportProvider();
  }

  /** Starting twice must be a safe no-op the second time. */
  @Test()
  public void testStart() throws Exception {
    _providerTask.start();
    _providerTask.start();
  }

  /** Stopping twice (even without a prior start) must not throw. */
  @Test()
  public void testStop() throws Exception {
    _providerTask.stop();
    _providerTask.stop();
  }

  /** Removing an unregistered provider and double-adding must both be tolerated. */
  @Test()
  public void testAddProvider() throws Exception {
    _providerImpl.removeHealthReportProvider(_mockProvider);
    _providerImpl.addHealthReportProvider(_mockProvider);
    _providerImpl.addHealthReportProvider(_mockProvider);
  }

  /** Double-removal after a single add must be tolerated. */
  @Test()
  public void testRemoveProvider() throws Exception {
    _providerImpl.addHealthReportProvider(_mockProvider);
    _providerImpl.removeHealthReportProvider(_mockProvider);
    _providerImpl.removeHealthReportProvider(_mockProvider);
  }
}
| 9,718 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/changedetector/TestResourceChangeDetector.java
|
package org.apache.helix.controller.changedetector;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import org.apache.helix.AccessOption;
import org.apache.helix.HelixConstants.ChangeType;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.PropertyKey;
import org.apache.helix.TestHelper;
import org.apache.helix.common.ZkTestBase;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.tools.ClusterVerifiers.HelixClusterVerifier;
import org.apache.helix.tools.ClusterVerifiers.StrictMatchExternalViewVerifier;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * This test contains a series of unit tests for ResourceChangeDetector, driven
 * against a real ZK-backed mini cluster.
 */
public class TestResourceChangeDetector extends ZkTestBase {
  // All possible change types for ResourceChangeDetector except for ClusterConfig
  // since we don't provide the names of changed fields for ClusterConfig
  private static final ChangeType[] RESOURCE_CHANGE_TYPES =
      {ChangeType.IDEAL_STATE, ChangeType.INSTANCE_CONFIG, ChangeType.LIVE_INSTANCE, ChangeType.RESOURCE_CONFIG, ChangeType.CLUSTER_CONFIG};
  // Cluster and resource naming used throughout the fixture.
  private static final String CLUSTER_NAME = TestHelper.getTestClassName();
  private static final String RESOURCE_NAME = "TestDB";
  private static final String NEW_RESOURCE_NAME = "TestDB2";
  private static final String STATE_MODEL = "MasterSlave";
  // There are 5 possible change types for ResourceChangeDetector
  private static final int NUM_CHANGE_TYPES = 5;
  // Cluster topology knobs consumed by TestHelper.setupCluster in beforeClass.
  private static final int NUM_RESOURCES = 1;
  private static final int NUM_PARTITIONS = 10;
  private static final int NUM_REPLICAS = 3;
  private static final int NUM_NODES = 5;
  // Create a mock of ResourceControllerDataProvider so that we could manipulate it
  private ResourceControllerDataProvider _dataProvider;
  private ResourceChangeDetector _resourceChangeDetector;
  // Controller and participants backing the ZK-based test cluster.
  private ClusterControllerManager _controller;
  private MockParticipantManager[] _participants = new MockParticipantManager[NUM_NODES];
  // Direct ZK access for manipulating cluster metadata inside tests.
  private HelixDataAccessor _dataAccessor;
  private PropertyKey.Builder _keyBuilder;
@BeforeClass
public void beforeClass() throws Exception {
super.beforeClass();
// Set up a mock cluster
TestHelper.setupCluster(CLUSTER_NAME, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
RESOURCE_NAME, // resource name prefix
NUM_RESOURCES, // resources
NUM_PARTITIONS, // partitions per resource
NUM_NODES, // nodes
NUM_REPLICAS, // replicas
STATE_MODEL, true); // do rebalance
// Start a controller
_controller = new ClusterControllerManager(ZK_ADDR, CLUSTER_NAME, "controller_0");
_controller.syncStart();
// Start Participants
for (int i = 0; i < NUM_NODES; i++) {
String instanceName = "localhost_" + (12918 + i);
_participants[i] = new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, instanceName);
_participants[i].syncStart();
}
_dataAccessor = new ZKHelixDataAccessor(CLUSTER_NAME, _baseAccessor);
_keyBuilder = _dataAccessor.keyBuilder();
_resourceChangeDetector = new ResourceChangeDetector();
// Create a custom data provider
_dataProvider = new ResourceControllerDataProvider(CLUSTER_NAME);
}
  @AfterClass
  public void afterClass() throws Exception {
    // Stop participants before the controller so no new transitions are issued.
    for (MockParticipantManager participant : _participants) {
      if (participant != null && participant.isConnected()) {
        participant.syncStop();
      }
    }
    _controller.syncStop();
    deleteCluster(CLUSTER_NAME);
    // verify() polls until the predicate becomes true or the 20s timeout elapses;
    // asserting false therefore checks the cluster znode never reappears after deletion.
    Assert.assertFalse(TestHelper.verify(() -> _dataAccessor.getBaseDataAccessor()
        .exists("/" + CLUSTER_NAME, AccessOption.PERSISTENT), 20000L));
  }
/**
* Tests the initialization of the change detector. It should tell us that there's been changes
* for every change type and for all items per type.
* @throws Exception
*/
@Test
public void testResourceChangeDetectorInit() {
_dataProvider.refresh(_dataAccessor);
_resourceChangeDetector.updateSnapshots(_dataProvider);
Collection<ChangeType> changeTypes = _resourceChangeDetector.getChangeTypes();
Assert.assertEquals(changeTypes.size(), NUM_CHANGE_TYPES,
"Not all change types have been detected for ResourceChangeDetector!");
// Check that the right amount of resources show up as added
checkDetectionCounts(_resourceChangeDetector, ChangeType.IDEAL_STATE, NUM_RESOURCES, 0, 0);
// Check that the right amount of instances show up as added
checkDetectionCounts(_resourceChangeDetector, ChangeType.LIVE_INSTANCE, NUM_NODES, 0, 0);
checkDetectionCounts(_resourceChangeDetector, ChangeType.INSTANCE_CONFIG, NUM_NODES, 0, 0);
// Check that the right amount of cluster config item show up
checkDetectionCounts(_resourceChangeDetector, ChangeType.CLUSTER_CONFIG, 1, 0, 0);
}
/**
 * Add a resource (IdealState and ResourceConfig) and verify the detector reports exactly one
 * addition for each of the two resource-related change types and nothing else.
 */
@Test(dependsOnMethods = "testResourceChangeDetectorInit")
public void testAddResource() {
// Create an IS and ResourceConfig
_gSetupTool.getClusterManagementTool()
.addResource(CLUSTER_NAME, NEW_RESOURCE_NAME, NUM_PARTITIONS, STATE_MODEL);
ResourceConfig resourceConfig = new ResourceConfig(NEW_RESOURCE_NAME);
_dataAccessor.setProperty(_keyBuilder.resourceConfig(NEW_RESOURCE_NAME), resourceConfig);
// Manually notify the dataProvider, since there is no live controller pipeline in this test
_dataProvider.notifyDataChange(ChangeType.IDEAL_STATE);
_dataProvider.notifyDataChange(ChangeType.RESOURCE_CONFIG);
// Refresh the data provider
_dataProvider.refresh(_dataAccessor);
// Update the detector
_resourceChangeDetector.updateSnapshots(_dataProvider);
checkChangeTypes(ChangeType.IDEAL_STATE, ChangeType.RESOURCE_CONFIG);
// Check the counts: one addition for the two affected types, zero for everything else
for (ChangeType type : RESOURCE_CHANGE_TYPES) {
if (type == ChangeType.IDEAL_STATE || type == ChangeType.RESOURCE_CONFIG) {
checkDetectionCounts(_resourceChangeDetector, type, 1, 0, 0);
} else {
checkDetectionCounts(_resourceChangeDetector, type, 0, 0, 0);
}
}
// Check that the detector reports the newly added resource by name
Assert.assertTrue(_resourceChangeDetector.getAdditionsByType(ChangeType.RESOURCE_CONFIG)
.contains(NEW_RESOURCE_NAME));
}
/**
 * Modify the resource config for the new resource and verify the detector reports exactly one
 * modification for RESOURCE_CONFIG and nothing else.
 */
@Test(dependsOnMethods = "testAddResource")
public void testModifyResource() {
// Modify resource config by adding a new simple field
ResourceConfig resourceConfig =
_dataAccessor.getProperty(_keyBuilder.resourceConfig(NEW_RESOURCE_NAME));
resourceConfig.getRecord().setSimpleField("Did I change?", "Yes!");
_dataAccessor.updateProperty(_keyBuilder.resourceConfig(NEW_RESOURCE_NAME), resourceConfig);
// Notify data provider and check
_dataProvider.notifyDataChange(ChangeType.RESOURCE_CONFIG);
_dataProvider.refresh(_dataAccessor);
_resourceChangeDetector.updateSnapshots(_dataProvider);
checkChangeTypes(ChangeType.RESOURCE_CONFIG);
// Check the counts: one modification for RESOURCE_CONFIG only
for (ChangeType type : RESOURCE_CHANGE_TYPES) {
if (type == ChangeType.RESOURCE_CONFIG) {
checkDetectionCounts(_resourceChangeDetector, type, 0, 1, 0);
} else {
checkDetectionCounts(_resourceChangeDetector, type, 0, 0, 0);
}
}
Assert.assertTrue(_resourceChangeDetector.getChangesByType(ChangeType.RESOURCE_CONFIG)
.contains(NEW_RESOURCE_NAME));
}
/**
 * Delete the new resource (IdealState and ResourceConfig) and verify the detector reports
 * exactly one removal for each of the two resource-related change types.
 */
@Test(dependsOnMethods = "testModifyResource")
public void testDeleteResource() {
// Delete the newly added resource
_dataAccessor.removeProperty(_keyBuilder.idealStates(NEW_RESOURCE_NAME));
_dataAccessor.removeProperty(_keyBuilder.resourceConfig(NEW_RESOURCE_NAME));
// Notify data provider and check
_dataProvider.notifyDataChange(ChangeType.IDEAL_STATE);
_dataProvider.notifyDataChange(ChangeType.RESOURCE_CONFIG);
_dataProvider.refresh(_dataAccessor);
_resourceChangeDetector.updateSnapshots(_dataProvider);
checkChangeTypes(ChangeType.RESOURCE_CONFIG, ChangeType.IDEAL_STATE);
// Check the counts: one removal for the two affected types, zero for everything else
for (ChangeType type : RESOURCE_CHANGE_TYPES) {
if (type == ChangeType.IDEAL_STATE || type == ChangeType.RESOURCE_CONFIG) {
checkDetectionCounts(_resourceChangeDetector, type, 0, 0, 1);
} else {
checkDetectionCounts(_resourceChangeDetector, type, 0, 0, 0);
}
}
}
/**
 * Disconnect and then reconnect a Participant and verify the detector reports the transient
 * LIVE_INSTANCE removal followed by a LIVE_INSTANCE addition.
 */
@Test(dependsOnMethods = "testDeleteResource")
public void testDisconnectReconnectInstance() {
// Disconnect a Participant; its LiveInstance znode disappears
_participants[0].syncStop();
_dataProvider.notifyDataChange(ChangeType.LIVE_INSTANCE);
_dataProvider.refresh(_dataAccessor);
_resourceChangeDetector.updateSnapshots(_dataProvider);
checkChangeTypes(ChangeType.LIVE_INSTANCE);
// Check the counts: one LIVE_INSTANCE removal only
for (ChangeType type : RESOURCE_CHANGE_TYPES) {
if (type == ChangeType.LIVE_INSTANCE) {
checkDetectionCounts(_resourceChangeDetector, type, 0, 0, 1);
} else {
checkDetectionCounts(_resourceChangeDetector, type, 0, 0, 0);
}
}
// Reconnect the Participant under the same instance name
_participants[0] = new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, "localhost_12918");
_participants[0].syncStart();
_dataProvider.notifyDataChange(ChangeType.LIVE_INSTANCE);
_dataProvider.refresh(_dataAccessor);
_resourceChangeDetector.updateSnapshots(_dataProvider);
checkChangeTypes(ChangeType.LIVE_INSTANCE);
// Check the counts: one LIVE_INSTANCE addition only
for (ChangeType type : RESOURCE_CHANGE_TYPES) {
if (type == ChangeType.LIVE_INSTANCE) {
checkDetectionCounts(_resourceChangeDetector, type, 1, 0, 0);
} else {
checkDetectionCounts(_resourceChangeDetector, type, 0, 0, 0);
}
}
}
/**
 * Remove an instance completely (LiveInstance and InstanceConfig) and verify the detector
 * reports one removal for both LIVE_INSTANCE and INSTANCE_CONFIG. The instance is re-added and
 * restarted afterwards to restore the test environment for subsequent tests.
 */
@Test(dependsOnMethods = "testDisconnectReconnectInstance")
public void testRemoveInstance() throws Exception {
// Capture the name once; _participants[0] is stopped and later replaced below, so all
// subsequent references consistently use this local instead of re-reading the array slot.
String instanceName = _participants[0].getInstanceName();
_participants[0].syncStop();
InstanceConfig instanceConfig =
_dataAccessor.getProperty(_keyBuilder.instanceConfig(instanceName));
_gSetupTool.getClusterManagementTool().dropInstance(CLUSTER_NAME, instanceConfig);
// Verify that the instance znode has been removed before refreshing the data provider
Assert.assertTrue(TestHelper.verify(
() -> _dataAccessor.getProperty(_dataAccessor.keyBuilder().instance(instanceName)) == null,
TestHelper.WAIT_DURATION));
_dataProvider.notifyDataChange(ChangeType.LIVE_INSTANCE);
_dataProvider.notifyDataChange(ChangeType.INSTANCE_CONFIG);
_dataProvider.refresh(_dataAccessor);
_resourceChangeDetector.updateSnapshots(_dataProvider);
checkChangeTypes(ChangeType.LIVE_INSTANCE, ChangeType.INSTANCE_CONFIG);
// Check the counts: exactly one removal for the two instance-related change types
for (ChangeType type : RESOURCE_CHANGE_TYPES) {
if (type == ChangeType.LIVE_INSTANCE || type == ChangeType.INSTANCE_CONFIG) {
checkDetectionCounts(_resourceChangeDetector, type, 0, 0, 1);
} else {
checkDetectionCounts(_resourceChangeDetector, type, 0, 0, 0);
}
}
// Recover the environment: re-register the instance and restart its participant
_gSetupTool.addInstanceToCluster(CLUSTER_NAME, instanceName);
_participants[0] = new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, instanceName);
_participants[0].syncStart();
}
/**
 * Modify the cluster config (topology field) and verify the detector reports exactly one
 * CLUSTER_CONFIG modification and nothing else.
 */
@Test(dependsOnMethods = "testRemoveInstance")
public void testModifyClusterConfig() {
// Modify cluster config
ClusterConfig clusterConfig = _dataAccessor.getProperty(_keyBuilder.clusterConfig());
clusterConfig.setTopology("Change");
_dataAccessor.updateProperty(_keyBuilder.clusterConfig(), clusterConfig);
_dataProvider.notifyDataChange(ChangeType.CLUSTER_CONFIG);
_dataProvider.refresh(_dataAccessor);
_resourceChangeDetector.updateSnapshots(_dataProvider);
checkChangeTypes(ChangeType.CLUSTER_CONFIG);
// Check the counts for other types
for (ChangeType type : RESOURCE_CHANGE_TYPES) {
if (type == ChangeType.CLUSTER_CONFIG) {
checkDetectionCounts(_resourceChangeDetector, type, 0, 1, 0);
} else {
checkDetectionCounts(_resourceChangeDetector, type, 0, 0, 0);
}
}
}
/**
 * Test that the change detector gives empty results when there are no changes after updating
 * snapshots. Runs the check twice to confirm the no-change result is stable across runs.
 */
@Test(dependsOnMethods = "testModifyClusterConfig")
public void testNoChange() {
// Test twice to make sure that no change is stable across different runs
for (int i = 0; i < 2; i++) {
_dataProvider.refresh(_dataAccessor);
_resourceChangeDetector.updateSnapshots(_dataProvider);
Assert.assertEquals(_resourceChangeDetector.getChangeTypes().size(), 0);
// Every change type must report zero additions, changes, and removals
for (ChangeType type : RESOURCE_CHANGE_TYPES) {
checkDetectionCounts(_resourceChangeDetector, type, 0, 0, 0);
}
}
}
/**
 * Modify IdealState mapping fields for a FULL_AUTO resource and an instance's "enabled" flag,
 * and verify that a detector constructed with the ignore-non-topology option reports the change
 * type but zero changed items (i.e. these Helix-managed fields are trimmed before comparison).
 */
@Test(dependsOnMethods = "testNoChange")
public void testIgnoreNonTopologyChanges() {
// Modify cluster config and IdealState to ensure the mapping field of the IdealState will be
// considered as the fields that are modified by Helix logic.
ClusterConfig clusterConfig = _dataAccessor.getProperty(_keyBuilder.clusterConfig());
clusterConfig.setPersistBestPossibleAssignment(true);
_dataAccessor.updateProperty(_keyBuilder.clusterConfig(), clusterConfig);
// Create a new IdealState for a FULL_AUTO resource
String resourceName = "Resource" + TestHelper.getTestMethodName();
_gSetupTool.getClusterManagementTool()
.addResource(CLUSTER_NAME, resourceName, NUM_PARTITIONS, STATE_MODEL);
IdealState idealState = _dataAccessor.getProperty(_keyBuilder.idealStates(resourceName));
idealState.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
idealState.getRecord().getMapFields().put("Partition1", new HashMap<>());
_dataAccessor.updateProperty(_keyBuilder.idealStates(resourceName), idealState);
Arrays.stream(ChangeType.values()).forEach(type -> {
_dataProvider.notifyDataChange(type);
});
_dataProvider.refresh(_dataAccessor);
// Test with the ignore-non-topology-change option set to true
ResourceChangeDetector changeDetector = new ResourceChangeDetector(true);
changeDetector.updateSnapshots(_dataProvider);
// 1. Modify ideal state map fields of a FULL_AUTO resource: type is flagged, items are not
idealState.getRecord().getMapFields().put("Partition1", Collections.singletonMap("foo", "bar"));
_dataAccessor.updateProperty(_keyBuilder.idealStates(resourceName), idealState);
_dataProvider.notifyDataChange(ChangeType.IDEAL_STATE);
_dataProvider.refresh(_dataAccessor);
changeDetector.updateSnapshots(_dataProvider);
Assert.assertEquals(changeDetector.getChangeTypes(),
Collections.singleton(ChangeType.IDEAL_STATE));
checkDetectionCounts(changeDetector, ChangeType.IDEAL_STATE, 0, 0, 0);
// 2. Modify an instance "enabled" state: same expectation — flagged type, zero items
String instanceName = _participants[0].getInstanceName();
InstanceConfig instanceConfig =
_dataAccessor.getProperty(_keyBuilder.instanceConfig(instanceName));
Assert.assertTrue(instanceConfig.getInstanceEnabled());
try {
instanceConfig.setInstanceEnabled(false);
_dataAccessor.updateProperty(_keyBuilder.instanceConfig(instanceName), instanceConfig);
_dataProvider.notifyDataChange(ChangeType.INSTANCE_CONFIG);
_dataProvider.refresh(_dataAccessor);
changeDetector.updateSnapshots(_dataProvider);
Assert.assertEquals(changeDetector.getChangeTypes(),
Collections.singleton(ChangeType.INSTANCE_CONFIG));
checkDetectionCounts(changeDetector, ChangeType.INSTANCE_CONFIG, 0, 0, 0);
} finally {
// Remove the newly added resource/IdealState and re-enable the instance
_gSetupTool.getClusterManagementTool().dropResource(CLUSTER_NAME, resourceName);
instanceConfig.setInstanceEnabled(true);
_dataAccessor.updateProperty(_keyBuilder.instanceConfig(instanceName), instanceConfig);
}
}
/**
 * Verifies that resetSnapshots() clears the detector's cached snapshot, so that the next
 * updateSnapshots() call reports all existing data as new additions again.
 */
@Test(dependsOnMethods = "testIgnoreNonTopologyChanges")
public void testResetSnapshots() {
// Ensure the cluster converged before the test so the IdealState is not modified unexpectedly.
// Note: renamed from "_clusterVerifier" — the underscore prefix is this file's convention for
// member fields, not locals.
HelixClusterVerifier clusterVerifier =
new StrictMatchExternalViewVerifier.Builder(CLUSTER_NAME).setZkClient(_gZkClient)
.setDeactivatedNodeAwareness(true)
.setResources(new HashSet<>(_dataAccessor.getChildNames(_keyBuilder.idealStates())))
.setWaitTillVerify(TestHelper.DEFAULT_REBALANCE_PROCESSING_WAIT_TIME).build();
try {
Assert.assertTrue(clusterVerifier.verify());
} finally {
clusterVerifier.close();
}
// Initialize a new detector with the existing data; everything appears as an addition
ResourceChangeDetector changeDetector = new ResourceChangeDetector();
_dataProvider.notifyDataChange(ChangeType.IDEAL_STATE);
_dataProvider.refresh(_dataAccessor);
changeDetector.updateSnapshots(_dataProvider);
checkDetectionCounts(changeDetector, ChangeType.IDEAL_STATE, 1, 0, 0);
// Update the detector with old data; since nothing changed, the result shall be empty
changeDetector.updateSnapshots(_dataProvider);
checkDetectionCounts(changeDetector, ChangeType.IDEAL_STATE, 0, 0, 0);
// Reset the snapshots
changeDetector.resetSnapshots();
// After reset, all the data in the data provider will be treated as new changes
changeDetector.updateSnapshots(_dataProvider);
checkDetectionCounts(changeDetector, ChangeType.IDEAL_STATE, 1, 0, 0);
}
/**
 * Asserts that every one of the given change types was reported by the shared detector.
 * @param types the change types expected to appear in the detector's result
 */
private void checkChangeTypes(ChangeType... types) {
Collection<ChangeType> detected = _resourceChangeDetector.getChangeTypes();
for (ChangeType expected : types) {
Assert.assertTrue(detected.contains(expected));
}
}
/**
 * Convenience method for checking the three detection categories reported by the detector for a
 * single change type.
 * @param changeDetector the detector under test
 * @param changeType the change type to inspect
 * @param additions expected number of added items
 * @param changes expected number of modified items
 * @param deletions expected number of removed items
 */
private void checkDetectionCounts(ChangeDetector changeDetector, ChangeType changeType,
int additions, int changes, int deletions) {
Assert.assertEquals(changeDetector.getAdditionsByType(changeType).size(), additions);
Assert.assertEquals(changeDetector.getChangesByType(changeType).size(), changes);
Assert.assertEquals(changeDetector.getRemovalsByType(changeType).size(), deletions);
}
}
| 9,719 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/changedetector
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/changedetector/trimmer/TestHelixPropoertyTimmer.java
|
package org.apache.helix.controller.changedetector.trimmer;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.helix.HelixConstants;
import org.apache.helix.HelixProperty;
import org.apache.helix.controller.changedetector.ResourceChangeDetector;
import org.apache.helix.controller.changedetector.trimmer.HelixPropertyTrimmer.FieldType;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.ResourceConfig;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import static org.mockito.Mockito.when;
/**
 * Unit test verifying that Helix-internal ("trimmable") fields of cluster metadata do NOT
 * trigger ResourceChangeDetector changes, while user-facing ("non-trimmable") fields DO.
 * Uses a Mockito-backed ResourceControllerDataProvider serving in-memory mock configs.
 * NOTE(review): the class name contains typos ("Propoerty", "Timmer"); renaming would require
 * renaming the file as well, so it is only flagged here.
 */
public class TestHelixPropoertyTimmer {
private final String CLUSTER_NAME = "CLUSTER";
private final String INSTANCE_NAME = "INSTANCE";
private final String RESOURCE_NAME = "RESOURCE";
private final String PARTITION_NAME = "DEFAULT_PARTITION";
// Mock cluster metadata shared by all tests; rebuilt from scratch in beforeMethod()
private final Set<HelixConstants.ChangeType> _changeTypes = new HashSet<>();
private final Map<String, InstanceConfig> _instanceConfigMap = new HashMap<>();
private final Map<String, IdealState> _idealStateMap = new HashMap<>();
private final Map<String, ResourceConfig> _resourceConfigMap = new HashMap<>();
private ClusterConfig _clusterConfig;
private ResourceControllerDataProvider _dataProvider;
/**
 * Rebuilds the mock cluster (one instance, one FULL_AUTO resource with one partition) and the
 * mocked data provider before each test method.
 */
@BeforeMethod
public void beforeMethod() {
_changeTypes.clear();
_instanceConfigMap.clear();
_idealStateMap.clear();
_resourceConfigMap.clear();
_changeTypes.add(HelixConstants.ChangeType.INSTANCE_CONFIG);
_changeTypes.add(HelixConstants.ChangeType.IDEAL_STATE);
_changeTypes.add(HelixConstants.ChangeType.RESOURCE_CONFIG);
_changeTypes.add(HelixConstants.ChangeType.CLUSTER_CONFIG);
InstanceConfig instanceConfig = new InstanceConfig(INSTANCE_NAME);
instanceConfig.setInstanceEnabledForPartition(RESOURCE_NAME, PARTITION_NAME, false);
fillKeyValues(instanceConfig);
_instanceConfigMap.put(INSTANCE_NAME, instanceConfig);
IdealState idealState = new IdealState(RESOURCE_NAME);
idealState.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
idealState.setPreferenceList(PARTITION_NAME, new ArrayList<>());
idealState.setPartitionState(PARTITION_NAME, INSTANCE_NAME, "LEADER");
fillKeyValues(idealState);
_idealStateMap.put(RESOURCE_NAME, idealState);
ResourceConfig resourceConfig = new ResourceConfig(RESOURCE_NAME);
Map<String, List<String>> configPreferenceList = new HashMap<>();
configPreferenceList.put(PARTITION_NAME, new ArrayList<>());
resourceConfig.setPreferenceLists(configPreferenceList);
fillKeyValues(resourceConfig);
_resourceConfigMap.put(RESOURCE_NAME, resourceConfig);
_clusterConfig = new ClusterConfig(CLUSTER_NAME);
fillKeyValues(_clusterConfig);
_dataProvider =
getMockDataProvider(_changeTypes, _instanceConfigMap, _idealStateMap, _resourceConfigMap,
_clusterConfig);
}
// Fill the testing helix property to ensure that we have at least one sample in every field
// type (simple, map, and list).
private void fillKeyValues(HelixProperty helixProperty) {
helixProperty.getRecord().setSimpleField("MockFieldKey", "MockValue");
helixProperty.getRecord()
.setMapField("MockFieldKey", Collections.singletonMap("MockKey", "MockValue"));
helixProperty.getRecord().setListField("MockFieldKey", Collections.singletonList("MockValue"));
}
// Builds a Mockito-backed data provider that serves the given in-memory cluster metadata.
private ResourceControllerDataProvider getMockDataProvider(
Set<HelixConstants.ChangeType> changeTypes, Map<String, InstanceConfig> instanceConfigMap,
Map<String, IdealState> idealStateMap, Map<String, ResourceConfig> resourceConfigMap,
ClusterConfig clusterConfig) {
ResourceControllerDataProvider dataProvider =
Mockito.mock(ResourceControllerDataProvider.class);
when(dataProvider.getRefreshedChangeTypes()).thenReturn(changeTypes);
when(dataProvider.getInstanceConfigMap()).thenReturn(instanceConfigMap);
when(dataProvider.getIdealStates()).thenReturn(idealStateMap);
when(dataProvider.getResourceConfigMap()).thenReturn(resourceConfigMap);
when(dataProvider.getClusterConfig()).thenReturn(clusterConfig);
when(dataProvider.getLiveInstances()).thenReturn(Collections.emptyMap());
return dataProvider;
}
/**
 * Every non-trimmable field change must be reported by the detector, for all four config types.
 * Also verifies IdealState list/map field trimmability per rebalance mode.
 */
@Test
public void testDetectNonTrimmableFieldChanges() {
// Fill mock data to initialize the detector
ResourceChangeDetector detector = new ResourceChangeDetector(true);
detector.updateSnapshots(_dataProvider);
// Verify that all the non-trimmable field changes will be detected
// 1. Cluster Config
changeNonTrimmableValuesAndVerifyDetector(
ClusterConfigTrimmer.getInstance().getNonTrimmableFields(_clusterConfig), _clusterConfig,
HelixConstants.ChangeType.CLUSTER_CONFIG, detector, _dataProvider);
// 2. Ideal States
for (IdealState idealState : _idealStateMap.values()) {
changeNonTrimmableValuesAndVerifyDetector(
IdealStateTrimmer.getInstance().getNonTrimmableFields(idealState), idealState,
HelixConstants.ChangeType.IDEAL_STATE, detector, _dataProvider);
modifyListMapfieldKeysAndVerifyDetector(idealState, HelixConstants.ChangeType.IDEAL_STATE,
detector, _dataProvider);
// Additional test to ensure Ideal State map/list fields are detected correctly according to
// the rebalance mode.
// For the following test, we can only focus on the smaller scope defined by the following map.
Map<FieldType, Set<String>> overwriteFieldMap = new HashMap<>();
// SEMI_AUTO: List fields are non-trimmable
idealState.setRebalanceMode(IdealState.RebalanceMode.SEMI_AUTO);
// refresh the detector cache after modification to avoid unexpected change detected.
detector.updateSnapshots(_dataProvider);
overwriteFieldMap.put(FieldType.LIST_FIELD, Collections.singleton(PARTITION_NAME));
changeNonTrimmableValuesAndVerifyDetector(overwriteFieldMap, idealState,
HelixConstants.ChangeType.IDEAL_STATE, detector, _dataProvider);
// CUSTOMIZED: Map fields are non-trimmable
idealState.setRebalanceMode(IdealState.RebalanceMode.CUSTOMIZED);
// refresh the detector cache after modification to avoid unexpected change detected.
detector.updateSnapshots(_dataProvider);
overwriteFieldMap.clear();
overwriteFieldMap.put(FieldType.MAP_FIELD, Collections.singleton(PARTITION_NAME));
changeNonTrimmableValuesAndVerifyDetector(overwriteFieldMap, idealState,
HelixConstants.ChangeType.IDEAL_STATE, detector, _dataProvider);
}
// 3. Resource Config
for (ResourceConfig resourceConfig : _resourceConfigMap.values()) {
changeNonTrimmableValuesAndVerifyDetector(
ResourceConfigTrimmer.getInstance().getNonTrimmableFields(resourceConfig), resourceConfig,
HelixConstants.ChangeType.RESOURCE_CONFIG, detector, _dataProvider);
modifyListMapfieldKeysAndVerifyDetector(resourceConfig,
HelixConstants.ChangeType.RESOURCE_CONFIG, detector, _dataProvider);
}
// 4. Instance Config
for (InstanceConfig instanceConfig : _instanceConfigMap.values()) {
changeNonTrimmableValuesAndVerifyDetector(
InstanceConfigTrimmer.getInstance().getNonTrimmableFields(instanceConfig), instanceConfig,
HelixConstants.ChangeType.INSTANCE_CONFIG, detector, _dataProvider);
modifyListMapfieldKeysAndVerifyDetector(instanceConfig,
HelixConstants.ChangeType.INSTANCE_CONFIG, detector, _dataProvider);
}
}
/**
 * Every trimmable field change must be ignored by the detector, for all four config types.
 */
@Test
public void testIgnoreTrimmableFieldChanges() {
// Fill mock data to initialize the detector
ResourceChangeDetector detector = new ResourceChangeDetector(true);
detector.updateSnapshots(_dataProvider);
// Verify that all the trimmable field changes will not be detected
// 1. Cluster Config
changeTrimmableValuesAndVerifyDetector(FieldType.values(), _clusterConfig, detector,
_dataProvider);
// 2. Ideal States
for (IdealState idealState : _idealStateMap.values()) {
changeTrimmableValuesAndVerifyDetector(FieldType.values(), idealState, detector,
_dataProvider);
// Additional test to ensure Ideal State map/list fields are detected correctly according to
// the rebalance mode.
// SEMI_AUTO: List fields are non-trimmable, so only simple and map fields are trimmable
idealState.setRebalanceMode(IdealState.RebalanceMode.SEMI_AUTO);
// refresh the detector cache after modification to avoid unexpected change detected.
detector.updateSnapshots(_dataProvider);
changeTrimmableValuesAndVerifyDetector(
new FieldType[]{FieldType.SIMPLE_FIELD, FieldType.MAP_FIELD}, idealState, detector,
_dataProvider);
// CUSTOMIZED: Map fields are non-trimmable, so only simple and list fields are trimmable
// (original comment incorrectly claimed list fields were non-trimmable here)
idealState.setRebalanceMode(IdealState.RebalanceMode.CUSTOMIZED);
// refresh the detector cache after modification to avoid unexpected change detected.
detector.updateSnapshots(_dataProvider);
changeTrimmableValuesAndVerifyDetector(
new FieldType[]{FieldType.SIMPLE_FIELD, FieldType.LIST_FIELD}, idealState, detector,
_dataProvider);
}
// 3. Resource Config
for (ResourceConfig resourceConfig : _resourceConfigMap.values()) {
// Preference lists in the list fields are non-trimmable
changeTrimmableValuesAndVerifyDetector(
new FieldType[]{FieldType.SIMPLE_FIELD, FieldType.MAP_FIELD}, resourceConfig, detector,
_dataProvider);
}
// 4. Instance Config
for (InstanceConfig instanceConfig : _instanceConfigMap.values()) {
changeTrimmableValuesAndVerifyDetector(FieldType.values(), instanceConfig, detector,
_dataProvider);
}
}
// Adds brand-new list/map field keys to the property and asserts exactly one change is detected
// for the expected type: new keys must always be detected regardless of trimmability.
private void modifyListMapfieldKeysAndVerifyDetector(HelixProperty helixProperty,
HelixConstants.ChangeType expectedChangeType, ResourceChangeDetector detector,
ResourceControllerDataProvider dataProvider) {
helixProperty.getRecord()
.setListField(helixProperty.getId() + "NewListField", Collections.singletonList("foobar"));
helixProperty.getRecord()
.setMapField(helixProperty.getId() + "NewMapField", Collections.singletonMap("foo", "bar"));
detector.updateSnapshots(dataProvider);
for (HelixConstants.ChangeType changeType : HelixConstants.ChangeType.values()) {
Assert.assertEquals(detector.getChangesByType(changeType).size(),
changeType == expectedChangeType ? 1 : 0,
String.format("Any key changes in the List or Map fields shall be detected!"));
}
}
// Mutates each listed non-trimmable field in turn and asserts exactly one change (no additions
// or removals) is detected for the expected change type after every mutation.
// NOTE(review): String.format is used here with no format arguments; a plain string literal
// would be equivalent.
private void changeNonTrimmableValuesAndVerifyDetector(
Map<FieldType, Set<String>> nonTrimmableFieldMap, HelixProperty helixProperty,
HelixConstants.ChangeType expectedChangeType, ResourceChangeDetector detector,
ResourceControllerDataProvider dataProvider) {
for (FieldType type : nonTrimmableFieldMap.keySet()) {
for (String fieldKey : nonTrimmableFieldMap.get(type)) {
switch (type) {
case LIST_FIELD:
helixProperty.getRecord().setListField(fieldKey, Collections.singletonList("foobar"));
break;
case MAP_FIELD:
helixProperty.getRecord().setMapField(fieldKey, Collections.singletonMap("foo", "bar"));
break;
case SIMPLE_FIELD:
helixProperty.getRecord().setSimpleField(fieldKey, "foobar");
break;
default:
Assert.fail("Unknown field type " + type.name());
}
detector.updateSnapshots(dataProvider);
for (HelixConstants.ChangeType changeType : HelixConstants.ChangeType.values()) {
Assert.assertEquals(detector.getAdditionsByType(changeType).size(), 0,
String.format("There should not be any additional change detected!"));
Assert.assertEquals(detector.getRemovalsByType(changeType).size(), 0,
String.format("There should not be any removal change detected!"));
Assert.assertEquals(detector.getChangesByType(changeType).size(),
changeType == expectedChangeType ? 1 : 0,
String.format("The detected change of %s is not as expected.", fieldKey));
}
}
}
}
// Mutates values under EXISTING keys of each listed trimmable field type and asserts that the
// detector reports no change at all (new keys would be detected, so only values are touched).
private void changeTrimmableValuesAndVerifyDetector(FieldType[] trimmableFieldTypes,
HelixProperty helixProperty, ResourceChangeDetector detector,
ResourceControllerDataProvider dataProvider) {
for (FieldType type : trimmableFieldTypes) {
switch (type) {
case LIST_FIELD:
// Modify value if key exists.
// Note if adding new keys, then the change will be detected regardless of the content.
helixProperty.getRecord().getListFields().keySet().stream().forEach(key -> {
helixProperty.getRecord().setListField(key,
Collections.singletonList("new-foobar" + System.currentTimeMillis()));
});
break;
case MAP_FIELD:
// Modify value if key exists.
// Note if adding new keys, then the change will be detected regardless of the content.
helixProperty.getRecord().getMapFields().keySet().stream().forEach(key -> {
helixProperty.getRecord().setMapField(key,
Collections.singletonMap("new-foo", "bar" + System.currentTimeMillis()));
});
break;
case SIMPLE_FIELD:
helixProperty.getRecord()
.setSimpleField("TrimmableSimpleField", "foobar" + System.currentTimeMillis());
break;
default:
Assert.fail("Unknown field type " + type.name());
}
detector.updateSnapshots(dataProvider);
for (HelixConstants.ChangeType changeType : HelixConstants.ChangeType.values()) {
Assert.assertEquals(
detector.getAdditionsByType(changeType).size() + detector.getRemovalsByType(changeType)
.size() + detector.getChangesByType(changeType).size(), 0, String.format(
"There should not be any change detected for the trimmable field changes!"));
}
}
}
}
| 9,720 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/dataproviders/TestResourceControllerDataProvider.java
|
package org.apache.helix.controller.dataproviders;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.google.common.collect.ImmutableSet;
import org.apache.helix.model.IdealState;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit test for the stable partition list cache of {@code ResourceControllerDataProvider}:
 * the cached partition order must stay stable across refreshes as long as the membership is
 * unchanged, must be rebuilt when membership changes, and must be evicted when the resource's
 * IdealState disappears.
 */
public class TestResourceControllerDataProvider {
@Test
public void testStablePartitionListCache() {
String resourceName = "TestResource";
// Two memberships sharing two names and differing in one ("Partiton" spelling is test data)
Set<String> partitionSetA = ImmutableSet.of("Partiton1", "Partiton2", "Partiton3");
Set<String> partitionSetB = ImmutableSet.of("Partiton1", "Partiton2", "Partiton4");
Map<String, IdealState> idealStateMap = new HashMap<>();
IdealState is = mock(IdealState.class);
when(is.getPartitionSet()).thenReturn(partitionSetA);
when(is.getResourceName()).thenReturn(resourceName);
idealStateMap.put(resourceName, is);
ResourceControllerDataProvider dataProvider = new ResourceControllerDataProvider();
// No cache entry exists before the first refresh
Assert.assertNull(dataProvider.getStablePartitionList(resourceName));
// 1. Test refresh and get stable list
dataProvider.refreshStablePartitionList(idealStateMap);
List<String> cachedPartitionListA = dataProvider.getStablePartitionList(resourceName);
Assert.assertEquals(cachedPartitionListA.size(), partitionSetA.size());
Assert.assertTrue(cachedPartitionListA.containsAll(partitionSetA));
Set<String> partitionSetAWithDifferentOrder = new LinkedHashSet<>();
partitionSetAWithDifferentOrder.add("Partiton3");
partitionSetAWithDifferentOrder.add("Partiton2");
partitionSetAWithDifferentOrder.add("Partiton1");
// Verify that iterating this set produces a different order than the cached list
List<String> tmpPartitionList = new ArrayList<>(partitionSetAWithDifferentOrder);
Assert.assertNotEquals(cachedPartitionListA, tmpPartitionList);
// Verify that the cached stable partition list still returns the same order even after a
// refresh with the same membership iterated in a different order
when(is.getPartitionSet()).thenReturn(partitionSetAWithDifferentOrder);
dataProvider.refreshStablePartitionList(idealStateMap);
Assert.assertEquals(dataProvider.getStablePartitionList(resourceName), cachedPartitionListA);
// 2. Test that the cache is updated if items in the list have been changed
when(is.getPartitionSet()).thenReturn(partitionSetB);
dataProvider.refreshStablePartitionList(idealStateMap);
List<String> cachedPartitionListB = dataProvider.getStablePartitionList(resourceName);
Assert.assertEquals(cachedPartitionListB.size(), partitionSetB.size());
Assert.assertTrue(cachedPartitionListB.containsAll(partitionSetB));
// 3. Test removing the cache entry once the IdealState has been removed
idealStateMap.clear();
dataProvider.refreshStablePartitionList(idealStateMap);
Assert.assertNull(dataProvider.getStablePartitionList(resourceName));
}
}
| 9,721 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/dataproviders/TestWorkflowControllerDataProvider.java
|
package org.apache.helix.controller.dataproviders;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import org.apache.helix.AccessOption;
import org.apache.helix.PropertyKey;
import org.apache.helix.TestHelper;
import org.apache.helix.integration.task.TaskTestBase;
import org.apache.helix.integration.task.TaskTestUtil;
import org.apache.helix.integration.task.WorkflowGenerator;
import org.apache.helix.task.JobConfig;
import org.apache.helix.task.JobQueue;
import org.apache.helix.task.RuntimeJobDag;
import org.apache.helix.task.TaskUtil;
import org.apache.helix.task.Workflow;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestWorkflowControllerDataProvider extends TaskTestBase {
/**
 * Verifies that WorkflowControllerDataProvider picks up job configs, workflow configs, and
 * contexts as workflows are started.
 * NOTE(review): expected context counts (2, then 5) appear to include workflow-level contexts
 * in addition to job contexts — confirm against TaskDataCache accounting if these change.
 */
@Test
public void testResourceConfigRefresh() throws Exception {
// Start a single-job workflow "TEST"
Workflow.Builder builder = new Workflow.Builder("TEST");
JobConfig.Builder jobBuilder = JobConfig.Builder.fromMap(WorkflowGenerator.DEFAULT_JOB_CONFIG);
builder.addJob(WorkflowGenerator.JOB_NAME_1, jobBuilder);
_driver.start(builder.build());
WorkflowControllerDataProvider cache =
new WorkflowControllerDataProvider("CLUSTER_" + TestHelper.getTestClassName());
// Poll until the cache reflects one job config, one workflow config, and two contexts
boolean expectedValuesAchieved = TestHelper.verify(() -> {
cache.requireFullRefresh();
cache.refresh(_manager.getHelixDataAccessor());
int configMapSize = cache.getJobConfigMap().size();
int workflowConfigMapSize = cache.getWorkflowConfigMap().size();
int contextsSize = cache.getContexts().size();
return (configMapSize == 1 && workflowConfigMapSize == 1 && contextsSize == 2);
}, TestHelper.WAIT_DURATION);
Assert.assertTrue(expectedValuesAchieved);
// Start a second, two-job workflow "TEST1" with a parent-child dependency
builder = new Workflow.Builder("TEST1");
builder.addParentChildDependency(WorkflowGenerator.JOB_NAME_1, WorkflowGenerator.JOB_NAME_2);
builder.addJob(WorkflowGenerator.JOB_NAME_1, jobBuilder);
builder.addJob(WorkflowGenerator.JOB_NAME_2, jobBuilder);
_driver.start(builder.build());
// Cumulative totals across both workflows: 3 job configs, 2 workflow configs, 5 contexts
expectedValuesAchieved = TestHelper.verify(() -> {
cache.requireFullRefresh();
cache.refresh(_manager.getHelixDataAccessor());
int configMapSize = cache.getJobConfigMap().size();
int workflowConfigMapSize = cache.getWorkflowConfigMap().size();
int contextsSize = cache.getContexts().size();
return (configMapSize == 3 && workflowConfigMapSize == 2 && contextsSize == 5);
}, TestHelper.WAIT_DURATION);
Assert.assertTrue(expectedValuesAchieved);
}
@Test (dependsOnMethods = "testResourceConfigRefresh")
public void testRuntimeDagRefresh() throws Exception {
String jobQueueName = TestHelper.getTestMethodName();
JobQueue.Builder builder = TaskTestUtil.buildJobQueue(jobQueueName);
JobConfig.Builder jobBuilder = JobConfig.Builder.fromMap(WorkflowGenerator.DEFAULT_JOB_CONFIG);
builder.enqueueJob(WorkflowGenerator.JOB_NAME_1, jobBuilder);
String jobName1 = TaskUtil.getNamespacedJobName(jobQueueName, WorkflowGenerator.JOB_NAME_1);
_driver.start(builder.build());
WorkflowControllerDataProvider cache =
new WorkflowControllerDataProvider("CLUSTER_" + TestHelper.getTestClassName());
Assert.assertTrue(TestHelper.verify(() -> {
cache.requireFullRefresh();
cache.refresh(_manager.getHelixDataAccessor());
return cache.getTaskDataCache().getJobConfig(jobName1) != null;
}, TestHelper.WAIT_DURATION));
RuntimeJobDag runtimeJobDag = cache.getTaskDataCache().getRuntimeJobDag(jobQueueName);
Assert.assertEquals(Collections.singleton(jobName1), runtimeJobDag.getAllNodes());
// Mimic job running
runtimeJobDag.getNextJob();
// Add job config without adding it to the dag
String danglingJobName = TaskUtil.getNamespacedJobName(jobQueueName, "DanglingJob");
JobConfig danglingJobConfig = new JobConfig(danglingJobName, jobBuilder.build());
PropertyKey.Builder keyBuilder = _manager.getHelixDataAccessor().keyBuilder();
_baseAccessor
.create(keyBuilder.resourceConfig(danglingJobName).getPath(), danglingJobConfig.getRecord(),
AccessOption.PERSISTENT);
// There shouldn't be a refresh to runtime dag. The dag should only contain one job and the job is inflight.
Assert.assertTrue(TestHelper.verify(() -> {
cache.requireFullRefresh();
cache.refresh(_manager.getHelixDataAccessor());
return cache.getTaskDataCache().getJobConfig(danglingJobName) != null;
}, TestHelper.WAIT_DURATION));
runtimeJobDag = cache.getTaskDataCache().getRuntimeJobDag(jobQueueName);
Assert.assertEquals(Collections.singleton(jobName1), runtimeJobDag.getAllNodes());
Assert.assertEquals(Collections.singleton(jobName1), runtimeJobDag.getInflightJobList());
_driver.enqueueJob(jobQueueName, WorkflowGenerator.JOB_NAME_2, jobBuilder);
String jobName2 = TaskUtil.getNamespacedJobName(jobQueueName, WorkflowGenerator.JOB_NAME_2);
// There should be a refresh to runtime dag.
Assert.assertTrue(TestHelper.verify(() -> {
cache.requireFullRefresh();
cache.refresh(_manager.getHelixDataAccessor());
return cache.getTaskDataCache().getJobConfig(jobName2) != null;
}, TestHelper.WAIT_DURATION));
runtimeJobDag = cache.getTaskDataCache().getRuntimeJobDag(jobQueueName);
Assert.assertEquals(new HashSet<>(Arrays.asList(jobName1, jobName2)),
runtimeJobDag.getAllNodes());
Assert.assertEquals(Collections.emptyList(), runtimeJobDag.getInflightJobList());
}
}
| 9,722 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestStateTransitionPriority.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import org.apache.helix.api.config.StateTransitionThrottleConfig;
import org.apache.helix.controller.common.PartitionStateMap;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
/**
 * Tests that {@link IntermediateStateCalcStage} schedules throttled state transitions in the
 * order dictated by resource-level priority (via the configured priority field) and by
 * partition-level priority. Throttling is configured to admit one transition per pipeline
 * pass, so the order in which resources/partitions reach their target state reveals the
 * priority order, which is compared against the expectations loaded from JSON input files.
 */
public class TestStateTransitionPriority extends BaseStageTest {
  public static final String RESOURCE = "Resource";
  public static final String PARTITION = "Partition";

  // TODO : Reenable this when throttling enabled for recovery rebalance
  @Test(dataProvider = "ResourceLevelPriority", enabled = false)
  public void testResourceLevelPriorityForRecoveryBalance(
      Map<String, String> resourceMap, String priorityField, List<String> expectedPriority) {
    preSetup(StateTransitionThrottleConfig.RebalanceType.RECOVERY_BALANCE, resourceMap.keySet(),
        priorityField, 10, 1);
    event.addAttribute(AttributeName.RESOURCES.name(),
        getResourceMap(resourceMap.keySet().toArray(new String[resourceMap.keySet().size()]), 1,
            "MasterSlave"));
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(),
        getResourceMap(resourceMap.keySet().toArray(new String[resourceMap.keySet().size()]), 1,
            "MasterSlave"));

    // Initialize bestpossible state and current state: every resource has one partition in
    // OFFLINE that must be recovered to SLAVE.
    BestPossibleStateOutput bestPossibleStateOutput = new BestPossibleStateOutput();
    MessageOutput messageSelectOutput = new MessageOutput();
    CurrentStateOutput currentStateOutput = new CurrentStateOutput();
    for (String resource : resourceMap.keySet()) {
      IdealState is = accessor.getProperty(accessor.keyBuilder().idealStates(resource));
      // Tag the resource with its priority value under the configured priority field.
      is.getRecord().setSimpleField(priorityField, resourceMap.get(resource));
      setSingleIdealState(is);

      Map<String, List<String>> partitionMap = new HashMap<>();
      Partition partition = new Partition(resource + "_0");
      String instanceName = HOSTNAME_PREFIX + resource.split("_")[1];
      partitionMap.put(partition.getPartitionName(),
          Collections.singletonList(instanceName));
      bestPossibleStateOutput.setPreferenceLists(resource, partitionMap);
      bestPossibleStateOutput.setState(resource, partition, instanceName, "SLAVE");
      messageSelectOutput.addMessage(resource, partition, generateMessage("OFFLINE", "SLAVE", instanceName));
      currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
    }

    event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), bestPossibleStateOutput);
    event.addAttribute(AttributeName.MESSAGES_SELECTED.name(), messageSelectOutput);
    event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
    runStage(event, new ReadClusterDataStage());

    // Keep update the current state. Each pass should admit exactly one resource; record the
    // order in which resources get recovered.
    List<String> resourcePriority = new ArrayList<>();
    for (int i = 0; i < resourceMap.size(); i++) {
      runStage(event, new IntermediateStateCalcStage());
      updateCurrentStatesForRecoveryBalance(resourcePriority, currentStateOutput);
    }

    Assert.assertEquals(resourcePriority, expectedPriority);
  }

  @Test(dataProvider = "ResourceLevelPriority")
  public void testResourceLevelPriorityForLoadBalance(
      Map<String, String> resourceMap, String priorityField, List<String> expectedPriority) {
    preSetup(StateTransitionThrottleConfig.RebalanceType.LOAD_BALANCE, resourceMap.keySet(), priorityField,
        10, 1);
    event.addAttribute(AttributeName.RESOURCES.name(),
        getResourceMap(resourceMap.keySet().toArray(new String[resourceMap.keySet().size()]), 1,
            "MasterSlave"));
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(),
        getResourceMap(resourceMap.keySet().toArray(new String[resourceMap.keySet().size()]), 1,
            "MasterSlave"));

    // Initialize bestpossible state and current state: each resource's replica must move from
    // instance N (current MASTER) to instance N+1 (new SLAVE), i.e. a load-balance transition.
    BestPossibleStateOutput bestPossibleStateOutput = new BestPossibleStateOutput();
    MessageOutput messageSelectOutput = new MessageOutput();
    CurrentStateOutput currentStateOutput = new CurrentStateOutput();
    for (String resource : resourceMap.keySet()) {
      IdealState is = accessor.getProperty(accessor.keyBuilder().idealStates(resource));
      is.getRecord().setSimpleField(priorityField, resourceMap.get(resource));
      setSingleIdealState(is);

      Map<String, List<String>> partitionMap = new HashMap<>();
      Partition partition = new Partition(resource + "_0");
      String instanceName = HOSTNAME_PREFIX + resource.split("_")[1];
      String nextInstanceName = HOSTNAME_PREFIX + (Integer.parseInt(resource.split("_")[1]) + 1);
      partitionMap.put(partition.getPartitionName(), Collections.singletonList(nextInstanceName));
      bestPossibleStateOutput.setPreferenceLists(resource, partitionMap);
      bestPossibleStateOutput.setState(resource, partition, instanceName, "MASTER");
      bestPossibleStateOutput.setState(resource, partition, nextInstanceName, "SLAVE");
      currentStateOutput.setCurrentState(resource, partition, instanceName, "MASTER");
    }

    event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), bestPossibleStateOutput);
    event.addAttribute(AttributeName.MESSAGES_SELECTED.name(), messageSelectOutput);
    event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
    event.addAttribute(AttributeName.ControllerDataProvider.name(),
        new ResourceControllerDataProvider());
    runStage(event, new ReadClusterDataStage());

    // Keep update the current state. Regenerate the pending message set each pass and record
    // the order in which resources get load-balanced.
    List<String> resourcePriority = new ArrayList<>();
    for (int i = 0; i < resourceMap.size(); i++) {
      event.addAttribute(AttributeName.MESSAGES_SELECTED.name(),
          generateMessageMapForResource(bestPossibleStateOutput, currentStateOutput, resourcePriority));
      runStage(event, new IntermediateStateCalcStage());
      updateCurrentStatesForLoadBalance(resourcePriority, currentStateOutput, bestPossibleStateOutput);
    }

    Assert.assertEquals(resourcePriority, expectedPriority);
  }

  @Test(dataProvider = "PartitionLevelPriority")
  public void testPartitionLevelPriority(String resourceName,
      Map<String, Map<String, String>> bestPossibleMap,
      Map<String, Map<String, String>> currentStateMap, List<String> preferenceList,
      List<String> expectedPriority) {
    preSetup(StateTransitionThrottleConfig.RebalanceType.RECOVERY_BALANCE,
        new HashSet<>(Arrays.asList(resourceName)), "no_field", 3, 3);

    // Add load rebalance throttle config (ANY type, cluster scope, one transition per pass).
    ClusterConfig clusterConfig = accessor.getProperty(accessor.keyBuilder().clusterConfig());
    StateTransitionThrottleConfig throttleConfigForLoadRebalance =
        new StateTransitionThrottleConfig(StateTransitionThrottleConfig.RebalanceType.ANY,
            StateTransitionThrottleConfig.ThrottleScope.CLUSTER, 1);
    List<StateTransitionThrottleConfig> currentThrottleConfig =
        clusterConfig.getStateTransitionThrottleConfigs();
    currentThrottleConfig.add(throttleConfigForLoadRebalance);
    clusterConfig.setStateTransitionThrottleConfigs(currentThrottleConfig);
    setClusterConfig(clusterConfig);

    // Initialize best possible state, current state and resource map.
    Resource resource = new Resource(resourceName);
    BestPossibleStateOutput bestPossibleStateOutput = new BestPossibleStateOutput();
    CurrentStateOutput currentStateOutput = new CurrentStateOutput();
    for (String partitionName : bestPossibleMap.keySet()) {
      Partition partition = new Partition(partitionName);
      bestPossibleStateOutput.setPreferenceList(resourceName, partitionName, preferenceList);
      for (String instanceName : bestPossibleMap.get(partitionName).keySet()) {
        bestPossibleStateOutput.setState(resourceName, partition, instanceName,
            bestPossibleMap.get(partitionName).get(instanceName));
        currentStateOutput.setCurrentState(resourceName, partition, instanceName,
            currentStateMap.get(partitionName).get(instanceName));
      }
      resource.addPartition(partitionName);
    }
    resource.setStateModelDefRef("MasterSlave");

    event.addAttribute(AttributeName.RESOURCES.name(),
        Collections.singletonMap(resourceName, resource));
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(),
        Collections.singletonMap(resourceName, resource));
    event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), bestPossibleStateOutput);
    event.addAttribute(AttributeName.MESSAGES_SELECTED.name(),
        generateMessageMapForPartition(bestPossibleMap, currentStateMap, Collections.emptyList(), resourceName));
    event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
    event.addAttribute(AttributeName.ControllerDataProvider.name(),
        new ResourceControllerDataProvider());
    runStage(event, new ReadClusterDataStage());

    // Keep update the current state. Record the order in which partitions are admitted.
    List<String> partitionPriority = new ArrayList<>();
    for (int i = 0; i < bestPossibleMap.size(); i++) {
      event.addAttribute(AttributeName.MESSAGES_SELECTED.name(),
          generateMessageMapForPartition(bestPossibleMap, currentStateMap, partitionPriority, resourceName));
      runStage(event, new IntermediateStateCalcStage());
      updateCurrentStateForPartitionLevelPriority(partitionPriority, currentStateOutput, resourceName, bestPossibleMap);
    }

    Assert.assertEquals(partitionPriority, expectedPriority);
  }

  @DataProvider(name = "PartitionLevelPriority")
  private Object[][] loadPartitionInput() {
    return loadInputData(PARTITION);
  }

  @DataProvider(name = "ResourceLevelPriority")
  private Object[][] loadResourceInput() {
    return loadInputData(RESOURCE);
  }

  // JSON resource file name template and the keys expected inside it.
  private static final String TEST_INPUT_FILE = "Test%sLevelPriority.json";
  private static final String PRIORITY_FIELD = "PriorityField";
  private static final String EXPECTED_PRIORITY = "ExpectedPriority";
  private static final String BEST_POSSIBLE_MAP = "BestPossibleMap";
  private static final String CURRENT_STATE_MAP = "CurrentStateMap";
  private static final String PREFERENCE_LIST = "PreferenceList";

  /**
   * Loads the data-provider cases for the given entry type ({@link #RESOURCE} or
   * {@link #PARTITION}) from the corresponding classpath JSON file.
   * The casts are unchecked because the JSON is read into untyped maps.
   */
  @SuppressWarnings("unchecked")
  private Object[][] loadInputData(String inputEntry) {
    Object[][] inputData = null;
    // try-with-resources: the original implementation leaked this classpath stream.
    try (InputStream inputStream = getClass().getClassLoader()
        .getResourceAsStream(String.format(TEST_INPUT_FILE, inputEntry))) {
      ObjectReader mapReader = new ObjectMapper().reader(Map.class);
      Map<String, Object> inputMaps = mapReader.readValue(inputStream);

      List<Map<String, Object>> inputs = (List<Map<String, Object>>) inputMaps.get(inputEntry);
      inputData = new Object[inputs.size()][];
      for (int i = 0; i < inputs.size(); i++) {
        if (inputEntry.equals(RESOURCE)) {
          Map<String, String> resourceMap =
              (Map<String, String>) inputs.get(i).get(RESOURCE + "Map");
          String priorityField = (String) inputs.get(i).get(PRIORITY_FIELD);
          List<String> expectedPriority = (List<String>) inputs.get(i).get(EXPECTED_PRIORITY);

          inputData[i] = new Object[] { resourceMap, priorityField, expectedPriority };
        } else if (inputEntry.equals(PARTITION)) {
          String resource = (String) inputs.get(i).get(RESOURCE);
          Map<String, Map<String, String>> bestPossibleMap =
              (Map<String, Map<String, String>>) inputs.get(i).get(BEST_POSSIBLE_MAP);
          Map<String, Map<String, String>> currentStateMap =
              (Map<String, Map<String, String>>) inputs.get(i).get(CURRENT_STATE_MAP);
          List<String> expectedPriority = (List<String>) inputs.get(i).get(EXPECTED_PRIORITY);
          List<String> preferenceList = (List<String>) inputs.get(i).get(PREFERENCE_LIST);

          inputData[i] = new Object[] { resource, bestPossibleMap, currentStateMap, preferenceList,
              expectedPriority
          };
        }
      }
    } catch (IOException e) {
      e.printStackTrace();
    }

    return inputData;
  }

  /**
   * Sets up ideal states, the state model, live instances, and a cluster config that throttles
   * the given rebalance type to one transition per pass and uses {@code priorityField} as the
   * resource priority field.
   */
  private void preSetup(StateTransitionThrottleConfig.RebalanceType rebalanceType,
      Set<String> resourceSet, String priorityField, int numOfLiveInstances, int numOfReplicas) {
    setupIdealState(numOfLiveInstances, resourceSet.toArray(new String[resourceSet.size()]),
        numOfLiveInstances, numOfReplicas, IdealState.RebalanceMode.FULL_AUTO, "MasterSlave");
    setupStateModel();
    setupLiveInstances(numOfLiveInstances);

    // Set up cluster configs
    ClusterConfig clusterConfig = accessor.getProperty(accessor.keyBuilder().clusterConfig());
    StateTransitionThrottleConfig throttleConfig = new StateTransitionThrottleConfig(rebalanceType,
        StateTransitionThrottleConfig.ThrottleScope.CLUSTER, 1);
    clusterConfig.setStateTransitionThrottleConfigs(Collections.singletonList(throttleConfig));
    clusterConfig.setResourcePriorityField(priorityField);
    setClusterConfig(clusterConfig);
  }

  /**
   * Finds the first resource that reached SLAVE in the intermediate output and marks it as
   * recovered: appends it to {@code resourcePriority} and advances its current state.
   */
  private void updateCurrentStatesForRecoveryBalance(List<String> resourcePriority,
      CurrentStateOutput currentStateOutput) {
    IntermediateStateOutput output = event.getAttribute(AttributeName.INTERMEDIATE_STATE.name());
    for (PartitionStateMap partitionStateMap : output.getResourceStatesMap().values()) {
      String resourceName = partitionStateMap.getResourceName();
      Partition partition = new Partition(resourceName + "_0");
      String instanceName = HOSTNAME_PREFIX + resourceName.split("_")[1];
      if (partitionStateMap.getPartitionMap(partition).values().contains("SLAVE")
          && !resourcePriority.contains(resourceName)) {
        updateCurrentOutput(resourcePriority, currentStateOutput, resourceName, partition,
            instanceName, "SLAVE");
        break;
      }
    }
  }

  /**
   * Finds the first resource whose load-balance message was admitted and marks it as balanced:
   * appends it to {@code resourcePriority} and sets the target instance's state to SLAVE.
   */
  private void updateCurrentStatesForLoadBalance(List<String> resourcePriority, CurrentStateOutput currentStateOutput,
      BestPossibleStateOutput bestPossibleStateOutput) {
    MessageOutput output = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
    for (String resourceName : bestPossibleStateOutput.getResourceStatesMap().keySet()) {
      Partition partition = new Partition(resourceName + "_0");
      if (output.getResourceMessageMap(resourceName).get(partition) != null
          && output.getResourceMessageMap(resourceName).get(partition).size() > 0) {
        String nextInstanceName = HOSTNAME_PREFIX + (Integer.parseInt(resourceName.split("_")[1]) + 1);
        currentStateOutput.setCurrentState(resourceName, partition, nextInstanceName, "SLAVE");
        resourcePriority.add(resourceName);
        break;
      }
    }
  }

  /**
   * Builds the pending-message set for resources that have not yet converged to their best
   * possible state and are not already recorded in {@code resourcePriority}.
   */
  private MessageOutput generateMessageMapForResource(BestPossibleStateOutput bestPossibleStateOutput,
      CurrentStateOutput currentStateOutput, List<String> resourcePriority) {
    MessageOutput messageSelectOutput = new MessageOutput();
    for (String resource : bestPossibleStateOutput.getResourceStatesMap().keySet()) {
      if (!resourcePriority.contains(resource) && !bestPossibleStateOutput.getPartitionStateMap(resource)
          .getStateMap()
          .equals(currentStateOutput.getCurrentStateMap(resource))) {
        messageSelectOutput.addMessage(resource, new Partition(resource + "_0"),
            generateMessage("OFFLINE", "SLAVE", (HOSTNAME_PREFIX + (Integer.parseInt(resource.split("_")[1]) + 1))));
      }
    }
    return messageSelectOutput;
  }

  /**
   * Builds the pending-message set for partitions whose current state still differs from the
   * best possible state and that are not already recorded in {@code partitionPriority}.
   */
  private MessageOutput generateMessageMapForPartition(Map<String, Map<String, String>> bestPossibleMap,
      Map<String, Map<String, String>> currentStateMap, List<String> partitionPriority, String resourceName) {
    MessageOutput messageSelectOutput = new MessageOutput();
    for (String partitionName : bestPossibleMap.keySet()) {
      for (String instanceName : bestPossibleMap.get(partitionName).keySet()) {
        if (!partitionPriority.contains(partitionName) && !bestPossibleMap.get(partitionName)
            .get(instanceName)
            .equals(currentStateMap.get(partitionName).get(instanceName))) {
          messageSelectOutput.addMessage(resourceName, new Partition(partitionName),
              generateMessage(currentStateMap.get(partitionName).get(instanceName),
                  bestPossibleMap.get(partitionName).get(instanceName), instanceName));
        }
      }
    }
    return messageSelectOutput;
  }

  /** Records the resource in priority order and writes its new current state back to the event. */
  private void updateCurrentOutput(List<String> resourcePriority,
      CurrentStateOutput currentStateOutput, String resourceName, Partition partition,
      String instanceName, String state) {
    resourcePriority.add(resourceName);
    currentStateOutput.setCurrentState(resourceName, partition, instanceName, state);
    event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
  }

  /**
   * Records every partition that had an admitted message in priority order and advances each
   * one's current state to its best possible state.
   */
  private void updateCurrentStateForPartitionLevelPriority(List<String> partitionPriority,
      CurrentStateOutput currentStateOutput, String resourceName,
      Map<String, Map<String, String>> bestPossibleMap) {
    MessageOutput output = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
    output.getResourceMessageMap(resourceName).entrySet().stream().filter(e -> e.getValue().size() > 0).forEach(e -> {
      partitionPriority.add(e.getKey().toString());
      for (String instanceName : bestPossibleMap.get(e.getKey().toString()).keySet()) {
        currentStateOutput.setCurrentState(resourceName, e.getKey(), instanceName,
            bestPossibleMap.get(e.getKey().toString()).get(instanceName));
      }
    });
  }
}
| 9,723 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestQuotaConstraintSkipWorkflowAssignment.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.HashMap;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.controller.dataproviders.WorkflowControllerDataProvider;
import org.apache.helix.controller.stages.task.TaskSchedulingStage;
import org.apache.helix.integration.task.MockTask;
import org.apache.helix.integration.task.TaskTestBase;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.task.JobConfig;
import org.apache.helix.task.TaskConfig;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.TaskUtil;
import org.apache.helix.task.Workflow;
import org.apache.helix.task.assigner.AssignableInstance;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * Verifies that when a task quota type has no remaining global capacity, the task scheduling
 * stage stops assigning further workflows instead of over-subscribing the instances.
 */
public class TestQuotaConstraintSkipWorkflowAssignment extends TaskTestBase {

  @BeforeClass
  public void beforeClass() throws Exception {
    setSingleTestEnvironment();
    super.beforeClass();
    // Stop the real controller so the pipeline stages can be driven manually by the test.
    _controller.syncStop();
  }

  @Test
  public void testQuotaConstraintSkipWorkflowAssignment() throws Exception {
    ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
    WorkflowControllerDataProvider cache = new WorkflowControllerDataProvider(CLUSTER_NAME);
    TaskDriver driver = new TaskDriver(_manager);

    // Submit 10 single-job workflows, each containing one long-running task.
    for (int i = 0; i < 10; i++) {
      Workflow.Builder workflow = new Workflow.Builder("Workflow" + i);
      JobConfig.Builder job = new JobConfig.Builder();
      job.setJobCommandConfigMap(Collections.singletonMap(MockTask.JOB_DELAY, "100000"));
      job.setWorkflow("Workflow" + i);
      TaskConfig taskConfig =
          new TaskConfig(MockTask.TASK_COMMAND, new HashMap<>(), null, null);
      job.addTaskConfigMap(Collections.singletonMap(taskConfig.getId(), taskConfig));
      job.setJobId(TaskUtil.getNamespacedJobName("Workflow" + i, "JOB"));
      workflow.addJob("JOB", job);
      driver.start(workflow.build());
    }

    // Shrink the DEFAULT quota so only 3 of the 10 tasks can be assigned; the rest of the
    // capacity is reserved for an unrelated quota type.
    ConfigAccessor accessor = new ConfigAccessor(_gZkClient);
    ClusterConfig clusterConfig = accessor.getClusterConfig(CLUSTER_NAME);
    clusterConfig.setTaskQuotaRatio(AssignableInstance.DEFAULT_QUOTA_TYPE, 3);
    clusterConfig.setTaskQuotaRatio("OtherType", 37);
    accessor.setClusterConfig(CLUSTER_NAME, clusterConfig);
    cache.refresh(_manager.getHelixDataAccessor());

    // Drive the relevant pipeline stages manually.
    event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);
    event.addAttribute(AttributeName.helixmanager.name(), _manager);
    runStage(event, new ResourceComputationStage());
    runStage(event, new CurrentStateComputationStage());
    runStage(event, new TaskSchedulingStage());

    // The DEFAULT quota must be exhausted, and only the 3 workflows that fit should have
    // received an assignment.
    Assert.assertFalse(cache.getAssignableInstanceManager()
        .hasGlobalCapacity(AssignableInstance.DEFAULT_QUOTA_TYPE));
    BestPossibleStateOutput bestPossibleStateOutput =
        event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
    Assert.assertEquals(bestPossibleStateOutput.getStateMap().size(), 3);
  }
}
| 9,724 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestIntermediateStateCalcStage.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import org.apache.helix.api.config.StateTransitionThrottleConfig;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.Message;
import org.apache.helix.model.Partition;
import org.apache.helix.monitoring.mbeans.ClusterStatusMonitor;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestIntermediateStateCalcStage extends BaseStageTest {
private ClusterConfig _clusterConfig;
@Test
public void testNoStateMissing() {
String resourcePrefix = "resource";
int nResource = 4;
int nPartition = 2;
int nReplica = 3;
String[] resources = new String[nResource];
for (int i = 0; i < nResource; i++) {
resources[i] = resourcePrefix + "_" + i;
}
preSetup(resources, nReplica, nReplica);
event.addAttribute(AttributeName.RESOURCES.name(), getResourceMap(resources, nPartition, "OnlineOffline"));
event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(),
getResourceMap(resources, nPartition, "OnlineOffline"));
// Initialize bestpossible state and current state
BestPossibleStateOutput bestPossibleStateOutput = new BestPossibleStateOutput();
CurrentStateOutput currentStateOutput = new CurrentStateOutput();
MessageOutput messageSelectOutput = new MessageOutput();
IntermediateStateOutput expectedResult = new IntermediateStateOutput();
_clusterConfig.setErrorOrRecoveryPartitionThresholdForLoadBalance(1);
setClusterConfig(_clusterConfig);
for (String resource : resources) {
IdealState is = accessor.getProperty(accessor.keyBuilder().idealStates(resource));
setSingleIdealState(is);
Map<String, List<String>> partitionMap = new HashMap<>();
for (int p = 0; p < nPartition; p++) {
Partition partition = new Partition(resource + "_" + p);
for (int r = 0; r < nReplica; r++) {
String instanceName = HOSTNAME_PREFIX + r;
partitionMap.put(partition.getPartitionName(), Collections.singletonList(instanceName));
if (resource.endsWith("0")) {
// Regular recovery balance
currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
// add blocked state transition messages
Message pendingMessage = generateMessage("OFFLINE", "ONLINE", instanceName);
currentStateOutput.setPendingMessage(resource, partition, instanceName, pendingMessage);
bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
// should be recovered:
expectedResult.setState(resource, partition, instanceName, "ONLINE");
} else if (resource.endsWith("1")) {
// Regular load balance
currentStateOutput.setCurrentState(resource, partition, instanceName, "ONLINE");
currentStateOutput.setCurrentState(resource, partition, instanceName + "-1", "OFFLINE");
bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
messageSelectOutput.addMessage(resource, partition,
generateMessage("OFFLINE", "DROPPED", instanceName + "-1"));
// should be recovered:
expectedResult.setState(resource, partition, instanceName, "ONLINE");
} else if (resource.endsWith("2")) {
// Recovery balance with transient states, should keep the current states in the output.
currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
bestPossibleStateOutput.setState(resource, partition, instanceName, "OFFLINE");
// should be kept unchanged:
expectedResult.setState(resource, partition, instanceName, "OFFLINE");
} else if (resource.endsWith("3")) {
// One unresolved error should not prevent recovery balance
bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
if (p == 0) {
if (r == 0) {
currentStateOutput.setCurrentState(resource, partition, instanceName, "ERROR");
bestPossibleStateOutput.setState(resource, partition, instanceName, "ERROR");
// This partition is still ERROR
expectedResult.setState(resource, partition, instanceName, "ERROR");
} else {
currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
messageSelectOutput.addMessage(resource, partition, generateMessage("OFFLINE", "ONLINE", instanceName));
// Recovery balance
expectedResult.setState(resource, partition, instanceName, "ONLINE");
}
} else {
currentStateOutput.setCurrentState(resource, partition, instanceName, "ONLINE");
currentStateOutput.setCurrentState(resource, partition, instanceName + "-1", "OFFLINE");
// load balance is throttled, so keep all current states
messageSelectOutput.addMessage(resource, partition,
generateMessage("OFFLINE", "DROPPED", instanceName + "-1"));
expectedResult.setState(resource, partition, instanceName, "ONLINE");
// The following must be removed because now downward state transitions are allowed
// expectedResult.setState(resource, partition, instanceName + "-1", "OFFLINE");
}
} else if (resource.endsWith("4")) {
// Test that partitions with replicas to drop are dropping them when recovery is
// happening for other partitions
if (p == 0) {
// This partition requires recovery
currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
messageSelectOutput.addMessage(resource, partition, generateMessage("OFFLINE", "ONLINE", instanceName));
// After recovery, it should be back ONLINE
expectedResult.setState(resource, partition, instanceName, "ONLINE");
} else {
// Other partitions require dropping of replicas
currentStateOutput.setCurrentState(resource, partition, instanceName, "ONLINE");
currentStateOutput.setCurrentState(resource, partition, instanceName + "-1", "OFFLINE");
// BestPossibleState dictates that we only need one ONLINE replica
bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
bestPossibleStateOutput.setState(resource, partition, instanceName + "-1", "DROPPED");
messageSelectOutput.addMessage(resource, partition,
generateMessage("OFFLINE", "DROPPED", instanceName + "-1"));
// So instanceName-1 will NOT be expected to show up in expectedResult
expectedResult.setState(resource, partition, instanceName, "ONLINE");
expectedResult.setState(resource, partition, instanceName + "-1", "DROPPED");
}
} else if (resource.endsWith("5")) {
// Test that load balance bringing up a new replica does NOT happen with a recovery
// partition
if (p == 0) {
// Set up a partition requiring recovery
currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
messageSelectOutput.addMessage(resource, partition, generateMessage("OFFLINE", "ONLINE", instanceName));
// After recovery, it should be back ONLINE
expectedResult.setState(resource, partition, instanceName, "ONLINE");
} else {
currentStateOutput.setCurrentState(resource, partition, instanceName, "ONLINE");
bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
// Check that load balance (bringing up a new node) did not take place
bestPossibleStateOutput.setState(resource, partition, instanceName + "-1", "ONLINE");
messageSelectOutput.addMessage(resource, partition,
generateMessage("OFFLINE", "ONLINE", instanceName + "-1"));
expectedResult.setState(resource, partition, instanceName, "ONLINE");
}
}
}
}
bestPossibleStateOutput.setPreferenceLists(resource, partitionMap);
}
event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), bestPossibleStateOutput);
event.addAttribute(AttributeName.MESSAGES_SELECTED.name(), messageSelectOutput);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
event.addAttribute(AttributeName.ControllerDataProvider.name(), new ResourceControllerDataProvider());
runStage(event, new ReadClusterDataStage());
runStage(event, new IntermediateStateCalcStage());
IntermediateStateOutput output = event.getAttribute(AttributeName.INTERMEDIATE_STATE.name());
for (String resource : resources) {
// Note Assert.assertEquals won't work. If "actual" is an empty map, it won't compare
// anything.
Assert.assertTrue(output.getPartitionStateMap(resource)
.getStateMap()
.equals(expectedResult.getPartitionStateMap(resource).getStateMap()));
}
}
@Test
public void testWithClusterConfigChange() {
String resourcePrefix = "resource";
int nResource = 1;
int nPartition = 2;
int nReplica = 3;
String[] resources = new String[nResource];
for (int i = 0; i < nResource; i++) {
resources[i] = resourcePrefix + "_" + i;
}
preSetup(resources, nReplica, nReplica);
event.addAttribute(AttributeName.RESOURCES.name(), getResourceMap(resources, nPartition, "OnlineOffline"));
event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(),
getResourceMap(resources, nPartition, "OnlineOffline"));
ClusterStatusMonitor monitor = new ClusterStatusMonitor(_clusterName);
monitor.active();
event.addAttribute(AttributeName.clusterStatusMonitor.name(), monitor);
// Initialize best possible state and current state
BestPossibleStateOutput bestPossibleStateOutput = new BestPossibleStateOutput();
MessageOutput messageSelectOutput = new MessageOutput();
CurrentStateOutput currentStateOutput = new CurrentStateOutput();
IntermediateStateOutput expectedResult = new IntermediateStateOutput();
for (String resource : resources) {
IdealState is = accessor.getProperty(accessor.keyBuilder().idealStates(resource));
setSingleIdealState(is);
Map<String, List<String>> partitionMap = new HashMap<>();
for (int p = 0; p < nPartition; p++) {
Partition partition = new Partition(resource + "_" + p);
for (int r = 0; r < nReplica; r++) {
String instanceName = HOSTNAME_PREFIX + r;
partitionMap.put(partition.getPartitionName(), Collections.singletonList(instanceName));
if (resource.endsWith("0")) {
// Test that when the threshold is set at a number greater than the number of error and
// recovery partitions, load balance DOES take place
_clusterConfig.setErrorOrRecoveryPartitionThresholdForLoadBalance(Integer.MAX_VALUE);
setClusterConfig(_clusterConfig);
if (p == 0) {
// Set up a partition requiring recovery
currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
messageSelectOutput.addMessage(resource, partition, generateMessage("OFFLINE", "ONLINE", instanceName));
// After recovery, it should be back ONLINE
expectedResult.setState(resource, partition, instanceName, "ONLINE");
} else {
// Ensure we have at least one ONLINE replica so that this partition does not need
// recovery
currentStateOutput.setCurrentState(resource, partition, instanceName, "ONLINE");
bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
expectedResult.setState(resource, partition, instanceName, "ONLINE");
// This partition to bring up a replica (load balance will happen)
bestPossibleStateOutput.setState(resource, partition, instanceName + "-1", "ONLINE");
messageSelectOutput.addMessage(resource, partition,
generateMessage("OFFLINE", "ONLINE", instanceName + "-1"));
expectedResult.setState(resource, partition, instanceName + "-1", "ONLINE");
}
}
}
}
bestPossibleStateOutput.setPreferenceLists(resource, partitionMap);
}
event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), bestPossibleStateOutput);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
event.addAttribute(AttributeName.MESSAGES_SELECTED.name(), messageSelectOutput);
event.addAttribute(AttributeName.ControllerDataProvider.name(), new ResourceControllerDataProvider());
runStage(event, new ReadClusterDataStage());
runStage(event, new IntermediateStateCalcStage());
IntermediateStateOutput output = event.getAttribute(AttributeName.INTERMEDIATE_STATE.name());
// Validate that there are 0 resourced load balance been throttled
ClusterStatusMonitor clusterStatusMonitor =
event.getAttribute(AttributeName.clusterStatusMonitor.name());
Assert.assertEquals(clusterStatusMonitor.getNumOfResourcesRebalanceThrottledGauge(), 0);
Assert.assertEquals(clusterStatusMonitor.getResourceMonitor("resource_0")
.getRebalanceThrottledByErrorPartitionGauge(), 0);
for (String resource : resources) {
// Note Assert.assertEquals won't work. If "actual" is an empty map, it won't compare
// anything.
Assert.assertEquals(output.getPartitionStateMap(resource).getStateMap(),
expectedResult.getPartitionStateMap(resource).getStateMap());
}
}
  /**
   * Verifies throttling by ERROR partitions. With
   * ErrorOrRecoveryPartitionThresholdForLoadBalance set to 0, any resource carrying ERROR-state
   * replicas has its load-balance transitions blocked, while recovery transitions
   * (OFFLINE->ONLINE for the first replica) still go through. Also checks the cluster-level and
   * per-resource rebalance-throttled gauges on ClusterStatusMonitor.
   */
  @Test
  public void testThrottleByErrorPartition() {
    String resourcePrefix = "resource";
    int nResource = 3;
    int nPartition = 3;
    int nReplica = 3;

    String[] resources = new String[nResource];
    for (int i = 0; i < nResource; i++) {
      resources[i] = resourcePrefix + "_" + i;
    }

    preSetup(resources, nReplica, nReplica);
    event.addAttribute(AttributeName.RESOURCES.name(),
        getResourceMap(resources, nPartition, "OnlineOffline"));
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(),
        getResourceMap(resources, nPartition, "OnlineOffline"));
    ClusterStatusMonitor monitor = new ClusterStatusMonitor(_clusterName);
    monitor.active();
    event.addAttribute(AttributeName.clusterStatusMonitor.name(), monitor);

    // Initialize best possible state and current state
    BestPossibleStateOutput bestPossibleStateOutput = new BestPossibleStateOutput();
    MessageOutput messageSelectOutput = new MessageOutput();
    CurrentStateOutput currentStateOutput = new CurrentStateOutput();
    IntermediateStateOutput expectedResult = new IntermediateStateOutput();
    // Threshold 0 means ANY error/recovery partition blocks load balance for its resource.
    _clusterConfig.setErrorOrRecoveryPartitionThresholdForLoadBalance(0);
    setClusterConfig(_clusterConfig);
    for (String resource : resources) {
      IdealState is = accessor.getProperty(accessor.keyBuilder().idealStates(resource));
      setSingleIdealState(is);

      Map<String, List<String>> partitionMap = new HashMap<>();
      for (int p = 0; p < nPartition; p++) {
        Partition partition = new Partition(resource + "_" + p);
        for (int r = 0; r < nReplica; r++) {
          String instanceName = HOSTNAME_PREFIX + r;
          partitionMap.put(partition.getPartitionName(), Collections.singletonList(instanceName));
          // A resource with 2 replicas in error state and one need recovery in offline->online. error state
          // throttle won't block recovery rebalance
          if (resource.endsWith("0")) {
            if (p <= 1) {
              // Partitions 0 and 1: stuck in ERROR; they stay ERROR in the expected output.
              currentStateOutput.setCurrentState(resource, partition, instanceName, "ERROR");
              bestPossibleStateOutput.setState(resource, partition, instanceName, "ERROR");
              expectedResult.setState(resource, partition, instanceName, "ERROR");
            } else {
              currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
              bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
              expectedResult.setState(resource, partition, instanceName, "OFFLINE");
              if (r == 0) {
                // Recovery for the first replica is NOT throttled by the ERROR partitions.
                messageSelectOutput.addMessage(resource, partition,
                    generateMessage("OFFLINE", "ONLINE", instanceName));
                expectedResult.setState(resource, partition, instanceName, "ONLINE");
              }
            }
          } else if (resource.endsWith("1")) {
            // A resource with 1 replicas in error state and one need load balance in offline->online. error state
            // throttle will block load rebalance
            if (p <= 0) {
              currentStateOutput.setCurrentState(resource, partition, instanceName, "ERROR");
              bestPossibleStateOutput.setState(resource, partition, instanceName, "ERROR");
              expectedResult.setState(resource, partition, instanceName, "ERROR");
            } else {
              if (r == 0) {
                currentStateOutput.setCurrentState(resource, partition, instanceName, "ONLINE");
                bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
                expectedResult.setState(resource, partition, instanceName, "ONLINE");
              } else {
                // even though there is ST msg, it should be throttled
                currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
                bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
                messageSelectOutput.addMessage(resource, partition,
                    generateMessage("OFFLINE", "ONLINE", instanceName));
                expectedResult.setState(resource, partition, instanceName, "OFFLINE");
              }
            }
          } else {
            // A resource need regular load balance
            currentStateOutput.setCurrentState(resource, partition, instanceName, "ONLINE");
            currentStateOutput.setCurrentState(resource, partition, instanceName + "-1", "OFFLINE");
            bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
            messageSelectOutput.addMessage(resource, partition,
                generateMessage("OFFLINE", "DROPPED", instanceName + "-1"));
            // should be recovered:
            expectedResult.setState(resource, partition, instanceName, "ONLINE");
          }
        }
      }
      bestPossibleStateOutput.setPreferenceLists(resource, partitionMap);
    }

    event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), bestPossibleStateOutput);
    event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
    event.addAttribute(AttributeName.MESSAGES_SELECTED.name(), messageSelectOutput);
    event.addAttribute(AttributeName.ControllerDataProvider.name(),
        new ResourceControllerDataProvider());
    runStage(event, new ReadClusterDataStage());
    runStage(event, new IntermediateStateCalcStage());

    IntermediateStateOutput output = event.getAttribute(AttributeName.INTERMEDIATE_STATE.name());

    // Validate that there are 2 resourced load balance been throttled
    // (resource_0 and resource_1 carry ERROR replicas; resource_2 does not)
    ClusterStatusMonitor clusterStatusMonitor =
        event.getAttribute(AttributeName.clusterStatusMonitor.name());
    Assert.assertEquals(clusterStatusMonitor.getNumOfResourcesRebalanceThrottledGauge(), 2);
    Assert.assertEquals(clusterStatusMonitor.getResourceMonitor("resource_0")
        .getRebalanceThrottledByErrorPartitionGauge(), 1);
    Assert.assertEquals(clusterStatusMonitor.getResourceMonitor("resource_1")
        .getRebalanceThrottledByErrorPartitionGauge(), 1);
    Assert.assertEquals(clusterStatusMonitor.getResourceMonitor("resource_2")
        .getRebalanceThrottledByErrorPartitionGauge(), 0);
    for (String resource : resources) {
      // Note Assert.assertEquals won't work. If "actual" is an empty map, it won't compare
      // anything.
      Assert.assertEquals(output.getPartitionStateMap(resource).getStateMap(),
          expectedResult.getPartitionStateMap(resource).getStateMap());
    }
  }
  /**
   * Verifies IntermediateStateCalcStage behavior when a partition's preference list is missing
   * (null): the last partition of every resource gets a null preference list, and the stage must
   * still compute the expected intermediate states for the remaining partitions. The per-resource
   * scenarios (recovery, load balance, transient states, unresolved ERROR) mirror the sibling
   * tests in this class.
   */
  @Test
  public void testPartitionMissing() {
    String resourcePrefix = "resource";
    int nResource = 4;
    int nPartition = 2;
    int nReplica = 3;

    String[] resources = new String[nResource];
    for (int i = 0; i < nResource; i++) {
      resources[i] = resourcePrefix + "_" + i;
    }

    preSetup(resources, nReplica, nReplica);
    event.addAttribute(AttributeName.RESOURCES.name(), getResourceMap(resources, nPartition, "OnlineOffline"));
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(),
        getResourceMap(resources, nPartition, "OnlineOffline"));

    // Initialize bestpossible state and current state
    BestPossibleStateOutput bestPossibleStateOutput = new BestPossibleStateOutput();
    CurrentStateOutput currentStateOutput = new CurrentStateOutput();
    MessageOutput messageSelectOutput = new MessageOutput();
    IntermediateStateOutput expectedResult = new IntermediateStateOutput();
    _clusterConfig.setErrorOrRecoveryPartitionThresholdForLoadBalance(1);
    setClusterConfig(_clusterConfig);
    for (String resource : resources) {
      IdealState is = accessor.getProperty(accessor.keyBuilder().idealStates(resource));
      setSingleIdealState(is);

      Map<String, List<String>> partitionMap = new HashMap<>();
      for (int p = 0; p < nPartition; p++) {
        Partition partition = new Partition(resource + "_" + p);
        for (int r = 0; r < nReplica; r++) {
          String instanceName = HOSTNAME_PREFIX + r;
          // PartitionMap is used as a preferenceList.
          // For the last partition, let us add null as preferenceList.
          if (p != nPartition - 1) {
            partitionMap.put(partition.getPartitionName(), Collections.singletonList(instanceName));
          } else {
            partitionMap.put(partition.getPartitionName(), null);
          }
          // TODO: The following code is same for testNoStateMissing
          if (resource.endsWith("0")) {
            // Regular recovery balance
            currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
            // add blocked state transition messages
            Message pendingMessage = generateMessage("OFFLINE", "ONLINE", instanceName);
            currentStateOutput.setPendingMessage(resource, partition, instanceName, pendingMessage);

            bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
            // should be recovered:
            expectedResult.setState(resource, partition, instanceName, "ONLINE");
          } else if (resource.endsWith("1")) {
            // Regular load balance
            currentStateOutput.setCurrentState(resource, partition, instanceName, "ONLINE");
            currentStateOutput.setCurrentState(resource, partition, instanceName + "-1", "OFFLINE");
            bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
            messageSelectOutput.addMessage(resource, partition,
                generateMessage("OFFLINE", "DROPPED", instanceName + "-1"));
            // should be recovered:
            expectedResult.setState(resource, partition, instanceName, "ONLINE");
          } else if (resource.endsWith("2")) {
            // Recovery balance with transient states, should keep the current states in the output.
            currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
            bestPossibleStateOutput.setState(resource, partition, instanceName, "OFFLINE");
            // should be kept unchanged:
            expectedResult.setState(resource, partition, instanceName, "OFFLINE");
          } else if (resource.endsWith("3")) {
            // One unresolved error should not prevent recovery balance
            bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
            if (p == 0) {
              if (r == 0) {
                currentStateOutput.setCurrentState(resource, partition, instanceName, "ERROR");
                bestPossibleStateOutput.setState(resource, partition, instanceName, "ERROR");
                // This partition is still ERROR
                expectedResult.setState(resource, partition, instanceName, "ERROR");
              } else {
                currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
                messageSelectOutput.addMessage(resource, partition, generateMessage("OFFLINE", "ONLINE", instanceName));
                // Recovery balance
                expectedResult.setState(resource, partition, instanceName, "ONLINE");
              }
            } else {
              currentStateOutput.setCurrentState(resource, partition, instanceName, "ONLINE");
              currentStateOutput.setCurrentState(resource, partition, instanceName + "-1", "OFFLINE");
              // load balance is throttled, so keep all current states
              messageSelectOutput.addMessage(resource, partition,
                  generateMessage("OFFLINE", "DROPPED", instanceName + "-1"));
              expectedResult.setState(resource, partition, instanceName, "ONLINE");
              // The following must be removed because now downward state transitions are allowed
              // expectedResult.setState(resource, partition, instanceName + "-1", "OFFLINE");
            }
          } else if (resource.endsWith("4")) {
            // NOTE(review): unreachable with nResource = 4 (resources are resource_0..resource_3);
            // kept for parity with the sibling test that uses more resources.
            // Test that partitions with replicas to drop are dropping them when recovery is
            // happening for other partitions
            if (p == 0) {
              // This partition requires recovery
              currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
              bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
              messageSelectOutput.addMessage(resource, partition, generateMessage("OFFLINE", "ONLINE", instanceName));
              // After recovery, it should be back ONLINE
              expectedResult.setState(resource, partition, instanceName, "ONLINE");
            } else {
              // Other partitions require dropping of replicas
              currentStateOutput.setCurrentState(resource, partition, instanceName, "ONLINE");
              currentStateOutput.setCurrentState(resource, partition, instanceName + "-1", "OFFLINE");
              // BestPossibleState dictates that we only need one ONLINE replica
              bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
              bestPossibleStateOutput.setState(resource, partition, instanceName + "-1", "DROPPED");
              messageSelectOutput.addMessage(resource, partition,
                  generateMessage("OFFLINE", "DROPPED", instanceName + "-1"));
              // So instanceName-1 will NOT be expected to show up in expectedResult
              expectedResult.setState(resource, partition, instanceName, "ONLINE");
              expectedResult.setState(resource, partition, instanceName + "-1", "DROPPED");
            }
          } else if (resource.endsWith("5")) {
            // NOTE(review): also unreachable with nResource = 4; see note above.
            // Test that load balance bringing up a new replica does NOT happen with a recovery
            // partition
            if (p == 0) {
              // Set up a partition requiring recovery
              currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
              bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
              messageSelectOutput.addMessage(resource, partition, generateMessage("OFFLINE", "ONLINE", instanceName));
              // After recovery, it should be back ONLINE
              expectedResult.setState(resource, partition, instanceName, "ONLINE");
            } else {
              currentStateOutput.setCurrentState(resource, partition, instanceName, "ONLINE");
              bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
              // Check that load balance (bringing up a new node) did not take place
              bestPossibleStateOutput.setState(resource, partition, instanceName + "-1", "ONLINE");
              messageSelectOutput.addMessage(resource, partition,
                  generateMessage("OFFLINE", "ONLINE", instanceName + "-1"));
              expectedResult.setState(resource, partition, instanceName, "ONLINE");
            }
          }
        }
      }
      bestPossibleStateOutput.setPreferenceLists(resource, partitionMap);
    }

    event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), bestPossibleStateOutput);
    event.addAttribute(AttributeName.MESSAGES_SELECTED.name(), messageSelectOutput);
    event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
    event.addAttribute(AttributeName.ControllerDataProvider.name(), new ResourceControllerDataProvider());
    runStage(event, new ReadClusterDataStage());
    runStage(event, new IntermediateStateCalcStage());

    IntermediateStateOutput output = event.getAttribute(AttributeName.INTERMEDIATE_STATE.name());

    for (String resource : resources) {
      // Note Assert.assertEquals won't work. If "actual" is an empty map, it won't compare
      // anything.
      Assert.assertTrue(output.getPartitionStateMap(resource)
          .getStateMap()
          .equals(expectedResult.getPartitionStateMap(resource).getStateMap()));
    }
  }
private void preSetup(String[] resources, int numOfLiveInstances, int numOfReplicas) {
setupIdealState(numOfLiveInstances, resources, numOfLiveInstances, numOfReplicas,
IdealState.RebalanceMode.FULL_AUTO, "OnlineOffline");
setupStateModel();
setupLiveInstances(numOfLiveInstances);
// Set up cluster configs
_clusterConfig = accessor.getProperty(accessor.keyBuilder().clusterConfig());
_clusterConfig.setStateTransitionThrottleConfigs(ImmutableList.of(
new StateTransitionThrottleConfig(StateTransitionThrottleConfig.RebalanceType.RECOVERY_BALANCE,
StateTransitionThrottleConfig.ThrottleScope.INSTANCE, 3),
new StateTransitionThrottleConfig(StateTransitionThrottleConfig.RebalanceType.LOAD_BALANCE,
StateTransitionThrottleConfig.ThrottleScope.INSTANCE, 3)));
setClusterConfig(_clusterConfig);
}
}
| 9,725 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestCompatibilityCheckStage.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.List;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.pipeline.StageContext;
import org.apache.helix.mock.MockManager;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.LiveInstance.LiveInstanceProperty;
import org.apache.helix.tools.DefaultIdealStateCalculator;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests for CompatibilityCheckStage: the controller pipeline stage that verifies participant
 * versions against the controller's minimum supported participant version.
 */
public class TestCompatibilityCheckStage extends BaseStageTest {
  private void prepare(String controllerVersion, String participantVersion) {
    prepare(controllerVersion, participantVersion, null);
  }

  /**
   * Seeds the test cluster with an ideal state and one live instance (optionally stamped with
   * {@code participantVersion}), sets the controller version and the minimum supported
   * participant version, then refreshes the cluster data cache so the stage under test sees
   * the prepared versions. Any of the version arguments may be null to leave it unset.
   */
  private void prepare(String controllerVersion, String participantVersion,
      String minSupportedParticipantVersion) {
    List<String> instances =
        Arrays.asList("localhost_0", "localhost_1", "localhost_2", "localhost_3", "localhost_4");
    int partitions = 10;
    int replicas = 1;

    // set ideal state
    String resourceName = "testResource";
    ZNRecord record =
        DefaultIdealStateCalculator.calculateIdealState(instances, partitions, replicas,
            resourceName, "MASTER", "SLAVE");
    IdealState idealState = new IdealState(record);
    idealState.setStateModelDefRef("MasterSlave");

    Builder keyBuilder = accessor.keyBuilder();
    accessor.setProperty(keyBuilder.idealStates(resourceName), idealState);

    // set live instances
    record = new ZNRecord("localhost_0");
    if (participantVersion != null) {
      record.setSimpleField(LiveInstanceProperty.HELIX_VERSION.toString(), participantVersion);
    }
    LiveInstance liveInstance = new LiveInstance(record);
    liveInstance.setSessionId("session_0");
    accessor.setProperty(keyBuilder.liveInstance("localhost_0"), liveInstance);

    if (controllerVersion != null) {
      ((MockManager) manager).setVersion(controllerVersion);
    }

    if (minSupportedParticipantVersion != null) {
      manager.getProperties().getProperties()
          .put("minimum_supported_version.participant", minSupportedParticipantVersion);
    }
    event.addAttribute(AttributeName.helixmanager.name(), manager);
    event.addAttribute(AttributeName.ControllerDataProvider.name(),
        new ResourceControllerDataProvider());
    runStage(event, new ReadClusterDataStage());
  }

  /**
   * Runs CompatibilityCheckStage once over the prepared event, expecting it to pass.
   * Fails the test with {@code failureMessage} (and the root-cause exception attached,
   * which the original duplicated blocks only did inconsistently) otherwise.
   */
  private void runStageExpectingSuccess(String failureMessage) {
    CompatibilityCheckStage stage = new CompatibilityCheckStage();
    StageContext context = new StageContext();
    stage.init(context);
    stage.preProcess();
    try {
      stage.process(event);
    } catch (Exception e) {
      Assert.fail(failureMessage, e);
    }
    stage.postProcess();
  }

  @Test
  public void testCompatible() {
    prepare("1.0.0", "1.0.0", "0.4");
    runStageExpectingSuccess("Should not fail since versions are compatible");
  }

  @Test
  public void testNullParticipantVersion() {
    prepare("0.4.0", null);
    runStageExpectingSuccess(
        "Should not fail since compatibility check will be skipped if participant version is null");
  }

  @Test
  public void testNullControllerVersion() {
    prepare(null, "0.4.0");
    runStageExpectingSuccess(
        "Should not fail since compatibility check will be skipped if controller version is null");
  }

  @Test
  public void testIncompatible() {
    prepare("0.6.1-incubating-SNAPSHOT", "0.3.4", "0.4");

    CompatibilityCheckStage stage = new CompatibilityCheckStage();
    StageContext context = new StageContext();
    stage.init(context);
    stage.preProcess();
    try {
      stage.process(event);
      Assert
          .fail("Should fail since participant version is less than the minimum participant version supported by controller");
    } catch (Exception e) {
      // expected: the stage rejects participants below the minimum supported version
    }
    stage.postProcess();
  }
}
| 9,726 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestRebalancePipeline.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.pipeline.Pipeline;
import org.apache.helix.controller.pipeline.StageException;
import org.apache.helix.controller.stages.resource.ResourceMessageDispatchStage;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;
import org.apache.helix.model.Partition;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestRebalancePipeline extends ZkUnitTestBase {
private final String _className = getShortClassName();
  /**
   * Verifies that a stale (already-acted-upon) state transition message does not block the
   * rebalance pipeline: after node0 reaches SLAVE while its old OFFLINE->SLAVE message is still
   * present, the controller must still emit SLAVE->MASTER, track the old message as stale, and
   * eventually purge it. Runs the real controller pipelines against a ZK-backed test cluster.
   */
  @Test
  public void testDuplicateMsg() throws Exception {
    String clusterName = "CLUSTER_" + _className + "_dup";
    System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));

    HelixDataAccessor accessor =
        new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
    refreshClusterConfig(clusterName, accessor);
    HelixManager manager =
        new DummyClusterManager(clusterName, accessor, Long.toHexString(_gZkClient.getSessionId()));
    ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
    event.addAttribute(AttributeName.helixmanager.name(), manager);
    ResourceControllerDataProvider dataCache = new ResourceControllerDataProvider();
    // The AsyncTasksThreadPool needs to be set, otherwise to start pending message cleanup job
    // will throw NPE and stop the pipeline. TODO: https://github.com/apache/helix/issues/1158
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    dataCache.setAsyncTasksThreadPool(executorService);
    event.addAttribute(AttributeName.ControllerDataProvider.name(), dataCache);
    final String resourceName = "testResource_dup";
    String[] resourceGroups = new String[] {
        resourceName
    };

    // ideal state: node0 is MASTER, node1 is SLAVE
    // replica=2 means 1 master and 1 slave
    setupIdealState(clusterName, new int[] {
        0
    }, resourceGroups, 1, 1);
    List<LiveInstance> liveInstances = setupLiveInstances(clusterName, new int[] {
        0
    });
    setupStateModel(clusterName);

    // cluster data cache refresh pipeline
    Pipeline dataRefresh = new Pipeline();
    dataRefresh.addStage(new ReadClusterDataStage());

    // rebalance pipeline
    Pipeline rebalancePipeline = new Pipeline();
    rebalancePipeline.addStage(new ResourceComputationStage());
    rebalancePipeline.addStage(new CurrentStateComputationStage());
    rebalancePipeline.addStage(new BestPossibleStateCalcStage());
    rebalancePipeline.addStage(new MessageGenerationPhase());
    rebalancePipeline.addStage(new MessageSelectionStage());
    rebalancePipeline.addStage(new IntermediateStateCalcStage());
    rebalancePipeline.addStage(new MessageThrottleStage());
    rebalancePipeline.addStage(new ResourceMessageDispatchStage());

    // round1: set node0 currentState to OFFLINE
    setCurrentState(clusterName, "localhost_0", resourceName, resourceName + "_0", liveInstances.get(0).getEphemeralOwner(),
        "OFFLINE");

    runPipeline(event, dataRefresh, false);
    runPipeline(event, rebalancePipeline, false);
    // Round 1 expectation: one OFFLINE->SLAVE message targeted at node0.
    MessageOutput msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
    List<Message> messages =
        msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0"));
    Assert.assertEquals(messages.size(), 1, "Should output 1 message: OFFLINE-SLAVE for node0");
    Message message = messages.get(0);
    Assert.assertEquals(message.getFromState(), "OFFLINE");
    Assert.assertEquals(message.getToState(), "SLAVE");
    Assert.assertEquals(message.getTgtName(), "localhost_0");

    // round2: updates node0 currentState to SLAVE but keep the
    // message, make sure controller should not wait for the message to be deleted, but should
    // send out a S -> M message to node0
    setCurrentState(clusterName, "localhost_0", resourceName, resourceName + "_0",
        liveInstances.get(0).getEphemeralOwner(), "SLAVE");

    runPipeline(event, dataRefresh, false);
    refreshClusterConfig(clusterName, accessor);

    // Split the rebalance pipeline so the stale-message bookkeeping can be inspected between
    // the computation stages and the message stages.
    Pipeline computationPipeline = new Pipeline();
    computationPipeline.addStage(new ResourceComputationStage());
    computationPipeline.addStage(new CurrentStateComputationStage());

    Pipeline messagePipeline = new Pipeline();
    messagePipeline.addStage(new BestPossibleStateCalcStage());
    messagePipeline.addStage(new MessageGenerationPhase());
    messagePipeline.addStage(new MessageSelectionStage());
    messagePipeline.addStage(new IntermediateStateCalcStage());
    messagePipeline.addStage(new MessageThrottleStage());
    messagePipeline.addStage(new ResourceMessageDispatchStage());

    runPipeline(event, computationPipeline, false);
    // The round-1 message should now be tracked as stale for node0.
    Map<String, Map<String, Message>> staleMessages = dataCache.getStaleMessages();
    Assert.assertEquals(staleMessages.size(), 1);
    Assert.assertTrue(staleMessages.containsKey("localhost_0"));
    Assert.assertTrue(staleMessages.get("localhost_0").containsKey(message.getMsgId()));
    runPipeline(event, messagePipeline, false);
    msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
    messages = msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0"));
    Assert.assertEquals(messages.size(), 1, "Should output 1 message: SLAVE-MASTER for node0");
    Assert.assertTrue(messages.get(0).getTgtName().equalsIgnoreCase("localhost_0"));
    Assert.assertTrue(messages.get(0).getFromState().equalsIgnoreCase("SLAVE"));
    Assert.assertTrue(messages.get(0).getToState().equalsIgnoreCase("MASTER"));

    // Wait past the purge delay so the async cleanup job can remove the stale message.
    Thread.sleep(2 * MessageGenerationPhase.DEFAULT_OBSELETE_MSG_PURGE_DELAY);
    runPipeline(event, dataRefresh, false);
    // Verify the stale message should be deleted
    Assert.assertTrue(TestHelper.verify(() -> {
      if (dataCache.getStaleMessages().size() != 0) {
        return false;
      }
      return true;
    }, TestHelper.WAIT_DURATION));

    deleteLiveInstances(clusterName);
    deleteCluster(clusterName);
    executorService.shutdown();
    System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
  }
  /**
   * End-to-end pipeline test around stale-message garbage collection by the controller.
   * Scenario: participants update their current states but "fail" to delete the corresponding
   * state-transition messages. After {@code DEFAULT_OBSELETE_MSG_PURGE_DELAY} the controller's
   * rebalancer pipeline must purge those obsolete messages and continue rebalancing; duplicated
   * or completely invalid messages must also be cleaned up promptly.
   *
   * @throws Exception on any test-infrastructure failure (ZK setup, verification timeouts)
   */
  @Test
  public void testMsgTriggeredRebalance() throws Exception {
    String clusterName = "CLUSTER_" + _className + "_msgTrigger";
    System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
    HelixDataAccessor accessor =
        new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
    refreshClusterConfig(clusterName, accessor);
    final String resourceName = "testResource_dup";
    String[] resourceGroups = new String[] {
        resourceName
    };
    TestHelper.setupEmptyCluster(_gZkClient, clusterName);
    // ideal state: node0 is MASTER, node1 is SLAVE
    // replica=2 means 1 master and 1 slave
    setupIdealState(clusterName, new int[] {
        0, 1
    }, resourceGroups, 1, 2);
    setupStateModel(clusterName);
    setupInstances(clusterName, new int[] {
        0, 1
    });
    List<LiveInstance> liveInstances = setupLiveInstances(clusterName, new int[] {
        0, 1
    });
    long msgPurgeDelay = MessageGenerationPhase.DEFAULT_OBSELETE_MSG_PURGE_DELAY;
    // Use a real controller (not manually-driven pipelines) so the purge delay logic runs
    // on its own schedule.
    ClusterControllerManager controller =
        new ClusterControllerManager(ZK_ADDR, clusterName, "controller_0");
    controller.syncStart();
    // round1: controller sends O->S to both node0 and node1
    Builder keyBuilder = accessor.keyBuilder();
    Assert.assertTrue(TestHelper.verify(() -> {
      for (LiveInstance liveInstance : liveInstances) {
        List<String> messages =
            accessor.getChildNames(keyBuilder.messages(liveInstance.getInstanceName()));
        if (messages.size() < 1) {
          return false;
        }
      }
      return true;
    }, TestHelper.WAIT_DURATION));
    // round2: node0 and node1 update current states but not removing messages
    // Since controller's rebalancer pipeline will GC pending messages after timeout, and both hosts
    // update current states to SLAVE, controller will send out rebalance message to
    // have one host to become master
    setCurrentState(clusterName, "localhost_0", resourceName, resourceName + "_0", liveInstances.get(0).getEphemeralOwner(),
        "SLAVE", true);
    setCurrentState(clusterName, "localhost_1", resourceName, resourceName + "_0", liveInstances.get(1).getEphemeralOwner(),
        "SLAVE", true);
    // Controller has timeout > 1sec, so within 1s, controller should not have GCed message
    Assert.assertTrue(msgPurgeDelay > 1000);
    // Negative check: the stale messages must still be present for the full wait duration,
    // proving the controller does NOT purge them before the delay elapses.
    Assert.assertFalse(TestHelper.verify(() -> {
      for (LiveInstance liveInstance : liveInstances) {
        List<String> messages =
            accessor.getChildNames(keyBuilder.messages(liveInstance.getInstanceName()));
        if (messages.size() >= 1) {
          return false;
        }
      }
      return true;
    }, TestHelper.WAIT_DURATION));
    // After another purge delay, controller should cleanup messages and continue to rebalance
    Thread.sleep(msgPurgeDelay);
    // Manually trigger another rebalance by touching current state
    List<Message> allMsgs = new ArrayList<>();
    setCurrentState(clusterName, "localhost_0", resourceName, resourceName + "_0",
        liveInstances.get(0).getEphemeralOwner(), "SLAVE");
    // Expect exactly one SLAVE->MASTER promotion message across both instances.
    Assert.assertTrue(TestHelper.verify(() -> {
      allMsgs.clear();
      for (LiveInstance liveInstance : liveInstances) {
        allMsgs.addAll(accessor.getChildValues(keyBuilder.messages(liveInstance.getInstanceName()),
            true));
      }
      if (allMsgs.size() != 1 || !allMsgs.get(0).getToState().equals("MASTER") || !allMsgs.get(0)
          .getFromState().equals("SLAVE")) {
        return false;
      }
      return true;
    }, TestHelper.WAIT_DURATION));
    // round3: node0 changes state to master, but failed to delete message,
    // controller will clean it up
    setCurrentState(clusterName, "localhost_0", resourceName, resourceName + "_0", liveInstances.get(0).getEphemeralOwner(),
        "MASTER", true);
    Thread.sleep(msgPurgeDelay);
    // touch current state to trigger rebalance
    setCurrentState(clusterName, "localhost_0", resourceName, resourceName + "_0", liveInstances.get(0).getEphemeralOwner(),
        "MASTER", false);
    Assert.assertTrue(TestHelper.verify(() -> accessor.getChildNames(keyBuilder.messages("localhost_0")).isEmpty(), 2000));
    // round4: node0 has duplicated but valid message, i.e. there is a P2P message sent to it
    // due to error in the triggered pipeline, controller should remove duplicated message
    // immediately as the partition has became master 3 sec ago (there is already a timeout)
    Message sourceMsg = allMsgs.get(0);
    // Clone the original promotion message under a fresh ID to simulate a duplicate.
    Message dupMsg = new Message(sourceMsg.getMsgType(), UUID.randomUUID().toString());
    dupMsg.getRecord().setSimpleFields(sourceMsg.getRecord().getSimpleFields());
    dupMsg.getRecord().setListFields(sourceMsg.getRecord().getListFields());
    dupMsg.getRecord().setMapFields(sourceMsg.getRecord().getMapFields());
    accessor.setProperty(dupMsg.getKey(accessor.keyBuilder(), dupMsg.getTgtName()), dupMsg);
    Assert.assertTrue(TestHelper.verify(() -> accessor.getChildNames(keyBuilder.messages("localhost_0")).isEmpty(), 1500));
    // round5: node0 has completely invalid message, controller should immediately delete it
    dupMsg.setFromState("SLAVE");
    dupMsg.setToState("OFFLINE");
    accessor.setProperty(dupMsg.getKey(accessor.keyBuilder(), dupMsg.getTgtName()), dupMsg);
    Assert.assertTrue(TestHelper.verify(() -> accessor.getChildNames(keyBuilder.messages("localhost_0")).isEmpty(), 1500));
    if (controller.isConnected()) {
      controller.syncStop();
    }
    deleteLiveInstances(clusterName);
    deleteCluster(clusterName);
    System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
  }
@Test
public void testChangeIdealStateWithPendingMsg() throws Exception {
String clusterName = "CLUSTER_" + _className + "_pending";
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
admin.addCluster(clusterName);
HelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
HelixManager manager =
new DummyClusterManager(clusterName, accessor, Long.toHexString(_gZkClient.getSessionId()));
ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
event.addAttribute(AttributeName.helixmanager.name(), manager);
ResourceControllerDataProvider cache = new ResourceControllerDataProvider();
event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);
refreshClusterConfig(clusterName, accessor);
final String resourceName = "testResource_pending";
String[] resourceGroups = new String[] {
resourceName
};
// ideal state: node0 is MASTER, node1 is SLAVE
// replica=2 means 1 master and 1 slave
setupIdealState(clusterName, new int[] {
0
}, resourceGroups, 1, 1);
List<LiveInstance> liveInstances = setupLiveInstances(clusterName, new int[] {
0
});
setupStateModel(clusterName);
// cluster data cache refresh pipeline
Pipeline dataRefresh = new Pipeline();
dataRefresh.addStage(new ReadClusterDataStage());
// rebalance pipeline
Pipeline rebalancePipeline = new Pipeline();
rebalancePipeline.addStage(new ResourceComputationStage());
rebalancePipeline.addStage(new CurrentStateComputationStage());
rebalancePipeline.addStage(new BestPossibleStateCalcStage());
rebalancePipeline.addStage(new MessageGenerationPhase());
rebalancePipeline.addStage(new MessageSelectionStage());
rebalancePipeline.addStage(new IntermediateStateCalcStage());
rebalancePipeline.addStage(new MessageThrottleStage());
rebalancePipeline.addStage(new ResourceMessageDispatchStage());
// round1: set node0 currentState to OFFLINE and node1 currentState to SLAVE
setCurrentState(clusterName, "localhost_0", resourceName, resourceName + "_0", liveInstances.get(0).getEphemeralOwner(),
"OFFLINE");
runPipeline(event, dataRefresh, false);
runPipeline(event, rebalancePipeline, false);
MessageOutput msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
List<Message> messages =
msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0"));
Assert.assertEquals(messages.size(), 1, "Should output 1 message: OFFLINE-SLAVE for node0");
Message message = messages.get(0);
Assert.assertEquals(message.getFromState(), "OFFLINE");
Assert.assertEquals(message.getToState(), "SLAVE");
Assert.assertEquals(message.getTgtName(), "localhost_0");
// round2: drop resource, but keep the
// message, make sure controller should not send O->DROPPED until O->S is done
admin.dropResource(clusterName, resourceName);
List<IdealState> idealStates = accessor.getChildValues(accessor.keyBuilder().idealStates(), true);
cache.setIdealStates(idealStates);
runPipeline(event, dataRefresh, false);
cache = event.getAttribute(AttributeName.ControllerDataProvider.name());
cache.setClusterConfig(new ClusterConfig(clusterName));
runPipeline(event, rebalancePipeline, false);
msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0"));
Assert.assertEquals(messages.size(), 0,
"Should not output only 1 message: OFFLINE->DROPPED for localhost_0");
// round3: remove O->S message for localhost_0, localhost_0 still in OFFLINE
// controller should now send O->DROPPED to localhost_0
Builder keyBuilder = accessor.keyBuilder();
List<String> msgIds = accessor.getChildNames(keyBuilder.messages("localhost_0"));
accessor.removeProperty(keyBuilder.message("localhost_0", msgIds.get(0)));
runPipeline(event, dataRefresh, false);
runPipeline(event, rebalancePipeline, false);
msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0"));
Assert.assertEquals(messages.size(), 1,
"Should output 1 message: OFFLINE->DROPPED for localhost_0");
message = messages.get(0);
Assert.assertEquals(message.getFromState(), "OFFLINE");
Assert.assertEquals(message.getToState(), "DROPPED");
Assert.assertEquals(message.getTgtName(), "localhost_0");
deleteLiveInstances(clusterName);
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
  /**
   * Verifies mastership transfer ordering: when node1 is already SLAVE and would be promoted,
   * but node0 comes alive with a SLAVE current state, the controller must not emit
   * SLAVE->MASTER for node0 while the earlier promotion of node1 is still in flight.
   *
   * @throws Exception on any test-infrastructure failure
   */
  @Test
  public void testMasterXfer() throws Exception {
    String clusterName = "CLUSTER_" + _className + "_xfer";
    System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
    HelixDataAccessor accessor =
        new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
    HelixManager manager =
        new DummyClusterManager(clusterName, accessor, Long.toHexString(_gZkClient.getSessionId()));
    ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
    event.addAttribute(AttributeName.helixmanager.name(), manager);
    event.addAttribute(AttributeName.ControllerDataProvider.name(),
        new ResourceControllerDataProvider());
    refreshClusterConfig(clusterName, accessor);
    final String resourceName = "testResource_xfer";
    String[] resourceGroups = new String[] {
        resourceName
    };
    // ideal state: node0 is MASTER, node1 is SLAVE
    // replica=2 means 1 master and 1 slave
    setupIdealState(clusterName, new int[] {
        0, 1
    }, resourceGroups, 1, 2);
    // Only node1 is live initially; node0 joins later in round2.
    List<LiveInstance> liveInstances = setupLiveInstances(clusterName, new int[] {
        1
    });
    setupStateModel(clusterName);
    // cluster data cache refresh pipeline
    Pipeline dataRefresh = new Pipeline();
    dataRefresh.addStage(new ReadClusterDataStage());
    // rebalance pipeline
    Pipeline rebalancePipeline = new Pipeline();
    rebalancePipeline.addStage(new ResourceComputationStage());
    rebalancePipeline.addStage(new CurrentStateComputationStage());
    rebalancePipeline.addStage(new BestPossibleStateCalcStage());
    rebalancePipeline.addStage(new MessageGenerationPhase());
    rebalancePipeline.addStage(new MessageSelectionStage());
    rebalancePipeline.addStage(new IntermediateStateCalcStage());
    rebalancePipeline.addStage(new MessageThrottleStage());
    rebalancePipeline.addStage(new ResourceMessageDispatchStage());
    // round1: set node1 currentState to SLAVE; it is the only live node, so it gets promoted
    setCurrentState(clusterName, "localhost_1", resourceName, resourceName + "_0", liveInstances.get(0).getEphemeralOwner(),
        "SLAVE");
    runPipeline(event, dataRefresh, false);
    runPipeline(event, rebalancePipeline, false);
    MessageOutput msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
    List<Message> messages =
        msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0"));
    Assert.assertEquals(messages.size(), 1, "Should output 1 message: SLAVE-MASTER for node1");
    Message message = messages.get(0);
    Assert.assertEquals(message.getFromState(), "SLAVE");
    Assert.assertEquals(message.getToState(), "MASTER");
    Assert.assertEquals(message.getTgtName(), "localhost_1");
    // round2: updates node0 currentState to SLAVE but keep the
    // message, make sure controller should not send S->M until removal is done
    setupLiveInstances(clusterName, new int[] {
        0
    });
    // NOTE(review): session id is hard-coded to "session_0" here, while sibling tests use
    // liveInstances.get(i).getEphemeralOwner() — confirm setupLiveInstances registers node0
    // under this exact session id.
    setCurrentState(clusterName, "localhost_0", resourceName, resourceName + "_0", "session_0",
        "SLAVE");
    runPipeline(event, dataRefresh, false);
    runPipeline(event, rebalancePipeline, false);
    msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
    messages = msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0"));
    Assert.assertEquals(messages.size(), 0, "Should NOT output 1 message: SLAVE-MASTER for node0");
    deleteLiveInstances(clusterName);
    deleteCluster(clusterName);
    System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
  }
@Test
public void testNoDuplicatedMaster() throws Exception {
String clusterName = "CLUSTER_" + _className + "_no_duplicated_master";
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
HelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
HelixManager manager =
new DummyClusterManager(clusterName, accessor, Long.toHexString(_gZkClient.getSessionId()));
ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
event.addAttribute(AttributeName.helixmanager.name(), manager);
event.addAttribute(AttributeName.ControllerDataProvider.name(),
new ResourceControllerDataProvider());
refreshClusterConfig(clusterName, accessor);
final String resourceName = "testResource_no_duplicated_master";
String[] resourceGroups = new String[] {
resourceName
};
// ideal state: node0 is SLAVE, node1 is MASTER
// replica=2 means 1 master and 1 slave
setupIdealState(clusterName, new int[] {
0, 1
}, resourceGroups, 1, 2);
List<LiveInstance> liveInstances = setupLiveInstances(clusterName, new int[] {
0, 1
});
setupStateModel(clusterName);
// cluster data cache refresh pipeline
Pipeline dataRefresh = new Pipeline();
dataRefresh.addStage(new ReadClusterDataStage());
// rebalance pipeline
Pipeline rebalancePipeline = new Pipeline();
rebalancePipeline.addStage(new ResourceComputationStage());
rebalancePipeline.addStage(new CurrentStateComputationStage());
rebalancePipeline.addStage(new BestPossibleStateCalcStage());
rebalancePipeline.addStage(new MessageGenerationPhase());
rebalancePipeline.addStage(new MessageSelectionStage());
rebalancePipeline.addStage(new IntermediateStateCalcStage());
rebalancePipeline.addStage(new MessageThrottleStage());
rebalancePipeline.addStage(new ResourceMessageDispatchStage());
// set node0 currentState to SLAVE, node1 currentState to MASTER
// Helix will try to switch the state of the two instances, but it should not be two MASTER at
// the same time
// so it should first transit M->S, then transit another instance S->M
setCurrentState(clusterName, "localhost_0", resourceName, resourceName + "_0", liveInstances.get(0).getEphemeralOwner(),
"SLAVE");
setCurrentState(clusterName, "localhost_1", resourceName, resourceName + "_0", liveInstances.get(1).getEphemeralOwner(),
"MASTER");
runPipeline(event, dataRefresh, false);
runPipeline(event, rebalancePipeline, false);
MessageOutput msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
List<Message> messages =
msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0"));
Assert.assertEquals(messages.size(), 1,
"Should output 1 message: MASTER-SLAVE for localhost_1");
Message message = messages.get(0);
Assert.assertEquals(message.getFromState(), "MASTER");
Assert.assertEquals(message.getToState(), "SLAVE");
Assert.assertEquals(message.getTgtName(), "localhost_1");
deleteLiveInstances(clusterName);
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
  /*
   * Tests if controller loses leadership when pipeline is still running, messages should not be
   * sent to participants. No message sent on leadership loss prevents double masters issue.
   * This test simulates that controller leader's zk session changes after ReadClusterDataStage
   * and so state transition messages are not sent out and stage exception is thrown to terminate
   * the pipeline.
   */
  @Test
  public void testNoMessageSentOnControllerLeadershipLoss() throws Exception {
    String methodName = TestHelper.getTestMethodName();
    String clusterName = _className + "_" + methodName;
    final String resourceName = "testResource_" + methodName;
    final String partitionName = resourceName + "_0";
    String[] resourceGroups = new String[] {
        resourceName
    };
    // ideal state: localhost_0 is MASTER
    // replica=1 means 1 master
    setupIdealState(clusterName, new int[] {
        0
    }, resourceGroups, 1, 1);
    List<LiveInstance> liveInstances = setupLiveInstances(clusterName, new int[] {
        0
    });
    setupStateModel(clusterName);
    HelixDataAccessor accessor =
        new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
    DummyClusterManager manager =
        new DummyClusterManager(clusterName, accessor, Long.toHexString(_gZkClient.getSessionId()));
    ClusterEvent event = new ClusterEvent(clusterName, ClusterEventType.OnDemandRebalance);
    event.addAttribute(AttributeName.helixmanager.name(), manager);
    event.addAttribute(AttributeName.ControllerDataProvider.name(),
        new ResourceControllerDataProvider());
    // EVENT_SESSION pins the session the pipeline was started under; the session-change check
    // below compares against it.
    event.addAttribute(AttributeName.EVENT_SESSION.name(), Optional.of(manager.getSessionId()))
    refreshClusterConfig(clusterName, accessor);
    // cluster data cache refresh pipeline
    Pipeline dataRefresh = new Pipeline();
    dataRefresh.addStage(new ReadClusterDataStage());
    // rebalance pipeline
    Pipeline rebalancePipeline = new Pipeline();
    rebalancePipeline.addStage(new ResourceComputationStage());
    rebalancePipeline.addStage(new CurrentStateComputationStage());
    rebalancePipeline.addStage(new BestPossibleStateCalcStage());
    rebalancePipeline.addStage(new MessageGenerationPhase());
    rebalancePipeline.addStage(new MessageSelectionStage());
    rebalancePipeline.addStage(new IntermediateStateCalcStage());
    rebalancePipeline.addStage(new MessageThrottleStage());
    rebalancePipeline.addStage(new ResourceMessageDispatchStage());
    // set localhost_0 currentState to SLAVE; Helix should try to transit it SLAVE->MASTER
    // (only one node exists in this cluster)
    setCurrentState(clusterName, "localhost_0", resourceName, partitionName,
        liveInstances.get(0).getEphemeralOwner(), "SLAVE");
    runPipeline(event, dataRefresh, false);
    // After data refresh, controller loses leadership and its session id changes.
    manager.setSessionId(manager.getSessionId() + "_new");
    try {
      // Because leader loses leadership, StageException should be thrown and message is not sent.
      runPipeline(event, rebalancePipeline, true);
      Assert.fail("StageException should be thrown because controller leader session changed.");
    } catch (StageException e) {
      Assert.assertTrue(
          e.getMessage().matches("Event session doesn't match controller .* Expected session: .*"));
    }
    // Verify the ST message not being sent out is the expected one to
    // transit the replica SLAVE->MASTER
    MessageOutput msgThrottleOutput = event.getAttribute(AttributeName.MESSAGES_THROTTLE.name());
    List<Message> messages =
        msgThrottleOutput.getMessages(resourceName, new Partition(partitionName));
    Assert.assertEquals(messages.size(), 1,
        "Should output 1 message: SLAVE->MASTER for localhost_0");
    Message message = messages.get(0);
    Assert.assertEquals(message.getFromState(), "SLAVE");
    Assert.assertEquals(message.getToState(), "MASTER");
    Assert.assertEquals(message.getTgtName(), "localhost_0");
    deleteLiveInstances(clusterName);
    deleteCluster(clusterName);
  }
  /**
   * Convenience overload of
   * {@link #setCurrentState(String, String, String, String, String, String, boolean)}
   * that writes the replica's current state without updating the transition end-time stamp.
   */
  protected void setCurrentState(String clusterName, String instance, String resourceGroupName,
      String resourceKey, String sessionId, String state) {
    setCurrentState(clusterName, instance, resourceGroupName, resourceKey, sessionId, state, false);
  }
private void setCurrentState(String clusterName, String instance, String resourceGroupName,
String resourceKey, String sessionId, String state, boolean updateTimestamp) {
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
Builder keyBuilder = accessor.keyBuilder();
CurrentState curState = new CurrentState(resourceGroupName);
curState.setState(resourceKey, state);
curState.setSessionId(sessionId);
curState.setStateModelDefRef("MasterSlave");
if (updateTimestamp) {
curState.setEndTime(resourceKey, System.currentTimeMillis());
}
accessor.setProperty(keyBuilder.currentState(instance, sessionId, resourceGroupName), curState);
}
private void refreshClusterConfig(String clusterName, HelixDataAccessor accessor) {
accessor.setProperty(accessor.keyBuilder().clusterConfig(), new ClusterConfig(clusterName));
}
}
| 9,727 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestReplicaLevelThrottling.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.stream.Collectors;
import org.apache.helix.api.config.StateTransitionThrottleConfig;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.model.builder.FullAutoModeISBuilder;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import static org.mockito.Mockito.*;
public class TestReplicaLevelThrottling extends BaseStageTest {
static final String CLUSTER_NAME = "TestCluster";
static final String RESOURCE_NAME = "TestResource";
static final String NOT_SET = "-1";
static final String DEFAULT_ERROR_THRESHOLD = String.valueOf(Integer.MAX_VALUE);
@Test(dataProvider = "replicaLevelThrottlingInput")
public void testPerReplicaThrottling(ClusterEvent event, Map<String, Map<String, String>> expectedOutput,
Map<String, Object> cacheMap, Mock mock) {
prepareCache(cacheMap, mock);
runStage(event, new IntermediateStateCalcStage());
Assert.assertTrue(matches(event, expectedOutput));
}
// Prepare the cache since it is well encapsulated, there is no way to set the cache things.
// Also if we move this piece in data loading, the temporary created mock object and ClusterConfig will be overrided
// by following test cases data.
private void prepareCache(Map<String, Object> cacheMap, Mock mock) {
when(mock.cache.getClusterConfig()).thenReturn((ClusterConfig) cacheMap.get(CacheKeys.clusterConfig.name()));
when(mock.cache.getStateModelDef((String) cacheMap.get(CacheKeys.stateModelName.name()))).thenReturn(
(StateModelDefinition) cacheMap.get(CacheKeys.stateModelDef.name()));
when(mock.cache.getEnabledLiveInstances()).thenReturn(new HashSet<>(
((Map<String, List<String>>) cacheMap.get(CacheKeys.preferenceList.name())).values().iterator().next()));
when(mock.cache.getLiveInstances()).thenReturn(new HashSet<>(
((Map<String, List<String>>) cacheMap.get(CacheKeys.preferenceList.name())).values().iterator().next()).stream()
.collect(Collectors.toMap(e -> e, e -> new LiveInstance(e))));
when(mock.cache.getIdealState(RESOURCE_NAME)).thenReturn(
new FullAutoModeISBuilder(RESOURCE_NAME).setMinActiveReplica(
(Integer) cacheMap.get(CacheKeys.minActiveReplica.name()))
.setNumReplica((Integer) cacheMap.get(CacheKeys.numReplica.name()))
.setStateModel((String) cacheMap.get(CacheKeys.stateModelName.name()))
.setNumPartitions(2)
.setRebalancerMode(IdealState.RebalanceMode.FULL_AUTO)
.build());
}
private boolean matches(ClusterEvent event, Map<String, Map<String, String>> expectedOutPut) {
Map<Partition, Map<String, String>> intermediateResult =
((IntermediateStateOutput) event.getAttribute(AttributeName.INTERMEDIATE_STATE.name())).getPartitionStateMap(
RESOURCE_NAME).getStateMap();
for (Partition partition : intermediateResult.keySet()) {
if (!expectedOutPut.containsKey(partition.getPartitionName()) || !expectedOutPut.get(partition.getPartitionName())
.equals(intermediateResult.get(partition))) {
return false;
}
}
return true;
}
@DataProvider(name = "replicaLevelThrottlingInput")
public Object[][] rebalanceStrategies() {
List<Object[]> data = new ArrayList<>();
data.addAll(loadTestInputs("TestReplicaLevelThrottling.SingleTopState.json"));
data.addAll(loadTestInputs("TestReplicaLevelThrottling.MultiTopStates.json"));
Object[][] ret = new Object[data.size()][];
for (int i = 0; i < data.size(); i++) {
ret[i] = data.get(i);
}
return ret;
}
enum Entry {
stateModel,
numReplica,
minActiveReplica,
testCases,
// Per Test
partitionNames,
messageOutput, // instance -> target state of message
bestPossible,
preferenceList,
clusterThrottleLoad,
resourceThrottleLoad,
instanceThrottleLoad,
instanceThrottleRecovery,
currentStates,
pendingMessages,
expectedOutput,
errorThreshold
}
enum CacheKeys {
clusterConfig,
stateModelName,
stateModelDef,
minActiveReplica,
numReplica,
preferenceList
}
public List<Object[]> loadTestInputs(String fileName) {
List<Object[]> ret = null;
InputStream inputStream = getClass().getClassLoader().getResourceAsStream(fileName);
try {
ObjectReader mapReader = new ObjectMapper().reader(Map.class);
Map<String, Object> inputMaps = mapReader.readValue(inputStream);
String stateModelName = (String) inputMaps.get(Entry.stateModel.name());
StateModelDefinition stateModelDef =
BuiltInStateModelDefinitions.valueOf(stateModelName).getStateModelDefinition();
int minActiveReplica = Integer.parseInt((String) inputMaps.get(Entry.minActiveReplica.name()));
int numReplica = Integer.parseInt((String) inputMaps.get(Entry.numReplica.name()));
List<Map<String, Object>> inputs = (List<Map<String, Object>>) inputMaps.get(Entry.testCases.name());
ret = new ArrayList<>();
Mock mock = new Mock();
for (Map<String, Object> inMap : inputs) {
Resource resource = new Resource(RESOURCE_NAME);
CurrentStateOutput currentStateOutput = new CurrentStateOutput();
MessageOutput messageOutput = new MessageOutput();
BestPossibleStateOutput bestPossibleStateOutput = new BestPossibleStateOutput();
Map<String, List<String>> preferenceLists = (Map<String, List<String>>) inMap.get(Entry.preferenceList.name());
Map<String, Map<String, String>> pendingMessages =
(Map<String, Map<String, String>>) inMap.get(Entry.pendingMessages.name());
Map<String, Map<String, String>> currentStates =
(Map<String, Map<String, String>>) inMap.get(Entry.currentStates.name());
Map<String, Map<String, String>> bestPossible =
(Map<String, Map<String, String>>) inMap.get(Entry.bestPossible.name());
Map<String, Map<String, String>> messageMap =
(Map<String, Map<String, String>>) inMap.get(Entry.messageOutput.name());
for (String partition : (List<String>) inMap.get(Entry.partitionNames.name())) {
resource.addPartition(partition);
bestPossibleStateOutput.setPreferenceList(RESOURCE_NAME, partition, preferenceLists.get(partition));
bestPossibleStateOutput.setState(RESOURCE_NAME, resource.getPartition(partition),
bestPossible.get(partition));
List<Message> messages = generateMessages(messageMap.get(partition), currentStates.get(partition));
messageOutput.addMessages(RESOURCE_NAME, resource.getPartition(partition), messages);
currentStates.get(partition)
.entrySet()
.forEach(
e -> currentStateOutput.setCurrentState(resource.getResourceName(), resource.getPartition(partition),
e.getKey(), e.getValue()));
generateMessages(pendingMessages.get(partition), currentStates.get(partition)).forEach(
m -> currentStateOutput.setPendingMessage(resource.getResourceName(), resource.getPartition(partition),
m.getTgtName(), m));
}
ClusterEvent event = new ClusterEvent(CLUSTER_NAME, ClusterEventType.Unknown);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput); // add current states
event.addAttribute(AttributeName.ControllerDataProvider.name(),
buildCache(mock, numReplica, minActiveReplica, stateModelDef, stateModelName, preferenceLists));
event.addAttribute(AttributeName.MESSAGES_SELECTED.name(), messageOutput);
event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), bestPossibleStateOutput);
event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(),
Collections.singletonMap(RESOURCE_NAME, resource));
Map<String, Map<String, String>> expectedOutput =
(Map<String, Map<String, String>>) inMap.get(Entry.expectedOutput.name());
// Build throttle configs
ClusterConfig clusterConfig = new ClusterConfig(CLUSTER_NAME);
List<StateTransitionThrottleConfig> throttleConfigs = new ArrayList<>();
getSingleThrottleEntry(StateTransitionThrottleConfig.RebalanceType.LOAD_BALANCE,
StateTransitionThrottleConfig.ThrottleScope.CLUSTER, Entry.clusterThrottleLoad.name(), throttleConfigs,
inMap);
getSingleThrottleEntry(StateTransitionThrottleConfig.RebalanceType.LOAD_BALANCE,
StateTransitionThrottleConfig.ThrottleScope.RESOURCE, Entry.resourceThrottleLoad.name(), throttleConfigs,
inMap);
getSingleThrottleEntry(StateTransitionThrottleConfig.RebalanceType.LOAD_BALANCE,
StateTransitionThrottleConfig.ThrottleScope.INSTANCE, Entry.instanceThrottleLoad.name(), throttleConfigs,
inMap);
getSingleThrottleEntry(StateTransitionThrottleConfig.RebalanceType.RECOVERY_BALANCE,
StateTransitionThrottleConfig.ThrottleScope.INSTANCE, Entry.instanceThrottleRecovery.name(),
throttleConfigs, inMap);
clusterConfig.setStateTransitionThrottleConfigs(throttleConfigs);
clusterConfig.setErrorPartitionThresholdForLoadBalance(Integer.parseInt(
(String) inMap.getOrDefault(Entry.errorThreshold.name(), DEFAULT_ERROR_THRESHOLD)));
Map<String, Object> cacheMap = new HashMap<>();
cacheMap.put(CacheKeys.clusterConfig.name(), clusterConfig);
cacheMap.put(CacheKeys.stateModelName.name(), stateModelName);
cacheMap.put(CacheKeys.stateModelDef.name(), stateModelDef);
cacheMap.put(CacheKeys.preferenceList.name(), preferenceLists);
cacheMap.put(CacheKeys.minActiveReplica.name(), minActiveReplica);
cacheMap.put(CacheKeys.numReplica.name(), numReplica);
ret.add(new Object[]{event, expectedOutput, cacheMap, mock});
}
} catch (IOException e) {
e.printStackTrace();
}
return ret;
}
/**
 * Builds one state-transition {@link Message} per (instance -> targetState) entry in
 * {@code messageMap}, using the instance's current state as the from-state.
 * Returns an empty list when either input map is {@code null}.
 */
private List<Message> generateMessages(Map<String, String> messageMap, Map<String, String> currentStates) {
  if (messageMap == null || currentStates == null) {
    return Collections.emptyList();
  }
  List<Message> result = new ArrayList<>(messageMap.size());
  messageMap.forEach((instance, targetState) -> {
    Message message = new Message(new ZNRecord(UUID.randomUUID().toString()));
    message.setFromState(currentStates.get(instance));
    message.setToState(targetState);
    message.setTgtName(instance);
    result.add(message);
  });
  return result;
}
// Returns the mocked ResourceControllerDataProvider held by the Mock container.
// NOTE(review): all parameters other than `mock` are currently unused — presumably kept
// for signature parity with an earlier implementation; confirm before removing them.
private ResourceControllerDataProvider buildCache(Mock mock, int numReplica, int minActive,
    StateModelDefinition stateModelDefinition, String stateModel, Map<String, List<String>> preferenceLists) {
  return mock.cache;
}
/**
 * Appends one throttle config of the given rebalance type and scope to
 * {@code throttleConfigs}, reading its numeric limit from {@code inMap} under
 * {@code entryName}. Entries that are absent or equal to the NOT_SET sentinel are skipped.
 *
 * @throws NumberFormatException if the entry value is present but not a parseable integer
 */
private void getSingleThrottleEntry(StateTransitionThrottleConfig.RebalanceType rebalanceType,
    StateTransitionThrottleConfig.ThrottleScope throttleScope, String entryName,
    List<StateTransitionThrottleConfig> throttleConfigs, Map<String, Object> inMap) {
  Object limit = inMap.get(entryName);
  // Guard against a missing entry (previously a NullPointerException) as well as NOT_SET.
  if (limit != null && !limit.equals(NOT_SET)) {
    throttleConfigs.add(new StateTransitionThrottleConfig(rebalanceType, throttleScope,
        Integer.parseInt((String) limit)));
  }
}
// Container for the Mockito mocks shared by the data-driven test cases; a fresh mocked
// ResourceControllerDataProvider is created per Mock instance.
private final class Mock {
  private ResourceControllerDataProvider cache = mock(ResourceControllerDataProvider.class);
}
}
| 9,728 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestTaskStage.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashMap;
import java.util.Map;
import org.apache.helix.AccessOption;
import org.apache.helix.HelixConstants;
import org.apache.helix.PropertyKey;
import org.apache.helix.TestHelper;
import org.apache.helix.common.DedupEventProcessor;
import org.apache.helix.controller.dataproviders.BaseControllerDataProvider;
import org.apache.helix.controller.pipeline.AsyncWorkerType;
import org.apache.helix.task.TaskUtil;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.common.caches.TaskDataCache;
import org.apache.helix.controller.dataproviders.WorkflowControllerDataProvider;
import org.apache.helix.controller.stages.task.TaskPersistDataStage;
import org.apache.helix.integration.task.TaskTestBase;
import org.apache.helix.integration.task.TaskTestUtil;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.task.JobConfig;
import org.apache.helix.task.JobContext;
import org.apache.helix.task.JobQueue;
import org.apache.helix.task.TaskPartitionState;
import org.apache.helix.task.TaskState;
import org.apache.helix.task.WorkflowContext;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * Unit tests for the task-related controller stages ({@code TaskPersistDataStage} and
 * {@code TaskGarbageCollectionStage}) run in isolation: the live controller is stopped in
 * {@link #beforeClass()} so each stage is driven manually through a {@link ClusterEvent}.
 */
public class TestTaskStage extends TaskTestBase {
  private ClusterEvent _event =
      new ClusterEvent(CLUSTER_NAME, ClusterEventType.TaskCurrentStateChange);
  private PropertyKey.Builder _keyBuilder;
  private String _testWorkflow = TestHelper.getTestClassName();
  private String _testJobPrefix = _testWorkflow + "_Job_";

  @BeforeClass
  public void beforeClass() throws Exception {
    super.beforeClass();
    // Stop the controller so the stages under test are the only actors in the pipeline.
    _controller.syncStop();
    _keyBuilder = _manager.getHelixDataAccessor().keyBuilder();
  }

  /**
   * Verifies that TaskPersistDataStage writes workflow/job contexts to ZK, and that a second
   * run reflects updates and removals made to the task data cache in between.
   */
  @Test
  public void testPersistContextData() {
    _event.addAttribute(AttributeName.helixmanager.name(), _manager);

    WorkflowControllerDataProvider cache = new WorkflowControllerDataProvider(CLUSTER_NAME);
    TaskDataCache taskDataCache = cache.getTaskDataCache();

    // Build a queue of three jobs. Each builder now carries its own job id — they were
    // previously all "Job_0" due to a copy-paste slip (enqueueJob namespaces them anyway).
    JobQueue.Builder queueBuilder = TaskTestUtil.buildJobQueue(_testWorkflow);
    JobConfig.Builder jobBuilder_0 =
        new JobConfig.Builder().setJobId("Job_0").setTargetResource("1").setCommand("1");
    JobConfig.Builder jobBuilder_1 =
        new JobConfig.Builder().setJobId("Job_1").setTargetResource("1").setCommand("1");
    JobConfig.Builder jobBuilder_2 =
        new JobConfig.Builder().setJobId("Job_2").setTargetResource("1").setCommand("1");
    queueBuilder.enqueueJob("Job_0", jobBuilder_0).enqueueJob("Job_1", jobBuilder_1)
        .enqueueJob("Job_2", jobBuilder_2);
    _driver.createQueue(queueBuilder.build());

    // Manually trigger a cache refresh — there is no live controller to do it for us.
    cache.refresh(new ZKHelixDataAccessor(CLUSTER_NAME, _baseAccessor));

    // Create the workflow context and two completed job contexts.
    WorkflowContext wfCtx = new WorkflowContext(new ZNRecord(TaskUtil.WORKFLOW_CONTEXT_KW));
    wfCtx.setJobState(_testJobPrefix + "0", TaskState.COMPLETED);
    wfCtx.setJobState(_testJobPrefix + "1", TaskState.COMPLETED);
    wfCtx.setWorkflowState(TaskState.IN_PROGRESS);
    wfCtx.setName(_testWorkflow);
    wfCtx.setStartTime(System.currentTimeMillis());

    JobContext jbCtx0 = new JobContext(new ZNRecord(_testJobPrefix + "0"));
    jbCtx0.setName(_testJobPrefix + "0");
    jbCtx0.setStartTime(System.currentTimeMillis());
    jbCtx0.setPartitionState(0, TaskPartitionState.COMPLETED);

    JobContext jbCtx1 = new JobContext((new ZNRecord(_testJobPrefix + "1")));
    jbCtx1.setName(_testJobPrefix + "1");
    jbCtx1.setStartTime(System.currentTimeMillis());
    jbCtx1.setPartitionState(0, TaskPartitionState.COMPLETED);

    taskDataCache.updateWorkflowContext(_testWorkflow, wfCtx);
    taskDataCache.updateJobContext(_testJobPrefix + "0", jbCtx0);
    taskDataCache.updateJobContext(_testJobPrefix + "1", jbCtx1);
    _event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);

    // First run: contexts must be written to ZK.
    TaskPersistDataStage persistDataStage = new TaskPersistDataStage();
    persistDataStage.process(_event);

    Assert.assertNotNull(_driver.getWorkflowContext(_testWorkflow));
    Assert.assertNotNull(_driver.getJobContext(_testJobPrefix + "0"));
    Assert.assertNotNull(_driver.getJobContext(_testJobPrefix + "1"));

    // Mutate the cache: fail job 0, drop job 1, add job 2.
    jbCtx0.setPartitionState(0, TaskPartitionState.ERROR);
    wfCtx.setJobState(_testJobPrefix + "0", TaskState.FAILED);
    taskDataCache.updateJobContext(_testJobPrefix + "0", jbCtx0);

    wfCtx.getJobStates().remove(_testJobPrefix + "1");
    taskDataCache.removeContext(_testJobPrefix + "1");

    JobContext jbCtx2 = new JobContext(new ZNRecord(_testJobPrefix + "2"));
    jbCtx2.setName(_testJobPrefix + "2");
    jbCtx2.setPartitionState(1, TaskPartitionState.INIT);
    wfCtx.setJobState(_testJobPrefix + "2", TaskState.IN_PROGRESS);
    taskDataCache.updateJobContext(_testJobPrefix + "2", jbCtx2);
    taskDataCache.updateWorkflowContext(_testWorkflow, wfCtx);

    // Second run: ZK must mirror the cache mutations exactly.
    persistDataStage.process(_event);

    Assert.assertEquals(_driver.getWorkflowContext(_testWorkflow), wfCtx);
    Assert.assertEquals(_driver.getJobContext(_testJobPrefix + "0"), jbCtx0);
    Assert.assertEquals(_driver.getJobContext(_testJobPrefix + "2"), jbCtx2);
    Assert.assertNull(_driver.getJobContext(_testJobPrefix + "1"));
  }

  /**
   * Test that if there is a job in the DAG with JobConfig gone (due to ZK delete failure), the
   * async job purge will try to delete it again.
   */
  @Test(dependsOnMethods = "testPersistContextData")
  public void testPartialDataPurge() throws Exception {
    // Inline worker that runs purge tasks synchronously on its event thread.
    DedupEventProcessor<String, Runnable> worker =
        new DedupEventProcessor<String, Runnable>(CLUSTER_NAME,
            AsyncWorkerType.TaskJobPurgeWorker.name()) {
          @Override
          protected void handleEvent(Runnable event) {
            event.run();
          }
        };
    worker.start();
    Map<AsyncWorkerType, DedupEventProcessor<String, Runnable>> workerPool = new HashMap<>();
    workerPool.put(AsyncWorkerType.TaskJobPurgeWorker, worker);
    _event.addAttribute(AttributeName.AsyncFIFOWorkerPool.name(), workerPool);

    // Manually delete JobConfigs to simulate a partial ZK delete failure.
    deleteJobConfigs(_testWorkflow, _testJobPrefix + "0");
    deleteJobConfigs(_testWorkflow, _testJobPrefix + "1");
    deleteJobConfigs(_testWorkflow, _testJobPrefix + "2");

    // Manually refresh because there's no controller to notify data change.
    BaseControllerDataProvider dataProvider =
        _event.getAttribute(AttributeName.ControllerDataProvider.name());
    dataProvider.notifyDataChange(HelixConstants.ChangeType.RESOURCE_CONFIG);
    dataProvider.refresh(_manager.getHelixDataAccessor());

    // Then purge jobs.
    TaskGarbageCollectionStage garbageCollectionStage = new TaskGarbageCollectionStage();
    garbageCollectionStage.process(_event);

    // Check that contexts have been purged for the jobs in both old and new ZNode paths.
    checkForContextRemoval(_testWorkflow, _testJobPrefix + "0");
    checkForContextRemoval(_testWorkflow, _testJobPrefix + "1");
    checkForContextRemoval(_testWorkflow, _testJobPrefix + "2");

    // Stop the worker thread; previously it was leaked (only the worker in
    // testWorkflowGarbageCollection was shut down).
    worker.shutdown();
  }

  @Test(dependsOnMethods = "testPartialDataPurge")
  public void testWorkflowGarbageCollection() throws Exception {
    DedupEventProcessor<String, Runnable> worker =
        new DedupEventProcessor<String, Runnable>(CLUSTER_NAME,
            AsyncWorkerType.TaskJobPurgeWorker.name()) {
          @Override
          protected void handleEvent(Runnable event) {
            event.run();
          }
        };
    worker.start();
    Map<AsyncWorkerType, DedupEventProcessor<String, Runnable>> workerPool = new HashMap<>();
    workerPool.put(AsyncWorkerType.TaskJobPurgeWorker, worker);
    _event.addAttribute(AttributeName.AsyncFIFOWorkerPool.name(), workerPool);

    // Remove the workflow's ResourceConfig so garbage collection sees an orphan workflow.
    String zkPath =
        _manager.getHelixDataAccessor().keyBuilder().resourceConfig(_testWorkflow).getPath();
    _baseAccessor.remove(zkPath, AccessOption.PERSISTENT);

    // Manually refresh because there's no controller to notify data change.
    BaseControllerDataProvider dataProvider =
        _event.getAttribute(AttributeName.ControllerDataProvider.name());
    dataProvider.notifyDataChange(HelixConstants.ChangeType.RESOURCE_CONFIG);
    dataProvider.refresh(_manager.getHelixDataAccessor());

    // Then garbage collect the workflow.
    TaskGarbageCollectionStage garbageCollectionStage = new TaskGarbageCollectionStage();
    garbageCollectionStage.process(_event);

    // Check that contexts have been purged for the workflow.
    checkForContextRemoval(_testWorkflow);

    worker.shutdown();
  }

  // Deletes a job's JobConfig from both the legacy and the current ZNode locations.
  private void deleteJobConfigs(String workflowName, String jobName) {
    String oldPath = _manager.getHelixDataAccessor().keyBuilder().resourceConfig(jobName).getPath();
    String newPath = _manager.getHelixDataAccessor().keyBuilder()
        .jobConfigZNode(workflowName, jobName).getPath();
    _baseAccessor.remove(oldPath, AccessOption.PERSISTENT);
    _baseAccessor.remove(newPath, AccessOption.PERSISTENT);
  }

  // Waits (up to 2 minutes) until the job context is gone from both ZNode locations.
  private void checkForContextRemoval(String workflow, String job) throws Exception {
    // JobContexts in old ZNode path
    String oldPath =
        String.format("/%s/PROPERTYSTORE/TaskRebalancer/%s/Context", CLUSTER_NAME, job);
    String newPath = _keyBuilder.jobContextZNode(workflow, job).getPath();
    Assert.assertTrue(TestHelper.verify(
        () -> !_baseAccessor.exists(oldPath, AccessOption.PERSISTENT) && !_baseAccessor
            .exists(newPath, AccessOption.PERSISTENT), 120000));
  }

  // Waits (up to 2 minutes) until the workflow context ZNode is removed.
  private void checkForContextRemoval(String workflow) throws Exception {
    Assert.assertTrue(TestHelper.verify(() -> !_baseAccessor
        .exists(_keyBuilder.workflowContextZNode(workflow).getPath(), AccessOption.PERSISTENT),
        120000));
  }
}
| 9,729 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/DummyClusterManager.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Set;
import org.apache.helix.ClusterMessagingService;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerProperties;
import org.apache.helix.InstanceType;
import org.apache.helix.LiveInstanceInfoProvider;
import org.apache.helix.PreConnectCallback;
import org.apache.helix.PropertyKey;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.api.listeners.ClusterConfigChangeListener;
import org.apache.helix.api.listeners.ConfigChangeListener;
import org.apache.helix.api.listeners.ControllerChangeListener;
import org.apache.helix.api.listeners.CurrentStateChangeListener;
import org.apache.helix.api.listeners.CustomizedStateChangeListener;
import org.apache.helix.api.listeners.CustomizedStateConfigChangeListener;
import org.apache.helix.api.listeners.CustomizedStateRootChangeListener;
import org.apache.helix.api.listeners.CustomizedViewChangeListener;
import org.apache.helix.api.listeners.CustomizedViewRootChangeListener;
import org.apache.helix.api.listeners.ExternalViewChangeListener;
import org.apache.helix.api.listeners.IdealStateChangeListener;
import org.apache.helix.api.listeners.InstanceConfigChangeListener;
import org.apache.helix.api.listeners.LiveInstanceChangeListener;
import org.apache.helix.api.listeners.MessageListener;
import org.apache.helix.api.listeners.ResourceConfigChangeListener;
import org.apache.helix.api.listeners.ScopedConfigChangeListener;
import org.apache.helix.controller.pipeline.Pipeline;
import org.apache.helix.healthcheck.ParticipantHealthReportCollector;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.participant.StateMachineEngine;
import org.apache.helix.store.zk.ZkHelixPropertyStore;
/**
 * Minimal {@link HelixManager} stub for controller stage tests. Only the cluster name,
 * session id, instance name and the supplied {@link HelixDataAccessor} are real; every
 * other operation is a no-op or returns a default value (null/false/0).
 */
public class DummyClusterManager implements HelixManager {
  HelixDataAccessor _accessor;
  String _clusterName;
  String _sessionId;
  private String _instanceName;

  public DummyClusterManager(String clusterName, HelixDataAccessor accessor) {
    // Delegate to the main constructor with a deterministic per-cluster session id
    // (previously the two constructors duplicated the same field assignments).
    this(clusterName, accessor, "session_" + clusterName);
  }

  public DummyClusterManager(String clusterName, HelixDataAccessor accessor, String sessionId) {
    _clusterName = clusterName;
    _accessor = accessor;
    _sessionId = sessionId;
    _instanceName = "DummyInstance_" + clusterName;
  }

  @Override
  public void connect() throws Exception {
    // no-op: this dummy manager is never connected
  }

  @Override
  public boolean isConnected() {
    return false;
  }

  @Override
  public void disconnect() {
    // no-op
  }

  @Override
  public void addIdealStateChangeListener(IdealStateChangeListener listener) throws Exception {
    // no-op
  }

  @Override
  public void addIdealStateChangeListener(org.apache.helix.IdealStateChangeListener listener) throws Exception {
    // no-op (deprecated listener overload)
  }

  @Override
  public void addLiveInstanceChangeListener(LiveInstanceChangeListener listener) throws Exception {
    // no-op
  }

  @Override
  public void addLiveInstanceChangeListener(org.apache.helix.LiveInstanceChangeListener listener) throws Exception {
    // no-op (deprecated listener overload)
  }

  @Override
  public void addConfigChangeListener(ConfigChangeListener listener) throws Exception {
    // no-op
  }

  @Override
  public void addMessageListener(MessageListener listener, String instanceName) throws Exception {
    // no-op
  }

  @Override
  public void addMessageListener(org.apache.helix.MessageListener listener, String instanceName) throws Exception {
    // no-op (deprecated listener overload)
  }

  @Override
  public void addCurrentStateChangeListener(CurrentStateChangeListener listener,
      String instanceName, String sessionId) throws Exception {
    // no-op
  }

  @Override
  public void addCurrentStateChangeListener(org.apache.helix.CurrentStateChangeListener listener, String instanceName,
      String sessionId) throws Exception {
    // no-op (deprecated listener overload)
  }

  @Override
  public void addCustomizedStateRootChangeListener(CustomizedStateRootChangeListener listener,
      String instanceName) throws Exception {
    // no-op
  }

  @Override
  public void addCustomizedStateChangeListener(CustomizedStateChangeListener listener,
      String instanceName, String customizedStateType) throws Exception {
    // no-op
  }

  @Override
  public void addExternalViewChangeListener(ExternalViewChangeListener listener) throws Exception {
    // no-op
  }

  @Override
  public void addCustomizedViewChangeListener(CustomizedViewChangeListener listener, String customizedStateType) throws Exception {
    // no-op
  }

  @Override
  public void addCustomizedViewRootChangeListener(CustomizedViewRootChangeListener listener) throws Exception {
    // no-op
  }

  @Override
  public void addTargetExternalViewChangeListener(ExternalViewChangeListener listener) throws Exception {
    // no-op
  }

  @Override
  public void addExternalViewChangeListener(org.apache.helix.ExternalViewChangeListener listener) throws Exception {
    // no-op (deprecated listener overload)
  }

  @Override
  public void setEnabledControlPipelineTypes(Set<Pipeline.Type> types) {
    // no-op
  }

  @Override
  public boolean removeListener(PropertyKey key, Object listener) {
    return false;
  }

  @Override
  public String getClusterName() {
    return _clusterName;
  }

  @Override
  public String getMetadataStoreConnectionString() {
    return null;
  }

  @Override
  public String getInstanceName() {
    return _instanceName;
  }

  @Override
  public String getSessionId() {
    return _sessionId;
  }

  @Override
  public long getLastNotificationTime() {
    return 0;
  }

  @Override
  public void addControllerListener(ControllerChangeListener listener) {
    // no-op
  }

  @Override
  public void addControllerListener(org.apache.helix.ControllerChangeListener listener) {
    // no-op (deprecated listener overload)
  }

  @Override
  public HelixAdmin getClusterManagmentTool() {
    return null;
  }

  @Override
  public ClusterMessagingService getMessagingService() {
    return null;
  }

  @Override
  public InstanceType getInstanceType() {
    return null;
  }

  @Override
  public String getVersion() {
    return null;
  }

  @Override
  public StateMachineEngine getStateMachineEngine() {
    return null;
  }

  @Override
  public boolean isLeader() {
    return false;
  }

  @Override
  public ConfigAccessor getConfigAccessor() {
    return null;
  }

  @Override
  public void startTimerTasks() {
    // no-op
  }

  @Override
  public void stopTimerTasks() {
    // no-op
  }

  @Override
  public HelixDataAccessor getHelixDataAccessor() {
    // The only live collaborator: the accessor supplied at construction time.
    return _accessor;
  }

  @Override
  public void addPreConnectCallback(PreConnectCallback callback) {
    // no-op
  }

  @Override
  public ZkHelixPropertyStore<ZNRecord> getHelixPropertyStore() {
    return null;
  }

  @Override
  public void addInstanceConfigChangeListener(InstanceConfigChangeListener listener)
      throws Exception {
    // no-op
  }

  @Override
  public void addInstanceConfigChangeListener(org.apache.helix.InstanceConfigChangeListener listener) throws Exception {
    // no-op (deprecated listener overload)
  }

  @Override
  public void addResourceConfigChangeListener(ResourceConfigChangeListener listener)
      throws Exception {
    // no-op
  }

  @Override
  public void addCustomizedStateConfigChangeListener(CustomizedStateConfigChangeListener listener)
      throws Exception {
    // no-op
  }

  @Override
  public void addClusterfigChangeListener(ClusterConfigChangeListener listener)
      throws Exception {
    // no-op (method name typo is inherited from the HelixManager interface)
  }

  @Override
  public void addConfigChangeListener(ScopedConfigChangeListener listener, ConfigScopeProperty scope)
      throws Exception {
    // no-op
  }

  @Override
  public void addConfigChangeListener(org.apache.helix.ScopedConfigChangeListener listener, ConfigScopeProperty scope)
      throws Exception {
    // no-op (deprecated listener overload)
  }

  @Override
  public void setLiveInstanceInfoProvider(LiveInstanceInfoProvider liveInstanceInfoProvider) {
    // no-op
  }

  @Override
  public HelixManagerProperties getProperties() {
    return null;
  }

  @Override
  public void addControllerMessageListener(MessageListener listener) {
    // no-op
  }

  @Override
  public void addControllerMessageListener(org.apache.helix.MessageListener listener) {
    // no-op (deprecated listener overload)
  }

  @Override
  public ParticipantHealthReportCollector getHealthReportCollector() {
    return null;
  }

  @Override
  public Long getSessionStartTime() {
    return 0L;
  }

  // Allows subclasses/tests to swap the session id after construction.
  protected void setSessionId(String sessionId) {
    _sessionId = sessionId;
  }
}
| 9,730 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestMsgSelectionStage.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.apache.helix.TestHelper;
import org.apache.helix.controller.stages.MessageSelectionStage.Bounds;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;
import org.apache.helix.model.Message.MessageState;
import org.apache.helix.model.Message.MessageType;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Unit tests for {@code MessageSelectionStage.selectMessages} with the MasterSlave model:
 * a MASTER slot must be released (MASTER-&gt;SLAVE sent first) before a new MASTER is
 * promoted, and no promotion is selected while another instance still has a pending
 * SLAVE-&gt;MASTER message. Shared fixtures are extracted into private helpers (they were
 * previously duplicated in both test methods).
 */
public class TestMsgSelectionStage {
  /** Builds a MasterSlave state-transition message targeted at the given instance. */
  private Message newMessage(String resourceName, String partitionName, String instanceName,
      String fromState, String toState) {
    String uuid = UUID.randomUUID().toString();
    Message message = new Message(MessageType.STATE_TRANSITION, uuid);
    message.setSrcName("controller");
    message.setTgtName(instanceName);
    message.setMsgState(MessageState.NEW);
    message.setResourceName(resourceName);
    message.setPartitionName(partitionName);
    message.setFromState(fromState);
    message.setToState(toState);
    message.setTgtSessionId("sessionId");
    message.setSrcSessionId("sessionId");
    message.setStateModelDef("MasterSlave");
    message.setStateModelFactoryName("DEFAULT");
    message.setBucketSize(0);
    return message;
  }

  // Fixture: two live instances, localhost_0 and localhost_1.
  private static Map<String, LiveInstance> twoLiveInstances() {
    Map<String, LiveInstance> liveInstances = new HashMap<>();
    liveInstances.put("localhost_0", new LiveInstance("localhost_0"));
    liveInstances.put("localhost_1", new LiveInstance("localhost_1"));
    return liveInstances;
  }

  // Fixture: at most one MASTER and two SLAVEs per partition.
  private static Map<String, Bounds> masterSlaveConstraints() {
    Map<String, Bounds> stateConstraints = new HashMap<>();
    stateConstraints.put("MASTER", new Bounds(0, 1));
    stateConstraints.put("SLAVE", new Bounds(0, 2));
    return stateConstraints;
  }

  // Fixture: downward (MASTER-SLAVE) transitions are selected before upward ones.
  private static Map<String, Integer> masterSlavePriorities() {
    Map<String, Integer> stateTransitionPriorities = new HashMap<>();
    stateTransitionPriorities.put("MASTER-SLAVE", 0);
    stateTransitionPriorities.put("SLAVE-MASTER", 1);
    return stateTransitionPriorities;
  }

  @Test
  public void testMasterXfer() {
    System.out.println("START testMasterXfer at " + new Date(System.currentTimeMillis()));

    Map<String, String> currentStates = new HashMap<>();
    currentStates.put("localhost_0", "SLAVE");
    currentStates.put("localhost_1", "MASTER");

    Map<String, Message> pendingMessages = new HashMap<>();

    List<Message> messages = new ArrayList<>();
    messages.add(TestHelper.createMessage("msgId_0", "SLAVE", "MASTER", "localhost_0", "TestDB",
        "TestDB_0"));
    messages.add(TestHelper.createMessage("msgId_1", "MASTER", "SLAVE", "localhost_1", "TestDB",
        "TestDB_0"));

    List<Message> selectedMsg =
        new MessageSelectionStage().selectMessages(twoLiveInstances(), currentStates,
            pendingMessages, messages, Collections.<Message>emptyList(),
            masterSlaveConstraints(), masterSlavePriorities(),
            BuiltInStateModelDefinitions.MasterSlave.getStateModelDefinition(), false);

    // Only the downward MASTER->SLAVE message may go out in this round.
    Assert.assertEquals(selectedMsg.size(), 1);
    Assert.assertEquals(selectedMsg.get(0).getMsgId(), "msgId_1");
    System.out.println("END testMasterXfer at " + new Date(System.currentTimeMillis()));
  }

  @Test
  public void testMasterXferAfterMasterResume() {
    System.out.println("START testMasterXferAfterMasterResume at "
        + new Date(System.currentTimeMillis()));

    Map<String, String> currentStates = new HashMap<>();
    currentStates.put("localhost_0", "SLAVE");
    currentStates.put("localhost_1", "SLAVE");

    // localhost_1 already has a pending SLAVE->MASTER message in flight.
    Map<String, Message> pendingMessages = new HashMap<>();
    pendingMessages.put("localhost_1", newMessage("TestDB", "TestDB_0", "localhost_1", "SLAVE", "MASTER"));

    List<Message> messages = new ArrayList<>();
    messages.add(TestHelper.createMessage("msgId_0", "SLAVE", "MASTER", "localhost_0", "TestDB",
        "TestDB_0"));

    List<Message> selectedMsg =
        new MessageSelectionStage().selectMessages(twoLiveInstances(), currentStates,
            pendingMessages, messages, Collections.<Message>emptyList(),
            masterSlaveConstraints(), masterSlavePriorities(),
            BuiltInStateModelDefinitions.MasterSlave.getStateModelDefinition(), false);

    // No promotion may be selected while another promotion is still pending.
    Assert.assertEquals(selectedMsg.size(), 0);
    System.out.println("END testMasterXferAfterMasterResume at "
        + new Date(System.currentTimeMillis()));
  }
}
| 9,731 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestManagementModeStage.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import org.apache.helix.HelixConstants;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.HelixManager;
import org.apache.helix.PropertyKey;
import org.apache.helix.TestHelper;
import org.apache.helix.api.status.ClusterManagementMode;
import org.apache.helix.api.status.ClusterManagementModeRequest;
import org.apache.helix.common.ZkTestBase;
import org.apache.helix.controller.dataproviders.ManagementControllerDataProvider;
import org.apache.helix.controller.pipeline.Pipeline;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.ClusterStatus;
import org.apache.helix.model.ControllerHistory;
import org.apache.helix.model.LiveInstance;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestManagementModeStage extends ZkTestBase {
// Shared handles initialized in beforeClass() and torn down in afterClass().
HelixManager _manager;
HelixDataAccessor _accessor;
String _clusterName;
@BeforeClass
public void beforeClass() {
  // One isolated cluster per test class; the dummy manager only supplies the accessor.
  String clusterName = "CLUSTER_" + TestHelper.getTestClassName();
  _clusterName = clusterName;
  _accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
  _manager = new DummyClusterManager(clusterName, _accessor);
}
@AfterClass
public void afterClass() {
  // Remove live-instance znodes first; presumably cluster deletion requires an empty
  // LIVEINSTANCES path — keep this ordering.
  deleteLiveInstances(_clusterName);
  deleteCluster(_clusterName);
}
@Test
public void testClusterFreezeStatus() throws Exception {
// ideal state: node0 is MASTER, node1 is SLAVE
// replica=2 means 1 master and 1 slave
setupIdealState(_clusterName, new int[]{0, 1}, new String[]{"TestDB"}, 1, 2);
List<LiveInstance> liveInstances = setupLiveInstances(_clusterName, new int[]{0, 1});
setupStateModel(_clusterName);
ClusterEvent event = new ClusterEvent(_clusterName, ClusterEventType.Unknown);
ManagementControllerDataProvider cache = new ManagementControllerDataProvider(_clusterName,
Pipeline.Type.MANAGEMENT_MODE.name());
event.addAttribute(AttributeName.helixmanager.name(), _manager);
event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);
// Freeze cluster
ClusterManagementModeRequest request = ClusterManagementModeRequest.newBuilder()
.withClusterName(_clusterName)
.withMode(ClusterManagementMode.Type.CLUSTER_FREEZE)
.withReason("test")
.build();
_gSetupTool.getClusterManagementTool().setClusterManagementMode(request);
Pipeline dataRefresh = new Pipeline();
dataRefresh.addStage(new ReadClusterDataStage());
dataRefresh.addStage(new ResourceComputationStage());
dataRefresh.addStage(new CurrentStateComputationStage());
runPipeline(event, dataRefresh, false);
ManagementModeStage managementModeStage = new ManagementModeStage();
managementModeStage.process(event);
// In frozen mode
ClusterStatus clusterStatus = _accessor.getProperty(_accessor.keyBuilder().clusterStatus());
Assert.assertEquals(clusterStatus.getManagementMode(), ClusterManagementMode.Type.CLUSTER_FREEZE);
ControllerHistory history =
_accessor.getProperty(_accessor.keyBuilder().controllerLeaderHistory());
Assert.assertNull(history);
// Mark both live instances to be frozen, then entering freeze mode is complete
for (int i = 0; i < 2; i++) {
LiveInstance liveInstance = liveInstances.get(i);
liveInstance.setStatus(LiveInstance.LiveInstanceStatus.FROZEN);
PropertyKey liveInstanceKey =
_accessor.keyBuilder().liveInstance(liveInstance.getInstanceName());
_accessor.updateProperty(liveInstanceKey, liveInstance);
}
// Require cache refresh
cache.notifyDataChange(HelixConstants.ChangeType.LIVE_INSTANCE);
runPipeline(event, dataRefresh, false);
managementModeStage.process(event);
// Freeze mode is complete
clusterStatus = _accessor.getProperty(_accessor.keyBuilder().clusterStatus());
Assert.assertEquals(clusterStatus.getManagementMode(), ClusterManagementMode.Type.CLUSTER_FREEZE);
Assert.assertEquals(clusterStatus.getManagementModeStatus(),
ClusterManagementMode.Status.COMPLETED);
// Management history is recorded
history = _accessor.getProperty(_accessor.keyBuilder().controllerLeaderHistory());
Assert.assertEquals(history.getManagementModeHistory().size(), 1);
String lastHistory = history.getManagementModeHistory().get(0);
Assert.assertTrue(lastHistory.contains("MODE=" + ClusterManagementMode.Type.CLUSTER_FREEZE));
Assert.assertTrue(lastHistory.contains("STATUS=" + ClusterManagementMode.Status.COMPLETED));
// No duplicate management mode history entries
managementModeStage.process(event);
history = _accessor.getProperty(_accessor.keyBuilder().controllerLeaderHistory());
Assert.assertEquals(history.getManagementModeHistory().size(), 1);
// Unfreeze cluster
request = ClusterManagementModeRequest.newBuilder()
.withClusterName(_clusterName)
.withMode(ClusterManagementMode.Type.NORMAL)
.withReason("test")
.build();
_gSetupTool.getClusterManagementTool().setClusterManagementMode(request);
runPipeline(event, dataRefresh, false);
managementModeStage.process(event);
clusterStatus = _accessor.getProperty(_accessor.keyBuilder().clusterStatus());
Assert.assertEquals(clusterStatus.getManagementMode(), ClusterManagementMode.Type.NORMAL);
// In progress because a live instance is still frozen
Assert.assertEquals(clusterStatus.getManagementModeStatus(),
ClusterManagementMode.Status.IN_PROGRESS);
// remove froze status to mark the live instances to be normal status
for (int i = 0; i < 2; i++) {
LiveInstance liveInstance = liveInstances.get(i);
PropertyKey liveInstanceKey =
_accessor.keyBuilder().liveInstance(liveInstance.getInstanceName());
liveInstance.getRecord().getSimpleFields()
.remove(LiveInstance.LiveInstanceProperty.STATUS.name());
_accessor.setProperty(liveInstanceKey, liveInstance);
}
// Require cache refresh
cache.notifyDataChange(HelixConstants.ChangeType.LIVE_INSTANCE);
runPipeline(event, dataRefresh, false);
try {
managementModeStage.process(event);
} catch (HelixException expected) {
// It's expected because controller does not set for cluster.
Assert.assertTrue(expected.getMessage()
.startsWith("Failed to switch management mode pipeline, enabled=false"));
}
clusterStatus = _accessor.getProperty(_accessor.keyBuilder().clusterStatus());
// Fully existed frozen mode
Assert.assertEquals(clusterStatus.getManagementMode(), ClusterManagementMode.Type.NORMAL);
Assert.assertEquals(clusterStatus.getManagementModeStatus(),
ClusterManagementMode.Status.COMPLETED);
}
}
| 9,732 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestClusterEventBlockingQueue.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import org.apache.helix.common.ClusterEventBlockingQueue;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Test {@link ClusterEventBlockingQueue} to ensure that it coalesces events while keeping them in
 * FIFO order.
 */
public class TestClusterEventBlockingQueue {
  @Test
  public void testEventQueue() throws Exception {
    // initialize the queue
    ClusterEventBlockingQueue queue = new ClusterEventBlockingQueue();

    // add an event
    ClusterEvent event1 = new ClusterEvent(ClusterEventType.IdealStateChange);
    queue.put(event1);
    Assert.assertEquals(queue.size(), 1);

    // add an event with a different type (should grow the queue)
    ClusterEvent event2 = new ClusterEvent(ClusterEventType.ConfigChange);
    queue.put(event2);
    Assert.assertEquals(queue.size(), 2);

    // add an event with the same type as event1; the queue coalesces it with the existing
    // entry, so the size stays the same but the payload is replaced
    ClusterEvent newEvent1 = new ClusterEvent(ClusterEventType.IdealStateChange);
    newEvent1.addAttribute("attr", 1);
    queue.put(newEvent1);
    Assert.assertEquals(queue.size(), 2);

    // peek must return the coalesced head without removing it
    ClusterEvent peeked = queue.peek();
    Assert.assertEquals(peeked.getEventType(), ClusterEventType.IdealStateChange);
    Assert.assertEquals((int) peeked.getAttribute("attr"), 1);
    Assert.assertEquals(queue.size(), 2);

    // take the head and then the tail, verifying FIFO order is preserved
    ListeningExecutorService service =
        MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
    try {
      ClusterEvent takenEvent1 = safeTake(queue, service);
      Assert.assertEquals(takenEvent1.getEventType(), ClusterEventType.IdealStateChange);
      Assert.assertEquals((int) takenEvent1.getAttribute("attr"), 1);
      Assert.assertEquals(queue.size(), 1);

      ClusterEvent takenEvent2 = safeTake(queue, service);
      Assert.assertEquals(takenEvent2.getEventType(), ClusterEventType.ConfigChange);
      Assert.assertEquals(queue.size(), 0);
    } finally {
      // shut down the helper executor so its threads do not outlive the test
      service.shutdownNow();
    }
  }

  /**
   * Take the head of the queue on a background thread.
   * The take() in ClusterEventBlockingQueue will wait indefinitely; for this test we stop
   * waiting after 30 seconds so a regression cannot hang the test run forever.
   *
   * @param queue the queue under test
   * @param service executor used to run the blocking take() off the test thread
   * @return the event taken from the head of the queue
   */
  private ClusterEvent safeTake(final ClusterEventBlockingQueue queue,
      final ListeningExecutorService service) throws InterruptedException, ExecutionException,
      TimeoutException {
    ListenableFuture<ClusterEvent> future = service.submit(new Callable<ClusterEvent>() {
      @Override
      public ClusterEvent call() throws InterruptedException {
        return queue.take();
      }
    });
    return future.get(30, TimeUnit.SECONDS);
  }
}
| 9,733 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/BaseStageTest.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.controller.pipeline.AbstractAsyncBaseStage;
import org.apache.helix.controller.pipeline.Stage;
import org.apache.helix.controller.pipeline.StageContext;
import org.apache.helix.mock.MockHelixAdmin;
import org.apache.helix.mock.MockManager;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.IdealState.RebalanceMode;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;
import org.apache.helix.model.Resource;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.tools.StateModelConfigGenerator;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.ITestContext;
import org.testng.annotations.AfterClass;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
/**
 * Common scaffolding for controller stage tests: builds a fresh mock cluster per test method
 * (manager, data accessor, admin, cluster event) and provides helpers to populate ideal states,
 * live instances, current states, and state model definitions.
 */
public class BaseStageTest {
  public final static String HOSTNAME_PREFIX = "localhost_";
  public final static String SESSION_PREFIX = "session_";
  // Sentinel meaning "do not set minActiveReplicas on the ideal state".
  private final static int MIN_ACTIVE_REPLICA_NOT_SET = -1;

  protected String _clusterName;
  protected HelixManager manager;
  protected HelixDataAccessor accessor;
  protected ClusterEvent event;
  protected HelixAdmin admin;

  @BeforeClass()
  public void beforeClass() {
    String className = this.getClass().getName();
    System.out.println("START " + className.substring(className.lastIndexOf('.') + 1) + " at "
        + new Date(System.currentTimeMillis()));
  }

  @AfterClass()
  public void afterClass() {
    String className = this.getClass().getName();
    System.out.println("END " + className.substring(className.lastIndexOf('.') + 1) + " at "
        + new Date(System.currentTimeMillis()));
  }

  /**
   * Create a brand-new mock cluster (unique name, mock manager/admin, empty cluster event)
   * so each test method starts from a clean slate.
   */
  public void setup() {
    _clusterName = "testCluster-" + UUID.randomUUID().toString();
    manager = new MockManager(_clusterName);
    accessor = manager.getHelixDataAccessor();
    ClusterConfig clusterConfig = new ClusterConfig(_clusterName);
    setClusterConfig(clusterConfig);
    admin = new MockHelixAdmin(manager);
    event = new ClusterEvent(ClusterEventType.Unknown);
    admin.addCluster(_clusterName);
  }

  @BeforeMethod
  public void beforeTest(Method testMethod, ITestContext testContext) {
    long startTime = System.currentTimeMillis();
    System.out.println("START " + testMethod.getName() + " at " + new Date(startTime));
    // Store the same timestamp that was printed, so endTest() reports an accurate duration
    // (previously a second currentTimeMillis() call was stored instead).
    testContext.setAttribute("StartTime", startTime);
    setup();
  }

  @AfterMethod
  public void endTest(Method testMethod, ITestContext testContext) {
    Long startTime = (Long) testContext.getAttribute("StartTime");
    long endTime = System.currentTimeMillis();
    System.out.println("END " + testMethod.getName() + " at " + new Date(endTime) + ", took: "
        + (endTime - startTime) + "ms.");
  }

  /**
   * Write ideal states for the given resources into the mock accessor.
   * Each partition gets {@code replicas} hosts assigned round-robin over {@code nodes} hosts.
   *
   * @param minActiveReplica set on the ideal state only when &gt; 0; pass
   *          {@link #MIN_ACTIVE_REPLICA_NOT_SET} to leave it unset
   * @return the ideal states that were written
   */
  protected List<IdealState> setupIdealState(int nodes, String[] resources, int partitions,
      int replicas, RebalanceMode rebalanceMode, String stateModelName, String rebalanceClassName,
      String rebalanceStrategyName, int minActiveReplica) {
    List<IdealState> idealStates = new ArrayList<>();
    for (String resourceName : resources) {
      ZNRecord record = new ZNRecord(resourceName);
      for (int p = 0; p < partitions; p++) {
        List<String> value = new ArrayList<>();
        for (int r = 0; r < replicas; r++) {
          // round-robin host assignment over the node set
          value.add(HOSTNAME_PREFIX + (p + r + 1) % nodes);
        }
        record.setListField(resourceName + "_" + p, value);
      }
      IdealState idealState = new IdealState(record);
      idealState.setStateModelDefRef(stateModelName);
      if (rebalanceClassName != null) {
        idealState.setRebalancerClassName(rebalanceClassName);
      }
      if (rebalanceStrategyName != null) {
        idealState.setRebalanceStrategy(rebalanceStrategyName);
      }
      idealState.setRebalanceMode(rebalanceMode);
      idealState.setNumPartitions(partitions);
      idealState.setReplicas(String.valueOf(replicas));
      if (minActiveReplica > 0) {
        idealState.setMinActiveReplicas(minActiveReplica);
      }
      idealStates.add(idealState);
      Builder keyBuilder = accessor.keyBuilder();
      accessor.setProperty(keyBuilder.idealStates(resourceName), idealState);
    }
    return idealStates;
  }

  protected List<IdealState> setupIdealState(int nodes, String[] resources, int partitions,
      int replicas, RebalanceMode rebalanceMode) {
    return setupIdealState(nodes, resources, partitions, replicas, rebalanceMode,
        BuiltInStateModelDefinitions.MasterSlave.name(), null, null, MIN_ACTIVE_REPLICA_NOT_SET);
  }

  protected List<IdealState> setupIdealState(int nodes, String[] resources, int partitions,
      int replicas, RebalanceMode rebalanceMode, String stateModelName) {
    return setupIdealState(nodes, resources, partitions, replicas, rebalanceMode, stateModelName,
        null, null, MIN_ACTIVE_REPLICA_NOT_SET);
  }

  protected List<IdealState> setupIdealState(int nodes, String[] resources, int partitions,
      int replicas, RebalanceMode rebalanceMode, String stateModelName, String rebalanceClassName) {
    return setupIdealState(nodes, resources, partitions, replicas, rebalanceMode, stateModelName,
        rebalanceClassName, null, MIN_ACTIVE_REPLICA_NOT_SET);
  }

  /**
   * Register {@code numLiveInstances} live instances (localhost_0..n-1) with session ids
   * session_0..n-1.
   *
   * @return the instance names that were registered
   */
  protected List<String> setupLiveInstances(int numLiveInstances) {
    List<String> instances = new ArrayList<>();
    Builder keyBuilder = accessor.keyBuilder();
    for (int i = 0; i < numLiveInstances; i++) {
      LiveInstance liveInstance = new LiveInstance(HOSTNAME_PREFIX + i);
      liveInstance.setSessionId(SESSION_PREFIX + i);
      accessor.setProperty(keyBuilder.liveInstance(HOSTNAME_PREFIX + i), liveInstance);
      instances.add(liveInstance.getInstanceName());
    }
    return instances;
  }

  /** Register {@code numInstances} instance configs (localhost_0..n-1) with the mock admin. */
  protected void setupInstances(int numInstances) {
    for (int i = 0; i < numInstances; i++) {
      String instance = HOSTNAME_PREFIX + i;
      InstanceConfig config = new InstanceConfig(instance);
      config.setHostName(instance);
      config.setPort("12134");
      admin.addInstance(manager.getClusterName(), config);
    }
  }

  /** Write the given per-instance current states into the mock accessor. */
  protected void setupCurrentStates(Map<String, CurrentState> currentStates) {
    Builder keyBuilder = accessor.keyBuilder();
    for (String instanceName : currentStates.keySet()) {
      CurrentState currentState = currentStates.get(instanceName);
      accessor.setProperty(keyBuilder.currentState(instanceName, currentState.getSessionId(),
          currentState.getResourceName()), currentState);
    }
  }

  protected void setClusterConfig(ClusterConfig clusterConfig) {
    accessor.setProperty(accessor.keyBuilder().clusterConfig(), clusterConfig);
  }

  protected void setSingleIdealState(IdealState idealState) {
    accessor
        .setProperty(accessor.keyBuilder().idealStates(idealState.getResourceName()), idealState);
  }

  /**
   * Run a single pipeline stage against {@code event}: init, preProcess, process (or execute for
   * async stages), postProcess.
   * NOTE(review): exceptions from the stage are printed and swallowed rather than rethrown, so a
   * failing stage does not by itself fail the test — callers assert on the event afterwards.
   */
  protected void runStage(ClusterEvent event, Stage stage) {
    event.addAttribute(AttributeName.helixmanager.name(), manager);
    StageContext context = new StageContext();
    stage.init(context);
    stage.preProcess();
    try {
      if (stage instanceof AbstractAsyncBaseStage) {
        ((AbstractAsyncBaseStage) stage).execute(event);
      } else {
        stage.process(event);
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
    stage.postProcess();
  }

  /** Install the MasterSlave, LeaderStandby and OnlineOffline state model definitions. */
  protected void setupStateModel() {
    Builder keyBuilder = accessor.keyBuilder();
    ZNRecord masterSlave = new StateModelConfigGenerator().generateConfigForMasterSlave();
    accessor.setProperty(keyBuilder.stateModelDef(masterSlave.getId()),
        new StateModelDefinition(masterSlave));
    ZNRecord leaderStandby = new StateModelConfigGenerator().generateConfigForLeaderStandby();
    accessor.setProperty(keyBuilder.stateModelDef(leaderStandby.getId()),
        new StateModelDefinition(leaderStandby));
    ZNRecord onlineOffline = new StateModelConfigGenerator().generateConfigForOnlineOffline();
    accessor.setProperty(keyBuilder.stateModelDef(onlineOffline.getId()),
        new StateModelDefinition(onlineOffline));
  }

  /** @return a single MasterSlave resource "testResourceName" with 5 partitions. */
  protected Map<String, Resource> getResourceMap() {
    Map<String, Resource> resourceMap = new HashMap<>();
    Resource testResource = new Resource("testResourceName");
    testResource.setStateModelDefRef("MasterSlave");
    testResource.addPartition("testResourceName_0");
    testResource.addPartition("testResourceName_1");
    testResource.addPartition("testResourceName_2");
    testResource.addPartition("testResourceName_3");
    testResource.addPartition("testResourceName_4");
    resourceMap.put("testResourceName", testResource);
    return resourceMap;
  }

  /** @return resources with the given partition count and state model, keyed by name. */
  protected Map<String, Resource> getResourceMap(String[] resources, int partitions,
      String stateModel) {
    Map<String, Resource> resourceMap = new HashMap<>();
    for (String r : resources) {
      Resource testResource = new Resource(r);
      testResource.setStateModelDefRef(stateModel);
      for (int i = 0; i < partitions; i++) {
        testResource.addPartition(r + "_" + i);
      }
      resourceMap.put(r, testResource);
    }
    return resourceMap;
  }

  /** Same as {@link #getResourceMap(String[], int, String)} but with explicit configs. */
  protected Map<String, Resource> getResourceMap(String[] resources, int partitions,
      String stateModel, ClusterConfig clusterConfig, ResourceConfig resourceConfig) {
    Map<String, Resource> resourceMap = new HashMap<>();
    for (String r : resources) {
      Resource testResource = new Resource(r, clusterConfig, resourceConfig);
      testResource.setStateModelDefRef(stateModel);
      for (int i = 0; i < partitions; i++) {
        testResource.addPartition(r + "_" + i);
      }
      resourceMap.put(r, testResource);
    }
    return resourceMap;
  }

  /** Build a state-transition message with a random id targeting {@code tgtName}. */
  protected Message generateMessage(String fromState, String toState, String tgtName) {
    Message message = new Message(new ZNRecord(UUID.randomUUID().toString()));
    message.setTgtName(tgtName);
    message.setFromState(fromState);
    message.setToState(toState);
    return message;
  }
}
| 9,734 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestStateTransitionThrottleController.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import org.apache.helix.api.config.StateTransitionThrottleConfig;
import org.apache.helix.model.ClusterConfig;
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import static org.apache.helix.api.config.StateTransitionThrottleConfig.RebalanceType.ANY;
import static org.apache.helix.api.config.StateTransitionThrottleConfig.RebalanceType.LOAD_BALANCE;
import static org.apache.helix.api.config.StateTransitionThrottleConfig.RebalanceType.RECOVERY_BALANCE;
/**
 * Unit tests for {@code StateTransitionThrottleController} quota charging at cluster,
 * instance, and resource scope.
 */
public class TestStateTransitionThrottleController {
  private static final String INSTANCE = "instance0";
  private static final String RESOURCE = "db0";
  private static final List<StateTransitionThrottleConfig.RebalanceType> VALID_REBALANCE_TYPES =
      ImmutableList.of(LOAD_BALANCE, RECOVERY_BALANCE, ANY);

  @Test(description = "When cluster level ANY throttle config is set")
  public void testChargeClusterWhenANYClusterLevelThrottleConfig() {
    int maxNumberOfST = 1;
    ClusterConfig clusterConfig = new ClusterConfig("config");
    clusterConfig
        .setStateTransitionThrottleConfigs(ImmutableList.of(new StateTransitionThrottleConfig(ANY,
            StateTransitionThrottleConfig.ThrottleScope.CLUSTER, maxNumberOfST)));
    StateTransitionThrottleControllerAccessor controller =
        new StateTransitionThrottleControllerAccessor(RESOURCE, INSTANCE, clusterConfig);
    Assert.assertTrue(controller.isThrottleEnabled());
    for (StateTransitionThrottleConfig.RebalanceType rebalanceType : VALID_REBALANCE_TYPES) {
      controller.chargeCluster(rebalanceType);
      // Once the single cluster-level ANY quota is used up, every scope must throttle for
      // every rebalance type.
      for (StateTransitionThrottleConfig.RebalanceType type : VALID_REBALANCE_TYPES) {
        Assert.assertTrue(controller.shouldThrottleForCluster(type));
        Assert.assertTrue(controller.shouldThrottleForInstance(type, INSTANCE));
        // Fixed: this previously called shouldThrottleForInstance(type, RESOURCE) by
        // copy-paste mistake; the resource scope must be checked here.
        Assert.assertTrue(controller.shouldThrottleForResource(type, RESOURCE));
      }
      // reset controller
      controller = new StateTransitionThrottleControllerAccessor(RESOURCE, INSTANCE, clusterConfig);
    }
  }

  @Test(description = "When cluster throttle is config of LOAD_BALANCE/RECOVERY_BALANCE, no ANY type")
  public void testChargeCluster_OnlySetClusterSpecificType() {
    int maxNumberOfST = 1;
    ClusterConfig clusterConfig = new ClusterConfig("config");
    clusterConfig.setStateTransitionThrottleConfigs(ImmutableList.of(
        new StateTransitionThrottleConfig(RECOVERY_BALANCE,
            StateTransitionThrottleConfig.ThrottleScope.CLUSTER, maxNumberOfST),
        new StateTransitionThrottleConfig(LOAD_BALANCE,
            StateTransitionThrottleConfig.ThrottleScope.CLUSTER, maxNumberOfST)));
    StateTransitionThrottleControllerAccessor controller =
        new StateTransitionThrottleControllerAccessor(RESOURCE, INSTANCE, clusterConfig);
    Assert.assertTrue(controller.isThrottleEnabled());
    // Charging ANY must not consume the type-specific quotas when no ANY config exists.
    controller.chargeCluster(ANY);
    Assert.assertEquals(controller.getClusterLevelQuota(RECOVERY_BALANCE), 1);
    Assert.assertEquals(controller.getClusterLevelQuota(LOAD_BALANCE), 1);
    Assert.assertEquals(controller.getClusterLevelQuota(ANY), 0);
    VALID_REBALANCE_TYPES.forEach(controller::chargeCluster);
    for (StateTransitionThrottleConfig.RebalanceType rebalanceType : ImmutableList.of(LOAD_BALANCE,
        RECOVERY_BALANCE)) {
      Assert.assertTrue(controller.shouldThrottleForCluster(rebalanceType));
      Assert.assertTrue(controller.shouldThrottleForInstance(rebalanceType, INSTANCE));
      Assert.assertTrue(controller.shouldThrottleForResource(rebalanceType, RESOURCE));
    }
  }

  @DataProvider
  public static Object[][] mixedConfigurations() {
    // TODO: add more mixed configuration setting when refactoring the controller logic
    return new Object[][] {
        {
            10, 9, 8, 7, 6, 5, 4, 3, 2
        }
    };
  }

  @Test(dataProvider = "mixedConfigurations")
  public void testChargeClusterWithMixedThrottleConfig(int anyClusterLevelQuota,
      int loadClusterLevelQuota, int recoveryClusterLevelQuota, int anyInstanceLevelQuota,
      int loadInstanceLevelQuota, int recoveryInstanceLevelQuota, int anyResourceLevelQuota,
      int loadResourceLevelQuota, int recoveryResourceLevelQuota) {
    // Configure every (type, scope) combination with its own quota so each charge can be
    // verified independently.
    List<StateTransitionThrottleConfig> stateTransitionThrottleConfigs = Arrays.asList(
        new StateTransitionThrottleConfig(ANY, StateTransitionThrottleConfig.ThrottleScope.CLUSTER,
            anyClusterLevelQuota),
        new StateTransitionThrottleConfig(RECOVERY_BALANCE,
            StateTransitionThrottleConfig.ThrottleScope.CLUSTER, recoveryClusterLevelQuota),
        new StateTransitionThrottleConfig(LOAD_BALANCE,
            StateTransitionThrottleConfig.ThrottleScope.CLUSTER, loadClusterLevelQuota),
        new StateTransitionThrottleConfig(ANY, StateTransitionThrottleConfig.ThrottleScope.INSTANCE,
            anyInstanceLevelQuota),
        new StateTransitionThrottleConfig(RECOVERY_BALANCE,
            StateTransitionThrottleConfig.ThrottleScope.INSTANCE, recoveryInstanceLevelQuota),
        new StateTransitionThrottleConfig(LOAD_BALANCE,
            StateTransitionThrottleConfig.ThrottleScope.INSTANCE, loadInstanceLevelQuota),
        new StateTransitionThrottleConfig(ANY, StateTransitionThrottleConfig.ThrottleScope.RESOURCE,
            anyResourceLevelQuota),
        new StateTransitionThrottleConfig(RECOVERY_BALANCE,
            StateTransitionThrottleConfig.ThrottleScope.RESOURCE, recoveryResourceLevelQuota),
        new StateTransitionThrottleConfig(LOAD_BALANCE,
            StateTransitionThrottleConfig.ThrottleScope.RESOURCE, loadResourceLevelQuota));
    ClusterConfig clusterConfig = new ClusterConfig("config");
    clusterConfig.setStateTransitionThrottleConfigs(stateTransitionThrottleConfigs);
    StateTransitionThrottleControllerAccessor controller =
        new StateTransitionThrottleControllerAccessor(RESOURCE, INSTANCE, clusterConfig);
    Assert.assertTrue(controller.isThrottleEnabled());
    // verify behavior after charging cluster
    controller.chargeCluster(ANY);
    Assert.assertEquals(controller.getClusterLevelQuota(ANY), anyClusterLevelQuota - 1);
    controller.chargeCluster(RECOVERY_BALANCE);
    Assert.assertEquals(controller.getClusterLevelQuota(RECOVERY_BALANCE),
        recoveryClusterLevelQuota - 1);
    controller.chargeCluster(LOAD_BALANCE);
    Assert.assertEquals(controller.getClusterLevelQuota(LOAD_BALANCE), loadClusterLevelQuota - 1);
    // verify behavior after charging instance
    controller.chargeInstance(ANY, INSTANCE);
    Assert.assertEquals(controller.getInstanceLevelQuota(ANY, INSTANCE), anyInstanceLevelQuota - 1);
    controller.chargeInstance(RECOVERY_BALANCE, INSTANCE);
    Assert.assertEquals(controller.getInstanceLevelQuota(RECOVERY_BALANCE, INSTANCE),
        recoveryInstanceLevelQuota - 1);
    controller.chargeInstance(LOAD_BALANCE, INSTANCE);
    Assert.assertEquals(controller.getInstanceLevelQuota(LOAD_BALANCE, INSTANCE),
        loadInstanceLevelQuota - 1);
    // verify behavior after charging resource
    controller.chargeResource(ANY, RESOURCE);
    Assert.assertEquals(controller.getResourceLevelQuota(ANY, RESOURCE), anyResourceLevelQuota - 1);
    controller.chargeResource(RECOVERY_BALANCE, RESOURCE);
    Assert.assertEquals(controller.getResourceLevelQuota(RECOVERY_BALANCE, RESOURCE),
        recoveryResourceLevelQuota - 1);
    controller.chargeResource(LOAD_BALANCE, RESOURCE);
    Assert.assertEquals(controller.getResourceLevelQuota(LOAD_BALANCE, RESOURCE),
        loadResourceLevelQuota - 1);
  }

  // The inner class just to fetch the protected fields of {@link StateTransitionThrottleController}
  private static class StateTransitionThrottleControllerAccessor
      extends StateTransitionThrottleController {
    StateTransitionThrottleControllerAccessor(String resource, String liveInstance,
        ClusterConfig clusterConfig) {
      super(ImmutableSet.of(resource), clusterConfig, ImmutableSet.of(liveInstance));
    }

    /** @return remaining cluster-level quota for the given type (0 if unset). */
    long getClusterLevelQuota(StateTransitionThrottleConfig.RebalanceType rebalanceType) {
      return _pendingTransitionAllowedInCluster.getOrDefault(rebalanceType, 0L);
    }

    /** @return remaining resource-level quota for the given type and resource (0 if unset). */
    long getResourceLevelQuota(StateTransitionThrottleConfig.RebalanceType rebalanceType,
        String resource) {
      return _pendingTransitionAllowedPerResource.getOrDefault(resource, Collections.emptyMap())
          .getOrDefault(rebalanceType, 0L);
    }

    /** @return remaining instance-level quota for the given type and instance (0 if unset). */
    long getInstanceLevelQuota(StateTransitionThrottleConfig.RebalanceType rebalanceType,
        String instance) {
      return _pendingTransitionAllowedPerInstance.getOrDefault(instance, Collections.emptyMap())
          .getOrDefault(rebalanceType, 0L);
    }
  }
}
| 9,735 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestCustomizedStateComputationStage.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.helix.PropertyKey;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.model.CustomizedState;
import org.apache.helix.model.CustomizedStateConfig;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
/**
 * Tests that {@code CustomizedStateComputationStage} exposes customized state written by
 * participants through the {@code CustomizedStateOutput} event attribute.
 */
public class TestCustomizedStateComputationStage extends BaseStageTest {
  private final String RESOURCE_NAME = "testResourceName";
  private final String PARTITION_NAME = "testResourceName_0";
  private final String CUSTOMIZED_STATE_NAME = "customizedState1";
  private final String INSTANCE_NAME = "localhost_1";

  @Test
  public void testEmptyCustomizedState() {
    Map<String, Resource> resourceMap = getResourceMap();
    event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);
    event.addAttribute(AttributeName.ControllerDataProvider.name(),
        new ResourceControllerDataProvider(_clusterName));
    CustomizedStateComputationStage stage = new CustomizedStateComputationStage();
    runStage(event, new ReadClusterDataStage());
    runStage(event, stage);
    CustomizedStateOutput output = event.getAttribute(AttributeName.CUSTOMIZED_STATE.name());
    // no customized state has been written, so the per-partition map must be empty
    // (use the PARTITION_NAME constant instead of repeating the literal)
    AssertJUnit.assertEquals(output
        .getPartitionCustomizedStateMap(CUSTOMIZED_STATE_NAME, RESOURCE_NAME,
            new Partition(PARTITION_NAME)).size(), 0);
  }

  @Test
  public void testSimpleCustomizedState() {
    // setup resource
    Map<String, Resource> resourceMap = getResourceMap();
    setupLiveInstances(5);
    // enable aggregation for the customized state type under test
    CustomizedStateConfig config = new CustomizedStateConfig();
    List<String> aggregationEnabledTypes = new ArrayList<>();
    aggregationEnabledTypes.add(CUSTOMIZED_STATE_NAME);
    config.setAggregationEnabledTypes(aggregationEnabledTypes);
    PropertyKey.Builder keyBuilder = accessor.keyBuilder();
    accessor.setProperty(keyBuilder.customizedStateConfig(), config);
    event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);
    event.addAttribute(AttributeName.ControllerDataProvider.name(),
        new ResourceControllerDataProvider(_clusterName));
    CustomizedStateComputationStage stage = new CustomizedStateComputationStage();
    runStage(event, new ReadClusterDataStage());
    runStage(event, stage);
    // before any participant writes state, the output must be empty
    CustomizedStateOutput output1 = event.getAttribute(AttributeName.CUSTOMIZED_STATE.name());
    AssertJUnit.assertEquals(output1
        .getPartitionCustomizedStateMap(CUSTOMIZED_STATE_NAME, RESOURCE_NAME,
            new Partition(PARTITION_NAME)).size(), 0);
    // write a customized state for one partition on one instance
    CustomizedState customizedState = new CustomizedState(RESOURCE_NAME);
    customizedState.setState(PARTITION_NAME, "STARTED");
    accessor.setProperty(
        keyBuilder.customizedState(INSTANCE_NAME, CUSTOMIZED_STATE_NAME, RESOURCE_NAME),
        customizedState);
    runStage(event, new ReadClusterDataStage());
    runStage(event, stage);
    // the stage must now surface that state in its output
    CustomizedStateOutput output2 = event.getAttribute(AttributeName.CUSTOMIZED_STATE.name());
    Partition partition = new Partition(PARTITION_NAME);
    AssertJUnit.assertEquals(output2
        .getPartitionCustomizedState(CUSTOMIZED_STATE_NAME, RESOURCE_NAME, partition,
            INSTANCE_NAME), "STARTED");
  }
}
| 9,736 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestResourceComputationStage.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.controller.dataproviders.WorkflowControllerDataProvider;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.pipeline.StageContext;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.IdealState.RebalanceMode;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Resource;
import org.apache.helix.tools.DefaultIdealStateCalculator;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
/**
 * Tests {@link ResourceComputationStage}: verifies that the resource map placed on the
 * cluster event is derived correctly from IdealStates and, for resources that no longer
 * have an IdealState, from instances' lingering CurrentStates.
 */
public class TestResourceComputationStage extends BaseStageTest {
  /**
   * Case where we have one resource in IdealState
   * @throws Exception
   */
  @Test
  public void testSimple() throws Exception {
    int nodes = 5;
    List<String> instances = new ArrayList<String>();
    for (int i = 0; i < nodes; i++) {
      instances.add("localhost_" + i);
    }
    int partitions = 10;
    int replicas = 1;
    String resourceName = "testResource";
    ZNRecord record =
        DefaultIdealStateCalculator.calculateIdealState(instances, partitions, replicas,
            resourceName, "MASTER", "SLAVE");
    IdealState idealState = new IdealState(record);
    idealState.setStateModelDefRef("MasterSlave");
    HelixDataAccessor accessor = manager.getHelixDataAccessor();
    Builder keyBuilder = accessor.keyBuilder();
    accessor.setProperty(keyBuilder.idealStates(resourceName), idealState);
    event.addAttribute(AttributeName.ControllerDataProvider.name(),
        new ResourceControllerDataProvider());
    ResourceComputationStage stage = new ResourceComputationStage();
    // ReadClusterDataStage must run first so the data provider is refreshed from ZK
    // before the resource computation reads it.
    runStage(event, new ReadClusterDataStage());
    runStage(event, stage);
    Map<String, Resource> resource = event.getAttribute(AttributeName.RESOURCES_TO_REBALANCE.name());
    // Exactly one resource, matching the IdealState's name, state model, and partition count.
    AssertJUnit.assertEquals(1, resource.size());
    AssertJUnit.assertEquals(resource.keySet().iterator().next(), resourceName);
    AssertJUnit.assertEquals(resource.values().iterator().next().getResourceName(), resourceName);
    AssertJUnit.assertEquals(resource.values().iterator().next().getStateModelDefRef(),
        idealState.getStateModelDefRef());
    AssertJUnit
        .assertEquals(resource.values().iterator().next().getPartitions().size(), partitions);
  }

  /**
   * Two resources in IdealState: the computed resource map must contain an entry per
   * resource with the matching state model reference and partition count.
   */
  @Test
  public void testMultipleResources() throws Exception {
    // List<IdealState> idealStates = new ArrayList<IdealState>();
    String[] resources = new String[] {
        "testResource1", "testResource2"
    };
    List<IdealState> idealStates = setupIdealState(5, resources, 10, 1, RebalanceMode.SEMI_AUTO);
    event.addAttribute(AttributeName.ControllerDataProvider.name(),
        new ResourceControllerDataProvider());
    ResourceComputationStage stage = new ResourceComputationStage();
    runStage(event, new ReadClusterDataStage());
    runStage(event, stage);
    Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES_TO_REBALANCE.name());
    AssertJUnit.assertEquals(resources.length, resourceMap.size());
    for (int i = 0; i < resources.length; i++) {
      String resourceName = resources[i];
      IdealState idealState = idealStates.get(i);
      AssertJUnit.assertTrue(resourceMap.containsKey(resourceName));
      AssertJUnit.assertEquals(resourceMap.get(resourceName).getResourceName(), resourceName);
      AssertJUnit.assertEquals(resourceMap.get(resourceName).getStateModelDefRef(),
          idealState.getStateModelDefRef());
      AssertJUnit.assertEquals(resourceMap.get(resourceName).getPartitions().size(),
          idealState.getNumPartitions());
    }
  }

  /**
   * A live instance reports CurrentState for resources that no longer exist in IdealState
   * (a regular resource and a task resource). The stage must still surface those resources
   * so that their partitions can be dropped: the regular one via the
   * ResourceControllerDataProvider pass, and the task one additionally via the
   * WorkflowControllerDataProvider pass.
   */
  @Test
  public void testMultipleResourcesWithSomeDropped() throws Exception {
    int nodes = 5;
    List<String> instances = new ArrayList<String>();
    for (int i = 0; i < nodes; i++) {
      instances.add("localhost_" + i);
    }
    String[] resources = new String[] {
        "testResource1", "testResource2"
    };
    List<IdealState> idealStates = new ArrayList<IdealState>();
    for (int i = 0; i < resources.length; i++) {
      int partitions = 10;
      int replicas = 1;
      String resourceName = resources[i];
      ZNRecord record =
          DefaultIdealStateCalculator.calculateIdealState(instances, partitions, replicas,
              resourceName, "MASTER", "SLAVE");
      IdealState idealState = new IdealState(record);
      idealState.setStateModelDefRef("MasterSlave");
      HelixDataAccessor accessor = manager.getHelixDataAccessor();
      Builder keyBuilder = accessor.keyBuilder();
      accessor.setProperty(keyBuilder.idealStates(resourceName), idealState);
      idealStates.add(idealState);
    }
    // ADD A LIVE INSTANCE WITH A CURRENT STATE THAT CONTAINS RESOURCE WHICH NO
    // LONGER EXISTS IN IDEALSTATE
    String instanceName = "localhost_" + 3;
    LiveInstance liveInstance = new LiveInstance(instanceName);
    String sessionId = UUID.randomUUID().toString();
    liveInstance.setSessionId(sessionId);
    HelixDataAccessor accessor = manager.getHelixDataAccessor();
    Builder keyBuilder = accessor.keyBuilder();
    accessor.setProperty(keyBuilder.liveInstance(instanceName), liveInstance);
    // Regular (MasterSlave) resource that exists only in CurrentState.
    String oldResource = "testResourceOld";
    CurrentState currentState = new CurrentState(oldResource);
    currentState.setState("testResourceOld_0", "OFFLINE");
    currentState.setState("testResourceOld_1", "SLAVE");
    currentState.setState("testResourceOld_2", "MASTER");
    currentState.setStateModelDefRef("MasterSlave");
    accessor.setProperty(keyBuilder.currentState(instanceName, sessionId, oldResource),
        currentState);
    // Task resource that exists only in the task CurrentState path.
    String oldTaskResource = "testTaskResourceOld";
    CurrentState taskCurrentState = new CurrentState(oldTaskResource);
    taskCurrentState.setState("testTaskResourceOld_0", "RUNNING");
    taskCurrentState.setState("testTaskResourceOld_1", "FINISHED");
    taskCurrentState.setStateModelDefRef("Task");
    accessor.setProperty(keyBuilder.taskCurrentState(instanceName, sessionId, oldTaskResource),
        taskCurrentState);
    event.addAttribute(AttributeName.ControllerDataProvider.name(),
        new ResourceControllerDataProvider());
    ResourceComputationStage stage = new ResourceComputationStage();
    runStage(event, new ReadClusterDataStage());
    runStage(event, stage);
    Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.name());
    // +1 because it will have one for current state
    AssertJUnit.assertEquals(resources.length + 1, resourceMap.size());
    for (int i = 0; i < resources.length; i++) {
      String resourceName = resources[i];
      IdealState idealState = idealStates.get(i);
      AssertJUnit.assertTrue(resourceMap.containsKey(resourceName));
      AssertJUnit.assertEquals(resourceMap.get(resourceName).getResourceName(), resourceName);
      AssertJUnit.assertEquals(resourceMap.get(resourceName).getStateModelDefRef(),
          idealState.getStateModelDefRef());
      AssertJUnit.assertEquals(resourceMap.get(resourceName).getPartitions().size(),
          idealState.getNumPartitions());
    }
    // Test the data derived from CurrentState
    AssertJUnit.assertTrue(resourceMap.containsKey(oldResource));
    AssertJUnit.assertEquals(resourceMap.get(oldResource).getResourceName(), oldResource);
    AssertJUnit.assertEquals(resourceMap.get(oldResource).getStateModelDefRef(),
        currentState.getStateModelDefRef());
    AssertJUnit.assertEquals(resourceMap.get(oldResource).getPartitions().size(), currentState
        .getPartitionStateMap().size());
    AssertJUnit.assertNotNull(resourceMap.get(oldResource).getPartition("testResourceOld_0"));
    AssertJUnit.assertNotNull(resourceMap.get(oldResource).getPartition("testResourceOld_1"));
    AssertJUnit.assertNotNull(resourceMap.get(oldResource).getPartition("testResourceOld_2"));
    // Re-run with the workflow controller's data provider; this path also picks up the
    // task CurrentState resource.
    event.addAttribute(AttributeName.ControllerDataProvider.name(),
        new WorkflowControllerDataProvider());
    runStage(event, new ReadClusterDataStage());
    runStage(event, stage);
    resourceMap = event.getAttribute(AttributeName.RESOURCES.name());
    // +2 because it will have current state and task current state
    AssertJUnit.assertEquals(resources.length + 2, resourceMap.size());
    Resource taskResource = resourceMap.get(oldTaskResource);
    AssertJUnit.assertNotNull(taskResource);
    AssertJUnit.assertEquals(taskResource.getResourceName(), oldTaskResource);
    AssertJUnit
        .assertEquals(taskResource.getStateModelDefRef(), taskCurrentState.getStateModelDefRef());
    AssertJUnit.assertEquals(taskResource.getPartitions().size(),
        taskCurrentState.getPartitionStateMap().size());
    AssertJUnit.assertNotNull(taskResource.getPartition("testTaskResourceOld_0"));
    AssertJUnit.assertNotNull(taskResource.getPartition("testTaskResourceOld_1"));
  }

  /**
   * Running the stage on an event with no attributes must throw (the stage requires
   * a populated data provider); the test asserts the exception is raised.
   */
  @Test
  public void testNull() {
    ResourceComputationStage stage = new ResourceComputationStage();
    StageContext context = new StageContext();
    stage.init(context);
    stage.preProcess();
    boolean exceptionCaught = false;
    try {
      stage.process(event);
    } catch (Exception e) {
      exceptionCaught = true;
    }
    AssertJUnit.assertTrue(exceptionCaught);
    stage.postProcess();
  }

  // public void testEmptyCluster()
  // {
  // ClusterEvent event = new ClusterEvent("sampleEvent");
  // ClusterManager manager = new Mocks.MockManager();
  // event.addAttribute("clustermanager", manager);
  // ResourceComputationStage stage = new ResourceComputationStage();
  // StageContext context = new StageContext();
  // stage.init(context);
  // stage.preProcess();
  // boolean exceptionCaught = false;
  // try
  // {
  // stage.process(event);
  // } catch (Exception e)
  // {
  // exceptionCaught = true;
  // }
  // Assert.assertTrue(exceptionCaught);
  // stage.postProcess();
  // }
}
| 9,737 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestMessageThrottleStage.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.pipeline.Pipeline;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.ClusterConstraints;
import org.apache.helix.model.ClusterConstraints.ConstraintAttribute;
import org.apache.helix.model.ClusterConstraints.ConstraintType;
import org.apache.helix.model.ConstraintItem;
import org.apache.helix.model.Message;
import org.apache.helix.model.Message.MessageType;
import org.apache.helix.model.Partition;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests {@link MessageThrottleStage}: verifies the stage's precondition checks, the
 * matching and selection of MESSAGE_CONSTRAINT items, and that state-transition messages
 * exceeding the configured constraint values are throttled.
 */
public class TestMessageThrottleStage extends ZkUnitTestBase {
  final String _className = getShortClassName();

  /**
   * With no MESSAGE_CONSTRAINT configured, the stage passes all selected messages through.
   * Also verifies the stage fails fast when DATA_CACHE, RESOURCES, or MESSAGES_SELECTED
   * attributes are missing from the event.
   */
  @Test
  public void testMsgThrottleBasic() throws Exception {
    String clusterName = "CLUSTER_" + _className + "_basic";
    System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
    HelixDataAccessor accessor =
        new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
    HelixManager manager = new DummyClusterManager(clusterName, accessor);
    // ideal state: node0 is MASTER, node1 is SLAVE
    // replica=2 means 1 master and 1 slave
    setupIdealState(clusterName, new int[] {
        0, 1
    }, new String[] {
        "TestDB"
    }, 1, 2);
    setupLiveInstances(clusterName, new int[] {
        0, 1
    });
    setupStateModel(clusterName);
    ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
    ResourceControllerDataProvider cache = new ResourceControllerDataProvider(clusterName);
    event.addAttribute(AttributeName.helixmanager.name(), manager);
    event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);
    MessageThrottleStage throttleStage = new MessageThrottleStage();
    try {
      runStage(event, throttleStage);
      Assert.fail("Should throw exception since DATA_CACHE is null");
    } catch (Exception e) {
      // OK
    }
    Pipeline dataRefresh = new Pipeline();
    dataRefresh.addStage(new ReadClusterDataStage());
    runPipeline(event, dataRefresh, false);
    try {
      runStage(event, throttleStage);
      Assert.fail("Should throw exception since RESOURCE is null");
    } catch (Exception e) {
      // OK
    }
    runStage(event, new ResourceComputationStage());
    try {
      runStage(event, throttleStage);
      Assert.fail("Should throw exception since MESSAGE_SELECT is null");
    } catch (Exception e) {
      // OK
    }
    MessageOutput msgSelectOutput = new MessageOutput();
    List<Message> selectMessages = new ArrayList<>();
    Message msg =
        createMessage(MessageType.STATE_TRANSITION, "msgId-001", "OFFLINE", "SLAVE", "TestDB",
            "localhost_0");
    selectMessages.add(msg);
    msgSelectOutput.addMessages("TestDB", new Partition("TestDB_0"), selectMessages);
    event.addAttribute(AttributeName.MESSAGES_SELECTED.name(), msgSelectOutput);
    runStage(event, throttleStage);
    // No constraints configured, so the single selected message survives throttling.
    MessageOutput msgThrottleOutput =
        event.getAttribute(AttributeName.MESSAGES_THROTTLE.name());
    Assert.assertEquals(msgThrottleOutput.getMessages("TestDB", new Partition("TestDB_0")).size(),
        1);
    deleteLiveInstances(clusterName);
    deleteCluster(clusterName);
    System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
  }

  /**
   * Sets up six MESSAGE_CONSTRAINT items of varying specificity, then verifies
   * (a) constraint matching and selection picks the most specific applicable items, and
   * (b) the throttle stage caps messages per the selected constraint values:
   * localhost_0 is limited to 3 OFFLINE-SLAVE transitions on TestDB (constraint5) and
   * localhost_1 to 1 OFFLINE-SLAVE transition on any resource (constraint3).
   */
  @Test()
  public void testMsgThrottleConstraints() throws Exception {
    String clusterName = "CLUSTER_" + _className + "_constraints";
    System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
    HelixDataAccessor accessor =
        new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
    HelixManager manager = new DummyClusterManager(clusterName, accessor);
    // ideal state: node0 is MASTER, node1 is SLAVE
    // replica=2 means 1 master and 1 slave
    setupIdealState(clusterName, new int[] {
        0, 1
    }, new String[] {
        "TestDB"
    }, 1, 2);
    setupLiveInstances(clusterName, new int[] {
        0, 1
    });
    setupStateModel(clusterName);
    // setup constraints
    ZNRecord record = new ZNRecord(ConstraintType.MESSAGE_CONSTRAINT.toString());
    // constraint0:
    // "MESSAGE_TYPE=STATE_TRANSITION,CONSTRAINT_VALUE=ANY"
    record.setMapField("constraint0", new TreeMap<>());
    record.getMapField("constraint0").put("MESSAGE_TYPE", "STATE_TRANSITION");
    record.getMapField("constraint0").put("CONSTRAINT_VALUE", "ANY");
    ConstraintItem constraint0 = new ConstraintItem(record.getMapField("constraint0"));
    // constraint1:
    // "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,CONSTRAINT_VALUE=50"
    record.setMapField("constraint1", new TreeMap<>());
    record.getMapField("constraint1").put("MESSAGE_TYPE", "STATE_TRANSITION");
    record.getMapField("constraint1").put("TRANSITION", "OFFLINE-SLAVE");
    record.getMapField("constraint1").put("CONSTRAINT_VALUE", "50");
    ConstraintItem constraint1 = new ConstraintItem(record.getMapField("constraint1"));
    // constraint2:
    // "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=.*,RESOURCE=TestDB,CONSTRAINT_VALUE=2";
    record.setMapField("constraint2", new TreeMap<>());
    record.getMapField("constraint2").put("MESSAGE_TYPE", "STATE_TRANSITION");
    record.getMapField("constraint2").put("TRANSITION", "OFFLINE-SLAVE");
    record.getMapField("constraint2").put("INSTANCE", ".*");
    record.getMapField("constraint2").put("RESOURCE", "TestDB");
    record.getMapField("constraint2").put("CONSTRAINT_VALUE", "2");
    ConstraintItem constraint2 = new ConstraintItem(record.getMapField("constraint2"));
    // constraint3:
    // "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=localhost_1,RESOURCE=.*,CONSTRAINT_VALUE=1";
    record.setMapField("constraint3", new TreeMap<>());
    record.getMapField("constraint3").put("MESSAGE_TYPE", "STATE_TRANSITION");
    record.getMapField("constraint3").put("TRANSITION", "OFFLINE-SLAVE");
    record.getMapField("constraint3").put("INSTANCE", "localhost_1");
    record.getMapField("constraint3").put("RESOURCE", ".*");
    record.getMapField("constraint3").put("CONSTRAINT_VALUE", "1");
    ConstraintItem constraint3 = new ConstraintItem(record.getMapField("constraint3"));
    // constraint4:
    // "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=.*,RESOURCE=.*,CONSTRAINT_VALUE=10"
    record.setMapField("constraint4", new TreeMap<>());
    record.getMapField("constraint4").put("MESSAGE_TYPE", "STATE_TRANSITION");
    record.getMapField("constraint4").put("TRANSITION", "OFFLINE-SLAVE");
    record.getMapField("constraint4").put("INSTANCE", ".*");
    record.getMapField("constraint4").put("RESOURCE", ".*");
    record.getMapField("constraint4").put("CONSTRAINT_VALUE", "10");
    ConstraintItem constraint4 = new ConstraintItem(record.getMapField("constraint4"));
    // constraint5:
    // "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=localhost_0,RESOURCE=TestDB,CONSTRAINT_VALUE=3"
    record.setMapField("constraint5", new TreeMap<>());
    record.getMapField("constraint5").put("MESSAGE_TYPE", "STATE_TRANSITION");
    record.getMapField("constraint5").put("TRANSITION", "OFFLINE-SLAVE");
    record.getMapField("constraint5").put("INSTANCE", "localhost_0");
    record.getMapField("constraint5").put("RESOURCE", "TestDB");
    record.getMapField("constraint5").put("CONSTRAINT_VALUE", "3");
    ConstraintItem constraint5 = new ConstraintItem(record.getMapField("constraint5"));
    Builder keyBuilder = accessor.keyBuilder();
    accessor.setProperty(keyBuilder.constraint(ConstraintType.MESSAGE_CONSTRAINT.toString()),
        new ClusterConstraints(record));
    ClusterConstraints constraint =
        accessor.getProperty(keyBuilder.constraint(ConstraintType.MESSAGE_CONSTRAINT.toString()));
    MessageThrottleStage throttleStage = new MessageThrottleStage();
    // test constraintSelection
    // message1: hits constraintSelection rule1 and rule2
    Message msg1 =
        createMessage(MessageType.STATE_TRANSITION, "msgId-001", "OFFLINE", "SLAVE", "TestDB",
            "localhost_0");
    Map<ConstraintAttribute, String> msgAttr = ClusterConstraints.toConstraintAttributes(msg1);
    Set<ConstraintItem> matches = constraint.match(msgAttr);
    Assert.assertEquals(matches.size(), 5);
    Assert.assertTrue(containsConstraint(matches, constraint0));
    Assert.assertTrue(containsConstraint(matches, constraint1));
    Assert.assertTrue(containsConstraint(matches, constraint2));
    Assert.assertTrue(containsConstraint(matches, constraint4));
    Assert.assertTrue(containsConstraint(matches, constraint5));
    // Selection keeps only the most specific applicable constraints.
    matches = throttleStage.selectConstraints(matches, msgAttr);
    Assert.assertEquals(matches.size(), 2);
    Assert.assertTrue(containsConstraint(matches, constraint1));
    Assert.assertTrue(containsConstraint(matches, constraint5));
    // message2: hits constraintSelection rule1, rule2, and rule3
    Message msg2 =
        createMessage(MessageType.STATE_TRANSITION, "msgId-002", "OFFLINE", "SLAVE", "TestDB",
            "localhost_1");
    msgAttr = ClusterConstraints.toConstraintAttributes(msg2);
    matches = constraint.match(msgAttr);
    Assert.assertEquals(matches.size(), 5);
    Assert.assertTrue(containsConstraint(matches, constraint0));
    Assert.assertTrue(containsConstraint(matches, constraint1));
    Assert.assertTrue(containsConstraint(matches, constraint2));
    Assert.assertTrue(containsConstraint(matches, constraint3));
    Assert.assertTrue(containsConstraint(matches, constraint4));
    matches = throttleStage.selectConstraints(matches, msgAttr);
    Assert.assertEquals(matches.size(), 2);
    Assert.assertTrue(containsConstraint(matches, constraint1));
    Assert.assertTrue(containsConstraint(matches, constraint3));
    // test messageThrottleStage
    ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
    ResourceControllerDataProvider cache = new ResourceControllerDataProvider(clusterName);
    event.addAttribute(AttributeName.helixmanager.name(), manager);
    event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);
    Pipeline dataRefresh = new Pipeline();
    dataRefresh.addStage(new ReadClusterDataStage());
    runPipeline(event, dataRefresh, false);
    runStage(event, new ResourceComputationStage());
    MessageOutput msgSelectOutput = new MessageOutput();
    Message msg3 =
        createMessage(MessageType.STATE_TRANSITION, "msgId-003", "OFFLINE", "SLAVE", "TestDB",
            "localhost_0");
    Message msg4 =
        createMessage(MessageType.STATE_TRANSITION, "msgId-004", "OFFLINE", "SLAVE", "TestDB",
            "localhost_0");
    Message msg5 =
        createMessage(MessageType.STATE_TRANSITION, "msgId-005", "OFFLINE", "SLAVE", "TestDB",
            "localhost_0");
    Message msg6 =
        createMessage(MessageType.STATE_TRANSITION, "msgId-006", "OFFLINE", "SLAVE", "TestDB",
            "localhost_1");
    List<Message> selectMessages = new ArrayList<>();
    selectMessages.add(msg1);
    selectMessages.add(msg2);
    selectMessages.add(msg3);
    selectMessages.add(msg4);
    selectMessages.add(msg5); // should be throttled
    selectMessages.add(msg6); // should be throttled
    msgSelectOutput.addMessages("TestDB", new Partition("TestDB_0"), selectMessages);
    event.addAttribute(AttributeName.MESSAGES_SELECTED.name(), msgSelectOutput);
    runStage(event, throttleStage);
    // localhost_0 allows 3 (msg1, msg3, msg4); localhost_1 allows 1 (msg2) => 4 survive.
    MessageOutput msgThrottleOutput =
        event.getAttribute(AttributeName.MESSAGES_THROTTLE.name());
    List<Message> throttleMessages =
        msgThrottleOutput.getMessages("TestDB", new Partition("TestDB_0"));
    Assert.assertEquals(throttleMessages.size(), 4);
    Assert.assertTrue(throttleMessages.contains(msg1));
    Assert.assertTrue(throttleMessages.contains(msg2));
    Assert.assertTrue(throttleMessages.contains(msg3));
    Assert.assertTrue(throttleMessages.contains(msg4));
    deleteLiveInstances(clusterName);
    deleteCluster(clusterName);
    System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
  }

  /**
   * Membership check by string representation, since ConstraintItem does not override
   * equals(); returns true if any item in the set renders identically to the expected one.
   */
  private boolean containsConstraint(Set<ConstraintItem> constraints, ConstraintItem constraint) {
    for (ConstraintItem item : constraints) {
      if (item.toString().equals(constraint.toString())) {
        return true;
      }
    }
    return false;
  }
}
| 9,738 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestCancellationMessageGeneration.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.HelixDefinedState;
import org.apache.helix.HelixManager;
import org.apache.helix.controller.common.PartitionStateMap;
import org.apache.helix.controller.common.ResourcesStateMap;
import org.apache.helix.controller.dataproviders.BaseControllerDataProvider;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.task.TaskConstants;
import org.apache.helix.tools.StateModelConfigGenerator;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for cancellation-message generation in {@link MessageGenerationPhase}.
 * Uses Mockito mocks for the controller data provider and cluster model, then runs
 * {@code process(event)} and inspects the MESSAGES_ALL output.
 *
 * Note: live-instance maps are real maps rather than {@code mock(Map.class)} — mocking a
 * collection only stubs the methods explicitly listed and silently returns null/defaults
 * for everything else, which can mask bugs in the code under test.
 */
public class TestCancellationMessageGeneration extends MessageGenerationPhase {
  private static final String TEST_CLUSTER = "testCluster";
  private static final String TEST_RESOURCE = "resource0";
  private static final String TEST_INSTANCE = "instance0";
  private static final String TEST_PARTITION = "partition0";

  /*
   * This test checks the cancellation message generation when currentState=null and
   * desiredState=DROPPED
   */
  @Test
  public void TestOFFLINEToDROPPED() throws Exception {
    ClusterEvent event = new ClusterEvent(TEST_CLUSTER, ClusterEventType.Unknown);

    // Current state is null (instance reports nothing) while an OFFLINE->SLAVE message
    // is still pending for the partition.
    CurrentStateOutput currentStateOutput = mock(CurrentStateOutput.class);
    Partition partition = mock(Partition.class);
    when(partition.getPartitionName()).thenReturn(TEST_PARTITION);
    when(currentStateOutput.getCurrentState(TEST_RESOURCE, partition, TEST_INSTANCE))
        .thenReturn(null);
    Message message = mock(Message.class);
    when(message.getFromState()).thenReturn("OFFLINE");
    when(message.getToState()).thenReturn("SLAVE");
    when(currentStateOutput.getPendingMessage(TEST_RESOURCE, partition, TEST_INSTANCE))
        .thenReturn(message);
    event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);

    // Set helix manager to event
    event.addAttribute(AttributeName.helixmanager.name(), mock(HelixManager.class));

    // Controller data provider: one live instance, cancellation enabled.
    BaseControllerDataProvider cache = mock(BaseControllerDataProvider.class);
    StateModelDefinition stateModelDefinition =
        new StateModelDefinition(StateModelConfigGenerator.generateConfigForMasterSlave());
    when(cache.getStateModelDef(TaskConstants.STATE_MODEL_NAME)).thenReturn(stateModelDefinition);
    LiveInstance mockLiveInstance = mock(LiveInstance.class);
    when(mockLiveInstance.getInstanceName()).thenReturn(TEST_INSTANCE);
    when(mockLiveInstance.getEphemeralOwner()).thenReturn("TEST");
    // Real map (was mock(Map.class) with only values() stubbed).
    Map<String, LiveInstance> liveInstances =
        Collections.singletonMap(TEST_INSTANCE, mockLiveInstance);
    when(cache.getLiveInstances()).thenReturn(liveInstances);
    ClusterConfig clusterConfig = mock(ClusterConfig.class);
    when(cache.getClusterConfig()).thenReturn(clusterConfig);
    when(clusterConfig.isStateTransitionCancelEnabled()).thenReturn(true);
    event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);

    // Set resources to rebalance to event
    Map<String, Resource> resourceMap = new HashMap<>();
    Resource resource = mock(Resource.class);
    when(resource.getResourceName()).thenReturn(TEST_RESOURCE);
    List<Partition> partitions = Arrays.asList(partition);
    when(resource.getPartitions()).thenReturn(partitions);
    when(resource.getStateModelDefRef()).thenReturn(TaskConstants.STATE_MODEL_NAME);
    resourceMap.put(TEST_RESOURCE, resource);
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);

    // Best possible state wants the replica DROPPED.
    BestPossibleStateOutput bestPossibleStateOutput = new BestPossibleStateOutput();
    PartitionStateMap partitionStateMap = new PartitionStateMap(TEST_RESOURCE);
    Map<Partition, Map<String, String>> stateMap = partitionStateMap.getStateMap();
    Map<String, String> instanceStateMap = new HashMap<>();
    instanceStateMap.put(TEST_INSTANCE, HelixDefinedState.DROPPED.name());
    stateMap.put(partition, instanceStateMap);
    bestPossibleStateOutput.setState(TEST_RESOURCE, partition, instanceStateMap);
    event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), bestPossibleStateOutput);

    process(event);
    MessageOutput output = event.getAttribute(AttributeName.MESSAGES_ALL.name());
    // Exactly one cancellation message should be generated for the pending transition.
    Assert.assertEquals(output.getMessages(TEST_RESOURCE, partition).size(), 1);
  }

  /*
   * Tests that no cancellation message is created for
   * pending ST message of error partition reset.
   */
  @Test
  public void testNoCancellationForErrorReset() throws Exception {
    List<Message> messages = generateMessages("ERROR", "ERROR", "OFFLINE");
    Assert.assertTrue(messages.isEmpty(), "Should not create cancellation message");
  }

  /*
   * Tests that controller should be able to cancel ST: ONLINE -> OFFLINE
   */
  @Test
  public void testCancelOnlineToOffline() throws Exception {
    List<Message> messages = generateMessages("ONLINE", "ONLINE", "OFFLINE");
    Assert.assertEquals(messages.size(), 1, "Should create cancellation message");
    Message msg = messages.get(0);
    Assert.assertEquals(msg.getMsgType(), Message.MessageType.STATE_TRANSITION_CANCELLATION.name());
    Assert.assertEquals(msg.getFromState(), "ONLINE");
    Assert.assertEquals(msg.getToState(), "OFFLINE");
  }

  /**
   * Builds a cluster event with the given current state and a pending
   * {@code fromState -> toState} message, runs the message generation phase, and returns
   * the messages produced for the test partition.
   *
   * @param currentState replica's current state as reported by CurrentStateOutput
   * @param fromState    pending message's from-state
   * @param toState      pending message's to-state (also the best-possible desired state
   *                     is set to {@code currentState})
   * @return messages generated for {@code TEST_RESOURCE}/{@code TEST_PARTITION}
   */
  private List<Message> generateMessages(String currentState, String fromState, String toState)
      throws Exception {
    ClusterEvent event = new ClusterEvent(TEST_CLUSTER, ClusterEventType.Unknown);

    // Set current state to event
    CurrentStateOutput currentStateOutput = mock(CurrentStateOutput.class);
    Partition partition = mock(Partition.class);
    when(partition.getPartitionName()).thenReturn(TEST_PARTITION);
    when(currentStateOutput.getCurrentState(TEST_RESOURCE, partition, TEST_INSTANCE))
        .thenReturn(currentState);
    // Pending message for error partition reset
    Message pendingMessage = mock(Message.class);
    when(pendingMessage.getFromState()).thenReturn(fromState);
    when(pendingMessage.getToState()).thenReturn(toState);
    when(currentStateOutput.getPendingMessage(TEST_RESOURCE, partition, TEST_INSTANCE))
        .thenReturn(pendingMessage);
    event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);

    // Set helix manager to event
    event.addAttribute(AttributeName.helixmanager.name(), mock(HelixManager.class));

    StateModelDefinition stateModelDefinition = new StateModelDefinition.Builder("TestStateModel")
        .addState("ONLINE", 1).addState("OFFLINE")
        .addState("DROPPED").addState("ERROR")
        .initialState("OFFLINE")
        .addTransition("ERROR", "OFFLINE", 1).addTransition("ONLINE", "OFFLINE", 2)
        .addTransition("OFFLINE", "DROPPED", 3).addTransition("OFFLINE", "ONLINE", 4)
        .build();

    // Set controller data provider to event
    BaseControllerDataProvider cache = mock(BaseControllerDataProvider.class);
    when(cache.getStateModelDef(TaskConstants.STATE_MODEL_NAME)).thenReturn(stateModelDefinition);
    LiveInstance mockLiveInstance = mock(LiveInstance.class);
    when(mockLiveInstance.getInstanceName()).thenReturn(TEST_INSTANCE);
    when(mockLiveInstance.getEphemeralOwner()).thenReturn("TEST");
    // Real map (was mock(Map.class) with only values() stubbed).
    Map<String, LiveInstance> liveInstances =
        Collections.singletonMap(TEST_INSTANCE, mockLiveInstance);
    when(cache.getLiveInstances()).thenReturn(liveInstances);
    ClusterConfig clusterConfig = mock(ClusterConfig.class);
    when(cache.getClusterConfig()).thenReturn(clusterConfig);
    when(clusterConfig.isStateTransitionCancelEnabled()).thenReturn(true);
    event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);

    // Set event attribute: resources to rebalance
    Map<String, Resource> resourceMap = new HashMap<>();
    Resource resource = mock(Resource.class);
    when(resource.getResourceName()).thenReturn(TEST_RESOURCE);
    List<Partition> partitions = Collections.singletonList(partition);
    when(resource.getPartitions()).thenReturn(partitions);
    when(resource.getStateModelDefRef()).thenReturn(TaskConstants.STATE_MODEL_NAME);
    resourceMap.put(TEST_RESOURCE, resource);
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);

    // set up resource state map
    BestPossibleStateOutput bestPossibleStateOutput = new BestPossibleStateOutput();
    PartitionStateMap partitionStateMap = new PartitionStateMap(TEST_RESOURCE);
    Map<Partition, Map<String, String>> stateMap = partitionStateMap.getStateMap();
    Map<String, String> instanceStateMap = new HashMap<>();
    instanceStateMap.put(TEST_INSTANCE, currentState);
    stateMap.put(partition, instanceStateMap);
    bestPossibleStateOutput.setState(TEST_RESOURCE, partition, instanceStateMap);

    // Process the event
    event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), bestPossibleStateOutput);
    process(event);
    MessageOutput output = event.getAttribute(AttributeName.MESSAGES_ALL.name());
    return output.getMessages(TEST_RESOURCE, partition);
  }
}
| 9,739 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestBestPossibleStateCalcStage.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Date;
import java.util.List;
import java.util.Map;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState.RebalanceMode;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests for {@link BestPossibleStateCalcStage}: verifies the best-possible mapping for a
 * simple SEMI_AUTO resource, and that the stage switches to the maintenance rebalancer
 * when the number of offline/disabled instances exceeds the configured threshold.
 */
public class TestBestPossibleStateCalcStage extends BaseStageTest {
  @Test
  public void testSimple() {
    System.out.println("START TestBestPossibleStateCalcStage at "
        + new Date(System.currentTimeMillis()));
    String[] resources = new String[] {
        "testResourceName"
    };
    int numPartition = 5;
    int numReplica = 1;
    // One SEMI_AUTO MasterSlave resource with 5 partitions spread over 5 live instances.
    setupIdealState(5, resources, numPartition, numReplica, RebalanceMode.SEMI_AUTO,
        BuiltInStateModelDefinitions.MasterSlave.name());
    setupLiveInstances(5);
    setupStateModel();
    Map<String, Resource> resourceMap =
        getResourceMap(resources, numPartition, BuiltInStateModelDefinitions.MasterSlave.name());
    // Empty current state: every assignment is a fresh bootstrap.
    CurrentStateOutput currentStateOutput = new CurrentStateOutput();
    event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);
    event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
    event.addAttribute(AttributeName.ControllerDataProvider.name(), new ResourceControllerDataProvider());
    ReadClusterDataStage stage1 = new ReadClusterDataStage();
    runStage(event, stage1);
    BestPossibleStateCalcStage stage2 = new BestPossibleStateCalcStage();
    runStage(event, stage2);
    BestPossibleStateOutput output =
        event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
    // Each partition p should be mastered by instance (p + 1) % 5, matching the
    // preference lists produced by setupIdealState (assumed layout — see BaseStageTest).
    for (int p = 0; p < 5; p++) {
      Partition resource = new Partition("testResourceName_" + p);
      Assert.assertEquals("MASTER", output.getInstanceStateMap("testResourceName", resource)
          .get("localhost_" + (p + 1) % 5));
    }
    System.out.println("END TestBestPossibleStateCalcStage at "
        + new Date(System.currentTimeMillis()));
  }

  /*
   * Tests the pipeline detects offline instances exceed the threshold and auto enters maintenance,
   * the maintenance rebalancer is used immediately. No bootstraps in the best possible output.
   */
  @Test
  public void testAutoEnterMaintenanceWhenExceedingOfflineNodes() {
    String[] resources = new String[]{"testResourceName"};
    int numInstances = 3;
    int numPartitions = 3;
    setupIdealState(numInstances, resources, numPartitions, 1, RebalanceMode.FULL_AUTO,
        BuiltInStateModelDefinitions.MasterSlave.name());
    setupInstances(numInstances);
    List<String> liveInstances = setupLiveInstances(numInstances);
    setupStateModel();
    // Set offline instances threshold: more than 1 offline instance triggers maintenance.
    ClusterConfig clusterConfig = accessor.getProperty(accessor.keyBuilder().clusterConfig());
    clusterConfig.setMaxOfflineInstancesAllowed(1);
    setClusterConfig(clusterConfig);
    Map<String, Resource> resourceMap =
        getResourceMap(resources, numPartitions, BuiltInStateModelDefinitions.MasterSlave.name());
    // Seed the current state so every partition already has a MASTER somewhere;
    // in maintenance mode the rebalancer must not bootstrap new replicas.
    CurrentStateOutput currentStateOutput = new CurrentStateOutput();
    for (int p = 0; p < numPartitions; p++) {
      Partition partition = new Partition("testResourceName_" + p);
      currentStateOutput
          .setCurrentState("testResourceName", partition, "localhost_" + (p + 1) % numInstances,
              "MASTER");
    }
    // Disable 2 instances so the pipeline should enter maintenance
    for (int i = 0; i < 2; i++) {
      admin.enableInstance(_clusterName, liveInstances.get(i), false);
    }
    event.addAttribute(AttributeName.helixmanager.name(), manager);
    event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);
    event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
    event.addAttribute(AttributeName.ControllerDataProvider.name(),
        new ResourceControllerDataProvider());
    runStage(event, new ReadClusterDataStage());
    runStage(event, new BestPossibleStateCalcStage());
    BestPossibleStateOutput output = event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
    // State on the disabled instances should be OFFLINE instead of DROPPED
    // because of maintenance rebalancer.
    Assert.assertEquals(
        output.getInstanceStateMap("testResourceName", new Partition("testResourceName_2"))
            .get("localhost_0"),
        "OFFLINE",
        "Actual state should not be DROPPED");
    Assert.assertEquals(
        output.getInstanceStateMap("testResourceName", new Partition("testResourceName_0"))
            .get("localhost_1"),
        "OFFLINE",
        "Actual state should not be DROPPED");
    // No state change for localhost_2 because the replica is already MASTER
    Assert.assertNull(
        output.getInstanceStateMap("testResourceName", new Partition("testResourceName_1"))
            .get("localhost_2"));
  }
}
| 9,740 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestCurrentStateComputationStageForHandlingCapacity.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.commons.lang3.RandomUtils;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.dataproviders.WorkflowControllerDataProvider;
import org.apache.helix.controller.rebalancer.DelayedAutoRebalancer;
import org.apache.helix.controller.rebalancer.waged.WagedInstanceCapacity;
import org.apache.helix.controller.rebalancer.waged.WagedRebalancer;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.Message;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.monitoring.mbeans.ClusterStatusMonitor;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.AssertJUnit;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
/**
 * Tests for {@link CurrentStateComputationStage}'s WAGED instance-capacity handling:
 * whether capacity is computed for the WAGED resources present in a pipeline event,
 * and the conditions under which the capacity calculation is skipped entirely.
 */
public class TestCurrentStateComputationStageForHandlingCapacity {
  private static final int INSTANCE_COUNT = 3;
  private static final int RESOURCE_COUNT = 2;
  private static final int PARTITION_COUNT = 3;
  // Capacity dimensions shared by every instance and partition in these tests.
  private static final List<String> CAPACITY_KEYS = Lists.newArrayList("CU", "PARTCOUNT", "DISK");
  private static final Map<String, Integer> DEFAULT_INSTANCE_CAPACITY_MAP =
      ImmutableMap.of("CU", 100, "PARTCOUNT", 10, "DISK", 100);
  private static final Map<String, Integer> DEFAULT_PART_CAPACITY_MAP =
      ImmutableMap.of("CU", 10, "PARTCOUNT", 1, "DISK", 1);

  private ResourceControllerDataProvider _clusterData;
  private Map<String, Resource> _resourceMap;
  private CurrentStateOutput _currentStateOutput;
  private WagedInstanceCapacity _wagedInstanceCapacity;
  private CurrentStateComputationStage _currentStateComputationStage;

  @BeforeMethod
  public void setUp() {
    // Prepare cluster data as a spy so individual accessors can be stubbed.
    _clusterData = Mockito.spy(new ResourceControllerDataProvider());
    Map<String, InstanceConfig> instanceConfigMap = generateInstanceCapacityConfigs();
    _clusterData.setInstanceConfigMap(instanceConfigMap);
    _clusterData.setResourceConfigMap(generateResourcePartitionCapacityConfigs());
    _clusterData.setIdealStates(generateIdealStates());
    Mockito.doReturn(ImmutableMap.of()).when(_clusterData).getAllInstancesMessages();
    ClusterConfig clusterConfig = new ClusterConfig("test");
    clusterConfig.setTopologyAwareEnabled(false);
    clusterConfig.setInstanceCapacityKeys(CAPACITY_KEYS);
    _clusterData.setClusterConfig(clusterConfig);
    // Prepare a current-state output with one MASTER and two SLAVEs per partition.
    _resourceMap = generateResourceMap();
    _currentStateOutput = populateCurrentStatesForResources(_resourceMap, instanceConfigMap.keySet());
    // Prepare instance of waged-instance capacity.
    _wagedInstanceCapacity = new WagedInstanceCapacity(_clusterData);
    _currentStateComputationStage = new CurrentStateComputationStage();
  }

  @Test
  public void testProcessEventWithNoWagedResources() {
    // Ideal states are created with WAGED enabled; convert every resource to a
    // non-WAGED rebalancer so no WAGED resource remains.
    Map<String, IdealState> idealStates = _clusterData.getIdealStates();
    idealStates.forEach((resourceName, idealState) -> {
      idealState.setRebalanceMode(IdealState.RebalanceMode.CUSTOMIZED);
      idealState.setRebalancerClassName(DelayedAutoRebalancer.class.getName());
    });
    ClusterEvent clusterEvent = new ClusterEvent("test", ClusterEventType.CurrentStateChange);
    clusterEvent.addAttribute(AttributeName.ControllerDataProvider.name(), _clusterData);
    clusterEvent.addAttribute(AttributeName.RESOURCES.name(), _resourceMap);
    _currentStateComputationStage.handleResourceCapacityCalculation(clusterEvent, _clusterData, _currentStateOutput);
    // Without any WAGED resource the capacity map must not be computed at all.
    Assert.assertNull(_clusterData.getWagedInstanceCapacity());
  }

  @Test
  public void testProcessEventWithSomeWagedResources() {
    // Ideal states are created with WAGED enabled; remove WAGED from one resource only.
    Map<String, IdealState> idealStates = _clusterData.getIdealStates();
    IdealState firstIdealState = idealStates.entrySet().iterator().next().getValue();
    firstIdealState.setRebalanceMode(IdealState.RebalanceMode.CUSTOMIZED);
    firstIdealState.setRebalancerClassName(DelayedAutoRebalancer.class.getName());
    int totalIdealStates = idealStates.size();
    ClusterEvent clusterEvent = new ClusterEvent("test", ClusterEventType.CurrentStateChange);
    clusterEvent.addAttribute(AttributeName.ControllerDataProvider.name(), _clusterData);
    clusterEvent.addAttribute(AttributeName.RESOURCES.name(), _resourceMap);
    _currentStateComputationStage.handleResourceCapacityCalculation(clusterEvent, _clusterData, _currentStateOutput);
    // Capacity must be computed, but only for the remaining WAGED resources.
    WagedInstanceCapacity wagedInstanceCapacity = _clusterData.getWagedInstanceCapacity();
    Assert.assertNotNull(wagedInstanceCapacity);
    Map<String, Map<String, Set<String>>> allocatedPartitionsMap = wagedInstanceCapacity.getAllocatedPartitionsMap();
    Set<String> resourcesAllocated = allocatedPartitionsMap.values().stream()
        .map(Map::keySet)
        .flatMap(Collection::stream)
        .collect(Collectors.toSet());
    Assert.assertEquals(resourcesAllocated.size(), totalIdealStates - 1);
  }

  @Test
  public void testProcessEventWithAllWagedResources() {
    // All ideal states are created with WAGED enabled; leave them unchanged.
    Map<String, IdealState> idealStates = _clusterData.getIdealStates();
    ClusterEvent clusterEvent = new ClusterEvent("test", ClusterEventType.CurrentStateChange);
    clusterEvent.addAttribute(AttributeName.ControllerDataProvider.name(), _clusterData);
    clusterEvent.addAttribute(AttributeName.RESOURCES.name(), _resourceMap);
    _currentStateComputationStage.handleResourceCapacityCalculation(clusterEvent, _clusterData, _currentStateOutput);
    // Capacity must be computed, and every WAGED resource must have allocations.
    WagedInstanceCapacity wagedInstanceCapacity = _clusterData.getWagedInstanceCapacity();
    Assert.assertNotNull(wagedInstanceCapacity);
    Map<String, Map<String, Set<String>>> allocatedPartitionsMap = wagedInstanceCapacity.getAllocatedPartitionsMap();
    Set<String> resourcesAllocated = allocatedPartitionsMap.values().stream()
        .map(Map::keySet)
        .flatMap(Collection::stream)
        .collect(Collectors.toSet());
    Assert.assertEquals(resourcesAllocated.size(), idealStates.size());
  }

  @Test
  public void testSkipCapacityCalculation() {
    // case: when resource-map is null
    Assert.assertTrue(CurrentStateComputationStage.skipCapacityCalculation(
        new ResourceControllerDataProvider(), null, new ClusterEvent(ClusterEventType.LiveInstanceChange)));
    // case: when resource-map is empty
    Assert.assertTrue(CurrentStateComputationStage.skipCapacityCalculation(
        new ResourceControllerDataProvider(), ImmutableMap.of(), new ClusterEvent(ClusterEventType.LiveInstanceChange)));
    // case: when instance capacity is null, the calculation must not be skipped
    Assert.assertFalse(CurrentStateComputationStage.skipCapacityCalculation(
        new ResourceControllerDataProvider(), _resourceMap, new ClusterEvent(ClusterEventType.LiveInstanceChange)));
    // case: once a capacity map already exists, only placement-affecting events
    // should trigger a recalculation.
    ResourceControllerDataProvider dataProvider = Mockito.mock(ResourceControllerDataProvider.class);
    WagedInstanceCapacity instanceCapacity = Mockito.mock(WagedInstanceCapacity.class);
    Mockito.when(dataProvider.getWagedInstanceCapacity()).thenReturn(instanceCapacity);
    // Event types that cannot change placement: the calculation is skipped.
    for (ClusterEventType eventType : ImmutableList.of(
        ClusterEventType.CustomizedStateChange, ClusterEventType.CustomizedViewChange,
        ClusterEventType.CustomizeStateConfigChange, ClusterEventType.ExternalViewChange,
        ClusterEventType.IdealStateChange, ClusterEventType.OnDemandRebalance,
        ClusterEventType.Resume, ClusterEventType.RetryRebalance,
        ClusterEventType.StateVerifier, ClusterEventType.TargetExternalViewChange,
        ClusterEventType.TaskCurrentStateChange)) {
      Assert.assertTrue(CurrentStateComputationStage.skipCapacityCalculation(
          dataProvider, _resourceMap, new ClusterEvent(eventType)),
          "calculation should be skipped for " + eventType);
    }
    // Event types that can change placement: the calculation must run.
    for (ClusterEventType eventType : ImmutableList.of(
        ClusterEventType.LiveInstanceChange, ClusterEventType.CurrentStateChange,
        ClusterEventType.MessageChange, ClusterEventType.PeriodicalRebalance)) {
      Assert.assertFalse(CurrentStateComputationStage.skipCapacityCalculation(
          dataProvider, _resourceMap, new ClusterEvent(eventType)),
          "calculation should not be skipped for " + eventType);
    }
  }

  // -- static helpers

  /** Builds one {@link InstanceConfig} per instance, all with the default capacity map. */
  private Map<String, InstanceConfig> generateInstanceCapacityConfigs() {
    Map<String, InstanceConfig> instanceConfigMap = new HashMap<>();
    for (int i = 0; i < INSTANCE_COUNT; i++) {
      String instanceName = "instance-" + i;
      InstanceConfig config = new InstanceConfig(instanceName);
      config.setInstanceCapacityMap(DEFAULT_INSTANCE_CAPACITY_MAP);
      instanceConfigMap.put(instanceName, config);
    }
    return instanceConfigMap;
  }

  /** Builds one {@link ResourceConfig} per resource with a DEFAULT partition capacity. */
  private Map<String, ResourceConfig> generateResourcePartitionCapacityConfigs() {
    Map<String, ResourceConfig> resourceConfigMap = new HashMap<>();
    try {
      Map<String, Map<String, Integer>> partitionsCapacityMap = new HashMap<>();
      partitionsCapacityMap.put("DEFAULT", DEFAULT_PART_CAPACITY_MAP);
      for (String resourceName : getResourceNames()) {
        ResourceConfig config = new ResourceConfig(resourceName);
        config.setPartitionCapacityMap(partitionsCapacityMap);
        resourceConfigMap.put(resourceName, config);
      }
    } catch (IOException e) {
      // Preserve the original cause instead of swallowing it.
      throw new RuntimeException("error while setting partition capacity map", e);
    }
    return resourceConfigMap;
  }

  /** Builds one WAGED (FULL_AUTO + WagedRebalancer) ideal state per resource. */
  private List<IdealState> generateIdealStates() {
    return getResourceNames().stream()
        .map(resourceName -> {
          IdealState idealState = new IdealState(resourceName);
          idealState.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
          idealState.setRebalancerClassName(WagedRebalancer.class.getName());
          return idealState;
        })
        .collect(Collectors.toList());
  }

  /**
   * Populates a current-state output where, for every partition, one randomly chosen
   * instance is MASTER and all other instances are SLAVE.
   */
  private static CurrentStateOutput populateCurrentStatesForResources(
      Map<String, Resource> resourceMap, Set<String> instanceNames) {
    CurrentStateOutput currentStateOutput = new CurrentStateOutput();
    resourceMap.forEach((resourceName, resource) ->
        resource.getPartitions().forEach(partition -> {
          int masterPartIdx = RandomUtils.nextInt(0, instanceNames.size());
          int idx = 0;
          for (Iterator<String> it = instanceNames.iterator(); it.hasNext(); idx++) {
            currentStateOutput.setCurrentState(
                resourceName, partition, it.next(), (idx == masterPartIdx) ? "MASTER" : "SLAVE");
          }
        }));
    return currentStateOutput;
  }

  /** Builds the test resources, each with {@code PARTITION_COUNT} partitions. */
  private static Map<String, Resource> generateResourceMap() {
    return getResourceNames().stream()
        .map(resourceName -> {
          Resource resource = new Resource(resourceName);
          IntStream.range(0, PARTITION_COUNT)
              .mapToObj(i -> "partition-" + i)
              .forEach(resource::addPartition);
          return resource;
        })
        .collect(Collectors.toMap(Resource::getResourceName, Function.identity()));
  }

  /** @return the names of the test resources: resource-0 .. resource-{RESOURCE_COUNT-1} */
  private static List<String> getResourceNames() {
    return IntStream.range(0, RESOURCE_COUNT)
        .mapToObj(i -> "resource-" + i)
        .collect(Collectors.toList());
  }
}
| 9,741 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestManagementMessageGeneration.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.HelixManager;
import org.apache.helix.api.status.ClusterManagementMode;
import org.apache.helix.controller.dataproviders.ManagementControllerDataProvider;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;
import org.apache.helix.model.Partition;
import org.apache.helix.model.PauseSignal;
import org.apache.helix.model.Resource;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.task.TaskConstants;
import org.apache.helix.util.RebalanceUtil;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Tests that {@link ManagementMessageGenerationPhase} cancels a pending state-transition
 * message during a cluster freeze when state-transition cancellation is enabled, and
 * generates nothing when cancellation is disabled.
 */
public class TestManagementMessageGeneration extends ManagementMessageGenerationPhase {
  private static final String TEST_CLUSTER = "testCluster";
  private static final String TEST_RESOURCE = "resource0";
  private static final String TEST_INSTANCE = "instance0";
  private static final String TEST_PARTITION = "partition0";

  @Test
  public void testCancelPendingSTMessage() throws Exception {
    // Cancellation enabled: the phase should emit exactly one cancellation message
    // mirroring the pending ONLINE -> OFFLINE transition.
    List<Message> messages = generateMessages("ONLINE", "ONLINE", "OFFLINE", true);
    Assert.assertEquals(messages.size(), 1, "Should create cancellation message");
    Message msg = messages.get(0);
    Assert.assertEquals(msg.getMsgType(), Message.MessageType.STATE_TRANSITION_CANCELLATION.name());
    Assert.assertEquals(msg.getFromState(), "ONLINE");
    Assert.assertEquals(msg.getToState(), "OFFLINE");
    // Cancellation disabled: no message should be generated.
    messages = generateMessages("ONLINE", "ONLINE", "OFFLINE", false);
    Assert.assertEquals(messages.size(), 0);
  }

  /**
   * Runs the management message generation phase against a single-partition resource
   * with a mocked pending message, and returns the messages produced for that partition.
   *
   * @param currentState the replica's current state reported by the current-state output
   * @param fromState the pending message's from-state
   * @param toState the pending message's to-state
   * @param cancelPendingST whether state-transition cancellation is enabled in the cluster config
   * @return the messages the phase generated for the test partition (possibly empty)
   * @throws Exception if the pipeline stage fails
   */
  private List<Message> generateMessages(String currentState, String fromState, String toState,
      boolean cancelPendingST) throws Exception {
    ClusterEvent event = new ClusterEvent(TEST_CLUSTER, ClusterEventType.Unknown);
    // Set current state to event
    CurrentStateOutput currentStateOutput = mock(CurrentStateOutput.class);
    Partition partition = mock(Partition.class);
    when(partition.getPartitionName()).thenReturn(TEST_PARTITION);
    when(currentStateOutput.getCurrentState(TEST_RESOURCE, partition, TEST_INSTANCE))
        .thenReturn(currentState);
    when(currentStateOutput.getCurrentStateMap(TEST_RESOURCE))
        .thenReturn(ImmutableMap.of(partition, ImmutableMap.of(TEST_INSTANCE, currentState)));
    // Mock an in-flight (pending) state-transition message on the test replica.
    Message pendingMessage = mock(Message.class);
    when(pendingMessage.getFromState()).thenReturn(fromState);
    when(pendingMessage.getToState()).thenReturn(toState);
    when(currentStateOutput.getPendingMessage(TEST_RESOURCE, partition, TEST_INSTANCE))
        .thenReturn(pendingMessage);
    event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
    // Set helix manager to event
    event.addAttribute(AttributeName.helixmanager.name(), mock(HelixManager.class));
    // Minimal OFFLINE/ONLINE state model used by the test resource.
    StateModelDefinition stateModelDefinition = new StateModelDefinition.Builder("TestStateModel")
        .addState("ONLINE", 1).addState("OFFLINE")
        .addState("DROPPED").addState("ERROR")
        .initialState("OFFLINE")
        .addTransition("ERROR", "OFFLINE", 1).addTransition("ONLINE", "OFFLINE", 2)
        .addTransition("OFFLINE", "DROPPED", 3).addTransition("OFFLINE", "ONLINE", 4)
        .build();
    // Set controller data provider to event: one live instance, pause signal requesting
    // pending-ST cancellation, and the cancellation switch under test.
    ManagementControllerDataProvider cache = mock(ManagementControllerDataProvider.class);
    when(cache.getStateModelDef(TaskConstants.STATE_MODEL_NAME)).thenReturn(stateModelDefinition);
    Map<String, LiveInstance> liveInstances = mock(Map.class);
    LiveInstance mockLiveInstance = mock(LiveInstance.class);
    when(mockLiveInstance.getInstanceName()).thenReturn(TEST_INSTANCE);
    when(mockLiveInstance.getEphemeralOwner()).thenReturn("TEST");
    when(liveInstances.values()).thenReturn(Collections.singletonList(mockLiveInstance));
    when(cache.getLiveInstances()).thenReturn(liveInstances);
    ClusterConfig clusterConfig = mock(ClusterConfig.class);
    when(cache.getClusterConfig()).thenReturn(clusterConfig);
    when(clusterConfig.isStateTransitionCancelEnabled()).thenReturn(cancelPendingST);
    PauseSignal pauseSignal = mock(PauseSignal.class);
    when(pauseSignal.getCancelPendingST()).thenReturn(true);
    when(cache.getPauseSignal()).thenReturn(pauseSignal);
    event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);
    // Set event attribute: resources to rebalance
    Map<String, Resource> resourceMap = new HashMap<>();
    Resource resource = mock(Resource.class);
    when(resource.getResourceName()).thenReturn(TEST_RESOURCE);
    List<Partition> partitions = Collections.singletonList(partition);
    when(resource.getPartitions()).thenReturn(partitions);
    when(resource.getStateModelDefRef()).thenReturn(TaskConstants.STATE_MODEL_NAME);
    resourceMap.put(TEST_RESOURCE, resource);
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);
    // Best-possible state derived from the current state (no planned movement).
    BestPossibleStateOutput bestPossibleStateOutput =
        RebalanceUtil.buildBestPossibleState(resourceMap.keySet(), currentStateOutput);
    // Process the event under an in-progress cluster freeze.
    ClusterManagementMode mode = new ClusterManagementMode(ClusterManagementMode.Type.CLUSTER_FREEZE,
        ClusterManagementMode.Status.IN_PROGRESS);
    event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), bestPossibleStateOutput);
    event.addAttribute(AttributeName.CLUSTER_STATUS.name(), mode);
    process(event);
    MessageOutput output = event.getAttribute(AttributeName.MESSAGES_ALL.name());
    return output.getMessages(TEST_RESOURCE, partition);
  }
}
| 9,742 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestAsyncBaseStage.java
|
package org.apache.helix.controller.stages;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import org.apache.helix.TestHelper;
import org.apache.helix.common.DedupEventProcessor;
import org.apache.helix.controller.pipeline.AbstractAsyncBaseStage;
import org.apache.helix.controller.pipeline.AsyncWorkerType;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests cleanup semantics of {@link AbstractAsyncBaseStage}: a normal run completes once
 * the stage is allowed to proceed, and shutting down the backing worker while a stage is
 * blocked interrupts the stage so it never reaches the finished state.
 */
public class TestAsyncBaseStage {
  private static final AsyncWorkerType DEFAULT_WORKER_TYPE = AsyncWorkerType.ExternalViewComputeWorker;

  @Test
  public void testAsyncStageCleanup() throws Exception {
    BlockingAsyncStage blockingAsyncStage = new BlockingAsyncStage();
    Map<AsyncWorkerType, DedupEventProcessor<String, Runnable>> asyncFIFOWorkerPool =
        new HashMap<>();
    // Worker that executes submitted stage tasks inline on its own thread.
    DedupEventProcessor<String, Runnable> worker =
        new DedupEventProcessor<String, Runnable>("ClusterName", DEFAULT_WORKER_TYPE.name()) {
          @Override
          protected void handleEvent(Runnable event) {
            event.run();
          }
        };
    worker.start();
    asyncFIFOWorkerPool.put(DEFAULT_WORKER_TYPE, worker);
    ClusterEvent event = new ClusterEvent("ClusterName", ClusterEventType.OnDemandRebalance);
    event.addAttribute(AttributeName.AsyncFIFOWorkerPool.name(), asyncFIFOWorkerPool);

    // Normal case: the stage starts asynchronously, blocks on its latch, and only
    // finishes after proceed() releases it.
    blockingAsyncStage.process(event);
    Assert.assertTrue(TestHelper.verify(() -> blockingAsyncStage._isStarted, 500));
    Assert.assertFalse(blockingAsyncStage._isFinished);
    blockingAsyncStage.proceed();
    Assert.assertTrue(TestHelper.verify(() -> blockingAsyncStage._isFinished, 500));
    blockingAsyncStage.reset();

    // Interruption case: shutting the worker down while the stage is blocked must
    // interrupt it, so the stage never finishes and the worker thread dies.
    blockingAsyncStage.process(event);
    // Fix: assert the verify() result (the original silently discarded it, unlike the
    // identical check in the normal case above).
    Assert.assertTrue(TestHelper.verify(() -> blockingAsyncStage._isStarted, 500));
    Assert.assertFalse(blockingAsyncStage._isFinished);
    worker.shutdown();
    Assert.assertFalse(TestHelper.verify(() -> blockingAsyncStage._isFinished, 1000));
    Assert.assertFalse(worker.isAlive());
    blockingAsyncStage.reset();
  }

  /** A stage whose execute() blocks on a latch until proceed() is called. */
  private class BlockingAsyncStage extends AbstractAsyncBaseStage {
    public boolean _isFinished = false;
    public boolean _isStarted = false;
    private CountDownLatch _countDownLatch = new CountDownLatch(1);

    /** Rearm the latch and clear both flags so the stage can be reused. */
    public void reset() {
      _isFinished = false;
      _isStarted = false;
      _countDownLatch = new CountDownLatch(1);
    }

    /** Unblock a stage currently waiting inside execute(). */
    public void proceed() {
      _countDownLatch.countDown();
    }

    @Override
    public AsyncWorkerType getAsyncWorkerType() {
      return DEFAULT_WORKER_TYPE;
    }

    @Override
    public void execute(ClusterEvent event) throws Exception {
      _isStarted = true;
      _countDownLatch.await();
      _isFinished = true;
    }
  }
}
| 9,743 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestClusterEvent.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ConcurrentModificationException;
import java.util.concurrent.CountDownLatch;
import org.testng.Assert;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
/**
 * Unit tests for {@link ClusterEvent}: basic attribute storage, and cloning while a
 * second thread concurrently mutates the event's attribute map.
 */
public class TestClusterEvent {
  @Test
  public void testSimplePutAndGet() {
    ClusterEvent clusterEvent = new ClusterEvent(ClusterEventType.Unknown);
    AssertJUnit.assertEquals(clusterEvent.getEventType(), ClusterEventType.Unknown);
    clusterEvent.addAttribute("attr1", "value");
    AssertJUnit.assertEquals(clusterEvent.getAttribute("attr1"), "value");
  }

  @Test
  public void testThreadSafeClone() throws InterruptedException {
    final String clusterName = "TestCluster";
    final ClusterEvent event = new ClusterEvent(clusterName, ClusterEventType.Unknown, "testId");
    // Seed the event with 100 attributes before the concurrent mutation starts.
    int attributeCount = 0;
    while (attributeCount < 100) {
      event.addAttribute(String.valueOf(attributeCount), attributeCount);
      attributeCount++;
    }
    final CountDownLatch startGate = new CountDownLatch(1);
    Thread mutator = new Thread(() -> {
      String threadName = Thread.currentThread().getName();
      try {
        startGate.await();
      } catch (InterruptedException e) {
        // ignore the exception
      }
      // Keep growing the original event's attribute map while clone() runs.
      for (int i = 0; i < 100; i++) {
        event.addAttribute(threadName + i, threadName);
      }
    });
    mutator.start();
    try {
      // Release the mutator and clone concurrently; a non-thread-safe clone would
      // surface as a ConcurrentModificationException here.
      startGate.countDown();
      ClusterEvent clonedEvent = event.clone("cloneId");
      Assert.assertEquals(clonedEvent.getClusterName(), clusterName);
      Assert.assertEquals(clonedEvent.getEventId(), "cloneId");
    } catch (ConcurrentModificationException e) {
      Assert.fail("Didn't expect any ConcurrentModificationException to occur", e);
    } finally {
      mutator.join();
    }
  }
}
| 9,744 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestBestPossibleCalcStageCompatibility.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.IdealState.IdealStateModeProperty;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
/**
 * These tests ensure that BestPossibleStateCalcStage correctly recognizes the mode and follows
 * appropriate code paths, even though the old (deprecated) method of setting rebalance mode is
 * used.
 */
@SuppressWarnings("deprecation")
public class TestBestPossibleCalcStageCompatibility extends BaseStageTest {
  /**
   * Verifies that an ideal state written with the legacy AUTO mode is rebalanced like SEMI_AUTO:
   * the listed preference owner of each partition becomes MASTER.
   */
  @Test
  public void testSemiAutoModeCompatibility() {
    System.out.println("START TestBestPossibleStateCalcStage at "
        + new Date(System.currentTimeMillis()));
    String[] resources = new String[] {
        "testResourceName"
    };
    setupIdealStateDeprecated(5, resources, 10, 1, IdealStateModeProperty.AUTO);
    setupLiveInstances(5);
    setupStateModel();
    Map<String, Resource> resourceMap = getResourceMap();
    CurrentStateOutput currentStateOutput = new CurrentStateOutput();
    event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);
    event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
    event.addAttribute(AttributeName.ControllerDataProvider.name(),
        new ResourceControllerDataProvider());
    ReadClusterDataStage stage1 = new ReadClusterDataStage();
    runStage(event, stage1);
    BestPossibleStateCalcStage stage2 = new BestPossibleStateCalcStage();
    runStage(event, stage2);
    BestPossibleStateOutput output =
        event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
    // Each partition p lists localhost_{(p+1)%5} first, so that node must be assigned MASTER.
    for (int p = 0; p < 5; p++) {
      Partition resource = new Partition("testResourceName_" + p);
      AssertJUnit.assertEquals("MASTER", output.getInstanceStateMap("testResourceName", resource)
          .get("localhost_" + (p + 1) % 5));
    }
    System.out.println("END TestBestPossibleStateCalcStage at "
        + new Date(System.currentTimeMillis()));
  }

  /**
   * Verifies that an ideal state written with the legacy CUSTOMIZED mode is NOT auto-assigned:
   * since the ideal state carries list fields but no map fields, no state should be computed.
   */
  @Test
  public void testCustomModeCompatibility() {
    System.out.println("START TestBestPossibleStateCalcStage at "
        + new Date(System.currentTimeMillis()));
    String[] resources = new String[] {
        "testResourceName"
    };
    setupIdealStateDeprecated(5, resources, 10, 1, IdealStateModeProperty.CUSTOMIZED);
    setupLiveInstances(5);
    setupStateModel();
    Map<String, Resource> resourceMap = getResourceMap();
    CurrentStateOutput currentStateOutput = new CurrentStateOutput();
    event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);
    event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
    event.addAttribute(AttributeName.ControllerDataProvider.name(),
        new ResourceControllerDataProvider());
    ReadClusterDataStage stage1 = new ReadClusterDataStage();
    runStage(event, stage1);
    BestPossibleStateCalcStage stage2 = new BestPossibleStateCalcStage();
    runStage(event, stage2);
    BestPossibleStateOutput output =
        event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
    for (int p = 0; p < 5; p++) {
      Partition resource = new Partition("testResourceName_" + p);
      AssertJUnit.assertNull(output.getInstanceStateMap("testResourceName", resource).get(
          "localhost_" + (p + 1) % 5));
    }
    System.out.println("END TestBestPossibleStateCalcStage at "
        + new Date(System.currentTimeMillis()));
  }

  /**
   * Writes ideal states using the deprecated mode API.
   *
   * @param nodes number of participants; preference owners are chosen modulo this count
   * @param resources resource names to create one ideal state for each
   * @param partitions partitions per resource
   * @param replicas replicas per partition (preference-list length)
   * @param mode legacy rebalance mode to stamp on the ideal state
   * @return the ideal states that were persisted
   */
  protected List<IdealState> setupIdealStateDeprecated(int nodes, String[] resources,
      int partitions, int replicas, IdealStateModeProperty mode) {
    List<IdealState> idealStates = new ArrayList<>();
    Builder keyBuilder = accessor.keyBuilder();
    for (String resourceName : resources) {
      ZNRecord record = new ZNRecord(resourceName);
      for (int p = 0; p < partitions; p++) {
        List<String> value = new ArrayList<>();
        for (int r = 0; r < replicas; r++) {
          // Rotate preference owners so load spreads across the nodes.
          value.add("localhost_" + (p + r + 1) % nodes);
        }
        record.setListField(resourceName + "_" + p, value);
      }
      IdealState idealState = new IdealState(record);
      idealState.setStateModelDefRef("MasterSlave");
      idealState.setIdealStateMode(mode.toString());
      idealState.setNumPartitions(partitions);
      idealStates.add(idealState);
      accessor.setProperty(keyBuilder.idealStates(resourceName), idealState);
    }
    return idealStates;
  }
}
| 9,745 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestExternalViewStage.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.pipeline.Pipeline;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.ExternalView;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestExternalViewStage extends ZkUnitTestBase {
  /**
   * Verifies that the controller cache mirrors the external views written to ZK, and that
   * re-running the compute stage with unchanged state performs no redundant ZK writes.
   */
  @Test
  public void testCachedExternalViews() throws Exception {
    String clusterName = "CLUSTER_" + TestHelper.getTestMethodName();
    HelixDataAccessor dataAccessor =
        new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
    HelixManager manager = new DummyClusterManager(clusterName, dataAccessor);
    // ideal state: node0 is MASTER, node1 is SLAVE
    // replica=2 means 1 master and 1 slave
    setupIdealState(clusterName, new int[] {
        0, 1
    }, new String[] {
        "TestDB"
    }, 1, 2);
    setupLiveInstances(clusterName, new int[] {
        0, 1
    });
    setupStateModel(clusterName);
    ResourceControllerDataProvider dataProvider = new ResourceControllerDataProvider(clusterName);
    ClusterEvent clusterEvent = new ClusterEvent(ClusterEventType.Unknown);
    clusterEvent.addAttribute(AttributeName.helixmanager.name(), manager);
    clusterEvent.addAttribute(AttributeName.ControllerDataProvider.name(), dataProvider);
    Pipeline refreshPipeline = new Pipeline();
    refreshPipeline.addStage(new ReadClusterDataStage());
    runPipeline(clusterEvent, refreshPipeline, false);
    runStage(clusterEvent, new ResourceComputationStage());
    runStage(clusterEvent, new CurrentStateComputationStage());
    ExternalViewComputeStage computeStage = new ExternalViewComputeStage();
    runStage(clusterEvent, computeStage);
    Assert.assertEquals(dataProvider.getExternalViews().values(),
        dataAccessor.getChildValues(dataAccessor.keyBuilder().externalViews(), true));
    // Ensure no external view is rewritten when the stage runs again with identical input:
    // the znode versions must stay unchanged.
    List<ExternalView> viewsBefore =
        dataAccessor.getChildValues(dataAccessor.keyBuilder().externalViews(), true);
    runStage(clusterEvent, computeStage);
    List<ExternalView> viewsAfter =
        dataAccessor.getChildValues(dataAccessor.keyBuilder().externalViews(), true);
    Assert.assertEquals(viewsBefore, viewsAfter);
    for (int idx = 0; idx < viewsBefore.size(); idx++) {
      Assert.assertEquals(viewsBefore.get(idx).getStat().getVersion(),
          viewsAfter.get(idx).getStat().getVersion());
    }
    if (manager.isConnected()) {
      manager.disconnect(); // For DummyClusterManager, this is not necessary
    }
    deleteLiveInstances(clusterName);
    deleteCluster(clusterName);
  }
}
| 9,746 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestCurrentStateComputationStage.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.util.HashMap;
import java.util.Map;
import java.util.List;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.controller.dataproviders.WorkflowControllerDataProvider;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.monitoring.mbeans.ClusterStatusMonitor;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.Message;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
public class TestCurrentStateComputationStage extends BaseStageTest {
  /**
   * With no current-state or message znodes present, the stage must still emit an
   * (empty) CurrentStateOutput instead of failing.
   */
  @Test
  public void testEmptyCS() {
    Map<String, Resource> resourceMap = getResourceMap();
    event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);
    ResourceControllerDataProvider dataCache = new ResourceControllerDataProvider();
    event.addAttribute(AttributeName.ControllerDataProvider.name(), dataCache);
    event.addAttribute(AttributeName.clusterStatusMonitor.name(), new ClusterStatusMonitor(_clusterName));
    CurrentStateComputationStage stage = new CurrentStateComputationStage();
    runStage(event, new ReadClusterDataStage());
    // Configure capacity keys plus one instance config so the stage's capacity
    // bookkeeping code path is exercised as well.
    ClusterConfig clsCfg = dataCache.getClusterConfig();
    clsCfg.setInstanceCapacityKeys(ImmutableList.of("s1", "s2", "s3"));
    dataCache.setClusterConfig(clsCfg);
    dataCache.setInstanceConfigMap(ImmutableMap.of(
        "a", new InstanceConfig("a")
    ));
    runStage(event, stage);
    CurrentStateOutput output = event.getAttribute(AttributeName.CURRENT_STATE.name());
    // No participants reported anything, so the per-partition state map is empty.
    AssertJUnit.assertEquals(
        output.getCurrentStateMap("testResourceName", new Partition("testResourceName_0")).size(),
        0);
  }

  /**
   * End-to-end check of current-state computation: pending messages are surfaced,
   * current states from dead sessions are ignored, stale messages are tracked, and
   * task-framework current states are only included for task pipeline events.
   */
  @Test
  public void testSimpleCS() {
    // setup resource
    Map<String, Resource> resourceMap = getResourceMap();
    setupLiveInstances(5);
    event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);
    ResourceControllerDataProvider dataCache = new ResourceControllerDataProvider();
    event.addAttribute(AttributeName.ControllerDataProvider.name(), dataCache);
    CurrentStateComputationStage stage = new CurrentStateComputationStage();
    runStage(event, new ReadClusterDataStage());
    runStage(event, stage);
    // Baseline: nothing written yet, so the output is empty.
    CurrentStateOutput output1 = event.getAttribute(AttributeName.CURRENT_STATE.name());
    AssertJUnit.assertEquals(
        output1.getCurrentStateMap("testResourceName", new Partition("testResourceName_0")).size(),
        0);
    // Add a state transition messages
    Message message = new Message(Message.MessageType.STATE_TRANSITION, "msg1");
    message.setFromState("OFFLINE");
    message.setToState("SLAVE");
    message.setResourceName("testResourceName");
    message.setPartitionName("testResourceName_1");
    message.setTgtName("localhost_3");
    message.setTgtSessionId("session_3");
    Builder keyBuilder = accessor.keyBuilder();
    accessor.setProperty(keyBuilder.message("localhost_" + 3, message.getId()), message);
    runStage(event, new ReadClusterDataStage());
    runStage(event, stage);
    // The in-flight message must show up as a pending transition to SLAVE.
    CurrentStateOutput output2 = event.getAttribute(AttributeName.CURRENT_STATE.name());
    String pendingState =
        output2.getPendingMessage("testResourceName", new Partition("testResourceName_1"),
            "localhost_3").getToState();
    AssertJUnit.assertEquals(pendingState, "SLAVE");
    ZNRecord record1 = new ZNRecord("testResourceName");
    // Add a current state that matches sessionId and one that does not match
    CurrentState stateWithLiveSession = new CurrentState(record1);
    stateWithLiveSession.setSessionId("session_3");
    stateWithLiveSession.setStateModelDefRef("MasterSlave");
    stateWithLiveSession.setState("testResourceName_1", "OFFLINE");
    ZNRecord record2 = new ZNRecord("testResourceName");
    CurrentState stateWithDeadSession = new CurrentState(record2);
    stateWithDeadSession.setSessionId("session_dead");
    stateWithDeadSession.setStateModelDefRef("MasterSlave");
    stateWithDeadSession.setState("testResourceName_1", "MASTER");
    // Task-framework current states, again one per live/dead session.
    ZNRecord record3 = new ZNRecord("testTaskResourceName");
    CurrentState taskStateWithLiveSession = new CurrentState(record3);
    taskStateWithLiveSession.setSessionId("session_3");
    taskStateWithLiveSession.setStateModelDefRef("Task");
    taskStateWithLiveSession.setState("testTaskResourceName_1", "INIT");
    ZNRecord record4 = new ZNRecord("testTaskResourceName");
    CurrentState taskStateWithDeadSession = new CurrentState(record4);
    taskStateWithDeadSession.setSessionId("session_dead");
    taskStateWithDeadSession.setStateModelDefRef("Task");
    taskStateWithDeadSession.setState("testTaskResourceName_1", "INIT");
    accessor.setProperty(keyBuilder.currentState("localhost_3", "session_3", "testResourceName"),
        stateWithLiveSession);
    accessor.setProperty(keyBuilder.currentState("localhost_3", "session_dead", "testResourceName"),
        stateWithDeadSession);
    accessor.setProperty(
        keyBuilder.taskCurrentState("localhost_3", "session_3", "testTaskResourceName"),
        taskStateWithLiveSession);
    accessor.setProperty(
        keyBuilder.taskCurrentState("localhost_3", "session_dead", "testTaskResourceName"),
        taskStateWithDeadSession);
    runStage(event, new ReadClusterDataStage());
    runStage(event, stage);
    // Only the state reported under the live session ("session_3") is honored;
    // the dead session's MASTER entry must be ignored.
    CurrentStateOutput output3 = event.getAttribute(AttributeName.CURRENT_STATE.name());
    String currentState =
        output3.getCurrentState("testResourceName", new Partition("testResourceName_1"),
            "localhost_3");
    AssertJUnit.assertEquals(currentState, "OFFLINE");
    // Non Task Framework event will cause task current states to be ignored
    String taskCurrentState = output3
        .getCurrentState("testTaskResourceName", new Partition("testTaskResourceName_1"),
            "localhost_3");
    AssertJUnit.assertNull(taskCurrentState);
    // Add another state transition message which is stale
    message = new Message(Message.MessageType.STATE_TRANSITION, "msg2");
    message.setFromState("SLAVE");
    message.setToState("OFFLINE");
    message.setResourceName("testResourceName");
    message.setPartitionName("testResourceName_1");
    message.setTgtName("localhost_3");
    message.setTgtSessionId("session_3");
    accessor.setProperty(keyBuilder.message("localhost_" + 3, message.getId()), message);
    runStage(event, new ReadClusterDataStage());
    runStage(event, stage);
    // The stale message (from-state SLAVE no longer matches the OFFLINE current
    // state) should be recorded per-instance in the data cache.
    CurrentStateOutput output4 = event.getAttribute(AttributeName.CURRENT_STATE.name());
    AssertJUnit.assertEquals(dataCache.getStaleMessages().size(), 1);
    AssertJUnit.assertTrue(dataCache.getStaleMessages().containsKey("localhost_3"));
    AssertJUnit.assertTrue(dataCache.getStaleMessages().get("localhost_3").containsKey("msg2"));
    // Use a task event to check that task current states are included
    resourceMap = new HashMap<String, Resource>();
    Resource testTaskResource = new Resource("testTaskResourceName");
    testTaskResource.setStateModelDefRef("Task");
    testTaskResource.addPartition("testTaskResourceName_1");
    resourceMap.put("testTaskResourceName", testTaskResource);
    ClusterEvent taskEvent = new ClusterEvent(ClusterEventType.Unknown);
    taskEvent.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
    taskEvent.addAttribute(AttributeName.ControllerDataProvider.name(),
        new WorkflowControllerDataProvider());
    runStage(taskEvent, new ReadClusterDataStage());
    runStage(taskEvent, stage);
    CurrentStateOutput output5 = taskEvent.getAttribute(AttributeName.CURRENT_STATE.name());
    taskCurrentState = output5
        .getCurrentState("testTaskResourceName", new Partition("testTaskResourceName_1"),
            "localhost_3");
    AssertJUnit.assertEquals(taskCurrentState, "INIT");
  }
}
| 9,747 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestResourceValidationStage.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixProperty;
import org.apache.helix.MockAccessor;
import org.apache.helix.PropertyKey;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.IdealState.IdealStateProperty;
import org.apache.helix.model.IdealState.RebalanceMode;
import org.apache.helix.model.Resource;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.tools.StateModelConfigGenerator;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests that ResourceValidationStage drops resources whose configuration is invalid
 * (e.g. missing ideal-state specs or state model definitions) while keeping valid ones.
 */
public class TestResourceValidationStage {
  private static final String PARTICIPANT = "localhost_1234";
  private static final String STATE = "OFFLINE";

  /**
   * Builds a MockAccessor with a basic cluster config already written — every test
   * in this class needs this identical setup before the cache can refresh.
   */
  private MockAccessor createAccessorWithClusterConfig() {
    MockAccessor accessor = new MockAccessor();
    accessor.setProperty(accessor.keyBuilder().clusterConfig(), new ClusterConfig("TestCluster"));
    return accessor;
  }

  /**
   * Refreshes a fresh controller data cache from the accessor and wraps it in a
   * ClusterEvent ready for pipeline stages.
   */
  private ClusterEvent createRefreshedEvent(MockAccessor accessor) {
    ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
    ResourceControllerDataProvider cache = new ResourceControllerDataProvider();
    cache.refresh(accessor);
    event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);
    return event;
  }

  /**
   * Resources matching an ideal-state rule (or FULL_AUTO) survive validation; a
   * SEMI_AUTO resource with no matching spec is dropped.
   */
  @Test
  public void testIdealStateValidity() throws Exception {
    MockAccessor accessor = createAccessorWithClusterConfig();
    // create some ideal states
    String masterSlaveCustomResource = "masterSlaveCustomResource";
    String onlineOfflineFullAutoResource = "onlineOfflineFullAutoResource";
    String masterSlaveSemiAutoInvalidResource = "masterSlaveSemiAutoInvalidResource";
    createIS(accessor, masterSlaveCustomResource, "MasterSlave", RebalanceMode.CUSTOMIZED);
    createIS(accessor, onlineOfflineFullAutoResource, "OnlineOffline", RebalanceMode.FULL_AUTO);
    createIS(accessor, masterSlaveSemiAutoInvalidResource, "MasterSlave", RebalanceMode.SEMI_AUTO);
    // create some ideal state specs
    createISSpec(accessor, masterSlaveCustomResource + "_spec", "MasterSlave",
        RebalanceMode.CUSTOMIZED);
    createISSpec(accessor, onlineOfflineFullAutoResource + "_spec", "OnlineOffline",
        RebalanceMode.FULL_AUTO);
    // add some state models
    addStateModels(accessor);
    // refresh the cache
    ClusterEvent event = createRefreshedEvent(accessor);
    // run resource computation
    new ResourceComputationStage().process(event);
    Map<String, Resource> resourceMap =
        event.getAttribute(AttributeName.RESOURCES_TO_REBALANCE.name());
    Assert.assertTrue(resourceMap.containsKey(masterSlaveCustomResource));
    Assert.assertTrue(resourceMap.containsKey(onlineOfflineFullAutoResource));
    Assert.assertTrue(resourceMap.containsKey(masterSlaveSemiAutoInvalidResource));
    // run resource validation
    new ResourceValidationStage().process(event);
    Map<String, Resource> finalResourceMap =
        event.getAttribute(AttributeName.RESOURCES.name());
    Assert.assertTrue(finalResourceMap.containsKey(masterSlaveCustomResource));
    Assert.assertTrue(finalResourceMap.containsKey(onlineOfflineFullAutoResource));
    Assert.assertFalse(finalResourceMap.containsKey(masterSlaveSemiAutoInvalidResource));
  }

  /** A resource with an ideal state but no spec still passes validation. */
  @Test
  public void testNoSpec() throws Exception {
    MockAccessor accessor = createAccessorWithClusterConfig();
    // create an ideal state and no spec
    String masterSlaveCustomResource = "masterSlaveCustomResource";
    createIS(accessor, masterSlaveCustomResource, "MasterSlave", RebalanceMode.CUSTOMIZED);
    // add some state models
    addStateModels(accessor);
    // refresh the cache
    ClusterEvent event = createRefreshedEvent(accessor);
    // run resource computation
    new ResourceComputationStage().process(event);
    Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.name());
    Assert.assertTrue(resourceMap.containsKey(masterSlaveCustomResource));
    // run resource validation
    new ResourceValidationStage().process(event);
    Map<String, Resource> finalResourceMap = event.getAttribute(AttributeName.RESOURCES.name());
    Assert.assertTrue(finalResourceMap.containsKey(masterSlaveCustomResource));
  }

  /** A resource referencing an unregistered state model must be dropped by validation. */
  @Test
  public void testMissingStateModel() throws Exception {
    MockAccessor accessor = createAccessorWithClusterConfig();
    // create an ideal state and no spec
    String masterSlaveCustomResource = "masterSlaveCustomResource";
    String leaderStandbyCustomResource = "leaderStandbyCustomResource";
    createIS(accessor, masterSlaveCustomResource, "MasterSlave", RebalanceMode.CUSTOMIZED);
    createIS(accessor, leaderStandbyCustomResource, "LeaderStandby", RebalanceMode.CUSTOMIZED);
    // add some state models (but not leader standby)
    addStateModels(accessor);
    // refresh the cache
    ClusterEvent event = createRefreshedEvent(accessor);
    // run resource computation
    new ResourceComputationStage().process(event);
    Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.name());
    Assert.assertTrue(resourceMap.containsKey(masterSlaveCustomResource));
    Assert.assertTrue(resourceMap.containsKey(leaderStandbyCustomResource));
    // run resource validation
    new ResourceValidationStage().process(event);
    Map<String, Resource> finalResourceMap = event.getAttribute(AttributeName.RESOURCES.name());
    Assert.assertTrue(finalResourceMap.containsKey(masterSlaveCustomResource));
    Assert.assertFalse(finalResourceMap.containsKey(leaderStandbyCustomResource));
  }

  /** Writes a single-partition, single-replica ideal state for the given resource. */
  private void createIS(HelixDataAccessor accessor, String resourceId, String stateModelDefRef,
      RebalanceMode rebalanceMode) {
    IdealState idealState = new IdealState(resourceId);
    idealState.setRebalanceMode(rebalanceMode);
    idealState.setStateModelDefRef(stateModelDefRef);
    idealState.setNumPartitions(1);
    idealState.setReplicas("1");
    idealState.getRecord().setListField(resourceId + "_0", ImmutableList.of(PARTICIPANT));
    idealState.getRecord().setMapField(resourceId + "_0", ImmutableMap.of(PARTICIPANT, STATE));
    accessor.setProperty(accessor.keyBuilder().idealStates(resourceId), idealState);
  }

  /** Adds an ideal-state rule entry to the cluster config for the given spec id. */
  private void createISSpec(HelixDataAccessor accessor, String specId, String stateModelDefRef,
      RebalanceMode rebalanceMode) {
    PropertyKey propertyKey = accessor.keyBuilder().clusterConfig();
    HelixProperty property = accessor.getProperty(propertyKey);
    if (property == null) {
      property = new HelixProperty("sampleClusterConfig");
    }
    String key = "IdealStateRule!" + specId;
    String value =
        IdealStateProperty.REBALANCE_MODE.toString() + "=" + rebalanceMode.toString() + ","
            + IdealStateProperty.STATE_MODEL_DEF_REF.toString() + "=" + stateModelDefRef;
    property.getRecord().setSimpleField(key, value);
    accessor.setProperty(propertyKey, property);
  }

  /** Registers the MasterSlave and OnlineOffline state model definitions (LeaderStandby is deliberately omitted). */
  private void addStateModels(HelixDataAccessor accessor) {
    StateModelDefinition masterSlave =
        new StateModelDefinition(StateModelConfigGenerator.generateConfigForMasterSlave());
    accessor.setProperty(accessor.keyBuilder().stateModelDef(masterSlave.getId()), masterSlave);
    StateModelDefinition onlineOffline =
        new StateModelDefinition(StateModelConfigGenerator.generateConfigForOnlineOffline());
    accessor.setProperty(accessor.keyBuilder().stateModelDef(onlineOffline.getId()), onlineOffline);
  }
}
| 9,748 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/stages/TestCustomizedViewStage.java
|
package org.apache.helix.controller.stages;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import javax.management.ObjectName;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.PropertyKey;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.pipeline.Pipeline;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.CustomizedState;
import org.apache.helix.model.CustomizedStateConfig;
import org.apache.helix.model.CustomizedView;
import org.apache.helix.monitoring.mbeans.ClusterStatusMonitor;
import org.apache.helix.monitoring.mbeans.CustomizedViewMonitor;
import org.apache.helix.monitoring.mbeans.MonitorDomainNames;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestCustomizedViewStage extends ZkUnitTestBase {
private final String RESOURCE_NAME = "TestDB";
private final String PARTITION_NAME = "TestDB_0";
private final String CUSTOMIZED_STATE_NAME = "customizedState1";
private final String INSTANCE_NAME = "localhost_1";
/**
 * Verifies that the controller cache mirrors the customized views in ZK, and that
 * re-running the aggregation stage with unchanged state does not rewrite any znode.
 */
@Test
public void testCachedCustomizedViews() throws Exception {
  String clusterName = "CLUSTER_" + TestHelper.getTestMethodName();
  HelixDataAccessor accessor =
      new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
  HelixManager manager = new DummyClusterManager(clusterName, accessor);
  // ideal state: node0 is MASTER, node1 is SLAVE
  // replica=2 means 1 master and 1 slave
  setupIdealState(clusterName, new int[]{0, 1}, new String[]{"TestDB"}, 1, 2);
  setupLiveInstances(clusterName, new int[]{0, 1});
  setupStateModel(clusterName);
  ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
  ResourceControllerDataProvider cache = new ResourceControllerDataProvider(clusterName);
  event.addAttribute(AttributeName.helixmanager.name(), manager);
  event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);
  // Enable aggregation for exactly one customized-state type.
  CustomizedStateConfig config = new CustomizedStateConfig();
  List<String> aggregationEnabledTypes = new ArrayList<>();
  aggregationEnabledTypes.add(CUSTOMIZED_STATE_NAME);
  config.setAggregationEnabledTypes(aggregationEnabledTypes);
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  accessor.setProperty(keyBuilder.customizedStateConfig(), config);
  CustomizedState customizedState = new CustomizedState(RESOURCE_NAME);
  customizedState.setState(PARTITION_NAME, "STARTED");
  // Use the shared constant (not a duplicated literal) so the state type written
  // here always matches the aggregation-enabled type registered above.
  accessor
      .setProperty(keyBuilder.customizedState(INSTANCE_NAME, CUSTOMIZED_STATE_NAME, RESOURCE_NAME),
          customizedState);
  CustomizedViewAggregationStage customizedViewComputeStage =
      new CustomizedViewAggregationStage();
  Pipeline dataRefresh = new Pipeline();
  dataRefresh.addStage(new ReadClusterDataStage());
  runPipeline(event, dataRefresh, false);
  runStage(event, new ResourceComputationStage());
  runStage(event, new CustomizedStateComputationStage());
  runStage(event, customizedViewComputeStage);
  Assert.assertEquals(cache.getCustomizedViewCacheMap().size(),
      accessor.getChildNames(accessor.keyBuilder().customizedViews()).size());
  // Assure there is no customized view that gets updated when running the stage again:
  // both the view contents and the znode versions must be unchanged.
  List<CustomizedView> oldCustomizedViews =
      accessor.getChildValues(accessor.keyBuilder().customizedViews(), true);
  runStage(event, customizedViewComputeStage);
  List<CustomizedView> newCustomizedViews =
      accessor.getChildValues(accessor.keyBuilder().customizedViews(), true);
  Assert.assertEquals(oldCustomizedViews, newCustomizedViews);
  for (int i = 0; i < oldCustomizedViews.size(); i++) {
    Assert.assertEquals(oldCustomizedViews.get(i).getStat().getVersion(),
        newCustomizedViews.get(i).getStat().getVersion());
  }
  if (manager.isConnected()) {
    manager.disconnect(); // For DummyClusterManager, this is not necessary
  }
  deleteLiveInstances(clusterName);
  deleteCluster(clusterName);
}
@Test
public void testLatencyMetricReporting() throws Exception {
String clusterName = "CLUSTER_" + TestHelper.getTestMethodName();
HelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
HelixManager manager = new DummyClusterManager(clusterName, accessor);
// ideal state: node0 is MASTER, node1 is SLAVE
// replica=2 means 1 master and 1 slave
setupIdealState(clusterName, new int[]{0, 1}, new String[]{"TestDB"}, 1, 2);
setupLiveInstances(clusterName, new int[]{0, 1});
setupStateModel(clusterName);
ClusterStatusMonitor clusterStatusMonitor = new ClusterStatusMonitor(clusterName);
ClusterEvent event = new ClusterEvent(clusterName, ClusterEventType.Unknown);
ResourceControllerDataProvider cache = new ResourceControllerDataProvider(clusterName);
ExecutorService executor = Executors.newSingleThreadExecutor();
cache.setAsyncTasksThreadPool(executor);
event.addAttribute(AttributeName.helixmanager.name(), manager);
event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);
event.addAttribute(AttributeName.clusterStatusMonitor.name(), clusterStatusMonitor);
CustomizedStateConfig config = new CustomizedStateConfig();
List<String> aggregationEnabledTypes = new ArrayList<>();
aggregationEnabledTypes.add(CUSTOMIZED_STATE_NAME);
config.setAggregationEnabledTypes(aggregationEnabledTypes);
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
accessor.setProperty(keyBuilder.customizedStateConfig(), config);
CustomizedState customizedState = new CustomizedState(RESOURCE_NAME);
customizedState.setState(PARTITION_NAME, "STATE");
customizedState.setStartTime(PARTITION_NAME, 1);
accessor.setProperty(
keyBuilder.customizedState(INSTANCE_NAME, CUSTOMIZED_STATE_NAME, RESOURCE_NAME),
customizedState);
Pipeline dataRefresh = new Pipeline();
dataRefresh.addStage(new ReadClusterDataStage());
runPipeline(event, dataRefresh, false);
runStage(event, new ResourceComputationStage());
runStage(event, new CustomizedStateComputationStage());
runStage(event, new CustomizedViewAggregationStage());
ObjectName objectName = new ObjectName(String
.format("%s:%s=%s,%s=%s", MonitorDomainNames.AggregatedView.name(), "Type",
"CustomizedView", "Cluster", clusterName));
Field customizedViewMonitor =
ClusterStatusMonitor.class.getDeclaredField("_customizedViewMonitor");
Assert.assertNotNull(customizedViewMonitor);
boolean hasLatencyReported = TestHelper.verify(() -> (long) _server.getAttribute(objectName,
CustomizedViewMonitor.UPDATE_TO_AGGREGATION_LATENCY_GAUGE + ".Max") != 0,
TestHelper.WAIT_DURATION);
Assert.assertTrue(hasLatencyReported);
deleteLiveInstances(clusterName);
deleteCluster(clusterName);
executor.shutdownNow();
}
@Test
public void testLatencyCalculationWithEmptyTimestamp() throws Exception {
String clusterName = "CLUSTER_" + TestHelper.getTestMethodName();
HelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
HelixManager manager = new DummyClusterManager(clusterName, accessor);
setupIdealState(clusterName, new int[]{0, 1}, new String[]{"TestDB"}, 1, 2);
setupLiveInstances(clusterName, new int[]{0, 1});
setupStateModel(clusterName);
ClusterStatusMonitor clusterStatusMonitor = new ClusterStatusMonitor(clusterName);
ClusterEvent event = new ClusterEvent(clusterName, ClusterEventType.Unknown);
ResourceControllerDataProvider cache = new ResourceControllerDataProvider(clusterName);
ExecutorService executor = Executors.newSingleThreadExecutor();
cache.setAsyncTasksThreadPool(executor);
event.addAttribute(AttributeName.helixmanager.name(), manager);
event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);
event.addAttribute(AttributeName.clusterStatusMonitor.name(), clusterStatusMonitor);
CustomizedStateConfig config = new CustomizedStateConfig();
List<String> aggregationEnabledTypes = new ArrayList<>();
aggregationEnabledTypes.add(CUSTOMIZED_STATE_NAME);
config.setAggregationEnabledTypes(aggregationEnabledTypes);
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
accessor.setProperty(keyBuilder.customizedStateConfig(), config);
CustomizedState customizedState = new CustomizedState(RESOURCE_NAME);
customizedState.setState(PARTITION_NAME, "STATE");
accessor.setProperty(
keyBuilder.customizedState(INSTANCE_NAME, CUSTOMIZED_STATE_NAME, RESOURCE_NAME),
customizedState);
Pipeline dataRefresh = new Pipeline();
dataRefresh.addStage(new ReadClusterDataStage());
runPipeline(event, dataRefresh, false);
runStage(event, new ResourceComputationStage());
runStage(event, new CustomizedStateComputationStage());
runStage(event, new CustomizedViewAggregationStage());
ObjectName objectName = new ObjectName(String
.format("%s:%s=%s,%s=%s", MonitorDomainNames.AggregatedView.name(), "Type",
"CustomizedView", "Cluster", clusterName));
Field customizedViewMonitor =
ClusterStatusMonitor.class.getDeclaredField("_customizedViewMonitor");
Assert.assertNotNull(customizedViewMonitor);
boolean hasLatencyReported = TestHelper.verify(() -> (long) _server.getAttribute(objectName,
CustomizedViewMonitor.UPDATE_TO_AGGREGATION_LATENCY_GAUGE + ".Max") != 0,
TestHelper.WAIT_DURATION);
Assert.assertFalse(hasLatencyReported);
deleteLiveInstances(clusterName);
deleteCluster(clusterName);
executor.shutdownNow();
}
}
| 9,749 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/TestZeroReplicaAvoidance.java
|
package org.apache.helix.controller.rebalancer;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import org.apache.helix.controller.rebalancer.constraint.MonitoredAbnormalResolver;
import org.apache.helix.controller.stages.BaseStageTest;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.Message;
import org.apache.helix.model.Partition;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
public class TestZeroReplicaAvoidance extends BaseStageTest {
  // Keys of the JSON test-input resource files parsed by loadTestInputs().
  private static final String INPUT = "inputs";
  private static final String CURRENT_STATE = "currentStates";
  private static final String PENDING_MESSAGES = "pendingMessages";
  private static final String BEST_POSSIBLE_STATE = "bestPossibleStates";
  private static final String PREFERENCE_LIST = "preferenceList";
  private static final String STATE_MODEL = "statemodel";

  /**
   * Runs DelayedAutoRebalancer.computeBestPossibleStateForPartition against a table-driven
   * scenario (preference list, current states, pending messages) and asserts that the computed
   * best-possible state map matches the expectation from the JSON input file.
   *
   * @param stateModelDef state model to rebalance with (MasterSlave or OnlineOffline)
   * @param instancePreferenceList ordered instance preference list for the partition
   * @param currentStateMap instance -> current state
   * @param pendingMessages instance -> pending transition messages, may be null
   * @param expectedBestPossibleMap expected instance -> state output
   */
  @Test(dataProvider = "zeroReplicaInput")
  public void testZeroReplicaAvoidanceDuringRebalance(StateModelDefinition stateModelDef,
      List<String> instancePreferenceList, Map<String, String> currentStateMap,
      Map<String, List<Message>> pendingMessages, Map<String, String> expectedBestPossibleMap) {
    // NOTE: the original messages named unrelated tests (copy-paste leftover); fixed here.
    System.out.println(
        "START testZeroReplicaAvoidanceDuringRebalance at " + new Date(System.currentTimeMillis()));
    System.err.println("Test input: " + instancePreferenceList + ":" + currentStateMap + ":");
    int numNode = 6;
    Set<String> liveInstances = new HashSet<>();
    for (int i = 0; i < numNode; i++) {
      liveInstances.add("localhost_" + i);
    }
    IdealState is = new IdealState("test");
    is.setReplicas("3");
    Partition partition = new Partition("testPartition");
    DelayedAutoRebalancer rebalancer = new DelayedAutoRebalancer();
    CurrentStateOutput currentStateOutput = new CurrentStateOutput();
    for (Map.Entry<String, String> entry : currentStateMap.entrySet()) {
      currentStateOutput.setCurrentState("test", partition, entry.getKey(), entry.getValue());
    }
    // Pending messages may exist on instances outside the preference list, so consider both.
    Set<String> allInstances = new HashSet<>(instancePreferenceList);
    allInstances.addAll(currentStateMap.keySet());
    if (pendingMessages != null) {
      for (String instance : allInstances) {
        List<Message> messages = pendingMessages.get(instance);
        if (messages != null) {
          for (Message message : messages) {
            currentStateOutput.setPendingMessage("test", partition, instance, message);
          }
        }
      }
    }
    Map<String, String> bestPossibleMap = rebalancer
        .computeBestPossibleStateForPartition(liveInstances, stateModelDef, instancePreferenceList,
            currentStateOutput, Collections.emptySet(), is, new ClusterConfig("TestCluster"),
            partition, MonitoredAbnormalResolver.DUMMY_STATE_RESOLVER);
    Assert.assertEquals(bestPossibleMap, expectedBestPossibleMap,
        "Differs, get " + bestPossibleMap + "\nexpected: " + expectedBestPossibleMap
        + "\ncurrentState: " + currentStateMap + "\npreferenceList: " + instancePreferenceList);
    System.out.println(
        "END testZeroReplicaAvoidanceDuringRebalance at " + new Date(System.currentTimeMillis()));
  }

  /**
   * TestNG data provider: concatenates the MasterSlave and OnlineOffline scenario files.
   */
  @DataProvider(name = "zeroReplicaInput")
  public Object[][] rebalanceStrategies() {
    List<Object[]> data = new ArrayList<>();
    data.addAll(loadTestInputs("TestDelayedAutoRebalancer.MasterSlave.json"));
    data.addAll(loadTestInputs("TestDelayedAutoRebalancer.OnlineOffline.json"));
    return data.toArray(new Object[0][]);
  }

  /**
   * Loads rebalance scenarios from a JSON classpath resource.
   *
   * @param fileName classpath resource name of the scenario file
   * @return one Object[] per scenario: {stateModelDef, preferenceList, currentStates,
   *         pendingMessages, bestPossibleStates}; an empty (never null) list on I/O failure,
   *         so callers can addAll() safely
   */
  @SuppressWarnings("unchecked")
  public List<Object[]> loadTestInputs(String fileName) {
    List<Object[]> ret = new ArrayList<>();
    // try-with-resources: the stream was previously leaked (never closed).
    try (InputStream inputStream = getClass().getClassLoader().getResourceAsStream(fileName)) {
      if (inputStream == null) {
        // Fail with a clear message instead of an NPE deep inside Jackson.
        throw new IOException("Test input resource not found: " + fileName);
      }
      // readerFor() replaces the deprecated reader(Class) with identical behavior.
      ObjectReader mapReader = new ObjectMapper().readerFor(Map.class);
      Map<String, Object> inputMaps = mapReader.readValue(inputStream);
      String stateModelName = (String) inputMaps.get(STATE_MODEL);
      StateModelDefinition stateModelDef =
          BuiltInStateModelDefinitions.valueOf(stateModelName).getStateModelDefinition();
      List<Map<String, Object>> inputs = (List<Map<String, Object>>) inputMaps.get(INPUT);
      for (Map<String, Object> inMap : inputs) {
        Map<String, String> currentStates = (Map<String, String>) inMap.get(CURRENT_STATE);
        Map<String, String> bestPossibleStates =
            (Map<String, String>) inMap.get(BEST_POSSIBLE_STATE);
        List<String> preferenceList = (List<String>) inMap.get(PREFERENCE_LIST);
        Map<String, String> pendingStates = (Map<String, String>) inMap.get(PENDING_MESSAGES);
        Map<String, List<Message>> pendingMessages = null;
        if (pendingStates != null) {
          pendingMessages = new HashMap<>();
          for (Map.Entry<String, String> entry : pendingStates.entrySet()) {
            // Pending transitions are encoded as "FROM:TO".
            String[] transition = entry.getValue().split(":");
            Message m = new Message(new ZNRecord(UUID.randomUUID().toString()));
            m.setFromState(transition[0]);
            m.setToState(transition[1]);
            pendingMessages.computeIfAbsent(entry.getKey(), k -> new ArrayList<>()).add(m);
          }
        }
        ret.add(new Object[] {
            stateModelDef, preferenceList, currentStates, pendingMessages, bestPossibleStates
        });
      }
    } catch (IOException e) {
      e.printStackTrace();
    }
    return ret;
  }
}
| 9,750 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/TestAutoRebalanceStrategy.java
|
package org.apache.helix.controller.rebalancer;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.helix.HelixDefinedState;
import org.apache.helix.MockAccessor;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.rebalancer.constraint.MonitoredAbnormalResolver;
import org.apache.helix.controller.rebalancer.strategy.AutoRebalanceStrategy;
import org.apache.helix.controller.rebalancer.strategy.RebalanceStrategy;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Partition;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.tools.StateModelConfigGenerator;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestAutoRebalanceStrategy {
private static Logger logger = LoggerFactory.getLogger(TestAutoRebalanceStrategy.class);
/**
* Sanity test for a basic Master-Slave model
*/
@Test
public void simpleMasterSlaveTest() {
final int NUM_ITERATIONS = 10;
final int NUM_PARTITIONS = 10;
final int NUM_LIVE_NODES = 12;
final int NUM_TOTAL_NODES = 20;
final int MAX_PER_NODE = 5;
final String[] STATE_NAMES = {
"MASTER", "SLAVE"
};
final int[] STATE_COUNTS = {
1, 2
};
runTest("BasicMasterSlave", NUM_ITERATIONS, NUM_PARTITIONS, NUM_LIVE_NODES, NUM_TOTAL_NODES,
MAX_PER_NODE, STATE_NAMES, STATE_COUNTS);
}
/**
* Run a test for an arbitrary state model.
* @param name Name of the test state model
* @param numIterations Number of rebalance tasks to run
* @param numPartitions Number of partitions for the resource
* @param numLiveNodes Number of live nodes in the cluster
* @param numTotalNodes Number of nodes in the cluster, must be greater than or equal to
* numLiveNodes
* @param maxPerNode Maximum number of replicas a node can serve
* @param stateNames States ordered by preference
* @param stateCounts Number of replicas that should be in each state
*/
private void runTest(String name, int numIterations, int numPartitions, int numLiveNodes,
int numTotalNodes, int maxPerNode, String[] stateNames, int[] stateCounts) {
List<String> partitions = new ArrayList<String>();
for (int i = 0; i < numPartitions; i++) {
partitions.add("p_" + i);
}
List<String> liveNodes = new ArrayList<String>();
List<String> allNodes = new ArrayList<String>();
for (int i = 0; i < numTotalNodes; i++) {
allNodes.add("n_" + i);
if (i < numLiveNodes) {
liveNodes.add("n_" + i);
}
}
Map<String, Map<String, String>> currentMapping = new TreeMap<String, Map<String, String>>();
LinkedHashMap<String, Integer> states = new LinkedHashMap<String, Integer>();
for (int i = 0; i < Math.min(stateNames.length, stateCounts.length); i++) {
states.put(stateNames[i], stateCounts[i]);
}
StateModelDefinition stateModelDef = getIncompleteStateModelDef(name, stateNames[0], states);
new AutoRebalanceTester(partitions, states, liveNodes, currentMapping, allNodes, maxPerNode,
stateModelDef).runRepeatedly(numIterations);
}
/**
* Get a StateModelDefinition without transitions. The auto rebalancer doesn't take transitions
* into account when computing mappings, so this is acceptable.
* @param modelName name to give the model
* @param initialState initial state for all nodes
* @param states ordered map of state to count
* @return incomplete StateModelDefinition for rebalancing
*/
private StateModelDefinition getIncompleteStateModelDef(String modelName, String initialState,
LinkedHashMap<String, Integer> states) {
StateModelDefinition.Builder builder = new StateModelDefinition.Builder(modelName);
builder.initialState(initialState);
int i = states.size();
for (String state : states.keySet()) {
builder.addState(state, i);
builder.upperBound(state, states.get(state));
i--;
}
return builder.build();
}
  /**
   * Randomized simulation harness: each iteration kills, adds, or resurrects one node, reruns
   * the auto-rebalance strategy, and verifies placement invariants on the resulting mapping.
   */
  class AutoRebalanceTester {
    // Probabilities for the random per-iteration action; the three sum to 1.0.
    private static final double P_KILL = 0.45;
    private static final double P_ADD = 0.1;
    private static final double P_RESURRECT = 0.45;
    private static final String RESOURCE_NAME = "resource";
    private List<String> _partitions;
    // Ordered state name -> replica count (preference order preserved).
    private LinkedHashMap<String, Integer> _states;
    private List<String> _liveNodes;
    // Set view of _liveNodes for O(log n) membership checks.
    private Set<String> _liveSet;
    // Nodes that were live and have been killed (resurrection candidates).
    private Set<String> _removedSet;
    // Nodes that have never been live (addition candidates).
    private Set<String> _nonLiveSet;
    // partition -> (node -> state); refreshed after every rebalance run.
    private Map<String, Map<String, String>> _currentMapping;
    private List<String> _allNodes;
    private int _maxPerNode;
    private StateModelDefinition _stateModelDef;
    private Random _random;
    public AutoRebalanceTester(List<String> partitions, LinkedHashMap<String, Integer> states,
        List<String> liveNodes, Map<String, Map<String, String>> currentMapping,
        List<String> allNodes, int maxPerNode, StateModelDefinition stateModelDef) {
      _partitions = partitions;
      _states = states;
      _liveNodes = liveNodes;
      _liveSet = new TreeSet<String>();
      for (String node : _liveNodes) {
        _liveSet.add(node);
      }
      _removedSet = new TreeSet<String>();
      _nonLiveSet = new TreeSet<String>();
      _currentMapping = currentMapping;
      _allNodes = allNodes;
      // Any node not initially live goes to the never-live pool.
      for (String node : allNodes) {
        if (!_liveSet.contains(node)) {
          _nonLiveSet.add(node);
        }
      }
      _maxPerNode = maxPerNode;
      _stateModelDef = stateModelDef;
      _random = new Random();
    }
    /**
     * Repeatedly randomly select a task to run and report the result
     * @param numIterations
     *          Number of random tasks to run in sequence
     */
    public void runRepeatedly(int numIterations) {
      logger.info("~~~~ Initial State ~~~~~");
      RebalanceStrategy strategy =
          new AutoRebalanceStrategy(RESOURCE_NAME, _partitions, _states, _maxPerNode);
      ZNRecord initialResult =
          strategy.computePartitionAssignment(_allNodes, _liveNodes, _currentMapping, null);
      _currentMapping = getMapping(initialResult.getListFields());
      logger.info(_currentMapping.toString());
      getRunResult(_currentMapping, initialResult.getListFields());
      for (int i = 0; i < numIterations; i++) {
        logger.info("~~~~ Iteration " + i + " ~~~~~");
        ZNRecord znRecord = runOnceRandomly();
        // znRecord is null when the chosen action had no eligible node (e.g. nothing to kill).
        if (znRecord != null) {
          final Map<String, List<String>> listResult = znRecord.getListFields();
          final Map<String, Map<String, String>> mapResult = getMapping(listResult);
          logger.info(mapResult.toString());
          logger.info(listResult.toString());
          getRunResult(mapResult, listResult);
          _currentMapping = mapResult;
        }
      }
    }
    /**
     * Converts a preference-list assignment into a (partition -> node -> state) map by running
     * the AutoRebalancer state-assignment logic against the current mapping.
     */
    private Map<String, Map<String, String>> getMapping(final Map<String, List<String>> listResult) {
      final Map<String, Map<String, String>> mapResult = new HashMap<String, Map<String, String>>();
      ResourceControllerDataProvider cache = new ResourceControllerDataProvider();
      MockAccessor accessor = new MockAccessor();
      Builder keyBuilder = accessor.keyBuilder();
      ClusterConfig clusterConfig = new ClusterConfig("TestCluster");
      accessor.setProperty(keyBuilder.clusterConfig(), clusterConfig);
      // Register only the currently-live nodes so the cache reflects the simulated cluster.
      for (String node : _liveNodes) {
        LiveInstance liveInstance = new LiveInstance(node);
        liveInstance.setSessionId("testSession");
        accessor.setProperty(keyBuilder.liveInstance(node), liveInstance);
      }
      cache.refresh(accessor);
      IdealState is = new IdealState("resource");
      for (String partition : _partitions) {
        List<String> preferenceList = listResult.get(partition);
        Map<String, String> currentStateMap = _currentMapping.get(partition);
        Set<String> disabled = Collections.emptySet();
        Partition p = new Partition(partition);
        CurrentStateOutput currentStateOutput = new CurrentStateOutput();
        if (currentStateMap != null) {
          for (String instance : currentStateMap.keySet()) {
            currentStateOutput
                .setCurrentState("resource", p, instance, currentStateMap.get(instance));
          }
        }
        Map<String, String> assignment = new AutoRebalancer()
            .computeBestPossibleStateForPartition(cache.getLiveInstances().keySet(), _stateModelDef,
                preferenceList, currentStateOutput, disabled, is, clusterConfig, p,
                MonitoredAbnormalResolver.DUMMY_STATE_RESOLVER);
        mapResult.put(partition, assignment);
      }
      return mapResult;
    }
    /**
     * Output various statistics and correctness check results
     * @param mapFields
     *          The map-map assignment generated by the rebalancer
     * @param listFields
     *          The map-list assignment generated by the rebalancer
     */
    public void getRunResult(final Map<String, Map<String, String>> mapFields,
        final Map<String, List<String>> listFields) {
      logger.info("***** Statistics *****");
      dumpStatistics(mapFields);
      verifyCorrectness(mapFields, listFields);
    }
    /**
     * Output statistics about the assignment
     * @param mapFields
     *          The map-map assignment generated by the rebalancer
     */
    public void dumpStatistics(final Map<String, Map<String, String>> mapFields) {
      Map<String, Integer> partitionsPerNode = getPartitionBucketsForNode(mapFields);
      int nodeCount = _liveNodes.size();
      logger.info("Total number of nodes: " + nodeCount);
      logger.info("Nodes: " + _liveNodes);
      int sumPartitions = getSum(partitionsPerNode.values());
      logger.info("Total number of partitions: " + sumPartitions);
      double averagePartitions = getAverage(partitionsPerNode.values());
      logger.info("Average number of partitions per node: " + averagePartitions);
      double stdevPartitions = getStdev(partitionsPerNode.values(), averagePartitions);
      logger.info("Standard deviation of partitions: " + stdevPartitions);
      // Statistics about each state
      Map<String, Map<String, Integer>> statesPerNode = getStateBucketsForNode(mapFields);
      for (String state : _states.keySet()) {
        Map<String, Integer> nodeStateCounts = new TreeMap<String, Integer>();
        for (Entry<String, Map<String, Integer>> nodeStates : statesPerNode.entrySet()) {
          Map<String, Integer> stateCounts = nodeStates.getValue();
          if (stateCounts.containsKey(state)) {
            nodeStateCounts.put(nodeStates.getKey(), stateCounts.get(state));
          } else {
            nodeStateCounts.put(nodeStates.getKey(), 0);
          }
        }
        int sumStates = getSum(nodeStateCounts.values());
        logger.info("Total number of state " + state + ": " + sumStates);
        double averageStates = getAverage(nodeStateCounts.values());
        logger.info("Average number of state " + state + " per node: " + averageStates);
        double stdevStates = getStdev(nodeStateCounts.values(), averageStates);
        logger.info("Standard deviation of state " + state + " per node: " + stdevStates);
      }
    }
    /**
     * Run a set of correctness tests, reporting success or failure
     * @param mapFields
     *          The map-map assignment generated by the rebalancer
     * @param listFields
     *          The map-list assignment generated by the rebalancer
     */
    public void verifyCorrectness(final Map<String, Map<String, String>> mapFields,
        final Map<String, List<String>> listFields) {
      final Map<String, Integer> partitionsPerNode = getPartitionBucketsForNode(mapFields);
      boolean maxConstraintMet = maxNotExceeded(partitionsPerNode);
      assert maxConstraintMet : "Max per node constraint: FAIL";
      logger.info("Max per node constraint: PASS");
      boolean liveConstraintMet = onlyLiveAssigned(partitionsPerNode);
      assert liveConstraintMet : "Only live nodes have partitions constraint: FAIL";
      logger.info("Only live nodes have partitions constraint: PASS");
      boolean stateAssignmentPossible = correctStateAssignmentCount(mapFields);
      assert stateAssignmentPossible : "State replica constraint: FAIL";
      logger.info("State replica constraint: PASS");
      boolean nodesUniqueForPartitions = atMostOnePartitionReplicaPerNode(listFields);
      assert nodesUniqueForPartitions : "Node uniqueness per partition constraint: FAIL";
      logger.info("Node uniqueness per partition constraint: PASS");
    }
    // Checks that no node serves more than _maxPerNode replicas.
    private boolean maxNotExceeded(final Map<String, Integer> partitionsPerNode) {
      for (String node : partitionsPerNode.keySet()) {
        Integer value = partitionsPerNode.get(node);
        if (value > _maxPerNode) {
          logger.error("ERROR: Node " + node + " has " + value
              + " partitions despite a maximum of " + _maxPerNode);
          return false;
        }
      }
      return true;
    }
    // Checks that only live nodes hold replicas.
    private boolean onlyLiveAssigned(final Map<String, Integer> partitionsPerNode) {
      for (final Entry<String, Integer> nodeState : partitionsPerNode.entrySet()) {
        boolean isLive = _liveSet.contains(nodeState.getKey());
        boolean isEmpty = nodeState.getValue() == 0;
        if (!isLive && !isEmpty) {
          logger.error("ERROR: Node " + nodeState.getKey() + " is not live, but has "
              + nodeState.getValue() + " replicas!");
          return false;
        }
      }
      return true;
    }
    // Checks that no partition has more replicas in a state than the state model allows
    // (DROPPED replicas are not counted).
    private boolean correctStateAssignmentCount(final Map<String, Map<String, String>> assignment) {
      for (final Entry<String, Map<String, String>> partitionEntry : assignment.entrySet()) {
        final Map<String, String> nodeMap = partitionEntry.getValue();
        final Map<String, Integer> stateCounts = new TreeMap<String, Integer>();
        for (String state : nodeMap.values()) {
          if (!stateCounts.containsKey(state)) {
            stateCounts.put(state, 1);
          } else {
            stateCounts.put(state, stateCounts.get(state) + 1);
          }
        }
        for (String state : stateCounts.keySet()) {
          if (state.equals(HelixDefinedState.DROPPED.toString())) {
            continue;
          }
          int count = stateCounts.get(state);
          int maximumCount = _states.get(state);
          if (count > maximumCount) {
            logger.error("ERROR: State " + state + " for partition " + partitionEntry.getKey()
                + " has " + count + " replicas when " + maximumCount + " is allowed!");
            return false;
          }
        }
      }
      return true;
    }
    // Checks that no node appears twice in any partition's preference list.
    private boolean atMostOnePartitionReplicaPerNode(final Map<String, List<String>> listFields) {
      for (final Entry<String, List<String>> partitionEntry : listFields.entrySet()) {
        Set<String> nodeSet = new HashSet<String>(partitionEntry.getValue());
        int numUniques = nodeSet.size();
        int total = partitionEntry.getValue().size();
        if (numUniques < total) {
          logger.error("ERROR: Partition " + partitionEntry.getKey() + " is assigned to " + total
              + " nodes, but only " + numUniques + " are unique!");
          return false;
        }
      }
      return true;
    }
    // Arithmetic mean of the values; -1.0 for an empty collection.
    private double getAverage(final Collection<Integer> values) {
      double sum = 0.0;
      for (Integer value : values) {
        sum += value;
      }
      if (values.size() != 0) {
        return sum / values.size();
      } else {
        return -1.0;
      }
    }
    // Sum of the values.
    private int getSum(final Collection<Integer> values) {
      int sum = 0;
      for (Integer value : values) {
        sum += value;
      }
      return sum;
    }
    // Population standard deviation around the given mean; -1.0 for an empty collection.
    private double getStdev(final Collection<Integer> values, double mean) {
      double sum = 0.0;
      for (Integer value : values) {
        double deviation = mean - value;
        sum += Math.pow(deviation, 2.0);
      }
      if (values.size() != 0) {
        sum /= values.size();
        return Math.pow(sum, 0.5);
      } else {
        return -1.0;
      }
    }
    // Counts non-DROPPED replicas per node; every live node appears in the result,
    // even with a zero count.
    private Map<String, Integer> getPartitionBucketsForNode(
        final Map<String, Map<String, String>> assignment) {
      Map<String, Integer> partitionsPerNode = new TreeMap<String, Integer>();
      for (String node : _liveNodes) {
        partitionsPerNode.put(node, 0);
      }
      for (Entry<String, Map<String, String>> partitionEntry : assignment.entrySet()) {
        final Map<String, String> nodeMap = partitionEntry.getValue();
        for (String node : nodeMap.keySet()) {
          String state = nodeMap.get(node);
          if (state.equals(HelixDefinedState.DROPPED.toString())) {
            continue;
          }
          // add 1 for every occurrence of a node
          if (!partitionsPerNode.containsKey(node)) {
            partitionsPerNode.put(node, 1);
          } else {
            partitionsPerNode.put(node, partitionsPerNode.get(node) + 1);
          }
        }
      }
      return partitionsPerNode;
    }
    // Counts replicas per (node, state); every live node appears in the result.
    private Map<String, Map<String, Integer>> getStateBucketsForNode(
        final Map<String, Map<String, String>> assignment) {
      Map<String, Map<String, Integer>> result = new TreeMap<String, Map<String, Integer>>();
      for (String n : _liveNodes) {
        result.put(n, new TreeMap<String, Integer>());
      }
      for (Map<String, String> nodeStateMap : assignment.values()) {
        for (Entry<String, String> nodeState : nodeStateMap.entrySet()) {
          if (!result.containsKey(nodeState.getKey())) {
            result.put(nodeState.getKey(), new TreeMap<String, Integer>());
          }
          Map<String, Integer> stateMap = result.get(nodeState.getKey());
          if (!stateMap.containsKey(nodeState.getValue())) {
            stateMap.put(nodeState.getValue(), 1);
          } else {
            stateMap.put(nodeState.getValue(), stateMap.get(nodeState.getValue()) + 1);
          }
        }
      }
      return result;
    }
    /**
     * Randomly choose between killing, adding, or resurrecting a single node
     * @return (Partition -> (Node -> State)) ZNRecord
     */
    public ZNRecord runOnceRandomly() {
      double choose = _random.nextDouble();
      ZNRecord result = null;
      if (choose < P_KILL) {
        result = removeSingleNode(null);
      } else if (choose < P_KILL + P_ADD) {
        result = addSingleNode(null);
      } else if (choose < P_KILL + P_ADD + P_RESURRECT) {
        result = resurrectSingleNode(null);
      }
      return result;
    }
    /**
     * Run rebalancer trying to add a never-live node
     * @param node
     *          Optional String to add
     * @return ZNRecord result returned by the rebalancer, or null if no node can be added
     */
    public ZNRecord addSingleNode(String node) {
      logger.info("=================== add node =================");
      if (_nonLiveSet.size() == 0) {
        logger.warn("Cannot add node because there are no nodes left to add.");
        return null;
      }
      // Get a random never-live node
      if (node == null || !_nonLiveSet.contains(node)) {
        node = getRandomSetElement(_nonLiveSet);
      }
      logger.info("Adding " + node);
      _liveNodes.add(node);
      _liveSet.add(node);
      _nonLiveSet.remove(node);
      return new AutoRebalanceStrategy(RESOURCE_NAME, _partitions, _states, _maxPerNode).
          computePartitionAssignment(_allNodes, _liveNodes, _currentMapping, null);
    }
    /**
     * Run rebalancer trying to remove a live node
     * @param node
     *          Optional String to remove
     * @return ZNRecord result returned by the rebalancer, or null if no node can be removed
     */
    public ZNRecord removeSingleNode(String node) {
      logger.info("=================== remove node =================");
      if (_liveSet.size() == 0) {
        logger.warn("Cannot remove node because there are no nodes left to remove.");
        return null;
      }
      // Get a random live node
      if (node == null || !_liveSet.contains(node)) {
        node = getRandomSetElement(_liveSet);
      }
      logger.info("Removing " + node);
      _removedSet.add(node);
      _liveNodes.remove(node);
      _liveSet.remove(node);
      // the rebalancer expects that the current mapping doesn't contain deleted
      // nodes
      for (Map<String, String> nodeMap : _currentMapping.values()) {
        if (nodeMap.containsKey(node)) {
          nodeMap.remove(node);
        }
      }
      return new AutoRebalanceStrategy(RESOURCE_NAME, _partitions, _states, _maxPerNode)
          .computePartitionAssignment(_allNodes, _liveNodes, _currentMapping, null);
    }
    /**
     * Run rebalancer trying to add back a removed node
     * @param node
     *          Optional String to resurrect
     * @return ZNRecord result returned by the rebalancer, or null if no node can be resurrected
     */
    public ZNRecord resurrectSingleNode(String node) {
      logger.info("=================== resurrect node =================");
      if (_removedSet.size() == 0) {
        // NOTE(review): message says "remove" but this is the resurrect path.
        logger.warn("Cannot remove node because there are no nodes left to resurrect.");
        return null;
      }
      // Get a random previously removed node
      if (node == null || !_removedSet.contains(node)) {
        node = getRandomSetElement(_removedSet);
      }
      logger.info("Resurrecting " + node);
      _removedSet.remove(node);
      _liveNodes.add(node);
      _liveSet.add(node);
      return new AutoRebalanceStrategy(RESOURCE_NAME, _partitions, _states, _maxPerNode)
          .computePartitionAssignment(_allNodes, _liveNodes, _currentMapping, null);
    }
    // Picks a uniformly random element from the set; null only if the set is empty.
    private <T> T getRandomSetElement(Set<T> source) {
      int element = _random.nextInt(source.size());
      int i = 0;
      for (T node : source) {
        if (i == element) {
          return node;
        }
        i++;
      }
      return null;
    }
  }
/**
 * Tests the following scenario: nodes come up one by one, then one node is taken down. Preference
 * lists should prefer nodes in the current mapping at all times, but when all nodes are in the
 * current mapping, then it should distribute states as evenly as possible.
 */
@Test
public void testOrphansNotPreferred() {
  final String RESOURCE_NAME = "resource";
  final String[] PARTITIONS = {
      "resource_0", "resource_1", "resource_2"
  };
  final StateModelDefinition STATE_MODEL =
      new StateModelDefinition(StateModelConfigGenerator.generateConfigForMasterSlave());
  final int REPLICA_COUNT = 2;
  final String[] NODES = {
      "n0", "n1", "n2"
  };
  // initial state, one node, no mapping
  List<String> allNodes = Lists.newArrayList(NODES[0]);
  List<String> liveNodes = Lists.newArrayList(NODES[0]);
  Map<String, Map<String, String>> currentMapping = Maps.newHashMap();
  for (String partition : PARTITIONS) {
    currentMapping.put(partition, new HashMap<String, String>());
  }
  // make sure that when the first node joins, a single replica is assigned fairly
  List<String> partitions = ImmutableList.copyOf(PARTITIONS);
  LinkedHashMap<String, Integer> stateCount =
      STATE_MODEL.getStateCountMap(liveNodes.size(), REPLICA_COUNT);
  ZNRecord znRecord =
      new AutoRebalanceStrategy(RESOURCE_NAME, partitions, stateCount)
          .computePartitionAssignment(allNodes, liveNodes, currentMapping, null);
  Map<String, List<String>> preferenceLists = znRecord.getListFields();
  for (String partition : currentMapping.keySet()) {
    // make sure these are all MASTER
    // with a single live node, each preference list holds exactly that one node
    List<String> preferenceList = preferenceLists.get(partition);
    Assert.assertNotNull(preferenceList, "invalid preference list for " + partition);
    Assert.assertEquals(preferenceList.size(), 1, "invalid preference list for " + partition);
  }
  // now assign a replica to the first node in the current mapping, and add a second node
  allNodes.add(NODES[1]);
  liveNodes.add(NODES[1]);
  stateCount = STATE_MODEL.getStateCountMap(liveNodes.size(), REPLICA_COUNT);
  for (String partition : PARTITIONS) {
    currentMapping.get(partition).put(NODES[0], "MASTER");
  }
  znRecord =
      new AutoRebalanceStrategy(RESOURCE_NAME, partitions, stateCount)
          .computePartitionAssignment(allNodes, liveNodes, currentMapping, null);
  preferenceLists = znRecord.getListFields();
  // the node already holding the replica (n0) must stay first; the new node (n1) is appended
  for (String partition : currentMapping.keySet()) {
    List<String> preferenceList = preferenceLists.get(partition);
    Assert.assertNotNull(preferenceList, "invalid preference list for " + partition);
    Assert.assertEquals(preferenceList.size(), 2, "invalid preference list for " + partition);
    Assert.assertEquals(preferenceList.get(0), NODES[0], "invalid preference list for "
        + partition);
    Assert.assertEquals(preferenceList.get(1), NODES[1], "invalid preference list for "
        + partition);
  }
  // now set the current mapping to reflect this update and make sure that it distributes masters
  for (String partition : PARTITIONS) {
    currentMapping.get(partition).put(NODES[1], "SLAVE");
  }
  znRecord =
      new AutoRebalanceStrategy(RESOURCE_NAME, partitions, stateCount)
          .computePartitionAssignment(allNodes, liveNodes, currentMapping, null);
  preferenceLists = znRecord.getListFields();
  Set<String> firstNodes = Sets.newHashSet();
  for (String partition : currentMapping.keySet()) {
    List<String> preferenceList = preferenceLists.get(partition);
    Assert.assertNotNull(preferenceList, "invalid preference list for " + partition);
    Assert.assertEquals(preferenceList.size(), 2, "invalid preference list for " + partition);
    firstNodes.add(preferenceList.get(0));
  }
  // both nodes should appear in first position across the three partitions
  Assert.assertEquals(firstNodes.size(), 2, "masters not evenly distributed");
  // set a mapping corresponding to a valid mapping for 2 nodes, add a third node, check that the
  // new node is never the most preferred
  allNodes.add(NODES[2]);
  liveNodes.add(NODES[2]);
  stateCount = STATE_MODEL.getStateCountMap(liveNodes.size(), REPLICA_COUNT);
  // recall that the other two partitions are [MASTER, SLAVE], which is fine, just reorder one
  currentMapping.get(PARTITIONS[1]).put(NODES[0], "SLAVE");
  currentMapping.get(PARTITIONS[1]).put(NODES[1], "MASTER");
  znRecord =
      new AutoRebalanceStrategy(RESOURCE_NAME, partitions, stateCount)
          .computePartitionAssignment(allNodes, liveNodes, currentMapping, null);
  preferenceLists = znRecord.getListFields();
  boolean newNodeUsed = false;
  for (String partition : currentMapping.keySet()) {
    List<String> preferenceList = preferenceLists.get(partition);
    Assert.assertNotNull(preferenceList, "invalid preference list for " + partition);
    Assert.assertEquals(preferenceList.size(), 2, "invalid preference list for " + partition);
    if (preferenceList.contains(NODES[2])) {
      newNodeUsed = true;
      // an orphan (newly added) replica must sit at the tail, never as master
      Assert.assertEquals(preferenceList.get(1), NODES[2],
          "newly added node not at preference list tail for " + partition);
    }
  }
  Assert.assertTrue(newNodeUsed, "not using " + NODES[2]);
  // now remap this to take the new node into account, should go back to balancing masters, slaves
  // evenly across all nodes
  for (String partition : PARTITIONS) {
    currentMapping.get(partition).clear();
  }
  currentMapping.get(PARTITIONS[0]).put(NODES[0], "MASTER");
  currentMapping.get(PARTITIONS[0]).put(NODES[1], "SLAVE");
  currentMapping.get(PARTITIONS[1]).put(NODES[1], "MASTER");
  currentMapping.get(PARTITIONS[1]).put(NODES[2], "SLAVE");
  currentMapping.get(PARTITIONS[2]).put(NODES[0], "MASTER");
  currentMapping.get(PARTITIONS[2]).put(NODES[2], "SLAVE");
  znRecord =
      new AutoRebalanceStrategy(RESOURCE_NAME, partitions, stateCount)
          .computePartitionAssignment(allNodes, liveNodes, currentMapping, null);
  preferenceLists = znRecord.getListFields();
  firstNodes.clear();
  Set<String> secondNodes = Sets.newHashSet();
  for (String partition : currentMapping.keySet()) {
    List<String> preferenceList = preferenceLists.get(partition);
    Assert.assertNotNull(preferenceList, "invalid preference list for " + partition);
    Assert.assertEquals(preferenceList.size(), 2, "invalid preference list for " + partition);
    firstNodes.add(preferenceList.get(0));
    secondNodes.add(preferenceList.get(1));
  }
  // with 3 nodes and 3 partitions, every node should appear once in each list position
  Assert.assertEquals(firstNodes.size(), 3, "masters not distributed evenly");
  Assert.assertEquals(secondNodes.size(), 3, "slaves not distributed evenly");
  // remove a node now, but use the current mapping with everything balanced just prior
  liveNodes.remove(0);
  stateCount = STATE_MODEL.getStateCountMap(liveNodes.size(), REPLICA_COUNT);
  // remove all references of n0 from the mapping, keep everything else in a legal state
  for (String partition : PARTITIONS) {
    currentMapping.get(partition).clear();
  }
  currentMapping.get(PARTITIONS[0]).put(NODES[1], "MASTER");
  currentMapping.get(PARTITIONS[1]).put(NODES[1], "MASTER");
  currentMapping.get(PARTITIONS[1]).put(NODES[2], "SLAVE");
  currentMapping.get(PARTITIONS[2]).put(NODES[2], "MASTER");
  znRecord =
      new AutoRebalanceStrategy(RESOURCE_NAME, partitions, stateCount)
          .computePartitionAssignment(allNodes, liveNodes, currentMapping, null);
  preferenceLists = znRecord.getListFields();
  for (String partition : currentMapping.keySet()) {
    List<String> preferenceList = preferenceLists.get(partition);
    Assert.assertNotNull(preferenceList, "invalid preference list for " + partition);
    Assert.assertEquals(preferenceList.size(), 2, "invalid preference list for " + partition);
    Map<String, String> stateMap = currentMapping.get(partition);
    // minimal movement: every currently-assigned participant stays in the preference list
    for (String participant : stateMap.keySet()) {
      Assert.assertTrue(preferenceList.contains(participant), "minimal movement violated for "
          + partition);
    }
    // any replica not in the current mapping is new, so it must not take the master slot
    for (String participant : preferenceList) {
      if (!stateMap.containsKey(participant)) {
        Assert.assertNotSame(preferenceList.get(0), participant,
            "newly moved replica should not be master for " + partition);
      }
    }
  }
  // finally, adjust the current mapping to reflect 2 nodes and make sure everything's even again
  for (String partition : PARTITIONS) {
    currentMapping.get(partition).clear();
  }
  currentMapping.get(PARTITIONS[0]).put(NODES[1], "MASTER");
  currentMapping.get(PARTITIONS[0]).put(NODES[2], "SLAVE");
  currentMapping.get(PARTITIONS[1]).put(NODES[1], "SLAVE");
  currentMapping.get(PARTITIONS[1]).put(NODES[2], "MASTER");
  currentMapping.get(PARTITIONS[2]).put(NODES[1], "SLAVE");
  currentMapping.get(PARTITIONS[2]).put(NODES[2], "MASTER");
  znRecord =
      new AutoRebalanceStrategy(RESOURCE_NAME, partitions, stateCount)
          .computePartitionAssignment(allNodes, liveNodes, currentMapping, null);
  preferenceLists = znRecord.getListFields();
  firstNodes.clear();
  for (String partition : currentMapping.keySet()) {
    List<String> preferenceList = preferenceLists.get(partition);
    Assert.assertNotNull(preferenceList, "invalid preference list for " + partition);
    Assert.assertEquals(preferenceList.size(), 2, "invalid preference list for " + partition);
    firstNodes.add(preferenceList.get(0));
  }
  Assert.assertEquals(firstNodes.size(), 2, "masters not evenly distributed");
}
/**
 * Sanity check: with 4 nodes, 16 partitions and 3 replicas, the computed assignment must give
 * every partition a preference list of exactly 3 instances.
 */
@Test public void test() {
  int nPartitions = 16;
  final String resourceName = "something";
  final List<String> instanceNames =
      Arrays.asList("node-1", "node-2", "node-3", "node-4"); // Initialize to 4 unique strings
  final int nReplicas = 3;
  List<String> partitions = new ArrayList<String>(nPartitions);
  for (int i = 0; i < nPartitions; i++) {
    partitions.add(Integer.toString(i));
  }
  LinkedHashMap<String, Integer> states = new LinkedHashMap<String, Integer>(2);
  states.put("OFFLINE", 0);
  states.put("ONLINE", nReplicas);
  AutoRebalanceStrategy strategy = new AutoRebalanceStrategy(resourceName, partitions, states);
  ZNRecord znRecord = strategy.computePartitionAssignment(instanceNames, instanceNames,
      new HashMap<String, Map<String, String>>(0), null);
  // was a raw "List p" — use the parameterized type to avoid unchecked/raw-type warnings
  for (List<String> preferenceList : znRecord.getListFields().values()) {
    Assert.assertEquals(preferenceList.size(), nReplicas);
  }
}
/**
 * Tests the following scenario: there is only a single partition for a resource. Two nodes up,
 * partition should
 * be assigned to one of them. Take down that node, partition should move. Bring back up that
 * node, partition should not move unnecessarily.
 */
@Test
public void testWontMoveSinglePartitionUnnecessarily() {
  final String RESOURCE = "resource";
  final String partition = "resource_0";
  final StateModelDefinition STATE_MODEL =
      new StateModelDefinition(StateModelConfigGenerator.generateConfigForOnlineOffline());
  LinkedHashMap<String, Integer> stateCount = Maps.newLinkedHashMap();
  stateCount.put("ONLINE", 1);
  final String[] NODES = {"n0", "n1"};
  // initial state, two nodes, no mapping
  List<String> allNodes = Lists.newArrayList(NODES);
  List<String> liveNodes = Lists.newArrayList(NODES);
  Map<String, Map<String, String>> currentMapping = Maps.newHashMap();
  currentMapping.put(partition, new HashMap<String, String>());
  // Both nodes there
  // (removed a dead "upperBounds" map that was populated from the state model but never read)
  List<String> partitions = Lists.newArrayList(partition);
  ZNRecord znRecord =
      new AutoRebalanceStrategy(RESOURCE, partitions, stateCount, Integer.MAX_VALUE)
          .computePartitionAssignment(allNodes, liveNodes, currentMapping, null);
  Map<String, List<String>> preferenceLists = znRecord.getListFields();
  // "partition" is already a String; no toString() needed
  List<String> preferenceList = preferenceLists.get(partition);
  Assert.assertNotNull(preferenceList, "invalid preference list for " + partition);
  Assert.assertEquals(preferenceList.size(), 1, "invalid preference list for " + partition);
  String state = znRecord.getMapField(partition).get(preferenceList.get(0));
  Assert.assertEquals(state, "ONLINE", "Invalid state for " + partition);
  String preferredNode = preferenceList.get(0);
  String otherNode = preferredNode.equals(NODES[0]) ? NODES[1] : NODES[0];
  // ok, see what happens if we've got the partition on the other node (e.g. due to the preferred
  // node being down).
  currentMapping.get(partition).put(otherNode, state);
  znRecord =
      new AutoRebalanceStrategy(RESOURCE, partitions, stateCount, Integer.MAX_VALUE)
          .computePartitionAssignment(allNodes, liveNodes, currentMapping, null);
  preferenceLists = znRecord.getListFields();
  preferenceList = preferenceLists.get(partition);
  Assert.assertNotNull(preferenceList, "invalid preference list for " + partition);
  Assert.assertEquals(preferenceList.size(), 1, "invalid preference list for " + partition);
  state = znRecord.getMapField(partition).get(preferenceList.get(0));
  Assert.assertEquals(state, "ONLINE", "Invalid state for " + partition);
  String finalPreferredNode = preferenceList.get(0);
  // finally, make sure we haven't moved it.
  Assert.assertEquals(finalPreferredNode, otherNode);
}
}
| 9,751 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/TestAutoRebalanceStrategyImbalanceAssignment.java
|
package org.apache.helix.controller.rebalancer;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.rebalancer.strategy.AutoRebalanceStrategy;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestAutoRebalanceStrategyImbalanceAssignment {
  private static final String resourceName = "ImbalanceResource";

  /**
   * Exhaustively exercises every (partitions, replicas, nodes) combination up to the configured
   * maxima, restricted to combinations where the node count can host the replica count.
   */
  @Test
  public void testImbalanceAssignments() {
    final int nReplicas = 5;
    final int nPartitions = 20;
    final int nNode = 10;
    // Walk all the combinations of partitions, replicas and nodes (descending, as before)
    for (int partitionCount = nPartitions; partitionCount > 0; partitionCount--) {
      for (int replicaCount = nReplicas; replicaCount > 0; replicaCount--) {
        for (int nodeCount = nNode; nodeCount > 0; nodeCount--) {
          // only meaningful when there are at least as many nodes as replicas
          if (nodeCount >= replicaCount) {
            testAssignment(partitionCount, replicaCount, nodeCount);
          }
        }
      }
    }
  }

  /**
   * Computes a full-auto assignment for the given cluster shape and asserts that every
   * partition receives exactly {@code nReplicas} replicas.
   */
  private void testAssignment(int nPartitions, int nReplicas, int nNode) {
    final List<String> instanceNames = new ArrayList<>();
    for (int node = 0; node < nNode; node++) {
      instanceNames.add("localhost_" + node);
    }
    final List<String> partitions = new ArrayList<>(nPartitions);
    for (int id = 0; id < nPartitions; id++) {
      partitions.add(Integer.toString(id));
    }
    final LinkedHashMap<String, Integer> states = new LinkedHashMap<>(2);
    states.put("OFFLINE", 0);
    states.put("ONLINE", nReplicas);
    final AutoRebalanceStrategy strategy =
        new AutoRebalanceStrategy(resourceName, partitions, states);
    final ZNRecord record = strategy.computePartitionAssignment(instanceNames, instanceNames,
        new HashMap<String, Map<String, String>>(0), new ResourceControllerDataProvider());
    for (Map<String, String> stateMapping : record.getMapFields().values()) {
      Assert.assertEquals(stateMapping.size(), nReplicas);
    }
  }
}
| 9,752 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/TestAbstractRebalancer.java
|
package org.apache.helix.controller.rebalancer;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import org.apache.helix.controller.rebalancer.constraint.MonitoredAbnormalResolver;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.Partition;
import org.apache.helix.util.TestInputLoader;
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
public class TestAbstractRebalancer {

  /**
   * Verifies that {@code AutoRebalancer.computeBestPossibleStateForPartition} produces the
   * expected instance-to-state mapping for each JSON-driven scenario supplied by the data
   * provider.
   */
  @Test(dataProvider = "TestComputeBestPossibleStateInput")
  public void testComputeBestPossibleState(String comment, String stateModelName,
      List<String> liveInstances, List<String> preferenceList,
      Map<String, String> currentStateMap, List<String> disabledInstancesForPartition,
      Map<String, String> expectedBestPossibleMap) {
    System.out.println("Test case comment: " + comment);
    AutoRebalancer rebalancer = new AutoRebalancer();
    Partition partition = new Partition("testPartition");
    CurrentStateOutput currentStateOutput = new CurrentStateOutput();
    // iterate entries directly instead of keySet() + get() lookups
    for (Map.Entry<String, String> entry : currentStateMap.entrySet()) {
      currentStateOutput
          .setCurrentState("test", partition, entry.getKey(), entry.getValue());
    }
    Map<String, String> bestPossibleMap = rebalancer
        .computeBestPossibleStateForPartition(new HashSet<>(liveInstances),
            BuiltInStateModelDefinitions.valueOf(stateModelName).getStateModelDefinition(),
            preferenceList, currentStateOutput, new HashSet<>(disabledInstancesForPartition),
            new IdealState("test"), new ClusterConfig("TestCluster"), partition,
            MonitoredAbnormalResolver.DUMMY_STATE_RESOLVER);
    // assertEquals reports both maps on failure, unlike assertTrue(a.equals(b))
    Assert.assertEquals(bestPossibleMap, expectedBestPossibleMap);
  }

  /** Loads the JSON test scenarios consumed by {@link #testComputeBestPossibleState}. */
  @DataProvider(name = "TestComputeBestPossibleStateInput")
  public Object[][] loadTestComputeBestPossibleStateInput() {
    final String[] params = {"comment", "stateModel", "liveInstances", "preferenceList",
        "currentStateMap", "disabledInstancesForPartition", "expectedBestPossibleStateMap"};
    return TestInputLoader
        .loadTestInputs("TestAbstractRebalancer.ComputeBestPossibleState.json", params);
  }
}
| 9,753 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/TestConstraintRebalanceStrategy.java
|
package org.apache.helix.controller.rebalancer;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import org.apache.helix.HelixException;
import org.apache.helix.api.rebalancer.constraint.AbstractRebalanceHardConstraint;
import org.apache.helix.api.rebalancer.constraint.AbstractRebalanceSoftConstraint;
import org.apache.helix.api.rebalancer.constraint.dataprovider.CapacityProvider;
import org.apache.helix.api.rebalancer.constraint.dataprovider.PartitionWeightProvider;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.rebalancer.constraint.PartitionWeightAwareEvennessConstraint;
import org.apache.helix.controller.rebalancer.constraint.TotalCapacityConstraint;
import org.apache.helix.controller.rebalancer.constraint.dataprovider.MockCapacityProvider;
import org.apache.helix.controller.rebalancer.constraint.dataprovider.MockPartitionWeightProvider;
import org.apache.helix.controller.rebalancer.strategy.ConstraintRebalanceStrategy;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestConstraintRebalanceStrategy {
private static Logger _logger = LoggerFactory.getLogger(TestConstraintRebalanceStrategy.class);
final String resourceNamePrefix = "resource";
final int nParticipants = 40;
final int nResources = 20;
final int nPartitions = 100;
final int nReplicas = 3;
final int defaultCapacity = 6000; // total = 6000*40 = 240000
final int resourceWeight = 10; // total = 20*100*3*10 = 60000
final String topState = "ONLINE";
final List<String> resourceNames = new ArrayList<>();
final List<String> instanceNames = new ArrayList<>();
final List<String> partitions = new ArrayList<>(nPartitions);
final ResourceControllerDataProvider cache = new ResourceControllerDataProvider();
final LinkedHashMap<String, Integer> states = new LinkedHashMap<>(2);
@BeforeClass
public void beforeClass() {
  // Pre-generate the resource, participant and partition names shared by every test
  for (int idx = 0; idx < nResources; idx++) {
    resourceNames.add(resourceNamePrefix + idx);
  }
  for (int idx = 0; idx < nParticipants; idx++) {
    instanceNames.add("node" + idx);
  }
  for (int idx = 0; idx < nPartitions; idx++) {
    partitions.add(Integer.toString(idx));
  }
  setupMockCluster();
}
// Populates the shared data provider with live instances, per-instance configs, a
// non-topology-aware cluster config, and the OFFLINE/ONLINE state count map.
private void setupMockCluster() {
  List<LiveInstance> liveInstances = new ArrayList<>();
  Map<String, InstanceConfig> configMap = new HashMap<>();
  for (String instanceName : instanceNames) {
    liveInstances.add(new LiveInstance(instanceName));
    configMap.put(instanceName, new InstanceConfig(instanceName));
  }
  cache.setLiveInstances(liveInstances);
  cache.setInstanceConfigMap(configMap);
  ClusterConfig clusterConfig = new ClusterConfig("test");
  clusterConfig.setTopologyAwareEnabled(false);
  cache.setClusterConfig(clusterConfig);
  states.put("OFFLINE", 0);
  states.put(topState, nReplicas);
}
/**
 * Runs the constraint-based strategy once per resource and collects the resulting
 * resource -> partition -> (instance -> state) mappings.
 */
private Map<String, Map<String, Map<String, String>>> calculateAssignment(
    List<AbstractRebalanceHardConstraint> hardConstraints,
    List<AbstractRebalanceSoftConstraint> softConstraints) {
  Map<String, Map<String, Map<String, String>>> assignments = new HashMap<>();
  ConstraintRebalanceStrategy strategy =
      new ConstraintRebalanceStrategy(hardConstraints, softConstraints);
  for (String resource : resourceNames) {
    strategy.init(resource, partitions, states, Integer.MAX_VALUE);
    Map<String, Map<String, String>> partitionMap =
        new HashMap<>(strategy.computePartitionAssignment(instanceNames, instanceNames,
            new HashMap<String, Map<String, String>>(), cache).getMapFields());
    assignments.put(resource, partitionMap);
  }
  return assignments;
}
/**
 * Verifies that every partition in the assignment carries exactly the expected number of
 * replicas per state, and returns the total assigned partition weight per instance.
 * @param assignment resource -> partition -> (instance -> state)
 * @param weightProvider supplies the weight of each (resource, partition) pair
 * @return instance name -> total weight of the partitions assigned to it
 */
private Map<String, Integer> checkPartitionUsage(
    Map<String, Map<String, Map<String, String>>> assignment,
    PartitionWeightProvider weightProvider) {
  Map<String, Integer> weightCount = new HashMap<>();
  for (String resource : assignment.keySet()) {
    Map<String, Map<String, String>> partitionMap = assignment.get(resource);
    for (String partition : partitionMap.keySet()) {
      // check states: consume each assigned state from the expected count, all must hit zero
      Map<String, Integer> stateCount = new HashMap<>(states);
      Map<String, String> stateMap = partitionMap.get(partition);
      for (String state : stateMap.values()) {
        Assert.assertTrue(stateCount.containsKey(state));
        stateCount.put(state, stateCount.get(state) - 1);
      }
      for (int count : stateCount.values()) {
        Assert.assertEquals(count, 0);
      }
      // report weight; merge() replaces the containsKey/get/put dance
      int partitionWeight = weightProvider.getPartitionWeight(resource, partition);
      for (String instance : stateMap.keySet()) {
        weightCount.merge(instance, partitionWeight, Integer::sum);
      }
    }
  }
  return weightCount;
}
@Test
public void testEvenness() {
  // every participant gets an identical capacity bucket
  Map<String, Integer> capacity = new HashMap<>();
  for (String instance : instanceNames) {
    capacity.put(instance, defaultCapacity);
  }
  PartitionWeightProvider weightProvider = new MockPartitionWeightProvider(resourceWeight);
  CapacityProvider capacityProvider = new MockCapacityProvider(capacity, 0);
  TotalCapacityConstraint capacityConstraint =
      new TotalCapacityConstraint(weightProvider, capacityProvider);
  PartitionWeightAwareEvennessConstraint evenConstraint =
      new PartitionWeightAwareEvennessConstraint(weightProvider, capacityProvider);
  Map<String, Map<String, Map<String, String>>> assignment = calculateAssignment(
      Collections.<AbstractRebalanceHardConstraint>singletonList(capacityConstraint),
      Collections.<AbstractRebalanceSoftConstraint>singletonList(evenConstraint));
  Map<String, Integer> usage = checkPartitionUsage(assignment, weightProvider);
  int max = Collections.max(usage.values());
  int min = Collections.min(usage.values());
  // The default evenness constraint's accuracy is 0.01, so the max-min spread must stay
  // within 1/100 of a participant's capacity.
  Assert.assertTrue((max - min) <= defaultCapacity / 100);
}
@Test
public void testEvennessByDefaultConstraint() {
  // run the strategy with its built-in default constraints only
  Map<String, Map<String, Map<String, String>>> assignment = new HashMap<>();
  ConstraintRebalanceStrategy strategy = new ConstraintRebalanceStrategy();
  for (String resource : resourceNames) {
    strategy.init(resource, partitions, states, Integer.MAX_VALUE);
    Map<String, Map<String, String>> partitionMap =
        new HashMap<>(strategy.computePartitionAssignment(instanceNames, instanceNames,
            new HashMap<String, Map<String, String>>(), cache).getMapFields());
    assignment.put(resource, partitionMap);
  }
  // every partition counts as weight 1 for the usage check
  Map<String, Integer> usage = checkPartitionUsage(assignment, new PartitionWeightProvider() {
    @Override
    public int getPartitionWeight(String resource, String partition) {
      return 1;
    }
  });
  int max = Collections.max(usage.values());
  int min = Collections.min(usage.values());
  // Default evenness accuracy is 0.01, so the spread must be within 1/100 of the capacity.
  Assert.assertTrue((max - min) <= defaultCapacity / 100);
}
@Test
public void testCapacityAwareEvenness() {
  // capacity / weight
  // Participants get 1x, 2x or 3x defaultCapacity in a repeating pattern; totalBucket tracks
  // the sum of those multipliers so expected usage can be computed proportionally below.
  int totalBucket = 0;
  Map<String, Integer> capacity = new HashMap<>();
  for (int i = 0; i < instanceNames.size(); i++) {
    capacity.put(instanceNames.get(i), defaultCapacity * (1 + i % 3));
    totalBucket += 1 + i % 3;
  }
  int partitionWeightGranularity = (int) (resourceWeight * 1.5);
  int totalPartitionWeight = 0;
  // NOTE(review): seeded with wall-clock time, so partition weights differ across runs.
  Random ran = new Random(System.currentTimeMillis());
  Map<String, Map<String, Integer>> partitionWeightMap = new HashMap<>();
  for (String resource : resourceNames) {
    Map<String, Integer> weights = new HashMap<>();
    for (String partition : partitions) {
      // random weight in [resourceWeight/2, resourceWeight/2 + resourceWeight)
      int weight = resourceWeight / 2 + ran.nextInt(resourceWeight);
      weights.put(partition, weight);
      totalPartitionWeight += weight * nReplicas;
    }
    partitionWeightMap.put(resource, weights);
  }
  PartitionWeightProvider weightProvider =
      new MockPartitionWeightProvider(partitionWeightMap, resourceWeight);
  CapacityProvider capacityProvider = new MockCapacityProvider(capacity, 0);
  PartitionWeightAwareEvennessConstraint evenConstraint =
      new PartitionWeightAwareEvennessConstraint(weightProvider, capacityProvider);
  Map<String, Map<String, Map<String, String>>> assignment =
      calculateAssignment(Collections.EMPTY_LIST,
          Collections.<AbstractRebalanceSoftConstraint>singletonList(evenConstraint));
  Map<String, Integer> weightCount = checkPartitionUsage(assignment, weightProvider);
  for (int i = 0; i < instanceNames.size(); i++) {
    String instanceName = instanceNames.get(i);
    // expected usage is proportional to this instance's share of the total capacity buckets
    int expectedUsage = (int) ((double) totalPartitionWeight) / totalBucket * (1 + i % 3);
    int realUsage = weightCount.get(instanceName);
    // When have different capacity, calculation in the rebalance algorithm would have more fractions, so lose the restriction to 90% to 110% compared with the ideal value.
    Assert.assertTrue((expectedUsage - partitionWeightGranularity) * 0.9 <= realUsage
        && (expectedUsage + partitionWeightGranularity) * 1.1 >= realUsage);
  }
}
@Test
public void testHardConstraintFails() {
  // deliberately give every participant far too little capacity to host its share
  Map<String, Integer> capacity = new HashMap<>();
  for (String instance : instanceNames) {
    capacity.put(instance, defaultCapacity / 100);
  }
  PartitionWeightProvider weightProvider = new MockPartitionWeightProvider(resourceWeight);
  CapacityProvider capacityProvider = new MockCapacityProvider(capacity, 0);
  TotalCapacityConstraint capacityConstraint =
      new TotalCapacityConstraint(weightProvider, capacityProvider);
  try {
    calculateAssignment(
        Collections.<AbstractRebalanceHardConstraint>singletonList(capacityConstraint),
        Collections.EMPTY_LIST);
    Assert.fail("Assignment should fail because of insufficient capacity.");
  } catch (HelixException expected) {
    // expected: the hard capacity constraint cannot be satisfied
  }
}
@Test(dependsOnMethods = "testHardConstraintFails")
public void testConflictConstraint() {
  // sufficient capacity for the normal constraint
  Map<String, Integer> capacity = new HashMap<>();
  for (String instance : instanceNames) {
    capacity.put(instance, defaultCapacity);
  }
  PartitionWeightProvider weightProvider = new MockPartitionWeightProvider(resourceWeight);
  CapacityProvider capacityProvider = new MockCapacityProvider(capacity, 0);
  TotalCapacityConstraint normalCapacityConstraint =
      new TotalCapacityConstraint(weightProvider, capacityProvider);
  // a second constraint backed by an empty capacity map, which can never be satisfied
  TotalCapacityConstraint conflictingCapacityConstraint =
      new TotalCapacityConstraint(weightProvider,
          new MockCapacityProvider(Collections.EMPTY_MAP, 0));
  List<AbstractRebalanceHardConstraint> constraints = new ArrayList<>();
  constraints.add(normalCapacityConstraint);
  constraints.add(conflictingCapacityConstraint);
  try {
    calculateAssignment(constraints, Collections.EMPTY_LIST);
    Assert.fail("Assignment should fail because of the conflicting capacity constraint.");
  } catch (HelixException expected) {
    // expected: the two hard constraints cannot both hold
  }
}
@Test(dependsOnMethods = "testEvenness")
public void testSoftConstraintFails() {
  // undersized capacity: a soft constraint may be violated without aborting the assignment
  Map<String, Integer> capacity = new HashMap<>();
  for (String instance : instanceNames) {
    capacity.put(instance, defaultCapacity / 50);
  }
  PartitionWeightProvider weightProvider = new MockPartitionWeightProvider(resourceWeight);
  CapacityProvider capacityProvider = new MockCapacityProvider(capacity, 0);
  PartitionWeightAwareEvennessConstraint evenConstraint =
      new PartitionWeightAwareEvennessConstraint(weightProvider, capacityProvider);
  Map<String, Map<String, Map<String, String>>> assignment =
      calculateAssignment(Collections.EMPTY_LIST,
          Collections.<AbstractRebalanceSoftConstraint>singletonList(evenConstraint));
  Map<String, Integer> usage = checkPartitionUsage(assignment, weightProvider);
  int max = Collections.max(usage.values());
  int min = Collections.min(usage.values());
  // Even when capacity is exceeded, the placement should remain even within the 0.01 accuracy.
  Assert.assertTrue((max - min) <= defaultCapacity / 100);
}
/**
 * Verifies rebalance behavior when a preferred assignment is supplied:
 * a valid preferred placement (full replica set) must be kept as-is, while an
 * invalid one (missing a replica) must be replaced with a complete assignment,
 * and the overall load must remain balanced.
 */
@Test(dependsOnMethods = "testEvenness")
public void testRebalanceWithPreferredAssignment() {
  // capacity / weight: every participant gets the full default capacity.
  Map<String, Integer> capacity = new HashMap<>();
  for (String instance : instanceNames) {
    capacity.put(instance, defaultCapacity);
  }
  PartitionWeightProvider weightProvider = new MockPartitionWeightProvider(resourceWeight);
  CapacityProvider capacityProvider = new MockCapacityProvider(capacity, 0);
  PartitionWeightAwareEvennessConstraint evenConstraint =
      new PartitionWeightAwareEvennessConstraint(weightProvider, capacityProvider);
  // Inject a valid partition assignment (nReplicas top-state replicas) for
  // resource 0 into the preferred assignment.
  List<String> instances = instanceNames.subList(0, nReplicas);
  Map<String, Map<String, String>> preferredPartitionAssignment = new HashMap<>();
  Map<String, String> replicaState = new HashMap<>();
  for (String instance : instances) {
    replicaState.put(instance, topState);
  }
  preferredPartitionAssignment.put(partitions.get(0), replicaState);
  Map<String, Map<String, Map<String, String>>> preferredAssignment = new HashMap<>();
  preferredAssignment.put(resourceNames.get(0), preferredPartitionAssignment);
  // Inject an invalid partition assignment (one replica short) for resource 1
  // into the preferred assignment.
  instances = instanceNames.subList(0, nReplicas - 1);
  Map<String, String> invalidReplicaState = new HashMap<>();
  for (String instance : instances) {
    invalidReplicaState.put(instance, topState);
  }
  preferredPartitionAssignment = new HashMap<>();
  preferredPartitionAssignment.put(partitions.get(0), invalidReplicaState);
  preferredAssignment.put(resourceNames.get(1), preferredPartitionAssignment);
  Map<String, Map<String, Map<String, String>>> assignment = new HashMap<>();
  // Type-safe emptyList() replaces the raw Collections.EMPTY_LIST constant.
  ConstraintRebalanceStrategy strategy = new ConstraintRebalanceStrategy(
      Collections.<AbstractRebalanceHardConstraint>emptyList(),
      Collections.<AbstractRebalanceSoftConstraint>singletonList(evenConstraint));
  for (String resourceName : resourceNames) {
    Map<String, Map<String, String>> partitionMap = new HashMap<>();
    strategy.init(resourceName, partitions, states, Integer.MAX_VALUE);
    // getOrDefault avoids the raw Collections.EMPTY_MAP constant and the
    // containsKey/get double lookup of the original code.
    partitionMap.putAll(strategy.computePartitionAssignment(instanceNames, instanceNames,
        preferredAssignment.getOrDefault(resourceName,
            Collections.<String, Map<String, String>>emptyMap()), cache).getMapFields());
    assignment.put(resourceName, partitionMap);
  }
  // Even with preferred assignment, the weight should still be balanced.
  Map<String, Integer> weightCount = checkPartitionUsage(assignment, weightProvider);
  int max = Collections.max(weightCount.values());
  int min = Collections.min(weightCount.values());
  // Since the accuracy of the default evenness constraint is 0.01, the diff
  // should be within 1/100 of the participant capacity at most.
  Assert.assertTrue((max - min) <= defaultCapacity / 100);
  // The resource 0 assignment should be kept the same.
  Collection<String> resource_0_Assignment =
      assignment.get(resourceNames.get(0)).get(partitions.get(0)).keySet();
  Assert.assertTrue(resource_0_Assignment.containsAll(instanceNames.subList(0, nReplicas)));
  Assert.assertEquals(resource_0_Assignment.size(), nReplicas);
  // The resource 1 assignment should be replaced with a valid (complete) one.
  Assert.assertEquals(
      assignment.get(resourceNames.get(1)).get(partitions.get(0)).size(), nReplicas);
}
/**
 * Verifies topology-aware assignment: instances are spread across Rack/Host
 * domains with Rack as the fault zone, and the computed assignment must keep
 * the load balanced while never placing two replicas of the same partition in
 * the same rack.
 */
@Test
public void testTopologyAwareAssignment() {
  // Topology Aware configuration
  ResourceControllerDataProvider cache = new ResourceControllerDataProvider();
  List<LiveInstance> liveInstanceList = new ArrayList<>();
  Map<String, InstanceConfig> instanceConfigs = new HashMap<>();
  for (int i = 0; i < instanceNames.size(); i++) {
    String instance = instanceNames.get(i);
    LiveInstance liveInstance = new LiveInstance(instance);
    liveInstanceList.add(liveInstance);
    InstanceConfig config = new InstanceConfig(instance);
    // Spread instances round-robin over nParticipants / 5 racks; each instance
    // is its own host.
    config.setDomain(String.format("Rack=%s,Host=%s", i % (nParticipants / 5), instance));
    instanceConfigs.put(instance, config);
  }
  cache.setLiveInstances(liveInstanceList);
  cache.setInstanceConfigMap(instanceConfigs);
  ClusterConfig clusterConfig = new ClusterConfig("test");
  clusterConfig.setTopologyAwareEnabled(true);
  clusterConfig.setTopology("/Rack/Host");
  clusterConfig.setFaultZoneType("Rack");
  cache.setClusterConfig(clusterConfig);
  Map<String, Map<String, Map<String, String>>> result = new HashMap<>();
  // Default strategy: no extra hard/soft constraints beyond topology awareness.
  ConstraintRebalanceStrategy strategy = new ConstraintRebalanceStrategy();
  for (String resourceName : resourceNames) {
    Map<String, Map<String, String>> partitionMap = new HashMap<>();
    strategy.init(resourceName, partitions, states, Integer.MAX_VALUE);
    partitionMap.putAll(strategy.computePartitionAssignment(instanceNames, instanceNames,
        new HashMap<String, Map<String, String>>(), cache).getMapFields());
    result.put(resourceName, partitionMap);
  }
  // Every partition carries the same weight here, so usage should be even.
  Map<String, Integer> weightCount = checkPartitionUsage(result, new PartitionWeightProvider() {
    @Override
    public int getPartitionWeight(String resource, String partition) {
      return defaultCapacity;
    }
  });
  int max = Collections.max(weightCount.values());
  int min = Collections.min(weightCount.values());
  // Spread must stay within 1% of participant capacity (default accuracy 0.01).
  Assert.assertTrue((max - min) <= defaultCapacity / 100);
  // check for domain assignment
  Map<String, Set<String>> domainPartitionMap = new HashMap<>();
  for (Map<String, Map<String, String>> partitionMap : result.values()) {
    // Reset per resource: the fault-zone rule applies within each resource.
    domainPartitionMap.clear();
    for (String partition : partitionMap.keySet()) {
      for (String instance : partitionMap.get(partition).keySet()) {
        // Extract the rack name from the "Rack=<rack>,Host=<host>" domain string.
        String domain = instanceConfigs.get(instance).getDomainAsString().split(",")[0].split("=")[1];
        if (domainPartitionMap.containsKey(domain)) {
          // No two replicas of the same partition may share a fault zone (rack).
          Assert.assertFalse(domainPartitionMap.get(domain).contains(partition));
        } else {
          domainPartitionMap.put(domain, new HashSet<String>());
        }
        domainPartitionMap.get(domain).add(partition);
      }
    }
  }
}
}
| 9,754 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/util/TestResourceUsageCalculator.java
|
package org.apache.helix.controller.rebalancer.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.model.Partition;
import org.apache.helix.model.ResourceAssignment;
import org.apache.helix.util.TestInputLoader;
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
/**
 * Unit tests for {@link ResourceUsageCalculator}: baseline-divergence
 * measurement and average partition weight calculation.
 */
public class TestResourceUsageCalculator {

  /**
   * Divergence is 1.0 when either side is empty or nothing matches, 0.0 for
   * identical assignments, and proportional to the unmatched fraction otherwise.
   */
  @Test(dataProvider = "TestMeasureBaselineDivergenceInput")
  public void testMeasureBaselineDivergence(Map<String, Map<String, Map<String, String>>> baseline,
      Map<String, Map<String, Map<String, String>>> someMatchBestPossible,
      Map<String, Map<String, Map<String, String>>> noMatchBestPossible) {
    Map<String, ResourceAssignment> baselineAssignment = buildResourceAssignment(baseline);
    Map<String, ResourceAssignment> partialMatchAssignment =
        buildResourceAssignment(someMatchBestPossible);
    Map<String, ResourceAssignment> zeroMatchAssignment =
        buildResourceAssignment(noMatchBestPossible);
    // An empty best possible assignment diverges completely.
    Assert.assertEquals(ResourceUsageCalculator
        .measureBaselineDivergence(baselineAssignment, Collections.emptyMap()), 1.0d);
    // So does an empty baseline assignment.
    Assert.assertEquals(ResourceUsageCalculator
        .measureBaselineDivergence(Collections.emptyMap(), zeroMatchAssignment), 1.0d);
    // No matching replica placement at all -> full divergence.
    Assert.assertEquals(ResourceUsageCalculator
        .measureBaselineDivergence(baselineAssignment, zeroMatchAssignment), 1.0d);
    // One of three replica placements matches -> divergence is 1 - 1/3.
    Assert.assertEquals(ResourceUsageCalculator
            .measureBaselineDivergence(baselineAssignment, partialMatchAssignment),
        (1.0d - (double) 1 / (double) 3));
    // Identical assignments have zero divergence.
    Assert.assertEquals(
        ResourceUsageCalculator.measureBaselineDivergence(baselineAssignment, baselineAssignment),
        0.0d);
  }

  @Test
  public void testCalculateAveragePartitionWeight() {
    Map<String, Map<String, Integer>> partitionCapacities = ImmutableMap.of(
        "partition1", ImmutableMap.of("capacity1", 20, "capacity2", 40),
        "partition2", ImmutableMap.of("capacity1", 30, "capacity2", 50),
        "partition3", ImmutableMap.of("capacity1", 16, "capacity2", 30));
    // Expected integer averages: capacity1 -> (20+30+16)/3 = 22,
    // capacity2 -> (40+50+30)/3 = 40.
    Map<String, Integer> averageWeights =
        ResourceUsageCalculator.calculateAveragePartitionWeight(partitionCapacities);
    Assert.assertNotNull(averageWeights);
    Assert.assertEquals(averageWeights, ImmutableMap.of("capacity1", 22, "capacity2", 40));
  }

  /** Converts a nested resource -> partition -> replica-state map into ResourceAssignments. */
  private Map<String, ResourceAssignment> buildResourceAssignment(
      Map<String, Map<String, Map<String, String>>> resourceMap) {
    Map<String, ResourceAssignment> assignments = new HashMap<>();
    resourceMap.forEach((resourceName, partitionMap) -> {
      ResourceAssignment resourceAssignment = new ResourceAssignment(resourceName);
      partitionMap.forEach((partitionName, replicaMap) ->
          resourceAssignment.addReplicaMap(new Partition(partitionName), replicaMap));
      assignments.put(resourceName, resourceAssignment);
    });
    return assignments;
  }

  /** Loads the divergence test fixtures from the shared JSON input file. */
  @DataProvider(name = "TestMeasureBaselineDivergenceInput")
  private Object[][] loadTestMeasureBaselineDivergenceInput() {
    return TestInputLoader.loadTestInputs(
        "TestResourceUsageCalculator.MeasureBaselineDivergence.json",
        new String[]{"baseline", "someMatchBestPossible", "noMatchBestPossible"});
  }
}
| 9,755 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/TestWagedInstanceCapacity.java
|
package org.apache.helix.controller.rebalancer.waged;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.commons.lang3.RandomUtils;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.Resource;
import org.apache.helix.model.ResourceConfig;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
/**
 * Unit tests for {@link WagedInstanceCapacity}: verifies that charging a
 * partition against an instance reduces the available capacity, that a charge
 * exceeding the remaining capacity is rejected without side effects, and that
 * charging the same replica twice is idempotent.
 */
public class TestWagedInstanceCapacity {
  private static final int INSTANCE_COUNT = 3;
  private static final int RESOURCE_COUNT = 1;
  private static final int PARTITION_COUNT = 3;
  private static final List<String> CAPACITY_KEYS = Lists.newArrayList("CU", "PARTCOUNT", "DISK");
  // Per-instance total capacity used by every instance in the test cluster.
  private static final Map<String, Integer> DEFAULT_INSTANCE_CAPACITY_MAP =
      ImmutableMap.of("CU", 100, "PARTCOUNT", 10, "DISK", 100);
  // Per-partition default capacity requirement.
  private static final Map<String, Integer> DEFAULT_PART_CAPACITY_MAP =
      ImmutableMap.of("CU", 40, "PARTCOUNT", 1, "DISK", 1);
  private ResourceControllerDataProvider _clusterData;
  private Map<String, Resource> _resourceMap;
  private CurrentStateOutput _currentStateOutput;
  private WagedInstanceCapacity _wagedInstanceCapacity;

  @BeforeMethod
  public void setUp() {
    // prepare cluster data
    _clusterData = new ResourceControllerDataProvider();
    Map<String, InstanceConfig> instanceConfigMap = generateInstanceCapacityConfigs();
    _clusterData.setInstanceConfigMap(instanceConfigMap);
    _clusterData.setResourceConfigMap(generateResourcePartitionCapacityConfigs());
    _clusterData.setIdealStates(generateIdealStates());
    ClusterConfig clusterConfig = new ClusterConfig("test");
    clusterConfig.setTopologyAwareEnabled(false);
    clusterConfig.setInstanceCapacityKeys(CAPACITY_KEYS);
    _clusterData.setClusterConfig(clusterConfig);
    // prepare current state output
    _resourceMap = generateResourceMap();
    _currentStateOutput = populateCurrentStatesForResources(_resourceMap, instanceConfigMap.keySet());
    // prepare instance of waged-instance capacity
    _wagedInstanceCapacity = new WagedInstanceCapacity(_clusterData);
  }

  @Test
  public void testProcessCurrentState() {
    // Charging 10 CU against a 100 CU instance should succeed and leave 90 CU.
    Map<String, Integer> partCapMap = ImmutableMap.of("CU", 10, "PARTCOUNT", 10, "DISK", 100);
    Assert.assertTrue(_wagedInstanceCapacity.checkAndReduceInstanceCapacity(
        "instance-0", "resource-0", "partition-0", partCapMap));
    Map<String, Integer> instanceAvailableCapacity =
        _wagedInstanceCapacity.getInstanceAvailableCapacity("instance-0");
    // assertEquals reports both values on failure, unlike assertTrue(equals(...)).
    Assert.assertEquals(instanceAvailableCapacity.get("CU").intValue(), 90);
  }

  @Test
  public void testProcessCurrentStateWithUnableToAssignPart() {
    // A 110 CU charge exceeds the 100 CU capacity: the charge must be rejected
    // and the available capacity must stay untouched.
    Map<String, Integer> partCapMap = ImmutableMap.of("CU", 110, "PARTCOUNT", 10, "DISK", 100);
    Assert.assertFalse(_wagedInstanceCapacity.checkAndReduceInstanceCapacity(
        "instance-0", "resource-0", "partition-0", partCapMap));
    Map<String, Integer> instanceAvailableCapacity =
        _wagedInstanceCapacity.getInstanceAvailableCapacity("instance-0");
    Assert.assertEquals(instanceAvailableCapacity.get("CU").intValue(), 100);
  }

  @Test
  public void testProcessCurrentStateWithDoubleCharge() {
    Map<String, Integer> partCapMap = ImmutableMap.of("CU", 10, "PARTCOUNT", 10, "DISK", 100);
    Assert.assertTrue(_wagedInstanceCapacity.checkAndReduceInstanceCapacity(
        "instance-0", "resource-0", "partition-0", partCapMap));
    // Charging the same replica again is reported as success but must only
    // reduce the capacity once (idempotent charge).
    Assert.assertTrue(_wagedInstanceCapacity.checkAndReduceInstanceCapacity(
        "instance-0", "resource-0", "partition-0", partCapMap));
    Map<String, Integer> instanceAvailableCapacity =
        _wagedInstanceCapacity.getInstanceAvailableCapacity("instance-0");
    Assert.assertEquals(instanceAvailableCapacity.get("CU").intValue(), 90);
  }

  // -- static helpers

  /** Builds configs for "instance-0..N", all carrying the default instance capacity map. */
  private Map<String, InstanceConfig> generateInstanceCapacityConfigs() {
    Map<String, InstanceConfig> instanceConfigMap = new HashMap<>();
    for (int i = 0; i < INSTANCE_COUNT; i++) {
      String instanceName = "instance-" + i;
      InstanceConfig config = new InstanceConfig(instanceName);
      config.setInstanceCapacityMap(DEFAULT_INSTANCE_CAPACITY_MAP);
      instanceConfigMap.put(instanceName, config);
    }
    return instanceConfigMap;
  }

  /** Builds resource configs whose DEFAULT partition capacity is the default partition map. */
  private Map<String, ResourceConfig> generateResourcePartitionCapacityConfigs() {
    Map<String, ResourceConfig> resourceConfigMap = new HashMap<>();
    try {
      Map<String, Map<String, Integer>> partitionsCapacityMap = new HashMap<>();
      partitionsCapacityMap.put("DEFAULT", DEFAULT_PART_CAPACITY_MAP);
      for (String resourceName : getResourceNames()) {
        ResourceConfig config = new ResourceConfig(resourceName);
        config.setPartitionCapacityMap(partitionsCapacityMap);
        resourceConfigMap.put(resourceName, config);
      }
    } catch (IOException e) {
      // Preserve the original cause so serialization failures stay diagnosable.
      throw new RuntimeException("error while setting partition capacity map", e);
    }
    return resourceConfigMap;
  }

  /** Creates one FULL_AUTO, WAGED-managed IdealState per resource. */
  private List<IdealState> generateIdealStates() {
    return getResourceNames().stream()
        .map(resourceName -> {
          IdealState idealState = new IdealState(resourceName);
          idealState.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
          idealState.setRebalancerClassName(WagedRebalancer.class.getName());
          return idealState;
        })
        .collect(Collectors.toList());
  }

  /**
   * Populates a CurrentStateOutput where, for every partition, one randomly
   * chosen instance is MASTER and all remaining instances are SLAVE.
   */
  private static CurrentStateOutput populateCurrentStatesForResources(
      Map<String, Resource> resourceMap, Set<String> instanceNames) {
    CurrentStateOutput currentStateOutput = new CurrentStateOutput();
    resourceMap.forEach((resourceName, resource) ->
        resource.getPartitions().forEach(partition -> {
          int masterPartIdx = RandomUtils.nextInt(0, instanceNames.size());
          int idx = 0;
          for (Iterator<String> it = instanceNames.iterator(); it.hasNext(); idx++) {
            currentStateOutput.setCurrentState(
                resourceName, partition, it.next(), (idx == masterPartIdx) ? "MASTER" : "SLAVE");
          }
        }));
    return currentStateOutput;
  }

  /** Builds "resource-i" resources, each with PARTITION_COUNT partitions. */
  private static Map<String, Resource> generateResourceMap() {
    return getResourceNames().stream()
        .map(resourceName -> {
          Resource resource = new Resource(resourceName);
          IntStream.range(0, PARTITION_COUNT)
              .mapToObj(i -> "partition-" + i)
              .forEach(resource::addPartition);
          return resource;
        })
        .collect(Collectors.toMap(Resource::getResourceName, Function.identity()));
  }

  private static List<String> getResourceNames() {
    return IntStream.range(0, RESOURCE_COUNT)
        .mapToObj(i -> "resource-" + i)
        .collect(Collectors.toList());
  }
}
| 9,756 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/TestWagedRebalancerMetrics.java
|
package org.apache.helix.controller.rebalancer.waged;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import javax.management.AttributeNotFoundException;
import javax.management.JMException;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import org.apache.helix.HelixConstants;
import org.apache.helix.HelixRebalanceException;
import org.apache.helix.TestHelper;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.pipeline.Pipeline;
import org.apache.helix.controller.rebalancer.waged.constraints.MockRebalanceAlgorithm;
import org.apache.helix.controller.rebalancer.waged.model.AbstractTestClusterModel;
import org.apache.helix.controller.stages.AttributeName;
import org.apache.helix.controller.stages.ClusterEvent;
import org.apache.helix.controller.stages.ClusterEventType;
import org.apache.helix.controller.stages.CurrentStateComputationStage;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.controller.stages.ReadClusterDataStage;
import org.apache.helix.mock.MockManager;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Resource;
import org.apache.helix.monitoring.mbeans.ClusterStatusMonitor;
import org.apache.helix.monitoring.mbeans.InstanceMonitor;
import org.apache.helix.monitoring.metrics.MetricCollector;
import org.apache.helix.monitoring.metrics.WagedRebalancerMetricCollector;
import org.apache.helix.monitoring.metrics.model.CountMetric;
import org.apache.helix.monitoring.metrics.model.RatioMetric;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.when;
/**
 * Tests for the metrics emitted around the WAGED rebalancer: metric value
 * propagation through the collector, the baseline/partial-rebalance counters
 * and baseline-divergence gauge, and the per-instance capacity gauges exposed
 * via JMX by the cluster status monitor.
 */
public class TestWagedRebalancerMetrics extends AbstractTestClusterModel {
  private static final String TEST_STRING = "TEST";
  // Collector under test; re-created inside tests to isolate metric state.
  private MetricCollector _metricCollector;
  // Names of all instances registered in the mocked cluster data cache.
  private Set<String> _instances;
  private MockRebalanceAlgorithm _algorithm;
  private MockAssignmentMetadataStore _metadataStore;

  @BeforeClass
  public void initialize() {
    super.initialize();
    _instances = new HashSet<>();
    _instances.add(_testInstanceId);
    _algorithm = new MockRebalanceAlgorithm();
    // Initialize a mock assignment metadata store
    _metadataStore = new MockAssignmentMetadataStore();
  }

  /**
   * A full rebalance pass should emit at least one non-zero value into the
   * collector's metric map.
   */
  @Test
  public void testMetricValuePropagation()
      throws JMException, HelixRebalanceException, IOException {
    _metadataStore.reset();
    _metricCollector = new WagedRebalancerMetricCollector(TEST_STRING);
    WagedRebalancer rebalancer =
        new WagedRebalancer(_metadataStore, _algorithm, Optional.of(_metricCollector));
    // Generate the input for the rebalancer.
    ResourceControllerDataProvider clusterData = setupClusterDataCache();
    Map<String, Resource> resourceMap = clusterData.getIdealStates().entrySet().stream()
        .collect(Collectors.toMap(entry -> entry.getKey(), entry -> {
          Resource resource = new Resource(entry.getKey());
          entry.getValue().getPartitionSet().stream()
              .forEach(partition -> resource.addPartition(partition));
          return resource;
        }));
    // Result is ignored; this test only checks the side effect on the metrics.
    Map<String, IdealState> newIdealStates =
        rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput());
    // Check that there exists a non-zero value in the metrics
    Assert.assertTrue(_metricCollector.getMetricMap().values().stream()
        .anyMatch(metric -> (long) metric.getLastEmittedMetricValue() > 0L));
  }

  /**
   * Verifies the baseline/partial rebalance counters and the baseline
   * divergence gauge: all start at zero, and a cluster-config change triggers
   * exactly one baseline recalculation and one partial rebalance.
   */
  @Test
  public void testWagedRebalanceMetrics()
      throws Exception {
    _metadataStore.reset();
    MetricCollector metricCollector = new WagedRebalancerMetricCollector(TEST_STRING);
    WagedRebalancer rebalancer =
        new WagedRebalancer(_metadataStore, _algorithm, Optional.of(metricCollector));
    // Generate the input for the rebalancer.
    ResourceControllerDataProvider clusterData = setupClusterDataCache();
    Map<String, Resource> resourceMap = clusterData.getIdealStates().entrySet().stream()
        .collect(Collectors.toMap(entry -> entry.getKey(), entry -> {
          Resource resource = new Resource(entry.getKey());
          entry.getValue().getPartitionSet().stream()
              .forEach(partition -> resource.addPartition(partition));
          return resource;
        }));
    // Before any rebalance runs, every counter/gauge must read zero.
    Assert.assertEquals((long) metricCollector.getMetric(
        WagedRebalancerMetricCollector.WagedRebalancerMetricNames.GlobalBaselineCalcCounter.name(),
        CountMetric.class).getLastEmittedMetricValue(), 0L);
    Assert.assertEquals((long) metricCollector.getMetric(
        WagedRebalancerMetricCollector.WagedRebalancerMetricNames.PartialRebalanceCounter.name(),
        CountMetric.class).getLastEmittedMetricValue(), 0L);
    Assert.assertEquals((double) metricCollector.getMetric(
        WagedRebalancerMetricCollector.WagedRebalancerMetricNames.BaselineDivergenceGauge.name(),
        RatioMetric.class).getLastEmittedMetricValue(), 0.0d);
    // Cluster config change will trigger baseline recalculation and partial rebalance.
    when(clusterData.getRefreshedChangeTypes())
        .thenReturn(Collections.singleton(HelixConstants.ChangeType.CLUSTER_CONFIG));
    // Add a field to the cluster config so the cluster config will be marked as changed in the change detector.
    clusterData.getClusterConfig().getRecord().setSimpleField("foo", "bar");
    rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput());
    Assert.assertEquals((long) metricCollector.getMetric(
        WagedRebalancerMetricCollector.WagedRebalancerMetricNames.GlobalBaselineCalcCounter.name(),
        CountMetric.class).getLastEmittedMetricValue(), 1L);
    Assert.assertEquals((long) metricCollector.getMetric(
        WagedRebalancerMetricCollector.WagedRebalancerMetricNames.PartialRebalanceCounter.name(),
        CountMetric.class).getLastEmittedMetricValue(), 1L);
    // Wait for asyncReportBaselineDivergenceGauge to complete and verify.
    Assert.assertTrue(TestHelper.verify(() -> (double) metricCollector.getMetric(
        WagedRebalancerMetricCollector.WagedRebalancerMetricNames.BaselineDivergenceGauge.name(),
        RatioMetric.class).getLastEmittedMetricValue() == 0.0d, TestHelper.WAIT_DURATION));
  }

  /*
   * Integration test for WAGED instance capacity metrics.
   */
  @Test
  public void testInstanceCapacityMetrics() throws Exception {
    final String clusterName = TestHelper.getTestMethodName();
    final ClusterStatusMonitor monitor = new ClusterStatusMonitor(clusterName);
    ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
    ResourceControllerDataProvider cache = setupClusterDataCache();
    Map<String, Resource> resourceMap = cache.getIdealStates().entrySet().stream()
        .collect(Collectors.toMap(Map.Entry::getKey, entry -> {
          Resource resource = new Resource(entry.getKey());
          entry.getValue().getPartitionSet().forEach(resource::addPartition);
          return resource;
        }));
    // Populate the controller pipeline event with the mocked cluster state.
    event.addAttribute(AttributeName.helixmanager.name(), new MockManager());
    event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);
    event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);
    event.addAttribute(AttributeName.clusterStatusMonitor.name(), monitor);
    // Run the read + current-state stages, which report the capacity metrics.
    Pipeline rebalancePipeline = new Pipeline();
    rebalancePipeline.addStage(new ReadClusterDataStage());
    rebalancePipeline.addStage(new CurrentStateComputationStage());
    rebalancePipeline.handle(event);
    final MBeanServerConnection mBeanServer = ManagementFactory.getPlatformMBeanServer();
    for (String instance : _instances) {
      String instanceBeanName = String.format("%s=%s,instanceName=%s",
          ClusterStatusMonitor.CLUSTER_DN_KEY, clusterName, instance);
      ObjectName instanceObjectName = monitor.getObjectName(instanceBeanName);
      // The per-instance MBean is registered asynchronously; poll until it shows up.
      Assert.assertTrue(TestHelper
          .verify(() -> mBeanServer.isRegistered(instanceObjectName),
              TestHelper.WAIT_DURATION));
      // Verify capacity gauge metrics
      for (Map.Entry<String, Integer> capacityEntry : _capacityDataMap.entrySet()) {
        String capacityKey = capacityEntry.getKey();
        String attributeName = capacityKey + "Gauge";
        Assert.assertTrue(TestHelper.verify(() -> {
          try {
            return (long) mBeanServer.getAttribute(instanceObjectName, attributeName)
                == _capacityDataMap.get(capacityKey);
          } catch (AttributeNotFoundException e) {
            // Attribute not registered yet; keep polling.
            return false;
          }
        }, TestHelper.WAIT_DURATION), "Instance capacity gauge metric is not found or incorrect!");
        Assert.assertEquals((long) mBeanServer.getAttribute(instanceObjectName, attributeName),
            (long) _capacityDataMap.get(capacityKey));
      }
      // Verify MaxCapacityUsageGauge
      Assert.assertTrue(TestHelper.verify(() -> {
        try {
          double actualMaxUsage = (double) mBeanServer.getAttribute(instanceObjectName,
              InstanceMonitor.InstanceMonitorMetric.MAX_CAPACITY_USAGE_GAUGE.metricName());
          // The values are manually calculated from the capacity configs, to make the code simple.
          double expectedMaxUsage = instance.equals(_testInstanceId) ? 0.4 : 0.0;
          return Math.abs(actualMaxUsage - expectedMaxUsage) < 0.000001d;
        } catch (AttributeNotFoundException e) {
          return false;
        }
      }, TestHelper.WAIT_DURATION), "MaxCapacityUsageGauge is not found or incorrect");
    }
  }

  /**
   * Extends the base mocked cache with FULL_AUTO WAGED ideal states and two
   * additional live instances so capacity metrics cover multiple instances.
   */
  @Override
  protected ResourceControllerDataProvider setupClusterDataCache() throws IOException {
    ResourceControllerDataProvider testCache = super.setupClusterDataCache();
    // Set up mock idealstate
    Map<String, IdealState> isMap = new HashMap<>();
    for (String resource : _resourceNames) {
      IdealState is = new IdealState(resource);
      is.setNumPartitions(_partitionNames.size());
      is.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
      is.setStateModelDefRef("MasterSlave");
      is.setReplicas("100");
      is.setRebalancerClassName(WagedRebalancer.class.getName());
      _partitionNames.stream()
          .forEach(partition -> is.setPreferenceList(partition, Collections.emptyList()));
      isMap.put(resource, is);
    }
    when(testCache.getIdealState(anyString())).thenAnswer(
        (Answer<IdealState>) invocationOnMock -> isMap.get(invocationOnMock.getArguments()[0]));
    when(testCache.getIdealStates()).thenReturn(isMap);
    when(testCache.getAsyncTasksThreadPool()).thenReturn(Executors.newSingleThreadExecutor());
    // Set up 2 more instances
    for (int i = 1; i < 3; i++) {
      String instanceName = _testInstanceId + i;
      _instances.add(instanceName);
      // 1. Set up the default instance information with capacity configuration.
      InstanceConfig testInstanceConfig = createMockInstanceConfig(instanceName);
      Map<String, InstanceConfig> instanceConfigMap = testCache.getInstanceConfigMap();
      instanceConfigMap.put(instanceName, testInstanceConfig);
      when(testCache.getInstanceConfigMap()).thenReturn(instanceConfigMap);
      // 2. Mock the live instance node for the default instance.
      LiveInstance testLiveInstance = createMockLiveInstance(instanceName);
      Map<String, LiveInstance> liveInstanceMap = testCache.getLiveInstances();
      liveInstanceMap.put(instanceName, testLiveInstance);
      when(testCache.getLiveInstances()).thenReturn(liveInstanceMap);
      when(testCache.getEnabledInstances()).thenReturn(liveInstanceMap.keySet());
      when(testCache.getEnabledLiveInstances()).thenReturn(liveInstanceMap.keySet());
      when(testCache.getAllInstances()).thenReturn(_instances);
    }
    return testCache;
  }
}
| 9,757 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/TestWagedRebalancer.java
|
package org.apache.helix.controller.rebalancer.waged;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.helix.HelixConstants;
import org.apache.helix.HelixRebalanceException;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.rebalancer.strategy.CrushRebalanceStrategy;
import org.apache.helix.controller.rebalancer.waged.constraints.MockRebalanceAlgorithm;
import org.apache.helix.controller.rebalancer.waged.model.AbstractTestClusterModel;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterModel;
import org.apache.helix.controller.rebalancer.waged.model.OptimalAssignment;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.apache.helix.model.ResourceAssignment;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.monitoring.metrics.WagedRebalancerMetricCollector;
import org.apache.helix.monitoring.metrics.model.CountMetric;
import org.apache.helix.monitoring.metrics.model.LatencyMetric;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class TestWagedRebalancer extends AbstractTestClusterModel {
private MockRebalanceAlgorithm _algorithm;
private MockAssignmentMetadataStore _metadataStore;
/** Prepares the shared mock algorithm and assignment metadata store before any test runs. */
@BeforeClass
public void initialize() {
  super.initialize();
  // A fresh mock metadata store backs the rebalancer's persisted assignment records.
  _metadataStore = new MockAssignmentMetadataStore();
  // The mock algorithm captures its latest rebalance result so tests can validate it.
  _algorithm = new MockRebalanceAlgorithm();
}
/**
 * Extends the base mock cluster data cache with WAGED-specific state: a FULL_AUTO
 * {@link IdealState} (MasterSlave, 3 replicas, WAGED rebalancer class) for every test
 * resource, plus two additional live instances on top of the base test instance.
 *
 * @return a mocked {@link ResourceControllerDataProvider} ready for rebalance computation
 * @throws IOException if the base cache setup fails to load its test payloads
 */
@Override
protected ResourceControllerDataProvider setupClusterDataCache() throws IOException {
  ResourceControllerDataProvider testCache = super.setupClusterDataCache();
  // Set up mock idealstate
  Map<String, IdealState> isMap = new HashMap<>();
  for (String resource : _resourceNames) {
    IdealState is = new IdealState(resource);
    is.setNumPartitions(_partitionNames.size());
    is.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
    is.setStateModelDefRef("MasterSlave");
    is.setReplicas("3");
    is.setRebalancerClassName(WagedRebalancer.class.getName());
    // Preference lists start empty; the WAGED rebalancer is expected to compute them.
    _partitionNames
        .forEach(partition -> is.setPreferenceList(partition, Collections.emptyList()));
    isMap.put(resource, is);
  }
  // Answer-based stub so lookups resolve against the mutable isMap built above.
  when(testCache.getIdealState(anyString())).thenAnswer(
      (Answer<IdealState>) invocationOnMock -> isMap.get(invocationOnMock.getArguments()[0]));
  when(testCache.getIdealStates()).thenReturn(isMap);
  // Set up 2 more instances
  for (int i = 1; i < 3; i++) {
    String instanceName = _testInstanceId + i;
    _instances.add(instanceName);
    // 1. Set up the default instance information with capacity configuration.
    InstanceConfig testInstanceConfig = createMockInstanceConfig(instanceName);
    Map<String, InstanceConfig> instanceConfigMap = testCache.getInstanceConfigMap();
    instanceConfigMap.put(instanceName, testInstanceConfig);
    when(testCache.getInstanceConfigMap()).thenReturn(instanceConfigMap);
    // 2. Mock the live instance node for the default instance.
    LiveInstance testLiveInstance = createMockLiveInstance(instanceName);
    Map<String, LiveInstance> liveInstanceMap = testCache.getLiveInstances();
    liveInstanceMap.put(instanceName, testLiveInstance);
    when(testCache.getLiveInstances()).thenReturn(liveInstanceMap);
    // All instances are treated as enabled and live for these tests.
    when(testCache.getEnabledInstances()).thenReturn(liveInstanceMap.keySet());
    when(testCache.getEnabledLiveInstances()).thenReturn(liveInstanceMap.keySet());
    when(testCache.getAllInstances()).thenReturn(_instances);
  }
  return testCache;
}
/**
 * Verifies a full rebalance cycle: a CLUSTER_CONFIG change triggers baseline plus best
 * possible recalculation, and a subsequent call with an empty resource list cleans up
 * all persisted assignment state in the metadata store.
 */
@Test
public void testRebalance() throws IOException, HelixRebalanceException {
  _metadataStore.reset();
  WagedRebalancer rebalancer = new WagedRebalancer(_metadataStore, _algorithm, Optional.empty());
  // Generate the input for the rebalancer.
  ResourceControllerDataProvider clusterData = setupClusterDataCache();
  Map<String, Resource> resourceMap = clusterData.getIdealStates().entrySet().stream()
      .collect(Collectors.toMap(Map.Entry::getKey, entry -> {
        Resource resource = new Resource(entry.getKey());
        entry.getValue().getPartitionSet().forEach(resource::addPartition);
        return resource;
      }));
  // Mocking the change types for triggering a baseline rebalance.
  when(clusterData.getRefreshedChangeTypes())
      .thenReturn(Collections.singleton(HelixConstants.ChangeType.CLUSTER_CONFIG));
  when(clusterData.checkAndReduceCapacity(Mockito.any(), Mockito.any(),
      Mockito.any())).thenReturn(true);
  Map<String, IdealState> newIdealStates =
      rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput());
  Map<String, ResourceAssignment> algorithmResult = _algorithm.getRebalanceResult();
  // Since there is no special condition, the calculated IdealStates should be exactly the same
  // as the mock algorithm result.
  validateRebalanceResult(resourceMap, newIdealStates, algorithmResult);
  // Both the baseline and the best possible assignment must have been persisted.
  Assert.assertFalse(_metadataStore.getBaseline().isEmpty());
  Assert.assertFalse(_metadataStore.getBestPossibleAssignment().isEmpty());
  // Calculate with empty resource list. The rebalancer shall clean up all the assignment status.
  when(clusterData.getRefreshedChangeTypes())
      .thenReturn(Collections.singleton(HelixConstants.ChangeType.IDEAL_STATE));
  clusterData.getIdealStates().clear();
  newIdealStates = rebalancer
      .computeNewIdealStates(clusterData, Collections.emptyMap(), new CurrentStateOutput());
  Assert.assertTrue(newIdealStates.isEmpty());
  Assert.assertTrue(_metadataStore.getBaseline().isEmpty());
  Assert.assertTrue(_metadataStore.getBestPossibleAssignment().isEmpty());
}
/**
 * Verifies that when the input resource map covers only a subset of the resources known to
 * the cluster data cache, the rebalancer computes assignments for exactly that subset.
 */
@Test(dependsOnMethods = "testRebalance")
public void testPartialRebalance() throws IOException, HelixRebalanceException {
  _metadataStore.reset();
  WagedRebalancer rebalancer = new WagedRebalancer(_metadataStore, _algorithm, Optional.empty());
  // Build the rebalancer input from the mocked cluster data cache.
  ResourceControllerDataProvider clusterData = setupClusterDataCache();
  Map<String, Resource> inputResources = new HashMap<>();
  for (Map.Entry<String, IdealState> isEntry : clusterData.getIdealStates().entrySet()) {
    Resource resource = new Resource(isEntry.getKey());
    for (String partitionName : isEntry.getValue().getPartitionSet()) {
      resource.addPartition(partitionName);
    }
    inputResources.put(isEntry.getKey(), resource);
  }
  // A cluster config change forces the baseline to be recalculated.
  when(clusterData.getRefreshedChangeTypes())
      .thenReturn(Collections.singleton(HelixConstants.ChangeType.CLUSTER_CONFIG));
  // Drop the first resource from the input only; it still exists in the cluster data cache.
  _metadataStore.reset();
  inputResources.remove(_resourceNames.get(0));
  Map<String, IdealState> computedIdealStates =
      rebalancer.computeNewIdealStates(clusterData, inputResources, new CurrentStateOutput());
  // The computed result must cover exactly the resources that remained in the input.
  validateRebalanceResult(inputResources, computedIdealStates, _algorithm.getRebalanceResult());
}
/**
 * Verifies that the computed ideal states are adjusted against the current state:
 * replicas of a disabled resource are forced OFFLINE, and current-state partitions
 * unknown to the resource are marked DROPPED.
 */
@Test(dependsOnMethods = "testRebalance")
public void testRebalanceWithCurrentState() throws IOException, HelixRebalanceException {
  _metadataStore.reset();
  WagedRebalancer rebalancer = new WagedRebalancer(_metadataStore, _algorithm, Optional.empty());
  // Generate the input for the rebalancer.
  ResourceControllerDataProvider clusterData = setupClusterDataCache();
  Map<String, Resource> resourceMap = clusterData.getIdealStates().entrySet().stream()
      .collect(Collectors.toMap(Map.Entry::getKey, entry -> {
        Resource resource = new Resource(entry.getKey());
        entry.getValue().getPartitionSet().forEach(resource::addPartition);
        return resource;
      }));
  // Mocking the change types for triggering a baseline rebalance.
  when(clusterData.getRefreshedChangeTypes())
      .thenReturn(Collections.singleton(HelixConstants.ChangeType.CLUSTER_CONFIG));
  // Test with current state exists, so the rebalancer should calculate for the intermediate state
  // Create current state based on the cluster data cache.
  CurrentStateOutput currentStateOutput = new CurrentStateOutput();
  for (String instanceName : _instances) {
    for (Map.Entry<String, CurrentState> csEntry : clusterData
        .getCurrentState(instanceName, _sessionId).entrySet()) {
      String resourceName = csEntry.getKey();
      CurrentState cs = csEntry.getValue();
      for (Map.Entry<String, String> partitionStateEntry : cs.getPartitionStateMap().entrySet()) {
        currentStateOutput.setCurrentState(resourceName,
            new Partition(partitionStateEntry.getKey()), instanceName,
            partitionStateEntry.getValue());
      }
    }
  }
  // The state calculation will be adjusted based on the current state.
  // So test the following cases:
  // 1.1. Disable a resource, and the partitions in CS will be offline.
  String disabledResourceName = _resourceNames.get(0);
  clusterData.getIdealState(disabledResourceName).enable(false);
  // 1.2. Adding more unknown partitions to the CS, so they will be dropped.
  String droppingResourceName = _resourceNames.get(1);
  String droppingPartitionName = "UnknownPartition";
  String droppingFromInstance = _testInstanceId;
  currentStateOutput.setCurrentState(droppingResourceName, new Partition(droppingPartitionName),
      droppingFromInstance, "SLAVE");
  resourceMap.get(droppingResourceName).addPartition(droppingPartitionName);
  Map<String, IdealState> newIdealStates =
      rebalancer.computeNewIdealStates(clusterData, resourceMap, currentStateOutput);
  // All the replica state should be OFFLINE
  IdealState disabledIdealState = newIdealStates.get(disabledResourceName);
  for (String partition : disabledIdealState.getPartitionSet()) {
    Assert.assertTrue(disabledIdealState.getInstanceStateMap(partition).values().stream()
        .allMatch(state -> state.equals("OFFLINE")));
  }
  // the dropped partition should be dropped.
  IdealState droppedIdealState = newIdealStates.get(droppingResourceName);
  Assert.assertEquals(
      droppedIdealState.getInstanceStateMap(droppingPartitionName).get(droppingFromInstance),
      "DROPPED");
}
/**
 * Verifies the fallback behavior when the persisted baseline only partially covers the
 * cluster: current state replicas missing from the baseline are merged into the context,
 * the merged map is fed to both the global and partial rebalance passes, and it is what
 * finally lands back in the metadata store.
 *
 * Fix: removed a dead {@code new OptimalAssignment();} statement inside the mock
 * {@link Answer} that allocated an object and immediately discarded it.
 */
@Test(dependsOnMethods = "testRebalance")
public void testPartialBaselineAvailability() throws IOException, HelixRebalanceException {
  // Seed the metadata store with a baseline/best possible that covers only resource 0.
  Map<String, ResourceAssignment> testResourceAssignmentMap = new HashMap<>();
  ZNRecord mappingNode = new ZNRecord(_resourceNames.get(0));
  HashMap<String, String> mapping = new HashMap<>();
  mapping.put(_testInstanceId, "MASTER");
  mappingNode.setMapField(_partitionNames.get(0), mapping);
  testResourceAssignmentMap.put(_resourceNames.get(0), new ResourceAssignment(mappingNode));
  _metadataStore.reset();
  _metadataStore.persistBaseline(testResourceAssignmentMap);
  _metadataStore.persistBestPossibleAssignment(testResourceAssignmentMap);
  // Test algorithm that passes along the best possible assignment
  RebalanceAlgorithm algorithm = Mockito.mock(RebalanceAlgorithm.class);
  when(algorithm.calculate(any())).thenAnswer(new Answer<OptimalAssignment>() {
    @Override
    public OptimalAssignment answer(InvocationOnMock invocation) throws Throwable {
      Object[] args = invocation.getArguments();
      ClusterModel argClusterModel = (ClusterModel) args[0];
      // Echo back whatever best possible assignment the cluster model context carries.
      OptimalAssignment optimalAssignment = Mockito.mock(OptimalAssignment.class);
      when(optimalAssignment.getOptimalResourceAssignment())
          .thenReturn(argClusterModel.getContext().getBestPossibleAssignment());
      return optimalAssignment;
    }
  });
  WagedRebalancer rebalancer = new WagedRebalancer(_metadataStore, algorithm, Optional.empty());
  // Generate the input for the rebalancer.
  ResourceControllerDataProvider clusterData = setupClusterDataCache();
  Map<String, Resource> resourceMap = clusterData.getIdealStates().entrySet().stream()
      .collect(Collectors.toMap(Map.Entry::getKey, entry -> {
        Resource resource = new Resource(entry.getKey());
        entry.getValue().getPartitionSet().forEach(resource::addPartition);
        return resource;
      }));
  // Mocking the change types for triggering a baseline rebalance.
  when(clusterData.getRefreshedChangeTypes())
      .thenReturn(Collections.singleton(HelixConstants.ChangeType.CLUSTER_CONFIG));
  // Mock a current state
  CurrentStateOutput currentStateOutput = new CurrentStateOutput();
  currentStateOutput.setCurrentState(_resourceNames.get(1), new Partition(_partitionNames.get(1)),
      _testInstanceId, "SLAVE");
  // Record current states into testBaseline; the logic should have loaded current state into
  // baseline as a fallback mechanism
  ZNRecord mappingNode2 = new ZNRecord(_resourceNames.get(1));
  HashMap<String, String> mapping2 = new HashMap<>();
  mapping2.put(_testInstanceId, "SLAVE");
  mappingNode2.setMapField(_partitionNames.get(1), mapping2);
  testResourceAssignmentMap.put(_resourceNames.get(1), new ResourceAssignment(mappingNode2));
  // Call compute, calculate() should have been called twice in global and partial rebalance
  rebalancer.computeNewIdealStates(clusterData, resourceMap, currentStateOutput);
  ArgumentCaptor<ClusterModel> argumentCaptor = ArgumentCaptor.forClass(ClusterModel.class);
  verify(algorithm, times(2)).calculate(argumentCaptor.capture());
  // In the first execution, the past baseline is loaded into the new best possible state
  Map<String, ResourceAssignment> firstCallBestPossibleAssignment =
      argumentCaptor.getAllValues().get(0).getContext().getBestPossibleAssignment();
  Assert.assertEquals(firstCallBestPossibleAssignment.size(), testResourceAssignmentMap.size());
  Assert.assertEquals(firstCallBestPossibleAssignment, testResourceAssignmentMap);
  // In the second execution, the result from the algorithm (which is just the best possible
  // state) is loaded as the baseline, and the best possible state is from persisted + current state
  Map<String, ResourceAssignment> secondCallBaselineAssignment =
      argumentCaptor.getAllValues().get(1).getContext().getBaselineAssignment();
  Map<String, ResourceAssignment> secondCallBestPossibleAssignment =
      argumentCaptor.getAllValues().get(1).getContext().getBestPossibleAssignment();
  Assert.assertEquals(secondCallBaselineAssignment.size(), testResourceAssignmentMap.size());
  Assert.assertEquals(secondCallBaselineAssignment, testResourceAssignmentMap);
  Assert.assertEquals(secondCallBestPossibleAssignment.size(), testResourceAssignmentMap.size());
  Assert.assertEquals(secondCallBestPossibleAssignment, testResourceAssignmentMap);
  // The merged assignment must also be what the metadata store ends up holding.
  Assert.assertEquals(_metadataStore.getBaseline().size(), testResourceAssignmentMap.size());
  Assert.assertEquals(_metadataStore.getBestPossibleAssignment().size(),
      testResourceAssignmentMap.size());
  Assert.assertEquals(_metadataStore.getBaseline(), testResourceAssignmentMap);
  Assert.assertEquals(_metadataStore.getBestPossibleAssignment(), testResourceAssignmentMap);
}
/**
 * Verifies that a resource whose IdealState points at a non-WAGED rebalancer class is
 * rejected with an INVALID_INPUT {@link HelixRebalanceException}.
 */
@Test(dependsOnMethods = "testRebalance", expectedExceptions = HelixRebalanceException.class, expectedExceptionsMessageRegExp = "Input contains invalid resource\\(s\\) that cannot be rebalanced by the WAGED rebalancer. \\[Resource1\\] Failure Type: INVALID_INPUT")
public void testNonCompatibleConfiguration()
    throws IOException, HelixRebalanceException {
  _metadataStore.reset();
  WagedRebalancer rebalancer = new WagedRebalancer(_metadataStore, _algorithm, Optional.empty());
  ResourceControllerDataProvider clusterData = setupClusterDataCache();
  // Point the first resource at a non-WAGED rebalancer to make it incompatible.
  String nonCompatibleResourceName = _resourceNames.get(0);
  clusterData.getIdealState(nonCompatibleResourceName)
      .setRebalancerClassName(CrushRebalanceStrategy.class.getName());
  // The input resource Map shall contain all the valid resources.
  Map<String, Resource> inputResources = new HashMap<>();
  for (Map.Entry<String, IdealState> isEntry : clusterData.getIdealStates().entrySet()) {
    Resource resource = new Resource(isEntry.getKey());
    for (String partitionName : isEntry.getValue().getPartitionSet()) {
      resource.addPartition(partitionName);
    }
    inputResources.put(isEntry.getKey(), resource);
  }
  // Expected to throw; the exception is validated by the @Test annotation above.
  rebalancer.computeNewIdealStates(clusterData, inputResources, new CurrentStateOutput());
}
// TODO test with invalid capacity configuration which will fail the cluster model constructing.
/**
 * Verifies that an unknown state model reference fails the direct best-possible calculation
 * with FAILED_TO_CALCULATE, while the public computeNewIdealStates() degrades gracefully to
 * an empty result since no prior assignment exists to fall back on.
 */
@Test(dependsOnMethods = "testRebalance")
public void testInvalidClusterStatus() throws IOException, HelixRebalanceException {
  _metadataStore.reset();
  WagedRebalancer rebalancer = new WagedRebalancer(_metadataStore, _algorithm, Optional.empty());
  ResourceControllerDataProvider clusterData = setupClusterDataCache();
  String invalidResource = _resourceNames.get(0);
  // The state model does not exist
  clusterData.getIdealState(invalidResource).setStateModelDefRef("foobar");
  // The input resource Map shall contain all the valid resources.
  Map<String, Resource> resourceMap = clusterData.getIdealStates().keySet().stream().collect(
      Collectors.toMap(resourceName -> resourceName, Resource::new));
  try {
    rebalancer.computeBestPossibleAssignment(clusterData, resourceMap,
        clusterData.getEnabledLiveInstances(), new CurrentStateOutput(), _algorithm);
    Assert.fail("Rebalance shall fail.");
  } catch (HelixRebalanceException ex) {
    Assert.assertEquals(ex.getFailureType(), HelixRebalanceException.Type.FAILED_TO_CALCULATE);
    Assert.assertEquals(ex.getMessage(),
        "Failed to calculate for the new best possible. Failure Type: FAILED_TO_CALCULATE");
  }
  // The rebalance will be done with empty mapping result since there is no previously calculated
  // assignment.
  Assert.assertTrue(
      rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput())
          .isEmpty());
}
/**
 * Verifies that an unexpected metadata store failure surfaces as a
 * {@link HelixRebalanceException} with type INVALID_REBALANCER_STATUS.
 */
@Test(dependsOnMethods = "testRebalance")
public void testInvalidRebalancerStatus() throws IOException {
  // Mock a metadata store that will fail on all the calls.
  AssignmentMetadataStore failingStore = Mockito.mock(AssignmentMetadataStore.class);
  when(failingStore.getBestPossibleAssignment())
      .thenThrow(new RuntimeException("Mock Error. Metadata store fails."));
  WagedRebalancer rebalancer = new WagedRebalancer(failingStore, _algorithm, Optional.empty());
  ResourceControllerDataProvider clusterData = setupClusterDataCache();
  // The input resource Map shall contain all the valid resources.
  Map<String, Resource> inputResources = new HashMap<>();
  for (String resourceName : clusterData.getIdealStates().keySet()) {
    inputResources.put(resourceName, new Resource(resourceName));
  }
  try {
    rebalancer.computeNewIdealStates(clusterData, inputResources, new CurrentStateOutput());
    Assert.fail("Rebalance shall fail.");
  } catch (HelixRebalanceException ex) {
    // The store failure must be classified as an invalid rebalancer status.
    Assert.assertEquals(ex.getFailureType(),
        HelixRebalanceException.Type.INVALID_REBALANCER_STATUS);
    Assert.assertEquals(ex.getMessage(),
        "Failed to get the current best possible assignment because of unexpected error. Failure Type: INVALID_REBALANCER_STATUS");
  }
}
/**
 * Verifies that when the algorithm throws, the direct calculation propagates
 * FAILED_TO_CALCULATE, while the public computeNewIdealStates() falls back to the
 * previously persisted assignment and records the failure in the metric collector.
 */
@Test(dependsOnMethods = "testRebalance")
public void testAlgorithmException()
    throws IOException, HelixRebalanceException {
  _metadataStore.reset();
  WagedRebalancer rebalancer = new WagedRebalancer(_metadataStore, _algorithm, Optional.empty());
  ResourceControllerDataProvider clusterData = setupClusterDataCache();
  Map<String, Resource> resourceMap = clusterData.getIdealStates().entrySet().stream()
      .collect(Collectors.toMap(Map.Entry::getKey, entry -> {
        Resource resource = new Resource(entry.getKey());
        entry.getValue().getPartitionSet().forEach(resource::addPartition);
        return resource;
      }));
  // Rebalance with normal configuration. So the assignment will be persisted in the metadata store.
  Map<String, IdealState> result =
      rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput());
  // Recreate a rebalance with the same metadata store but bad algorithm instance.
  RebalanceAlgorithm badAlgorithm = Mockito.mock(RebalanceAlgorithm.class);
  when(badAlgorithm.calculate(any())).thenThrow(new HelixRebalanceException("Algorithm fails.",
      HelixRebalanceException.Type.FAILED_TO_CALCULATE));
  rebalancer = new WagedRebalancer(_metadataStore, badAlgorithm, Optional.empty());
  // Calculation will fail
  try {
    rebalancer.computeBestPossibleAssignment(clusterData, resourceMap,
        clusterData.getEnabledLiveInstances(), new CurrentStateOutput(), badAlgorithm);
    Assert.fail("Rebalance shall fail.");
  } catch (HelixRebalanceException ex) {
    Assert.assertEquals(ex.getFailureType(), HelixRebalanceException.Type.FAILED_TO_CALCULATE);
    Assert.assertEquals(ex.getMessage(), "Failed to calculate for the new best possible. Failure Type: FAILED_TO_CALCULATE");
  }
  // But if call with the public method computeNewIdealStates(), the rebalance will return with
  // the previous rebalance result.
  Map<String, IdealState> newResult =
      rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput());
  Assert.assertEquals(newResult, result);
  // Ensure failure has been recorded
  Assert.assertEquals(rebalancer.getMetricCollector().getMetric(
      WagedRebalancerMetricCollector.WagedRebalancerMetricNames.RebalanceFailureCounter.name(),
      CountMetric.class).getValue().longValue(), 1L);
}
/**
 * Exercises the rebalancer's internal-state handling across consecutive calls with different
 * cluster change types: (1) cluster config change recalculates everything, (2) a resource
 * config change recalculates only the changed resource, (3) a current-state-only change
 * recalculates nothing new for the baseline, and (4) a missing best possible record forces a
 * full recalculation even with no reported changes.
 */
@Test(dependsOnMethods = "testRebalance")
public void testRebalanceOnChanges() throws IOException, HelixRebalanceException {
  // Test continuously rebalance with the same rebalancer with different internal state. Ensure
  // that the rebalancer handles different input (different cluster changes) based on the internal
  // state in a correct way.
  // Note that this test relies on the MockRebalanceAlgorithm implementation. The mock algorithm
  // won't propagate any existing assignment from the cluster model.
  _metadataStore.reset();
  WagedRebalancer rebalancer = new WagedRebalancer(_metadataStore, _algorithm, Optional.empty());
  // 1. rebalance with baseline calculation done
  // Generate the input for the rebalancer.
  ResourceControllerDataProvider clusterData = setupClusterDataCache();
  // Cluster config change will trigger baseline to be recalculated.
  when(clusterData.getRefreshedChangeTypes())
      .thenReturn(Collections.singleton(HelixConstants.ChangeType.CLUSTER_CONFIG));
  // Update the config so the cluster config will be marked as changed.
  ClusterConfig clusterConfig = clusterData.getClusterConfig();
  Map<String, Integer> defaultCapacityMap =
      new HashMap<>(clusterConfig.getDefaultInstanceCapacityMap());
  defaultCapacityMap.put("foobar", 0);
  clusterConfig.setDefaultInstanceCapacityMap(defaultCapacityMap);
  clusterData.setClusterConfig(clusterConfig);
  Map<String, Resource> resourceMap = clusterData.getIdealStates().entrySet().stream()
      .collect(Collectors.toMap(Map.Entry::getKey, entry -> {
        Resource resource = new Resource(entry.getKey());
        entry.getValue().getPartitionSet().forEach(resource::addPartition);
        return resource;
      }));
  Map<String, IdealState> newIdealStates =
      rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput());
  Map<String, ResourceAssignment> algorithmResult = _algorithm.getRebalanceResult();
  // Since there is no special condition, the calculated IdealStates should be exactly the same
  // as the mock algorithm result.
  validateRebalanceResult(resourceMap, newIdealStates, algorithmResult);
  Map<String, ResourceAssignment> baseline = _metadataStore.getBaseline();
  Assert.assertEquals(algorithmResult, baseline);
  Map<String, ResourceAssignment> bestPossibleAssignment =
      _metadataStore.getBestPossibleAssignment();
  Assert.assertEquals(algorithmResult, bestPossibleAssignment);
  // 2. rebalance with one resource changed in the Resource Config znode only
  String changedResourceName = _resourceNames.get(0);
  when(clusterData.getRefreshedChangeTypes())
      .thenReturn(Collections.singleton(HelixConstants.ChangeType.RESOURCE_CONFIG));
  ResourceConfig config = new ResourceConfig(clusterData.getResourceConfig(changedResourceName).getRecord());
  // Update the config so the resource will be marked as changed.
  Map<String, Map<String, Integer>> capacityMap = config.getPartitionCapacityMap();
  capacityMap.get(ResourceConfig.DEFAULT_PARTITION_KEY).put("foobar", 0);
  config.setPartitionCapacityMap(capacityMap);
  when(clusterData.getResourceConfig(changedResourceName)).thenReturn(config);
  clusterData.getResourceConfigMap().put(changedResourceName, config);
  // Although the input contains 2 resources, the rebalancer shall only call the algorithm to
  // rebalance the changed one.
  newIdealStates =
      rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput());
  Map<String, ResourceAssignment> partialAlgorithmResult = _algorithm.getRebalanceResult();
  // Verify that only the changed resource has been included in the calculation.
  validateRebalanceResult(
      Collections.singletonMap(changedResourceName, new Resource(changedResourceName)),
      newIdealStates, partialAlgorithmResult);
  // Best possible assignment contains the new assignment of only one resource.
  baseline = _metadataStore.getBaseline();
  Assert.assertEquals(baseline, partialAlgorithmResult);
  // Best possible assignment contains the new assignment of only one resource.
  bestPossibleAssignment = _metadataStore.getBestPossibleAssignment();
  Assert.assertEquals(bestPossibleAssignment, partialAlgorithmResult);
  // * Before the next test, recover the best possible assignment record.
  _metadataStore.persistBestPossibleAssignment(algorithmResult);
  _metadataStore.persistBaseline(algorithmResult);
  // 3. rebalance with current state change only
  // Create a new cluster data cache to simulate cluster change
  clusterData = setupClusterDataCache();
  when(clusterData.getRefreshedChangeTypes())
      .thenReturn(Collections.singleton(HelixConstants.ChangeType.CURRENT_STATE));
  // Modify any current state
  CurrentState cs =
      clusterData.getCurrentState(_testInstanceId, _sessionId).get(_resourceNames.get(0));
  // Update the tag so the ideal state will be marked as changed.
  cs.setInfo(_partitionNames.get(0), "mock update");
  // Although the input contains 2 resources, the rebalancer shall not try to recalculate
  // assignment since there is only current state change.
  newIdealStates =
      rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput());
  Map<String, ResourceAssignment> newAlgorithmResult = _algorithm.getRebalanceResult();
  // Verify that only the changed resource has been included in the calculation.
  validateRebalanceResult(Collections.emptyMap(), newIdealStates, newAlgorithmResult);
  // There should be no changes in the baseline since only the currentStates changed
  baseline = _metadataStore.getBaseline();
  Assert.assertEquals(baseline, algorithmResult);
  // The BestPossible assignment should have been updated since computeNewIdealStates() should have been called.
  bestPossibleAssignment = _metadataStore.getBestPossibleAssignment();
  Assert.assertEquals(bestPossibleAssignment, newAlgorithmResult);
  // 4. rebalance with no change but best possible state record missing.
  // This usually happens when the persisted assignment state is gone.
  clusterData = setupClusterDataCache(); // Note this mock data cache won't report any change.
  // Even with no change, since the previous assignment is empty, the rebalancer will still
  // calculate the assignment for both resources.
  newIdealStates =
      rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput());
  newAlgorithmResult = _algorithm.getRebalanceResult();
  // Verify that both resource has been included in the calculation.
  validateRebalanceResult(resourceMap, newIdealStates, newAlgorithmResult);
  // There should not be any changes in the baseline.
  baseline = _metadataStore.getBaseline();
  Assert.assertEquals(baseline, algorithmResult);
  // The BestPossible assignment should have been updated since computeNewIdealStates() should have been called.
  bestPossibleAssignment = _metadataStore.getBestPossibleAssignment();
  Assert.assertEquals(bestPossibleAssignment, newAlgorithmResult);
}
/**
 * Verifies the emergency rebalance path: when the persisted best possible assignment
 * references an offline instance, an extra (emergency) algorithm pass is triggered whose
 * cluster model contains only the replica hosted on the offline node, and the resulting
 * persisted assignment no longer references that node.
 */
@Test(dependsOnMethods = "testRebalance")
public void testEmergencyRebalance() throws IOException, HelixRebalanceException {
  _metadataStore.reset();
  ResourceControllerDataProvider clusterData = setupClusterDataCache();
  // Spy so algorithm invocations can be counted and their cluster models captured.
  MockRebalanceAlgorithm spyAlgorithm = Mockito.spy(new MockRebalanceAlgorithm());
  WagedRebalancer rebalancer = new WagedRebalancer(_metadataStore, spyAlgorithm, Optional.empty());
  // Cluster config change will trigger baseline to be recalculated.
  when(clusterData.getRefreshedChangeTypes())
      .thenReturn(Collections.singleton(HelixConstants.ChangeType.CLUSTER_CONFIG));
  Map<String, Resource> resourceMap =
      clusterData.getIdealStates().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> {
        Resource resource = new Resource(entry.getKey());
        entry.getValue().getPartitionSet().forEach(resource::addPartition);
        return resource;
      }));
  // Populate best possible assignment
  rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput());
  // Global Rebalance once, Partial Rebalance once
  verify(spyAlgorithm, times(2)).calculate(any());
  // Artificially insert an offline node in the best possible assignment
  Map<String, ResourceAssignment> bestPossibleAssignment =
      _metadataStore.getBestPossibleAssignment();
  String offlineResource = _resourceNames.get(0);
  String offlinePartition = _partitionNames.get(0);
  String offlineState = "MASTER";
  String offlineInstance = "offlineInstance";
  for (Partition partition : bestPossibleAssignment.get(offlineResource).getMappedPartitions()) {
    if (partition.getPartitionName().equals(offlinePartition)) {
      bestPossibleAssignment.get(offlineResource)
          .addReplicaMap(partition, Collections.singletonMap(offlineInstance, offlineState));
    }
  }
  _metadataStore.persistBestPossibleAssignment(bestPossibleAssignment);
  // This should trigger both emergency rebalance and partial rebalance
  rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput());
  ArgumentCaptor<ClusterModel> capturedClusterModel = ArgumentCaptor.forClass(ClusterModel.class);
  // 2 from previous case, Emergency + Partial from this case, 4 in total
  verify(spyAlgorithm, times(4)).calculate(capturedClusterModel.capture());
  // In the cluster model for Emergency rebalance, the assignableReplica is the offline one
  ClusterModel clusterModelForEmergencyRebalance = capturedClusterModel.getAllValues().get(2);
  Assert.assertEquals(clusterModelForEmergencyRebalance.getAssignableReplicaMap().size(), 1);
  Assert.assertEquals(clusterModelForEmergencyRebalance.getAssignableReplicaMap().get(offlineResource).size(), 1);
  AssignableReplica assignableReplica =
      clusterModelForEmergencyRebalance.getAssignableReplicaMap().get(offlineResource).iterator().next();
  Assert.assertEquals(assignableReplica.getPartitionName(), offlinePartition);
  Assert.assertEquals(assignableReplica.getReplicaState(), offlineState);
  // After emergency rebalance, no replica may remain on the offline instance.
  bestPossibleAssignment = _metadataStore.getBestPossibleAssignment();
  for (Map.Entry<String, ResourceAssignment> entry : bestPossibleAssignment.entrySet()) {
    ResourceAssignment resourceAssignment = entry.getValue();
    for (Partition partition : resourceAssignment.getMappedPartitions()) {
      for (String instance: resourceAssignment.getReplicaMap(partition).keySet()) {
        Assert.assertNotSame(instance, offlineInstance);
      }
    }
  }
}
/**
 * Verifies when the delayed-rebalance overwrite kicks in: with minActiveReplicas=0 a
 * delayed offline instance does not trigger the overwrite (counter/latency metrics stay
 * untouched); with minActiveReplicas=3 the overwrite runs and both metrics advance.
 */
@Test(dependsOnMethods = "testRebalance")
public void testRebalanceOverwriteTrigger() throws IOException, HelixRebalanceException {
  _metadataStore.reset();
  ResourceControllerDataProvider clusterData = setupClusterDataCache();
  // Enable delay rebalance
  ClusterConfig clusterConfig = clusterData.getClusterConfig();
  clusterConfig.setDelayRebalaceEnabled(true);
  clusterConfig.setRebalanceDelayTime(1);
  clusterData.setClusterConfig(clusterConfig);
  // force create a fake offlineInstance that's in delay window
  Set<String> instances = new HashSet<>(_instances);
  String offlineInstance = "offlineInstance";
  instances.add(offlineInstance);
  when(clusterData.getAllInstances()).thenReturn(instances);
  Map<String, Long> instanceOfflineTimeMap = new HashMap<>();
  // Offline time far in the future keeps the instance inside the delay window.
  instanceOfflineTimeMap.put(offlineInstance, System.currentTimeMillis() + Integer.MAX_VALUE);
  when(clusterData.getInstanceOfflineTimeMap()).thenReturn(instanceOfflineTimeMap);
  Map<String, InstanceConfig> instanceConfigMap = clusterData.getInstanceConfigMap();
  instanceConfigMap.put(offlineInstance, createMockInstanceConfig(offlineInstance));
  when(clusterData.getInstanceConfigMap()).thenReturn(instanceConfigMap);
  // Set minActiveReplica to 0 so that requireRebalanceOverwrite returns false
  Map<String, IdealState> isMap = new HashMap<>();
  for (String resource : _resourceNames) {
    IdealState idealState = clusterData.getIdealState(resource);
    idealState.setMinActiveReplicas(0);
    isMap.put(resource, idealState);
  }
  when(clusterData.getIdealState(anyString())).thenAnswer(
      (Answer<IdealState>) invocationOnMock -> isMap.get(invocationOnMock.getArguments()[0]));
  when(clusterData.getIdealStates()).thenReturn(isMap);
  MockRebalanceAlgorithm spyAlgorithm = Mockito.spy(new MockRebalanceAlgorithm());
  // Spy the rebalancer itself so requireRebalanceOverwrite() calls can be verified.
  WagedRebalancer rebalancer = Mockito.spy(new WagedRebalancer(_metadataStore, spyAlgorithm, Optional.empty()));
  // Cluster config change will trigger baseline to be recalculated.
  when(clusterData.getRefreshedChangeTypes())
      .thenReturn(Collections.singleton(HelixConstants.ChangeType.CLUSTER_CONFIG));
  Map<String, Resource> resourceMap =
      clusterData.getIdealStates().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> {
        Resource resource = new Resource(entry.getKey());
        entry.getValue().getPartitionSet().forEach(resource::addPartition);
        return resource;
      }));
  // Populate best possible assignment
  rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput());
  verify(rebalancer, times(1)).requireRebalanceOverwrite(any(), any());
  // No overwrite expected: counter stays at 0 and the latency gauge was never emitted (-1).
  Assert.assertEquals(rebalancer.getMetricCollector().getMetric(
      WagedRebalancerMetricCollector.WagedRebalancerMetricNames.RebalanceOverwriteCounter.name(),
      CountMetric.class).getValue().longValue(), 0L);
  Assert.assertEquals(rebalancer.getMetricCollector().getMetric(
      WagedRebalancerMetricCollector.WagedRebalancerMetricNames.RebalanceOverwriteLatencyGauge.name(),
      LatencyMetric.class).getLastEmittedMetricValue().longValue(), -1L);
  // Set minActiveReplica to 3 so that requireRebalanceOverwrite returns true
  for (String resource : _resourceNames) {
    IdealState idealState = clusterData.getIdealState(resource);
    idealState.setMinActiveReplicas(3);
    isMap.put(resource, idealState);
  }
  when(clusterData.getIdealState(anyString())).thenAnswer(
      (Answer<IdealState>) invocationOnMock -> isMap.get(invocationOnMock.getArguments()[0]));
  when(clusterData.getIdealStates()).thenReturn(isMap);
  _metadataStore.reset();
  // Update the config so the cluster config will be marked as changed.
  clusterConfig = clusterData.getClusterConfig();
  Map<String, Integer> defaultCapacityMap =
      new HashMap<>(clusterConfig.getDefaultInstanceCapacityMap());
  defaultCapacityMap.put("foobar", 0);
  clusterConfig.setDefaultInstanceCapacityMap(defaultCapacityMap);
  clusterData.setClusterConfig(clusterConfig);
  rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput());
  verify(rebalancer, times(2)).requireRebalanceOverwrite(any(), any());
  // The overwrite ran this time: counter incremented and latency gauge emitted.
  Assert.assertEquals(rebalancer.getMetricCollector().getMetric(
      WagedRebalancerMetricCollector.WagedRebalancerMetricNames.RebalanceOverwriteCounter.name(),
      CountMetric.class).getValue().longValue(), 1L);
  Assert.assertTrue(rebalancer.getMetricCollector()
      .getMetric(WagedRebalancerMetricCollector.WagedRebalancerMetricNames.RebalanceOverwriteLatencyGauge.name(),
          LatencyMetric.class).getLastEmittedMetricValue() > 0L);
}
/**
 * Verifies the delayed-rebalance overwrite path: an instance is offline but still inside the
 * rebalance delay window, and minActiveReplicas is set high enough that the overwrite logic
 * must place additional replicas on live instances for the partitions currently hosted on
 * the offline instance.
 * NOTE(review): the exact preference-list sizes asserted at the end depend on the combination
 * of MockRebalanceAlgorithm output and the overwrite pass — confirm against the mock algorithm
 * if these assertions drift.
 */
@Test(dependsOnMethods = "testRebalanceOverwriteTrigger")
public void testRebalanceOverwrite() throws HelixRebalanceException, IOException {
  _metadataStore.reset();
  ResourceControllerDataProvider clusterData = setupClusterDataCache();
  // Enable delay rebalance with a tiny (1ms) delay; the window is kept open by the fake
  // offline time below, not by this value.
  ClusterConfig clusterConfig = clusterData.getClusterConfig();
  clusterConfig.setDelayRebalaceEnabled(true);
  clusterConfig.setRebalanceDelayTime(1);
  clusterData.setClusterConfig(clusterConfig);
  String instance0 = _testInstanceId;
  String instance1 = instance0 + "1";
  String instance2 = instance0 + "2";
  String offlineInstance = "offlineInstance";
  // force create a fake offlineInstance that's in delay window: it is a known, enabled
  // instance, but deliberately excluded from the enabled LIVE instance set.
  Set<String> instances = new HashSet<>(_instances);
  instances.add(offlineInstance);
  when(clusterData.getAllInstances()).thenReturn(instances);
  when(clusterData.getEnabledInstances()).thenReturn(instances);
  when(clusterData.getEnabledLiveInstances()).thenReturn(ImmutableSet.of(instance0, instance1, instance2));
  Map<String, Long> instanceOfflineTimeMap = new HashMap<>();
  // An offline timestamp far in the future keeps the instance inside the delay window for
  // the entire test run.
  instanceOfflineTimeMap.put(offlineInstance, System.currentTimeMillis() + Integer.MAX_VALUE);
  when(clusterData.getInstanceOfflineTimeMap()).thenReturn(instanceOfflineTimeMap);
  Map<String, InstanceConfig> instanceConfigMap = clusterData.getInstanceConfigMap();
  instanceConfigMap.put(offlineInstance, createMockInstanceConfig(offlineInstance));
  when(clusterData.getInstanceConfigMap()).thenReturn(instanceConfigMap);
  // minActiveReplicas = 2 makes partitions with a replica on the offline instance fall short
  // of the requirement among live nodes, which activates the overwrite pass.
  Map<String, IdealState> isMap = new HashMap<>();
  for (String resource : _resourceNames) {
    IdealState idealState = clusterData.getIdealState(resource);
    idealState.setMinActiveReplicas(2);
    isMap.put(resource, idealState);
  }
  when(clusterData.getIdealState(anyString())).thenAnswer(
      (Answer<IdealState>) invocationOnMock -> isMap.get(invocationOnMock.getArguments()[0]));
  when(clusterData.getIdealStates()).thenReturn(isMap);
  MockRebalanceAlgorithm algorithm = new MockRebalanceAlgorithm();
  WagedRebalancer rebalancer = new WagedRebalancer(_metadataStore, algorithm, Optional.empty());
  // Cluster config change will trigger baseline to be recalculated.
  when(clusterData.getRefreshedChangeTypes())
      .thenReturn(Collections.singleton(HelixConstants.ChangeType.CLUSTER_CONFIG));
  Map<String, Resource> resourceMap =
      clusterData.getIdealStates().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> {
        Resource resource = new Resource(entry.getKey());
        entry.getValue().getPartitionSet().forEach(resource::addPartition);
        return resource;
      }));
  // Current states: most partitions live on two live instances; Resource1/Partition2 and
  // Resource2/Partition4 each have one replica stranded on the offline instance (the inline
  // comments mark the state that replica would have held).
  Map<String, Map<String, Map<String, String>>> input = ImmutableMap.of(
      _resourceNames.get(0),
      ImmutableMap.of(
          _partitionNames.get(0), ImmutableMap.of(instance1, "MASTER", instance2, "SLAVE"),
          _partitionNames.get(1), ImmutableMap.of(instance2, "MASTER", offlineInstance, "OFFLINE"), // Partition2-SLAVE
          _partitionNames.get(2), ImmutableMap.of(instance1, "SLAVE", instance2, "MASTER"),
          _partitionNames.get(3), ImmutableMap.of(instance1, "SLAVE", instance2, "SLAVE")),
      _resourceNames.get(1),
      ImmutableMap.of(
          _partitionNames.get(0), ImmutableMap.of(instance1, "MASTER", instance2, "SLAVE"),
          _partitionNames.get(1), ImmutableMap.of(instance1, "MASTER", instance2, "SLAVE"),
          _partitionNames.get(2), ImmutableMap.of(instance1, "MASTER", instance2, "SLAVE"),
          _partitionNames.get(3), ImmutableMap.of(offlineInstance, "OFFLINE", instance2, "SLAVE")) // Partition4-MASTER
      );
  // Translate the nested input map into a CurrentStateOutput the rebalancer consumes.
  CurrentStateOutput currentStateOutput = new CurrentStateOutput();
  input.forEach((resource, inputMap) ->
      inputMap.forEach((partition, stateInstance) ->
          stateInstance.forEach((tmpInstance, state) ->
              currentStateOutput.setCurrentState(resource, new Partition(partition), tmpInstance, state))));
  rebalancer.setPartialRebalanceAsyncMode(true);
  Map<String, IdealState> newIdealStates =
      rebalancer.computeNewIdealStates(clusterData, resourceMap, currentStateOutput);
  // Every partition of both resources gets a preference list.
  Assert.assertEquals(newIdealStates.get(_resourceNames.get(0)).getPreferenceLists().size(), 4);
  Assert.assertEquals(newIdealStates.get(_resourceNames.get(1)).getPreferenceLists().size(), 4);
  // Partitions touching the offline instance end up with 3 entries (presumably the delayed
  // placement plus overwrite replicas on live nodes — TODO confirm), the others keep 2.
  Assert.assertEquals(newIdealStates.get(_resourceNames.get(0)).getPreferenceList(_partitionNames.get(1)).size(), 3);
  Assert.assertEquals(newIdealStates.get(_resourceNames.get(0)).getPreferenceList(_partitionNames.get(3)).size(), 2);
  Assert.assertEquals(newIdealStates.get(_resourceNames.get(1)).getPreferenceList(_partitionNames.get(3)).size(), 3);
  Assert.assertEquals(newIdealStates.get(_resourceNames.get(1)).getPreferenceList(_partitionNames.get(0)).size(), 2);
}
/**
 * Verifies that {@code WagedRebalancer.reset()} clears the cached rebalance state: after a
 * reset, the same (unchanged) cluster input triggers a complete recomputation instead of
 * being served from cache.
 */
@Test(dependsOnMethods = "testRebalance")
public void testReset() throws IOException, HelixRebalanceException {
  _metadataStore.reset();
  WagedRebalancer rebalancer = new WagedRebalancer(_metadataStore, _algorithm, Optional.empty());
  // Generate the input for the rebalancer.
  ResourceControllerDataProvider clusterData = setupClusterDataCache();
  Map<String, Resource> resourceMap = clusterData.getIdealStates().entrySet().stream()
      .collect(Collectors.toMap(Map.Entry::getKey, entry -> {
        Resource resource = new Resource(entry.getKey());
        entry.getValue().getPartitionSet().forEach(resource::addPartition);
        return resource;
      }));
  // Mocking the change types for triggering a baseline rebalance.
  when(clusterData.getRefreshedChangeTypes())
      .thenReturn(Collections.singleton(HelixConstants.ChangeType.CLUSTER_CONFIG));
  Map<String, IdealState> newIdealStates =
      rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput());
  Map<String, ResourceAssignment> algorithmResult = _algorithm.getRebalanceResult();
  validateRebalanceResult(resourceMap, newIdealStates, algorithmResult);
  // Clean up algorithm result for the next test step
  algorithmResult.clear();
  // Try to trigger a new rebalance; since nothing has changed, the cached result is returned
  // and the algorithm is not invoked (its result map stays empty).
  when(clusterData.getRefreshedChangeTypes())
      .thenReturn(Collections.singleton(HelixConstants.ChangeType.CLUSTER_CONFIG));
  Assert.assertEquals(
      rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput()),
      newIdealStates);
  algorithmResult = _algorithm.getRebalanceResult();
  Assert.assertEquals(algorithmResult, Collections.emptyMap());
  // Reset the rebalancer and repeat the same operation. Without any cached info, the
  // rebalancer must run the complete rebalance again.
  rebalancer.reset();
  algorithmResult.clear();
  // After reset, even an unchanged input goes through a full recomputation, so the algorithm
  // produces a fresh result we can validate.
  when(clusterData.getRefreshedChangeTypes())
      .thenReturn(Collections.singleton(HelixConstants.ChangeType.CLUSTER_CONFIG));
  newIdealStates =
      rebalancer.computeNewIdealStates(clusterData, resourceMap, new CurrentStateOutput());
  algorithmResult = _algorithm.getRebalanceResult();
  validateRebalanceResult(resourceMap, newIdealStates, algorithmResult);
}
/**
 * Asserts that the rebalancer's output IdealStates agree with the algorithm's expected
 * ResourceAssignments: same resource keys, same partition sets, and identical
 * instance-to-state maps per partition.
 *
 * @param resourceMap    the resources that were fed into the rebalancer
 * @param newIdealStates the IdealStates produced by the rebalancer
 * @param expectedResult the assignments the mock algorithm reports as its output
 */
private void validateRebalanceResult(Map<String, Resource> resourceMap,
    Map<String, IdealState> newIdealStates, Map<String, ResourceAssignment> expectedResult) {
  Assert.assertEquals(newIdealStates.keySet(), resourceMap.keySet());
  for (String resourceName : expectedResult.keySet()) {
    Assert.assertTrue(newIdealStates.containsKey(resourceName));
    IdealState is = newIdealStates.get(resourceName);
    ResourceAssignment assignment = expectedResult.get(resourceName);
    // Collectors.toSet() already yields a Set; the extra HashSet copy was redundant.
    Assert.assertEquals(is.getPartitionSet(), assignment.getMappedPartitions().stream()
        .map(Partition::getPartitionName).collect(Collectors.toSet()));
    for (String partitionName : is.getPartitionSet()) {
      Assert.assertEquals(is.getInstanceStateMap(partitionName),
          assignment.getReplicaMap(new Partition(partitionName)));
    }
  }
}
/**
 * Checks that {@code WagedResourceWeightsProvider} surfaces the per-partition capacity
 * weights configured in the mocked cluster data cache.
 */
@Test
public void testResourceWeightProvider() throws IOException {
  WagedResourceWeightsProvider weightProvider =
      new WagedResourceWeightsProvider(setupClusterDataCache());
  // Expected weights come straight from the mock cluster setup.
  Map<String, Integer> resource1Weights = ImmutableMap.of("item1", 3, "item2", 6, "item3", 0);
  Map<String, Integer> resource2Weights = ImmutableMap.of("item1", 5, "item2", 10, "item3", 0);
  Assert.assertEquals(weightProvider.getPartitionWeights("Resource1", "Partition1"),
      resource1Weights);
  Assert.assertEquals(weightProvider.getPartitionWeights("Resource1", "Partition2"),
      resource1Weights);
  Assert.assertEquals(weightProvider.getPartitionWeights("Resource2", "Partition2"),
      resource2Weights);
}
/**
 * Verifies that {@link WagedInstanceCapacity} reports the expected available capacity for
 * each live instance in the mocked cluster.
 * Cleanup vs. the previous version: removed an unused WagedRebalancer, an unused resourceMap,
 * an isMap that was built but never stubbed into the mock, an unused local holding the first
 * capacity lookup, and a copy-pasted comment about an offline instance that is never created
 * here.
 *
 * @throws IOException if reading the mock cluster setup fails
 */
@Test
public void testInstanceCapacityProvider() throws IOException, HelixRebalanceException {
  // Generate the input for the capacity provider.
  ResourceControllerDataProvider clusterData = setupClusterDataCache();
  // Every known instance is enabled and live — no delay-window scenario in this test.
  Set<String> instances = new HashSet<>(_instances);
  when(clusterData.getAllInstances()).thenReturn(instances);
  when(clusterData.getEnabledInstances()).thenReturn(instances);
  when(clusterData.getEnabledLiveInstances()).thenReturn(instances);
  Map<String, InstanceConfig> instanceConfigMap = clusterData.getInstanceConfigMap();
  when(clusterData.getInstanceConfigMap()).thenReturn(instanceConfigMap);
  // Keep the minActiveReplicas mutation on the cached IdealState objects in case the
  // capacity computation reads them — TODO confirm whether this is actually required.
  for (String resource : _resourceNames) {
    clusterData.getIdealState(resource).setMinActiveReplicas(2);
  }
  WagedInstanceCapacity provider = new WagedInstanceCapacity(clusterData);
  // All three instances share the same configured capacity in the mock setup.
  Map<String, Integer> expectedCapacity = ImmutableMap.of("item1", 20, "item2", 40, "item3", 30);
  Assert.assertEquals(provider.getInstanceAvailableCapacity("testInstanceId"), expectedCapacity);
  Assert.assertEquals(provider.getInstanceAvailableCapacity("testInstanceId1"), expectedCapacity);
  Assert.assertEquals(provider.getInstanceAvailableCapacity("testInstanceId2"), expectedCapacity);
}
}
| 9,758 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/MockAssignmentMetadataStore.java
|
package org.apache.helix.controller.rebalancer.waged;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.Map;
import org.apache.helix.BucketDataAccessor;
import org.apache.helix.model.ResourceAssignment;
import org.mockito.Mockito;
/**
* A mock up metadata store for unit test.
* This mock datastore persist assignments in memory only.
*/
public class MockAssignmentMetadataStore extends AssignmentMetadataStore {
MockAssignmentMetadataStore() {
super(Mockito.mock(BucketDataAccessor.class), "");
}
public Map<String, ResourceAssignment> getBaseline() {
return _globalBaseline == null ? Collections.emptyMap() : _globalBaseline;
}
public void persistBaseline(Map<String, ResourceAssignment> globalBaseline) {
_globalBaseline = globalBaseline;
}
public Map<String, ResourceAssignment> getBestPossibleAssignment() {
return _bestPossibleAssignment == null ? Collections.emptyMap() : _bestPossibleAssignment;
}
public void persistBestPossibleAssignment(
Map<String, ResourceAssignment> bestPossibleAssignment) {
_bestPossibleAssignment = bestPossibleAssignment;
_bestPossibleVersion++;
}
public synchronized boolean asyncUpdateBestPossibleAssignmentCache(
Map<String, ResourceAssignment> bestPossibleAssignment, int newVersion) {
// Check if the version is stale by this point
if (newVersion > _bestPossibleVersion) {
_bestPossibleAssignment = bestPossibleAssignment;
_bestPossibleVersion = newVersion;
return true;
}
return false;
}
public void close() {
// do nothing
}
}
| 9,759 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/TestAssignmentMetadataStore.java
|
package org.apache.helix.controller.rebalancer.waged;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.AccessOption;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.common.ZkTestBase;
import org.apache.helix.manager.zk.ZkBucketDataAccessor;
import org.apache.helix.model.Partition;
import org.apache.helix.model.ResourceAssignment;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestAssignmentMetadataStore extends ZkTestBase {
  private static final int DEFAULT_BUCKET_SIZE = 50 * 1024; // 50KB
  private static final String BASELINE_KEY = "BASELINE";
  private static final String BEST_POSSIBLE_KEY = "BEST_POSSIBLE";
  protected static final String TEST_DB = "TestDB";
  protected HelixManager _manager;
  protected final String CLASS_NAME = getShortClassName();
  protected final String CLUSTER_NAME = CLUSTER_PREFIX + "_" + CLASS_NAME;
  // Store under test; persists assignments into ZK via a bucketized accessor.
  private AssignmentMetadataStore _store;

  @BeforeClass
  public void beforeClass() throws Exception {
    super.beforeClass();
    // setup storage cluster
    _gSetupTool.addCluster(CLUSTER_NAME, true);
    // create cluster manager
    _manager = HelixManagerFactory
        .getZKHelixManager(CLUSTER_NAME, "Admin", InstanceType.ADMINISTRATOR, ZK_ADDR);
    _manager.connect();
    // Create AssignmentMetadataStore. No version clean up to ensure the test result is stable.
    _store = new AssignmentMetadataStore(
        new ZkBucketDataAccessor(_manager.getMetadataStoreConnectionString(), DEFAULT_BUCKET_SIZE,
            Integer.MAX_VALUE), _manager.getClusterName());
  }

  @AfterClass
  public void afterClass() {
    if (_store != null) {
      _store.close();
    }
    if (_manager != null) {
      _manager.disconnect();
    }
    _gSetupTool.deleteCluster(CLUSTER_NAME);
  }

  /**
   * TODO: Reading baseline will be empty because AssignmentMetadataStore isn't being used yet by
   * the new rebalancer. Modify this integration test once the WAGED rebalancer
   * starts using AssignmentMetadataStore's persist APIs.
   * TODO: WAGED Rebalancer currently does NOT work with ZKClusterVerifier because verifier's
   * HelixManager is null, and that causes an NPE when instantiating AssignmentMetadataStore.
   */
  @Test
  public void testReadEmptyBaseline() {
    // This should be the first test. Assert there is no record in ZK.
    // Check that no versions exist yet for either metadata type.
    Assert.assertEquals(getExistingVersionNumbers(BASELINE_KEY).size(), 0);
    Assert.assertEquals(getExistingVersionNumbers(BEST_POSSIBLE_KEY).size(), 0);
    // Read from cache and the result is empty.
    Assert.assertTrue(_store.getBaseline().isEmpty());
    Assert.assertTrue(_store.getBestPossibleAssignment().isEmpty());
  }

  @Test(dependsOnMethods = "testReadEmptyBaseline")
  public void testAssignmentCache() {
    Map<String, ResourceAssignment> dummyAssignment = getDummyAssignment();
    // Call persist functions
    _store.persistBaseline(dummyAssignment);
    _store.persistBestPossibleAssignment(dummyAssignment);
    // Check that only one version exists
    Assert.assertEquals(getExistingVersionNumbers(BASELINE_KEY).size(), 1);
    Assert.assertEquals(getExistingVersionNumbers(BEST_POSSIBLE_KEY).size(), 1);
    // Same data in cache
    Assert.assertEquals(_store._bestPossibleAssignment, dummyAssignment);
    Assert.assertEquals(_store._globalBaseline, dummyAssignment);
    // Mutate the assignment so the next persist writes a different payload.
    dummyAssignment.values().forEach(
        assignment -> assignment.addReplicaMap(new Partition("foo"), Collections.emptyMap()));
    // Call persist functions
    _store.persistBaseline(dummyAssignment);
    _store.persistBestPossibleAssignment(dummyAssignment);
    // Check that two versions exist
    Assert.assertEquals(getExistingVersionNumbers(BASELINE_KEY).size(), 2);
    Assert.assertEquals(getExistingVersionNumbers(BEST_POSSIBLE_KEY).size(), 2);
    // Same data in cache
    Assert.assertEquals(_store._bestPossibleAssignment, dummyAssignment);
    Assert.assertEquals(_store._globalBaseline, dummyAssignment);
    // Clear cache
    _store.reset();
    Assert.assertEquals(_store._bestPossibleAssignment, null);
    Assert.assertEquals(_store._globalBaseline, null);
    // Check the persisted data is not changed.
    Assert.assertEquals(getExistingVersionNumbers(BASELINE_KEY).size(), 2);
    Assert.assertEquals(getExistingVersionNumbers(BEST_POSSIBLE_KEY).size(), 2);
  }

  @Test(dependsOnMethods = "testAssignmentCache")
  void testClearAssignment() {
    // Check the persisted data is not empty
    List<String> baselineVersions = getExistingVersionNumbers(BASELINE_KEY);
    List<String> bestPossibleVersions = getExistingVersionNumbers(BEST_POSSIBLE_KEY);
    int baselineVersionCount = baselineVersions.size();
    int bestPossibleVersionCount = bestPossibleVersions.size();
    Assert.assertTrue(baselineVersionCount > 0);
    Assert.assertTrue(bestPossibleVersionCount > 0);
    _store.clearAssignmentMetadata();
    // 1. cache is cleaned up
    Assert.assertEquals(_store._bestPossibleAssignment, Collections.emptyMap());
    Assert.assertEquals(_store._globalBaseline, Collections.emptyMap());
    // 2. refresh the cache and then read from ZK again to ensure the persisted assignments is empty
    _store.reset();
    Assert.assertEquals(_store.getBaseline(), Collections.emptyMap());
    Assert.assertEquals(_store.getBestPossibleAssignment(), Collections.emptyMap());
    // 3. check that clearing persisted one additional (empty) version for each metadata type
    Assert.assertEquals(getExistingVersionNumbers(BASELINE_KEY).size(), baselineVersionCount + 1);
    Assert.assertEquals(getExistingVersionNumbers(BEST_POSSIBLE_KEY).size(), bestPossibleVersionCount + 1);
  }

  /**
   * Builds a single-resource dummy assignment with one partition and one replica mapping.
   */
  private Map<String, ResourceAssignment> getDummyAssignment() {
    Map<String, ResourceAssignment> dummyAssignment = new HashMap<>();
    ResourceAssignment assignment = new ResourceAssignment(TEST_DB);
    Partition partition = new Partition(TEST_DB);
    Map<String, String> replicaMap = new HashMap<>();
    replicaMap.put(TEST_DB, TEST_DB);
    assignment.addReplicaMap(partition, replicaMap);
    // Bug fix: previously a fresh, empty ResourceAssignment was stored here, silently
    // discarding the populated one built above.
    dummyAssignment.put(TEST_DB, assignment);
    return dummyAssignment;
  }

  /**
   * Returns a list of existing version numbers only (bookkeeping nodes are filtered out).
   *
   * @param metadataType either {@code BASELINE} or {@code BEST_POSSIBLE}
   * @return mutable list of version znode names; empty if the parent path does not exist
   */
  private List<String> getExistingVersionNumbers(String metadataType) {
    List<String> children = _baseAccessor
        .getChildNames("/" + CLUSTER_NAME + "/ASSIGNMENT_METADATA/" + metadataType,
            AccessOption.PERSISTENT);
    if (children == null) {
      // Typed empty list instead of the raw Collections.EMPTY_LIST; return early so we never
      // call remove() on an immutable list.
      return Collections.emptyList();
    }
    children.remove("LAST_SUCCESSFUL_WRITE");
    children.remove("LAST_WRITE");
    return children;
  }
}
| 9,760 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/constraints/TestResourcePartitionAntiAffinityConstraint.java
|
package org.apache.helix.controller.rebalancer.waged.constraints;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import static org.mockito.Mockito.when;
import java.util.Collections;
import org.apache.helix.controller.rebalancer.waged.model.AssignableNode;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterContext;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableSet;
public class TestResourcePartitionAntiAffinityConstraint {
  private static final String TEST_PARTITION = "TestPartition";
  private static final String TEST_RESOURCE = "TestResource";
  private final SoftConstraint _constraint = new ResourcePartitionAntiAffinityConstraint();
  private final AssignableReplica _replica = Mockito.mock(AssignableReplica.class);
  private final AssignableNode _node = Mockito.mock(AssignableNode.class);
  private final ClusterContext _context = Mockito.mock(ClusterContext.class);

  @Test
  public void testGetAssignmentScore() {
    // Node already hosts 3 of an estimated 10 partitions of this resource.
    when(_replica.getResourceName()).thenReturn(TEST_RESOURCE);
    when(_node.getAssignedPartitionsByResource(TEST_RESOURCE)).thenReturn(
        ImmutableSet.of(TEST_PARTITION + "1", TEST_PARTITION + "2", TEST_PARTITION + "3"));
    when(_context.getEstimatedMaxPartitionByResource(TEST_RESOURCE)).thenReturn(10);
    // Raw score is the occupancy ratio 3/10; normalized score stays near 1.
    Assert.assertEquals(_constraint.getAssignmentScore(_node, _replica, _context), 0.3);
    Assert.assertTrue(
        _constraint.getAssignmentNormalizedScore(_node, _replica, _context) > 0.99);
  }

  @Test
  public void testGetAssignmentScoreMaxScore() {
    // Empty node: zero occupancy yields the best possible (raw 0.0, normalized 1.0) score.
    when(_replica.getResourceName()).thenReturn(TEST_RESOURCE);
    when(_node.getAssignedPartitionsByResource(TEST_RESOURCE)).thenReturn(Collections.emptySet());
    when(_context.getEstimatedMaxPartitionByResource(TEST_RESOURCE)).thenReturn(10);
    Assert.assertEquals(_constraint.getAssignmentScore(_node, _replica, _context), 0.0);
    Assert.assertEquals(
        _constraint.getAssignmentNormalizedScore(_node, _replica, _context), 1.0);
  }
}
| 9,761 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/constraints/TestTopStateMaxCapacityUsageInstanceConstraint.java
|
package org.apache.helix.controller.rebalancer.waged.constraints;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.controller.rebalancer.waged.model.AssignableNode;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterContext;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import static org.mockito.Matchers.anyMap;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestTopStateMaxCapacityUsageInstanceConstraint {
  private final SoftConstraint _constraint = new TopStateMaxCapacityUsageInstanceConstraint();
  private AssignableReplica _replica;
  private AssignableNode _node;
  private ClusterContext _context;

  @BeforeMethod
  public void setUp() {
    // Fresh mocks per test method to avoid stubbing leakage.
    _node = mock(AssignableNode.class);
    _replica = mock(AssignableReplica.class);
    _context = mock(ClusterContext.class);
  }

  @Test
  public void testGetNormalizedScore() {
    when(_replica.isReplicaTopState()).thenReturn(true);
    when(_node.getTopStateProjectedHighestUtilization(anyMap())).thenReturn(0.8f);
    when(_context.getEstimatedTopStateMaxUtilization()).thenReturn(1f);
    // Raw score mirrors the projected utilization; cast to float for an exact comparison.
    double rawScore = _constraint.getAssignmentScore(_node, _replica, _context);
    Assert.assertEquals((float) rawScore, 0.8f);
    Assert.assertTrue(
        _constraint.getAssignmentNormalizedScore(_node, _replica, _context) > 0.99);
  }
}
| 9,762 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/constraints/TestValidGroupTagConstraint.java
|
package org.apache.helix.controller.rebalancer.waged.constraints;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import static org.mockito.Mockito.when;
import java.util.Collections;
import org.apache.helix.controller.rebalancer.waged.model.AssignableNode;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterContext;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableSet;
public class TestValidGroupTagConstraint {
  private static final String TEST_TAG = "testTag";
  private final HardConstraint _constraint = new ValidGroupTagConstraint();
  private final AssignableReplica _replica = Mockito.mock(AssignableReplica.class);
  private final AssignableNode _node = Mockito.mock(AssignableNode.class);
  private final ClusterContext _context = Mockito.mock(ClusterContext.class);

  @Test
  public void testConstraintValid() {
    // The node carries the tag the resource requires -> assignment allowed.
    when(_replica.hasResourceInstanceGroupTag()).thenReturn(true);
    when(_replica.getResourceInstanceGroupTag()).thenReturn(TEST_TAG);
    when(_node.getInstanceTags()).thenReturn(ImmutableSet.of(TEST_TAG));
    Assert.assertTrue(_constraint.isAssignmentValid(_node, _replica, _context));
  }

  @Test
  public void testConstraintInValid() {
    // The node has no tags at all -> assignment rejected.
    when(_replica.hasResourceInstanceGroupTag()).thenReturn(true);
    when(_replica.getResourceInstanceGroupTag()).thenReturn(TEST_TAG);
    when(_node.getInstanceTags()).thenReturn(Collections.emptySet());
    Assert.assertFalse(_constraint.isAssignmentValid(_node, _replica, _context));
  }

  @Test
  public void testConstraintWhenReplicaHasNoTag() {
    // Untagged resources may land on any node.
    when(_replica.hasResourceInstanceGroupTag()).thenReturn(false);
    Assert.assertTrue(_constraint.isAssignmentValid(_node, _replica, _context));
  }
}
| 9,763 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/constraints/TestMaxCapacityUsageInstanceConstraint.java
|
package org.apache.helix.controller.rebalancer.waged.constraints;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.controller.rebalancer.waged.model.AssignableNode;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterContext;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import static org.mockito.Matchers.anyMap;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestMaxCapacityUsageInstanceConstraint {
  private final SoftConstraint _constraint = new MaxCapacityUsageInstanceConstraint();
  private AssignableReplica _replica;
  private AssignableNode _node;
  private ClusterContext _context;

  @BeforeMethod
  public void setUp() {
    // Fresh mocks per test method to avoid stubbing leakage.
    _node = mock(AssignableNode.class);
    _replica = mock(AssignableReplica.class);
    _context = mock(ClusterContext.class);
  }

  @Test
  public void testGetNormalizedScore() {
    when(_node.getGeneralProjectedHighestUtilization(anyMap())).thenReturn(0.8f);
    when(_context.getEstimatedMaxUtilization()).thenReturn(1f);
    // Raw score mirrors the projected utilization; cast to float for an exact comparison.
    double rawScore = _constraint.getAssignmentScore(_node, _replica, _context);
    Assert.assertEquals((float) rawScore, 0.8f);
    Assert.assertTrue(
        _constraint.getAssignmentNormalizedScore(_node, _replica, _context) > 0.99);
  }
}
| 9,764 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/constraints/TestConstraintBasedAlgorithm.java
|
package org.apache.helix.controller.rebalancer.waged.constraints;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.helix.HelixRebalanceException;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterModel;
import org.apache.helix.controller.rebalancer.waged.model.ClusterModelTestHelper;
import org.apache.helix.controller.rebalancer.waged.model.OptimalAssignment;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link ConstraintBasedAlgorithm}: hard-constraint rejection, successful
 * assignment, deterministic tie-breaking, and capacity-aware replica sorting.
 */
public class TestConstraintBasedAlgorithm {
  @Test(expectedExceptions = HelixRebalanceException.class)
  public void testCalculateNoValidAssignment() throws IOException, HelixRebalanceException {
    HardConstraint mockHardConstraint = mock(HardConstraint.class);
    SoftConstraint mockSoftConstraint = mock(SoftConstraint.class);
    // Every candidate node fails the hard constraint, so calculate() must throw.
    when(mockHardConstraint.isAssignmentValid(any(), any(), any())).thenReturn(false);
    when(mockSoftConstraint.getAssignmentNormalizedScore(any(), any(), any())).thenReturn(1.0);
    ConstraintBasedAlgorithm algorithm =
        new ConstraintBasedAlgorithm(ImmutableList.of(mockHardConstraint),
            ImmutableMap.of(mockSoftConstraint, 1f));
    ClusterModel clusterModel = new ClusterModelTestHelper().getDefaultClusterModel();
    algorithm.calculate(clusterModel);
  }

  @Test
  public void testCalculateWithValidAssignment() throws IOException, HelixRebalanceException {
    HardConstraint mockHardConstraint = mock(HardConstraint.class);
    SoftConstraint mockSoftConstraint = mock(SoftConstraint.class);
    // All assignments are valid and equally scored, so the calculation must succeed.
    when(mockHardConstraint.isAssignmentValid(any(), any(), any())).thenReturn(true);
    when(mockSoftConstraint.getAssignmentNormalizedScore(any(), any(), any())).thenReturn(1.0);
    ConstraintBasedAlgorithm algorithm =
        new ConstraintBasedAlgorithm(ImmutableList.of(mockHardConstraint),
            ImmutableMap.of(mockSoftConstraint, 1f));
    ClusterModel clusterModel = new ClusterModelTestHelper().getDefaultClusterModel();
    OptimalAssignment optimalAssignment = algorithm.calculate(clusterModel);
    Assert.assertFalse(optimalAssignment.hasAnyFailure());
  }

  @Test
  public void testCalculateScoreDeterminism() throws IOException, HelixRebalanceException {
    HardConstraint mockHardConstraint = mock(HardConstraint.class);
    SoftConstraint mockSoftConstraint = mock(SoftConstraint.class);
    when(mockHardConstraint.isAssignmentValid(any(), any(), any())).thenReturn(true);
    when(mockSoftConstraint.getAssignmentNormalizedScore(any(), any(), any())).thenReturn(1.0);
    ConstraintBasedAlgorithm algorithm =
        new ConstraintBasedAlgorithm(ImmutableList.of(mockHardConstraint),
            ImmutableMap.of(mockSoftConstraint, 1f));
    ClusterModel clusterModel = new ClusterModelTestHelper().getMultiNodeClusterModel();
    OptimalAssignment optimalAssignment = algorithm.calculate(clusterModel);
    // With identical scores everywhere, ties must break deterministically: every partition
    // ends up on the same single instance across runs.
    optimalAssignment.getOptimalResourceAssignment().values().forEach(
        resourceAssignment -> resourceAssignment.getMappedPartitions().forEach(partition -> {
          Assert.assertEquals(resourceAssignment.getReplicaMap(partition).keySet().size(), 1);
          Assert.assertTrue(resourceAssignment.getReplicaMap(partition)
              .containsKey(ClusterModelTestHelper.TEST_INSTANCE_ID_1));
        }));
  }

  // Capacity-related hard/soft constraints exercise the replica-sorting logic in
  // ConstraintBasedAlgorithm.
  @Test
  public void testSortingByResourceCapacity() throws IOException, HelixRebalanceException {
    HardConstraint nodeCapacityConstraint = new NodeCapacityConstraint();
    SoftConstraint soft1 = new MaxCapacityUsageInstanceConstraint();
    SoftConstraint soft2 = new InstancePartitionsCountConstraint();
    ConstraintBasedAlgorithm algorithm =
        new ConstraintBasedAlgorithm(ImmutableList.of(nodeCapacityConstraint),
            ImmutableMap.of(soft1, 1f, soft2, 1f));
    ClusterModel clusterModel = new ClusterModelTestHelper().getMultiNodeClusterModel();
    OptimalAssignment optimalAssignment = algorithm.calculate(clusterModel);
    Assert.assertFalse(optimalAssignment.hasAnyFailure());
  }

  // Negative case: the cluster lacks item1 capacity, so replica sorting should fail fast
  // with FAILED_TO_CALCULATE.
  @Test
  public void testSortingEarlyQuitLackCapacity() throws IOException, HelixRebalanceException {
    HardConstraint nodeCapacityConstraint = new NodeCapacityConstraint();
    SoftConstraint soft1 = new MaxCapacityUsageInstanceConstraint();
    SoftConstraint soft2 = new InstancePartitionsCountConstraint();
    ConstraintBasedAlgorithm algorithm =
        new ConstraintBasedAlgorithm(ImmutableList.of(nodeCapacityConstraint),
            ImmutableMap.of(soft1, 1f, soft2, 1f));
    ClusterModel clusterModel =
        new ClusterModelTestHelper().getMultiNodeClusterModelNegativeSetup();
    try {
      algorithm.calculate(clusterModel);
      // Without this, the test would silently pass when no exception is thrown.
      Assert.fail("Expected HelixRebalanceException: the cluster lacks item1 capacity.");
    } catch (HelixRebalanceException ex) {
      Assert.assertEquals(ex.getFailureType(), HelixRebalanceException.Type.FAILED_TO_CALCULATE);
      Assert.assertEquals(ex.getMessage(),
          "The cluster does not have enough item1 capacity for all partitions. Failure Type: FAILED_TO_CALCULATE");
    }
  }

  @Test
  public void testCalculateWithInvalidAssignmentForNodeCapacity() throws IOException {
    HardConstraint nodeCapacityConstraint = new NodeCapacityConstraint();
    SoftConstraint soft1 = new MaxCapacityUsageInstanceConstraint();
    SoftConstraint soft2 = new InstancePartitionsCountConstraint();
    ConstraintBasedAlgorithm algorithm =
        new ConstraintBasedAlgorithm(ImmutableList.of(nodeCapacityConstraint),
            ImmutableMap.of(soft1, 1f, soft2, 1f));
    ClusterModel clusterModel = new ClusterModelTestHelper().getMultiNodeClusterModel();
    // Increase the asked capacity of item3, which will trigger the capacity constraint to fail.
    Map<String, Set<AssignableReplica>> assignableReplicaMap = new HashMap<>(clusterModel.getAssignableReplicaMap());
    Set<AssignableReplica> resourceAssignableReplicas = assignableReplicaMap.get("Resource3");
    AssignableReplica replica = resourceAssignableReplicas.iterator().next();
    replica.getCapacity().put("item3", 40); // available: 30, requested: 40.
    try {
      algorithm.calculate(clusterModel);
      // Without this, the test would silently pass when no exception is thrown.
      Assert.fail("Expected HelixRebalanceException: item3 demand exceeds node capacity.");
    } catch (HelixRebalanceException ex) {
      Assert.assertEquals(ex.getFailureType(), HelixRebalanceException.Type.FAILED_TO_CALCULATE);
    }
  }
}
| 9,765 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/constraints/TestSamePartitionOnInstanceConstraint.java
|
package org.apache.helix.controller.rebalancer.waged.constraints;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import static org.mockito.Mockito.when;
import org.apache.helix.controller.rebalancer.waged.model.AssignableNode;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterContext;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableSet;
/**
 * Unit test for {@link SamePartitionOnInstanceConstraint}: a node must not be assigned a
 * replica of a partition it already hosts.
 */
public class TestSamePartitionOnInstanceConstraint {
  private static final String TEST_RESOURCE = "TestResource";
  private static final String TEST_PARTITION = TEST_RESOURCE + "0";
  private final AssignableReplica _replica = Mockito.mock(AssignableReplica.class);
  private final AssignableNode _node = Mockito.mock(AssignableNode.class);
  private final ClusterContext _context = Mockito.mock(ClusterContext.class);
  private final HardConstraint _constraint = new SamePartitionOnInstanceConstraint();

  @Test
  public void testConstraintValid() {
    // The node hosts a different partition of the same resource -> assignment allowed.
    stubReplicaIdentity();
    when(_node.getAssignedPartitionsByResource(TEST_RESOURCE))
        .thenReturn(ImmutableSet.of("dummy"));
    Assert.assertTrue(_constraint.isAssignmentValid(_node, _replica, _context));
  }

  @Test
  public void testConstraintInValid() {
    // The node already hosts this exact partition -> assignment rejected.
    stubReplicaIdentity();
    when(_node.getAssignedPartitionsByResource(TEST_RESOURCE))
        .thenReturn(ImmutableSet.of(TEST_PARTITION));
    Assert.assertFalse(_constraint.isAssignmentValid(_node, _replica, _context));
  }

  // Common stubbing: the replica under test belongs to TEST_RESOURCE / TEST_PARTITION.
  private void stubReplicaIdentity() {
    when(_replica.getResourceName()).thenReturn(TEST_RESOURCE);
    when(_replica.getPartitionName()).thenReturn(TEST_PARTITION);
  }
}
| 9,766 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/constraints/TestInstancePartitionsCountConstraint.java
|
package org.apache.helix.controller.rebalancer.waged.constraints;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import static org.mockito.Mockito.when;
import org.apache.helix.controller.rebalancer.waged.model.AssignableNode;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterContext;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Unit test for {@link InstancePartitionsCountConstraint}: the fewer replicas a node
 * already hosts, the higher its normalized score.
 */
public class TestInstancePartitionsCountConstraint {
  private final AssignableReplica _replica = Mockito.mock(AssignableReplica.class);
  private final AssignableNode _node = Mockito.mock(AssignableNode.class);
  private final ClusterContext _context = Mockito.mock(ClusterContext.class);
  private final SoftConstraint _constraint = new InstancePartitionsCountConstraint();

  @Test
  public void testWhenInstanceIsIdle() {
    // An empty node gets the best possible score.
    when(_node.getAssignedReplicaCount()).thenReturn(0);
    Assert.assertEquals(
        _constraint.getAssignmentNormalizedScore(_node, _replica, _context), 1.0);
  }

  @Test
  public void testWhenInstanceIsFull() {
    // A node at the estimated max partition count scores 0.5.
    when(_node.getAssignedReplicaCount()).thenReturn(10);
    when(_context.getEstimatedMaxPartitionCount()).thenReturn(10);
    Assert.assertEquals(
        _constraint.getAssignmentNormalizedScore(_node, _replica, _context), 0.5);
  }

  @Test
  public void testWhenInstanceHalfOccupied() {
    // A half-occupied node still scores close to the maximum.
    when(_node.getAssignedReplicaCount()).thenReturn(10);
    when(_context.getEstimatedMaxPartitionCount()).thenReturn(20);
    Assert.assertTrue(
        _constraint.getAssignmentNormalizedScore(_node, _replica, _context) > 0.99);
  }
}
| 9,767 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/constraints/TestResourceTopStateAntiAffinityConstraint.java
|
package org.apache.helix.controller.rebalancer.waged.constraints;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import static org.mockito.Mockito.verifyZeroInteractions;
import static org.mockito.Mockito.when;
import org.apache.helix.controller.rebalancer.waged.model.AssignableNode;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterContext;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
/**
 * Unit test for {@link ResourceTopStateAntiAffinityConstraint}: top-state replicas are
 * steered away from nodes that already host many top-state partitions, while
 * non-top-state replicas are scored without consulting node or cluster state.
 */
public class TestResourceTopStateAntiAffinityConstraint {
  private final SoftConstraint _constraint = new ResourceTopStateAntiAffinityConstraint();
  private AssignableReplica _replica;
  private AssignableNode _node;
  private ClusterContext _context;

  @BeforeMethod
  public void init() {
    // Recreate the mocks before every test so stubbings never leak between methods.
    _replica = Mockito.mock(AssignableReplica.class);
    _node = Mockito.mock(AssignableNode.class);
    _context = Mockito.mock(ClusterContext.class);
  }

  @Test
  public void testGetAssignmentScoreWhenReplicaNotTopState() {
    when(_replica.isReplicaTopState()).thenReturn(false);
    double rawScore = _constraint.getAssignmentScore(_node, _replica, _context);
    double normalized =
        _constraint.getAssignmentNormalizedScore(_node, _replica, _context);
    Assert.assertEquals(rawScore, 0.0);
    Assert.assertEquals(normalized, 1.0);
    // A non-top-state replica must be scored without touching node or cluster state.
    verifyZeroInteractions(_node);
    verifyZeroInteractions(_context);
  }

  @Test
  public void testGetAssignmentScoreWhenReplicaIsTopStateHeavyLoad() {
    // The node already hosts as many top-state partitions as the estimated maximum.
    when(_replica.isReplicaTopState()).thenReturn(true);
    when(_node.getAssignedTopStatePartitionsCount()).thenReturn(20);
    when(_context.getEstimatedMaxTopStateCount()).thenReturn(20);
    double rawScore = _constraint.getAssignmentScore(_node, _replica, _context);
    double normalized =
        _constraint.getAssignmentNormalizedScore(_node, _replica, _context);
    Assert.assertEquals(rawScore, 1.0);
    Assert.assertEquals(normalized, 0.5);
  }

  @Test
  public void testGetAssignmentScoreWhenReplicaIsTopStateLightLoad() {
    // The node hosts no top-state partitions yet, so it is the preferred target.
    when(_replica.isReplicaTopState()).thenReturn(true);
    when(_node.getAssignedTopStatePartitionsCount()).thenReturn(0);
    when(_context.getEstimatedMaxTopStateCount()).thenReturn(20);
    double rawScore = _constraint.getAssignmentScore(_node, _replica, _context);
    double normalized =
        _constraint.getAssignmentNormalizedScore(_node, _replica, _context);
    Assert.assertEquals(rawScore, 0.0);
    Assert.assertEquals(normalized, 1.0);
  }
}
| 9,768 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/constraints/MockRebalanceAlgorithm.java
|
package org.apache.helix.controller.rebalancer.waged.constraints;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.controller.rebalancer.waged.RebalanceAlgorithm;
import org.apache.helix.controller.rebalancer.waged.model.AssignableNode;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterModel;
import org.apache.helix.controller.rebalancer.waged.model.OptimalAssignment;
import org.apache.helix.model.Partition;
import org.apache.helix.model.ResourceAssignment;
import org.mockito.Mockito;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import static org.mockito.Mockito.when;
/**
* A mock up rebalance algorithm for unit test.
* Note that the mock algorithm won't propagate the existing assignment to the output as a real
* algorithm will do. This is for the convenience of testing.
*/
/**
 * A mock rebalance algorithm for unit tests. Instead of running a real optimization it
 * "deals" replicas round-robin across the sorted nodes. Note that, unlike a real
 * algorithm, it does not propagate the existing assignment into the output — this is
 * intentional, for the convenience of testing.
 */
public class MockRebalanceAlgorithm implements RebalanceAlgorithm {
  Map<String, ResourceAssignment> _resultHistory = Collections.emptyMap();

  @Override
  public OptimalAssignment calculate(ClusterModel clusterModel) {
    // If no predefined rebalance result setup, do card dealing.
    Map<String, ResourceAssignment> assignments = new HashMap<>();
    // The node cursor persists across resources and wraps around when exhausted,
    // producing a round-robin distribution over the sorted node list.
    Iterator<AssignableNode> nodeCursor = sortedNodeIterator(clusterModel);
    for (String resource : clusterModel.getAssignableReplicaMap().keySet()) {
      Iterator<AssignableReplica> replicaCursor =
          clusterModel.getAssignableReplicaMap().get(resource).stream().sorted().iterator();
      while (replicaCursor.hasNext()) {
        AssignableReplica replica = replicaCursor.next();
        if (!nodeCursor.hasNext()) {
          nodeCursor = sortedNodeIterator(clusterModel);
        }
        AssignableNode node = nodeCursor.next();
        // Record the placement in the per-resource assignment.
        ResourceAssignment assignment =
            assignments.computeIfAbsent(replica.getResourceName(), ResourceAssignment::new);
        Partition partition = new Partition(replica.getPartitionName());
        if (assignment.getReplicaMap(partition).isEmpty()) {
          assignment.addReplicaMap(partition, new HashMap<>());
        }
        assignment.getReplicaMap(partition).put(node.getInstanceName(), replica.getReplicaState());
      }
    }
    _resultHistory = assignments;
    // Mock the return value for supporting test.
    OptimalAssignment optimalAssignment = Mockito.mock(OptimalAssignment.class);
    when(optimalAssignment.getOptimalResourceAssignment()).thenReturn(assignments);
    return optimalAssignment;
  }

  /** @return a snapshot copy of the most recently computed assignment. */
  public Map<String, ResourceAssignment> getRebalanceResult() {
    return new HashMap<>(_resultHistory);
  }

  // Helper: a fresh iterator over the cluster's nodes in their natural sort order.
  private static Iterator<AssignableNode> sortedNodeIterator(ClusterModel clusterModel) {
    return clusterModel.getAssignableNodes().values().stream().sorted().iterator();
  }
}
| 9,769 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/constraints/TestNodeMaxPartitionLimitConstraint.java
|
package org.apache.helix.controller.rebalancer.waged.constraints;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import static org.mockito.Mockito.when;
import java.util.Collections;
import org.apache.helix.controller.rebalancer.waged.model.AssignableNode;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterContext;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Unit test for {@link NodeMaxPartitionLimitConstraint}: an assignment is only valid while
 * the node stays under its own partition limit and the resource's per-instance limit.
 */
public class TestNodeMaxPartitionLimitConstraint {
  private static final String TEST_RESOURCE = "TestResource";
  private final AssignableReplica _replica = Mockito.mock(AssignableReplica.class);
  private final AssignableNode _node = Mockito.mock(AssignableNode.class);
  private final ClusterContext _context = Mockito.mock(ClusterContext.class);
  private final HardConstraint _constraint = new NodeMaxPartitionLimitConstraint();

  @Test
  public void testConstraintValid() {
    // An empty node with generous limits accepts the replica.
    when(_node.getAssignedReplicaCount()).thenReturn(0);
    when(_node.getMaxPartition()).thenReturn(10);
    when(_node.getAssignedPartitionsByResource(TEST_RESOURCE))
        .thenReturn(Collections.emptySet());
    when(_replica.getResourceMaxPartitionsPerInstance()).thenReturn(5);
    Assert.assertTrue(_constraint.isAssignmentValid(_node, _replica, _context));
  }

  @Test
  public void testConstraintInvalid() {
    // The node already exceeds its max-partition limit, so the replica is rejected.
    when(_node.getAssignedReplicaCount()).thenReturn(10);
    when(_node.getMaxPartition()).thenReturn(5);
    Assert.assertFalse(_constraint.isAssignmentValid(_node, _replica, _context));
  }
}
| 9,770 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/constraints/TestPartitionMovementConstraint.java
|
package org.apache.helix.controller.rebalancer.waged.constraints;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.controller.rebalancer.waged.model.AssignableNode;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterContext;
import org.apache.helix.model.Partition;
import org.apache.helix.model.ResourceAssignment;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for the two movement-related soft constraints. The expectations encoded in the
 * assertions below are: {@link PartitionMovementConstraint} rewards matching the current
 * best-possible assignment (falling back to the baseline when best-possible is missing),
 * while {@link BaselineInfluenceConstraint} rewards matching the baseline. A full match of
 * instance and state scores 1.0; a partial match (instance or state only) scores 0.5.
 * NOTE(review): these semantics are inferred from the assertions here — confirm against the
 * constraint implementations.
 */
public class TestPartitionMovementConstraint {
  private static final String INSTANCE = "TestInstance";
  private static final String RESOURCE = "TestResource";
  private static final String PARTITION = "TestPartition";
  private AssignableNode _testNode;
  private AssignableReplica _testReplica;
  private ClusterContext _clusterContext;
  private SoftConstraint _baselineInfluenceConstraint = new BaselineInfluenceConstraint();
  private SoftConstraint _partitionMovementConstraint = new PartitionMovementConstraint();

  @BeforeMethod
  public void init() {
    // Fresh mocks per test. Every test scores the same replica/node pair and only varies
    // the baseline / best-possible assignments stubbed on the cluster context.
    _testNode = mock(AssignableNode.class);
    _testReplica = mock(AssignableReplica.class);
    _clusterContext = mock(ClusterContext.class);
    when(_testReplica.getResourceName()).thenReturn(RESOURCE);
    when(_testReplica.getPartitionName()).thenReturn(PARTITION);
    when(_testNode.getInstanceName()).thenReturn(INSTANCE);
  }

  @Test
  public void testGetAssignmentScoreWhenBestPossibleBaselineMissing() {
    // With neither a baseline nor a best-possible assignment, both constraints have
    // nothing to match against and score 0.
    when(_clusterContext.getBaselineAssignment()).thenReturn(Collections.emptyMap());
    when(_clusterContext.getBestPossibleAssignment()).thenReturn(Collections.emptyMap());
    verifyScore(_baselineInfluenceConstraint, _testNode, _testReplica, _clusterContext, 0.0, 0.0);
    verifyScore(_partitionMovementConstraint, _testNode, _testReplica, _clusterContext, 0.0, 0.0);
  }

  @Test
  public void testGetAssignmentScoreWhenBestPossibleMissing() {
    // Only the baseline exists: PartitionMovementConstraint falls back to the baseline,
    // while BaselineInfluenceConstraint scores 0.
    ResourceAssignment mockResourceAssignment = mock(ResourceAssignment.class);
    when(mockResourceAssignment.getReplicaMap(new Partition(PARTITION)))
        .thenReturn(ImmutableMap.of(INSTANCE, "Master"));
    Map<String, ResourceAssignment> assignmentMap =
        ImmutableMap.of(RESOURCE, mockResourceAssignment);
    when(_clusterContext.getBaselineAssignment()).thenReturn(assignmentMap);
    when(_clusterContext.getBestPossibleAssignment()).thenReturn(Collections.emptyMap());
    // when the calculated states are both equal to the replica's current state
    when(_testReplica.getReplicaState()).thenReturn("Master");
    verifyScore(_baselineInfluenceConstraint, _testNode, _testReplica, _clusterContext, 0.0, 0.0);
    verifyScore(_partitionMovementConstraint, _testNode, _testReplica, _clusterContext, 1.0, 1.0);
    // when the calculated states are both different from the replica's current state
    // (same instance but different state -> half credit for the allocation match only)
    when(_testReplica.getReplicaState()).thenReturn("Slave");
    verifyScore(_baselineInfluenceConstraint, _testNode, _testReplica, _clusterContext, 0.0, 0.0);
    verifyScore(_partitionMovementConstraint, _testNode, _testReplica, _clusterContext, 0.5, 0.5);
  }

  @Test
  public void testGetAssignmentScore() {
    // Baseline and best-possible disagree:
    //   best-possible: A=Master, B=Slave;   baseline: A=Slave, C=Master.
    // Each scenario below picks a node/state combination and checks both constraints.
    String instanceNameA = INSTANCE + "A";
    String instanceNameB = INSTANCE + "B";
    String instanceNameC = INSTANCE + "C";
    AssignableNode testAssignableNode = mock(AssignableNode.class);
    ResourceAssignment bestPossibleResourceAssignment = mock(ResourceAssignment.class);
    when(bestPossibleResourceAssignment.getReplicaMap(new Partition(PARTITION)))
        .thenReturn(ImmutableMap.of(instanceNameA, "Master", instanceNameB, "Slave"));
    when(_clusterContext.getBestPossibleAssignment())
        .thenReturn(ImmutableMap.of(RESOURCE, bestPossibleResourceAssignment));
    ResourceAssignment baselineResourceAssignment = mock(ResourceAssignment.class);
    when(baselineResourceAssignment.getReplicaMap(new Partition(PARTITION)))
        .thenReturn(ImmutableMap.of(instanceNameA, "Slave", instanceNameC, "Master"));
    when(_clusterContext.getBaselineAssignment())
        .thenReturn(ImmutableMap.of(RESOURCE, baselineResourceAssignment));
    // when the replica's state matches with best possible, allocation matches with baseline
    when(testAssignableNode.getInstanceName()).thenReturn(instanceNameA);
    when(_testReplica.getReplicaState()).thenReturn("Master");
    verifyScore(_baselineInfluenceConstraint, testAssignableNode, _testReplica, _clusterContext,
        0.5, 0.5);
    verifyScore(_partitionMovementConstraint, testAssignableNode, _testReplica, _clusterContext,
        1.0, 1.0);
    // when the replica's allocation matches with best possible only
    when(testAssignableNode.getInstanceName()).thenReturn(instanceNameB);
    when(_testReplica.getReplicaState()).thenReturn("Master");
    verifyScore(_baselineInfluenceConstraint, testAssignableNode, _testReplica, _clusterContext,
        0.0, 0.0);
    verifyScore(_partitionMovementConstraint, testAssignableNode, _testReplica, _clusterContext,
        0.5, 0.5);
    // when the replica's state matches with baseline only
    when(testAssignableNode.getInstanceName()).thenReturn(instanceNameC);
    when(_testReplica.getReplicaState()).thenReturn("Master");
    verifyScore(_baselineInfluenceConstraint, testAssignableNode, _testReplica, _clusterContext,
        1.0, 1.0);
    verifyScore(_partitionMovementConstraint, testAssignableNode, _testReplica, _clusterContext,
        0.0, 0.0);
    // when the replica's allocation matches with baseline only
    when(testAssignableNode.getInstanceName()).thenReturn(instanceNameC);
    when(_testReplica.getReplicaState()).thenReturn("Slave");
    verifyScore(_baselineInfluenceConstraint, testAssignableNode, _testReplica, _clusterContext,
        0.5, 0.5);
    verifyScore(_partitionMovementConstraint, testAssignableNode, _testReplica, _clusterContext,
        0.0, 0.0);
  }

  // Asserts both the raw and the normalized score of a constraint for the given
  // node/replica/context triple.
  private static void verifyScore(SoftConstraint constraint, AssignableNode node,
      AssignableReplica replica, ClusterContext clusterContext, double expectedScore,
      double expectedNormalizedScore) {
    double score = constraint.getAssignmentScore(node, replica, clusterContext);
    double normalizedScore = constraint.getAssignmentNormalizedScore(node, replica, clusterContext);
    Assert.assertEquals(score, expectedScore);
    Assert.assertEquals(normalizedScore, expectedNormalizedScore);
  }
}
| 9,771 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/constraints/TestPartitionActivateConstraint.java
|
package org.apache.helix.controller.rebalancer.waged.constraints;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import static org.mockito.Mockito.when;
import java.util.Collections;
import org.apache.helix.controller.rebalancer.waged.model.AssignableNode;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterContext;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
/**
 * Unit test for {@link ReplicaActivateConstraint}: a replica must not be assigned to a node
 * where its partition is listed in the node's disabled-partitions map for the replica's
 * resource.
 */
public class TestPartitionActivateConstraint {
  private static final String TEST_PARTITION = "TestPartition";
  private static final String TEST_RESOURCE = "TestResource";
  private final AssignableReplica _testReplica = Mockito.mock(AssignableReplica.class);
  private final AssignableNode _testNode = Mockito.mock(AssignableNode.class);
  private final ClusterContext _clusterContext = Mockito.mock(ClusterContext.class);
  private final HardConstraint _constraint = new ReplicaActivateConstraint();

  @Test
  public void testConstraintValid() {
    when(_testReplica.getResourceName()).thenReturn(TEST_RESOURCE);
    when(_testReplica.getPartitionName()).thenReturn(TEST_PARTITION);
    // No partition of this resource is disabled on the node -> assignment allowed.
    // (The map is keyed by resource name; the original test keyed it by partition name,
    // which the constraint's lookup by resource never consults.)
    when(_testNode.getDisabledPartitionsMap())
        .thenReturn(ImmutableMap.of(TEST_RESOURCE, Collections.emptyList()));
    Assert.assertTrue(_constraint.isAssignmentValid(_testNode, _testReplica, _clusterContext));
    // Some other partition of the resource is disabled, but not this replica's.
    when(_testNode.getDisabledPartitionsMap())
        .thenReturn(ImmutableMap.of(TEST_RESOURCE, ImmutableList.of("dummy")));
    Assert.assertTrue(_constraint.isAssignmentValid(_testNode, _testReplica, _clusterContext));
  }

  @Test
  public void testConstraintInvalidWhenReplicaIsDisabled() {
    when(_testReplica.getResourceName()).thenReturn(TEST_RESOURCE);
    when(_testReplica.getPartitionName()).thenReturn(TEST_PARTITION);
    // The replica's own partition is disabled under its resource -> assignment rejected.
    // (The original test asserted assertTrue despite its name, and keyed the map by
    // partition name so the disabled entry was never found.)
    when(_testNode.getDisabledPartitionsMap())
        .thenReturn(ImmutableMap.of(TEST_RESOURCE, ImmutableList.of(TEST_PARTITION)));
    Assert.assertFalse(_constraint.isAssignmentValid(_testNode, _testReplica, _clusterContext));
  }
}
| 9,772 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/constraints/TestSoftConstraintNormalizeFunction.java
|
package org.apache.helix.controller.rebalancer.waged.constraints;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.controller.rebalancer.waged.model.AssignableNode;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterContext;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Verifies that the default normalize function inherited from {@link SoftConstraint}
 * maps every raw score in [minScore, maxScore] into the [0, 1] range.
 */
public class TestSoftConstraintNormalizeFunction {
  @Test
  public void testDefaultNormalizeFunction() {
    final int upperBound = 100;
    final int lowerBound = 0;
    // Anonymous constraint whose raw score is irrelevant; only the inherited
    // normalize function is under test.
    SoftConstraint constraintUnderTest = new SoftConstraint(upperBound, lowerBound) {
      @Override
      protected double getAssignmentScore(AssignableNode node, AssignableReplica replica,
          ClusterContext clusterContext) {
        return 0;
      }
    };
    // Every integer raw score in range must scale into [0, 1].
    for (int rawScore = lowerBound; rawScore <= upperBound; rawScore++) {
      double scaled = constraintUnderTest.getNormalizeFunction().scale(rawScore);
      Assert.assertTrue(scaled >= 0 && scaled <= 1,
          String.format("input: %s, output: %s", rawScore, scaled));
    }
  }
}
| 9,773 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/constraints/TestReplicaActivateConstraint.java
|
package org.apache.helix.controller.rebalancer.waged.constraints;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.controller.rebalancer.waged.model.AssignableNode;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterContext;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.*;
/**
 * Unit tests for {@link ReplicaActivateConstraint}: assignment is valid unless the
 * replica's partition is listed as disabled for its resource on the target node.
 */
public class TestReplicaActivateConstraint {
  private static final String TEST_RESOURCE = "testResource";
  private static final String TEST_PARTITION = "testPartition";
  private final AssignableReplica _testReplica = Mockito.mock(AssignableReplica.class);
  private final AssignableNode _testNode = Mockito.mock(AssignableNode.class);
  private final ClusterContext _clusterContext = Mockito.mock(ClusterContext.class);
  // Renamed from "_faultZoneAwareConstraint": the instance under test is a
  // ReplicaActivateConstraint, not a FaultZoneAwareConstraint.
  private final HardConstraint _replicaActivateConstraint = new ReplicaActivateConstraint();

  @Test
  public void validWhenEmptyDisabledReplicaMap() {
    // The resource has an (empty) disabled list -> nothing is disabled, so valid.
    Map<String, List<String>> disabledReplicaMap = new HashMap<>();
    disabledReplicaMap.put(TEST_RESOURCE, new ArrayList<>());
    when(_testReplica.getResourceName()).thenReturn(TEST_RESOURCE);
    when(_testReplica.getPartitionName()).thenReturn(TEST_PARTITION);
    when(_testNode.getDisabledPartitionsMap()).thenReturn(disabledReplicaMap);
    Assert.assertTrue(
        _replicaActivateConstraint.isAssignmentValid(_testNode, _testReplica, _clusterContext));
  }

  @Test
  public void invalidWhenPartitionIsDisabled() {
    // The exact partition is disabled for its resource on this node -> invalid.
    Map<String, List<String>> disabledReplicaMap = new HashMap<>();
    disabledReplicaMap.put(TEST_RESOURCE, Collections.singletonList(TEST_PARTITION));
    when(_testReplica.getResourceName()).thenReturn(TEST_RESOURCE);
    when(_testReplica.getPartitionName()).thenReturn(TEST_PARTITION);
    when(_testNode.getDisabledPartitionsMap()).thenReturn(disabledReplicaMap);
    Assert.assertFalse(
        _replicaActivateConstraint.isAssignmentValid(_testNode, _testReplica, _clusterContext));
  }
}
| 9,774 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/constraints/TestFaultZoneAwareConstraint.java
|
package org.apache.helix.controller.rebalancer.waged.constraints;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.google.common.collect.ImmutableSet;
import java.util.Collections;
import org.apache.helix.controller.rebalancer.waged.model.AssignableNode;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterContext;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import static org.mockito.Mockito.*;
/**
 * Unit tests for {@link FaultZoneAwareConstraint}: an assignment is rejected when the
 * node's fault zone already hosts a replica of the same partition; nodes without a
 * fault zone are always accepted.
 */
public class TestFaultZoneAwareConstraint {
  private static final String TEST_PARTITION = "testPartition";
  private static final String TEST_ZONE = "testZone";
  private static final String TEST_RESOURCE = "testResource";
  private final AssignableReplica _testReplica = Mockito.mock(AssignableReplica.class);
  private final AssignableNode _testNode = Mockito.mock(AssignableNode.class);
  private final ClusterContext _clusterContext = Mockito.mock(ClusterContext.class);
  private final HardConstraint _constraint = new FaultZoneAwareConstraint();

  /** Common stubs: the replica's identity and the node's fault zone. */
  @BeforeMethod
  public void init() {
    when(_testReplica.getResourceName()).thenReturn(TEST_RESOURCE);
    when(_testReplica.getPartitionName()).thenReturn(TEST_PARTITION);
    when(_testNode.getFaultZone()).thenReturn(TEST_ZONE);
  }

  @Test
  public void inValidWhenFaultZoneAlreadyAssigned() {
    // Another replica of the same partition already lives in this fault zone.
    when(_testNode.hasFaultZone()).thenReturn(true);
    when(_clusterContext.getPartitionsForResourceAndFaultZone(TEST_RESOURCE, TEST_ZONE))
        .thenReturn(ImmutableSet.of(TEST_PARTITION));
    Assert.assertFalse(
        _constraint.isAssignmentValid(_testNode, _testReplica, _clusterContext));
  }

  @Test
  public void validWhenEmptyAssignment() {
    // Nothing assigned to this zone yet -> the assignment is allowed.
    when(_testNode.hasFaultZone()).thenReturn(true);
    when(_clusterContext.getPartitionsForResourceAndFaultZone(TEST_RESOURCE, TEST_ZONE))
        .thenReturn(Collections.emptySet());
    Assert.assertTrue(
        _constraint.isAssignmentValid(_testNode, _testReplica, _clusterContext));
  }

  @Test
  public void validWhenNoFaultZone() {
    // A node with no fault zone cannot conflict, so it is always accepted.
    when(_testNode.hasFaultZone()).thenReturn(false);
    Assert.assertTrue(
        _constraint.isAssignmentValid(_testNode, _testReplica, _clusterContext));
  }
}
| 9,775 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/constraints/TestNodeCapacityConstraint.java
|
package org.apache.helix.controller.rebalancer.waged.constraints;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import static org.mockito.Mockito.when;
import org.apache.helix.controller.rebalancer.waged.model.AssignableNode;
import org.apache.helix.controller.rebalancer.waged.model.AssignableReplica;
import org.apache.helix.controller.rebalancer.waged.model.ClusterContext;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
/**
 * Unit tests for {@link NodeCapacityConstraint}: a replica fits on a node only when the
 * node's remaining capacity covers the replica's demand for every capacity key.
 */
public class TestNodeCapacityConstraint {
  private static final String CAPACITY_KEY = "testKey";
  private final AssignableReplica _testReplica = Mockito.mock(AssignableReplica.class);
  private final AssignableNode _testNode = Mockito.mock(AssignableNode.class);
  private final ClusterContext _clusterContext = Mockito.mock(ClusterContext.class);
  private final HardConstraint _constraint = new NodeCapacityConstraint();

  @Test
  public void testConstraintValidWhenNodeHasEnoughSpace() {
    // Remaining capacity (10) exceeds the replica's demand (5) -> valid.
    when(_testNode.getRemainingCapacity()).thenReturn(ImmutableMap.of(CAPACITY_KEY, 10));
    when(_testReplica.getCapacity()).thenReturn(ImmutableMap.of(CAPACITY_KEY, 5));
    Assert.assertTrue(_constraint.isAssignmentValid(_testNode, _testReplica, _clusterContext));
  }

  @Test
  public void testConstraintInValidWhenNodeHasInsufficientSpace() {
    // Remaining capacity (1) is below the replica's demand (5) -> invalid.
    when(_testNode.getRemainingCapacity()).thenReturn(ImmutableMap.of(CAPACITY_KEY, 1));
    when(_testReplica.getCapacity()).thenReturn(ImmutableMap.of(CAPACITY_KEY, 5));
    Assert.assertFalse(_constraint.isAssignmentValid(_testNode, _testReplica, _clusterContext));
  }
}
| 9,776 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/model/TestAssignableReplica.java
|
package org.apache.helix.controller.rebalancer.waged.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.HelixException;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.model.StateModelDefinition;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Unit tests for {@link AssignableReplica} construction: capacity resolution from
 * ResourceConfig vs. ClusterConfig defaults, instance-group-tag / max-partitions
 * propagation, and failure on incomplete partition weight configuration.
 */
public class TestAssignableReplica {
  // Shared fixture names and state/priority constants used across the tests below.
  String resourceName = "Resource";
  String partitionNamePrefix = "partition";
  String masterState = "Master";
  int masterPriority = StateModelDefinition.TOP_STATE_PRIORITY;
  String slaveState = "Slave";
  int slavePriority = 2;

  /**
   * Builds replicas from a ResourceConfig and verifies that identity, state, priority,
   * per-partition capacity, group tag and max-partitions are all read from the config.
   */
  @Test
  public void testConstructReplicaWithResourceConfig() throws IOException {
    // Init assignable replica with a basic config object
    Map<String, Integer> capacityDataMapResource1 = new HashMap<>();
    capacityDataMapResource1.put("item1", 3);
    capacityDataMapResource1.put("item2", 6);
    ResourceConfig testResourceConfigResource = new ResourceConfig(resourceName);
    // DEFAULT_PARTITION_KEY applies this capacity to partitions without their own entry.
    testResourceConfigResource.setPartitionCapacityMap(
        Collections.singletonMap(ResourceConfig.DEFAULT_PARTITION_KEY, capacityDataMapResource1));
    ClusterConfig testClusterConfig = new ClusterConfig("testCluster");
    testClusterConfig.setInstanceCapacityKeys(new ArrayList<>(capacityDataMapResource1.keySet()));
    String partitionName = partitionNamePrefix + 1;
    AssignableReplica replica =
        new AssignableReplica(testClusterConfig, testResourceConfigResource, partitionName,
            masterState, masterPriority);
    Assert.assertEquals(replica.getResourceName(), resourceName);
    Assert.assertEquals(replica.getPartitionName(), partitionName);
    Assert.assertEquals(replica.getReplicaState(), masterState);
    Assert.assertEquals(replica.getStatePriority(), masterPriority);
    Assert.assertTrue(replica.isReplicaTopState());
    Assert.assertEquals(replica.getCapacity(), capacityDataMapResource1);
    // Group tag / max-partitions were never set, so the defaults are expected.
    Assert.assertEquals(replica.getResourceInstanceGroupTag(), null);
    Assert.assertEquals(replica.getResourceMaxPartitionsPerInstance(), Integer.MAX_VALUE);
    // Modify the config and initialize more replicas.
    // 1. update capacity
    Map<String, Integer> capacityDataMapResource2 = new HashMap<>();
    capacityDataMapResource2.put("item1", 5);
    capacityDataMapResource2.put("item2", 10);
    // We should not directly modify the contents returned by getPartitionCapacityMap()
    // This will not guard against the modification of the kv pairs in the inner maps as this
    // is not creating a deepCopy but will ensure we don't override top level kv pairs in
    // testResourceConfigResource.
    Map<String, Map<String, Integer>> capacityMap =
        new HashMap<>(testResourceConfigResource.getPartitionCapacityMap());
    String partitionName2 = partitionNamePrefix + 2;
    capacityMap.put(partitionName2, capacityDataMapResource2);
    testResourceConfigResource.setPartitionCapacityMap(capacityMap);
    // 2. update instance group tag and max partitions per instance
    String group = "DEFAULT";
    int maxPartition = 10;
    testResourceConfigResource.getRecord()
        .setSimpleField(ResourceConfig.ResourceConfigProperty.INSTANCE_GROUP_TAG.toString(), group);
    testResourceConfigResource.getRecord()
        .setIntField(ResourceConfig.ResourceConfigProperty.MAX_PARTITIONS_PER_INSTANCE.name(),
            maxPartition);
    // Partition 1 still resolves the default capacity but picks up the new tag/limit.
    replica = new AssignableReplica(testClusterConfig, testResourceConfigResource, partitionName,
        masterState, masterPriority);
    Assert.assertEquals(replica.getCapacity(), capacityDataMapResource1);
    Assert.assertEquals(replica.getResourceInstanceGroupTag(), group);
    Assert.assertEquals(replica.getResourceMaxPartitionsPerInstance(), maxPartition);
    // Partition 2 has its own capacity entry and is built as a non-top-state replica.
    replica = new AssignableReplica(testClusterConfig, testResourceConfigResource, partitionName2,
        slaveState, slavePriority);
    Assert.assertEquals(replica.getResourceName(), resourceName);
    Assert.assertEquals(replica.getPartitionName(), partitionName2);
    Assert.assertEquals(replica.getReplicaState(), slaveState);
    Assert.assertEquals(replica.getStatePriority(), slavePriority);
    Assert.assertFalse(replica.isReplicaTopState());
    Assert.assertEquals(replica.getCapacity(), capacityDataMapResource2);
    Assert.assertEquals(replica.getResourceInstanceGroupTag(), group);
    Assert.assertEquals(replica.getResourceMaxPartitionsPerInstance(), maxPartition);
  }

  /**
   * Tests that if default partition weight map is configured in ClusterConfig and NOT in
   * ResourceConfig. AssignableReplica actually will get the default weight from ClusterConfig
   * even though it's not set in ResourceConfig.
   */
  @Test
  public void testDefaultPartitionWeight() {
    Map<String, Integer> defaultWeightDataMapResource = new HashMap<>();
    defaultWeightDataMapResource.put("item1", 3);
    defaultWeightDataMapResource.put("item2", 6);
    ClusterConfig testClusterConfig = new ClusterConfig("testClusterConfigId");
    testClusterConfig
        .setInstanceCapacityKeys(new ArrayList<>(defaultWeightDataMapResource.keySet()));
    testClusterConfig.setDefaultPartitionWeightMap(defaultWeightDataMapResource);
    // Note: the ResourceConfig deliberately carries no capacity configuration.
    ResourceConfig testResourceConfigResource = new ResourceConfig(resourceName);
    AssignableReplica replica = new AssignableReplica(testClusterConfig, testResourceConfigResource,
        partitionNamePrefix + 1, masterState, masterPriority);
    Assert.assertEquals(replica.getCapacity().size(), defaultWeightDataMapResource.size());
    Assert.assertEquals(replica.getCapacity(), defaultWeightDataMapResource);
  }

  /**
   * Verifies that construction fails (HelixException) when a required capacity key has no
   * weight anywhere, and succeeds once ClusterConfig supplies a default for it; keys that
   * are not required must be dropped from the resolved capacity.
   */
  @Test
  public void testIncompletePartitionWeightConfig() throws IOException {
    // Init assignable replica with a basic config object
    Map<String, Integer> capacityDataMapResource = new HashMap<>();
    capacityDataMapResource.put("item1", 3);
    capacityDataMapResource.put("item2", 6);
    ResourceConfig testResourceConfigResource = new ResourceConfig(resourceName);
    testResourceConfigResource.setPartitionCapacityMap(
        Collections.singletonMap(ResourceConfig.DEFAULT_PARTITION_KEY, capacityDataMapResource));
    ClusterConfig testClusterConfig = new ClusterConfig("testCluster");
    List<String> requiredCapacityKeys = new ArrayList<>(capacityDataMapResource.keySet());
    // Remove one required key, so it becomes a unnecessary item.
    String unnecessaryCapacityKey = requiredCapacityKeys.remove(0);
    // Add one new required key, so it does not exist in the resource config.
    String newCapacityKey = "newCapacityKey";
    requiredCapacityKeys.add(newCapacityKey);
    testClusterConfig.setInstanceCapacityKeys(requiredCapacityKeys);
    try {
      new AssignableReplica(testClusterConfig, testResourceConfigResource,
          partitionNamePrefix + 1, masterState, masterPriority);
      Assert.fail("Creating new replica should fail because of incomplete partition weight.");
    } catch (HelixException ex) {
      // expected
    }
    // Supplying a default weight of 0 for every required key makes construction succeed.
    Map<String, Integer> defaultCapacityDataMap = new HashMap<>();
    for (String key : requiredCapacityKeys) {
      defaultCapacityDataMap.put(key, 0);
    }
    testClusterConfig.setDefaultPartitionWeightMap(defaultCapacityDataMap);
    AssignableReplica replica = new AssignableReplica(testClusterConfig, testResourceConfigResource,
        partitionNamePrefix + 1, masterState, masterPriority);
    Assert.assertTrue(replica.getCapacity().keySet().containsAll(requiredCapacityKeys));
    Assert.assertEquals(replica.getCapacity().get(newCapacityKey).intValue(), 0);
    Assert.assertFalse(replica.getCapacity().containsKey(unnecessaryCapacityKey));
  }
}
| 9,777 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/model/TestClusterModelProvider.java
|
package org.apache.helix.controller.rebalancer.waged.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.helix.HelixConstants;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.rebalancer.util.DelayedRebalanceUtil;
import org.apache.helix.controller.rebalancer.waged.WagedRebalancer;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.apache.helix.model.ResourceAssignment;
import org.apache.helix.model.ResourceConfig;
import org.mockito.Mockito;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.when;
public class TestClusterModelProvider extends AbstractTestClusterModel {
  // NOTE(review): not referenced by the methods visible in this chunk — confirm it is
  // used elsewhere in the class before removing.
  Map<String, ResourceConfig> _resourceConfigMap = new HashMap<>();

  /**
   * Extends the base mock cache with a WAGED full-auto MasterSlave IdealState (3 replicas,
   * empty preference lists) for every test resource, and registers two additional mock
   * instances (ids {@code _testInstanceId + 1} and {@code _testInstanceId + 2}) with
   * instance configs and live-instance entries.
   */
  @Override
  protected ResourceControllerDataProvider setupClusterDataCache() throws IOException {
    ResourceControllerDataProvider testCache = super.setupClusterDataCache();
    // Set up mock idealstate
    Map<String, IdealState> isMap = new HashMap<>();
    for (String resource : _resourceNames) {
      IdealState is = new IdealState(resource);
      is.setNumPartitions(_partitionNames.size());
      is.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
      is.setStateModelDefRef("MasterSlave");
      is.setReplicas("3");
      is.setRebalancerClassName(WagedRebalancer.class.getName());
      // Empty preference lists: the rebalancer under test computes placement itself.
      _partitionNames.stream()
          .forEach(partition -> is.setPreferenceList(partition, Collections.emptyList()));
      isMap.put(resource, is);
    }
    // Answer-based stub so lookups resolve against the map built above.
    when(testCache.getIdealState(anyString())).thenAnswer(
        (Answer<IdealState>) invocationOnMock -> isMap.get(invocationOnMock.getArguments()[0]));
    // Set up 2 more instances
    for (int i = 1; i < 3; i++) {
      String instanceName = _testInstanceId + i;
      _instances.add(instanceName);
      // 1. Set up the default instance information with capacity configuration.
      InstanceConfig testInstanceConfig = createMockInstanceConfig(instanceName);
      Map<String, InstanceConfig> instanceConfigMap = testCache.getInstanceConfigMap();
      instanceConfigMap.put(instanceName, testInstanceConfig);
      when(testCache.getInstanceConfigMap()).thenReturn(instanceConfigMap);
      // 2. Mock the live instance node for the default instance.
      LiveInstance testLiveInstance = createMockLiveInstance(instanceName);
      Map<String, LiveInstance> liveInstanceMap = testCache.getLiveInstances();
      liveInstanceMap.put(instanceName, testLiveInstance);
      when(testCache.getLiveInstances()).thenReturn(liveInstanceMap);
    }
    return testCache;
  }
  /**
   * Verifies DelayedRebalanceUtil.findToBeAssignedReplicasForMinActiveReplica against a
   * current assignment that includes an offline instance:
   *   test 0 - empty input returns an empty set;
   *   test 1 - minActiveReplica=1, one partition falls short and its replica is returned;
   *   test 2 - minActiveReplica=1, every partition is satisfied, nothing is returned;
   *   test 3 - minActiveReplica=2, two partitions fall short.
   * Also checks that replicas already on active instances are reported via the
   * allocatedReplicas out-parameter.
   */
  @Test
  public void testFindToBeAssignedReplicasForMinActiveReplica() throws IOException {
    ResourceControllerDataProvider testCache = setupClusterDataCache();
    // instance1/instance2 are live and active; offlineInstance only appears in the
    // current assignment, never in the live or active sets.
    String instance1 = _testInstanceId;
    String offlineInstance = _testInstanceId + "1";
    String instance2 = _testInstanceId + "2";
    Map<String, LiveInstance> liveInstanceMap = new HashMap<>();
    liveInstanceMap.put(instance1, createMockLiveInstance(instance1));
    liveInstanceMap.put(instance2, createMockLiveInstance(instance2));
    Set<String> activeInstances = new HashSet<>();
    activeInstances.add(instance1);
    activeInstances.add(instance2);
    when(testCache.getLiveInstances()).thenReturn(liveInstanceMap);
    when(testCache.getEnabledLiveInstances()).thenReturn(activeInstances);
    // test 0, empty input
    Assert.assertEquals(
        DelayedRebalanceUtil.findToBeAssignedReplicasForMinActiveReplica(testCache, Collections.emptySet(),
            activeInstances, Collections.emptyMap(), new HashMap<>()),
        Collections.emptySet());
    // test 1, one partition under minActiveReplica
    // Partition2's only replica sits on the offline instance, so its MASTER must be
    // re-assigned; everything else is on an active instance.
    Map<String, Map<String, Map<String, String>>> input = ImmutableMap.of(
        _resourceNames.get(0),
        ImmutableMap.of(
            _partitionNames.get(0), ImmutableMap.of("MASTER", instance1),
            _partitionNames.get(1), ImmutableMap.of("OFFLINE", offlineInstance)), // Partition2-MASTER
        _resourceNames.get(1),
        ImmutableMap.of(
            _partitionNames.get(2), ImmutableMap.of("MASTER", instance1),
            _partitionNames.get(3), ImmutableMap.of("SLAVE", instance1))
    );
    Map<String, Set<AssignableReplica>> replicaMap = new HashMap<>(); // to populate
    Map<String, ResourceAssignment> currentAssignment = new HashMap<>(); // to populate
    prepareData(input, replicaMap, currentAssignment, testCache, 1);
    Map<String, Set<AssignableReplica>> allocatedReplicas = new HashMap<>();
    Set<AssignableReplica> toBeAssignedReplicas =
        DelayedRebalanceUtil.findToBeAssignedReplicasForMinActiveReplica(testCache, replicaMap.keySet(), activeInstances,
            currentAssignment, allocatedReplicas);
    Assert.assertEquals(toBeAssignedReplicas.size(), 1);
    Assert.assertTrue(toBeAssignedReplicas.stream().map(AssignableReplica::toString).collect(Collectors.toSet())
        .contains("Resource1-Partition2-MASTER"));
    AssignableReplica replica = toBeAssignedReplicas.iterator().next();
    Assert.assertEquals(replica.getReplicaState(), "MASTER");
    Assert.assertEquals(replica.getPartitionName(), "Partition2");
    // test 2, no additional replica to be assigned
    // Every partition already has its one required replica on an active instance.
    testCache = setupClusterDataCache();
    when(testCache.getLiveInstances()).thenReturn(liveInstanceMap);
    when(testCache.getEnabledLiveInstances()).thenReturn(activeInstances);
    input = ImmutableMap.of(
        _resourceNames.get(0),
        ImmutableMap.of(
            _partitionNames.get(0), ImmutableMap.of("MASTER", instance1),
            _partitionNames.get(1), ImmutableMap.of("SLAVE", instance2)),
        _resourceNames.get(1),
        ImmutableMap.of(
            _partitionNames.get(2), ImmutableMap.of("MASTER", instance1),
            _partitionNames.get(3), ImmutableMap.of("SLAVE", instance2))
    );
    replicaMap = new HashMap<>(); // to populate
    currentAssignment = new HashMap<>(); // to populate
    prepareData(input, replicaMap, currentAssignment, testCache, 1);
    allocatedReplicas = new HashMap<>();
    toBeAssignedReplicas =
        DelayedRebalanceUtil.findToBeAssignedReplicasForMinActiveReplica(testCache, replicaMap.keySet(), activeInstances,
            currentAssignment, allocatedReplicas);
    Assert.assertTrue(toBeAssignedReplicas.isEmpty());
    Assert.assertEquals(allocatedReplicas.get(instance1).size(), 2);
    Assert.assertEquals(allocatedReplicas.get(instance2).size(), 2);
    // test 3, minActiveReplica==2, two partitions falling short
    // Partition2 and Partition4 each have one replica stranded on the offline instance.
    testCache = setupClusterDataCache();
    when(testCache.getLiveInstances()).thenReturn(liveInstanceMap);
    when(testCache.getEnabledLiveInstances()).thenReturn(activeInstances);
    input = ImmutableMap.of(
        _resourceNames.get(0),
        ImmutableMap.of(
            _partitionNames.get(0), ImmutableMap.of("MASTER", instance1, "SLAVE", instance2),
            _partitionNames.get(1), ImmutableMap.of("MASTER", instance1, "OFFLINE", offlineInstance)), // Partition2-SLAVE
        _resourceNames.get(1),
        ImmutableMap.of(
            _partitionNames.get(2), ImmutableMap.of("MASTER", instance1, "SLAVE", instance2),
            _partitionNames.get(3), ImmutableMap.of("SLAVE", instance1, "OFFLINE", offlineInstance)) // Partition4-MASTER
    );
    replicaMap = new HashMap<>(); // to populate
    currentAssignment = new HashMap<>(); // to populate
    prepareData(input, replicaMap, currentAssignment, testCache, 2);
    allocatedReplicas = new HashMap<>();
    toBeAssignedReplicas =
        DelayedRebalanceUtil.findToBeAssignedReplicasForMinActiveReplica(testCache, replicaMap.keySet(), activeInstances,
            currentAssignment, allocatedReplicas);
    Assert.assertEquals(toBeAssignedReplicas.size(), 2);
    Assert.assertEquals(toBeAssignedReplicas.stream().map(AssignableReplica::toString).collect(Collectors.toSet()),
        ImmutableSet.of("Resource1-Partition2-SLAVE", "Resource2-Partition4-MASTER"));
    Assert.assertEquals(allocatedReplicas.get(instance1).size(), 4);
    Assert.assertEquals(allocatedReplicas.get(instance2).size(), 2);
  }
@Test(dependsOnMethods = "testFindToBeAssignedReplicasForMinActiveReplica")
public void testClusterModelForDelayedRebalanceOverwrite() throws IOException {
ResourceControllerDataProvider testCache = setupClusterDataCache();
String instance1 = _testInstanceId;
String offlineInstance = _testInstanceId + "1";
String instance2 = _testInstanceId + "2";
Map<String, LiveInstance> liveInstanceMap = new HashMap<>();
liveInstanceMap.put(instance1, createMockLiveInstance(instance1));
liveInstanceMap.put(instance2, createMockLiveInstance(instance2));
Set<String> activeInstances = new HashSet<>();
activeInstances.add(instance1);
activeInstances.add(instance2);
when(testCache.getLiveInstances()).thenReturn(liveInstanceMap);
when(testCache.getEnabledLiveInstances()).thenReturn(activeInstances);
// test 1, one partition under minActiveReplica
Map<String, Map<String, Map<String, String>>> input = ImmutableMap.of(
_resourceNames.get(0),
ImmutableMap.of(
_partitionNames.get(0), ImmutableMap.of("MASTER", instance1),
_partitionNames.get(1), ImmutableMap.of("OFFLINE", offlineInstance), // Partition2-MASTER
_partitionNames.get(2), ImmutableMap.of("MASTER", instance2),
_partitionNames.get(3), ImmutableMap.of("MASTER", instance2)),
_resourceNames.get(1),
ImmutableMap.of(
_partitionNames.get(0), ImmutableMap.of("MASTER", instance2),
_partitionNames.get(1), ImmutableMap.of("MASTER", instance2),
_partitionNames.get(2), ImmutableMap.of("MASTER", instance1),
_partitionNames.get(3), ImmutableMap.of("OFFLINE", offlineInstance)) // Partition4-MASTER
);
Map<String, Set<AssignableReplica>> replicaMap = new HashMap<>(); // to populate
Map<String, ResourceAssignment> currentAssignment = new HashMap<>(); // to populate
prepareData(input, replicaMap, currentAssignment, testCache, 1);
Map<String, Resource> resourceMap = _resourceNames.stream().collect(Collectors.toMap(resource -> resource, Resource::new));
ClusterModel clusterModel = ClusterModelProvider.generateClusterModelForDelayedRebalanceOverwrites(testCache,
resourceMap, activeInstances, currentAssignment);
Assert.assertEquals(clusterModel.getAssignableNodes().size(), 2);
Assert.assertTrue(clusterModel.getAssignableNodes().containsKey(instance1));
Assert.assertTrue(clusterModel.getAssignableNodes().containsKey(instance2));
Assert.assertEquals(clusterModel.getAssignableNodes().get(instance1).getAssignedReplicas().size(), 2);
Assert.assertEquals(clusterModel.getAssignableNodes().get(instance2).getAssignedReplicas().size(), 4);
Assert.assertEquals(clusterModel.getAssignableReplicaMap().get("Resource1").size(), 1);
Assert.assertEquals(clusterModel.getAssignableReplicaMap().get("Resource1").iterator().next().toString(),
"Resource1-Partition2-MASTER");
Assert.assertEquals(clusterModel.getAssignableReplicaMap().get("Resource2").size(), 1);
Assert.assertEquals(clusterModel.getAssignableReplicaMap().get("Resource2").iterator().next().toString(),
"Resource2-Partition4-MASTER");
// test 2, minActiveReplica==2, three partitions falling short
testCache = setupClusterDataCache();
when(testCache.getLiveInstances()).thenReturn(liveInstanceMap);
when(testCache.getEnabledLiveInstances()).thenReturn(activeInstances);
input = ImmutableMap.of(
_resourceNames.get(0),
ImmutableMap.of(
_partitionNames.get(0), ImmutableMap.of("MASTER", instance1, "SLAVE", instance2),
_partitionNames.get(1), ImmutableMap.of("MASTER", instance1, "OFFLINE", offlineInstance), // Partition2-SLAVE
_partitionNames.get(2), ImmutableMap.of("OFFLINE", offlineInstance, "SLAVE", instance2), // Partition3-MASTER
_partitionNames.get(3), ImmutableMap.of("MASTER", instance1, "SLAVE", instance2)),
_resourceNames.get(1),
ImmutableMap.of(
_partitionNames.get(0), ImmutableMap.of("MASTER", instance1, "SLAVE", instance2),
_partitionNames.get(1), ImmutableMap.of("MASTER", instance1, "SLAVE", instance2),
_partitionNames.get(2), ImmutableMap.of("MASTER", instance1, "SLAVE", instance2),
_partitionNames.get(3), ImmutableMap.of("OFFLINE", offlineInstance, "ERROR", instance2)) // Partition4-MASTER
);
replicaMap = new HashMap<>(); // to populate
currentAssignment = new HashMap<>(); // to populate
prepareData(input, replicaMap, currentAssignment, testCache, 2);
clusterModel = ClusterModelProvider.generateClusterModelForDelayedRebalanceOverwrites(testCache,
resourceMap, activeInstances, currentAssignment);
Assert.assertEquals(clusterModel.getAssignableNodes().size(), 2);
Assert.assertTrue(clusterModel.getAssignableNodes().containsKey(instance1));
Assert.assertTrue(clusterModel.getAssignableNodes().containsKey(instance2));
Assert.assertEquals(clusterModel.getAssignableNodes().get(instance1).getAssignedReplicas().size(), 6);
Assert.assertEquals(clusterModel.getAssignableNodes().get(instance2).getAssignedReplicas().size(), 7);
Set<String> replicaSet = clusterModel.getAssignableReplicaMap().get(_resourceNames.get(0))
.stream()
.map(AssignableReplica::toString)
.collect(Collectors.toSet());
Assert.assertEquals(replicaSet.size(), 2);
Assert.assertTrue(replicaSet.contains("Resource1-Partition2-SLAVE"));
Assert.assertTrue(replicaSet.contains("Resource1-Partition3-MASTER"));
replicaSet = clusterModel.getAssignableReplicaMap().get(_resourceNames.get(1))
.stream()
.map(AssignableReplica::toString)
.collect(Collectors.toSet());
Assert.assertEquals(replicaSet.size(), 1);
Assert.assertTrue(replicaSet.contains("Resource2-Partition4-MASTER"));
}
/**
 * Prepares mock objects from the given input: populates {@code replicaMap} and
 * {@code currentAssignment}, and stubs {@code testCache} with matching IdealStates,
 * ResourceConfigs and per-instance CurrentStates.
 *
 * @param input <resource, <partition, <state, instance> > >
 * @param replicaMap The data map to prepare, a set of AssignableReplica by resource name.
 * @param currentAssignment The data map to prepare, resourceAssignment by resource name
 * @param testCache The mock object to prepare
 * @param minActiveReplica minActiveReplica value set on every mocked IdealState and ResourceConfig
 */
private void prepareData(Map<String, Map<String, Map<String, String>>> input,
    Map<String, Set<AssignableReplica>> replicaMap,
    Map<String, ResourceAssignment> currentAssignment,
    ResourceControllerDataProvider testCache,
    int minActiveReplica) {
  // Set up mock idealstate: one FULL_AUTO WAGED MasterSlave IdealState per resource,
  // with empty preference lists so placement is fully decided by the rebalancer.
  Map<String, IdealState> isMap = new HashMap<>();
  for (String resource : _resourceNames) {
    ResourceConfig resourceConfig = new ResourceConfig.Builder(resource)
        .setMinActiveReplica(minActiveReplica)
        .setNumReplica(3)
        .build();
    _resourceConfigMap.put(resource, resourceConfig);
    IdealState is = new IdealState(resource);
    is.setNumPartitions(_partitionNames.size());
    is.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
    is.setStateModelDefRef("MasterSlave");
    is.setReplicas("3");
    is.setMinActiveReplicas(minActiveReplica);
    is.setRebalancerClassName(WagedRebalancer.class.getName());
    _partitionNames.forEach(partition -> is.setPreferenceList(partition, Collections.emptyList()));
    isMap.put(resource, is);
  }
  when(testCache.getIdealState(anyString())).thenAnswer(
      (Answer<IdealState>) invocationOnMock -> isMap.get(invocationOnMock.getArguments()[0]));
  when(testCache.getResourceConfig(anyString())).thenAnswer(
      (Answer<ResourceConfig>) invocationOnMock -> _resourceConfigMap.get(invocationOnMock.getArguments()[0]));
  // <instance, <resource, CurrentState>>
  Map<String, Map<String, CurrentState>> currentStateByInstanceByResource = new HashMap<>();
  Map<String, Map<String, Map<String, String>>> stateByInstanceByResourceByPartition = new HashMap<>();
  // Invert the input from <resource, <partition, <state, instance>>> into
  // <instance, <resource, <partition, state>>> while also collecting the replica set.
  for (String resource : input.keySet()) {
    Set<AssignableReplica> replicas = new HashSet<>();
    replicaMap.put(resource, replicas);
    ResourceConfig resourceConfig = _resourceConfigMap.get(resource);
    for (String partition : input.get(resource).keySet()) {
      input.get(resource).get(partition).forEach(
          (state, instance) -> {
            stateByInstanceByResourceByPartition
                .computeIfAbsent(instance, k -> new HashMap<>())
                .computeIfAbsent(resource, k -> new HashMap<>())
                .put(partition, state);
            replicas.add(new MockAssignableReplica(resourceConfig, partition, state));
          });
    }
  }
  // Wrap each <partition, state> map in a mocked CurrentState.
  for (String instance : stateByInstanceByResourceByPartition.keySet()) {
    for (String resource : stateByInstanceByResourceByPartition.get(instance).keySet()) {
      Map<String, String> partitionState = stateByInstanceByResourceByPartition.get(instance).get(resource);
      CurrentState testCurrentStateResource = mockCurrentStateResource(partitionState);
      currentStateByInstanceByResource.computeIfAbsent(instance, k -> new HashMap<>()).put(resource, testCurrentStateResource);
    }
  }
  for (String instance : currentStateByInstanceByResource.keySet()) {
    when(testCache.getCurrentState(instance, _sessionId)).thenReturn(currentStateByInstanceByResource.get(instance));
    when(testCache.getCurrentState(instance, _sessionId, false))
        .thenReturn(currentStateByInstanceByResource.get(instance));
  }
  // Mock a baseline assignment based on the current states.
  // NOTE(review): this loop assumes getCurrentState(instance, _sessionId) is non-null for every
  // instance in _instances — confirm setupClusterDataCache() stubs a default for untouched ones.
  for (String resource : _resourceNames) {
    // <partition, <instance, state>>
    Map<String, Map<String, String>> assignmentMap = new HashMap<>();
    for (String instance : _instances) {
      CurrentState cs = testCache.getCurrentState(instance, _sessionId).get(resource);
      if (cs != null) {
        for (Map.Entry<String, String> stateEntry : cs.getPartitionStateMap().entrySet()) {
          assignmentMap.computeIfAbsent(stateEntry.getKey(), k -> new HashMap<>())
              .put(instance, stateEntry.getValue());
        }
        // The ResourceAssignment is rebuilt for each instance from the cumulative
        // assignmentMap, so the last iteration's object (containing all instances
        // seen so far) is what remains in currentAssignment.
        ResourceAssignment assignment = new ResourceAssignment(resource);
        assignmentMap.keySet().forEach(partition -> assignment
            .addReplicaMap(new Partition(partition), assignmentMap.get(partition)));
        currentAssignment.put(resource, assignment);
      }
    }
  }
}
/**
 * Builds a Mockito-backed {@link CurrentState} whose partition-state map and per-partition
 * state lookups mirror the supplied {@code partitionState}.
 * NOTE(review): the mocked resource name is always {@code _resourceNames.get(0)} regardless of
 * which resource the states belong to — confirm no caller relies on getResourceName().
 */
private CurrentState mockCurrentStateResource(Map<String, String> partitionState) {
  CurrentState currentState = Mockito.mock(CurrentState.class);
  when(currentState.getResourceName()).thenReturn(_resourceNames.get(0));
  when(currentState.getPartitionStateMap()).thenReturn(partitionState);
  when(currentState.getStateModelDefRef()).thenReturn("MasterSlave");
  when(currentState.getSessionId()).thenReturn(_sessionId);
  // Stub the single-partition lookup for every entry of the map.
  partitionState.forEach(
      (partition, state) -> when(currentState.getState(partition)).thenReturn(state));
  return currentState;
}
/**
 * Scenario matrix for {@code ClusterModelProvider.generateClusterModelForBaseline}: empty
 * assignment, per-instance fault zones, restricted/empty active-instance sets, an existing
 * baseline assignment, a cluster topology change, a single resource config change, and a
 * previously assigned instance turning inactive.
 */
@Test
public void testGenerateClusterModel() throws IOException {
  ResourceControllerDataProvider testCache = setupClusterDataCache();
  // 1. test generating a cluster model with empty assignment
  ClusterModel clusterModel = ClusterModelProvider.generateClusterModelForBaseline(testCache,
      _resourceNames.stream()
          .collect(Collectors.toMap(resource -> resource, resource -> new Resource(resource))),
      _instances, Collections.emptyMap(), Collections.emptyMap());
  // There should be no existing assignment.
  Assert.assertFalse(clusterModel.getContext().getAssignmentForFaultZoneMap().values().stream()
      .anyMatch(resourceMap -> !resourceMap.isEmpty()));
  Assert.assertFalse(clusterModel.getAssignableNodes().values().stream()
      .anyMatch(node -> node.getAssignedReplicaCount() != 0));
  // Have all 3 instances
  Assert.assertEquals(
      clusterModel.getAssignableNodes().values().stream().map(AssignableNode::getInstanceName)
          .collect(Collectors.toSet()), _instances);
  // Shall have 2 resources and 4 replicas, since all nodes are in the same fault zone.
  Assert.assertEquals(clusterModel.getAssignableReplicaMap().size(), 2);
  Assert.assertTrue(clusterModel.getAssignableReplicaMap().values().stream()
      .allMatch(replicaSet -> replicaSet.size() == 4));
  // Adjust instance fault zone, so they have different fault zones.
  testCache.getInstanceConfigMap().values().stream()
      .forEach(config -> config.setZoneId(config.getInstanceName()));
  clusterModel = ClusterModelProvider.generateClusterModelForBaseline(testCache,
      _resourceNames.stream()
          .collect(Collectors.toMap(resource -> resource, resource -> new Resource(resource))),
      _instances, Collections.emptyMap(), Collections.emptyMap());
  // Shall have 2 resources and 12 replicas after fault zone adjusted.
  Assert.assertEquals(clusterModel.getAssignableReplicaMap().size(), 2);
  Assert.assertTrue(clusterModel.getAssignableReplicaMap().values().stream()
      .allMatch(replicaSet -> replicaSet.size() == 12));
  // 2. test with only one active node
  clusterModel = ClusterModelProvider.generateClusterModelForBaseline(testCache,
      _resourceNames.stream()
          .collect(Collectors.toMap(resource -> resource, resource -> new Resource(resource))),
      Collections.singleton(_testInstanceId), Collections.emptyMap(), Collections.emptyMap());
  // Have only one instance
  Assert.assertEquals(
      clusterModel.getAssignableNodes().values().stream().map(AssignableNode::getInstanceName)
          .collect(Collectors.toSet()), Collections.singleton(_testInstanceId));
  // Shall have 4 assignable replicas because there is only one valid node.
  Assert.assertTrue(clusterModel.getAssignableReplicaMap().values().stream()
      .allMatch(replicaSet -> replicaSet.size() == 4));
  // 3. test with no active instance
  clusterModel = ClusterModelProvider.generateClusterModelForBaseline(testCache,
      _resourceNames.stream()
          .collect(Collectors.toMap(resource -> resource, resource -> new Resource(resource))),
      Collections.emptySet(), Collections.emptyMap(), Collections.emptyMap());
  // Have no assignable nodes at all.
  Assert.assertEquals(clusterModel.getAssignableNodes().size(), 0);
  // Shall have 0 assignable replicas because there are 0 valid nodes.
  Assert.assertTrue(clusterModel.getAssignableReplicaMap().values().stream()
      .allMatch(replicaSet -> replicaSet.isEmpty()));
  // 4. test with baseline assignment
  // Mock a baseline assignment based on the current states.
  Map<String, ResourceAssignment> baselineAssignment = new HashMap<>();
  for (String resource : _resourceNames) {
    // <partition, <instance, state>>
    Map<String, Map<String, String>> assignmentMap = new HashMap<>();
    CurrentState cs = testCache.getCurrentState(_testInstanceId, _sessionId).get(resource);
    if (cs != null) {
      for (Map.Entry<String, String> stateEntry : cs.getPartitionStateMap().entrySet()) {
        assignmentMap.computeIfAbsent(stateEntry.getKey(), k -> new HashMap<>())
            .put(_testInstanceId, stateEntry.getValue());
      }
      ResourceAssignment assignment = new ResourceAssignment(resource);
      assignmentMap.keySet().stream().forEach(partition -> assignment
          .addReplicaMap(new Partition(partition), assignmentMap.get(partition)));
      baselineAssignment.put(resource, assignment);
    }
  }
  // Generate a cluster model based on the best possible assignment
  clusterModel = ClusterModelProvider.generateClusterModelForBaseline(testCache,
      _resourceNames.stream()
          .collect(Collectors.toMap(resource -> resource, resource -> new Resource(resource))),
      _instances, Collections.emptyMap(), baselineAssignment);
  // There should be 4 existing assignments in total (each resource has 2) in the specified instance
  Assert.assertTrue(clusterModel.getContext().getAssignmentForFaultZoneMap().values().stream()
      .allMatch(resourceMap -> resourceMap.values().stream()
          .allMatch(partitionSet -> partitionSet.size() == 2)));
  Assert.assertEquals(
      clusterModel.getAssignableNodes().get(_testInstanceId).getAssignedReplicaCount(), 4);
  // Since each resource has 2 replicas assigned, the assignable replica count should be 10.
  Assert.assertEquals(clusterModel.getAssignableReplicaMap().size(), 2);
  Assert.assertTrue(clusterModel.getAssignableReplicaMap().values().stream()
      .allMatch(replicaSet -> replicaSet.size() == 10));
  // 5. test with best possible assignment but cluster topology is changed
  clusterModel = ClusterModelProvider.generateClusterModelForBaseline(testCache,
      _resourceNames.stream()
          .collect(Collectors.toMap(resource -> resource, resource -> new Resource(resource))),
      _instances,
      Collections.singletonMap(HelixConstants.ChangeType.CLUSTER_CONFIG, Collections.emptySet()),
      baselineAssignment);
  // There should be no existing assignment since the topology change invalidates all existing assignment
  Assert.assertTrue(clusterModel.getContext().getAssignmentForFaultZoneMap().values().stream()
      .allMatch(resourceMap -> resourceMap.isEmpty()));
  Assert.assertFalse(clusterModel.getAssignableNodes().values().stream()
      .anyMatch(node -> node.getAssignedReplicaCount() != 0));
  // Shall have 2 resources and 12 replicas
  Assert.assertEquals(clusterModel.getAssignableReplicaMap().size(), 2);
  Assert.assertTrue(clusterModel.getAssignableReplicaMap().values().stream()
      .allMatch(replicaSet -> replicaSet.size() == 12));
  // 6. test with best possible assignment and one resource config change
  // Generate a cluster model based on the same best possible assignment, but resource1 config is changed
  String changedResourceName = _resourceNames.get(0);
  clusterModel = ClusterModelProvider.generateClusterModelForBaseline(testCache,
      _resourceNames.stream()
          .collect(Collectors.toMap(resource -> resource, resource -> new Resource(resource))),
      _instances, Collections.singletonMap(HelixConstants.ChangeType.RESOURCE_CONFIG,
          Collections.singleton(changedResourceName)), baselineAssignment);
  // There should be no existing assignment for all the resource except for resource2
  Assert.assertEquals(clusterModel.getContext().getAssignmentForFaultZoneMap().size(), 1);
  Map<String, Set<String>> resourceAssignmentMap =
      clusterModel.getContext().getAssignmentForFaultZoneMap().get(_testInstanceId);
  // Should be only resource2 in the map
  Assert.assertEquals(resourceAssignmentMap.size(), 1);
  for (String resource : _resourceNames) {
    Assert
        .assertEquals(resourceAssignmentMap.getOrDefault(resource, Collections.emptySet()).size(),
            resource.equals(changedResourceName) ? 0 : 2);
  }
  // Only the first instance will have 2 assignment from resource2.
  for (String instance : _instances) {
    Assert.assertEquals(clusterModel.getAssignableNodes().get(instance).getAssignedReplicaCount(),
        instance.equals(_testInstanceId) ? 2 : 0);
  }
  // Shall have 2 resources and 12 replicas
  Assert.assertEquals(clusterModel.getAssignableReplicaMap().keySet().size(), 2);
  for (String resource : _resourceNames) {
    Assert.assertEquals(clusterModel.getAssignableReplicaMap().get(resource).size(),
        resource.equals(changedResourceName) ? 12 : 10);
  }
  // 7. test with best possible assignment but the instance becomes inactive
  // Generate a cluster model based on the best possible assignment, but the assigned node is disabled
  Set<String> limitedActiveInstances = new HashSet<>(_instances);
  limitedActiveInstances.remove(_testInstanceId);
  clusterModel = ClusterModelProvider.generateClusterModelForBaseline(testCache,
      _resourceNames.stream()
          .collect(Collectors.toMap(resource -> resource, resource -> new Resource(resource))),
      limitedActiveInstances, Collections.emptyMap(), baselineAssignment);
  // There should be no existing assignment.
  Assert.assertFalse(clusterModel.getContext().getAssignmentForFaultZoneMap().values().stream()
      .anyMatch(resourceMap -> !resourceMap.isEmpty()));
  Assert.assertFalse(clusterModel.getAssignableNodes().values().stream()
      .anyMatch(node -> node.getAssignedReplicaCount() != 0));
  // Have only 2 instances
  Assert.assertEquals(
      clusterModel.getAssignableNodes().values().stream().map(AssignableNode::getInstanceName)
          .collect(Collectors.toSet()), limitedActiveInstances);
  // Since only 2 instances are active, we shall have 8 assignable replicas in each resource.
  Assert.assertEquals(clusterModel.getAssignableReplicaMap().size(), 2);
  Assert.assertTrue(clusterModel.getAssignableReplicaMap().values().stream()
      .allMatch(replicaSet -> replicaSet.size() == 8));
}
/**
 * Scenario matrix for {@code ClusterModelProvider.generateClusterModelForPartialRebalance}:
 * empty baseline (nothing to rebalance), baseline identical to best possible (nothing to
 * rebalance), inactive instance, a resource present only in the baseline, and a resource
 * present only in the best possible assignment.
 * Depends on {@link #testGenerateClusterModel()} running first.
 */
@Test (dependsOnMethods = "testGenerateClusterModel")
public void testGenerateClusterModelForPartialRebalance() throws IOException {
  ResourceControllerDataProvider testCache = setupClusterDataCache();
  // 1. test generating a cluster model with empty assignment
  ClusterModel clusterModel = ClusterModelProvider
      .generateClusterModelForPartialRebalance(testCache, _resourceNames.stream()
          .collect(Collectors.toMap(resource -> resource, resource -> new Resource(resource))),
      _instances, Collections.emptyMap(), Collections.emptyMap());
  // There should be no existing assignment.
  Assert.assertFalse(clusterModel.getContext().getAssignmentForFaultZoneMap().values().stream()
      .anyMatch(resourceMap -> !resourceMap.isEmpty()));
  Assert.assertFalse(clusterModel.getAssignableNodes().values().stream()
      .anyMatch(node -> node.getAssignedReplicaCount() != 0));
  // Have all 3 instances
  Assert.assertEquals(
      clusterModel.getAssignableNodes().values().stream().map(AssignableNode::getInstanceName)
          .collect(Collectors.toSet()), _instances);
  // Shall have 0 resources and 0 replicas since the baseline is empty. The partial rebalance
  // should not rebalance any replica.
  Assert.assertEquals(clusterModel.getAssignableReplicaMap().size(), 0);
  // Adjust instance fault zone, so they have different fault zones.
  testCache.getInstanceConfigMap().values().stream()
      .forEach(config -> config.setZoneId(config.getInstanceName()));
  // 2. test with a pair of identical best possible assignment and baseline assignment
  // Mock a best possible assignment based on the current states.
  Map<String, ResourceAssignment> bestPossibleAssignment = new HashMap<>();
  for (String resource : _resourceNames) {
    // <partition, <instance, state>>
    Map<String, Map<String, String>> assignmentMap = new HashMap<>();
    CurrentState cs = testCache.getCurrentState(_testInstanceId, _sessionId).get(resource);
    if (cs != null) {
      for (Map.Entry<String, String> stateEntry : cs.getPartitionStateMap().entrySet()) {
        assignmentMap.computeIfAbsent(stateEntry.getKey(), k -> new HashMap<>())
            .put(_testInstanceId, stateEntry.getValue());
      }
      ResourceAssignment assignment = new ResourceAssignment(resource);
      assignmentMap.keySet().stream().forEach(partition -> assignment
          .addReplicaMap(new Partition(partition), assignmentMap.get(partition)));
      bestPossibleAssignment.put(resource, assignment);
    }
  }
  // Baseline is a shallow copy of the best possible assignment, i.e. identical content.
  Map<String, ResourceAssignment> baseline = new HashMap<>(bestPossibleAssignment);
  // Generate a cluster model for partial rebalance
  clusterModel = ClusterModelProvider.generateClusterModelForPartialRebalance(testCache,
      _resourceNames.stream()
          .collect(Collectors.toMap(resource -> resource, resource -> new Resource(resource))),
      _instances, baseline, bestPossibleAssignment);
  // There should be 4 existing assignments in total (each resource has 2) in the specified instance
  Assert.assertTrue(clusterModel.getContext().getAssignmentForFaultZoneMap().values().stream()
      .allMatch(resourceMap -> resourceMap.values().stream()
          .allMatch(partitionSet -> partitionSet.size() == 2)));
  Assert.assertEquals(
      clusterModel.getAssignableNodes().get(_testInstanceId).getAssignedReplicaCount(), 4);
  // Since the best possible matches the baseline, no replica needs to be reassigned.
  Assert.assertEquals(clusterModel.getAssignableReplicaMap().size(), 0);
  // 3. test with inactive instance in the baseline and the best possible assignment
  Set<String> partialInstanceList = new HashSet<>(_instances);
  partialInstanceList.remove(_testInstanceId);
  clusterModel = ClusterModelProvider.generateClusterModelForPartialRebalance(testCache,
      _resourceNames.stream()
          .collect(Collectors.toMap(resource -> resource, resource -> new Resource(resource))),
      partialInstanceList, baseline, bestPossibleAssignment);
  // Have the other 2 active instances
  Assert.assertEquals(clusterModel.getAssignableNodes().size(), 2);
  // All the replicas in the existing assignment should be rebalanced.
  Assert.assertEquals(clusterModel.getAssignableReplicaMap().size(), 2);
  Assert.assertTrue(clusterModel.getAssignableReplicaMap().values().stream()
      .allMatch(replicaSet -> replicaSet.size() == 2));
  // Shall have 0 assigned replicas
  Assert.assertTrue(clusterModel.getAssignableNodes().values().stream()
      .allMatch(assignableNode -> assignableNode.getAssignedReplicaCount() == 0));
  // 4. test with one resource that is only in the baseline
  String resourceInBaselineOnly = _resourceNames.get(0);
  Map<String, ResourceAssignment> partialBestPossibleAssignment =
      new HashMap<>(bestPossibleAssignment);
  partialBestPossibleAssignment.remove(resourceInBaselineOnly);
  // Generate a cluster mode with the adjusted best possible assignment
  clusterModel = ClusterModelProvider.generateClusterModelForPartialRebalance(testCache,
      _resourceNames.stream()
          .collect(Collectors.toMap(resource -> resource, resource -> new Resource(resource))),
      _instances, baseline, partialBestPossibleAssignment);
  // There should be 2 existing assignments in total in the specified instance
  Assert.assertTrue(clusterModel.getContext().getAssignmentForFaultZoneMap().values().stream()
      .allMatch(resourceMap -> resourceMap.values().stream()
          .allMatch(partitionSet -> partitionSet.size() == 2)));
  // Only the replicas of one resource require rebalance
  Assert.assertEquals(
      clusterModel.getAssignableNodes().get(_testInstanceId).getAssignedReplicaCount(), 2);
  Assert.assertEquals(clusterModel.getAssignableReplicaMap().size(), 1);
  Assert.assertTrue(clusterModel.getAssignableReplicaMap().containsKey(resourceInBaselineOnly));
  Assert.assertTrue(clusterModel.getAssignableReplicaMap().values().stream()
      .allMatch(replicaSet -> replicaSet.size() == 2));
  // 5. test with one resource only in the best possible assignment
  String resourceInBestPossibleOnly = _resourceNames.get(1);
  Map<String, ResourceAssignment> partialBaseline = new HashMap<>(baseline);
  partialBaseline.remove(resourceInBestPossibleOnly);
  // Generate a cluster model with the adjusted baseline
  clusterModel = ClusterModelProvider.generateClusterModelForPartialRebalance(testCache,
      _resourceNames.stream()
          .collect(Collectors.toMap(resource -> resource, resource -> new Resource(resource))),
      _instances, partialBaseline, bestPossibleAssignment);
  // There should be 2 existing assignments in total and all of them require rebalance.
  Assert.assertTrue(clusterModel.getContext().getAssignmentForFaultZoneMap().values().stream()
      .allMatch(resourceMap -> resourceMap.values().stream()
          .allMatch(partitionSet -> partitionSet.size() == 2)));
  Assert.assertEquals(
      clusterModel.getAssignableNodes().get(_testInstanceId).getAssignedReplicaCount(), 2);
  // No need to rebalance the replicas that are not in the baseline yet.
  Assert.assertEquals(clusterModel.getAssignableReplicaMap().size(), 0);
}
/**
 * Minimal {@link AssignableReplica} used by the tests above: binds the replica to a throwaway
 * "testCluster" ClusterConfig with a constant trailing argument of 1 — presumably the state
 * priority; confirm against AssignableReplica's constructor.
 */
static class MockAssignableReplica extends AssignableReplica {
  MockAssignableReplica(ResourceConfig resourceConfig, String partition, String replicaState) {
    super(new ClusterConfig("testCluster"), resourceConfig, partition, replicaState, 1);
  }
}
}
| 9,778 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/model/TestAssignableNode.java
|
package org.apache.helix.controller.rebalancer.waged.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.helix.HelixException;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.InstanceConfig;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import static org.mockito.Mockito.when;
public class TestAssignableNode extends AbstractTestClusterModel {
/**
 * TestNG class-level setup: delegates to the shared fixture initialization in the parent
 * test class (the {@code super.initialize()} call shows the parent declares it).
 */
@Override
@BeforeClass
public void initialize() {
  super.initialize();
}
/**
 * Exercises the full AssignableNode lifecycle: batch-assign the generated replicas and verify
 * every accessor (test 1), release one replica and re-verify (test 2), then assign a replica
 * back and re-verify (test 3). Capacity expectations ("item1"/"item2"/"item3") are derived from
 * {@code _capacityDataMap} minus the per-replica usage — confirm exact per-replica capacity
 * values against AbstractTestClusterModel's fixture data.
 */
@Test
public void testNormalUsage() throws IOException {
  // Test 1 - initialize based on the data cache and check with the expected result
  ResourceControllerDataProvider testCache = setupClusterDataCache();
  Set<AssignableReplica> assignmentSet = generateReplicas(testCache);
  Set<String> expectedTopStateAssignmentSet1 = new HashSet<>(_partitionNames.subList(0, 1));
  Set<String> expectedTopStateAssignmentSet2 = new HashSet<>(_partitionNames.subList(2, 3));
  Set<String> expectedAssignmentSet1 = new HashSet<>(_partitionNames.subList(0, 2));
  Set<String> expectedAssignmentSet2 = new HashSet<>(_partitionNames.subList(2, 4));
  Map<String, Set<String>> expectedAssignment = new HashMap<>();
  expectedAssignment.put("Resource1", expectedAssignmentSet1);
  expectedAssignment.put("Resource2", expectedAssignmentSet2);
  Map<String, Integer> expectedCapacityMap = new HashMap<>();
  expectedCapacityMap.put("item1", 4);
  expectedCapacityMap.put("item2", 8);
  expectedCapacityMap.put("item3", 30);
  AssignableNode assignableNode = new AssignableNode(testCache.getClusterConfig(),
      testCache.getInstanceConfigMap().get(_testInstanceId), _testInstanceId);
  assignableNode.assignInitBatch(assignmentSet);
  Assert.assertEquals(assignableNode.getAssignedPartitionsMap(), expectedAssignment);
  Assert.assertEquals(assignableNode.getAssignedReplicaCount(), 4);
  Assert.assertEquals(assignableNode.getGeneralProjectedHighestUtilization(Collections.EMPTY_MAP),
      16.0 / 20.0, 0.005);
  Assert.assertEquals(assignableNode.getTopStateProjectedHighestUtilization(Collections.EMPTY_MAP),
      8.0 / 20.0, 0.005);
  Assert.assertEquals(assignableNode.getMaxCapacity(), _capacityDataMap);
  Assert.assertEquals(assignableNode.getMaxPartition(), 5);
  Assert.assertEquals(assignableNode.getInstanceTags(), _testInstanceTags);
  Assert.assertEquals(assignableNode.getFaultZone(), _testFaultZoneId);
  Assert.assertEquals(assignableNode.getDisabledPartitionsMap(), _disabledPartitionsMap);
  Assert.assertEquals(assignableNode.getRemainingCapacity(), expectedCapacityMap);
  Assert.assertEquals(assignableNode.getAssignedReplicas(), assignmentSet);
  Assert.assertEquals(assignableNode.getAssignedPartitionsByResource(_resourceNames.get(0)),
      expectedAssignmentSet1);
  Assert.assertEquals(assignableNode.getAssignedPartitionsByResource(_resourceNames.get(1)),
      expectedAssignmentSet2);
  Assert
      .assertEquals(assignableNode.getAssignedTopStatePartitionsByResource(_resourceNames.get(0)),
          expectedTopStateAssignmentSet1);
  Assert
      .assertEquals(assignableNode.getAssignedTopStatePartitionsByResource(_resourceNames.get(1)),
          expectedTopStateAssignmentSet2);
  Assert.assertEquals(assignableNode.getAssignedTopStatePartitionsCount(),
      expectedTopStateAssignmentSet1.size() + expectedTopStateAssignmentSet2.size());
  // Test 2 - release assignment from the AssignableNode
  AssignableReplica removingReplica = new AssignableReplica(testCache.getClusterConfig(),
      testCache.getResourceConfig(_resourceNames.get(1)), _partitionNames.get(2), "MASTER", 1);
  expectedAssignment.get(_resourceNames.get(1)).remove(_partitionNames.get(2));
  // Releasing the MASTER replica frees its capacity usage back to the node.
  expectedCapacityMap.put("item1", 9);
  expectedCapacityMap.put("item2", 18);
  // Drop the released replica from the local expectation set via the iterator
  // (equals() is used so the freshly constructed replica matches the generated one).
  Iterator<AssignableReplica> iter = assignmentSet.iterator();
  while (iter.hasNext()) {
    AssignableReplica replica = iter.next();
    if (replica.equals(removingReplica)) {
      iter.remove();
    }
  }
  expectedTopStateAssignmentSet2.remove(_partitionNames.get(2));
  assignableNode.release(removingReplica);
  Assert.assertEquals(assignableNode.getAssignedPartitionsMap(), expectedAssignment);
  Assert.assertEquals(assignableNode.getAssignedReplicaCount(), 3);
  Assert.assertEquals(assignableNode.getGeneralProjectedHighestUtilization(Collections.EMPTY_MAP),
      11.0 / 20.0, 0.005);
  Assert.assertEquals(assignableNode.getTopStateProjectedHighestUtilization(Collections.EMPTY_MAP),
      3.0 / 20.0, 0.005);
  Assert.assertEquals(assignableNode.getMaxCapacity(), _capacityDataMap);
  Assert.assertEquals(assignableNode.getMaxPartition(), 5);
  Assert.assertEquals(assignableNode.getInstanceTags(), _testInstanceTags);
  Assert.assertEquals(assignableNode.getFaultZone(), _testFaultZoneId);
  Assert.assertEquals(assignableNode.getDisabledPartitionsMap(), _disabledPartitionsMap);
  Assert.assertEquals(assignableNode.getRemainingCapacity(), expectedCapacityMap);
  Assert.assertEquals(assignableNode.getAssignedReplicas(), assignmentSet);
  Assert.assertEquals(assignableNode.getAssignedPartitionsByResource(_resourceNames.get(0)),
      expectedAssignmentSet1);
  Assert.assertEquals(assignableNode.getAssignedPartitionsByResource(_resourceNames.get(1)),
      expectedAssignmentSet2);
  Assert
      .assertEquals(assignableNode.getAssignedTopStatePartitionsByResource(_resourceNames.get(0)),
          expectedTopStateAssignmentSet1);
  Assert
      .assertEquals(assignableNode.getAssignedTopStatePartitionsByResource(_resourceNames.get(1)),
          expectedTopStateAssignmentSet2);
  Assert.assertEquals(assignableNode.getAssignedTopStatePartitionsCount(),
      expectedTopStateAssignmentSet1.size() + expectedTopStateAssignmentSet2.size());
  // Test 3 - add assignment to the AssignableNode
  // The re-added replica is SLAVE (not MASTER), so top-state expectations stay unchanged.
  AssignableReplica addingReplica = new AssignableReplica(testCache.getClusterConfig(),
      testCache.getResourceConfig(_resourceNames.get(1)), _partitionNames.get(2), "SLAVE", 2);
  expectedAssignment.get(_resourceNames.get(1)).add(_partitionNames.get(2));
  expectedCapacityMap.put("item1", 4);
  expectedCapacityMap.put("item2", 8);
  assignmentSet.add(addingReplica);
  assignableNode.assign(addingReplica);
  Assert.assertEquals(assignableNode.getAssignedPartitionsMap(), expectedAssignment);
  Assert.assertEquals(assignableNode.getAssignedReplicaCount(), 4);
  Assert.assertEquals(assignableNode.getGeneralProjectedHighestUtilization(Collections.EMPTY_MAP),
      16.0 / 20.0, 0.005);
  Assert.assertEquals(assignableNode.getTopStateProjectedHighestUtilization(Collections.EMPTY_MAP),
      3.0 / 20.0, 0.005);
  Assert.assertEquals(assignableNode.getMaxCapacity(), _capacityDataMap);
  Assert.assertEquals(assignableNode.getMaxPartition(), 5);
  Assert.assertEquals(assignableNode.getInstanceTags(), _testInstanceTags);
  Assert.assertEquals(assignableNode.getFaultZone(), _testFaultZoneId);
  Assert.assertEquals(assignableNode.getDisabledPartitionsMap(), _disabledPartitionsMap);
  Assert.assertEquals(assignableNode.getRemainingCapacity(), expectedCapacityMap);
  Assert.assertEquals(assignableNode.getAssignedReplicas(), assignmentSet);
  Assert.assertEquals(assignableNode.getAssignedPartitionsByResource(_resourceNames.get(0)),
      expectedAssignmentSet1);
  Assert.assertEquals(assignableNode.getAssignedPartitionsByResource(_resourceNames.get(1)),
      expectedAssignmentSet2);
  Assert
      .assertEquals(assignableNode.getAssignedTopStatePartitionsByResource(_resourceNames.get(0)),
          expectedTopStateAssignmentSet1);
  Assert
      .assertEquals(assignableNode.getAssignedTopStatePartitionsByResource(_resourceNames.get(1)),
          expectedTopStateAssignmentSet2);
  Assert.assertEquals(assignableNode.getAssignedTopStatePartitionsCount(),
      expectedTopStateAssignmentSet1.size() + expectedTopStateAssignmentSet2.size());
}
/**
 * Releasing a replica that was never assigned to the node must be a silent no-op
 * rather than an exception.
 */
@Test
public void testReleaseNoPartition() throws IOException {
  ResourceControllerDataProvider cache = setupClusterDataCache();
  AssignableNode node = new AssignableNode(cache.getClusterConfig(),
      cache.getInstanceConfigMap().get(_testInstanceId), _testInstanceId);
  // Construct a replica for a partition name that does not exist on the node.
  AssignableReplica absentReplica = new AssignableReplica(cache.getClusterConfig(),
      cache.getResourceConfig(_resourceNames.get(1)), _partitionNames.get(2) + "non-exist",
      "MASTER", 1);
  // No exception expected.
  node.release(absentReplica);
}
@Test(expectedExceptions = HelixException.class, expectedExceptionsMessageRegExp = "Resource Resource1 already has a replica with state SLAVE from partition Partition1 on node testInstanceId")
public void testAssignDuplicateReplica() throws IOException {
  ResourceControllerDataProvider cache = setupClusterDataCache();
  Set<AssignableReplica> replicas = generateReplicas(cache);
  AssignableNode node = new AssignableNode(cache.getClusterConfig(),
      cache.getInstanceConfigMap().get(_testInstanceId), _testInstanceId);
  node.assignInitBatch(replicas);
  // Assigning another replica of a partition the node already hosts must throw.
  AssignableReplica duplicate = new AssignableReplica(cache.getClusterConfig(),
      cache.getResourceConfig(_resourceNames.get(0)), _partitionNames.get(0), "SLAVE", 2);
  node.assign(duplicate);
}
@Test
public void testParseFaultZoneNotFound() throws IOException {
  ResourceControllerDataProvider cache = setupClusterDataCache();

  // Topology declares a "zone" level as the fault zone type...
  ClusterConfig clusterConfig = new ClusterConfig("testClusterConfigId");
  clusterConfig.setFaultZoneType("zone");
  clusterConfig.setTopologyAwareEnabled(true);
  clusterConfig.setTopology("/zone/");
  when(cache.getClusterConfig()).thenReturn(clusterConfig);

  // ...but the instance domain does not contain a "zone" key.
  InstanceConfig instanceConfig = new InstanceConfig("testInstanceConfigId");
  instanceConfig.setDomain("instance=testInstance");
  Map<String, InstanceConfig> instanceConfigMap = new HashMap<>();
  instanceConfigMap.put(_testInstanceId, instanceConfig);
  when(cache.getInstanceConfigMap()).thenReturn(instanceConfigMap);

  AssignableNode node = new AssignableNode(cache.getClusterConfig(),
      cache.getInstanceConfigMap().get(_testInstanceId), _testInstanceId);
  // With the fault zone key missing from the domain, the node falls back to the default zone.
  Assert.assertEquals(node.getFaultZone(), "Helix_default_zone");
}
@Test
public void testParseFaultZone() throws IOException {
  ResourceControllerDataProvider testCache = setupClusterDataCache();

  // Case 1: the fault zone type is the top topology level ("zone"); the fault zone id
  // should be just the zone value parsed from the instance domain.
  ClusterConfig testClusterConfig = new ClusterConfig("testClusterConfigId");
  testClusterConfig.setFaultZoneType("zone");
  testClusterConfig.setTopologyAwareEnabled(true);
  testClusterConfig.setTopology("/zone/instance");
  when(testCache.getClusterConfig()).thenReturn(testClusterConfig);
  InstanceConfig testInstanceConfig = new InstanceConfig("testInstanceConfigId");
  testInstanceConfig.setDomain("zone=2, instance=testInstance");
  Map<String, InstanceConfig> instanceConfigMap = new HashMap<>();
  instanceConfigMap.put(_testInstanceId, testInstanceConfig);
  when(testCache.getInstanceConfigMap()).thenReturn(instanceConfigMap);
  AssignableNode assignableNode = new AssignableNode(testCache.getClusterConfig(),
      testCache.getInstanceConfigMap().get(_testInstanceId), _testInstanceId);
  Assert.assertEquals(assignableNode.getFaultZone(), "2");

  // Case 2: the fault zone type is the deepest topology level ("instance"); the fault
  // zone id should be the full path of domain values down to that level.
  testClusterConfig = new ClusterConfig("testClusterConfigId");
  testClusterConfig.setFaultZoneType("instance");
  testClusterConfig.setTopologyAwareEnabled(true);
  testClusterConfig.setTopology("/zone/instance");
  when(testCache.getClusterConfig()).thenReturn(testClusterConfig);
  testInstanceConfig = new InstanceConfig("testInstanceConfigId");
  testInstanceConfig.setDomain("zone=2, instance=testInstance");
  instanceConfigMap = new HashMap<>();
  instanceConfigMap.put(_testInstanceId, testInstanceConfig);
  when(testCache.getInstanceConfigMap()).thenReturn(instanceConfigMap);
  assignableNode = new AssignableNode(testCache.getClusterConfig(),
      testCache.getInstanceConfigMap().get(_testInstanceId), _testInstanceId);
  Assert.assertEquals(assignableNode.getFaultZone(), "2/testInstance");

  // test fault zone not in top of topology
  // Case 3: the fault zone level sits in the middle of the topology ("/rack/zone/..."),
  // so the fault zone id is the path from the root down to that level ("rack/zone").
  testClusterConfig = new ClusterConfig("testClusterConfigId");
  testClusterConfig.setFaultZoneType("zone");
  testClusterConfig.setTopologyAwareEnabled(true);
  testClusterConfig.setTopology("/rack/zone/instance");
  testInstanceConfig = new InstanceConfig("testInstanceConfigId");
  testInstanceConfig.setDomain("rack=3, zone=2, instance=testInstanceConfigId");
  instanceConfigMap = new HashMap<>();
  instanceConfigMap.put(_testInstanceId, testInstanceConfig);
  when(testCache.getInstanceConfigMap()).thenReturn(instanceConfigMap);
  when(testCache.getClusterConfig()).thenReturn(testClusterConfig);
  assignableNode = new AssignableNode(testCache.getClusterConfig(),
      testCache.getInstanceConfigMap().get(_testInstanceId), _testInstanceId);
  Assert.assertEquals(assignableNode.getFaultZone(), "3/2");
}
@Test
public void testDefaultInstanceCapacity() {
  // When the instance config carries no capacity map of its own, the cluster-level
  // default instance capacity should apply.
  ClusterConfig clusterConfig = new ClusterConfig("testClusterConfigId");
  clusterConfig.setDefaultInstanceCapacityMap(_capacityDataMap);
  InstanceConfig instanceConfig = new InstanceConfig("testInstanceConfigId");
  AssignableNode node = new AssignableNode(clusterConfig, instanceConfig, _testInstanceId);
  Assert.assertEquals(node.getMaxCapacity(), _capacityDataMap);
}
@Test(expectedExceptions = HelixException.class, expectedExceptionsMessageRegExp = "The required capacity keys: \\[item2, item1, item3, AdditionalCapacityKey\\] are not fully configured in the instance: testInstanceId, capacity map: \\{item2=40, item1=20, item3=30\\}.")
public void testIncompleteInstanceCapacity() {
  // Require one more capacity key than the instance capacity map actually provides.
  List<String> requiredKeys = new ArrayList<>(_capacityDataMap.keySet());
  requiredKeys.add("AdditionalCapacityKey");
  ClusterConfig clusterConfig = new ClusterConfig("testClusterConfigId");
  clusterConfig.setInstanceCapacityKeys(requiredKeys);
  InstanceConfig instanceConfig = new InstanceConfig(_testInstanceId);
  instanceConfig.setInstanceCapacityMap(_capacityDataMap);
  // Node construction must fail because "AdditionalCapacityKey" is not configured.
  new AssignableNode(clusterConfig, instanceConfig, _testInstanceId);
}
}
| 9,779 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/model/TestClusterContext.java
|
package org.apache.helix.controller.rebalancer.waged.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.helix.HelixException;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestClusterContext extends AbstractTestClusterModel {
  @BeforeClass
  public void initialize() {
    super.initialize();
  }

  /**
   * Verifies the estimated counts, utilization estimates, and fault-zone assignment
   * bookkeeping of a freshly built {@link ClusterContext}.
   */
  @Test
  public void testNormalUsage() throws IOException {
    // Build the cluster context from the mocked data cache.
    ResourceControllerDataProvider dataCache = setupClusterDataCache();
    Set<AssignableReplica> replicas = generateReplicas(dataCache);
    ClusterContext clusterContext =
        new ClusterContext(replicas, generateNodes(dataCache), new HashMap<>(), new HashMap<>());

    Assert.assertEquals(clusterContext.getEstimatedMaxPartitionCount(), 4);
    Assert.assertEquals(clusterContext.getEstimatedMaxTopStateCount(), 2);
    Assert.assertEquals(clusterContext.getAssignmentForFaultZoneMap(), Collections.emptyMap());
    for (String resource : _resourceNames) {
      Assert.assertEquals(clusterContext.getEstimatedMaxPartitionByResource(resource), 2);
      Assert.assertEquals(
          clusterContext.getPartitionsForResourceAndFaultZone(_testFaultZoneId, resource),
          Collections.emptySet());
    }

    // Record every replica under the test fault zone; the expected map groups partition
    // names by resource.
    Map<String, Set<String>> partitionsByResource = replicas.stream().collect(Collectors
        .groupingBy(AssignableReplica::getResourceName,
            Collectors.mapping(AssignableReplica::getPartitionName, Collectors.toSet())));
    Map<String, Map<String, Set<String>>> expectedFaultZoneMap =
        Collections.singletonMap(_testFaultZoneId, partitionsByResource);
    for (AssignableReplica replica : replicas) {
      clusterContext.addPartitionToFaultZone(_testFaultZoneId, replica.getResourceName(),
          replica.getPartitionName());
    }
    Assert.assertEquals(clusterContext.getAssignmentForFaultZoneMap(), expectedFaultZoneMap);

    // "item1" is the most utilized capacity key: the 4 partitions weigh 3, 5, 3, 5 out of
    // a capacity of 20 (16/20 total), and the 2 MASTER partitions weigh 3 and 5 (8/20).
    Assert.assertEquals(clusterContext.getEstimatedMaxUtilization(), 16.0 / 20.0, 0.005);
    Assert.assertEquals(clusterContext.getEstimatedTopStateMaxUtilization(), 8.0 / 20.0, 0.005);

    // Releasing a partition must be reflected in the fault-zone assignment map.
    expectedFaultZoneMap.get(_testFaultZoneId).get(_resourceNames.get(0))
        .remove(_partitionNames.get(0));
    Assert.assertTrue(clusterContext.removePartitionFromFaultZone(_testFaultZoneId,
        _resourceNames.get(0), _partitionNames.get(0)));
    Assert.assertEquals(clusterContext.getAssignmentForFaultZoneMap(), expectedFaultZoneMap);
  }

  @Test(expectedExceptions = HelixException.class, expectedExceptionsMessageRegExp = "Resource Resource1 already has a replica from partition Partition1 in fault zone testZone")
  public void testDuplicateAssign() throws IOException {
    ResourceControllerDataProvider dataCache = setupClusterDataCache();
    Set<AssignableReplica> replicas = generateReplicas(dataCache);
    ClusterContext clusterContext =
        new ClusterContext(replicas, generateNodes(dataCache), new HashMap<>(), new HashMap<>());
    clusterContext
        .addPartitionToFaultZone(_testFaultZoneId, _resourceNames.get(0), _partitionNames.get(0));
    // Inserting the same partition into the same fault zone again must throw.
    clusterContext
        .addPartitionToFaultZone(_testFaultZoneId, _resourceNames.get(0), _partitionNames.get(0));
  }
}
| 9,780 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/model/TestClusterModel.java
|
package org.apache.helix.controller.rebalancer.waged.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collections;
import java.util.Set;
import org.apache.helix.HelixException;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestClusterModel extends AbstractTestClusterModel {
  @BeforeClass
  public void initialize() {
    super.initialize();
  }

  /**
   * Covers assign/release bookkeeping of {@link ClusterModel}. Initialization of the
   * context, nodes, and replicas is verified by their own dedicated tests.
   */
  @Test
  public void testNormalUsage() throws IOException {
    // Build the model from the mocked data cache.
    ResourceControllerDataProvider dataCache = setupClusterDataCache();
    Set<AssignableReplica> replicas = generateReplicas(dataCache);
    Set<AssignableNode> nodes = generateNodes(dataCache);
    ClusterContext clusterContext =
        new ClusterContext(replicas, nodes, Collections.emptyMap(), Collections.emptyMap());
    ClusterModel clusterModel = new ClusterModel(clusterContext, replicas, nodes);

    // Nothing is assigned right after construction.
    Assert.assertTrue(clusterModel.getContext().getAssignmentForFaultZoneMap().values().stream()
        .allMatch(resourceMap -> resourceMap.values().isEmpty()));
    Assert.assertFalse(clusterModel.getAssignableNodes().values().stream()
        .anyMatch(node -> node.getAssignedReplicaCount() != 0));

    // Assign one replica; both the fault-zone map and the node bookkeeping must record it.
    AssignableReplica replica = replicas.iterator().next();
    AssignableNode node = nodes.iterator().next();
    clusterModel.assign(replica.getResourceName(), replica.getPartitionName(),
        replica.getReplicaState(), node.getInstanceName());
    Assert.assertTrue(
        clusterModel.getContext().getAssignmentForFaultZoneMap().get(node.getFaultZone())
            .get(replica.getResourceName()).contains(replica.getPartitionName()));
    Assert.assertTrue(node.getAssignedPartitionsMap().get(replica.getResourceName())
        .contains(replica.getPartitionName()));

    // Assigning a partition of an unknown resource must fail.
    try {
      clusterModel.assign("NOT-EXIST", replica.getPartitionName(), replica.getReplicaState(),
          node.getInstanceName());
      Assert.fail("Assigning a non existing resource partition shall fail.");
    } catch (HelixException ex) {
      // expected
    }

    // Assigning to an unknown instance must fail.
    try {
      clusterModel.assign(replica.getResourceName(), replica.getPartitionName(),
          replica.getReplicaState(), "NON-EXIST");
      Assert.fail("Assigning a resource partition to a non existing instance shall fail.");
    } catch (HelixException ex) {
      // expected
    }

    // Release the replica; the model must report an empty assignment again.
    clusterModel.release(replica.getResourceName(), replica.getPartitionName(),
        replica.getReplicaState(), node.getInstanceName());
    Assert.assertTrue(clusterModel.getContext().getAssignmentForFaultZoneMap().values().stream()
        .allMatch(resourceMap -> resourceMap.values().stream().allMatch(Set::isEmpty)));
    Assert.assertFalse(clusterModel.getAssignableNodes().values().stream()
        .anyMatch(n -> n.getAssignedReplicaCount() != 0));
  }
}
| 9,781 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/model/TestOptimalAssignment.java
|
package org.apache.helix.controller.rebalancer.waged.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import org.apache.helix.HelixException;
import org.apache.helix.model.Partition;
import org.apache.helix.model.ResourceAssignment;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestOptimalAssignment extends ClusterModelTestHelper {
  @BeforeClass
  public void initialize() {
    super.initialize();
  }

  @Test
  public void testUpdateAssignment() throws IOException {
    OptimalAssignment optimalAssignment = new OptimalAssignment();

    // An empty cluster model yields an empty optimal assignment.
    optimalAssignment.updateAssignments(getDefaultClusterModel());
    Map<String, ResourceAssignment> assignmentMap =
        optimalAssignment.getOptimalResourceAssignment();
    Assert.assertEquals(assignmentMap, Collections.emptyMap());

    // Two assigned replicas must both appear in the converted ResourceAssignment.
    ClusterModel clusterModel = getDefaultClusterModel();
    clusterModel.assign(_resourceNames.get(0), _partitionNames.get(1), "SLAVE", _testInstanceId);
    clusterModel.assign(_resourceNames.get(0), _partitionNames.get(0), "MASTER", _testInstanceId);
    optimalAssignment.updateAssignments(clusterModel);
    assignmentMap = optimalAssignment.getOptimalResourceAssignment();

    Partition firstPartition = new Partition(_partitionNames.get(0));
    Partition secondPartition = new Partition(_partitionNames.get(1));
    ResourceAssignment resourceAssignment = assignmentMap.get(_resourceNames.get(0));
    Assert.assertEquals(resourceAssignment.getMappedPartitions(),
        Arrays.asList(firstPartition, secondPartition));
    Assert.assertEquals(resourceAssignment.getReplicaMap(secondPartition),
        Collections.singletonMap(_testInstanceId, "SLAVE"));
    Assert.assertEquals(resourceAssignment.getReplicaMap(firstPartition),
        Collections.singletonMap(_testInstanceId, "MASTER"));
  }

  @Test(dependsOnMethods = "testUpdateAssignment")
  public void TestAssignmentFailure() throws IOException {
    OptimalAssignment optimalAssignment = new OptimalAssignment();
    ClusterModel clusterModel = getDefaultClusterModel();

    // Record a failure for one replica/node pair.
    AssignableReplica failedReplica =
        clusterModel.getAssignableReplicaMap().get(_resourceNames.get(0)).iterator().next();
    AssignableNode failedNode = clusterModel.getAssignableNodes().get(_testInstanceId);
    optimalAssignment.recordAssignmentFailure(failedReplica, Collections
        .singletonMap(failedNode, Collections.singletonList("Assignment Failure!")));
    Assert.assertTrue(optimalAssignment.hasAnyFailure());

    // With a failure recorded, reading the optimal assignment must throw.
    optimalAssignment.updateAssignments(getDefaultClusterModel());
    try {
      optimalAssignment.getOptimalResourceAssignment();
      Assert.fail("Get optimal assignment shall fail because of the failure record.");
    } catch (HelixException ex) {
      Assert.assertTrue(ex.getMessage().startsWith(
          "Cannot get the optimal resource assignment since a calculation failure is recorded."));
    }
  }
}
| 9,782 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/model/ClusterModelTestHelper.java
|
package org.apache.helix.controller.rebalancer.waged.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.model.InstanceConfig;
import static org.mockito.Mockito.when;
public class ClusterModelTestHelper extends AbstractTestClusterModel {
  public static final String TEST_INSTANCE_ID_1 = "TestInstanceId1";
  public static final String TEST_INSTANCE_ID_2 = "TestInstanceId2";

  /** Builds a single-instance cluster model from the default mocked data cache. */
  public ClusterModel getDefaultClusterModel() throws IOException {
    initialize();
    ResourceControllerDataProvider dataCache = setupClusterDataCache();
    Set<AssignableReplica> replicas = generateReplicas(dataCache);
    Set<AssignableNode> nodes = generateNodes(dataCache);
    ClusterContext clusterContext =
        new ClusterContext(replicas, nodes, Collections.emptyMap(), Collections.emptyMap());
    return new ClusterModel(clusterContext, replicas, nodes);
  }

  /** Builds a two-instance cluster model whose utilization is close to full. */
  public ClusterModel getMultiNodeClusterModel() throws IOException {
    initialize();
    return buildTwoInstanceClusterModel(setupClusterDataCacheForNearFullUtil());
  }

  /** Builds a two-instance cluster model whose replicas cannot all fit. */
  public ClusterModel getMultiNodeClusterModelNegativeSetup() throws IOException {
    initialize();
    return buildTwoInstanceClusterModel(setupClusterDataCacheForNoFitUtil());
  }

  // Registers two mock instances on the given data cache, then wraps the cache into a
  // cluster model.
  private ClusterModel buildTwoInstanceClusterModel(ResourceControllerDataProvider dataCache)
      throws IOException {
    Map<String, InstanceConfig> instanceConfigMap = new HashMap<>();
    instanceConfigMap.put(TEST_INSTANCE_ID_1, createMockInstanceConfig(TEST_INSTANCE_ID_1));
    instanceConfigMap.put(TEST_INSTANCE_ID_2, createMockInstanceConfig(TEST_INSTANCE_ID_2));
    when(dataCache.getInstanceConfigMap()).thenReturn(instanceConfigMap);
    Set<AssignableReplica> replicas = generateReplicas(dataCache);
    Set<AssignableNode> nodes = generateNodes(dataCache);
    ClusterContext clusterContext =
        new ClusterContext(replicas, nodes, Collections.emptyMap(), Collections.emptyMap());
    return new ClusterModel(clusterContext, replicas, nodes);
  }
}
| 9,783 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/waged/model/AbstractTestClusterModel.java
|
package org.apache.helix.controller.rebalancer.waged.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.rebalancer.constraint.MonitoredAbnormalResolver;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.ResourceConfig;
import org.mockito.Mockito;
import org.testng.annotations.BeforeClass;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
public abstract class AbstractTestClusterModel {
// Shared ZK session id used by all mocked live instances and current states.
protected static String _sessionId = "testSessionId";
// Default instance under test; most mocks are keyed by this id.
protected String _testInstanceId;
// Mock resource names (Resource1, Resource2, ...); subclasses may append more.
protected List<String> _resourceNames;
// Mock partition names; indices pair up with _resourceNames (2 partitions per resource).
protected List<String> _partitionNames;
// Instance capacity by capacity key (item1/item2/item3).
protected Map<String, Integer> _capacityDataMap;
// Disabled partitions keyed by resource name, used for instance-level disable tests.
protected Map<String, List<String>> _disabledPartitionsMap;
// Tags applied to the mock instance config.
protected List<String> _testInstanceTags;
// Fault zone id assigned to the mock instance.
protected String _testFaultZoneId;
// Set of all instance ids in the mocked cluster.
protected Set<String> _instances;
@BeforeClass
public void initialize() {
  // Reset all shared fixtures so each test class starts from the same baseline.
  _testInstanceId = "testInstanceId";

  _resourceNames = new ArrayList<>();
  Collections.addAll(_resourceNames, "Resource1", "Resource2");

  _partitionNames = new ArrayList<>();
  Collections.addAll(_partitionNames, "Partition1", "Partition2", "Partition3", "Partition4");

  _capacityDataMap = new HashMap<>();
  _capacityDataMap.put("item1", 20);
  _capacityDataMap.put("item2", 40);
  _capacityDataMap.put("item3", 30);

  _disabledPartitionsMap = new HashMap<>();
  List<String> disabledPartitions = new ArrayList<>();
  disabledPartitions.add("TestPartition");
  _disabledPartitionsMap.put("TestResource", disabledPartitions);

  _testInstanceTags = new ArrayList<>();
  _testInstanceTags.add("TestTag");

  _testFaultZoneId = "testZone";

  _instances = new HashSet<>();
  _instances.add(_testInstanceId);
}
// Builds an enabled instance config carrying the default capacity, tag, and fault zone.
protected InstanceConfig createMockInstanceConfig(String instanceId) {
  InstanceConfig config = new InstanceConfig(instanceId);
  config.setInstanceCapacityMap(_capacityDataMap);
  config.addTag(_testInstanceTags.get(0));
  config.setInstanceEnabled(true);
  config.setZoneId(_testFaultZoneId);
  return config;
}
// Builds a live-instance node bound to the shared test session id.
protected LiveInstance createMockLiveInstance(String instanceId) {
  LiveInstance liveInstance = new LiveInstance(instanceId);
  liveInstance.setSessionId(_sessionId);
  return liveInstance;
}
/**
 * Builds a mocked {@link ResourceControllerDataProvider} describing one enabled instance
 * that hosts two resources (Resource1/Resource2), each with one MASTER and one SLAVE
 * partition, plus per-resource partition weights. Note the stubbing order matters: later
 * {@code when(...)} calls on the same method override earlier ones.
 */
protected ResourceControllerDataProvider setupClusterDataCache() throws IOException {
  ResourceControllerDataProvider testCache = Mockito.mock(ResourceControllerDataProvider.class);

  // 1. Set up the default instance information with capacity configuration.
  // "TestResource"/"TestPartition" is disabled on the instance for disable-related tests.
  InstanceConfig testInstanceConfig = createMockInstanceConfig(_testInstanceId);
  testInstanceConfig.setInstanceEnabledForPartition("TestResource", "TestPartition", false);
  Map<String, InstanceConfig> instanceConfigMap = new HashMap<>();
  instanceConfigMap.put(_testInstanceId, testInstanceConfig);
  when(testCache.getInstanceConfigMap()).thenReturn(instanceConfigMap);

  // 2. Set up the basic cluster configuration.
  ClusterConfig testClusterConfig = new ClusterConfig("testClusterConfigId");
  testClusterConfig.setMaxPartitionsPerInstance(5);
  testClusterConfig.setDisabledInstances(Collections.emptyMap());
  testClusterConfig.setInstanceCapacityKeys(new ArrayList<>(_capacityDataMap.keySet()));
  // Default partition weight of 0 for every capacity key; the resource configs in step 5
  // supply the real weights for Resource1/Resource2.
  testClusterConfig.setDefaultPartitionWeightMap(
      _capacityDataMap.keySet().stream().collect(Collectors.toMap(key -> key, key -> 0)));
  testClusterConfig.setTopologyAwareEnabled(true);
  when(testCache.getClusterConfig()).thenReturn(testClusterConfig);
  when(testCache.getAbnormalStateResolver(any()))
      .thenReturn(MonitoredAbnormalResolver.DUMMY_STATE_RESOLVER);

  // 3. Mock the live instance node for the default instance.
  LiveInstance testLiveInstance = createMockLiveInstance(_testInstanceId);
  Map<String, LiveInstance> liveInstanceMap = new HashMap<>();
  liveInstanceMap.put(_testInstanceId, testLiveInstance);
  when(testCache.getLiveInstances()).thenReturn(liveInstanceMap);

  // 4. Mock two resources, each with 2 partitions on the default instance.
  // The instance will have the following partitions assigned
  // Resource 1:
  // -------------- partition 1 - MASTER
  // -------------- partition 2 - SLAVE
  // Resource 2:
  // -------------- partition 3 - MASTER
  // -------------- partition 4 - SLAVE
  CurrentState testCurrentStateResource1 = Mockito.mock(CurrentState.class);
  Map<String, String> partitionStateMap1 = new HashMap<>();
  partitionStateMap1.put(_partitionNames.get(0), "MASTER");
  partitionStateMap1.put(_partitionNames.get(1), "SLAVE");
  when(testCurrentStateResource1.getResourceName()).thenReturn(_resourceNames.get(0));
  when(testCurrentStateResource1.getPartitionStateMap()).thenReturn(partitionStateMap1);
  when(testCurrentStateResource1.getStateModelDefRef()).thenReturn("MasterSlave");
  when(testCurrentStateResource1.getState(_partitionNames.get(0))).thenReturn("MASTER");
  when(testCurrentStateResource1.getState(_partitionNames.get(1))).thenReturn("SLAVE");
  when(testCurrentStateResource1.getSessionId()).thenReturn(_sessionId);
  CurrentState testCurrentStateResource2 = Mockito.mock(CurrentState.class);
  Map<String, String> partitionStateMap2 = new HashMap<>();
  partitionStateMap2.put(_partitionNames.get(2), "MASTER");
  partitionStateMap2.put(_partitionNames.get(3), "SLAVE");
  when(testCurrentStateResource2.getResourceName()).thenReturn(_resourceNames.get(1));
  when(testCurrentStateResource2.getPartitionStateMap()).thenReturn(partitionStateMap2);
  when(testCurrentStateResource2.getStateModelDefRef()).thenReturn("MasterSlave");
  when(testCurrentStateResource2.getState(_partitionNames.get(2))).thenReturn("MASTER");
  when(testCurrentStateResource2.getState(_partitionNames.get(3))).thenReturn("SLAVE");
  when(testCurrentStateResource2.getSessionId()).thenReturn(_sessionId);
  Map<String, CurrentState> currentStatemap = new HashMap<>();
  currentStatemap.put(_resourceNames.get(0), testCurrentStateResource1);
  currentStatemap.put(_resourceNames.get(1), testCurrentStateResource2);
  when(testCache.getCurrentState(_testInstanceId, _sessionId)).thenReturn(currentStatemap);
  when(testCache.getCurrentState(_testInstanceId, _sessionId, false)).thenReturn(currentStatemap);

  // 5. Set up the resource config for the two resources with the partition weight.
  Map<String, Integer> capacityDataMapResource1 = new HashMap<>();
  capacityDataMapResource1.put("item1", 3);
  capacityDataMapResource1.put("item2", 6);
  ResourceConfig testResourceConfigResource1 = new ResourceConfig("Resource1");
  testResourceConfigResource1.setPartitionCapacityMap(
      Collections.singletonMap(ResourceConfig.DEFAULT_PARTITION_KEY, capacityDataMapResource1));
  when(testCache.getResourceConfig("Resource1")).thenReturn(testResourceConfigResource1);
  Map<String, Integer> capacityDataMapResource2 = new HashMap<>();
  capacityDataMapResource2.put("item1", 5);
  capacityDataMapResource2.put("item2", 10);
  ResourceConfig testResourceConfigResource2 = new ResourceConfig("Resource2");
  testResourceConfigResource2.setPartitionCapacityMap(
      Collections.singletonMap(ResourceConfig.DEFAULT_PARTITION_KEY, capacityDataMapResource2));
  when(testCache.getResourceConfig("Resource2")).thenReturn(testResourceConfigResource2);
  Map<String, ResourceConfig> configMap = new HashMap<>();
  configMap.put("Resource1", testResourceConfigResource1);
  configMap.put("Resource2", testResourceConfigResource2);
  when(testCache.getResourceConfigMap()).thenReturn(configMap);

  // 6. Define mock state model
  // Stub every built-in state model definition so state-model lookups always resolve.
  for (BuiltInStateModelDefinitions bsmd : BuiltInStateModelDefinitions.values()) {
    when(testCache.getStateModelDef(bsmd.name())).thenReturn(bsmd.getStateModelDefinition());
  }
  return testCache;
}
// Add another resource. When computing the assignment, the two smaller resources' Master
// partitions should be assigned to one instance and the relatively larger resource's Master
// partition should be assigned to another.
// The sorting algorithm in ConstraintBasedAlgorithm should guarantee that these 2 smaller
// resources are placed after the larger one.
// This is the only way to accommodate all 6 partitions.
protected ResourceControllerDataProvider setupClusterDataCacheForNearFullUtil() throws IOException {
_resourceNames.add("Resource3");
_partitionNames.add("Partition5");
_partitionNames.add("Partition6");
ResourceControllerDataProvider testCache = setupClusterDataCache();
CurrentState testCurrentStateResource3 = Mockito.mock(CurrentState.class);
Map<String, String> partitionStateMap3 = new HashMap<>();
partitionStateMap3.put(_partitionNames.get(4), "MASTER");
partitionStateMap3.put(_partitionNames.get(5), "SLAVE");
when(testCurrentStateResource3.getResourceName()).thenReturn(_resourceNames.get(2));
when(testCurrentStateResource3.getPartitionStateMap()).thenReturn(partitionStateMap3);
when(testCurrentStateResource3.getStateModelDefRef()).thenReturn("MasterSlave");
when(testCurrentStateResource3.getState(_partitionNames.get(4))).thenReturn("MASTER");
when(testCurrentStateResource3.getState(_partitionNames.get(5))).thenReturn("SLAVE");
when(testCurrentStateResource3.getSessionId()).thenReturn(_sessionId);
Map<String, CurrentState> currentStatemap = testCache.getCurrentState(_testInstanceId, _sessionId);
currentStatemap.put(_resourceNames.get(2), testCurrentStateResource3);
when(testCache.getCurrentState(_testInstanceId, _sessionId)).thenReturn(currentStatemap);
when(testCache.getCurrentState(_testInstanceId, _sessionId, false)).thenReturn(currentStatemap);
Map<String, Integer> capacityDataMapResource3 = new HashMap<>();
capacityDataMapResource3.put("item1", 9);
capacityDataMapResource3.put("item2", 17);
ResourceConfig testResourceConfigResource3 = new ResourceConfig("Resource3");
testResourceConfigResource3.setPartitionCapacityMap(
Collections.singletonMap(ResourceConfig.DEFAULT_PARTITION_KEY, capacityDataMapResource3));
when(testCache.getResourceConfig("Resource3")).thenReturn(testResourceConfigResource3);
Map<String, ResourceConfig> configMap = testCache.getResourceConfigMap();
configMap.put("Resource3", testResourceConfigResource3);
when(testCache.getResourceConfigMap()).thenReturn(configMap);
return testCache;
}
// Extends setupClusterDataCache() with a resource ("Resource4") whose capacity demand is too
// large to fit on any instance, for negative testing.
// TODO: this function has a lot of lines in common with setupClusterDataCacheForNearFullUtil.
// We should have a more generalized factory function instead.
protected ResourceControllerDataProvider setupClusterDataCacheForNoFitUtil() throws IOException {
  _resourceNames.add("Resource4");
  _partitionNames.add("Partition7");
  _partitionNames.add("Partition8");
  ResourceControllerDataProvider testCache = setupClusterDataCache();
  // Mock a current state for Resource4: the partitions added above (indices 4 and 5 of
  // _partitionNames) are MASTER and SLAVE respectively.
  CurrentState testCurrentStateResource4 = Mockito.mock(CurrentState.class);
  Map<String, String> partitionStateMap4 = new HashMap<>();
  partitionStateMap4.put(_partitionNames.get(4), "MASTER");
  partitionStateMap4.put(_partitionNames.get(5), "SLAVE");
  when(testCurrentStateResource4.getResourceName()).thenReturn(_resourceNames.get(2));
  when(testCurrentStateResource4.getPartitionStateMap()).thenReturn(partitionStateMap4);
  when(testCurrentStateResource4.getStateModelDefRef()).thenReturn("MasterSlave");
  when(testCurrentStateResource4.getState(_partitionNames.get(4))).thenReturn("MASTER");
  when(testCurrentStateResource4.getState(_partitionNames.get(5))).thenReturn("SLAVE");
  when(testCurrentStateResource4.getSessionId()).thenReturn(_sessionId);
  Map<String, CurrentState> currentStatemap =
      testCache.getCurrentState(_testInstanceId, _sessionId);
  currentStatemap.put(_resourceNames.get(2), testCurrentStateResource4);
  when(testCache.getCurrentState(_testInstanceId, _sessionId)).thenReturn(currentStatemap);
  when(testCache.getCurrentState(_testInstanceId, _sessionId, false)).thenReturn(currentStatemap);
  // Locals renamed from "...Resource3" for consistency: they configure Resource4. The
  // "item1" requirement (90) exceeds any instance capacity, making the assignment infeasible.
  Map<String, Integer> capacityDataMapResource4 = new HashMap<>();
  capacityDataMapResource4.put("item1", 90);
  capacityDataMapResource4.put("item2", 9);
  ResourceConfig testResourceConfigResource4 = new ResourceConfig("Resource4");
  testResourceConfigResource4.setPartitionCapacityMap(
      Collections.singletonMap(ResourceConfig.DEFAULT_PARTITION_KEY, capacityDataMapResource4));
  when(testCache.getResourceConfig("Resource4")).thenReturn(testResourceConfigResource4);
  Map<String, ResourceConfig> configMap = testCache.getResourceConfigMap();
  configMap.put("Resource4", testResourceConfigResource4);
  when(testCache.getResourceConfigMap()).thenReturn(configMap);
  return testCache;
}
/**
 * Generate the replica objects according to the provider information.
 *
 * @param dataProvider the mocked cluster data cache holding current states and resource configs
 * @return one AssignableReplica per partition recorded in the instance's current states
 */
protected Set<AssignableReplica> generateReplicas(ResourceControllerDataProvider dataProvider) {
  Map<String, CurrentState> currentStateMap =
      dataProvider.getCurrentState(_testInstanceId, _sessionId);
  Set<AssignableReplica> replicaSet = new HashSet<>();
  for (CurrentState currentState : currentStateMap.values()) {
    ResourceConfig resourceConfig = dataProvider.getResourceConfig(currentState.getResourceName());
    // Construct one AssignableReplica for each partition in the current state.
    for (Map.Entry<String, String> partitionState : currentState.getPartitionStateMap()
        .entrySet()) {
      String partitionName = partitionState.getKey();
      String state = partitionState.getValue();
      // State priority: 1 for MASTER replicas, 2 for everything else.
      int statePriority = state.equals("MASTER") ? 1 : 2;
      replicaSet.add(new AssignableReplica(dataProvider.getClusterConfig(), resourceConfig,
          partitionName, state, statePriority));
    }
  }
  return replicaSet;
}
/**
 * Build one AssignableNode per instance config found in the cluster data cache.
 *
 * @param testCache the mocked cluster data cache
 * @return the set of assignable nodes for the cached instances
 */
protected Set<AssignableNode> generateNodes(ResourceControllerDataProvider testCache) {
  Set<AssignableNode> result = new HashSet<>();
  testCache.getInstanceConfigMap().values().forEach(instanceConfig -> result.add(
      new AssignableNode(testCache.getClusterConfig(), instanceConfig,
          instanceConfig.getInstanceName())));
  return result;
}
}
| 9,784 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/constraint/TestAbnormalStatesResolverMonitor.java
|
package org.apache.helix.controller.rebalancer.constraint;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.management.ManagementFactory;
import java.util.Random;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.MBeanException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import org.apache.helix.model.MasterSlaveSMD;
import org.apache.helix.monitoring.mbeans.MonitorDomainNames;
import org.apache.helix.monitoring.metrics.AbnormalStatesMetricCollector;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Verifies the MBean lifecycle and counter behavior of {@code MonitoredAbnormalResolver}:
 * registration on construction, metric recording, and unregistration on close.
 */
public class TestAbnormalStatesResolverMonitor {
  private static final MBeanServer MBEAN_SERVER = ManagementFactory.getPlatformMBeanServer();
  // Made static final: the cluster name is a shared constant, not per-instance state.
  private static final String CLUSTER_NAME = "TestCluster";

  @Test
  public void testMonitorResolver()
      throws MalformedObjectNameException, AttributeNotFoundException, MBeanException,
      ReflectionException, InstanceNotFoundException {
    final String testResolverMonitorMbeanName = String
        .format("%s:%s=%s, %s=%s.%s", MonitorDomainNames.Rebalancer, "ClusterName", CLUSTER_NAME,
            "EntityName", "AbnormalStates", MasterSlaveSMD.name);
    final ObjectName testResolverMonitorMbeanObjectName =
        new ObjectName(testResolverMonitorMbeanName);
    // The MBean must not exist before the resolver is constructed.
    Assert.assertFalse(MBEAN_SERVER.isRegistered(testResolverMonitorMbeanObjectName));
    // Update the resolver configuration for MasterSlave state model.
    MonitoredAbnormalResolver monitoredAbnormalResolver =
        new MonitoredAbnormalResolver(new MockAbnormalStateResolver(), CLUSTER_NAME,
            MasterSlaveSMD.name);
    // Validate the MBean has been registered and both counters start at zero.
    Assert.assertTrue(MBEAN_SERVER.isRegistered(testResolverMonitorMbeanObjectName));
    Assert.assertEquals(MBEAN_SERVER.getAttribute(testResolverMonitorMbeanObjectName,
        AbnormalStatesMetricCollector.AbnormalStatesMetricNames.AbnormalStatePartitionCounter
            .name()), 0L);
    Assert.assertEquals(MBEAN_SERVER.getAttribute(testResolverMonitorMbeanObjectName,
        AbnormalStatesMetricCollector.AbnormalStatesMetricNames.RecoveryAttemptCounter.name()), 0L);
    // Validate the metrics recording methods work as expected. Use a primitive long instead
    // of a boxed Long for the randomized expectation so the loop conditions below do not
    // unbox on every iteration; assertEquals boxes it once for the Object comparison.
    Random ran = new Random(System.currentTimeMillis());
    long expectation = 1L + ran.nextInt(10);
    for (int i = 0; i < expectation; i++) {
      monitoredAbnormalResolver.recordAbnormalState();
    }
    Assert.assertEquals(MBEAN_SERVER.getAttribute(testResolverMonitorMbeanObjectName,
        AbnormalStatesMetricCollector.AbnormalStatesMetricNames.AbnormalStatePartitionCounter
            .name()), expectation);
    expectation = 1L + ran.nextInt(10);
    for (int i = 0; i < expectation; i++) {
      monitoredAbnormalResolver.recordRecoveryAttempt();
    }
    Assert.assertEquals(MBEAN_SERVER.getAttribute(testResolverMonitorMbeanObjectName,
        AbnormalStatesMetricCollector.AbnormalStatesMetricNames.RecoveryAttemptCounter.name()),
        expectation);
    // Reset the resolver map; closing must unregister the MBean.
    monitoredAbnormalResolver.close();
    Assert.assertFalse(MBEAN_SERVER.isRegistered(testResolverMonitorMbeanObjectName));
  }
}
| 9,785 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/constraint/MockAbnormalStateResolver.java
|
package org.apache.helix.controller.rebalancer.constraint;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import java.util.Map;
import org.apache.helix.api.rebalancer.constraint.AbnormalStateResolver;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.model.Partition;
import org.apache.helix.model.StateModelDefinition;
/**
* A mock abnormal state resolver for supporting tests.
* It always return dummy result.
*/
/**
 * A mock abnormal state resolver for supporting tests.
 * It always returns a dummy result: every current state is reported as valid, and recovery
 * is not supported.
 */
public class MockAbnormalStateResolver implements AbnormalStateResolver {
  @Override
  public boolean checkCurrentStates(final CurrentStateOutput currentStateOutput,
      final String resourceName, final Partition partition,
      final StateModelDefinition stateModelDef) {
    // By default, all current states are valid.
    return true;
  }

  // Added @Override for consistency with checkCurrentStates: this method implements the
  // AbnormalStateResolver contract as well.
  @Override
  public Map<String, String> computeRecoveryAssignment(final CurrentStateOutput currentStateOutput,
      final String resourceName, final Partition partition,
      final StateModelDefinition stateModelDef, final List<String> preferenceList) {
    // The mock deliberately refuses to recover; tests only exercise the check path.
    throw new UnsupportedOperationException("The mock resolver won't recover abnormal states.");
  }
}
| 9,786 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/constraint
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/constraint/dataprovider/MockPartitionWeightProvider.java
|
package org.apache.helix.controller.rebalancer.constraint.dataprovider;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashMap;
import java.util.Map;
import org.apache.helix.api.rebalancer.constraint.dataprovider.PartitionWeightProvider;
/**
 * A test-only {@code PartitionWeightProvider} backed by an in-memory map with a fallback
 * default weight for any (resource, partition) pair not explicitly configured.
 */
public class MockPartitionWeightProvider implements PartitionWeightProvider {
  // Weight returned when no per-partition entry exists.
  private final int _defaultWeight;
  // resource -> (partition -> weight); stored by reference so callers may mutate it after
  // construction if a test needs to.
  private Map<String, Map<String, Integer>> _partitionWeightMap = new HashMap<>();

  public MockPartitionWeightProvider(int defaultWeight) {
    // use the default weight
    _defaultWeight = defaultWeight;
  }

  public MockPartitionWeightProvider(Map<String, Map<String, Integer>> partitionWeightMap,
      int defaultWeight) {
    _partitionWeightMap = partitionWeightMap;
    _defaultWeight = defaultWeight;
  }

  @Override
  public int getPartitionWeight(String resource, String partition) {
    // Single lookup per map (the original did containsKey + get, and would have thrown an
    // NPE on unboxing if a null weight had been stored).
    Map<String, Integer> resourceWeights = _partitionWeightMap.get(resource);
    if (resourceWeights != null) {
      Integer weight = resourceWeights.get(partition);
      if (weight != null) {
        return weight;
      }
    }
    return _defaultWeight;
  }
}
| 9,787 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/constraint
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/rebalancer/constraint/dataprovider/MockCapacityProvider.java
|
package org.apache.helix.controller.rebalancer.constraint.dataprovider;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashMap;
import java.util.Map;
import org.apache.helix.api.rebalancer.constraint.dataprovider.CapacityProvider;
/**
 * A test-only {@code CapacityProvider} backed by in-memory maps: capacities come from the
 * constructor-supplied map (with a default fallback) and usage defaults to 0.
 */
public class MockCapacityProvider implements CapacityProvider {
  // Capacity returned for participants absent from _capacityMap.
  private final int _defaultCapacity;
  private final Map<String, Integer> _capacityMap = new HashMap<>();
  // Never populated in this mock, so usage is always the 0 fallback.
  private final Map<String, Integer> _usageMap = new HashMap<>();

  public MockCapacityProvider(Map<String, Integer> capacityMap, int defaultCapacity) {
    _capacityMap.putAll(capacityMap);
    _defaultCapacity = defaultCapacity;
  }

  @Override
  public int getParticipantCapacity(String participant) {
    // getOrDefault avoids the containsKey + get double lookup of the original.
    return _capacityMap.getOrDefault(participant, _defaultCapacity);
  }

  @Override
  public int getParticipantUsage(String participant) {
    return _usageMap.getOrDefault(participant, 0);
  }
}
| 9,788 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/strategy/TestTopology.java
|
package org.apache.helix.controller.strategy;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.controller.rebalancer.TestAutoRebalanceStrategy;
import org.apache.helix.controller.rebalancer.topology.Node;
import org.apache.helix.controller.rebalancer.topology.Topology;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.InstanceConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Tests Topology construction: fault-zone partitioning, child counts per layer, and
 * aggregated weights, both with an explicit topology path and with the default topology.
 */
public class TestTopology {
  // Fixed copy-paste bug: the logger was previously created with
  // TestAutoRebalanceStrategy.class; also made final per convention.
  private static final Logger logger = LoggerFactory.getLogger(TestTopology.class);

  @Test
  public void testCreateClusterTopology() {
    ClusterConfig clusterConfig = new ClusterConfig("Test_Cluster");
    String topology = "/Rack/Sub-Rack/Host/Instance";
    clusterConfig.setTopology(topology);
    clusterConfig.setFaultZoneType("Sub-Rack");
    clusterConfig.setTopologyAwareEnabled(true);
    List<String> allNodes = new ArrayList<>();
    List<String> liveNodes = new ArrayList<>();
    Map<String, InstanceConfig> instanceConfigMap = new HashMap<>();
    // Expected aggregate weight per rack / sub-rack, keyed by the node name.
    Map<String, Integer> nodeToWeightMap = new HashMap<>();
    // 100 instances: 4 racks of 25, 20 sub-racks of 5. Every 10th instance is down
    // (weight 0); every 3rd live instance gets an explicit weight, the rest default to 1000.
    for (int i = 0; i < 100; i++) {
      String instance = "localhost_" + i;
      InstanceConfig config = new InstanceConfig(instance);
      String rackId = "rack_" + i / 25;
      String subRackId = "subrack-" + i / 5;
      String domain =
          String.format("Rack=%s, Sub-Rack=%s, Host=%s", rackId, subRackId, instance);
      config.setDomain(domain);
      config.setHostName(instance);
      config.setPort("9000");
      allNodes.add(instance);
      int weight = 0;
      if (i % 10 != 0) {
        liveNodes.add(instance);
        weight = 1000;
        if (i % 3 == 0) {
          // set random instance weight.
          weight = (i + 1) * 100;
          config.setWeight(weight);
        }
      }
      instanceConfigMap.put(instance, config);
      // Map.merge replaces the original containsKey/put/get accumulation.
      nodeToWeightMap.merge(rackId, weight, Integer::sum);
      nodeToWeightMap.merge(subRackId, weight, Integer::sum);
    }
    Topology topo = new Topology(allNodes, liveNodes, instanceConfigMap, clusterConfig);
    Assert.assertTrue(topo.getEndNodeType().equals("Instance"));
    Assert.assertTrue(topo.getFaultZoneType().equals("Sub-Rack"));
    List<Node> faultZones = topo.getFaultZones();
    Assert.assertEquals(faultZones.size(), 20);
    Node root = topo.getRootNode();
    Assert.assertEquals(root.getChildrenCount("Rack"), 4);
    Assert.assertEquals(root.getChildrenCount("Sub-Rack"), 20);
    Assert.assertEquals(root.getChildrenCount("Host"), 100);
    Assert.assertEquals(root.getChildrenCount("Instance"), 100);
    // validate weights at the rack and sub-rack layers.
    for (Node rack : root.getChildren()) {
      Assert.assertEquals(rack.getWeight(), (long) nodeToWeightMap.get(rack.getName()));
      for (Node subRack : rack.getChildren()) {
        Assert.assertEquals(subRack.getWeight(), (long) nodeToWeightMap.get(subRack.getName()));
      }
    }
  }

  @Test
  public void testCreateClusterTopologyWithDefaultTopology() {
    ClusterConfig clusterConfig = new ClusterConfig("Test_Cluster");
    clusterConfig.setTopologyAwareEnabled(true);
    List<String> allNodes = new ArrayList<>();
    List<String> liveNodes = new ArrayList<>();
    Map<String, InstanceConfig> instanceConfigMap = new HashMap<>();
    // Expected aggregate weight per zone, keyed by the zone id.
    Map<String, Integer> nodeToWeightMap = new HashMap<>();
    // Same instance/weight scheme as above, but grouped into 10 zones of 10 via setZoneId.
    for (int i = 0; i < 100; i++) {
      String instance = "localhost_" + i;
      InstanceConfig config = new InstanceConfig(instance);
      String zoneId = "rack_" + i / 10;
      config.setZoneId(zoneId);
      config.setHostName(instance);
      config.setPort("9000");
      allNodes.add(instance);
      int weight = 0;
      if (i % 10 != 0) {
        liveNodes.add(instance);
        weight = 1000;
        if (i % 3 == 0) {
          // set random instance weight.
          weight = (i + 1) * 100;
          config.setWeight(weight);
        }
      }
      instanceConfigMap.put(instance, config);
      nodeToWeightMap.merge(zoneId, weight, Integer::sum);
    }
    Topology topo = new Topology(allNodes, liveNodes, instanceConfigMap, clusterConfig);
    Assert.assertTrue(topo.getEndNodeType().equals(Topology.Types.INSTANCE.name()));
    Assert.assertTrue(topo.getFaultZoneType().equals(Topology.Types.ZONE.name()));
    List<Node> faultZones = topo.getFaultZones();
    Assert.assertEquals(faultZones.size(), 10);
    Node root = topo.getRootNode();
    Assert.assertEquals(root.getChildrenCount(Topology.Types.ZONE.name()), 10);
    Assert.assertEquals(root.getChildrenCount(topo.getEndNodeType()), 100);
    // validate weights at the zone layer.
    for (Node rack : root.getChildren()) {
      Assert.assertEquals(rack.getWeight(), (long) nodeToWeightMap.get(rack.getName()));
    }
  }
}
| 9,789 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/strategy
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/controller/strategy/crushMapping/TestCardDealingAdjustmentAlgorithmV2.java
|
package org.apache.helix.controller.strategy.crushMapping;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Sets;
import org.apache.helix.controller.rebalancer.strategy.crushMapping.CardDealingAdjustmentAlgorithmV2;
import org.apache.helix.controller.rebalancer.topology.InstanceNode;
import org.apache.helix.controller.rebalancer.topology.Node;
import org.apache.helix.controller.rebalancer.topology.Topology;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestCardDealingAdjustmentAlgorithmV2 {
private static int DEFAULT_REPLICA_COUNT = 3;
private static int DEFAULT_RANDOM_SEED = 10;
private static int NUMS_ZONES = 3;
private static int NUM_INSTANCES_PER_ZONE = 3;
private static int NUM_TOTAL_INSTANCES;
private static int[][] DEFAULT_ZONES = new int[NUMS_ZONES][NUM_INSTANCES_PER_ZONE];
static {
for (int i = 0; i < NUMS_ZONES; i++) {
for (int j = 0; j < NUM_INSTANCES_PER_ZONE; j++) {
DEFAULT_ZONES[i][j] = i * NUMS_ZONES + j + 1;
}
}
NUM_TOTAL_INSTANCES = NUMS_ZONES * NUM_INSTANCES_PER_ZONE;
}
private Topology _topology;
/**
 * Mock a Topology whose fault zones are built from DEFAULT_ZONES
 * (NUMS_ZONES zones with NUM_INSTANCES_PER_ZONE instances each).
 */
@BeforeClass
public void setUpTopology() {
  _topology = mock(Topology.class);
  System.out.println("Default ZONES: " + Arrays.deepToString(DEFAULT_ZONES));
  when(_topology.getFaultZones()).thenReturn(createFaultZones(DEFAULT_ZONES));
}
/**
 * Build one fault-zone Node per row of instancesMap. Each instance id doubles as its
 * weight so weight-related assertions are easy to compute in the tests; the zone's weight
 * is the sum of its instances' weights.
 */
private List<Node> createFaultZones(int[][] instancesMap) {
  List<Node> zones = new ArrayList<>();
  for (int zoneIndex = 0; zoneIndex < instancesMap.length; zoneIndex++) {
    Node zone = new Node();
    zone.setName("zone" + zoneIndex);
    zone.setId(zoneIndex);
    int totalWeight = 0;
    for (int instanceId : instancesMap[zoneIndex]) {
      String instanceName = "instance" + instanceId;
      Node base = new Node();
      base.setName(instanceName);
      base.setId(instanceId);
      Node instance = new InstanceNode(base, instanceName);
      // use instance id (integer) as the weight for ease of testing
      instance.addWeight(instanceId);
      zone.addChild(instance);
      totalWeight += instance.getWeight();
    }
    zone.addWeight(totalWeight);
    zones.add(zone);
  }
  return zones;
}
/**
 * Verify the internal state of a freshly constructed algorithm: per-instance weights,
 * fault-zone weight/partition maps, total weight, and the instance-to-zone mapping, all
 * derived from the mocked topology set up in setUpTopology().
 */
@Test(description = "Verify a few properties after algorithm object is created")
public void testAlgorithmConstructor() {
  System.out.println("START TestCardDealingAdjustmentAlgorithmV2.testAlgorithmConstructor");
  CardDealingAdjustmentAlgorithmV2Accessor algorithm =
      new CardDealingAdjustmentAlgorithmV2Accessor(_topology, DEFAULT_REPLICA_COUNT,
          CardDealingAdjustmentAlgorithmV2.Mode.EVENNESS);
  Map<Node, Long> instanceWeights = algorithm.getInstanceWeight();
  // verify weight is set correctly: createFaultZones assigns each instance a weight equal
  // to its id, so the two must match here.
  for (Map.Entry<Node, Long> entry : instanceWeights.entrySet()) {
    if (entry.getKey().getId() != entry.getValue()) {
      Assert.fail(String.format("%s %s should have weight of %s", entry.getKey().getName(),
          entry.getKey().getId(), entry.getValue()));
    }
  }
  Map<Node, Long> faultZoneWeights = algorithm.getFaultZoneWeight();
  Map<Node, Set<String>> faultZonePartitionMap = algorithm.getFaultZonePartitionMap();
  Set<Node> faultZones = faultZoneWeights.keySet();
  Assert.assertEquals(faultZoneWeights.size(), NUMS_ZONES);
  Assert.assertEquals(faultZonePartitionMap.keySet(), faultZones);
  long sum = 0;
  for (long weight : faultZoneWeights.values()) {
    sum += weight;
  }
  // verify total weight is computed correctly: it must equal the sum over all fault zones
  if (sum != algorithm.getTotalWeight()) {
    Assert.fail(String.format("total weight %s != total weight of zones %s",
        algorithm.getTotalWeight(), sum));
  }
  Map<Node, Node> instanceFaultZone = algorithm.getInstanceFaultZone();
  Assert.assertEquals(instanceFaultZone.size(), NUM_TOTAL_INSTANCES);
  // verify zone mapping is correct: per DEFAULT_ZONES, zone z contains instance ids
  // NUMS_ZONES * z + 1 .. NUMS_ZONES * z + NUM_INSTANCES_PER_ZONE
  for (Node zone : faultZones) {
    long zoneId = zone.getId();
    List<Node> instanceNodes = zone.getChildren();
    Set<Long> actualInstanceIds = new HashSet<>();
    for (Node node : instanceNodes) {
      actualInstanceIds.add(node.getId());
    }
    Set<Long> expectedInstanceIds = new HashSet<>();
    for (int i = 0; i < NUM_INSTANCES_PER_ZONE; i++) {
      expectedInstanceIds.add(NUMS_ZONES * zoneId + i + 1);
    }
    Assert.assertEquals(instanceNodes.size(), NUM_INSTANCES_PER_ZONE);
    Assert.assertEquals(actualInstanceIds, expectedInstanceIds);
  }
  System.out.println("END TestCardDealingAdjustmentAlgorithmV2.testAlgorithmConstructor");
}
// Each case: {replica, repeatTimes, seed, mode flag (true = EVENNESS, false = MINIMIZE_MOVEMENT)}.
// The base (replica, repeatTimes, seed) triples are emitted three times per mode, producing
// exactly the same 18 rows as the original hand-written table.
@DataProvider
public static Object[][] stableComputingVerification() {
  int[][] baseCases = {
      {1, 10, 0}, {2, 10, 10}, {3, 10, 100}
  };
  Object[][] cases = new Object[2 * 3 * baseCases.length][];
  int index = 0;
  for (boolean evennessPreferred : new boolean[] {
      true, false
  }) {
    for (int repeat = 0; repeat < 3; repeat++) {
      for (int[] base : baseCases) {
        cases[index++] = new Object[] {
            base[0], base[1], base[2], evennessPreferred
        };
      }
    }
  }
  return cases;
}
/**
 * Run the algorithm repeatedly on (a deep copy of) the same input and verify the computed
 * set of moved partitions is identical every round, i.e. the algorithm is deterministic
 * for a fixed seed.
 */
@Test(description = "Compute mapping multiple times, the mapping of each time should be same", dataProvider = "stableComputingVerification",
    dependsOnMethods = "testAlgorithmConstructor")
public void testStableComputeMappingForMultipleTimes(int replica, int repeatTimes, int seed,
    boolean isEvennessPreferred) {
  System.out.println("START TestCardDealingAdjustmentAlgorithmV2.testStableComputeMappingForMultipleTimes");
  CardDealingAdjustmentAlgorithmV2.Mode preference =
      isEvennessPreferred ? CardDealingAdjustmentAlgorithmV2.Mode.EVENNESS
          : CardDealingAdjustmentAlgorithmV2.Mode.MINIMIZE_MOVEMENT;
  CardDealingAdjustmentAlgorithmV2Accessor algorithm =
      new CardDealingAdjustmentAlgorithmV2Accessor(_topology, replica, preference);
  Set<Node> instanceNodes = algorithm.getInstanceFaultZone().keySet();
  // Give each instance a deterministic set of dummy partitions sized by its weight.
  Map<Node, List<String>> nodeToPartitions = new HashMap<>();
  for (Node instanceNode : instanceNodes) {
    int start = (int) instanceNode.getId();
    int end = start + (int) instanceNode.getWeight();
    nodeToPartitions.put(instanceNode, createDummyPartitions(start, end));
  }
  Map<Long, Set<String>> oldSimpleMapping = getSimpleMapping(nodeToPartitions);
  Map<Long, Set<String>> lastCalculatedDifference = new HashMap<>();
  while (repeatTimes > 0) {
    System.out.println(String.format("Round %s replica %s algorithm seed: %s preference: %s ",
        repeatTimes, replica, seed, preference));
    // deep clone the original mapping so every round starts from identical input
    Map<Node, List<String>> newMapping = new HashMap<>();
    for (Map.Entry<Node, List<String>> entry : nodeToPartitions.entrySet()) {
      newMapping.put(entry.getKey(), new ArrayList<>(entry.getValue()));
    }
    new CardDealingAdjustmentAlgorithmV2(_topology, replica, preference)
        .computeMapping(newMapping, seed);
    // The diff against the original mapping must match the previous round's diff exactly.
    Map<Long, Set<String>> newDifference =
        getDifference(oldSimpleMapping, getSimpleMapping(newMapping));
    if (!lastCalculatedDifference.isEmpty() && !newDifference.equals(lastCalculatedDifference)) {
      Assert.fail("Different mapping of the same input");
    }
    lastCalculatedDifference = newDifference;
    repeatTimes -= 1;
  }
  System.out.println("END TestCardDealingAdjustmentAlgorithmV2.testStableComputeMappingForMultipleTimes");
}
// Replica counts 1..5, generated instead of hand-written; same rows as the original table.
@DataProvider
public static Object[][] replicas() {
  Object[][] cases = new Object[5][];
  for (int replica = 1; replica <= 5; replica++) {
    cases[replica - 1] = new Object[] {
        replica
    };
  }
  return cases;
}
/**
 * For each replica count, compute a mapping in EVENNESS mode and verify the number of
 * moved partitions matches previously calculated reference results.
 */
@Test(description = "Test performance given different replica count", dataProvider = "replicas", dependsOnMethods = "testStableComputeMappingForMultipleTimes")
public void testComputeMappingForDifferentReplicas(int replica) {
  // Fixed typo in the log line: "SATRT" -> "START".
  System.out.println("START TestCardDealingAdjustmentAlgorithmV2.testComputeMappingForDifferentReplicas");
  CardDealingAdjustmentAlgorithmV2Accessor algorithm =
      new CardDealingAdjustmentAlgorithmV2Accessor(_topology, replica,
          CardDealingAdjustmentAlgorithmV2.Mode.EVENNESS);
  Set<Node> instanceNodes = algorithm.getInstanceFaultZone().keySet();
  Map<Node, List<String>> nodeToPartitions = new HashMap<>();
  for (Node instanceNode : instanceNodes) {
    int start = (int) instanceNode.getId();
    int end = start + (int) instanceNode.getWeight();
    nodeToPartitions.put(instanceNode, createDummyPartitions(start, end));
  }
  Map<Long, Set<String>> oldSimpleMapping = getSimpleMapping(nodeToPartitions);
  // The boolean return (whether all partitions were assigned) was captured but unused in
  // the original; only the movement count is asserted here.
  algorithm.computeMapping(nodeToPartitions, DEFAULT_RANDOM_SEED);
  Map<Long, Set<String>> difference =
      getDifference(oldSimpleMapping, getSimpleMapping(nodeToPartitions));
  int totalMovements = 0;
  for (Set<String> value : difference.values()) {
    totalMovements += value.size();
  }
  // These are the previously calculated results, keep them to make sure consistency
  Map<Integer, Integer> expected = ImmutableMap.of(1, 8, 2, 8, 3, 21, 4, 0, 5, 0);
  if (totalMovements != expected.get(replica)) {
    Assert.fail(String.format("Total movements: %s != expected %s, replica: %s", totalMovements,
        expected.get(replica), replica));
  }
  System.out.println("END TestCardDealingAdjustmentAlgorithmV2.testComputeMappingForDifferentReplicas");
}
/**
 * Run both modes on identical (deep-copied) inputs and verify EVENNESS never produces
 * fewer partition movements than MINIMIZE_MOVEMENT.
 */
@Test(description = "Test performance given different preference (evenness or less movements)", dependsOnMethods = "testComputeMappingForDifferentReplicas")
public void testComputeMappingForDifferentPreference() {
  System.out.println("START TestCardDealingAdjustmentAlgorithmV2.testComputeMappingForDifferentPreference");
  CardDealingAdjustmentAlgorithmV2Accessor algorithm1 =
      new CardDealingAdjustmentAlgorithmV2Accessor(_topology, DEFAULT_REPLICA_COUNT,
          CardDealingAdjustmentAlgorithmV2.Mode.EVENNESS);
  CardDealingAdjustmentAlgorithmV2Accessor algorithm2 =
      new CardDealingAdjustmentAlgorithmV2Accessor(_topology, DEFAULT_REPLICA_COUNT,
          CardDealingAdjustmentAlgorithmV2.Mode.MINIMIZE_MOVEMENT);
  Set<Node> instanceNodes = algorithm1.getInstanceFaultZone().keySet();
  Map<Node, List<String>> nodeToPartitions = new HashMap<>();
  for (Node instanceNode : instanceNodes) {
    int start = (int) instanceNode.getId();
    int end = start + (int) instanceNode.getWeight();
    nodeToPartitions.put(instanceNode, createDummyPartitions(start, end));
  }
  Map<Long, Set<String>> oldSimpleMapping = getSimpleMapping(nodeToPartitions);
  // deep clone the original mapping so both algorithms see identical input
  Map<Node, List<String>> newMapping = new HashMap<>();
  for (Map.Entry<Node, List<String>> entry : nodeToPartitions.entrySet()) {
    newMapping.put(entry.getKey(), new ArrayList<>(entry.getValue()));
  }
  boolean isAllAssigned1 = algorithm1.computeMapping(nodeToPartitions, DEFAULT_RANDOM_SEED);
  boolean isAllAssigned2 = algorithm2.computeMapping(newMapping, DEFAULT_RANDOM_SEED);
  int movement1 = getTotalMovements(oldSimpleMapping, getSimpleMapping(nodeToPartitions));
  int movement2 = getTotalMovements(oldSimpleMapping, getSimpleMapping(newMapping));
  System.out.println(String.format("Total movements: %s, isAllAssigned: %s, preference: %s",
      movement1, isAllAssigned1, CardDealingAdjustmentAlgorithmV2.Mode.EVENNESS));
  System.out.println(String.format("Total movements: %s, isAllAssigned: %s, preference: %s",
      movement2, isAllAssigned2, CardDealingAdjustmentAlgorithmV2.Mode.MINIMIZE_MOVEMENT));
  Assert.assertTrue(movement1 >= movement2);
  // Fixed copy-paste bug: the end-of-test log line previously printed "START".
  System.out.println("END TestCardDealingAdjustmentAlgorithmV2.testComputeMappingForDifferentPreference");
}
/**
 * Verifies that an instance with weight 0 ends up with no partitions assigned
 * after the algorithm rebalances an initially overloaded assignment.
 */
@Test(dependsOnMethods = "testComputeMappingForDifferentPreference")
public void testComputeMappingWhenZeroWeightInstance() {
  System.out.println("START TestCardDealingAdjustmentAlgorithmV2.testComputeMappingWhenZeroWeightInstance");
  // zone0: ids {0, 1} (weights 0 and 1, zone weight 1);
  // zone1: ids {2, 3} (zone weight 5); zone2: ids {4, 5} (zone weight 9).
  when(_topology.getFaultZones()).thenReturn(createFaultZones(new int[][] {
      {0, 1},
      {2, 3},
      {4, 5}
  }));
  CardDealingAdjustmentAlgorithmV2Accessor dealer =
      new CardDealingAdjustmentAlgorithmV2Accessor(_topology, DEFAULT_REPLICA_COUNT,
          CardDealingAdjustmentAlgorithmV2.Mode.EVENNESS);
  // Give every instance 2 partitions: zone0 (weight 1) is overloaded while the
  // other two zones have spare capacity.
  Map<Node, List<String>> assignment = new HashMap<>();
  for (Node instance : dealer.getInstanceFaultZone().keySet()) {
    assignment.put(instance, createDummyPartitions(0, 2));
  }
  Map<Long, Set<String>> mappingBefore = getSimpleMapping(assignment);
  dealer.computeMapping(assignment, DEFAULT_RANDOM_SEED);
  Map<Long, Set<String>> mappingAfter = getSimpleMapping(assignment);
  System.out.println("old mapping" + mappingBefore);
  System.out.println("new mapping" + mappingAfter);
  // The zero-weight instance (id 0) must hold nothing after rebalancing.
  Assert.assertTrue(mappingAfter.get(0L).isEmpty());
  System.out.println("END TestCardDealingAdjustmentAlgorithmV2.testComputeMappingWhenZeroWeightInstance");
}
/**
 * Verifies that a fault zone whose total weight is 0 (its only instance has
 * weight 0) ends up with no partitions assigned after rebalancing.
 */
@Test (dependsOnMethods = "testComputeMappingWhenZeroWeightInstance")
public void testComputeMappingWhenZeroWeightZone() {
  System.out.println("START TestCardDealingAdjustmentAlgorithmV2.testComputeMappingWhenZeroWeightZone");
  when(_topology.getFaultZones()).thenReturn(createFaultZones(new int[][] {
      {0},    // zone0: instance id = 0, weight = 0; zone weight = 0
      {2, 3}, // zone1: instance id = 2, weight = 2; instance id = 3, weight = 3; zone weight = 5
      {4, 5}  // zone2: instance id = 4, weight = 4; instance id = 5, weight = 5; zone weight = 9
  }));
  CardDealingAdjustmentAlgorithmV2Accessor algorithm =
      new CardDealingAdjustmentAlgorithmV2Accessor(_topology, DEFAULT_REPLICA_COUNT,
          CardDealingAdjustmentAlgorithmV2.Mode.EVENNESS);
  Map<Node, List<String>> nodeToPartitions = new HashMap<>();
  // Assign 2 partitions to each instance.
  for (Node instanceNode : algorithm.getInstanceFaultZone().keySet()) {
    nodeToPartitions.put(instanceNode, createDummyPartitions(0, 2));
  }
  Map<Long, Set<String>> oldSimpleMapping = getSimpleMapping(nodeToPartitions);
  algorithm.computeMapping(nodeToPartitions, DEFAULT_RANDOM_SEED);
  Map<Long, Set<String>> newSimpleMapping = getSimpleMapping(nodeToPartitions);
  System.out.println("old mapping" + oldSimpleMapping);
  System.out.println("new mapping" + newSimpleMapping);
  // Nothing may remain on the instance inside the zero-weight fault zone.
  Assert.assertTrue(newSimpleMapping.get(0L).isEmpty());
  // Fixed: this closing trace previously said "START", making the log misleading.
  System.out.println("END TestCardDealingAdjustmentAlgorithmV2.testComputeMappingWhenZeroWeightZone");
}
/**
 * Counts how many partition changes (each '+'-tagged gain or '-'-tagged loss)
 * separate the two simple mappings.
 */
private int getTotalMovements(Map<Long, Set<String>> oldSimpleMapping,
    Map<Long, Set<String>> newSimpleMapping) {
  return getDifference(oldSimpleMapping, newSimpleMapping).values().stream()
      .mapToInt(Set::size)
      .sum();
}
/**
 * Computes, per instance id, the set of partition changes between the two
 * simple mappings: gained partitions are tagged with '+' and lost partitions
 * with '-'.
 * <p>
 * Bug fix: lost partitions were previously accumulated into {@code addedPartitions}
 * while {@code lostPartitions} stayed empty; the final union only produced the
 * correct result by accident. Each delta now goes into its own set before being
 * merged. (Guava {@code Sets} calls replaced with plain {@code java.util} set
 * operations; resulting map contents are unchanged.)
 */
private Map<Long, Set<String>> getDifference(Map<Long, Set<String>> oldSimpleMapping,
    Map<Long, Set<String>> newSimpleMapping) {
  Map<Long, Set<String>> difference = new HashMap<>();
  for (long instanceId : newSimpleMapping.keySet()) {
    Set<String> changePartitions = new HashSet<>();
    // Partitions present now but not before.
    Set<String> addedPartitions = new HashSet<>(newSimpleMapping.get(instanceId));
    addedPartitions.removeAll(oldSimpleMapping.get(instanceId));
    for (String partition : addedPartitions) {
      changePartitions.add("+" + partition);
    }
    // Partitions present before but not now.
    Set<String> lostPartitions = new HashSet<>(oldSimpleMapping.get(instanceId));
    lostPartitions.removeAll(newSimpleMapping.get(instanceId));
    for (String partition : lostPartitions) {
      changePartitions.add("-" + partition);
    }
    difference.put(instanceId, changePartitions);
  }
  return difference;
}
/**
 * Flattens a node-to-partition-list assignment into instance id -> partition set.
 */
private Map<Long, Set<String>> getSimpleMapping(Map<Node, List<String>> nodeToPartitions) {
  Map<Long, Set<String>> simpleMapping = new HashMap<>();
  nodeToPartitions.forEach(
      (node, partitions) -> simpleMapping.put(node.getId(), new HashSet<>(partitions)));
  return simpleMapping;
}
/**
 * Builds dummy partition names "Partition_start" .. "Partition_end"
 * (both bounds inclusive, so end - start + 1 names are produced).
 */
private List<String> createDummyPartitions(int start, int end) {
  List<String> partitions = new ArrayList<>();
  int index = start;
  while (index <= end) {
    partitions.add("Partition_" + index);
    index++;
  }
  return partitions;
}
/**
 * Test-only accessor that widens visibility of the protected internal state of
 * {@link CardDealingAdjustmentAlgorithmV2} so assertions can inspect it.
 */
private static class CardDealingAdjustmentAlgorithmV2Accessor
    extends CardDealingAdjustmentAlgorithmV2 {
  CardDealingAdjustmentAlgorithmV2Accessor(Topology topology, int replica, Mode mode) {
    super(topology, replica, mode);
  }
  /** Instance node -> the fault zone node it belongs to. */
  Map<Node, Node> getInstanceFaultZone() {
    return _instanceFaultZone;
  }
  /** Instance node -> its weight. */
  Map<Node, Long> getInstanceWeight() {
    return _instanceWeight;
  }
  /** Sum of all instance weights. */
  long getTotalWeight() {
    return _totalWeight;
  }
  /** Fault zone node -> aggregate weight of its instances. */
  Map<Node, Long> getFaultZoneWeight() {
    return _faultZoneWeight;
  }
  /** Fault zone node -> partitions currently placed within that zone. */
  Map<Node, Set<String>> getFaultZonePartitionMap() {
    return _faultZonePartitionMap;
  }
}
}
| 9,790 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/MockListener.java
|
package org.apache.helix.manager;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import org.apache.helix.ConfigChangeListener;
import org.apache.helix.ControllerChangeListener;
import org.apache.helix.CurrentStateChangeListener;
import org.apache.helix.ExternalViewChangeListener;
import org.apache.helix.IdealStateChangeListener;
import org.apache.helix.LiveInstanceChangeListener;
import org.apache.helix.MessageListener;
import org.apache.helix.NotificationContext;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;
/**
 * Listener stub implementing every Helix change-listener interface. Each
 * callback simply records that it fired by raising the corresponding public
 * flag; {@link #reset()} clears all flags for the next assertion round.
 */
public class MockListener implements IdealStateChangeListener, LiveInstanceChangeListener,
    ConfigChangeListener, CurrentStateChangeListener, ExternalViewChangeListener,
    ControllerChangeListener, MessageListener {
  public boolean isIdealStateChangeListenerInvoked = false;
  public boolean isLiveInstanceChangeListenerInvoked = false;
  public boolean isCurrentStateChangeListenerInvoked = false;
  public boolean isMessageListenerInvoked = false;
  public boolean isConfigChangeListenerInvoked = false;
  public boolean isExternalViewChangeListenerInvoked = false;
  public boolean isControllerChangeListenerInvoked = false;

  /** Clears every invocation flag back to {@code false}. */
  public void reset() {
    isIdealStateChangeListenerInvoked = false;
    isLiveInstanceChangeListenerInvoked = false;
    isCurrentStateChangeListenerInvoked = false;
    isMessageListenerInvoked = false;
    isConfigChangeListenerInvoked = false;
    isExternalViewChangeListenerInvoked = false;
    isControllerChangeListenerInvoked = false;
  }

  @Override
  public void onControllerChange(NotificationContext changeContext) {
    isControllerChangeListenerInvoked = true;
  }

  @Override
  public void onIdealStateChange(List<IdealState> idealState, NotificationContext changeContext) {
    isIdealStateChangeListenerInvoked = true;
  }

  @Override
  public void onLiveInstanceChange(List<LiveInstance> liveInstances,
      NotificationContext changeContext) {
    isLiveInstanceChangeListenerInvoked = true;
  }

  @Override
  public void onConfigChange(List<InstanceConfig> configs, NotificationContext changeContext) {
    isConfigChangeListenerInvoked = true;
  }

  @Override
  public void onStateChange(String instanceName, List<CurrentState> statesInfo,
      NotificationContext changeContext) {
    isCurrentStateChangeListenerInvoked = true;
  }

  @Override
  public void onExternalViewChange(List<ExternalView> externalViewList,
      NotificationContext changeContext) {
    isExternalViewChangeListenerInvoked = true;
  }

  @Override
  public void onMessage(String instanceName, List<Message> messages,
      NotificationContext changeContext) {
    isMessageListenerInvoked = true;
  }
}
| 9,791 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestWtCacheSyncOpSingleThread.java
|
package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import org.apache.helix.AccessOption;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.ZNRecordUpdater;
import org.apache.helix.ZkUnitTestBase;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Exercises the synchronous, single-threaded operations of
 * {@link ZkCacheBaseDataAccessor} (create / update / set / get / getChildNames /
 * exists) and, after every batch of writes, verifies that its write-through
 * cache ({@code _wtCache}) matches the data actually stored on ZooKeeper.
 */
public class TestWtCacheSyncOpSingleThread extends ZkUnitTestBase {
/**
 * Happy-path flow: seed current-state and external-view paths into the cache,
 * perform each sync write operation, and assert cache/ZK consistency throughout.
 */
@Test
public void testHappyPathZkCacheBaseDataAccessor() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
// init zkCacheDataAccessor: cache covers one instance's current states plus external views
String curStatePath = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901");
String extViewPath = PropertyPathBuilder.externalView(clusterName);
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
baseAccessor.create(curStatePath, null, AccessOption.PERSISTENT);
List<String> cachePaths = Arrays.asList(curStatePath, extViewPath);
ZkCacheBaseDataAccessor<ZNRecord> accessor =
new ZkCacheBaseDataAccessor<>(baseAccessor, null, cachePaths, null);
// initial check: freshly built cache must mirror ZK (last arg true: needs data verify)
boolean ret = TestHelper.verifyZkCache(cachePaths, accessor._wtCache._cache, _gZkClient, true);
Assert.assertTrue(ret, "wtCache doesn't match data on Zk");
// create 10 current states
for (int i = 0; i < 10; i++) {
String path = curStatePath + "/session_0/TestDB" + i;
boolean success = accessor.create(path, new ZNRecord("TestDB" + i), AccessOption.PERSISTENT);
Assert.assertTrue(success, "Should succeed in create: " + path);
}
// verify wtCache after creates
// TestHelper.printCache(accessor._wtCache);
ret = TestHelper.verifyZkCache(cachePaths, accessor._wtCache._cache, _gZkClient, false);
Assert.assertTrue(ret, "wtCache doesn't match data on Zk");
// update each current state 10 times, single thread
for (int i = 0; i < 10; i++) {
String path = curStatePath + "/session_0/TestDB" + i;
for (int j = 0; j < 10; j++) {
ZNRecord newRecord = new ZNRecord("TestDB" + i);
newRecord.setSimpleField("" + j, "" + j);
boolean success =
accessor.update(path, new ZNRecordUpdater(newRecord), AccessOption.PERSISTENT);
Assert.assertTrue(success, "Should succeed in update: " + path);
}
}
// verify cache after updates
// TestHelper.printCache(accessor._wtCache._cache);
ret = TestHelper.verifyZkCache(cachePaths, accessor._wtCache._cache, _gZkClient, false);
Assert.assertTrue(ret, "wtCache doesn't match data on Zk");
// set 10 external views
for (int i = 0; i < 10; i++) {
String path = PropertyPathBuilder.externalView(clusterName, "TestDB" + i);
boolean success = accessor.set(path, new ZNRecord("TestDB" + i), AccessOption.PERSISTENT);
Assert.assertTrue(success, "Should succeed in set: " + path);
}
// verify wtCache after sets
// accessor.printWtCache();
ret = TestHelper.verifyZkCache(cachePaths, accessor._wtCache._cache, _gZkClient, false);
Assert.assertTrue(ret, "wtCache doesn't match data on Zk");
// get 10 external views: reads must return what was just written
for (int i = 0; i < 10; i++) {
String path = PropertyPathBuilder.externalView(clusterName, "TestDB" + i);
ZNRecord record = accessor.get(path, null, 0);
Assert.assertEquals(record.getId(), "TestDB" + i);
}
// getChildNames must list exactly the 10 external views
List<String> childNames = accessor.getChildNames(extViewPath, 0);
// System.out.println(childNames);
Assert.assertEquals(childNames.size(), 10, "Should contain only: TestDB0-9");
for (int i = 0; i < 10; i++) {
Assert.assertTrue(childNames.contains("TestDB" + i));
}
// exists must report true for every created current state
for (int i = 0; i < 10; i++) {
String path = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901",
"session_0", "TestDB" + i);
Assert.assertTrue(accessor.exists(path, 0));
}
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
/**
 * Negative path: creating the same persistent node twice must fail the second
 * time (ZK NodeExists), and the accessor must surface that as {@code false}.
 */
@Test
public void testCreateFailZkCacheBaseDataAccessor() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
// init zkCacheDataAccessor
String curStatePath = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901");
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
ZkCacheBaseDataAccessor<ZNRecord> accessor = new ZkCacheBaseDataAccessor<>(baseAccessor, null,
Collections.singletonList(curStatePath), null);
// create 10 current states
for (int i = 0; i < 10; i++) {
String path = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901",
"session_1", "TestDB" + i);
boolean success = accessor.create(path, new ZNRecord("TestDB" + i), AccessOption.PERSISTENT);
Assert.assertTrue(success, "Should succeed in create: " + path);
}
// create same 10 current states again, should fail
for (int i = 0; i < 10; i++) {
String path = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901",
"session_1", "TestDB" + i);
boolean success = accessor.create(path, new ZNRecord("TestDB" + i), AccessOption.PERSISTENT);
Assert.assertFalse(success, "Should fail in create due to NodeExists: " + path);
}
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
}
| 9,792 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestDefaultControllerMsgHandlerFactory.java
|
package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import org.apache.helix.HelixException;
import org.apache.helix.NotificationContext;
import org.apache.helix.manager.zk.DefaultControllerMessageHandlerFactory.DefaultControllerMessageHandler;
import org.apache.helix.messaging.handling.MessageHandler;
import org.apache.helix.model.Message;
import org.apache.helix.model.Message.MessageType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
/**
 * Verifies that {@link DefaultControllerMessageHandlerFactory} only creates
 * handlers for CONTROLLER_MSG messages, and that
 * {@link DefaultControllerMessageHandler} likewise rejects any other message
 * type at handling time by throwing {@link HelixException}.
 *
 * Cleanups: fixed the "facotry" typo in the local variable name, removed the
 * unused {@code resultMap} and unused {@code handler} locals, and made the
 * logger {@code final}.
 */
public class TestDefaultControllerMsgHandlerFactory {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestDefaultControllerMsgHandlerFactory.class);

  @Test()
  public void testDefaultControllerMsgHandlerFactory() {
    System.out.println("START TestDefaultControllerMsgHandlerFactory at "
        + new Date(System.currentTimeMillis()));
    DefaultControllerMessageHandlerFactory factory = new DefaultControllerMessageHandlerFactory();
    Message message = new Message(MessageType.NO_OP, "0");
    NotificationContext context = new NotificationContext(null);
    // The factory must reject non-controller message types.
    boolean exceptionCaught = false;
    try {
      factory.createHandler(message, context);
    } catch (HelixException e) {
      exceptionCaught = true;
    }
    AssertJUnit.assertTrue(exceptionCaught);
    // ... and accept CONTROLLER_MSG.
    message = new Message(MessageType.CONTROLLER_MSG, "1");
    exceptionCaught = false;
    try {
      factory.createHandler(message, context);
    } catch (HelixException e) {
      exceptionCaught = true;
    }
    AssertJUnit.assertFalse(exceptionCaught);
    // The handler itself must also refuse to handle non-controller messages.
    message = new Message(MessageType.NO_OP, "3");
    DefaultControllerMessageHandler defaultHandler =
        new DefaultControllerMessageHandler(message, context);
    try {
      defaultHandler.handleMessage();
    } catch (HelixException e) {
      exceptionCaught = true;
    } catch (InterruptedException e) {
      LOG.error("Interrupted handling message", e);
    }
    AssertJUnit.assertTrue(exceptionCaught);
    // ... and handle CONTROLLER_MSG without throwing.
    message = new Message(MessageType.CONTROLLER_MSG, "4");
    defaultHandler = new DefaultControllerMessageHandler(message, context);
    exceptionCaught = false;
    try {
      defaultHandler.handleMessage();
    } catch (HelixException e) {
      exceptionCaught = true;
    } catch (InterruptedException e) {
      LOG.error("Interrupted handling message", e);
    }
    AssertJUnit.assertFalse(exceptionCaught);
    System.out.println("END TestDefaultControllerMsgHandlerFactory at "
        + new Date(System.currentTimeMillis()));
  }
}
| 9,793 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestWtCacheAsyncOpSingleThread.java
|
package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import org.apache.helix.AccessOption;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.ZNRecordUpdater;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.zookeeper.zkclient.DataUpdater;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Exercises the asynchronous batch operations of {@link ZkCacheBaseDataAccessor}
 * (createChildren / updateChildren / setChildren / get / getChildren / exists)
 * from a single thread, verifying after each batch that the write-through cache
 * ({@code _wtCache}) matches the data actually stored on ZooKeeper.
 */
public class TestWtCacheAsyncOpSingleThread extends ZkUnitTestBase {
/**
 * Happy-path flow: batch-create current states, batch-update them, batch-set
 * external views, then read everything back, asserting cache/ZK consistency
 * after every phase.
 */
@Test
public void testHappyPathZkCacheBaseDataAccessor() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
// init zkCacheDataAccessor: cache covers one instance's current states plus external views
String curStatePath = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901");
String extViewPath = PropertyPathBuilder.externalView(clusterName);
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
baseAccessor.create(curStatePath, null, AccessOption.PERSISTENT);
List<String> cachePaths = Arrays.asList(curStatePath, extViewPath);
ZkCacheBaseDataAccessor<ZNRecord> accessor =
new ZkCacheBaseDataAccessor<>(baseAccessor, null, cachePaths, null);
// initial check: freshly built cache must mirror ZK
boolean ret = TestHelper.verifyZkCache(cachePaths, accessor._wtCache._cache, _gZkClient, false);
Assert.assertTrue(ret, "wtCache doesn't match data on Zk");
// create 10 current states in one async batch
List<String> paths = new ArrayList<>();
List<ZNRecord> records = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String path = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901",
"session_0", "TestDB" + i);
ZNRecord record = new ZNRecord("TestDB" + i);
paths.add(path);
records.add(record);
}
boolean[] success = accessor.createChildren(paths, records, AccessOption.PERSISTENT);
for (int i = 0; i < 10; i++) {
Assert.assertTrue(success[i], "Should succeed in create: " + paths.get(i));
}
// verify wtCache after batch create
// TestHelper.printCache(accessor._wtCache);
ret = TestHelper.verifyZkCache(cachePaths, accessor._wtCache._cache, _gZkClient, false);
Assert.assertTrue(ret, "wtCache doesn't match data on Zk");
// update each current state 10 times, one async batch per round
List<DataUpdater<ZNRecord>> updaters = new ArrayList<>();
for (int j = 0; j < 10; j++) {
paths.clear();
updaters.clear();
for (int i = 0; i < 10; i++) {
String path = curStatePath + "/session_0/TestDB" + i;
ZNRecord newRecord = new ZNRecord("TestDB" + i);
newRecord.setSimpleField("" + j, "" + j);
DataUpdater<ZNRecord> updater = new ZNRecordUpdater(newRecord);
paths.add(path);
updaters.add(updater);
}
success = accessor.updateChildren(paths, updaters, AccessOption.PERSISTENT);
for (int i = 0; i < 10; i++) {
Assert.assertTrue(success[i], "Should succeed in update: " + paths.get(i));
}
}
// verify cache after batch updates
// TestHelper.printCache(accessor._wtCache);
ret = TestHelper.verifyZkCache(cachePaths, accessor._wtCache._cache, _gZkClient, false);
Assert.assertTrue(ret, "wtCache doesn't match data on Zk");
// set 10 external views in one async batch
paths.clear();
records.clear();
for (int i = 0; i < 10; i++) {
String path = PropertyPathBuilder.externalView(clusterName, "TestDB" + i);
ZNRecord record = new ZNRecord("TestDB" + i);
paths.add(path);
records.add(record);
}
success = accessor.setChildren(paths, records, AccessOption.PERSISTENT);
for (int i = 0; i < 10; i++) {
Assert.assertTrue(success[i], "Should succeed in set: " + paths.get(i));
}
// verify wtCache after batch sets
// TestHelper.printCache(accessor._wtCache);
ret = TestHelper.verifyZkCache(cachePaths, accessor._wtCache._cache, _gZkClient, false);
Assert.assertTrue(ret, "wtCache doesn't match data on Zk");
// get 10 external views: reads must return what was just written
paths.clear();
records.clear();
for (int i = 0; i < 10; i++) {
String path = PropertyPathBuilder.externalView(clusterName, "TestDB" + i);
paths.add(path);
}
records = accessor.get(paths, null, 0, true);
for (int i = 0; i < 10; i++) {
Assert.assertEquals(records.get(i).getId(), "TestDB" + i);
}
// getChildren must return the 10 external views
records.clear();
records = accessor.getChildren(extViewPath, null, 0, 0, 0);
for (int i = 0; i < 10; i++) {
Assert.assertEquals(records.get(i).getId(), "TestDB" + i);
}
// exists must report true for every created current state
paths.clear();
for (int i = 0; i < 10; i++) {
String path = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901",
"session_0", "TestDB" + i);
paths.add(path);
}
success = accessor.exists(paths, 0);
for (int i = 0; i < 10; i++) {
Assert.assertTrue(success[i], "Should exits: TestDB" + i);
}
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
/**
 * Negative path: batch-creating the same persistent nodes twice must fail the
 * second time (ZK NodeExists) for every path. Also checks that only the
 * existing curStatePath ends up in the cache at construction time.
 */
@Test
public void testCreateFailZkCacheBaseAccessor() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
// init zkCacheDataAccessor; extViewPath does not exist on ZK yet
String curStatePath = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901");
String extViewPath = PropertyPathBuilder.externalView(clusterName);
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
baseAccessor.create(curStatePath, null, AccessOption.PERSISTENT);
ZkCacheBaseDataAccessor<ZNRecord> accessor = new ZkCacheBaseDataAccessor<>(baseAccessor, null,
Arrays.asList(curStatePath, extViewPath), null);
Assert.assertEquals(accessor._wtCache._cache.size(), 1,
"Should contain only:\n" + curStatePath);
Assert.assertTrue(accessor._wtCache._cache.containsKey(curStatePath));
// create 10 current states
List<String> paths = new ArrayList<>();
List<ZNRecord> records = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String path = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901",
"session_1", "TestDB" + i);
ZNRecord record = new ZNRecord("TestDB" + i);
paths.add(path);
records.add(record);
}
boolean[] success = accessor.createChildren(paths, records, AccessOption.PERSISTENT);
for (int i = 0; i < 10; i++) {
Assert.assertTrue(success[i], "Should succeed in create: " + paths.get(i));
}
// create same 10 current states again, should fail on NodeExists
success = accessor.createChildren(paths, records, AccessOption.PERSISTENT);
// System.out.println(Arrays.toString(success));
for (int i = 0; i < 10; i++) {
Assert.assertFalse(success[i], "Should fail on create: " + paths.get(i));
}
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
}
| 9,794 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestParticipantManager.java
|
package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Semaphore;
import org.apache.helix.PreConnectCallback;
import org.apache.helix.PropertyKey;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkTestHelper;
import org.apache.helix.common.ZkTestBase;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.task.TaskConstants;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestParticipantManager extends ZkTestBase {
private static Logger LOG = LoggerFactory.getLogger(TestParticipantManager.class);
/*
* Simulates zk session expiry before creating live instance in participant manager. This test
* makes sure the session aware create ephemeral API is called, which validates the expected zk
* session.
* What this test does is:
* 1. Sets up live instance with session S0
* 2. Expires S0 and gets new session S1
* 3. S1 is blocked before creating live instance in participant manager
* 4. Expires S1 and gets new session S2
* 5. Proceeds S1 to create live instance, which will fail because session S1 is expired
* 6. Proceeds S2 to create live instance, which will succeed
*/
@Test
public void testSessionExpiryCreateLiveInstance() throws Exception {
final String className = TestHelper.getTestClassName();
final String methodName = TestHelper.getTestMethodName();
final String clusterName = className + "_" + methodName;
final ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(ZK_ADDR));
final PropertyKey.Builder keyBuilder = accessor.keyBuilder();
TestHelper.setupCluster(clusterName, ZK_ADDR,
12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
10, // partitions per resource
5, // number of nodes
3, // replicas
"MasterSlave",
true); // do rebalance
final String instanceName = "localhost_12918";
final MockParticipantManager manager =
new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
manager.syncStart();
// Snapshot the live instance created under the initial session S0.
final LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instanceName));
final long originalCreationTime = liveInstance.getStat().getCreationTime();
final String originalSessionId = manager.getSessionId();
// Verify current live instance.
Assert.assertNotNull(liveInstance);
Assert.assertEquals(liveInstance.getEphemeralOwner(), originalSessionId);
// startCountdown gates S1 inside onPreConnect; endCountdown gates S2;
// the semaphore signals when each session has entered onPreConnect.
final CountDownLatch startCountdown = new CountDownLatch(1);
final CountDownLatch endCountdown = new CountDownLatch(1);
final Semaphore semaphore = new Semaphore(0);
manager.addPreConnectCallback(
new BlockingPreConnectCallback(instanceName, startCountdown, endCountdown, semaphore));
// Expire S0 and new session S1 will be created.
ZkTestHelper.asyncExpireSession(manager.getZkClient());
// Wait for onPreConnect to start
semaphore.acquire();
// New session S1 should not be equal to S0.
Assert.assertFalse(originalSessionId.equals(manager.getSessionId()));
// Live instance should be gone as original session S0 is expired.
Assert.assertNull(accessor.getProperty(keyBuilder.liveInstance(instanceName)));
final String sessionOne = manager.getSessionId();
// Expire S1 when S1 is blocked in onPreConnect().
// New session S2 will be created.
ZkTestHelper.asyncExpireSession(manager.getZkClient());
TestHelper.verify(
() -> !(ZKUtil.toHexSessionId(manager.getZkClient().getSessionId()).equals(sessionOne)),
TestHelper.WAIT_DURATION);
// New session S2 should not be equal to S1.
final String sessionTwo = ZKUtil.toHexSessionId(manager.getZkClient().getSessionId());
Assert.assertFalse(sessionOne.equals(sessionTwo));
// Proceed S1 to create live instance, which will fail.
startCountdown.countDown();
// Wait until S2 starts onPreConnect, which indicates S1's handling new session is completed.
semaphore.acquire();
// Live instance should not be created because zk session is expired.
// This is the core of the test: the session-aware ephemeral create must reject
// a write attempted under the already-expired session S1.
Assert.assertNull(accessor.getProperty(keyBuilder.liveInstance(instanceName)),
"Live instance should not be created because zk session is expired!");
// Proceed S2 to create live instance.
endCountdown.countDown();
TestHelper.verify(() -> {
// Newly created live instance should be created by the latest session S2
// and have a new creation time.
LiveInstance newLiveInstance =
accessor.getProperty(keyBuilder.liveInstance(instanceName));
return newLiveInstance != null
&& newLiveInstance.getStat().getCreationTime() != originalCreationTime
&& newLiveInstance.getEphemeralOwner().equals(sessionTwo);
}, TestHelper.WAIT_DURATION);
// Clean up.
manager.syncStop();
deleteCluster(clusterName);
}
@Test(dependsOnMethods = "testSessionExpiryCreateLiveInstance")
public void testCurrentTaskThreadPoolSizeCreation() throws Exception {
// Using a pool sized different from the default value to verify correctness
final int testThreadPoolSize = TaskConstants.DEFAULT_TASK_THREAD_POOL_SIZE + 1;
final String className = TestHelper.getTestClassName();
final String methodName = TestHelper.getTestMethodName();
final String clusterName = className + "_" + methodName;
final ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName,
new ZkBaseDataAccessor.Builder<ZNRecord>().setZkAddress(ZK_ADDR).build());
final PropertyKey.Builder keyBuilder = accessor.keyBuilder();
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
10, // partitions per resource
5, // number of nodes
3, // replicas
"MasterSlave", true); // do rebalance
final String instanceName = "localhost_12918";
final MockParticipantManager manager =
new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
InstanceConfig instanceConfig = accessor.getProperty(keyBuilder.instanceConfig(instanceName));
instanceConfig.setTargetTaskThreadPoolSize(testThreadPoolSize);
accessor.setProperty(keyBuilder.instanceConfig(instanceName), instanceConfig);
manager.syncStart();
final LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instanceName));
Assert.assertNotNull(liveInstance);
Assert.assertEquals(liveInstance.getCurrentTaskThreadPoolSize(), testThreadPoolSize);
// Clean up.
manager.syncStop();
deleteCluster(clusterName);
}
/*
* Mocks PreConnectCallback to insert session expiry during ParticipantManager#handleNewSession()
*/
static class BlockingPreConnectCallback implements PreConnectCallback {
private final String instanceName;
private final CountDownLatch startCountDown;
private final CountDownLatch endCountDown;
private final Semaphore semaphore;
private boolean canCreateLiveInstance;
BlockingPreConnectCallback(String instanceName, CountDownLatch startCountdown,
CountDownLatch endCountdown, Semaphore semaphore) {
this.instanceName = instanceName;
this.startCountDown = startCountdown;
this.endCountDown = endCountdown;
this.semaphore = semaphore;
}
@Override
public void onPreConnect() {
LOG.info("Handling new session for instance: {}", instanceName);
semaphore.release();
try {
LOG.info("Waiting session expiry to happen.");
startCountDown.await();
if (canCreateLiveInstance) {
LOG.info("Waiting to continue creating live instance.");
endCountDown.await();
}
} catch (InterruptedException ex) {
LOG.error("Interrupted in waiting", ex);
}
canCreateLiveInstance = true;
}
}
}
| 9,795 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestZNRecordSerializer.java
|
package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.ByteArrayInputStream;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.MapperFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.zkclient.serialize.ZkSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests for {@link ZNRecordSerializer}: round-trip correctness, null-field handling,
 * auto-initialization of missing fields, compression, and wire compatibility with a
 * plain Jackson serializer.
 */
public class TestZNRecordSerializer {
  /**
   * Test the normal case of serialize/deserialize where ZNRecord is well-formed.
   */
  @Test
  public void basicTest() {
    ZNRecord record = new ZNRecord("testId");
    record.setMapField("k1", ImmutableMap.of("a", "b", "c", "d"));
    record.setMapField("k2", ImmutableMap.of("e", "f", "g", "h"));
    record.setListField("k3", ImmutableList.of("a", "b", "c", "d"));
    record.setListField("k4", ImmutableList.of("d", "e", "f", "g"));
    record.setSimpleField("k5", "a");
    // NOTE(review): this overwrites the previous value of "k5"; only "b" is serialized.
    record.setSimpleField("k5", "b");
    ZNRecordSerializer serializer = new ZNRecordSerializer();
    ZNRecord result = (ZNRecord) serializer.deserialize(serializer.serialize(record));
    Assert.assertEquals(result, record);
  }

  /**
   * Fields explicitly set to null must survive a serialize/deserialize round trip,
   * and keys that were never set must read back as null.
   */
  @Test
  public void testNullFields() {
    ZNRecord record = new ZNRecord("testId");
    record.setMapField("K1", null);
    record.setListField("k2", null);
    record.setSimpleField("k3", null);
    ZNRecordSerializer serializer = new ZNRecordSerializer();
    byte[] data = serializer.serialize(record);
    ZNRecord result = (ZNRecord) serializer.deserialize(data);
    Assert.assertEquals(result, record);
    // Check the keys that were actually set to null. (The original assertions used
    // "K2"/"K3", which were never set, so they passed vacuously.)
    Assert.assertNull(result.getMapField("K1"));
    Assert.assertNull(result.getListField("k2"));
    Assert.assertNull(result.getSimpleField("k3"));
    // A key that was never set also reads back as null.
    Assert.assertNull(result.getListField("k4"));
  }

  /**
   * Manual micro-benchmark comparing ZNRecordSerializer and ZNRecordStreamingSerializer.
   * Disabled; run by hand when investigating serializer performance.
   */
  @Test (enabled = false)
  public void testPerformance() {
    ZNRecord record = createZnRecord();
    ZNRecordSerializer serializer1 = new ZNRecordSerializer();
    ZNRecordStreamingSerializer serializer2 = new ZNRecordStreamingSerializer();
    int loop = 100000;
    long start = System.currentTimeMillis();
    for (int i = 0; i < loop; i++) {
      serializer1.serialize(record);
    }
    System.out.println("ZNRecordSerializer serialize took " + (System.currentTimeMillis() - start) + " ms");
    byte[] data = serializer1.serialize(record);
    start = System.currentTimeMillis();
    for (int i = 0; i < loop; i++) {
      serializer1.deserialize(data);
    }
    System.out.println("ZNRecordSerializer deserialize took " + (System.currentTimeMillis() - start) + " ms");
    start = System.currentTimeMillis();
    for (int i = 0; i < loop; i++) {
      data = serializer2.serialize(record);
    }
    System.out.println("ZNRecordStreamingSerializer serialize took " + (System.currentTimeMillis() - start) + " ms");
    start = System.currentTimeMillis();
    for (int i = 0; i < loop; i++) {
      ZNRecord result = (ZNRecord) serializer2.deserialize(data);
    }
    System.out.println("ZNRecordStreamingSerializer deserialize took " + (System.currentTimeMillis() - start) + " ms");
  }

  /**
   * Builds a moderately large record (400 partitions with map, list, and simple fields)
   * used as the benchmark fixture.
   */
  ZNRecord createZnRecord() {
    ZNRecord record = new ZNRecord("testId");
    for (int i = 0; i < 400; i++) {
      Map<String, String> map = new HashMap<>();
      map.put("localhost_" + i, "Master");
      map.put("localhost_" + (i + 1), "Slave");
      map.put("localhost_" + (i + 2), "Slave");
      record.setMapField("partition_" + i, map);
      record.setListField("partition_" + i, Lists.<String>newArrayList(map.keySet()));
      record.setSimpleField("partition_" + i, UUID.randomUUID().toString());
    }
    return record;
  }

  /**
   * Manual micro-benchmark of parallel serialization/deserialization. Disabled.
   */
  @Test (enabled = false)
  public void testParallelPerformance() throws ExecutionException, InterruptedException {
    final ZNRecord record = createZnRecord();
    final ZNRecordSerializer serializer1 = new ZNRecordSerializer();
    final ZNRecordStreamingSerializer serializer2 = new ZNRecordStreamingSerializer();
    int loop = 100000;
    ExecutorService executorService = Executors.newFixedThreadPool(10000);
    long start = System.currentTimeMillis();
    batchSerialize(serializer1, executorService, loop, record);
    System.out.println("ZNRecordSerializer serialize took " + (System.currentTimeMillis() - start) + " ms");
    byte[] data = serializer1.serialize(record);
    start = System.currentTimeMillis();
    // Fixed: measure parallel deserialization here. The original called
    // batchSerialize(serializer2, ...) while labeling it "ZNRecordSerializer deserialize".
    batchDeSerialize(serializer1, executorService, loop, data);
    System.out.println("ZNRecordSerializer deserialize took " + (System.currentTimeMillis() - start) + " ms");
    start = System.currentTimeMillis();
    for (int i = 0; i < loop; i++) {
      data = serializer2.serialize(record);
    }
    System.out.println("ZNRecordStreamingSerializer serialize took " + (System.currentTimeMillis() - start) + " ms");
    start = System.currentTimeMillis();
    for (int i = 0; i < loop; i++) {
      ZNRecord result = (ZNRecord) serializer2.deserialize(data);
    }
    System.out.println("ZNRecordStreamingSerializer deserialize took " + (System.currentTimeMillis() - start) + " ms");
    // Release the benchmark's worker threads.
    executorService.shutdown();
  }

  /** Submits {@code repeatTime} serialize tasks and waits for all of them to finish. */
  private void batchSerialize(final ZkSerializer serializer, ExecutorService executorService, int repeatTime, final ZNRecord record)
      throws ExecutionException, InterruptedException {
    List<Future> futures = new ArrayList<>();
    for (int i = 0; i < repeatTime; i++) {
      Future f = executorService.submit(new Runnable() {
        @Override public void run() {
          serializer.serialize(record);
        }
      });
      futures.add(f);
    }
    for (Future f : futures) {
      f.get();
    }
  }

  /** Submits {@code repeatTime} deserialize tasks and waits for all of them to finish. */
  private void batchDeSerialize(final ZkSerializer serializer, ExecutorService executorService, int repeatTime, final byte[] data)
      throws ExecutionException, InterruptedException {
    List<Future> futures = new ArrayList<>();
    for (int i = 0; i < repeatTime; i++) {
      Future f = executorService.submit(new Runnable() {
        @Override public void run() {
          serializer.deserialize(data);
        }
      });
      futures.add(f);
    }
    for (Future f : futures) {
      f.get();
    }
  }

  /**
   * Test that simple, list, and map fields are initialized as empty even when not in json
   */
  @Test
  public void fieldAutoInitTest() {
    String jsonString = "{\n" + "\"id\": \"myId\"\n" + "}";
    ZNRecordSerializer serializer = new ZNRecordSerializer();
    ZNRecord result = (ZNRecord) serializer.deserialize(jsonString.getBytes());
    Assert.assertNotNull(result);
    Assert.assertEquals(result.getId(), "myId");
    Assert.assertNotNull(result.getSimpleFields());
    Assert.assertTrue(result.getSimpleFields().isEmpty());
    Assert.assertNotNull(result.getListFields());
    Assert.assertTrue(result.getListFields().isEmpty());
    Assert.assertNotNull(result.getMapFields());
    Assert.assertTrue(result.getMapFields().isEmpty());
  }

  /**
   * Serializes a large record with and without the "enableCompression" flag and verifies
   * the compressed payload round-trips to an equal record.
   */
  @Test
  public void testBasicCompression() {
    ZNRecord record = new ZNRecord("testId");
    int numPartitions = 1024;
    int replicas = 3;
    int numNodes = 100;
    Random random = new Random();
    for (int p = 0; p < numPartitions; p++) {
      Map<String, String> map = new HashMap<String, String>();
      for (int r = 0; r < replicas; r++) {
        map.put("host_" + random.nextInt(numNodes), "ONLINE");
      }
      record.setMapField("TestResource_" + p, map);
    }
    ZNRecordSerializer serializer = new ZNRecordSerializer();
    byte[] serializedBytes;
    serializedBytes = serializer.serialize(record);
    int uncompressedSize = serializedBytes.length;
    System.out.println("raw serialized data length = " + serializedBytes.length);
    // Setting this simple field triggers compression inside the serializer.
    record.setSimpleField("enableCompression", "true");
    serializedBytes = serializer.serialize(record);
    int compressedSize = serializedBytes.length;
    System.out.println("compressed serialized data length = " + serializedBytes.length);
    System.out.printf("compression ratio: %.2f \n", (uncompressedSize * 1.0 / compressedSize));
    ZNRecord result = (ZNRecord) serializer.deserialize(serializedBytes);
    Assert.assertEquals(result, record);
  }

  /**
   * Compressed round-trip at increasing record sizes (1k to 19k partitions).
   */
  @Test
  public void testCompression() {
    int runId = 1;
    while (runId < 20) {
      int numPartitions = runId * 1000;
      int replicas = 3;
      int numNodes = 100;
      Random random = new Random();
      ZNRecord record = new ZNRecord("testId");
      System.out.println("Partitions:" + numPartitions);
      for (int p = 0; p < numPartitions; p++) {
        Map<String, String> map = new HashMap<String, String>();
        for (int r = 0; r < replicas; r++) {
          map.put("host_" + random.nextInt(numNodes), "ONLINE");
        }
        record.setMapField("TestResource_" + p, map);
      }
      ZNRecordSerializer serializer = new ZNRecordSerializer();
      byte[] serializedBytes;
      record.setSimpleField("enableCompression", "true");
      serializedBytes = serializer.serialize(record);
      int compressedSize = serializedBytes.length;
      System.out.println("compressed serialized data length = " + compressedSize);
      ZNRecord result = (ZNRecord) serializer.deserialize(serializedBytes);
      Assert.assertEquals(result, record);
      runId = runId + 1;
    }
  }

  /*
   * Tests that a plain Jackson-based serializer produces/consumes bytes that are
   * interchangeable with ZNRecordSerializer output for ZNRecord.
   */
  @Test
  public void testCodehausJacksonSerializer() {
    ZNRecord record = createZnRecord();
    ZNRecordSerializer znRecordSerializer = new ZNRecordSerializer();
    CodehausJsonSerializer<ZNRecord> codehausJsonSerializer =
        new CodehausJsonSerializer<>(ZNRecord.class);
    byte[] codehausBytes = codehausJsonSerializer.serialize(record);
    ZNRecord deserialized =
        (ZNRecord) codehausJsonSerializer.deserialize(codehausBytes);
    Assert.assertEquals(deserialized, record,
        "Codehaus jackson serializer should work with ZNRecord");
    deserialized = (ZNRecord) znRecordSerializer.deserialize(codehausBytes);
    Assert.assertEquals(deserialized, record,
        "ZNRecordSerializer should deserialize bytes serialized by codehaus serializer");
    deserialized =
        (ZNRecord) codehausJsonSerializer.deserialize(znRecordSerializer.serialize(record));
    Assert.assertEquals(deserialized, record,
        "codehaus serializer should deserialize bytes serialized by ZNRecordSerializer");
  }

  /**
   * Minimal JSON serializer built directly on Jackson's ObjectMapper.
   * NOTE(review): despite the "Codehaus" name it uses com.fasterxml Jackson 2.x imports;
   * presumably it mimics the configuration of the legacy Codehaus (1.x) serializer.
   */
  private static class CodehausJsonSerializer<T> {
    private static final Logger LOG = LoggerFactory.getLogger(CodehausJsonSerializer.class);
    private final Class<T> _clazz;
    private final ObjectMapper mapper = new ObjectMapper();

    public CodehausJsonSerializer(Class<T> clazz) {
      _clazz = clazz;
    }

    /** Serializes {@code data} to pretty-printed JSON bytes; returns an empty array on failure. */
    public byte[] serialize(Object data) {
      mapper.enable(SerializationFeature.INDENT_OUTPUT);
      mapper.enable(MapperFeature.AUTO_DETECT_FIELDS);
      mapper.enable(MapperFeature.CAN_OVERRIDE_ACCESS_MODIFIERS);
      StringWriter sw = new StringWriter();
      try {
        mapper.writeValue(sw, data);
        return sw.toString().getBytes();
      } catch (Exception e) {
        LOG.error("Error during serialization of data", e);
      }
      return new byte[]{};
    }

    /** Deserializes JSON bytes into {@code T}; returns null on empty input or failure. */
    public Object deserialize(byte[] bytes) {
      if (bytes == null || bytes.length == 0) {
        return null;
      }
      try {
        ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
        mapper.enable(MapperFeature.AUTO_DETECT_FIELDS);
        mapper.enable(MapperFeature.AUTO_DETECT_SETTERS);
        mapper.enable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
        return mapper.readValue(bais, _clazz);
      } catch (Exception e) {
        LOG.error("Error during deserialization of bytes: " + new String(bytes), e);
      }
      return null;
    }
  }
}
| 9,796 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkBucketDataAccessor.java
|
package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.AccessOption;
import org.apache.helix.BaseDataAccessor;
import org.apache.helix.BucketDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.HelixProperty;
import org.apache.helix.TestHelper;
import org.apache.helix.common.ZkTestBase;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.impl.factory.DedicatedZkClientFactory;
import org.apache.helix.zookeeper.zkclient.exception.ZkMarshallingError;
import org.apache.helix.zookeeper.zkclient.serialize.ZkSerializer;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * Tests for ZkBucketDataAccessor: bucketized compressed write/read of HelixProperty,
 * version bookkeeping (last-write / last-successful-write markers), and TTL-based
 * garbage collection of stale versions.
 */
public class TestZkBucketDataAccessor extends ZkTestBase {
  private static final String PATH = "/" + TestHelper.getTestClassName();
  private static final String NAME_KEY = TestHelper.getTestClassName();
  private static final String LAST_SUCCESSFUL_WRITE_KEY = "LAST_SUCCESSFUL_WRITE";
  private static final String LAST_WRITE_KEY = "LAST_WRITE";
  private static final long VERSION_TTL_MS = 1000L;
  // Populate list and map fields for content comparison
  private static final List<String> LIST_FIELD = ImmutableList.of("1", "2");
  private static final Map<String, String> MAP_FIELD = ImmutableMap.of("1", "2");
  private final ZNRecord record = new ZNRecord(NAME_KEY);
  private BucketDataAccessor _bucketDataAccessor;
  private BaseDataAccessor<byte[]> _zkBaseDataAccessor;

  @BeforeClass
  public void beforeClass() {
    // Initialize ZK accessors for testing. The raw byte[] serializer lets the test
    // read bucket contents and version markers without interpreting them.
    HelixZkClient zkClient = DedicatedZkClientFactory.getInstance()
        .buildZkClient(new HelixZkClient.ZkConnectionConfig(ZK_ADDR));
    zkClient.setZkSerializer(new ZkSerializer() {
      @Override
      public byte[] serialize(Object data) throws ZkMarshallingError {
        if (data instanceof byte[]) {
          return (byte[]) data;
        }
        throw new HelixException("ZkBucketDataAccessor only supports a byte array as an argument!");
      }

      @Override
      public Object deserialize(byte[] data) throws ZkMarshallingError {
        return data;
      }
    });
    _zkBaseDataAccessor = new ZkBaseDataAccessor<>(zkClient);
    _bucketDataAccessor = new ZkBucketDataAccessor(zkClient, 50 * 1024, VERSION_TTL_MS);
    // Fill in some data for the record
    record.setSimpleField(NAME_KEY, NAME_KEY);
    record.setListField(NAME_KEY, LIST_FIELD);
    record.setMapField(NAME_KEY, MAP_FIELD);
  }

  @AfterClass
  public void afterClass() {
    _bucketDataAccessor.disconnect();
  }

  /**
   * Attempt writing a simple HelixProperty using compressedBucketWrite.
   * @throws IOException
   */
  @Test
  public void testCompressedBucketWrite() throws IOException {
    Assert.assertTrue(_bucketDataAccessor.compressedBucketWrite(PATH, new HelixProperty(record)));
  }

  /**
   * Writes several versions to multiple paths and verifies the version markers advance
   * and that garbage collection leaves exactly the latest version behind.
   */
  @Test(dependsOnMethods = "testCompressedBucketWrite")
  public void testMultipleWrites() throws Exception {
    // Note to use a count number < 10 for testing.
    // Otherwise the nodes named with version number will be ordered in a different alphabet order.
    // This might hide some bugs in the GC code.
    int count = 5;
    int pathCount = 2;
    Assert.assertTrue(VERSION_TTL_MS > 100,
        "This test should be executed with the TTL more than 100ms.");
    try {
      // Write "count + 1" times, so the latest version will be "count"
      for (int i = 0; i < count + 1; i++) {
        for (int j = 0; j < pathCount; j++) {
          _bucketDataAccessor.compressedBucketWrite(PATH + j, new HelixProperty(record));
        }
      }
      for (int j = 0; j < pathCount; j++) {
        String path = PATH + j;
        // Last known good version number should be "count"
        byte[] binarySuccessfulWriteVer = _zkBaseDataAccessor.get(path + "/" + LAST_SUCCESSFUL_WRITE_KEY, null, AccessOption.PERSISTENT);
        long lastSuccessfulWriteVer = Long.parseLong(new String(binarySuccessfulWriteVer));
        Assert.assertEquals(lastSuccessfulWriteVer, count);
        // Last write version should be "count"
        byte[] binaryWriteVer = _zkBaseDataAccessor.get(path + "/" + LAST_WRITE_KEY, null, AccessOption.PERSISTENT);
        long writeVer = Long.parseLong(new String(binaryWriteVer));
        Assert.assertEquals(writeVer, count);
        // Test that all previous versions have been deleted
        // Use Verifier because GC can take ZK delay
        Assert.assertTrue(TestHelper.verify(() -> {
          List<String> children = _zkBaseDataAccessor.getChildNames(path, AccessOption.PERSISTENT);
          return children.size() == 3 && children.containsAll(ImmutableList
              .of(LAST_SUCCESSFUL_WRITE_KEY, LAST_WRITE_KEY, Long.toString(lastSuccessfulWriteVer)));
        }, VERSION_TTL_MS * 2));
        // Wait one more TTL to ensure that the GC has been done.
        Thread.sleep(VERSION_TTL_MS);
        List<String> children = _zkBaseDataAccessor.getChildNames(path, AccessOption.PERSISTENT);
        Assert.assertTrue(children.size() == 3 && children.containsAll(ImmutableList
            .of(LAST_SUCCESSFUL_WRITE_KEY, LAST_WRITE_KEY, Long.toString(lastSuccessfulWriteVer))));
      }
    } finally {
      for (int j = 0; j < pathCount; j++) {
        _bucketDataAccessor.compressedBucketDelete(PATH + j);
      }
    }
  }

  /**
   * The record written in {@link #testCompressedBucketWrite()} is the same record that was written.
   */
  @Test(dependsOnMethods = "testMultipleWrites")
  public void testCompressedBucketRead() throws IOException {
    _bucketDataAccessor.compressedBucketWrite(PATH, new HelixProperty(record));
    HelixProperty readRecord = _bucketDataAccessor.compressedBucketRead(PATH, HelixProperty.class);
    Assert.assertEquals(readRecord.getRecord().getSimpleField(NAME_KEY), NAME_KEY);
    Assert.assertEquals(readRecord.getRecord().getListField(NAME_KEY), LIST_FIELD);
    Assert.assertEquals(readRecord.getRecord().getMapField(NAME_KEY), MAP_FIELD);
    _bucketDataAccessor.compressedBucketDelete(PATH);
  }

  /**
   * Write a HelixProperty with large number of entries using BucketDataAccessor and read it back.
   */
  @Test(dependsOnMethods = "testCompressedBucketRead")
  public void testLargeWriteAndRead() throws IOException {
    String name = "largeResourceAssignment";
    HelixProperty property = createLargeHelixProperty(name, 100000);
    // Perform large write
    long before = System.currentTimeMillis();
    _bucketDataAccessor.compressedBucketWrite("/" + name, property);
    long after = System.currentTimeMillis();
    System.out.println("Write took " + (after - before) + " ms");
    // Read it back
    before = System.currentTimeMillis();
    HelixProperty readRecord =
        _bucketDataAccessor.compressedBucketRead("/" + name, HelixProperty.class);
    after = System.currentTimeMillis();
    System.out.println("Read took " + (after - before) + " ms");
    // Check against the original HelixProperty
    Assert.assertEquals(readRecord, property);
    // Clean up, consistent with the other tests.
    _bucketDataAccessor.compressedBucketDelete("/" + name);
  }

  /**
   * Builds a HelixProperty with {@code numEntries} randomly keyed map fields.
   * Note: random bytes decoded as UTF-8 may collide, so the actual entry count can be
   * slightly lower than requested; the test only needs "large", not exact.
   */
  private HelixProperty createLargeHelixProperty(String name, int numEntries) {
    HelixProperty property = new HelixProperty(name);
    Random random = new Random();
    for (int i = 0; i < numEntries; i++) {
      // Create a random string every time
      byte[] arrayKey = new byte[20];
      byte[] arrayVal = new byte[20];
      random.nextBytes(arrayKey);
      random.nextBytes(arrayVal);
      String randomStrKey = new String(arrayKey, StandardCharsets.UTF_8);
      String randomStrVal = new String(arrayVal, StandardCharsets.UTF_8);
      // Dummy mapField
      Map<String, String> mapField = new HashMap<>();
      mapField.put(randomStrKey, randomStrVal);
      property.getRecord().setMapField(randomStrKey, mapField);
    }
    return property;
  }
}
| 9,797 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestAddBuiltInStateModelDef.java
|
package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Date;
import org.apache.helix.BaseDataAccessor;
import org.apache.helix.HelixAdmin;
import org.apache.helix.PropertyKey;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.zookeeper.data.Stat;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Verifies that the controller auto-creates every built-in state model definition on
 * cluster start-up, and that a definition added up-front (MasterSlave) is left untouched.
 */
public class TestAddBuiltInStateModelDef extends ZkUnitTestBase {
  @Test
  public void test() throws Exception {
    String className = TestHelper.getTestClassName();
    String methodName = TestHelper.getTestMethodName();
    String clusterName = className + "_" + methodName;
    System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
    HelixAdmin helixAdmin = new ZKHelixAdmin(_gZkClient);
    helixAdmin.addCluster(clusterName);
    // Pre-create MasterSlave so we can later check the controller does not overwrite it.
    helixAdmin.addStateModelDef(clusterName,
        BuiltInStateModelDefinitions.MasterSlave.getStateModelDefinition().getId(),
        BuiltInStateModelDefinitions.MasterSlave.getStateModelDefinition());
    ClusterControllerManager controller = new ClusterControllerManager(ZK_ADDR, clusterName);
    controller.syncStart();
    // The controller shall create all built-in state model definitions.
    final BaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient);
    final PropertyKey.Builder keyBuilder = new PropertyKey.Builder(clusterName);
    boolean verified = TestHelper.verify(() -> {
      for (BuiltInStateModelDefinitions def : BuiltInStateModelDefinitions.values()) {
        String path = keyBuilder.stateModelDef(def.getStateModelDefinition().getId()).getPath();
        if (!baseAccessor.exists(path, 0)) {
          return false;
        }
        // MasterSlave must still be at ZNode version 0, i.e. never re-written.
        if (def == BuiltInStateModelDefinitions.MasterSlave) {
          Stat stat = new Stat();
          baseAccessor.get(path, stat, 0);
          if (stat.getVersion() != 0) {
            return false;
          }
        }
      }
      return true;
    }, 10 * 1000);
    Assert.assertTrue(verified);
    controller.syncStop();
    helixAdmin.dropCluster(clusterName);
    System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
  }
}
| 9,798 |
0 |
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager
|
Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkStateChangeListener.java
|
package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.integration.common.ZkStandAloneCMTestBase;
import org.apache.zookeeper.Watcher.Event.KeeperState;
import org.testng.Assert;
/**
 * Exercises the ZK state-change (flapping) detection: a manager that sees too many
 * Disconnected events within the configured window must report itself disconnected.
 * The whole test is currently disabled (the @Test annotation is commented out).
 */
public class TestZkStateChangeListener extends ZkStandAloneCMTestBase {
  // TODO this test has been covered by TestZkFlapping. check if still needed
  // @Test
  public void testDisconnectHistory() throws Exception {
    // String controllerName = CONTROLLER_PREFIX + "_0";
    // StartCMResult controllerResult = _startCMResultMap.get(controllerName);
    // ZKHelixManager controller = (ZKHelixManager) controllerResult._manager;
    // ZkStateChangeListener listener1 = new ZkStateChangeListener(controller, 5000, 10);
    // ZkStateChangeListener listener1 = new ZkStateChangeListener(_controller, 5000, 10);
    // 11 disconnects in 5 sec
    // Presumably the controller's threshold is 10 disconnects / 5s: the first 10 events
    // keep it connected, the 11th flips it to disconnected. TODO confirm threshold config.
    for (int i = 0; i < 11; i++) {
      Thread.sleep(200);
      _controller.handleStateChanged(KeeperState.Disconnected);
      if (i < 10) {
        Assert.assertTrue(_controller.isConnected());
      } else {
        Assert.assertFalse(_controller.isConnected());
      }
    }
    // If maxDisconnectThreshold is 0 it should be set to 1
    // String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + 0);
    // ZKHelixManager manager = (ZKHelixManager) _startCMResultMap.get(instanceName)._manager;
    // ZkStateChangeListener listener2 = new ZkStateChangeListener(_participants[0], 5000, 0);
    // With an effective threshold of 1, the second disconnect trips the participant.
    for (int i = 0; i < 2; i++) {
      Thread.sleep(200);
      _participants[0].handleStateChanged(KeeperState.Disconnected);
      if (i < 1) {
        Assert.assertTrue(_participants[0].isConnected());
      } else {
        Assert.assertFalse(_participants[0].isConnected());
      }
    }
    // If there are long time after disconnect, older history should be cleanup
    // instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + 1);
    // manager = (ZKHelixManager) _startCMResultMap.get(instanceName)._manager;
    // ZkStateChangeListener listener3 = new ZkStateChangeListener(_participants[1], 5000, 5);
    for (int i = 0; i < 3; i++) {
      Thread.sleep(200);
      _participants[1].handleStateChanged(KeeperState.Disconnected);
      Assert.assertTrue(_participants[1].isConnected());
    }
    // Sleep past the 5s window so the three disconnects above age out of the history.
    Thread.sleep(5000);
    // Old entries should be cleaned up
    for (int i = 0; i < 3; i++) {
      Thread.sleep(200);
      _participants[1].handleStateChanged(KeeperState.Disconnected);
      Assert.assertTrue(_participants[1].isConnected());
    }
    for (int i = 0; i < 2; i++) {
      Thread.sleep(200);
      _participants[1].handleStateChanged(KeeperState.Disconnected);
      Assert.assertTrue(_participants[1].isConnected());
    }
    // The next disconnect exceeds the in-window count and trips the flapping guard.
    _participants[1].handleStateChanged(KeeperState.Disconnected);
    Assert.assertFalse(_participants[1].isConnected());
  }
}
| 9,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.