index
int64 0
0
| repo_id
stringlengths 9
205
| file_path
stringlengths 31
246
| content
stringlengths 1
12.2M
| __index_level_0__
int64 0
10k
|
---|---|---|---|---|
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/TestJobAccessor.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.TestHelper;
import org.apache.helix.rest.server.resources.helix.JobAccessor;
import org.apache.helix.rest.server.resources.helix.WorkflowAccessor;
import org.apache.helix.task.JobConfig;
import org.apache.helix.task.JobQueue;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.TaskPartitionState;
import org.apache.helix.task.TaskUtil;
import org.apache.helix.task.WorkflowConfig;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * REST-level tests for the Helix job endpoints exposed by {@code JobAccessor}.
 *
 * <p>The test methods form a chain via {@code dependsOnMethods}: list jobs, read a single
 * job / its config / its context, then create, exercise user-content, and finally delete a
 * job. State created by earlier methods (e.g. the test queue) is consumed by later ones.
 */
public class TestJobAccessor extends AbstractTestClass {
  private static final String CLUSTER_NAME = TASK_TEST_CLUSTER;
  private static final String WORKFLOW_NAME = WORKFLOW_PREFIX + 0;
  private static final String TEST_QUEUE_NAME = "TestQueue";
  private static final String JOB_NAME = WORKFLOW_NAME + "_" + JOB_PREFIX + 0;
  private static final String TEST_JOB_NAME = "TestJob";
  // Raw JobConfig ZNRecord payload used to enqueue a job through the REST API in testCreateJob.
  private static final String JOB_INPUT =
      "{\"id\":\"TestJob\",\"simpleFields\":{\"JobID\":\"Job2\"," + "\"WorkflowID\":\"Workflow1\"},\"mapFields\":{\"Task1\":{\"TASK_ID\":\"Task1\","
          + "\"TASK_COMMAND\":\"Backup\",\"TASK_TARGET_PARTITION\":\"p1\"},\"Task2\":{\"TASK_ID\":"
          + "\"Task2\",\"TASK_COMMAND\":\"ReIndex\"}},\"listFields\":{}}";

  /**
   * GET /jobs must return exactly the set of jobs in the workflow's job DAG.
   */
  @Test
  public void testGetJobs() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String body = get("clusters/" + CLUSTER_NAME + "/workflows/" + WORKFLOW_NAME + "/jobs", null,
        Response.Status.OK.getStatusCode(), true);
    JsonNode node = OBJECT_MAPPER.readTree(body);
    String jobsStr = node.get(JobAccessor.JobProperties.Jobs.name()).toString();
    Set<String> jobs = OBJECT_MAPPER.readValue(jobsStr,
        OBJECT_MAPPER.getTypeFactory().constructCollectionType(Set.class, String.class));
    // The REST response must mirror the job DAG stored in the workflow config.
    Assert.assertEquals(jobs,
        _workflowMap.get(CLUSTER_NAME).get(WORKFLOW_NAME).getWorkflowConfig().getJobDag()
            .getAllNodes());
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * GET /jobs/{job} must return both the JobConfig and JobContext sections, and the
   * config's WorkflowID must point back at the owning workflow.
   */
  @Test(dependsOnMethods = "testGetJobs")
  public void testGetJob() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String body =
        get("clusters/" + CLUSTER_NAME + "/workflows/" + WORKFLOW_NAME + "/jobs/" + JOB_NAME, null,
            Response.Status.OK.getStatusCode(), true);
    JsonNode node = OBJECT_MAPPER.readTree(body);
    Assert.assertNotNull(node.get(JobAccessor.JobProperties.JobConfig.name()));
    Assert.assertNotNull(node.get(JobAccessor.JobProperties.JobContext.name()));
    String workflowId =
        node.get(JobAccessor.JobProperties.JobConfig.name()).get("simpleFields").get("WorkflowID")
            .textValue();
    Assert.assertEquals(workflowId, WORKFLOW_NAME);
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * GET /jobs/{job}/configs returns the bare JobConfig record; verify its WorkflowID.
   */
  @Test(dependsOnMethods = "testGetJob")
  public void testGetJobConfig() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String body =
        get("clusters/" + CLUSTER_NAME + "/workflows/" + WORKFLOW_NAME + "/jobs/" + JOB_NAME
            + "/configs", null, Response.Status.OK.getStatusCode(), true);
    JsonNode node = OBJECT_MAPPER.readTree(body);
    String workflowId = node.get("simpleFields").get("WorkflowID").textValue();
    Assert.assertEquals(workflowId, WORKFLOW_NAME);
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * GET /jobs/{job}/context returns the runtime JobContext; partition 0 of the
   * pre-seeded workflow is expected to have completed.
   */
  @Test(dependsOnMethods = "testGetJobConfig")
  public void testGetJobContext() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String body =
        get("clusters/" + CLUSTER_NAME + "/workflows/" + WORKFLOW_NAME + "/jobs/" + JOB_NAME
            + "/context", null, Response.Status.OK.getStatusCode(), true);
    JsonNode node = OBJECT_MAPPER.readTree(body);
    Assert.assertEquals(node.get("mapFields").get("0").get("STATE").textValue(),
        TaskPartitionState.COMPLETED.name());
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * Creates a job queue via PUT, then enqueues a job from the raw {@link #JOB_INPUT}
   * payload and verifies the job shows up in both the driver's JobConfig and the
   * queue's job DAG.
   */
  @Test(dependsOnMethods = "testGetJobContext")
  public void testCreateJob() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    TaskDriver driver = getTaskDriver(CLUSTER_NAME);
    // Create JobQueue, reusing the existing workflow's config as a template.
    JobQueue.Builder jobQueue = new JobQueue.Builder(TEST_QUEUE_NAME)
        .setWorkflowConfig(driver.getWorkflowConfig(WORKFLOW_NAME));
    Entity entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(Collections
            .singletonMap(WorkflowAccessor.WorkflowProperties.WorkflowConfig.name(),
                jobQueue.build().getWorkflowConfig().getRecord().getSimpleFields())),
        MediaType.APPLICATION_JSON_TYPE);
    put("clusters/" + CLUSTER_NAME + "/workflows/" + TEST_QUEUE_NAME, null, entity,
        Response.Status.OK.getStatusCode());
    // Test enqueue job
    entity = Entity.entity(JOB_INPUT, MediaType.APPLICATION_JSON_TYPE);
    put("clusters/" + CLUSTER_NAME + "/workflows/" + TEST_QUEUE_NAME + "/jobs/" + TEST_JOB_NAME,
        null, entity, Response.Status.OK.getStatusCode());
    String jobName = TaskUtil.getNamespacedJobName(TEST_QUEUE_NAME, TEST_JOB_NAME);
    JobConfig jobConfig = driver.getJobConfig(jobName);
    Assert.assertNotNull(jobConfig);
    WorkflowConfig workflowConfig = driver.getWorkflowConfig(TEST_QUEUE_NAME);
    Assert.assertTrue(workflowConfig.getJobDag().getAllNodes().contains(jobName));
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * Round-trips the job user-content store: starts empty, accepts an update POST,
   * and reflects subsequent key additions/overwrites on GET.
   */
  @Test(dependsOnMethods = "testCreateJob")
  public void testGetAddJobContent() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String uri = "clusters/" + CLUSTER_NAME + "/workflows/Workflow_0/jobs/JOB0/userContent";
    // Empty user content
    String body =
        get(uri, null, Response.Status.OK.getStatusCode(), true);
    Map<String, String> contentStore = OBJECT_MAPPER.readValue(body, new TypeReference<Map<String, String>>() {});
    Assert.assertTrue(contentStore.isEmpty());
    // Post user content
    Map<String, String> map1 = new HashMap<>();
    map1.put("k1", "v1");
    Entity entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(map1), MediaType.APPLICATION_JSON_TYPE);
    post(uri, ImmutableMap.of("command", "update"), entity, Response.Status.OK.getStatusCode());
    // update (add items) workflow content store
    body = get(uri, null, Response.Status.OK.getStatusCode(), true);
    contentStore = OBJECT_MAPPER.readValue(body, new TypeReference<Map<String, String>>() {});
    Assert.assertEquals(contentStore, map1);
    // modify map1 and verify: overwrite k1 and add k2 in one update.
    map1.put("k1", "v2");
    map1.put("k2", "v2");
    entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(map1), MediaType.APPLICATION_JSON_TYPE);
    post(uri, ImmutableMap.of("command", "update"), entity, Response.Status.OK.getStatusCode());
    body = get(uri, null, Response.Status.OK.getStatusCode(), true);
    contentStore = OBJECT_MAPPER.readValue(body, new TypeReference<Map<String, String>>() {});
    Assert.assertEquals(contentStore, map1);
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * Negative cases for the user-content store: missing workflow/job on GET is 404,
   * unsupported command and non-string-map payloads are 400. POSTs to nonexistent
   * paths return OK because the server creates the UserContent znode on demand.
   */
  @Test(dependsOnMethods = "testGetAddJobContent")
  public void testInvalidGetAndUpdateJobContentStore() {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String validURI = "clusters/" + CLUSTER_NAME + "/workflows/Workflow_0/jobs/JOB0/userContent";
    String invalidURI1 = "clusters/" + CLUSTER_NAME + "/workflows/xxx/jobs/JOB0/userContent"; // workflow not exist
    String invalidURI2 = "clusters/" + CLUSTER_NAME + "/workflows/Workflow_0/jobs/xxx/userContent"; // job not exist
    Entity validEntity = Entity.entity("{\"k1\":\"v1\"}", MediaType.APPLICATION_JSON_TYPE);
    Entity invalidEntity = Entity.entity("{\"k1\":{}}", MediaType.APPLICATION_JSON_TYPE); // not Map<String, String>
    Map<String, String> validCmd = ImmutableMap.of("command", "update");
    Map<String, String> invalidCmd = ImmutableMap.of("command", "delete"); // cmd not supported
    get(invalidURI1, null, Response.Status.NOT_FOUND.getStatusCode(), false);
    get(invalidURI2, null, Response.Status.NOT_FOUND.getStatusCode(), false);
    // The following two lines get OK even though NOT FOUND might be expected, because the
    // server-side code creates UserContent znodes when they are not found.
    post(invalidURI1, validCmd, validEntity, Response.Status.OK.getStatusCode());
    post(invalidURI2, validCmd, validEntity, Response.Status.OK.getStatusCode());
    post(validURI, invalidCmd, validEntity, Response.Status.BAD_REQUEST.getStatusCode());
    post(validURI, validCmd, invalidEntity, Response.Status.BAD_REQUEST.getStatusCode());
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * Stops the queue, deletes the enqueued job over REST, and verifies that both the
   * JobConfig and the queue's job DAG no longer contain it.
   */
  @Test(dependsOnMethods = "testInvalidGetAndUpdateJobContentStore")
  public void testDeleteJob() throws InterruptedException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    TaskDriver driver = getTaskDriver(CLUSTER_NAME);
    // The queue must be stopped before a job can be deleted from it.
    driver.waitToStop(TEST_QUEUE_NAME, 5000);
    delete("clusters/" + CLUSTER_NAME + "/workflows/" + TEST_QUEUE_NAME + "/jobs/" + TEST_JOB_NAME,
        Response.Status.OK.getStatusCode());
    String jobName = TaskUtil.getNamespacedJobName(TEST_QUEUE_NAME, TEST_JOB_NAME);
    JobConfig jobConfig = driver.getJobConfig(jobName);
    Assert.assertNull(jobConfig);
    WorkflowConfig workflowConfig = driver.getWorkflowConfig(TEST_QUEUE_NAME);
    // assertFalse is clearer than assertTrue(!...).
    Assert.assertFalse(workflowConfig.getJobDag().getAllNodes().contains(jobName));
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
}
| 9,300 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/TestAclRegister.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.fasterxml.jackson.core.JsonProcessingException;
import org.apache.helix.TestHelper;
import org.apache.helix.rest.acl.AclRegister;
import org.apache.helix.rest.common.HelixRestNamespace;
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.server.authValidator.NoopAuthValidator;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Test;
import static org.mockito.ArgumentMatchers.any;
/**
 * Tests ACL registration hooks in the Helix REST server.
 *
 * <p>{@code testDefaultAclRegister} checks that cluster creation succeeds with the default
 * (no-op) ACL register. {@code testCustomACLRegister} boots a second server on an adjacent
 * port with a mock {@code AclRegister} that always throws, and verifies that write requests
 * fail with 500 while reads are unaffected by the ACL hook.
 */
public class TestAclRegister extends AbstractTestClass {
  private String _mockBaseUri;
  private CloseableHttpClient _httpClient;
  // NOTE(review): these two constants appear unused within this class — confirm whether an
  // external listener references them before removing.
  private static String CLASSNAME_TEST_DEFAULT_ACL_REGISTER = "testDefaultAclRegister";
  private static String CLASSNAME_TEST_CUSTOM_ACL_REGISTER = "testCustomACLRegister";

  /**
   * With the default ACL register, creating a cluster via PUT returns 201 CREATED.
   */
  @Test
  public void testDefaultAclRegister() {
    put("clusters/testCluster", null, Entity.entity("", MediaType.APPLICATION_JSON_TYPE),
        Response.Status.CREATED.getStatusCode());
    TestHelper.dropCluster("testCluster", _gZkClient);
  }

  /**
   * Boots a second REST server whose AclRegister throws on createACL: a PUT (which
   * triggers ACL creation) must surface 500, while a GET for a nonexistent cluster
   * still returns 404 (the ACL hook is not on the read path).
   */
  @Test(dependsOnMethods = "testDefaultAclRegister")
  public void testCustomACLRegister() throws IOException, InterruptedException {
    int newPort = getBaseUri().getPort() + 1;
    _mockBaseUri = HttpConstants.HTTP_PROTOCOL_PREFIX + getBaseUri().getHost() + ":" + newPort;
    _httpClient = HttpClients.createDefault();
    AclRegister mockAclRegister = Mockito.mock(AclRegister.class);
    Mockito.doThrow(new RuntimeException()).when(mockAclRegister).createACL(any());
    List<HelixRestNamespace> namespaces = new ArrayList<>();
    namespaces.add(new HelixRestNamespace(HelixRestNamespace.DEFAULT_NAMESPACE_NAME,
        HelixRestNamespace.HelixMetadataStoreType.ZOOKEEPER, ZK_ADDR, true));
    // Create a server that fails acl resource creation via the mock.
    HelixRestServer server =
        new HelixRestServer(namespaces, newPort, getBaseUri().getPath(), Collections.emptyList(),
            new NoopAuthValidator(), new NoopAuthValidator(), mockAclRegister);
    server.start();
    try {
      HttpUriRequest request =
          buildRequest("/clusters/testCluster", HttpConstants.RestVerbs.PUT, "");
      sendRequestAndValidate(request, Response.Status.INTERNAL_SERVER_ERROR.getStatusCode());
      request =
          buildRequest("/clusters/testCluster", HttpConstants.RestVerbs.GET, "");
      sendRequestAndValidate(request, Response.Status.NOT_FOUND.getStatusCode());
    } finally {
      // Always release the extra server and client, even if an assertion above fails,
      // so later tests are not affected by a leaked port/connection pool.
      server.shutdown();
      _httpClient.close();
    }
  }

  /**
   * Builds an HTTP request against the mock server base URI.
   *
   * @param urlSuffix path appended to the mock base URI
   * @param requestMethod one of PUT/DELETE/GET
   * @param jsonEntity JSON body, used only for PUT
   * @throws IllegalArgumentException for unsupported verbs
   */
  private HttpUriRequest buildRequest(String urlSuffix, HttpConstants.RestVerbs requestMethod,
      String jsonEntity) {
    String url = _mockBaseUri + urlSuffix;
    switch (requestMethod) {
      case PUT:
        HttpPut httpPut = new HttpPut(url);
        httpPut.setEntity(new StringEntity(jsonEntity, ContentType.APPLICATION_JSON));
        return httpPut;
      case DELETE:
        return new HttpDelete(url);
      case GET:
        return new HttpGet(url);
      default:
        throw new IllegalArgumentException("Unsupported requestMethod: " + requestMethod);
    }
  }

  /** Executes the request and asserts the response status code. */
  private void sendRequestAndValidate(HttpUriRequest request, int expectedResponseCode)
      throws IllegalArgumentException, IOException {
    HttpResponse response = _httpClient.execute(request);
    Assert.assertEquals(response.getStatusLine().getStatusCode(), expectedResponseCode);
  }
}
| 9,301 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/TestResourceAssignmentOptimizerAccessor.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.io.StringWriter;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.Response;
import com.fasterxml.jackson.core.type.TypeReference;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.TestHelper;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.rest.server.resources.helix.ResourceAssignmentOptimizerAccessor;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * Tests for the partitionAssignment REST endpoint
 * ({@code ResourceAssignmentOptimizerAccessor}): computes hypothetical partition
 * assignments under instance activation/deactivation, instance/resource filters, and
 * both IdealState and CurrentState return formats, for delayed-rebalance and WAGED
 * rebalancers.
 */
public class TestResourceAssignmentOptimizerAccessor extends AbstractTestClass {
  String cluster = "TestCluster_3";
  String instance1 = cluster + "dummyInstance_localhost_12930";
  String urlBase = "clusters/TestCluster_3/partitionAssignment/";
  // toDeactivatedInstance: a live instance the tests will deactivate through the API.
  // toEnabledInstance: pre-disabled in beforeClass so the tests can activate it.
  String toDeactivatedInstance, toEnabledInstance;
  HelixDataAccessor helixDataAccessor;
  List<String> resources;
  List<String> liveInstances;

  /**
   * Picks the instances used by the tests, pre-disables {@link #toEnabledInstance},
   * and switches every resource to FULL_AUTO with delayed rebalance enabled.
   */
  @BeforeClass
  public void beforeClass() {
    helixDataAccessor = new ZKHelixDataAccessor(cluster, _baseAccessor);
    _gSetupTool.addInstanceToCluster(cluster, instance1);
    resources = _gSetupTool.getClusterManagementTool().getResourcesInCluster(cluster);
    liveInstances = helixDataAccessor.getChildNames(helixDataAccessor.keyBuilder().liveInstances());
    Assert.assertFalse(resources.isEmpty() || liveInstances.isEmpty());
    // Set up instances: we need to deactivate one instance.
    toDeactivatedInstance = liveInstances.get(0);
    toEnabledInstance = liveInstances.get(2);
    InstanceConfig config = _gSetupTool.getClusterManagementTool()
        .getInstanceConfig(cluster, toEnabledInstance);
    config.setInstanceEnabled(false);
    _gSetupTool.getClusterManagementTool()
        .setInstanceConfig(cluster, toEnabledInstance, config);
    // Set all resources to FULL_AUTO with delayed rebalance.
    for (String resource : resources) {
      IdealState idealState =
          _gSetupTool.getClusterManagementTool().getResourceIdealState(cluster, resource);
      idealState.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
      idealState.setDelayRebalanceEnabled(true);
      idealState.setRebalanceDelay(360000);
      _gSetupTool.getClusterManagementTool().setResourceIdealState(cluster, resource, idealState);
    }
  }

  /** Restores SEMI_AUTO mode, re-enables the disabled instance, and exits maintenance mode. */
  @AfterClass
  public void afterClass() {
    for (String resource : resources) {
      IdealState idealState =
          _gSetupTool.getClusterManagementTool().getResourceIdealState(cluster, resource);
      idealState.setRebalanceMode(IdealState.RebalanceMode.SEMI_AUTO);
      _gSetupTool.getClusterManagementTool().setResourceIdealState(cluster, resource, idealState);
    }
    InstanceConfig config = _gSetupTool.getClusterManagementTool()
        .getInstanceConfig(cluster, toEnabledInstance);
    config.setInstanceEnabled(true);
    _gSetupTool.getClusterManagementTool().setInstanceConfig(cluster, toEnabledInstance, config);
    _gSetupTool.getClusterManagementTool()
        .enableMaintenanceMode(cluster, false, TestHelper.getTestMethodName());
  }

  /**
   * Deserializes a partitionAssignment response body into
   * resource -> partition -> instance -> state.
   */
  private Map<String, Map<String, Map<String, String>>> readAssignments(String body)
      throws IOException {
    return OBJECT_MAPPER.readValue(body,
        new TypeReference<HashMap<String, Map<String, Map<String, String>>>>() {
        });
  }

  /**
   * Exercises the partitionAssignment endpoint with the delayed-rebalance FULL_AUTO
   * setup: instance activate/deactivate, InstanceFilter, ResourceFilter, and the
   * CurrentStateFormat return format, validating both bodies and metadata headers.
   */
  @Test
  public void testComputePartitionAssignment() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    // Test AddInstances, RemoveInstances and SwapInstances
    String payload = "{\"InstanceChange\" : { \"ActivateInstances\" : [\"" + toEnabledInstance + "\"],"
        + "\"DeactivateInstances\" : [ \"" + toDeactivatedInstance + "\"] }} ";
    Response response = post(urlBase, null, Entity.entity(payload, MediaType.APPLICATION_JSON_TYPE),
        Response.Status.OK.getStatusCode(), true);
    Map<String, Map<String, Map<String, String>>> resourceAssignments =
        readAssignments(response.readEntity(String.class));
    Set<String> hostSet = new HashSet<>();
    resourceAssignments.forEach((k, v) -> v.forEach((kk, vv) -> hostSet.addAll(vv.keySet())));
    // Every partition should carry 2 replicas in the computed assignment.
    resourceAssignments.forEach((k, v) -> v.forEach((kk, vv) -> Assert.assertEquals(vv.size(), 2)));
    Assert.assertTrue(hostSet.contains(toEnabledInstance));
    Assert.assertFalse(hostSet.contains(toDeactivatedInstance));
    // Validate header
    MultivaluedMap<String, Object> headers = response.getHeaders();
    Assert.assertTrue(headers.containsKey(ResourceAssignmentOptimizerAccessor.RESPONSE_HEADER_KEY));
    Assert.assertFalse(
        headers.get(ResourceAssignmentOptimizerAccessor.RESPONSE_HEADER_KEY).isEmpty());
    Assert.assertEquals(headers.get(ResourceAssignmentOptimizerAccessor.RESPONSE_HEADER_KEY).get(0),
        "{instanceFilter=[], resourceFilter=[], returnFormat=IdealStateFormat}");
    // Test partitionAssignment InstanceFilter
    String payload2 = "{\"Options\" : { \"InstanceFilter\" : [\"" + liveInstances.get(0) + "\" , \""
        + liveInstances.get(1) + "\"] }} ";
    Response response2 = post(urlBase, null, Entity.entity(payload2, MediaType.APPLICATION_JSON_TYPE),
        Response.Status.OK.getStatusCode(), true);
    Map<String, Map<String, Map<String, String>>> resourceAssignments2 =
        readAssignments(response2.readEntity(String.class));
    Set<String> hostSet2 = new HashSet<>();
    resourceAssignments2.forEach((k, v) -> v.forEach((kk, vv) -> hostSet2.addAll(vv.keySet())));
    Assert.assertEquals(hostSet2.size(), 2);
    Assert.assertTrue(hostSet2.contains(liveInstances.get(0)));
    Assert.assertTrue(hostSet2.contains(liveInstances.get(1)));
    // Validate header; the filter set's iteration order is unspecified, so accept either order.
    MultivaluedMap<String, Object> headers2 = response2.getHeaders();
    Assert
        .assertTrue(headers2.containsKey(ResourceAssignmentOptimizerAccessor.RESPONSE_HEADER_KEY));
    List<Object> partitionAssignmentMetadata2 =
        headers2.get(ResourceAssignmentOptimizerAccessor.RESPONSE_HEADER_KEY);
    Assert.assertFalse(
        headers2.get(ResourceAssignmentOptimizerAccessor.RESPONSE_HEADER_KEY).isEmpty());
    Assert.assertTrue(
        partitionAssignmentMetadata2.get(0).equals(
            "{instanceFilter=[" + liveInstances.get(0) + ", " + liveInstances.get(1)
                + "], resourceFilter=[], returnFormat=IdealStateFormat}") ||
            partitionAssignmentMetadata2.get(0).equals(
                "{instanceFilter=[" + liveInstances.get(1) + ", " + liveInstances.get(0)
                    + "], resourceFilter=[], returnFormat=IdealStateFormat}"),
        partitionAssignmentMetadata2.get(0).toString());
    // Test partitionAssignment ResourceFilter
    String payload3 =
        "{\"Options\" : { \"ResourceFilter\" : [\"" + resources.get(0) + "\" , \"" + resources
            .get(1) + "\"] }} ";
    Response response3 =
        post(urlBase, null, Entity.entity(payload3, MediaType.APPLICATION_JSON_TYPE),
            Response.Status.OK.getStatusCode(), true);
    Map<String, Map<String, Map<String, String>>> resourceAssignments3 =
        readAssignments(response3.readEntity(String.class));
    Assert.assertEquals(resourceAssignments3.size(), 2);
    Assert.assertTrue(resourceAssignments3.containsKey(resources.get(0)));
    Assert.assertTrue(resourceAssignments3.containsKey(resources.get(1)));
    // Validate header (either filter order accepted, as above).
    MultivaluedMap<String, Object> headers3 = response3.getHeaders();
    Assert
        .assertTrue(headers3.containsKey(ResourceAssignmentOptimizerAccessor.RESPONSE_HEADER_KEY));
    List<Object> partitionAssignmentMetadata3 =
        headers3.get(ResourceAssignmentOptimizerAccessor.RESPONSE_HEADER_KEY);
    Assert.assertFalse(
        headers3.get(ResourceAssignmentOptimizerAccessor.RESPONSE_HEADER_KEY).isEmpty());
    Assert.assertTrue(
        partitionAssignmentMetadata3.get(0).equals(
            "{instanceFilter=[], resourceFilter=[" + resources.get(0) + ", " + resources.get(1)
                + "], returnFormat=IdealStateFormat}") ||
            partitionAssignmentMetadata3.get(0).equals(
                "{instanceFilter=[], resourceFilter=[" + resources.get(1) + ", " + resources.get(0)
                    + "], returnFormat=IdealStateFormat}"),
        partitionAssignmentMetadata3.get(0).toString());
    // Test Option CurrentState format with AddInstances, RemoveInstances and SwapInstances
    String payload4 = "{\"InstanceChange\" : { \"ActivateInstances\" : [\"" + toEnabledInstance
        + "\"], \"DeactivateInstances\" : [ \"" + toDeactivatedInstance + "\"] "
        + "}, \"Options\" : { \"ReturnFormat\" : \"CurrentStateFormat\" , \"ResourceFilter\" : [\""
        + resources.get(0) + "\" , \"" + resources.get(1) + "\"]} } ";
    Response response4 =
        post(urlBase, null, Entity.entity(payload4, MediaType.APPLICATION_JSON_TYPE),
            Response.Status.OK.getStatusCode(), true);
    Map<String, Map<String, Map<String, String>>> resourceAssignments4 =
        readAssignments(response4.readEntity(String.class));
    // Validate target resources exist. In CurrentStateFormat the outer key is the instance,
    // so resources appear at the second level.
    Set<String> resource4 = new HashSet<>();
    resourceAssignments4.forEach((k, v) -> v.forEach((kk, vv) -> resource4.add(kk)));
    Assert.assertTrue(resource4.contains(resources.get(0)));
    Assert.assertTrue(resource4.contains(resources.get(1)));
    // Validate header
    MultivaluedMap<String, Object> headers4 = response4.getHeaders();
    Assert
        .assertTrue(headers4.containsKey(ResourceAssignmentOptimizerAccessor.RESPONSE_HEADER_KEY));
    List<Object> partitionAssignmentMetadata4 =
        headers4.get(ResourceAssignmentOptimizerAccessor.RESPONSE_HEADER_KEY);
    Assert.assertFalse(
        headers4.get(ResourceAssignmentOptimizerAccessor.RESPONSE_HEADER_KEY).isEmpty());
    Assert.assertTrue(
        partitionAssignmentMetadata4.get(0).equals(
            "{instanceFilter=[], resourceFilter=[" + resources.get(0) + ", " + resources.get(1)
                + "], returnFormat=CurrentStateFormat}") ||
            partitionAssignmentMetadata4.get(0).equals(
                "{instanceFilter=[], resourceFilter=[" + resources.get(1) + ", " + resources.get(0)
                    + "], returnFormat=CurrentStateFormat}"),
        partitionAssignmentMetadata4.get(0).toString());
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * Repeats the core scenarios with the WAGED rebalancer, plus passing explicit
   * InstanceConfig overrides inside the InstanceChange payload.
   */
  @Test(dependsOnMethods = "testComputePartitionAssignment")
  public void testComputePartitionAssignmentWaged() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    // Use Waged for following tests
    for (String resource : resources) {
      IdealState idealState =
          _gSetupTool.getClusterManagementTool().getResourceIdealState(cluster, resource);
      idealState.setRebalancerClassName(
          "org.apache.helix.controller.rebalancer.waged.WagedRebalancer");
      _gSetupTool.getClusterManagementTool().setResourceIdealState(cluster, resource, idealState);
    }
    // Test AddInstances, RemoveInstances and SwapInstances
    String payload = "{\"InstanceChange\" : { \"ActivateInstances\" : [\"" + toEnabledInstance
        + "\"], \"DeactivateInstances\" : [ \"" + toDeactivatedInstance + "\"] }} ";
    String body = post(urlBase, null, Entity.entity(payload, MediaType.APPLICATION_JSON_TYPE),
        Response.Status.OK.getStatusCode(), true).readEntity(String.class);
    Map<String, Map<String, Map<String, String>>> resourceAssignments = readAssignments(body);
    Set<String> hostSet = new HashSet<>();
    resourceAssignments.forEach((k, v) -> v.forEach((kk, vv) -> hostSet.addAll(vv.keySet())));
    // Assert every partition has 2 replicas. Indicating we ignore the delayed rebalance when
    // recomputing partition assignment.
    resourceAssignments.forEach((k, v) -> v.forEach((kk, vv) -> Assert.assertEquals(vv.size(), 2)));
    Assert.assertTrue(hostSet.contains(toEnabledInstance));
    Assert.assertFalse(hostSet.contains(toDeactivatedInstance));
    // Test InstanceConfig overrides
    InstanceConfig toDeactivatedInstanceConfig =
        _gSetupTool.getClusterManagementTool().getInstanceConfig(cluster, toDeactivatedInstance);
    InstanceConfig toEnabledInstanceConfig =
        _gSetupTool.getClusterManagementTool().getInstanceConfig(cluster, toEnabledInstance);
    // Another way to mark the node as inactive or active.
    toDeactivatedInstanceConfig.setInstanceEnabled(false);
    toEnabledInstanceConfig.setInstanceEnabled(true);
    // Write the current InstanceConfigs record to json string
    StringWriter sw = new StringWriter();
    OBJECT_MAPPER.writeValue(sw, toDeactivatedInstanceConfig.getRecord());
    String toDeactivatedInstanceConfigStr = sw.toString();
    sw = new StringWriter();
    OBJECT_MAPPER.writeValue(sw, toEnabledInstanceConfig.getRecord());
    String toEnabledInstanceConfigStr = sw.toString();
    String payload1 =
        "{\"InstanceChange\" : { " + "\"InstanceConfigs\": [" + toDeactivatedInstanceConfigStr + ","
            + toEnabledInstanceConfigStr + "]}}";
    String body1 = post(urlBase, null, Entity.entity(payload1, MediaType.APPLICATION_JSON_TYPE),
        Response.Status.OK.getStatusCode(), true).readEntity(String.class);
    Map<String, Map<String, Map<String, String>>> resourceAssignments1 = readAssignments(body1);
    Set<String> hostSet1 = new HashSet<>();
    resourceAssignments1.forEach((k, v) -> v.forEach((kk, vv) -> hostSet1.addAll(vv.keySet())));
    // Assert every partition has 2 replicas.
    resourceAssignments1.forEach(
        (k, v) -> v.forEach((kk, vv) -> Assert.assertEquals(vv.size(), 2)));
    Assert.assertTrue(hostSet1.contains(toEnabledInstance));
    Assert.assertFalse(hostSet1.contains(toDeactivatedInstance));
    // Test partitionAssignment host filter
    String payload2 = "{\"Options\" : { \"InstanceFilter\" : [\"" + liveInstances.get(0) + "\" , \""
        + liveInstances.get(1) + "\"] }} ";
    String body2 = post(urlBase, null, Entity.entity(payload2, MediaType.APPLICATION_JSON_TYPE),
        Response.Status.OK.getStatusCode(), true).readEntity(String.class);
    Map<String, Map<String, Map<String, String>>> resourceAssignments2 = readAssignments(body2);
    Set<String> hostSet2 = new HashSet<>();
    resourceAssignments2.forEach((k, v) -> v.forEach((kk, vv) -> hostSet2.addAll(vv.keySet())));
    Assert.assertEquals(hostSet2.size(), 2);
    Assert.assertTrue(hostSet2.contains(liveInstances.get(0)));
    Assert.assertTrue(hostSet2.contains(liveInstances.get(1)));
    String payload3 =
        "{\"Options\" : { \"ResourceFilter\" : [\"" + resources.get(0) + "\" , \"" + resources.get(
            1) + "\"] }} ";
    String body3 = post(urlBase, null, Entity.entity(payload3, MediaType.APPLICATION_JSON_TYPE),
        Response.Status.OK.getStatusCode(), true).readEntity(String.class);
    Map<String, Map<String, Map<String, String>>> resourceAssignments3 = readAssignments(body3);
    Assert.assertEquals(resourceAssignments3.size(), 2);
    Assert.assertTrue(resourceAssignments3.containsKey(resources.get(0)));
    Assert.assertTrue(resourceAssignments3.containsKey(resources.get(1)));
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * Negative inputs: unknown instance name, malformed JSON payload, and any request
   * while the cluster is in maintenance mode must all return 400 BAD_REQUEST.
   */
  @Test(dependsOnMethods = "testComputePartitionAssignmentWaged")
  public void testComputePartitionAssignmentNegativeInput() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    // Test negative input: instance name that does not exist in the cluster.
    String payload4 = "{\"InstanceChange\" : { \"ActivateInstances\" : [\" nonExistInstanceName \"] }} ";
    post(urlBase, null, Entity.entity(payload4, MediaType.APPLICATION_JSON_TYPE),
        Response.Status.BAD_REQUEST.getStatusCode(), true);
    // Malformed JSON (extra '{') must be rejected.
    String payload5 =
        "{\"InstanceChange\" : { { \"ActivateInstances\" : [\"" + toDeactivatedInstance
            + "\"], \"DeactivateInstances\" : [\"" + toDeactivatedInstance + "\"] }} ";
    post(urlBase, null, Entity.entity(payload5, MediaType.APPLICATION_JSON_TYPE),
        Response.Status.BAD_REQUEST.getStatusCode(), true);
    // Currently we do not support maintenance mode
    _gSetupTool.getClusterManagementTool()
        .enableMaintenanceMode(cluster, true, TestHelper.getTestMethodName());
    String payload6 = "{}";
    post(urlBase, null, Entity.entity(payload6, MediaType.APPLICATION_JSON_TYPE),
        Response.Status.BAD_REQUEST.getStatusCode(), true);
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
}
| 9,302 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/TestInstancesAccessor.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import org.apache.helix.TestHelper;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.rest.server.resources.helix.InstancesAccessor;
import org.apache.helix.rest.server.util.JerseyUriRequestBuilder;
import org.apache.helix.tools.ClusterVerifiers.BestPossibleExternalViewVerifier;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * REST-level tests for the instances endpoints ("/clusters/{cluster}/instances"):
 * the batch "stoppable" health check (zone-based, with and without an explicit zone
 * order), the single-instance stoppable check, instance listing, batch
 * enable/disable, and instance-weight validation for WAGED clusters.
 */
public class TestInstancesAccessor extends AbstractTestClass {
// Cluster used by the listing / weight-validation tests; the stoppable tests use the
// dedicated STOPPABLE_CLUSTER prepared by AbstractTestClass.
private final static String CLUSTER_NAME = "TestCluster_0";
/**
 * Zone-based stoppable check with an explicit zone_order ("zone2", "zone1") and the
 * custom health-check categories skipped via the query parameter. Expects no instance
 * to be stoppable and verifies the failure reasons for instance5 and an instance that
 * does not exist in the cluster.
 */
@Test
public void testInstanceStoppable_zoneBased_zoneOrder() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
// Select instances with zone based
String content = String.format(
"{\"%s\":\"%s\",\"%s\":[\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\", \"%s\"], \"%s\":[\"%s\",\"%s\"]}",
InstancesAccessor.InstancesProperties.selection_base.name(),
InstancesAccessor.InstanceHealthSelectionBase.zone_based.name(),
InstancesAccessor.InstancesProperties.instances.name(), "instance0", "instance1",
"instance2", "instance3", "instance4", "instance5", "invalidInstance",
InstancesAccessor.InstancesProperties.zone_order.name(), "zone2", "zone1");
Response response = new JerseyUriRequestBuilder(
"clusters/{}/instances?command=stoppable&skipHealthCheckCategories=CUSTOM_INSTANCE_CHECK,CUSTOM_PARTITION_CHECK").format(
STOPPABLE_CLUSTER).post(this, Entity.entity(content, MediaType.APPLICATION_JSON_TYPE));
JsonNode jsonNode = OBJECT_MAPPER.readTree(response.readEntity(String.class));
// No instance should be reported as stoppable in parallel.
Assert.assertFalse(
jsonNode.withArray(InstancesAccessor.InstancesProperties.instance_stoppable_parallel.name())
.elements().hasNext());
JsonNode nonStoppableInstances = jsonNode.get(
InstancesAccessor.InstancesProperties.instance_not_stoppable_with_reasons.name());
Assert.assertEquals(getStringSet(nonStoppableInstances, "instance5"),
ImmutableSet.of("HELIX:EMPTY_RESOURCE_ASSIGNMENT", "HELIX:INSTANCE_NOT_ALIVE",
"HELIX:INSTANCE_NOT_STABLE"));
Assert.assertEquals(getStringSet(nonStoppableInstances, "invalidInstance"),
ImmutableSet.of("HELIX:INSTANCE_NOT_EXIST"));
System.out.println("End test :" + TestHelper.getTestMethodName());
}
/**
 * Zone-based stoppable check over all zones (no zone_order, no skipped categories).
 * Expects no instance to be stoppable and verifies the per-instance failure reasons.
 */
@Test(dependsOnMethods = "testInstanceStoppable_zoneBased_zoneOrder")
public void testInstancesStoppable_zoneBased() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
// Select instances with zone based
String content =
String.format("{\"%s\":\"%s\",\"%s\":[\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\", \"%s\"]}",
InstancesAccessor.InstancesProperties.selection_base.name(),
InstancesAccessor.InstanceHealthSelectionBase.zone_based.name(),
InstancesAccessor.InstancesProperties.instances.name(), "instance0", "instance1",
"instance2", "instance3", "instance4", "instance5", "invalidInstance");
Response response =
new JerseyUriRequestBuilder("clusters/{}/instances?command=stoppable").format(
STOPPABLE_CLUSTER).post(this, Entity.entity(content, MediaType.APPLICATION_JSON_TYPE));
JsonNode jsonNode = OBJECT_MAPPER.readTree(response.readEntity(String.class));
Assert.assertFalse(
jsonNode.withArray(InstancesAccessor.InstancesProperties.instance_stoppable_parallel.name())
.elements().hasNext());
JsonNode nonStoppableInstances = jsonNode.get(
InstancesAccessor.InstancesProperties.instance_not_stoppable_with_reasons.name());
Assert.assertEquals(getStringSet(nonStoppableInstances, "instance0"),
ImmutableSet.of("HELIX:MIN_ACTIVE_REPLICA_CHECK_FAILED"));
Assert.assertEquals(getStringSet(nonStoppableInstances, "instance1"),
ImmutableSet.of("HELIX:EMPTY_RESOURCE_ASSIGNMENT", "HELIX:INSTANCE_NOT_ENABLED",
"HELIX:INSTANCE_NOT_STABLE"));
Assert.assertEquals(getStringSet(nonStoppableInstances, "instance2"),
ImmutableSet.of("HELIX:MIN_ACTIVE_REPLICA_CHECK_FAILED"));
Assert.assertEquals(getStringSet(nonStoppableInstances, "instance3"),
ImmutableSet.of("HELIX:HAS_DISABLED_PARTITION", "HELIX:MIN_ACTIVE_REPLICA_CHECK_FAILED"));
Assert.assertEquals(getStringSet(nonStoppableInstances, "instance4"),
ImmutableSet.of("HELIX:EMPTY_RESOURCE_ASSIGNMENT", "HELIX:INSTANCE_NOT_ALIVE",
"HELIX:INSTANCE_NOT_STABLE"));
Assert.assertEquals(getStringSet(nonStoppableInstances, "invalidInstance"), ImmutableSet.of("HELIX:INSTANCE_NOT_EXIST"));
System.out.println("End test :" + TestHelper.getTestMethodName());
}
/**
 * Single-instance stoppable check: disabling instance0 (and one of its partitions)
 * must add the corresponding failed checks; re-enabling it must reduce the failures
 * to the min-active-replica check only.
 */
@Test(dependsOnMethods = "testInstancesStoppable_zoneBased")
public void testInstancesStoppable_disableOneInstance() throws IOException {
// Disable one selected instance0, it should failed to check
String instance = "instance0";
InstanceConfig instanceConfig = _configAccessor.getInstanceConfig(STOPPABLE_CLUSTER, instance);
instanceConfig.setInstanceEnabled(false);
instanceConfig.setInstanceEnabledForPartition("FakeResource", "FakePartition", false);
_configAccessor.setInstanceConfig(STOPPABLE_CLUSTER, instance, instanceConfig);
// It takes time to reflect the changes.
BestPossibleExternalViewVerifier verifier =
new BestPossibleExternalViewVerifier.Builder(STOPPABLE_CLUSTER).setZkAddr(ZK_ADDR).build();
Assert.assertTrue(verifier.verifyByPolling());
Entity entity = Entity.entity("\"{}\"", MediaType.APPLICATION_JSON_TYPE);
Response response = new JerseyUriRequestBuilder("clusters/{}/instances/{}/stoppable")
.format(STOPPABLE_CLUSTER, instance).post(this, entity);
JsonNode jsonResult = OBJECT_MAPPER.readTree(response.readEntity(String.class));
Assert.assertFalse(jsonResult.get("stoppable").asBoolean());
Assert.assertEquals(getStringSet(jsonResult, "failedChecks"),
ImmutableSet.of("HELIX:HAS_DISABLED_PARTITION","HELIX:INSTANCE_NOT_ENABLED","HELIX:INSTANCE_NOT_STABLE","HELIX:MIN_ACTIVE_REPLICA_CHECK_FAILED"));
// Reenable instance0, it should passed the check
instanceConfig.setInstanceEnabled(true);
instanceConfig.setInstanceEnabledForPartition("FakeResource", "FakePartition", true);
_configAccessor.setInstanceConfig(STOPPABLE_CLUSTER, instance, instanceConfig);
Assert.assertTrue(verifier.verifyByPolling());
entity = Entity.entity("\"{}\"", MediaType.APPLICATION_JSON_TYPE);
response = new JerseyUriRequestBuilder("clusters/{}/instances/{}/stoppable")
.format(STOPPABLE_CLUSTER, instance).post(this, entity);
jsonResult = OBJECT_MAPPER.readTree(response.readEntity(String.class));
// Still not stoppable, but only because of the min-active-replica constraint.
Assert.assertFalse(jsonResult.get("stoppable").asBoolean());
Assert.assertEquals(getStringSet(jsonResult, "failedChecks"), ImmutableSet.of("HELIX:MIN_ACTIVE_REPLICA_CHECK_FAILED"));
System.out.println("End test :" + TestHelper.getTestMethodName());
}
/**
 * GET "/clusters/{cluster}/instances" must list exactly the instances known to the
 * test cluster (tracked in _instancesMap by AbstractTestClass).
 */
@Test(dependsOnMethods = "testInstancesStoppable_disableOneInstance")
public void testGetAllInstances() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String body = new JerseyUriRequestBuilder("clusters/{}/instances").isBodyReturnExpected(true)
.format(CLUSTER_NAME).get(this);
JsonNode node = OBJECT_MAPPER.readTree(body);
String instancesStr =
node.get(InstancesAccessor.InstancesProperties.instances.name()).toString();
Assert.assertNotNull(instancesStr);
Set<String> instances = OBJECT_MAPPER.readValue(instancesStr,
OBJECT_MAPPER.getTypeFactory().constructCollectionType(Set.class, String.class));
Assert.assertEquals(instances, _instancesMap.get(CLUSTER_NAME), "Instances from response: "
+ instances + " vs instances actually: " + _instancesMap.get(CLUSTER_NAME));
System.out.println("End test :" + TestHelper.getTestMethodName());
}
/**
 * Batch disable/enable of instances with a disabled type and reason, verifying the
 * bookkeeping written into ClusterConfig. Disabled pending a storage-node fix.
 */
@Test(enabled = false)
public void testUpdateInstances() throws IOException {
// TODO: Reenable the test after storage node fix the problem
// Batch disable instances
List<String> instancesToDisable = Arrays.asList(new String[]{
CLUSTER_NAME + "localhost_12918",
CLUSTER_NAME + "localhost_12919", CLUSTER_NAME + "localhost_12920"});
Entity entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(ImmutableMap
.of(InstancesAccessor.InstancesProperties.instances.name(), instancesToDisable)),
MediaType.APPLICATION_JSON_TYPE);
post("clusters/" + CLUSTER_NAME + "/instances", ImmutableMap
.of("command", "disable", "instanceDisabledType", "USER_OPERATION",
"instanceDisabledReason", "reason_1"), entity, Response.Status.OK.getStatusCode());
ClusterConfig clusterConfig = _configAccessor.getClusterConfig(CLUSTER_NAME);
Assert.assertEquals(clusterConfig.getDisabledInstances().keySet(),
new HashSet<>(instancesToDisable));
Assert.assertEquals(clusterConfig.getDisabledInstancesWithInfo().keySet(),
new HashSet<>(instancesToDisable));
Assert
.assertEquals(clusterConfig.getInstanceHelixDisabledType(CLUSTER_NAME + "localhost_12918"),
"USER_OPERATION");
Assert.assertEquals(
clusterConfig.getInstanceHelixDisabledReason(CLUSTER_NAME + "localhost_12918"), "reason_1");
// Re-enable two of the three; only localhost_12919 should stay disabled.
instancesToDisable = Arrays
.asList(new String[]{CLUSTER_NAME + "localhost_12918", CLUSTER_NAME + "localhost_12920"});
entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(ImmutableMap
.of(InstancesAccessor.InstancesProperties.instances.name(), instancesToDisable)),
MediaType.APPLICATION_JSON_TYPE);
post("clusters/" + CLUSTER_NAME + "/instances", ImmutableMap
.of("command", "enable", "instanceDisabledType", "USER_OPERATION", "instanceDisabledReason",
"reason_1"), entity, Response.Status.OK.getStatusCode());
clusterConfig = _configAccessor.getClusterConfig(CLUSTER_NAME);
Assert.assertEquals(clusterConfig.getDisabledInstances().keySet(),
new HashSet<>(Arrays.asList(CLUSTER_NAME + "localhost_12919")));
Assert.assertEquals(clusterConfig.getDisabledInstancesWithInfo().keySet(),
new HashSet<>(Arrays.asList(CLUSTER_NAME + "localhost_12919")));
Assert.assertEquals(Long.parseLong(
clusterConfig.getInstanceHelixDisabledTimeStamp(CLUSTER_NAME + "localhost_12919")),
Long.parseLong(clusterConfig.getDisabledInstances().get(CLUSTER_NAME + "localhost_12919")));
Assert
.assertEquals(clusterConfig.getInstanceHelixDisabledType(CLUSTER_NAME + "localhost_12918"),
"INSTANCE_NOT_DISABLED");
Assert
.assertNull(clusterConfig.getInstanceHelixDisabledReason(CLUSTER_NAME + "localhost_12918"));
System.out.println("End test :" + TestHelper.getTestMethodName());
}
/**
 * "?command=validateWeight" behavior: all-valid when ClusterConfig declares no
 * capacity keys; error when keys are declared but instances carry no capacity maps;
 * all-valid again once every instance has a capacity map covering the keys.
 */
@Test(dependsOnMethods = "testGetAllInstances")
public void testValidateWeightForAllInstances() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
// Empty out ClusterConfig's weight key setting and InstanceConfig's capacity maps for testing
ClusterConfig clusterConfig = _configAccessor.getClusterConfig(CLUSTER_NAME);
clusterConfig.getRecord().setListField(
ClusterConfig.ClusterConfigProperty.INSTANCE_CAPACITY_KEYS.name(), new ArrayList<>());
_configAccessor.setClusterConfig(CLUSTER_NAME, clusterConfig);
List<String> instances =
_gSetupTool.getClusterManagementTool().getInstancesInCluster(CLUSTER_NAME);
for (String instance : instances) {
InstanceConfig instanceConfig = _configAccessor.getInstanceConfig(CLUSTER_NAME, instance);
instanceConfig.setInstanceCapacityMap(Collections.emptyMap());
_configAccessor.setInstanceConfig(CLUSTER_NAME, instance, instanceConfig);
}
// Issue a validate call
String body = new JerseyUriRequestBuilder("clusters/{}/instances?command=validateWeight")
.isBodyReturnExpected(true).format(CLUSTER_NAME).get(this);
JsonNode node = OBJECT_MAPPER.readTree(body);
// Must have the results saying they are all valid (true) because there's no capacity keys set
// in ClusterConfig
node.iterator().forEachRemaining(child -> Assert.assertTrue(child.booleanValue()));
clusterConfig = _configAccessor.getClusterConfig(CLUSTER_NAME);
clusterConfig.setInstanceCapacityKeys(Arrays.asList("FOO", "BAR"));
_configAccessor.setClusterConfig(CLUSTER_NAME, clusterConfig);
body = new JerseyUriRequestBuilder("clusters/{}/instances?command=validateWeight")
.isBodyReturnExpected(true).format(CLUSTER_NAME)
.expectedReturnStatusCode(Response.Status.BAD_REQUEST.getStatusCode()).get(this);
node = OBJECT_MAPPER.readTree(body);
// Since instances do not have weight-related configs, the result should return error
Assert.assertTrue(node.has("error"));
// Now set weight-related configs in InstanceConfigs
instances = _gSetupTool.getClusterManagementTool().getInstancesInCluster(CLUSTER_NAME);
for (String instance : instances) {
InstanceConfig instanceConfig = _configAccessor.getInstanceConfig(CLUSTER_NAME, instance);
instanceConfig.setInstanceCapacityMap(ImmutableMap.of("FOO", 1000, "BAR", 1000));
_configAccessor.setInstanceConfig(CLUSTER_NAME, instance, instanceConfig);
}
body = new JerseyUriRequestBuilder("clusters/{}/instances?command=validateWeight")
.isBodyReturnExpected(true).format(CLUSTER_NAME)
.expectedReturnStatusCode(Response.Status.OK.getStatusCode()).get(this);
node = OBJECT_MAPPER.readTree(body);
// Must have the results saying they are all valid (true) because capacity keys are set
// in ClusterConfig
node.iterator().forEachRemaining(child -> Assert.assertTrue(child.booleanValue()));
System.out.println("End test :" + TestHelper.getTestMethodName());
}
// Collects the text values of the JSON array stored under 'key' into a Set.
private Set<String> getStringSet(JsonNode jsonNode, String key) {
Set<String> result = new HashSet<>();
jsonNode.withArray(key).forEach(s -> result.add(s.textValue()));
return result;
}
}
| 9,303 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/TestNamespacedAPIAccess.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.PropertyKey;
import org.apache.helix.rest.common.HelixRestNamespace;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests that REST resources are correctly isolated per namespace: the default
 * namespace endpoint is disabled, clusters created in one namespace are invisible in
 * another, and the namespace listing reports exactly the configured namespaces.
 */
public class TestNamespacedAPIAccess extends AbstractTestClass {
// Mapper used to parse the /namespaces listing response.
ObjectMapper _mapper = new ObjectMapper();
/**
 * "/namespaces/default" itself is disabled (404), but cluster CRUD through the
 * default (un-prefixed) routes still works.
 */
@Test
public void testDefaultNamespaceDisabled() {
String testClusterName = "testDefaultNamespaceDisabled";
// "/namespaces/default" is disabled.
get(String.format("/namespaces/%s", HelixRestNamespace.DEFAULT_NAMESPACE_NAME), null, Response.Status.NOT_FOUND.getStatusCode(), false);
// Create a cluster.
put(String.format("/clusters/%s", testClusterName), null, Entity.entity("", MediaType.APPLICATION_JSON_TYPE),
Response.Status.CREATED.getStatusCode());
get(String.format("/clusters/%s", testClusterName), null, Response.Status.OK.getStatusCode(), false);
// Remove empty test cluster. Otherwise, it could fail ClusterAccessor tests
delete(String.format("/clusters/%s", testClusterName), Response.Status.OK.getStatusCode());
}
/**
 * Creates same-named clusters in the test namespace and the default namespace and
 * verifies that reads, writes and deletes in one namespace never leak into the other
 * (checked both through the REST API and directly against the two ZK base accessors).
 */
@Test(dependsOnMethods = "testDefaultNamespaceDisabled")
public void testNamespacedCRUD() throws IOException {
String testClusterName = "testClusterForNamespacedCRUD";
// Create cluster in test namespace and verify it's only appears in test namespace
put(String.format("/namespaces/%s/clusters/%s", TEST_NAMESPACE, testClusterName), null,
Entity.entity("", MediaType.APPLICATION_JSON_TYPE), Response.Status.CREATED.getStatusCode());
get(String.format("/namespaces/%s/clusters/%s", TEST_NAMESPACE, testClusterName), null,
Response.Status.OK.getStatusCode(), false);
get(String.format("/clusters/%s", testClusterName), null, Response.Status.NOT_FOUND.getStatusCode(), false);
// Create a cluster with same name in a different namespace
put(String.format("/clusters/%s", testClusterName), null,
Entity.entity("", MediaType.APPLICATION_JSON_TYPE), Response.Status.CREATED.getStatusCode());
get(String.format("/clusters/%s", testClusterName), null, Response.Status.OK.getStatusCode(), false);
// Modify cluster in default namespace
post(String.format("/clusters/%s", testClusterName), ImmutableMap.of("command", "disable"),
Entity.entity("", MediaType.APPLICATION_JSON_TYPE), Response.Status.OK.getStatusCode());
// Verify the cluster in default namespace is modified, while the one in test namespace is not.
PropertyKey.Builder keyBuilder = new PropertyKey.Builder(testClusterName);
Assert.assertTrue(_baseAccessor.exists(keyBuilder.pause().getPath(), 0));
Assert.assertFalse(_baseAccessorTestNS.exists(keyBuilder.pause().getPath(), 0));
// Verify that deleting cluster in one namespace will not affect the other
delete(String.format("/namespaces/%s/clusters/%s", TEST_NAMESPACE, testClusterName),
Response.Status.OK.getStatusCode());
get(String.format("/namespaces/%s/clusters/%s", TEST_NAMESPACE, testClusterName), null,
Response.Status.NOT_FOUND.getStatusCode(), false);
get(String.format("/clusters/%s", testClusterName), null, Response.Status.OK.getStatusCode(), false);
// Remove empty test clusters. Otherwise, it could fail ClusterAccessor tests
delete(String.format("/clusters/%s", testClusterName), Response.Status.OK.getStatusCode());
}
/**
 * "/namespaces" must list exactly the default namespace (flagged IS_DEFAULT) and the
 * test namespace; unknown namespaces and the bare root return 404.
 */
@Test(dependsOnMethods = "testNamespacedCRUD")
public void testNamespaceServer() throws IOException {
// Default endpoints should not have any namespace information returned
get("/", null, Response.Status.NOT_FOUND.getStatusCode(), false);
// Get invalid namespace should return not found
get("/namespaces/invalid-namespace", null, Response.Status.NOT_FOUND.getStatusCode(), false);
// list namespace should return a list of all namespaces
String body = get("/namespaces", null, Response.Status.OK.getStatusCode(), true);
List<Map<String, String>> namespaceMaps = _mapper
.readValue(body, _mapper.getTypeFactory().constructCollectionType(List.class, Map.class));
Assert.assertEquals(namespaceMaps.size(), 2);
Set<String> expectedNamespaceNames = new HashSet<>();
expectedNamespaceNames.add(HelixRestNamespace.DEFAULT_NAMESPACE_NAME);
expectedNamespaceNames.add(TEST_NAMESPACE);
for (Map<String, String> namespaceMap : namespaceMaps) {
String name = namespaceMap.get(HelixRestNamespace.HelixRestNamespaceProperty.NAME.name());
boolean isDefault = Boolean.parseBoolean(
namespaceMap.get(HelixRestNamespace.HelixRestNamespaceProperty.IS_DEFAULT.name()));
switch (name) {
case HelixRestNamespace.DEFAULT_NAMESPACE_NAME:
Assert.assertTrue(isDefault);
break;
case TEST_NAMESPACE:
Assert.assertFalse(isDefault);
break;
default:
Assert.assertFalse(true, "Namespace " + name + " is not expected");
break;
}
expectedNamespaceNames.remove(name);
}
// Every expected namespace must have appeared exactly once.
Assert.assertTrue(expectedNamespaceNames.isEmpty());
// "/namespaces/default" is disabled.
get(String.format("/namespaces/%s", HelixRestNamespace.DEFAULT_NAMESPACE_NAME), null,
Response.Status.NOT_FOUND.getStatusCode(), false);
}
}
| 9,304 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/TestOperationImpl.java
|
package org.apache.helix.rest.server;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyKey;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.rest.clusterMaintenanceService.MaintenanceManagementInstanceInfo;
import org.apache.helix.rest.clusterMaintenanceService.api.OperationInterface;
import org.apache.helix.rest.common.RestSnapShotSimpleImpl;
import org.apache.helix.rest.common.datamodel.RestSnapShot;
import org.apache.helix.task.TaskConstants;
import org.apache.helix.util.InstanceValidationUtil;
public class TestOperationImpl implements OperationInterface {
@Override
public MaintenanceManagementInstanceInfo operationCheckForTakeSingleInstance(String instanceName, Map<String, String> operationConfig, RestSnapShot sn) {
Map<String, Boolean> isInstanceOnHoldCache = new HashMap<>();
for (Map.Entry<String, String> entry : operationConfig.entrySet()) {
isInstanceOnHoldCache.put(entry.getKey(), Boolean.parseBoolean(entry.getValue()));
}
try {
String unHealthyPartition =
siblingNodesActiveReplicaCheck(sn, instanceName, isInstanceOnHoldCache);
if (unHealthyPartition == null) {
return new MaintenanceManagementInstanceInfo(
MaintenanceManagementInstanceInfo.OperationalStatus.SUCCESS);
} else {
return new MaintenanceManagementInstanceInfo(
MaintenanceManagementInstanceInfo.OperationalStatus.FAILURE,
Collections.singletonList(unHealthyPartition));
}
} catch (Exception ex) {
return new MaintenanceManagementInstanceInfo(
MaintenanceManagementInstanceInfo.OperationalStatus.FAILURE,
Collections.singletonList(ex.getMessage()));
}
}
@Override
public MaintenanceManagementInstanceInfo operationCheckForFreeSingleInstance(String instanceName, Map<String, String> operationConfig, RestSnapShot sn) {
return null;
}
@Override
public Map<String, MaintenanceManagementInstanceInfo> operationCheckForTakeInstances(Collection<String> instances, Map<String, String> operationConfig, RestSnapShot sn) {
return null;
}
@Override
public Map<String, MaintenanceManagementInstanceInfo> operationCheckForFreeInstances(Collection<String> instances, Map<String, String> operationConfig, RestSnapShot sn) {
return null;
}
@Override
public MaintenanceManagementInstanceInfo operationExecForTakeSingleInstance(String instanceName,
Map<String, String> operationConfig, RestSnapShot sn) {
return new MaintenanceManagementInstanceInfo(
MaintenanceManagementInstanceInfo.OperationalStatus.SUCCESS,
"DummyTakeOperationResult");
}
@Override
public MaintenanceManagementInstanceInfo operationExecForFreeSingleInstance(String instanceName,
Map<String, String> operationConfig, RestSnapShot sn) {
return new MaintenanceManagementInstanceInfo(
MaintenanceManagementInstanceInfo.OperationalStatus.SUCCESS,
"DummyFreeOperationResult");
}
@Override
public Map<String, MaintenanceManagementInstanceInfo> operationExecForTakeInstances(Collection<String> instances, Map<String, String> operationConfig, RestSnapShot sn) {
return null;
}
@Override
public Map<String, MaintenanceManagementInstanceInfo> operationExecForFreeInstances(Collection<String> instances, Map<String, String> operationConfig, RestSnapShot sn) {
return null;
}
public String siblingNodesActiveReplicaCheck(RestSnapShot snapShot, String instanceName,
Map<String, Boolean> isInstanceOnHoldCache) throws HelixException {
String clusterName = snapShot.getClusterName();
if (!(snapShot instanceof RestSnapShotSimpleImpl)) {
throw new HelixException("Passed in Snapshot is not an instance of RestSnapShotSimpleImpl");
} RestSnapShotSimpleImpl restSnapShotSimple = (RestSnapShotSimpleImpl) snapShot;
PropertyKey.Builder propertyKeyBuilder = new PropertyKey.Builder(clusterName);
List<String> resources = restSnapShotSimple.getChildNames(propertyKeyBuilder.idealStates());
for (String resourceName : resources) {
IdealState idealState =
restSnapShotSimple.getProperty(propertyKeyBuilder.idealStates(resourceName));
if (idealState == null || !idealState.isEnabled() || !idealState.isValid()
|| TaskConstants.STATE_MODEL_NAME.equals(idealState.getStateModelDefRef())) {
continue;
}
ExternalView externalView =
restSnapShotSimple.getProperty(propertyKeyBuilder.externalView(resourceName));
if (externalView == null) {
throw new HelixException(
String.format("Resource %s does not have external view!", resourceName));
}
// Get the minActiveReplicas constraint for the resource
int minActiveReplicas = externalView.getMinActiveReplicas();
if (minActiveReplicas == -1) {
continue;
}
String stateModeDef = externalView.getStateModelDefRef();
StateModelDefinition stateModelDefinition =
restSnapShotSimple.getProperty(propertyKeyBuilder.stateModelDef(stateModeDef));
Set<String> unhealthyStates = new HashSet<>(InstanceValidationUtil.UNHEALTHY_STATES);
if (stateModelDefinition != null) {
unhealthyStates.add(stateModelDefinition.getInitialState());
}
for (String partition : externalView.getPartitionSet()) {
Map<String, String> stateByInstanceMap = externalView.getStateMap(partition);
// found the resource hosted on the instance
if (stateByInstanceMap.containsKey(instanceName)) {
int numHealthySiblings = 0;
for (Map.Entry<String, String> entry : stateByInstanceMap.entrySet()) {
if (!entry.getKey().equals(instanceName) && !unhealthyStates.contains(entry.getValue())
&& !isInstanceOnHoldCache.get(entry.getKey())) {
numHealthySiblings++;
}
}
if (numHealthySiblings < minActiveReplicas) {
return partition;
}
}
}
}
return null;
}
}
| 9,305 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/TestHelixRestServer.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.helix.HelixException;
import org.apache.helix.TestHelper;
import org.apache.helix.rest.common.HelixRestNamespace;
import org.apache.helix.rest.server.auditlog.AuditLogger;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests that {@link HelixRestServer} rejects invalid namespace manifests at
 * construction time and refuses to start with more than one default namespace.
 */
public class TestHelixRestServer extends AbstractTestClass {
  /**
   * Construction must throw {@link IllegalArgumentException} for each of three bad
   * manifests: an invalid metadata store type, an empty namespace name, and a
   * duplicated namespace name.
   */
  @Test
  public void testInvalidHelixRestServerInitialization() {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    // Case 1: the metadata store type string is not a valid enum constant.
    try {
      List<HelixRestNamespace> badStoreTypeManifest = new ArrayList<>();
      badStoreTypeManifest.add(new HelixRestNamespace("test1",
          HelixRestNamespace.HelixMetadataStoreType.valueOf("InvalidMetadataStore"), ZK_ADDR,
          false));
      new HelixRestServer(badStoreTypeManifest, 10250, "/", Collections.<AuditLogger>emptyList());
      Assert.assertFalse(true, "InvalidManifest1 test failed");
    } catch (IllegalArgumentException e) {
      // expected: invalid metadata store type is rejected
    }
    // Case 2: a namespace with an empty name is rejected.
    try {
      List<HelixRestNamespace> emptyNameManifest = new ArrayList<>();
      emptyNameManifest.add(new HelixRestNamespace("",
          HelixRestNamespace.HelixMetadataStoreType.ZOOKEEPER, ZK_ADDR, true));
      new HelixRestServer(emptyNameManifest, 10250, "/", Collections.<AuditLogger>emptyList());
      Assert.assertFalse(true, "InvalidManifest2 test failed");
    } catch (IllegalArgumentException e) {
      // expected: empty namespace name is rejected
    }
    // Case 3: two namespaces sharing the same name are rejected.
    try {
      List<HelixRestNamespace> duplicateNameManifest = new ArrayList<>();
      duplicateNameManifest.add(new HelixRestNamespace("DuplicatedName",
          HelixRestNamespace.HelixMetadataStoreType.ZOOKEEPER, ZK_ADDR, false));
      duplicateNameManifest.add(new HelixRestNamespace("DuplicatedName",
          HelixRestNamespace.HelixMetadataStoreType.ZOOKEEPER, ZK_ADDR, false));
      new HelixRestServer(duplicateNameManifest, 10250, "/", Collections.<AuditLogger>emptyList());
      Assert.assertFalse(true, "InvalidManifest3 test failed");
    } catch (IllegalArgumentException e) {
      // expected: duplicated namespace name is rejected
    }
  }

  /**
   * A manifest with two default namespaces must fail on {@code start()}: both
   * namespaces map their servlets to the root path, which the servlet container
   * rejects (matched by expectedExceptionsMessageRegExp).
   */
  @Test(dependsOnMethods = "testInvalidHelixRestServerInitialization",
      expectedExceptions = {IllegalStateException.class, HelixException.class},
      expectedExceptionsMessageRegExp = ".*Multiple servlets map to path.*")
  public void testDefaultNamespaceFail() throws InterruptedException {
    List<HelixRestNamespace> twoDefaultsManifest = new ArrayList<>();
    twoDefaultsManifest.add(new HelixRestNamespace("test4-1",
        HelixRestNamespace.HelixMetadataStoreType.ZOOKEEPER, ZK_ADDR, true));
    twoDefaultsManifest.add(new HelixRestNamespace("test4-2",
        HelixRestNamespace.HelixMetadataStoreType.ZOOKEEPER, ZK_ADDR, true));
    HelixRestServer server =
        new HelixRestServer(twoDefaultsManifest, 10250, "/", Collections.<AuditLogger>emptyList());
    server.start();
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
}
| 9,306 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/TestResourceAccessor.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.AccessOption;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.TestHelper;
import org.apache.helix.controller.rebalancer.waged.WagedRebalancer;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.CustomizedView;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.model.builder.FullAutoModeISBuilder;
import org.apache.helix.rest.server.resources.helix.ResourceAccessor;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Integration tests for the {@link ResourceAccessor} REST endpoints: listing resources,
 * reading and mutating IdealState / ResourceConfig, reading ExternalView / CustomizedView,
 * partition- and resource-level health reporting, and WAGED-rebalancer commands.
 *
 * NOTE: the test methods form a single {@code dependsOnMethods} chain and mutate shared
 * cluster state, so they must execute in declaration order.
 */
public class TestResourceAccessor extends AbstractTestClass {
  private final static String CLUSTER_NAME = "TestCluster_0";
  private final static String RESOURCE_NAME = CLUSTER_NAME + "_db_0";
  // Placeholder instance name used when building dummy preference lists.
  private final static String ANY_INSTANCE = "ANY_LIVEINSTANCE";
  private final static String CUSTOMIZED_STATE_TYPE = "Customized_state_type_0";

  /**
   * GET /clusters/{cluster}/resources must return exactly the set of resources the
   * test harness created for the cluster.
   */
  @Test
  public void testGetResources() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String body = get("clusters/" + CLUSTER_NAME + "/resources", null,
        Response.Status.OK.getStatusCode(), true);
    JsonNode node = OBJECT_MAPPER.readTree(body);
    String idealStates =
        node.get(ResourceAccessor.ResourceProperties.idealStates.name()).toString();
    Assert.assertNotNull(idealStates);
    Set<String> resources = OBJECT_MAPPER.readValue(idealStates,
        OBJECT_MAPPER.getTypeFactory().constructCollectionType(Set.class, String.class));
    Assert.assertEquals(resources, _resourcesMap.get("TestCluster_0"), "Resources from response: "
        + resources + " vs clusters actually: " + _resourcesMap.get("TestCluster_0"));
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * GET of a single resource must return an IdealState identical to the one stored
   * through the cluster management tool.
   */
  @Test(dependsOnMethods = "testGetResources")
  public void testGetResource() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String body = get("clusters/" + CLUSTER_NAME + "/resources/" + RESOURCE_NAME, null,
        Response.Status.OK.getStatusCode(), true);
    JsonNode node = OBJECT_MAPPER.readTree(body);
    String idealStateStr =
        node.get(ResourceAccessor.ResourceProperties.idealState.name()).toString();
    IdealState idealState = new IdealState(toZNRecord(idealStateStr));
    IdealState originIdealState =
        _gSetupTool.getClusterManagementTool().getResourceIdealState(CLUSTER_NAME, RESOURCE_NAME);
    Assert.assertEquals(idealState, originIdealState);
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * Verifies both ways of adding a resource via PUT: posting a full IdealState record,
   * and passing the IdealState attributes as query parameters.
   */
  @Test(dependsOnMethods = "testGetResource")
  public void testAddResources() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String newResourceName = "newResource";
    IdealState idealState = new IdealState(newResourceName);
    // Clone the simple fields of the existing resource's IdealState as a template.
    idealState.getRecord().getSimpleFields().putAll(_gSetupTool.getClusterManagementTool()
        .getResourceIdealState(CLUSTER_NAME, RESOURCE_NAME).getRecord().getSimpleFields());
    // Add resource by IdealState
    Entity entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(idealState.getRecord()),
        MediaType.APPLICATION_JSON_TYPE);
    put("clusters/" + CLUSTER_NAME + "/resources/" + newResourceName, null, entity,
        Response.Status.OK.getStatusCode());
    Assert.assertEquals(idealState, _gSetupTool.getClusterManagementTool()
        .getResourceIdealState(CLUSTER_NAME, newResourceName));
    // Add resource by query param
    entity = Entity.entity("", MediaType.APPLICATION_JSON_TYPE);
    put("clusters/" + CLUSTER_NAME + "/resources/" + newResourceName + "0", ImmutableMap
        .of("numPartitions", "4", "stateModelRef", "OnlineOffline", "rebalancerMode", "FULL_AUTO"),
        entity, Response.Status.OK.getStatusCode());
    IdealState queryIdealState = new FullAutoModeISBuilder(newResourceName + 0).setNumPartitions(4)
        .setStateModel("OnlineOffline").setRebalancerMode(IdealState.RebalanceMode.FULL_AUTO)
        .setRebalanceStrategy("DEFAULT").build();
    Assert.assertEquals(queryIdealState, _gSetupTool.getClusterManagementTool()
        .getResourceIdealState(CLUSTER_NAME, newResourceName + "0"));
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * GET /configs for a resource must match the ResourceConfig read through the
   * ConfigAccessor.
   */
  @Test(dependsOnMethods = "testAddResources")
  public void testResourceConfig() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String body = get("clusters/" + CLUSTER_NAME + "/resources/" + RESOURCE_NAME + "/configs", null,
        Response.Status.OK.getStatusCode(), true);
    ResourceConfig resourceConfig = new ResourceConfig(toZNRecord(body));
    Assert.assertEquals(resourceConfig,
        _configAccessor.getResourceConfig(CLUSTER_NAME, RESOURCE_NAME));
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /** GET /idealState must match the IdealState stored in ZK. */
  @Test(dependsOnMethods = "testResourceConfig")
  public void testIdealState() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String body = get("clusters/" + CLUSTER_NAME + "/resources/" + RESOURCE_NAME + "/idealState",
        null, Response.Status.OK.getStatusCode(), true);
    IdealState idealState = new IdealState(toZNRecord(body));
    Assert.assertEquals(idealState,
        _gSetupTool.getClusterManagementTool().getResourceIdealState(CLUSTER_NAME, RESOURCE_NAME));
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /** GET /externalView must match the ExternalView stored in ZK. */
  @Test(dependsOnMethods = "testIdealState")
  public void testExternalView() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String body = get("clusters/" + CLUSTER_NAME + "/resources/" + RESOURCE_NAME + "/externalView",
        null, Response.Status.OK.getStatusCode(), true);
    ExternalView externalView = new ExternalView(toZNRecord(body));
    Assert.assertEquals(externalView, _gSetupTool.getClusterManagementTool()
        .getResourceExternalView(CLUSTER_NAME, RESOURCE_NAME));
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * Writes a CustomizedView ZNode directly and verifies that the
   * /{type}/customizedView endpoint returns it.
   */
  @Test(dependsOnMethods = "testExternalView")
  public void testCustomizedView() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    ZNRecord znRecord = new ZNRecord("test_customizedView");
    _baseAccessor
        .set(PropertyPathBuilder.customizedView(CLUSTER_NAME, CUSTOMIZED_STATE_TYPE, RESOURCE_NAME),
            znRecord, 1);
    String body =
        get("clusters/" + CLUSTER_NAME + "/resources/" + RESOURCE_NAME + "/" + CUSTOMIZED_STATE_TYPE
            + "/customizedView", null, Response.Status.OK.getStatusCode(), true);
    CustomizedView customizedView = new CustomizedView(toZNRecord(body));
    Assert.assertEquals(customizedView, _gSetupTool.getClusterManagementTool()
        .getResourceCustomizedView(CLUSTER_NAME, RESOURCE_NAME, CUSTOMIZED_STATE_TYPE));
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * Builds a dummy IdealState/ExternalView mapping and checks the per-partition
   * health endpoint: all replicas healthy => HEALTHY, >= MinActiveReplicas healthy
   * => PARTIAL_HEALTHY, otherwise UNHEALTHY.
   */
  @Test(dependsOnMethods = "testExternalView")
  public void testPartitionHealth() throws Exception {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String clusterName = "TestCluster_1";
    String resourceName = clusterName + "_db_0";
    // Disable the cluster to prevent external view from being removed
    _gSetupTool.getClusterManagementTool().enableCluster(clusterName, false);
    // Use mock numbers for testing
    Map<String, String> idealStateParams = new HashMap<>();
    idealStateParams.put("MinActiveReplicas", "2");
    idealStateParams.put("StateModelDefRef", "MasterSlave");
    idealStateParams.put("MaxPartitionsPerInstance", "3");
    idealStateParams.put("Replicas", "3");
    idealStateParams.put("NumPartitions", "3");
    // Create a mock state mapping for testing
    Map<String, List<String>> partitionReplicaStates = new LinkedHashMap<>();
    String[] p0 = {
        "MASTER", "SLAVE", "SLAVE"
    };
    String[] p1 = {
        "MASTER", "SLAVE", "ERROR"
    };
    String[] p2 = {
        "ERROR", "SLAVE", "SLAVE"
    };
    partitionReplicaStates.put("p0", Arrays.asList(p0));
    partitionReplicaStates.put("p1", Arrays.asList(p1));
    partitionReplicaStates.put("p2", Arrays.asList(p2));
    createDummyMapping(clusterName, resourceName, idealStateParams, partitionReplicaStates);
    // Get the result of getPartitionHealth
    String body = get("clusters/" + clusterName + "/resources/" + resourceName + "/health", null,
        Response.Status.OK.getStatusCode(), true);
    JsonNode node = OBJECT_MAPPER.readTree(body);
    Map<String, String> healthStatus =
        OBJECT_MAPPER.convertValue(node, new TypeReference<Map<String, String>>() {
        });
    Assert.assertEquals(healthStatus.get("p0"), "HEALTHY");
    Assert.assertEquals(healthStatus.get("p1"), "PARTIAL_HEALTHY");
    Assert.assertEquals(healthStatus.get("p2"), "UNHEALTHY");
    System.out.println("End test :" + TestHelper.getTestMethodName());
    // Re-enable the cluster
    _gSetupTool.getClusterManagementTool().enableCluster(clusterName, true);
  }

  /**
   * Creates three resources with fully-healthy, partially-healthy and unhealthy
   * replica states and checks the aggregated resource-level health endpoint.
   */
  @Test(dependsOnMethods = "testPartitionHealth")
  public void testResourceHealth() throws Exception {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String clusterName = "TestCluster_1";
    Map<String, String> idealStateParams = new HashMap<>();
    idealStateParams.put("MinActiveReplicas", "2");
    idealStateParams.put("StateModelDefRef", "MasterSlave");
    idealStateParams.put("MaxPartitionsPerInstance", "3");
    idealStateParams.put("Replicas", "3");
    idealStateParams.put("NumPartitions", "3");
    // Disable the cluster to prevent external view from being removed
    _gSetupTool.getClusterManagementTool().enableCluster(clusterName, false);
    // Create a healthy resource
    String resourceNameHealthy = clusterName + "_db_0";
    Map<String, List<String>> partitionReplicaStates = new LinkedHashMap<>();
    String[] p0 = {
        "MASTER", "SLAVE", "SLAVE"
    };
    String[] p1 = {
        "MASTER", "SLAVE", "SLAVE"
    };
    String[] p2 = {
        "MASTER", "SLAVE", "SLAVE"
    };
    partitionReplicaStates.put("p0", Arrays.asList(p0));
    partitionReplicaStates.put("p1", Arrays.asList(p1));
    partitionReplicaStates.put("p2", Arrays.asList(p2));
    createDummyMapping(clusterName, resourceNameHealthy, idealStateParams, partitionReplicaStates);
    // Create a partially healthy resource (one partition has an ERROR replica but
    // still meets MinActiveReplicas)
    String resourceNamePartiallyHealthy = clusterName + "_db_1";
    Map<String, List<String>> partitionReplicaStates_1 = new LinkedHashMap<>();
    String[] p0_1 = {
        "MASTER", "SLAVE", "SLAVE"
    };
    String[] p1_1 = {
        "MASTER", "SLAVE", "SLAVE"
    };
    String[] p2_1 = {
        "MASTER", "SLAVE", "ERROR"
    };
    partitionReplicaStates_1.put("p0", Arrays.asList(p0_1));
    partitionReplicaStates_1.put("p1", Arrays.asList(p1_1));
    partitionReplicaStates_1.put("p2", Arrays.asList(p2_1));
    createDummyMapping(clusterName, resourceNamePartiallyHealthy, idealStateParams,
        partitionReplicaStates_1);
    // Create an unhealthy resource (one partition falls below MinActiveReplicas)
    String resourceNameUnhealthy = clusterName + "_db_2";
    Map<String, List<String>> partitionReplicaStates_2 = new LinkedHashMap<>();
    String[] p0_2 = {
        "MASTER", "SLAVE", "SLAVE"
    };
    String[] p1_2 = {
        "MASTER", "SLAVE", "SLAVE"
    };
    String[] p2_2 = {
        "ERROR", "SLAVE", "ERROR"
    };
    partitionReplicaStates_2.put("p0", Arrays.asList(p0_2));
    partitionReplicaStates_2.put("p1", Arrays.asList(p1_2));
    partitionReplicaStates_2.put("p2", Arrays.asList(p2_2));
    createDummyMapping(clusterName, resourceNameUnhealthy, idealStateParams,
        partitionReplicaStates_2);
    // Get the result of getResourceHealth
    String body = get("clusters/" + clusterName + "/resources/health", null,
        Response.Status.OK.getStatusCode(), true);
    JsonNode node = OBJECT_MAPPER.readTree(body);
    Map<String, String> healthStatus =
        OBJECT_MAPPER.convertValue(node, new TypeReference<Map<String, String>>() {
        });
    Assert.assertEquals(healthStatus.get(resourceNameHealthy), "HEALTHY");
    Assert.assertEquals(healthStatus.get(resourceNamePartiallyHealthy), "PARTIAL_HEALTHY");
    Assert.assertEquals(healthStatus.get(resourceNameUnhealthy), "UNHEALTHY");
    System.out.println("End test :" + TestHelper.getTestMethodName());
    // Re-enable the cluster
    _gSetupTool.getClusterManagementTool().enableCluster(clusterName, true);
  }

  /**
   * Test "update" command of updateResourceConfig: adds fields, then modifies them,
   * verifying the merge semantics each time.
   * @throws Exception
   */
  @Test(dependsOnMethods = "testResourceHealth")
  public void updateResourceConfig() throws Exception {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    // Get ResourceConfig
    ResourceConfig resourceConfig = _configAccessor.getResourceConfig(CLUSTER_NAME, RESOURCE_NAME);
    ZNRecord record = resourceConfig.getRecord();
    // Generate a record containing three keys (k0, k1, k2) for all fields
    String value = "RESOURCE_TEST";
    for (int i = 0; i < 3; i++) {
      String key = "k" + i;
      record.getSimpleFields().put(key, value);
      record.getMapFields().put(key, ImmutableMap.of(key, value));
      record.getListFields().put(key, Arrays.asList(key, value));
    }
    // 1. Add these fields by way of "update"
    Entity entity =
        Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE);
    post("clusters/" + CLUSTER_NAME + "/resources/" + RESOURCE_NAME + "/configs",
        Collections.singletonMap("command", "update"), entity, Response.Status.OK.getStatusCode());
    // Check that the fields have been added
    ResourceConfig updatedConfig = _configAccessor.getResourceConfig(CLUSTER_NAME, RESOURCE_NAME);
    Assert.assertEquals(record.getSimpleFields(), updatedConfig.getRecord().getSimpleFields());
    Assert.assertEquals(record.getListFields(), updatedConfig.getRecord().getListFields());
    Assert.assertEquals(record.getMapFields(), updatedConfig.getRecord().getMapFields());
    String newValue = "newValue";
    // 2. Modify the record and update
    for (int i = 0; i < 3; i++) {
      String key = "k" + i;
      record.getSimpleFields().put(key, newValue);
      record.getMapFields().put(key, ImmutableMap.of(key, newValue));
      record.getListFields().put(key, Arrays.asList(key, newValue));
    }
    entity =
        Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE);
    post("clusters/" + CLUSTER_NAME + "/resources/" + RESOURCE_NAME + "/configs",
        Collections.singletonMap("command", "update"), entity, Response.Status.OK.getStatusCode());
    updatedConfig = _configAccessor.getResourceConfig(CLUSTER_NAME, RESOURCE_NAME);
    // Check that the fields have been modified
    Assert.assertEquals(record.getSimpleFields(), updatedConfig.getRecord().getSimpleFields());
    Assert.assertEquals(record.getListFields(), updatedConfig.getRecord().getListFields());
    Assert.assertEquals(record.getMapFields(), updatedConfig.getRecord().getMapFields());
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * Tests updateResourceConfig with a payload that is missing the record ID: the
   * request must be rejected with BAD_REQUEST and no ZNode may be created.
   * @throws Exception
   */
  @Test(dependsOnMethods = "updateResourceConfig")
  public void updateResourceConfigIDMissing() throws Exception {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    // An invalid input which does not have any ID
    String dummyInput = "{\"simpleFields\":{}}";
    String dummyResourceName = "RESOURCE_TEST_DUMMY";
    // Update the config with dummy input
    Entity entity = Entity.entity(dummyInput, MediaType.APPLICATION_JSON_TYPE);
    // As id field is missing, the response of the post request should be BAD_REQUEST
    post("clusters/" + CLUSTER_NAME + "/resources/" + dummyResourceName + "/configs", null, entity,
        Response.Status.BAD_REQUEST.getStatusCode());
    ResourceConfig resourceConfig =
        _configAccessor.getResourceConfig(CLUSTER_NAME, dummyResourceName);
    // Since the id is missing in the input, the znode should not get created.
    Assert.assertNull(resourceConfig);
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * Test "delete" command of updateResourceConfig: removes keys k1-k3 and verifies
   * that k0 (added by updateResourceConfig) survives.
   * @throws Exception
   */
  @Test(dependsOnMethods = "updateResourceConfigIDMissing")
  public void deleteFromResourceConfig() throws Exception {
    ZNRecord record = new ZNRecord(RESOURCE_NAME);
    // Generate a record containing three keys (k1, k2, k3) for all fields for deletion
    String value = "value";
    for (int i = 1; i < 4; i++) {
      String key = "k" + i;
      record.getSimpleFields().put(key, value);
      record.getMapFields().put(key, ImmutableMap.of(key, value));
      record.getListFields().put(key, Arrays.asList(key, value));
    }
    // Remove these fields by way of "delete"
    Entity entity =
        Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE);
    post("clusters/" + CLUSTER_NAME + "/resources/" + RESOURCE_NAME + "/configs",
        Collections.singletonMap("command", "delete"), entity, Response.Status.OK.getStatusCode());
    ResourceConfig configAfterDelete =
        _configAccessor.getResourceConfig(CLUSTER_NAME, RESOURCE_NAME);
    // Check that the keys k1, k2 and k3 have been deleted, and k0 remains
    for (int i = 0; i < 4; i++) {
      String key = "k" + i;
      if (i == 0) {
        Assert.assertTrue(configAfterDelete.getRecord().getSimpleFields().containsKey(key));
        Assert.assertTrue(configAfterDelete.getRecord().getListFields().containsKey(key));
        Assert.assertTrue(configAfterDelete.getRecord().getMapFields().containsKey(key));
        continue;
      }
      Assert.assertFalse(configAfterDelete.getRecord().getSimpleFields().containsKey(key));
      Assert.assertFalse(configAfterDelete.getRecord().getListFields().containsKey(key));
      Assert.assertFalse(configAfterDelete.getRecord().getMapFields().containsKey(key));
    }
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * Test "update" command of updateResourceIdealState: writes the existing record
   * back (no-op), then modifies keys k0-k2 and verifies the merge.
   * @throws Exception
   */
  @Test(dependsOnMethods = "deleteFromResourceConfig")
  public void updateResourceIdealState() throws Exception {
    // Get IdealState ZNode
    String zkPath = PropertyPathBuilder.idealState(CLUSTER_NAME, RESOURCE_NAME);
    ZNRecord record = _baseAccessor.get(zkPath, null, AccessOption.PERSISTENT);
    // 1. Add these fields by way of "update"
    Entity entity =
        Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE);
    post("clusters/" + CLUSTER_NAME + "/resources/" + RESOURCE_NAME + "/idealState",
        Collections.singletonMap("command", "update"), entity, Response.Status.OK.getStatusCode());
    // Check that the fields have been added
    ZNRecord newRecord = _baseAccessor.get(zkPath, null, AccessOption.PERSISTENT);
    Assert.assertEquals(record.getSimpleFields(), newRecord.getSimpleFields());
    Assert.assertEquals(record.getListFields(), newRecord.getListFields());
    Assert.assertEquals(record.getMapFields(), newRecord.getMapFields());
    String newValue = "newValue";
    // 2. Modify the record and update
    for (int i = 0; i < 3; i++) {
      String key = "k" + i;
      record.getSimpleFields().put(key, newValue);
      record.getMapFields().put(key, ImmutableMap.of(key, newValue));
      record.getListFields().put(key, Arrays.asList(key, newValue));
    }
    entity =
        Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE);
    post("clusters/" + CLUSTER_NAME + "/resources/" + RESOURCE_NAME + "/idealState",
        Collections.singletonMap("command", "update"), entity, Response.Status.OK.getStatusCode());
    // Check that the fields have been modified
    newRecord = _baseAccessor.get(zkPath, null, AccessOption.PERSISTENT);
    Assert.assertEquals(record.getSimpleFields(), newRecord.getSimpleFields());
    Assert.assertEquals(record.getListFields(), newRecord.getListFields());
    Assert.assertEquals(record.getMapFields(), newRecord.getMapFields());
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * Test "enableWagedRebalance" command of updateResource: the rebalancer class name
   * must change to {@link WagedRebalancer}.
   */
  @Test(dependsOnMethods = "updateResourceIdealState")
  public void testEnableWagedRebalance() {
    IdealState idealState =
        _gSetupTool.getClusterManagementTool().getResourceIdealState(CLUSTER_NAME, RESOURCE_NAME);
    // BUGFIX: assertNotSame only compares references, so two distinct String objects
    // always pass; assertNotEquals actually verifies the pre-condition.
    Assert.assertNotEquals(idealState.getRebalancerClassName(), WagedRebalancer.class.getName());
    // Enable waged rebalance, which should change the rebalancer class name
    Entity entity = Entity.entity(null, MediaType.APPLICATION_JSON_TYPE);
    post("clusters/" + CLUSTER_NAME + "/resources/" + RESOURCE_NAME,
        Collections.singletonMap("command", "enableWagedRebalance"), entity,
        Response.Status.OK.getStatusCode());
    idealState =
        _gSetupTool.getClusterManagementTool().getResourceIdealState(CLUSTER_NAME, RESOURCE_NAME);
    Assert.assertEquals(idealState.getRebalancerClassName(), WagedRebalancer.class.getName());
  }

  /**
   * Test "delete" command of updateResourceIdealState: removes keys k1-k3 and
   * verifies that k0 (added by updateResourceIdealState) survives.
   * @throws Exception
   */
  @Test(dependsOnMethods = "testEnableWagedRebalance")
  public void deleteFromResourceIdealState() throws Exception {
    String zkPath = PropertyPathBuilder.idealState(CLUSTER_NAME, RESOURCE_NAME);
    ZNRecord record = new ZNRecord(RESOURCE_NAME);
    // Generate a record containing three keys (k1, k2, k3) for all fields for deletion
    String value = "value";
    for (int i = 1; i < 4; i++) {
      String key = "k" + i;
      record.getSimpleFields().put(key, value);
      record.getMapFields().put(key, ImmutableMap.of(key, value));
      record.getListFields().put(key, Arrays.asList(key, value));
    }
    // Remove these fields by way of "delete"
    Entity entity =
        Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE);
    post("clusters/" + CLUSTER_NAME + "/resources/" + RESOURCE_NAME + "/idealState",
        Collections.singletonMap("command", "delete"), entity, Response.Status.OK.getStatusCode());
    ZNRecord recordAfterDelete = _baseAccessor.get(zkPath, null, AccessOption.PERSISTENT);
    // Check that the keys k1, k2 and k3 have been deleted, and k0 remains
    for (int i = 0; i < 4; i++) {
      String key = "k" + i;
      if (i == 0) {
        Assert.assertTrue(recordAfterDelete.getSimpleFields().containsKey(key));
        Assert.assertTrue(recordAfterDelete.getListFields().containsKey(key));
        Assert.assertTrue(recordAfterDelete.getMapFields().containsKey(key));
        continue;
      }
      Assert.assertFalse(recordAfterDelete.getSimpleFields().containsKey(key));
      Assert.assertFalse(recordAfterDelete.getListFields().containsKey(key));
      Assert.assertFalse(recordAfterDelete.getMapFields().containsKey(key));
    }
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * Test "addWagedResource" command: a valid IdealState+ResourceConfig pair with
   * partition weights succeeds; a pair without weights is rejected.
   */
  @Test(dependsOnMethods = "deleteFromResourceIdealState")
  public void testAddResourceWithWeight() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    // Test case 1: Add a valid resource with valid weights
    // Create a resource with IdealState and ResourceConfig
    String wagedResourceName = "newWagedResource";
    // Create an IdealState on full-auto with 1 partition
    IdealState idealState = new IdealState(wagedResourceName);
    idealState.getRecord().getSimpleFields().putAll(_gSetupTool.getClusterManagementTool()
        .getResourceIdealState(CLUSTER_NAME, RESOURCE_NAME).getRecord().getSimpleFields());
    idealState.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
    idealState.setRebalancerClassName(WagedRebalancer.class.getName());
    idealState.setNumPartitions(1); // 1 partition for convenience of testing
    // Create a ResourceConfig with FOO and BAR at 100 respectively
    ResourceConfig resourceConfig = new ResourceConfig(wagedResourceName);
    Map<String, Map<String, Integer>> partitionCapacityMap = new HashMap<>();
    Map<String, Integer> partitionCapacity = ImmutableMap.of("FOO", 100, "BAR", 100);
    partitionCapacityMap.put(wagedResourceName + "_0", partitionCapacity);
    // Also add a default key
    partitionCapacityMap.put(ResourceConfig.DEFAULT_PARTITION_KEY, partitionCapacity);
    resourceConfig.setPartitionCapacityMap(partitionCapacityMap);
    // Put both IdealState and ResourceConfig into a map as required
    Map<String, ZNRecord> inputMap = ImmutableMap.of(
        ResourceAccessor.ResourceProperties.idealState.name(), idealState.getRecord(),
        ResourceAccessor.ResourceProperties.resourceConfig.name(), resourceConfig.getRecord());
    // Create an entity using the inputMap
    Entity entity =
        Entity.entity(OBJECT_MAPPER.writeValueAsString(inputMap), MediaType.APPLICATION_JSON_TYPE);
    // Make a HTTP call to the REST endpoint
    put("clusters/" + CLUSTER_NAME + "/resources/" + wagedResourceName,
        ImmutableMap.of("command", "addWagedResource"), entity, Response.Status.OK.getStatusCode());
    // Test case 2: Add a resource with invalid weights
    String invalidResourceName = "invalidWagedResource";
    ResourceConfig invalidWeightResourceConfig = new ResourceConfig(invalidResourceName);
    IdealState invalidWeightIdealState = new IdealState(invalidResourceName);
    Map<String, ZNRecord> invalidInputMap = ImmutableMap.of(
        ResourceAccessor.ResourceProperties.idealState.name(), invalidWeightIdealState.getRecord(),
        ResourceAccessor.ResourceProperties.resourceConfig.name(),
        invalidWeightResourceConfig.getRecord());
    // Create an entity using invalidInputMap
    entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(invalidInputMap),
        MediaType.APPLICATION_JSON_TYPE);
    // Make a HTTP call to the REST endpoint
    put("clusters/" + CLUSTER_NAME + "/resources/" + invalidResourceName,
        ImmutableMap.of("command", "addWagedResource"), entity,
        Response.Status.BAD_REQUEST.getStatusCode());
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * Test "validateWeight" command: fails while instances carry no capacity config,
   * passes once capacities are restored.
   */
  @Test(dependsOnMethods = "testAddResourceWithWeight")
  public void testValidateResource() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    // Define weight keys in ClusterConfig
    ClusterConfig clusterConfig = _configAccessor.getClusterConfig(CLUSTER_NAME);
    clusterConfig.setInstanceCapacityKeys(Arrays.asList("FOO", "BAR"));
    _configAccessor.setClusterConfig(CLUSTER_NAME, clusterConfig);
    // Remove all weight configs in InstanceConfig for testing
    for (String instance : _instancesMap.get(CLUSTER_NAME)) {
      InstanceConfig instanceConfig = _configAccessor.getInstanceConfig(CLUSTER_NAME, instance);
      instanceConfig.setInstanceCapacityMap(Collections.emptyMap());
      _configAccessor.setInstanceConfig(CLUSTER_NAME, instance, instanceConfig);
    }
    // Validate the resource added in testAddResourceWithWeight()
    String resourceToValidate = "newWagedResource";
    // This should fail because none of the instances have weight configured
    get("clusters/" + CLUSTER_NAME + "/resources/" + resourceToValidate,
        ImmutableMap.of("command", "validateWeight"), Response.Status.BAD_REQUEST.getStatusCode(),
        true);
    // Add back weight configurations to all instance configs
    Map<String, Integer> instanceCapacityMap = ImmutableMap.of("FOO", 1000, "BAR", 1000);
    for (String instance : _instancesMap.get(CLUSTER_NAME)) {
      InstanceConfig instanceConfig = _configAccessor.getInstanceConfig(CLUSTER_NAME, instance);
      instanceConfig.setInstanceCapacityMap(instanceCapacityMap);
      _configAccessor.setInstanceConfig(CLUSTER_NAME, instance, instanceConfig);
    }
    // Now try validating again - it should go through and return a 200
    String body = get("clusters/" + CLUSTER_NAME + "/resources/" + resourceToValidate,
        ImmutableMap.of("command", "validateWeight"), Response.Status.OK.getStatusCode(), true);
    JsonNode node = OBJECT_MAPPER.readTree(body);
    Assert.assertEquals(node.get(resourceToValidate).toString(), "true");
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  /**
   * Creates a setup where the health API can be tested: a disabled IdealState plus a
   * hand-crafted ExternalView reflecting the requested replica states.
   * @param clusterName cluster to (lazily) create and populate
   * @param resourceName resource whose IdealState/ExternalView are written
   * @param idealStateParams IdealState attributes (MinActiveReplicas, Replicas, ...)
   * @param partitionReplicaStates maps partitionName to its replicas' states
   * @throws Exception on Helix connection/setup failure
   */
  private void createDummyMapping(String clusterName, String resourceName,
      Map<String, String> idealStateParams, Map<String, List<String>> partitionReplicaStates)
      throws Exception {
    IdealState idealState = new IdealState(resourceName);
    idealState.setMinActiveReplicas(Integer.parseInt(idealStateParams.get("MinActiveReplicas"))); // 2
    idealState.setStateModelDefRef(idealStateParams.get("StateModelDefRef")); // MasterSlave
    idealState.setMaxPartitionsPerInstance(
        Integer.parseInt(idealStateParams.get("MaxPartitionsPerInstance"))); // 3
    idealState.setReplicas(idealStateParams.get("Replicas")); // 3
    idealState.setNumPartitions(Integer.parseInt(idealStateParams.get("NumPartitions"))); // 3
    idealState.enable(false);
    // BUGFIX: previously a single mutable list was shared by every partition key while
    // still being appended to; build the full preference list first, then give each
    // partition its own copy (final content is unchanged).
    int maxPartitionsPerInstance = Integer.parseInt(idealStateParams.get("MaxPartitionsPerInstance"));
    List<String> dummyPrefList = new ArrayList<>();
    for (int i = 0; i < maxPartitionsPerInstance; i++) {
      dummyPrefList.add(ANY_INSTANCE);
    }
    Map<String, List<String>> partitionNames = new LinkedHashMap<>();
    for (int i = 0; i < maxPartitionsPerInstance; i++) {
      partitionNames.put("p" + i, new ArrayList<>(dummyPrefList));
    }
    idealState.getRecord().getListFields().putAll(partitionNames);
    if (!_gSetupTool.getClusterManagementTool().getClusters().contains(clusterName)) {
      _gSetupTool.getClusterManagementTool().addCluster(clusterName);
    }
    _gSetupTool.getClusterManagementTool().setResourceIdealState(clusterName, resourceName,
        idealState);
    // Set ExternalView's replica states for a given parameter map
    ExternalView externalView = new ExternalView(resourceName);
    Map<String, Map<String, String>> mappingCurrent = new LinkedHashMap<>();
    List<String> partitionReplicaStatesList = new ArrayList<>(partitionReplicaStates.keySet());
    for (int k = 0; k < partitionReplicaStatesList.size(); k++) {
      Map<String, String> replicaStatesForPartition = new LinkedHashMap<>();
      List<String> replicaStateList = partitionReplicaStates.get(partitionReplicaStatesList.get(k));
      for (int i = 0; i < replicaStateList.size(); i++) {
        replicaStatesForPartition.put("r" + i, replicaStateList.get(i));
      }
      mappingCurrent.put("p" + k, replicaStatesForPartition);
    }
    externalView.getRecord().getMapFields().putAll(mappingCurrent);
    HelixManager helixManager = HelixManagerFactory.getZKHelixManager(clusterName, "p1",
        InstanceType.ADMINISTRATOR, ZK_ADDR);
    helixManager.connect();
    try {
      HelixDataAccessor helixDataAccessor = helixManager.getHelixDataAccessor();
      helixDataAccessor.setProperty(helixDataAccessor.keyBuilder().externalView(resourceName),
          externalView);
    } finally {
      // BUGFIX: the manager connection was previously leaked.
      helixManager.disconnect();
    }
  }
}
| 9,307 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/AbstractTestClass.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.logging.Level;
import javax.ws.rs.client.Entity;
import javax.ws.rs.client.WebTarget;
import javax.ws.rs.core.Application;
import javax.ws.rs.core.Response;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.google.common.base.Joiner;
import org.apache.helix.AccessOption;
import org.apache.helix.BaseDataAccessor;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.PropertyType;
import org.apache.helix.TestHelper;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.integration.task.MockTask;
import org.apache.helix.integration.task.TaskTestUtil;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.participant.StateMachineEngine;
import org.apache.helix.rest.common.ContextPropertyKeys;
import org.apache.helix.rest.common.HelixRestNamespace;
import org.apache.helix.rest.server.auditlog.AuditLog;
import org.apache.helix.rest.server.auditlog.AuditLogger;
import org.apache.helix.rest.server.filters.AuditLogFilter;
import org.apache.helix.rest.server.resources.AbstractResource;
import org.apache.helix.store.HelixPropertyStore;
import org.apache.helix.store.zk.ZkHelixPropertyStore;
import org.apache.helix.task.JobConfig;
import org.apache.helix.task.JobContext;
import org.apache.helix.task.TaskConstants;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.TaskFactory;
import org.apache.helix.task.TaskPartitionState;
import org.apache.helix.task.TaskState;
import org.apache.helix.task.TaskStateModelFactory;
import org.apache.helix.task.TaskUtil;
import org.apache.helix.task.Workflow;
import org.apache.helix.task.WorkflowContext;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.util.ZKClientPool;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.impl.factory.DedicatedZkClientFactory;
import org.apache.helix.zookeeper.introspect.CodehausJacksonIntrospector;
import org.apache.helix.zookeeper.zkclient.ZkServer;
import org.glassfish.jersey.client.ClientConfig;
import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.test.DeploymentContext;
import org.glassfish.jersey.test.JerseyTestNg;
import org.glassfish.jersey.test.spi.TestContainer;
import org.glassfish.jersey.test.spi.TestContainerException;
import org.glassfish.jersey.test.spi.TestContainerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
/**
 * Shared base class for the Helix REST server test suite.
 *
 * <p>Boots one or more embedded ZooKeeper servers, a Jersey test container backed by a shared
 * {@link HelixRestServer}, and a set of Helix clusters populated with instances, resources,
 * task workflows and controllers that concrete test classes exercise over HTTP.
 *
 * <p>All fixture state lives in static fields so it is shared across every test class in the
 * suite; {@link #beforeSuite()} initializes it exactly once (guarded by {@code _init}) and
 * {@link #afterSuite()} tears it down once at the end.
 */
public class AbstractTestClass extends JerseyTestNg.ContainerPerClassTest {
  /**
   * Constants for multi-ZK environment.
   */
  private static final String MULTI_ZK_PROPERTY_KEY = "multiZk";
  private static final String NUM_ZK_PROPERTY_KEY = "numZk";
  protected static final String ZK_PREFIX = "localhost:";
  protected static final int ZK_START_PORT = 2123;
  // The following map must be a static map because it needs to be shared throughout tests
  protected static final Map<String, ZkServer> ZK_SERVER_MAP = new HashMap<>();
  // For a single-ZK/Helix environment
  protected static final String ZK_ADDR = "localhost:2123";
  protected static final String WORKFLOW_PREFIX = "Workflow_";
  protected static final String JOB_PREFIX = "JOB";
  protected static int NUM_PARTITIONS = 10;
  protected static int NUM_REPLICA = 2;
  // NOTE(review): MIN_ACTIVE_REPLICA (3) is greater than NUM_REPLICA (2) -- confirm this is
  // intentional for the test fixture.
  protected static int MIN_ACTIVE_REPLICA = 3;
  protected static ZkServer _zkServer;
  protected static HelixZkClient _gZkClient;
  protected static ClusterSetup _gSetupTool;
  protected static ConfigAccessor _configAccessor;
  protected static BaseDataAccessor<ZNRecord> _baseAccessor;
  protected static ObjectMapper OBJECT_MAPPER = new ObjectMapper();
  // Reader configured with the Codehaus-compatible introspector so legacy ZNRecord JSON parses.
  protected static ObjectReader ZNRECORD_READER = new ObjectMapper()
      .setAnnotationIntrospector(new CodehausJacksonIntrospector())
      .readerFor(ZNRecord.class);
  // Guards the one-time suite setup performed in beforeSuite().
  protected static boolean _init = false;
  // For testing namespaced access
  protected static ZkServer _zkServerTestNS;
  protected static final String _zkAddrTestNS = "localhost:2124";
  protected static final String TEST_NAMESPACE = "test-namespace";
  protected static HelixZkClient _gZkClientTestNS;
  protected static BaseDataAccessor<ZNRecord> _baseAccessorTestNS;
  protected static final String STOPPABLE_CLUSTER = "StoppableTestCluster";
  protected static final String TASK_TEST_CLUSTER = "TaskTestCluster";
  protected static final List<String> STOPPABLE_INSTANCES =
      Arrays.asList("instance0", "instance1", "instance2", "instance3", "instance4", "instance5");
  // Per-cluster fixture inventories shared by all test classes.
  protected static Set<String> _clusters;
  protected static String _superCluster = "superCluster";
  protected static Map<String, Set<String>> _instancesMap = new HashMap<>();
  protected static Map<String, Set<String>> _liveInstancesMap = new HashMap<>();
  protected static Map<String, Set<String>> _resourcesMap = new HashMap<>();
  protected static Map<String, Map<String, Workflow>> _workflowMap = new HashMap<>();
  protected static List<ClusterControllerManager> _clusterControllerManagers = new ArrayList<>();
  protected static List<MockParticipantManager> _mockParticipantManagers = new ArrayList<>();
  protected static MockAuditLogger _auditLogger = new MockAuditLogger();
  protected static HelixRestServer _helixRestServer;
  /** In-memory {@link AuditLogger} that records audit entries so tests can assert on them. */
  protected static class MockAuditLogger implements AuditLogger {
    List<AuditLog> _auditLogList = new ArrayList<>();
    @Override
    public void write(AuditLog auditLog) {
      _auditLogList.add(auditLog);
    }
    /** Clears all recorded audit entries. */
    public void clearupLogs() {
      _auditLogList.clear();
    }
    /** Returns the recorded audit entries in write order. */
    public List<AuditLog> getAuditLogs() {
      return _auditLogList;
    }
  }
  /**
   * Builds the Jersey application: registers every resource in the {@link AbstractResource}
   * package, attaches a {@link ServerContext} for the primary ZK, and installs an audit-log
   * filter.
   */
  @Override
  protected Application configure() {
    // Configure server context
    ResourceConfig resourceConfig = new ResourceConfig();
    resourceConfig.packages(AbstractResource.class.getPackage().getName());
    ServerContext serverContext = new ServerContext(ZK_ADDR);
    resourceConfig.property(ContextPropertyKeys.SERVER_CONTEXT.name(), serverContext);
    // NOTE(review): this filter wraps a brand-new MockAuditLogger rather than the shared
    // _auditLogger used by startRestServer() -- confirm that is intended.
    resourceConfig.register(new AuditLogFilter(Collections.singletonList(new MockAuditLogger())));
    return resourceConfig;
  }
  /**
   * Supplies a test container that, instead of deploying the Jersey app itself, lazily starts
   * the shared {@link HelixRestServer} on first use. {@code stop()} is a no-op so the server
   * survives across test classes; it is shut down once in {@link #afterSuite()}.
   */
  @Override
  protected TestContainerFactory getTestContainerFactory()
      throws TestContainerException {
    return new TestContainerFactory() {
      @Override
      public TestContainer create(final URI baseUri, DeploymentContext deploymentContext) {
        return new TestContainer() {
          @Override
          public ClientConfig getClientConfig() {
            return null;
          }
          @Override
          public URI getBaseUri() {
            return baseUri;
          }
          @Override
          public void start() {
            // Start the shared REST server only once for the whole suite.
            if (_helixRestServer == null) {
              _helixRestServer = startRestServer();
            }
          }
          @Override
          public void stop() {
            // Intentionally empty: the shared REST server is torn down in afterSuite().
          }
        };
      }
    };
  }
  /**
   * One-time suite setup: starts ZooKeeper server(s), builds ZK clients and accessors, and
   * creates the Helix clusters/resources used by the tests. Idempotent via {@code _init}.
   * @throws Exception on any setup failure
   */
  @BeforeSuite
  public void beforeSuite()
      throws Exception {
    if (!_init) {
      setupZooKeepers();
      // TODO: use logging.properties file to config java.util.logging.Logger levels
      java.util.logging.Logger topJavaLogger = java.util.logging.Logger.getLogger("");
      topJavaLogger.setLevel(Level.WARNING);
      HelixZkClient.ZkClientConfig clientConfig = new HelixZkClient.ZkClientConfig();
      clientConfig.setZkSerializer(new ZNRecordSerializer());
      _gZkClient = DedicatedZkClientFactory.getInstance()
          .buildZkClient(new HelixZkClient.ZkConnectionConfig(ZK_ADDR), clientConfig);
      // NOTE(review): setZkSerializer is invoked again with an identical serializer; the
      // repeat appears redundant.
      clientConfig.setZkSerializer(new ZNRecordSerializer());
      _gZkClientTestNS = DedicatedZkClientFactory.getInstance()
          .buildZkClient(new HelixZkClient.ZkConnectionConfig(_zkAddrTestNS), clientConfig);
      _gSetupTool = new ClusterSetup(_gZkClient);
      _configAccessor = new ConfigAccessor(_gZkClient);
      _baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
      _baseAccessorTestNS = new ZkBaseDataAccessor<>(_gZkClientTestNS);
      // wait for the web service to start
      Thread.sleep(100);
      setupHelixResources();
      _init = true;
    }
  }
  /**
   * One-time suite teardown: stops controllers and participants, closes ZK clients, shuts
   * down the shared REST server, and stops every embedded ZooKeeper.
   * @throws Exception on teardown failure
   */
  @AfterSuite
  public void afterSuite()
      throws Exception {
    // tear down orphan-ed threads
    for (ClusterControllerManager cm : _clusterControllerManagers) {
      if (cm != null && cm.isConnected()) {
        cm.syncStop();
      }
    }
    for (MockParticipantManager mm : _mockParticipantManagers) {
      if (mm != null && mm.isConnected()) {
        mm.syncStop();
      }
    }
    ZKClientPool.reset();
    if (_gZkClient != null) {
      _gZkClient.close();
      _gZkClient = null;
    }
    if (_gZkClientTestNS != null) {
      _gZkClientTestNS.close();
      _gZkClientTestNS = null;
    }
    if (_helixRestServer != null) {
      _helixRestServer.shutdown();
      _helixRestServer = null;
    }
    // Stop all ZkServers
    ZK_SERVER_MAP.forEach((zkAddr, zkServer) -> TestHelper.stopZkServer(zkServer));
  }
  /**
   * Starts the primary and namespaced-test ZooKeeper servers, plus any extra servers
   * requested through the {@code multiZk}/{@code numZk} system properties.
   */
  private void setupZooKeepers() {
    // start zk
    try {
      if (_zkServer == null) {
        _zkServer = TestHelper.startZkServer(ZK_ADDR);
        Assert.assertNotNull(_zkServer);
        ZK_SERVER_MAP.put(ZK_ADDR, _zkServer);
        ZKClientPool.reset();
      }
      if (_zkServerTestNS == null) {
        _zkServerTestNS = TestHelper.startZkServer(_zkAddrTestNS);
        Assert.assertNotNull(_zkServerTestNS);
        ZK_SERVER_MAP.put(_zkAddrTestNS, _zkServerTestNS);
        ZKClientPool.reset();
      }
    } catch (Exception e) {
      Assert.fail(String.format("Failed to start ZK servers: %s", e.toString()));
    }
    // Start additional ZKs in a multi-ZK setup if applicable
    String multiZkConfig = System.getProperty(MULTI_ZK_PROPERTY_KEY);
    if (multiZkConfig != null && multiZkConfig.equalsIgnoreCase(Boolean.TRUE.toString())) {
      String numZkFromConfig = System.getProperty(NUM_ZK_PROPERTY_KEY);
      if (numZkFromConfig != null) {
        try {
          int numZkFromConfigInt = Integer.parseInt(numZkFromConfig);
          // Start (numZkFromConfigInt - 2) ZooKeepers
          // (two are already started above on ports 2123/2124).
          for (int i = 2; i < numZkFromConfigInt; i++) {
            String zkAddr = ZK_PREFIX + (ZK_START_PORT + i);
            ZkServer zkServer = TestHelper.startZkServer(zkAddr);
            Assert.assertNotNull(zkServer);
            ZK_SERVER_MAP.put(zkAddr, zkServer);
          }
        } catch (Exception e) {
          Assert.fail("Failed to create multiple ZooKeepers!");
        }
      }
    }
  }
  /**
   * Creates the suite's Helix clusters with their instances, resources, workflows and
   * controllers, recording everything in the shared static maps for later assertions.
   * @throws Exception on setup failure
   */
  protected void setupHelixResources() throws Exception {
    _clusters = createClusters(4);
    _gSetupTool.addCluster(_superCluster, true);
    _gSetupTool.addCluster(TASK_TEST_CLUSTER, true);
    _clusters.add(_superCluster);
    _clusters.add(TASK_TEST_CLUSTER);
    for (String cluster : _clusters) {
      Set<String> instances = createInstances(cluster, 10);
      Set<String> liveInstances = startInstances(cluster, instances, 6);
      ClusterConfig clusterConfig = new ClusterConfig(cluster);
      clusterConfig.setFaultZoneType("helixZoneId");
      _configAccessor.setClusterConfig(cluster, clusterConfig);
      createResourceConfigs(cluster, 8);
      _workflowMap.put(cluster, createWorkflows(cluster, 3));
      Set<String> resources = createResources(cluster, 8);
      _instancesMap.put(cluster, instances);
      _liveInstancesMap.put(cluster, liveInstances);
      _resourcesMap.put(cluster, resources);
      _clusterControllerManagers.add(startController(cluster));
    }
    preSetupForParallelInstancesStoppableTest(STOPPABLE_CLUSTER, STOPPABLE_INSTANCES);
  }
  /**
   * Adds {@code numInstances} instances named {@code <cluster>localhost_<port>} (ports from
   * 12918 upward) to the cluster.
   * @return the names of the instances added
   */
  protected Set<String> createInstances(String cluster, int numInstances) {
    Set<String> instances = new HashSet<>();
    for (int i = 0; i < numInstances; i++) {
      String instanceName = cluster + "localhost_" + (12918 + i);
      _gSetupTool.addInstanceToCluster(cluster, instanceName);
      instances.add(instanceName);
    }
    return instances;
  }
  /**
   * Creates {@code numResources} MasterSlave resources (with min-active-replica set) in the
   * cluster and rebalances each one.
   * @return the names of the resources created
   */
  protected Set<String> createResources(String cluster, int numResources) {
    Set<String> resources = new HashSet<>();
    for (int i = 0; i < numResources; i++) {
      String resource = cluster + "_db_" + i;
      _gSetupTool.addResourceToCluster(cluster, resource, NUM_PARTITIONS, "MasterSlave");
      IdealState idealState =
          _gSetupTool.getClusterManagementTool().getResourceIdealState(cluster, resource);
      idealState.setMinActiveReplicas(MIN_ACTIVE_REPLICA);
      _gSetupTool.getClusterManagementTool().setResourceIdealState(cluster, resource, idealState);
      _gSetupTool.rebalanceStorageCluster(cluster, resource, NUM_REPLICA);
      resources.add(resource);
    }
    return resources;
  }
  /**
   * Writes {@code numResources} resource configs (replica count only) into the cluster.
   * @return the names of the resources configured
   */
  protected Set<String> createResourceConfigs(String cluster, int numResources) {
    Set<String> resources = new HashSet<>();
    for (int i = 0; i < numResources; i++) {
      String resource = cluster + "_db_" + i;
      org.apache.helix.model.ResourceConfig resourceConfig =
          new org.apache.helix.model.ResourceConfig.Builder(resource).setNumReplica(NUM_REPLICA)
              .build();
      _configAccessor.setResourceConfig(cluster, resource, resourceConfig);
      resources.add(resource);
    }
    return resources;
  }
  /**
   * Starts mock participants (with the mock Task state model registered) for a subset of the
   * given instances.
   * <p>NOTE(review): because the counter is incremented before the comparison
   * ({@code ++i > numLiveinstances}), this starts {@code numLiveinstances + 1} participants
   * (capped by the size of {@code instances}) -- confirm whether this off-by-one is intended.
   * @return the names of the instances that were started
   */
  protected Set<String> startInstances(String cluster, Set<String> instances,
      int numLiveinstances) {
    Set<String> liveInstances = new HashSet<>();
    int i = 0;
    for (String instance : instances) {
      MockParticipantManager participant = new MockParticipantManager(ZK_ADDR, cluster, instance);
      Map<String, TaskFactory> taskFactoryReg = new HashMap<>();
      taskFactoryReg.put(MockTask.TASK_COMMAND, MockTask::new);
      StateMachineEngine stateMachineEngine = participant.getStateMachineEngine();
      stateMachineEngine.registerStateModelFactory("Task",
          new TaskStateModelFactory(participant, taskFactoryReg));
      participant.syncStart();
      _mockParticipantManagers.add(participant);
      liveInstances.add(instance);
      if (++i > numLiveinstances) {
        break;
      }
    }
    return liveInstances;
  }
  /**
   * Starts a distributed cluster controller named {@code controller-<cluster>}.
   * @return the started controller manager
   */
  protected ClusterControllerManager startController(String cluster) {
    String controllerName = "controller-" + cluster;
    ClusterControllerManager controller =
        new ClusterControllerManager(ZK_ADDR, cluster, controllerName);
    controller.syncStart();
    return controller;
  }
  /**
   * Creates {@code numClusters} clusters named {@code TestCluster_<i>}.
   * @return the names of the clusters created
   */
  protected Set<String> createClusters(int numClusters) {
    Set<String> clusters = new HashSet<>();
    for (int i = 0; i < numClusters; i++) {
      String cluster = "TestCluster_" + i;
      _gSetupTool.addCluster(cluster, true);
      clusters.add(cluster);
    }
    return clusters;
  }
  /**
   * Builds {@code numWorkflows} workflows (each with 3 jobs), writes a FAILED workflow context
   * and the workflow's resource config into ZK, and seeds an empty user-content node per
   * workflow.
   * @return map from workflow name to the built {@link Workflow}
   */
  protected Map<String, Workflow> createWorkflows(String cluster, int numWorkflows) {
    Map<String, Workflow> workflows = new HashMap<>();
    HelixPropertyStore<ZNRecord> propertyStore =
        new ZkHelixPropertyStore<>((ZkBaseDataAccessor<ZNRecord>) _baseAccessor,
            PropertyPathBuilder.propertyStore(cluster), null);
    for (int i = 0; i < numWorkflows; i++) {
      Workflow.Builder workflow = new Workflow.Builder(WORKFLOW_PREFIX + i);
      int j = 0;
      for (JobConfig.Builder job : createJobs(cluster, WORKFLOW_PREFIX + i, 3)) {
        workflow.addJob(JOB_PREFIX + j++, job);
      }
      workflows.put(WORKFLOW_PREFIX + i, workflow.build());
      WorkflowContext workflowContext = TaskTestUtil
          .buildWorkflowContext(WORKFLOW_PREFIX + i, TaskState.FAILED,
              System.currentTimeMillis(), TaskState.COMPLETED, TaskState.COMPLETED,
              TaskState.FAILED);
      // Persist the workflow context directly under the property store path.
      _baseAccessor.set(String.format("/%s/%s%s/%s/%s", cluster, PropertyType.PROPERTYSTORE.name(),
          TaskConstants.REBALANCER_CONTEXT_ROOT, WORKFLOW_PREFIX + i, TaskConstants.CONTEXT_NODE),
          workflowContext.getRecord(), AccessOption.PERSISTENT);
      _configAccessor.setResourceConfig(cluster, WORKFLOW_PREFIX + i, workflow.getWorkflowConfig());
      // Add workflow user content
      propertyStore.create(Joiner.on("/")
          .join(TaskConstants.REBALANCER_CONTEXT_ROOT, WORKFLOW_PREFIX + i,
              TaskUtil.USER_CONTENT_NODE), new ZNRecord(TaskUtil.USER_CONTENT_NODE),
          AccessOption.PERSISTENT);
    }
    return workflows;
  }
  /**
   * Builds {@code numJobs} job configs for a workflow, writing each job's COMPLETED context,
   * resource config, and a user-content node (with one empty task entry) into ZK.
   * @return the job config builders created
   */
  protected Set<JobConfig.Builder> createJobs(String cluster, String workflowName, int numJobs) {
    HelixPropertyStore<ZNRecord> propertyStore =
        new ZkHelixPropertyStore<>((ZkBaseDataAccessor<ZNRecord>) _baseAccessor,
            PropertyPathBuilder.propertyStore(cluster), null);
    Set<JobConfig.Builder> jobCfgs = new HashSet<>();
    for (int i = 0; i < numJobs; i++) {
      JobConfig.Builder job =
          new JobConfig.Builder().setCommand("DummyCommand").setTargetResource("RESOURCE")
              .setWorkflow(workflowName).setJobId(workflowName + "_" + JOB_PREFIX + i);
      jobCfgs.add(job);
      JobContext jobContext = TaskTestUtil
          .buildJobContext(System.currentTimeMillis(), System.currentTimeMillis() + 1,
              TaskPartitionState.COMPLETED);
      _baseAccessor.set(String.format("/%s/%s%s/%s/%s", cluster, PropertyType.PROPERTYSTORE.name(),
          TaskConstants.REBALANCER_CONTEXT_ROOT, workflowName + "_" + JOB_PREFIX + i,
          TaskConstants.CONTEXT_NODE), jobContext.getRecord(), AccessOption.PERSISTENT);
      _configAccessor.setResourceConfig(cluster, workflowName + "_" + JOB_PREFIX + i, job.build());
      // add job content stores
      ZNRecord contentStore = new ZNRecord(TaskUtil.USER_CONTENT_NODE);
      contentStore.setMapField(TaskUtil
          .getNamespacedTaskName(TaskUtil.getNamespacedJobName(workflowName, JOB_PREFIX + i), "0"),
          Collections.<String, String>emptyMap());
      propertyStore.create(Joiner.on("/")
          .join(TaskConstants.REBALANCER_CONTEXT_ROOT, workflowName + "_" + JOB_PREFIX + i,
              TaskUtil.USER_CONTENT_NODE), contentStore, AccessOption.PERSISTENT);
    }
    return jobCfgs;
  }
  /** Deserializes a JSON string into a {@link ZNRecord} via the legacy-compatible reader. */
  protected static ZNRecord toZNRecord(String data)
      throws IOException {
    return ZNRECORD_READER.readValue(data);
  }
  /**
   * Issues a GET against the REST endpoint and asserts the response status and media type.
   * @param expectBodyReturned when true, also asserts a non-null body was returned
   * @return the response body
   */
  protected String get(String uri, Map<String, String> queryParams, int expectedReturnStatus,
      boolean expectBodyReturned) {
    WebTarget webTarget = target(uri);
    if (queryParams != null) {
      for (Map.Entry<String, String> entry : queryParams.entrySet()) {
        webTarget = webTarget.queryParam(entry.getKey(), entry.getValue());
      }
    }
    final Response response = webTarget.request().get();
    Assert.assertEquals(response.getStatus(), expectedReturnStatus);
    // NOT_FOUND and BAD_REQUEST will throw text based html
    if (expectedReturnStatus != Response.Status.NOT_FOUND.getStatusCode()
        && expectedReturnStatus != Response.Status.BAD_REQUEST.getStatusCode()) {
      Assert.assertEquals(response.getMediaType().getType(), "application");
    } else {
      Assert.assertEquals(response.getMediaType().getType(), "text");
    }
    String body = response.readEntity(String.class);
    if (expectBodyReturned) {
      Assert.assertNotNull(body);
    }
    return body;
  }
  /** Issues a PUT with the given entity and asserts the response status. */
  protected void put(String uri, Map<String, String> queryParams, Entity entity,
      int expectedReturnStatus) {
    WebTarget webTarget = target(uri);
    if (queryParams != null) {
      for (Map.Entry<String, String> entry : queryParams.entrySet()) {
        webTarget = webTarget.queryParam(entry.getKey(), entry.getValue());
      }
    }
    Response response = webTarget.request().put(entity);
    Assert.assertEquals(response.getStatus(), expectedReturnStatus);
  }
  /** Issues a POST and asserts the response status, discarding the response. */
  protected void post(String uri, Map<String, String> queryParams, Entity entity,
      int expectedReturnStatus) {
    post(uri, queryParams, entity,expectedReturnStatus, false);
  }
  /**
   * Issues a POST with the given entity and asserts the response status.
   * @return the raw {@link Response} for callers that need the body
   */
  protected Response post(String uri, Map<String, String> queryParams, Entity entity,
      int expectedReturnStatus, boolean expectBodyReturned) {
    WebTarget webTarget = target(uri);
    if (queryParams != null) {
      for (Map.Entry<String, String> entry : queryParams.entrySet()) {
        webTarget = webTarget.queryParam(entry.getKey(), entry.getValue());
      }
    }
    Response response = webTarget.request().post(entity);
    Assert.assertEquals(response.getStatus(), expectedReturnStatus);
    return response;
  }
  /** Issues a DELETE and asserts the response status. */
  protected void delete(String uri, int expectedReturnStatus) {
    final Response response = target(uri).request().delete();
    Assert.assertEquals(response.getStatus(), expectedReturnStatus);
  }
  /** Builds a {@link TaskDriver} for the given cluster using the shared ZK client. */
  protected TaskDriver getTaskDriver(String clusterName) {
    return new TaskDriver(_gZkClient, clusterName);
  }
  /**
   * Creates the "stoppable" test cluster: two fault zones, one disabled instance, one
   * partition-disabled instance, three-plus started participants, and verifies the cluster and
   * instance configs are visible in ZK before returning.
   * @throws Exception on setup or verification failure
   */
  private void preSetupForParallelInstancesStoppableTest(String clusterName,
      List<String> instances) throws Exception {
    _gSetupTool.addCluster(clusterName, true);
    ClusterConfig clusterConfig = _configAccessor.getClusterConfig(clusterName);
    clusterConfig.setFaultZoneType("helixZoneId");
    clusterConfig.setPersistIntermediateAssignment(true);
    _configAccessor.setClusterConfig(clusterName, clusterConfig);
    // Create instance configs
    List<InstanceConfig> instanceConfigs = new ArrayList<>();
    for (int i = 0; i < instances.size() - 1; i++) {
      InstanceConfig instanceConfig = new InstanceConfig(instances.get(i));
      instanceConfig.setDomain("helixZoneId=zone1,host=instance" + i);
      instanceConfigs.add(instanceConfig);
    }
    // The last instance is placed in a separate zone (zone2).
    instanceConfigs.add(new InstanceConfig(instances.get(instances.size() - 1)));
    instanceConfigs.get(instanceConfigs.size() - 1).setDomain("helixZoneId=zone2,host=instance5");
    // One fully disabled instance and one instance disabled for a single (fake) partition.
    instanceConfigs.get(1).setInstanceEnabled(false);
    instanceConfigs.get(3).setInstanceEnabledForPartition("FakeResource", "FakePartition", false);
    for (InstanceConfig instanceConfig : instanceConfigs) {
      _gSetupTool.getClusterManagementTool().addInstance(clusterName, instanceConfig);
    }
    // Start participant
    startInstances(clusterName, new TreeSet<>(instances), 3);
    createResources(clusterName, 1);
    _clusterControllerManagers.add(startController(clusterName));
    // Make sure that cluster config exists
    boolean isClusterConfigExist = TestHelper.verify(() -> {
      ClusterConfig stoppableClusterConfig;
      try {
        stoppableClusterConfig = _configAccessor.getClusterConfig(clusterName);
      } catch (Exception e) {
        return false;
      }
      return (stoppableClusterConfig != null);
    }, TestHelper.WAIT_DURATION);
    Assert.assertTrue(isClusterConfigExist);
    // Make sure that instance config exists for the instance0 to instance5
    for (String instance: instances) {
      boolean isinstanceConfigExist = TestHelper.verify(() -> {
        InstanceConfig instanceConfig;
        try {
          instanceConfig = _configAccessor.getInstanceConfig(STOPPABLE_CLUSTER, instance);
        } catch (Exception e) {
          return false;
        }
        return (instanceConfig != null);
      }, TestHelper.WAIT_DURATION);
      Assert.assertTrue(isinstanceConfigExist);
    }
    _clusters.add(STOPPABLE_CLUSTER);
    _workflowMap.put(STOPPABLE_CLUSTER, createWorkflows(STOPPABLE_CLUSTER, 3));
  }
  /**
   * Starts a HelixRestServer for the test suite.
   * @return the started server
   */
  protected HelixRestServer startRestServer() {
    // Create namespace manifest map
    List<HelixRestNamespace> namespaces = new ArrayList<>();
    // Add test namespace
    namespaces.add(new HelixRestNamespace(TEST_NAMESPACE,
        HelixRestNamespace.HelixMetadataStoreType.ZOOKEEPER, _zkAddrTestNS, false));
    // Add default namespace
    namespaces.add(new HelixRestNamespace(ZK_ADDR));
    HelixRestServer server;
    try {
      server =
          new HelixRestServer(namespaces, getBaseUri().getPort(), getBaseUri().getPath(),
              Collections.singletonList(_auditLogger));
      server.start();
    } catch (Exception ex) {
      throw new TestContainerException(ex);
    }
    return server;
  }
}
| 9,308 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/TestClusterAccessor.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.collect.ImmutableMap;
import com.sun.research.ws.wadl.HTTPMethods;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.PropertyKey;
import org.apache.helix.TestHelper;
import org.apache.helix.api.status.ClusterManagementMode;
import org.apache.helix.api.status.ClusterManagementModeRequest;
import org.apache.helix.cloud.azure.AzureConstants;
import org.apache.helix.cloud.constants.CloudProvider;
import org.apache.helix.cloud.constants.VirtualTopologyGroupConstants;
import org.apache.helix.controller.rebalancer.waged.WagedRebalancer;
import org.apache.helix.integration.manager.ClusterDistributedController;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZKUtil;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.CloudConfig;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.ClusterStatus;
import org.apache.helix.model.CustomizedStateConfig;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.MaintenanceSignal;
import org.apache.helix.model.PauseSignal;
import org.apache.helix.model.RESTConfig;
import org.apache.helix.rest.common.HelixRestNamespace;
import org.apache.helix.rest.server.auditlog.AuditLog;
import org.apache.helix.rest.server.resources.AbstractResource;
import org.apache.helix.rest.server.resources.AbstractResource.Command;
import org.apache.helix.rest.server.resources.helix.ClusterAccessor;
import org.apache.helix.rest.server.util.JerseyUriRequestBuilder;
import org.apache.helix.tools.ClusterVerifiers.BestPossibleExternalViewVerifier;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
public class TestClusterAccessor extends AbstractTestClass {
private static final String VG_CLUSTER = "vgCluster";
@BeforeClass
public void beforeClass() {
for (String cluster : _clusters) {
ClusterConfig clusterConfig = createClusterConfig(cluster);
_configAccessor.setClusterConfig(cluster, clusterConfig);
}
}
@Test
public void testGetClusters() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
_auditLogger.clearupLogs();
String body = get("clusters", null, Response.Status.OK.getStatusCode(), true);
JsonNode node = OBJECT_MAPPER.readTree(body);
String clustersStr = node.get(ClusterAccessor.ClusterProperties.clusters.name()).toString();
Assert.assertNotNull(clustersStr);
Set<String> clusters = OBJECT_MAPPER.readValue(clustersStr,
OBJECT_MAPPER.getTypeFactory().constructCollectionType(Set.class, String.class));
Assert.assertEquals(clusters, _clusters,
"clusters from response: " + clusters + " vs clusters actually: " + _clusters);
validateAuditLogSize(1);
AuditLog auditLog = _auditLogger.getAuditLogs().get(0);
validateAuditLog(auditLog, HTTPMethods.GET.name(), "clusters",
Response.Status.OK.getStatusCode(), body);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testGetClusters")
public void testGetClusterTopology() {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String cluster = "TestCluster_1";
String instance = cluster + "localhost_12920";
// set the fake zone id in instance configuration
HelixDataAccessor helixDataAccessor = new ZKHelixDataAccessor(cluster, _baseAccessor);
InstanceConfig instanceConfig =
helixDataAccessor.getProperty(helixDataAccessor.keyBuilder().instanceConfig(instance));
instanceConfig.setDomain("helixZoneId=123");
helixDataAccessor.setProperty(helixDataAccessor.keyBuilder().instanceConfig(instance),
instanceConfig);
String response = new JerseyUriRequestBuilder("clusters/{}/topology").format(cluster).get(this);
Assert.assertEquals(response,
"{\"id\":\"TestCluster_1\",\"zones\":[{\"id\":\"123\",\"instances\":[{\"id\":\"TestCluster_1localhost_12920\"}]}],"
+ "\"allInstances\":[\"TestCluster_1localhost_12918\",\"TestCluster_1localhost_12919\",\"TestCluster_1localhost_12924\","
+ "\"TestCluster_1localhost_12925\",\"TestCluster_1localhost_12926\",\"TestCluster_1localhost_12927\",\"TestCluster_1localhost_12920\","
+ "\"TestCluster_1localhost_12921\",\"TestCluster_1localhost_12922\",\"TestCluster_1localhost_12923\"]}");
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testGetClusterTopology")
public void testGetClusterTopologyAndFaultZoneMap() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String topologyMapUrlBase = "clusters/TestCluster_1/topologymap/";
String faultZoneUrlBase = "clusters/TestCluster_1/faultzonemap/";
// test invalid case where instance config and cluster topology have not been set.
get(topologyMapUrlBase, null, Response.Status.BAD_REQUEST.getStatusCode(), true);
get(faultZoneUrlBase, null, Response.Status.BAD_REQUEST.getStatusCode(), true);
String cluster = "TestCluster_1";
for (int i = 0; i < 5; i++) {
String instance = cluster + "localhost_129" + String.valueOf(18 + i);
HelixDataAccessor helixDataAccessor = new ZKHelixDataAccessor(cluster, _baseAccessor);
InstanceConfig instanceConfig =
helixDataAccessor.getProperty(helixDataAccessor.keyBuilder().instanceConfig(instance));
instanceConfig.setDomain("helixZoneId=zone0,instance=" + instance);
helixDataAccessor
.setProperty(helixDataAccessor.keyBuilder().instanceConfig(instance), instanceConfig);
}
for (int i = 0; i < 5; i++) {
String instance = cluster + "localhost_129" + String.valueOf(23 + i);
HelixDataAccessor helixDataAccessor = new ZKHelixDataAccessor(cluster, _baseAccessor);
InstanceConfig instanceConfig =
helixDataAccessor.getProperty(helixDataAccessor.keyBuilder().instanceConfig(instance));
instanceConfig.setDomain("helixZoneId=zone1,instance=" + instance);
helixDataAccessor
.setProperty(helixDataAccessor.keyBuilder().instanceConfig(instance), instanceConfig);
}
// test invalid case where instance config is set, but cluster topology has not been set.
get(topologyMapUrlBase, null, Response.Status.BAD_REQUEST.getStatusCode(), true);
get(faultZoneUrlBase, null, Response.Status.BAD_REQUEST.getStatusCode(), true);
ClusterConfig configDelta = new ClusterConfig(cluster);
configDelta.getRecord().setSimpleField("TOPOLOGY", "/helixZoneId/instance");
updateClusterConfigFromRest(cluster, configDelta, Command.update);
//get valid cluster topology map
Map<String, Object> topologyMap = getMapResponseFromRest(topologyMapUrlBase);
Assert.assertEquals(topologyMap.size(), 2);
Assert.assertTrue(topologyMap.get("/helixZoneId:zone0") instanceof List);
List<String> instances = (List<String>) topologyMap.get("/helixZoneId:zone0");
Assert.assertEquals(instances.size(), 5);
Assert.assertTrue(instances.containsAll(new HashSet<>(Arrays
.asList("/instance:TestCluster_1localhost_12918",
"/instance:TestCluster_1localhost_12919",
"/instance:TestCluster_1localhost_12920",
"/instance:TestCluster_1localhost_12921",
"/instance:TestCluster_1localhost_12922"))));
Assert.assertTrue(topologyMap.get("/helixZoneId:zone1") instanceof List);
instances = (List<String>) topologyMap.get("/helixZoneId:zone1");
Assert.assertEquals(instances.size(), 5);
Assert.assertTrue(instances.containsAll(new HashSet<>(Arrays
.asList("/instance:TestCluster_1localhost_12923",
"/instance:TestCluster_1localhost_12924",
"/instance:TestCluster_1localhost_12925",
"/instance:TestCluster_1localhost_12926",
"/instance:TestCluster_1localhost_12927"))));
configDelta = new ClusterConfig(cluster);
configDelta.getRecord().setSimpleField("FAULT_ZONE_TYPE", "helixZoneId");
updateClusterConfigFromRest(cluster, configDelta, Command.update);
//get valid cluster fault zone map
Map<String, Object> faultZoneMap = getMapResponseFromRest(faultZoneUrlBase);
Assert.assertEquals(faultZoneMap.size(), 2);
Assert.assertTrue(faultZoneMap.get("/helixZoneId:zone0") instanceof List);
instances = (List<String>) faultZoneMap.get("/helixZoneId:zone0");
Assert.assertEquals(instances.size(), 5);
Assert.assertTrue(instances.containsAll(new HashSet<>(Arrays
.asList("/instance:TestCluster_1localhost_12918",
"/instance:TestCluster_1localhost_12919",
"/instance:TestCluster_1localhost_12920",
"/instance:TestCluster_1localhost_12921",
"/instance:TestCluster_1localhost_12922"))));
Assert.assertTrue(faultZoneMap.get("/helixZoneId:zone1") instanceof List);
instances = (List<String>) faultZoneMap.get("/helixZoneId:zone1");
Assert.assertEquals(instances.size(), 5);
Assert.assertTrue(instances.containsAll(new HashSet<>(Arrays
.asList("/instance:TestCluster_1localhost_12923",
"/instance:TestCluster_1localhost_12924",
"/instance:TestCluster_1localhost_12925",
"/instance:TestCluster_1localhost_12926",
"/instance:TestCluster_1localhost_12927"))));
}
@Test(dataProvider = "prepareVirtualTopologyTests", dependsOnMethods = "testGetClusters")
public void testAddVirtualTopologyGroup(String requestParam, int numGroups,
Map<String, String> instanceToGroup) throws IOException {
post("clusters/" + VG_CLUSTER,
ImmutableMap.of("command", "addVirtualTopologyGroup"),
Entity.entity(requestParam, MediaType.APPLICATION_JSON_TYPE),
Response.Status.OK.getStatusCode());
Map<String, Object> topology = getMapResponseFromRest(String.format("clusters/%s/topology", VG_CLUSTER));
Assert.assertTrue(topology.containsKey("zones"));
Assert.assertEquals(((List) topology.get("zones")).size(), numGroups);
ClusterConfig clusterConfig = getClusterConfigFromRest(VG_CLUSTER);
String expectedTopology = "/" + VirtualTopologyGroupConstants.VIRTUAL_FAULT_ZONE_TYPE + "/hostname";
Assert.assertEquals(clusterConfig.getTopology(), expectedTopology);
Assert.assertEquals(clusterConfig.getFaultZoneType(), VirtualTopologyGroupConstants.VIRTUAL_FAULT_ZONE_TYPE);
HelixDataAccessor helixDataAccessor = new ZKHelixDataAccessor(VG_CLUSTER, _baseAccessor);
for (Map.Entry<String, String> entry : instanceToGroup.entrySet()) {
InstanceConfig instanceConfig =
helixDataAccessor.getProperty(helixDataAccessor.keyBuilder().instanceConfig(entry.getKey()));
String expectedGroup = entry.getValue();
Assert.assertEquals(instanceConfig.getDomainAsMap().get(VirtualTopologyGroupConstants.VIRTUAL_FAULT_ZONE_TYPE),
expectedGroup);
}
}
  /**
   * Verifies that addVirtualTopologyGroup honors autoMaintenanceModeDisabled=true:
   * the command must fail while the cluster is NOT in maintenance mode, succeed once
   * maintenance mode is enabled, and leave maintenance mode enabled afterwards
   * (i.e. it must not auto-exit maintenance on the caller's behalf).
   */
  @Test(dependsOnMethods = "testGetClusters")
  public void testVirtualTopologyGroupMaintenanceMode() throws JsonProcessingException {
    setupClusterForVirtualTopology(VG_CLUSTER);
    String requestParam = "{\"virtualTopologyGroupNumber\":\"7\",\"virtualTopologyGroupName\":\"vgTest\","
        + "\"autoMaintenanceModeDisabled\":\"true\"}";
    // expect failure as cluster is not in maintenance mode while autoMaintenanceModeDisabled=true
    post("clusters/" + VG_CLUSTER,
        ImmutableMap.of("command", "addVirtualTopologyGroup"),
        Entity.entity(requestParam, MediaType.APPLICATION_JSON_TYPE),
        Response.Status.INTERNAL_SERVER_ERROR.getStatusCode());
    // enable maintenance mode and expect success
    post("clusters/" + VG_CLUSTER,
        ImmutableMap.of("command", "enableMaintenanceMode"),
        Entity.entity("virtual group", MediaType.APPLICATION_JSON_TYPE),
        Response.Status.OK.getStatusCode());
    post("clusters/" + VG_CLUSTER,
        ImmutableMap.of("command", "addVirtualTopologyGroup"),
        Entity.entity(requestParam, MediaType.APPLICATION_JSON_TYPE),
        Response.Status.OK.getStatusCode());
    // with autoMaintenanceModeDisabled=true the command must leave maintenance mode on
    Assert.assertTrue(isMaintenanceModeEnabled(VG_CLUSTER));
  }
private boolean isMaintenanceModeEnabled(String clusterName) throws JsonProcessingException {
String body =
get("clusters/" + clusterName + "/maintenance", null, Response.Status.OK.getStatusCode(), true);
return OBJECT_MAPPER.readTree(body).get(ClusterAccessor.ClusterProperties.maintenance.name()).booleanValue();
}
  /**
   * Data provider for {@link #testAddVirtualTopologyGroup}.
   * Each row supplies {request JSON, expected virtual-group count, expected
   * instance-to-group assignments to spot-check}. Also creates VG_CLUSTER as a
   * side effect before any row runs.
   */
  @DataProvider
  public Object[][] prepareVirtualTopologyTests() {
    setupClusterForVirtualTopology(VG_CLUSTER);
    String test1 = "{\"virtualTopologyGroupNumber\":\"7\",\"virtualTopologyGroupName\":\"vgTest\"}";
    String test2 = "{\"virtualTopologyGroupNumber\":\"9\",\"virtualTopologyGroupName\":\"vgTest\"}";
    return new Object[][] {
        {test1, 7, ImmutableMap.of(
            "vgCluster_localhost_12918", "vgTest_0",
            "vgCluster_localhost_12919", "vgTest_0",
            "vgCluster_localhost_12925", "vgTest_4",
            "vgCluster_localhost_12927", "vgTest_6")},
        {test2, 9, ImmutableMap.of(
            "vgCluster_localhost_12918", "vgTest_0",
            "vgCluster_localhost_12919", "vgTest_0",
            "vgCluster_localhost_12925", "vgTest_6",
            "vgCluster_localhost_12927", "vgTest_8")},
        // repeat test1 for deterministic assignment and to cover a decreasing numGroups (9 -> 7)
        {test1, 7, ImmutableMap.of(
            "vgCluster_localhost_12918", "vgTest_0",
            "vgCluster_localhost_12919", "vgTest_0",
            "vgCluster_localhost_12925", "vgTest_4",
            "vgCluster_localhost_12927", "vgTest_6")}
    };
  }
  /**
   * Creates an Azure cloud-enabled cluster with 10 instances (ports 12918-12927),
   * assigns each instance a DOMAIN of the form "faultDomain=&lt;i/2&gt;,hostname=&lt;name&gt;"
   * (so two instances share each physical fault domain), and starts all participants.
   */
  private void setupClusterForVirtualTopology(String clusterName) {
    HelixDataAccessor helixDataAccessor = new ZKHelixDataAccessor(clusterName, _baseAccessor);
    ZNRecord record = new ZNRecord("testZnode");
    record.setBooleanField(CloudConfig.CloudConfigProperty.CLOUD_ENABLED.name(), true);
    record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_ID.name(), "TestCloudID");
    record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_PROVIDER.name(), CloudProvider.AZURE.name());
    CloudConfig cloudConfig = new CloudConfig.Builder(record).build();
    _gSetupTool.addCluster(clusterName, true, cloudConfig);
    Set<String> instances = new HashSet<>();
    for (int i = 0; i < 10; i++) {
      String instanceName = clusterName + "_localhost_" + (12918 + i);
      _gSetupTool.addInstanceToCluster(clusterName, instanceName);
      InstanceConfig instanceConfig =
          helixDataAccessor.getProperty(helixDataAccessor.keyBuilder().instanceConfig(instanceName));
      // i / 2 puts two consecutive instances in the same physical fault domain.
      instanceConfig.setDomain("faultDomain=" + i / 2 + ",hostname=" + instanceName);
      helixDataAccessor.setProperty(helixDataAccessor.keyBuilder().instanceConfig(instanceName), instanceConfig);
      instances.add(instanceName);
    }
    startInstances(clusterName, instances, 10);
  }
@Test(dependsOnMethods = "testGetClusterTopologyAndFaultZoneMap")
public void testAddConfigFields() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
//Need to use TestCluster_1 here since other test may add unwanted key to listField. issue-1336
String cluster = "TestCluster_1";
ClusterConfig oldConfig = getClusterConfigFromRest(cluster);
ClusterConfig configDelta = new ClusterConfig(cluster);
configDelta.getRecord().setSimpleField("newField", "newValue");
configDelta.getRecord().setListField("newList", Arrays.asList("newValue1", "newValue2"));
configDelta.getRecord().setMapField("newMap", new HashMap<String, String>() {
{
put("newkey1", "newvalue1");
put("newkey2", "newvalue2");
}
});
updateClusterConfigFromRest(cluster, configDelta, Command.update);
ClusterConfig newConfig = getClusterConfigFromRest(cluster);
oldConfig.getRecord().update(configDelta.getRecord());
Assert.assertEquals(newConfig, oldConfig,
"cluster config from response: " + newConfig + " vs cluster config actually: " + oldConfig);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testAddConfigFields")
public void testUpdateConfigFields() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String cluster = "TestCluster_1";
ClusterConfig config = getClusterConfigFromRest(cluster);
ZNRecord record = config.getRecord();
String key = record.getSimpleFields().keySet().iterator().next();
String value = record.getSimpleField(key);
record.getSimpleFields().clear();
record.setSimpleField(key, value + "--updated");
key = record.getListFields().keySet().iterator().next();
List<String> list = record.getListField(key);
list.remove(0);
list.add("newValue--updated");
record.getListFields().clear();
record.setListField(key, list);
key = record.getMapFields().keySet().iterator().next();
Map<String, String> map = record.getMapField(key);
Iterator it = map.entrySet().iterator();
it.next();
it.remove();
map.put("newKey--updated", "newValue--updated");
record.getMapFields().clear();
record.setMapField(key, map);
ClusterConfig prevConfig = getClusterConfigFromRest(cluster);
updateClusterConfigFromRest(cluster, config, Command.update);
ClusterConfig newConfig = getClusterConfigFromRest(cluster);
prevConfig.getRecord().update(config.getRecord());
Assert.assertEquals(newConfig, prevConfig, "cluster config from response: " + newConfig
+ " vs cluster config actually: " + prevConfig);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testUpdateConfigFields")
public void testDeleteConfigFields() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String cluster = _clusters.iterator().next();
ClusterConfig config = getClusterConfigFromRest(cluster);
ZNRecord record = config.getRecord();
String simpleKey = record.getSimpleFields().keySet().iterator().next();
String value = record.getSimpleField(simpleKey);
record.getSimpleFields().clear();
record.setSimpleField(simpleKey, value);
String listKey = record.getListFields().keySet().iterator().next();
List<String> list = record.getListField(listKey);
record.getListFields().clear();
record.setListField(listKey, list);
String mapKey = record.getMapFields().keySet().iterator().next();
Map<String, String> map = record.getMapField(mapKey);
record.getMapFields().clear();
record.setMapField(mapKey, map);
ClusterConfig prevConfig = getClusterConfigFromRest(cluster);
updateClusterConfigFromRest(cluster, config, Command.delete);
ClusterConfig newConfig = getClusterConfigFromRest(cluster);
Assert.assertFalse(newConfig.getRecord().getSimpleFields().containsKey(simpleKey),
"Failed to delete key " + simpleKey + " from cluster config");
Assert.assertFalse(newConfig.getRecord().getListFields().containsKey(listKey),
"Failed to delete key " + listKey + " from cluster config");
Assert.assertFalse(newConfig.getRecord().getSimpleFields().containsKey(mapKey),
"Failed to delete key " + mapKey + " from cluster config");
prevConfig.getRecord().subtract(config.getRecord());
Assert.assertEquals(newConfig, prevConfig, "cluster config from response: " + newConfig
+ " vs cluster config actually: " + prevConfig);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
  /**
   * Covers cluster create/delete through REST: re-creating an existing cluster,
   * creating a fresh one, deleting it, and validating that the audit log captured
   * all three calls.
   */
  @Test(dependsOnMethods = "testDeleteConfigFields")
  public void testCreateDeleteCluster() {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    // PUT on an already-existing cluster — the expected status below is 201 CREATED.
    // NOTE(review): the original comment said this "should fail"; confirm whether the
    // endpoint is meant to be idempotent or to reject duplicates.
    _auditLogger.clearupLogs();
    String cluster = _clusters.iterator().next();
    put("clusters/" + cluster, null, Entity.entity("", MediaType.APPLICATION_JSON_TYPE),
        Response.Status.CREATED.getStatusCode());
    // create a new cluster
    cluster = "NewCluster";
    put("clusters/" + cluster, null, Entity.entity("", MediaType.APPLICATION_JSON_TYPE),
        Response.Status.CREATED.getStatusCode());
    // verify the cluster has been created.
    Assert.assertTrue(ZKUtil.isClusterSetup(cluster, _gZkClient));
    // delete the cluster
    delete("clusters/" + cluster, Response.Status.OK.getStatusCode());
    // verify the cluster has been deleted.
    Assert.assertFalse(_baseAccessor.exists("/" + cluster, 0));
    // two PUTs + one DELETE => three audit-log entries
    validateAuditLogSize(3);
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
@Test(dependsOnMethods = "testCreateDeleteCluster")
public void testEnableDisableCluster() {
System.out.println("Start test :" + TestHelper.getTestMethodName());
// disable a cluster.
String cluster = _clusters.iterator().next();
_auditLogger.clearupLogs();
post("clusters/" + cluster, ImmutableMap.of("command", "disable"),
Entity.entity("", MediaType.APPLICATION_JSON_TYPE),
Response.Status.OK.getStatusCode());
PropertyKey.Builder keyBuilder = new PropertyKey.Builder(cluster);
// verify the cluster is paused.
Assert.assertTrue(_baseAccessor.exists(keyBuilder.pause().getPath(), 0));
// enable a cluster.
post("clusters/" + cluster, ImmutableMap.of("command", "enable"),
Entity.entity("", MediaType.APPLICATION_JSON_TYPE),
Response.Status.OK.getStatusCode());
// verify the cluster is paused.
Assert.assertFalse(_baseAccessor.exists(keyBuilder.pause().getPath(), 0));
validateAuditLogSize(2);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testEnableDisableCluster")
public void testGetClusterConfig() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
Response response = target("clusters/fakeCluster/configs").request().get();
Assert.assertEquals(response.getStatus(), Response.Status.NOT_FOUND.getStatusCode());
String cluster = _clusters.iterator().next();
getClusterConfigFromRest(cluster);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
  /**
   * Round-trips maintenance mode through REST: enables it with a JSON reason,
   * checks the exposed maintenance-signal fields, then disables it and verifies the
   * signal endpoint returns 404.
   */
  @Test(dependsOnMethods = "testGetClusterConfig")
  public void testEnableDisableMaintenanceMode() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String cluster = _clusters.iterator().next();
    String reasonValue = "Test reason";
    String reasonJSONString = "{\"reason\":\"" + reasonValue + "\"}";
    // enable maintenance mode
    post("clusters/" + cluster, ImmutableMap.of("command", "enableMaintenanceMode"),
        Entity.entity(reasonJSONString, MediaType.APPLICATION_JSON_TYPE), Response.Status.OK.getStatusCode());
    // verify is in maintenance mode
    Assert.assertTrue(isMaintenanceModeEnabled(cluster));
    // Check that we could retrieve maintenance signal correctly
    Map<String, Object> maintenanceSignalMap =
        getMapResponseFromRest("clusters/" + cluster + "/controller/maintenanceSignal");
    // A user-triggered signal carries who/why/when plus the cluster name.
    Assert.assertEquals(maintenanceSignalMap.get("TRIGGERED_BY"), "USER");
    Assert.assertEquals(maintenanceSignalMap.get("REASON"), reasonValue);
    Assert.assertNotNull(maintenanceSignalMap.get("TIMESTAMP"));
    Assert.assertEquals(maintenanceSignalMap.get("clusterName"), cluster);
    // disable maintenance mode
    post("clusters/" + cluster, ImmutableMap.of("command", "disableMaintenanceMode"),
        Entity.entity("", MediaType.APPLICATION_JSON_TYPE), Response.Status.OK.getStatusCode());
    // verify no longer in maintenance mode; the signal endpoint should now 404
    Assert.assertFalse(isMaintenanceModeEnabled(cluster));
    get("clusters/" + cluster + "/controller/maintenanceSignal", null,
        Response.Status.NOT_FOUND.getStatusCode(), false);
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
@Test(dependsOnMethods = "testEnableDisableMaintenanceMode")
public void testEmptyMaintenanceSignal() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String cluster = _clusters.iterator().next();
// Create empty maintenance znode
ZNRecord record = new ZNRecord("test_maintenance_node");
ZKUtil.createOrUpdate(_gZkClient, "/"+cluster+"/CONTROLLER/MAINTENANCE", record, true, true);
// Verify maintenance mode enabled
Assert.assertTrue(isMaintenanceModeEnabled(cluster));
get("clusters/" + cluster + "/controller/maintenanceSignal", null,
Response.Status.OK.getStatusCode(), true);
// Disable maintenance mode
post("clusters/" + cluster, ImmutableMap.of("command", "disableMaintenanceMode"),
Entity.entity("", MediaType.APPLICATION_JSON_TYPE), Response.Status.OK.getStatusCode());
// Verify no longer in maintenance mode
Assert.assertFalse(isMaintenanceModeEnabled(cluster));
get("clusters/" + cluster + "/controller/maintenanceSignal", null,
Response.Status.NOT_FOUND.getStatusCode(), false);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
  /**
   * Reads the current controller leader for a cluster and verifies that the last
   * entry of the controller leadership history mentions that leader.
   */
  @Test(dependsOnMethods = "testEmptyMaintenanceSignal")
  public void testGetControllerLeadershipHistory() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String cluster = _clusters.iterator().next();
    // Get the leader controller name for the cluster
    String leader =
        get("clusters/" + cluster + "/controller", null, Response.Status.OK.getStatusCode(), true);
    Map<String, String> leaderMap =
        OBJECT_MAPPER.readValue(leader, new TypeReference<HashMap<String, String>>() {
        });
    Assert.assertNotNull(leaderMap, "Controller leader cannot be null!");
    leader = leaderMap.get("controller");
    Assert.assertNotNull(leader, "Leader name cannot be null!");
    // Get the controller leadership history JSON's last entry
    Map<String, Object> leadershipHistoryMap = getMapResponseFromRest("clusters/" + cluster + "/controller/history");
    Assert.assertNotNull(leadershipHistoryMap, "Leadership history cannot be null!");
    Object leadershipHistoryList =
        leadershipHistoryMap.get(AbstractResource.Properties.history.name());
    Assert.assertNotNull(leadershipHistoryList);
    List<?> list = (List<?>) leadershipHistoryList;
    Assert.assertTrue(list.size() > 0);
    // History entries are serialized strings; the newest entry is last.
    String lastLeaderEntry = (String) list.get(list.size() - 1);
    // Check that the last entry contains the current leader name
    Assert.assertTrue(lastLeaderEntry.contains(leader));
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
@Test(dependsOnMethods = "testGetControllerLeadershipHistory")
public void testGetMaintenanceHistory() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String cluster = _clusters.iterator().next();
String reason = TestHelper.getTestMethodName();
// Enable maintenance mode
post("clusters/" + cluster, ImmutableMap.of("command", "enableMaintenanceMode"),
Entity.entity(reason, MediaType.APPLICATION_JSON_TYPE), Response.Status.OK.getStatusCode());
// Get the maintenance history JSON's last entry
Map<String, Object> maintenanceHistoryMap =
getMapResponseFromRest("clusters/" + cluster + "/controller/maintenanceHistory");
Object maintenanceHistoryList =
maintenanceHistoryMap.get(ClusterAccessor.ClusterProperties.maintenanceHistory.name());
Assert.assertNotNull(maintenanceHistoryList);
List<?> list = (List<?>) maintenanceHistoryList;
Assert.assertTrue(list.size() > 0);
String lastMaintenanceEntry = (String) list.get(list.size() - 1);
// Check that the last entry contains the reason string
Assert.assertTrue(lastMaintenanceEntry.contains(reason));
System.out.println("End test :" + TestHelper.getTestMethodName());
}
  /**
   * Enables maintenance mode with arbitrary JSON key/value pairs (no "reason" key)
   * and verifies they are stored as simple fields on the MaintenanceSignal znode
   * while the reason stays null, then disables maintenance mode and verifies the
   * znode is removed.
   */
  @Test(dependsOnMethods = "testGetMaintenanceHistory")
  public void testEnableDisableMaintenanceModeWithCustomFields() {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String cluster = _clusters.iterator().next();
    HelixDataAccessor accessor = new ZKHelixDataAccessor(cluster, _baseAccessor);
    String content = "{\"key1\":\"value1\",\"key2\":\"value2\"}";
    post("clusters/" + cluster, ImmutableMap.of("command", "enableMaintenanceMode"),
        Entity.entity(content, MediaType.APPLICATION_JSON_TYPE),
        Response.Status.OK.getStatusCode());
    // The signal exists, is user-triggered, and has no reason (none was supplied).
    MaintenanceSignal signal = accessor.getProperty(accessor.keyBuilder().maintenance());
    Assert.assertNotNull(signal);
    Assert.assertNull(signal.getReason());
    Assert.assertEquals(signal.getTriggeringEntity(), MaintenanceSignal.TriggeringEntity.USER);
    // The custom JSON keys are persisted as simple fields on the signal znode.
    Map<String, String> simpleFields = signal.getRecord().getSimpleFields();
    Assert.assertEquals(simpleFields.get("key1"), "value1");
    Assert.assertEquals(simpleFields.get("key2"), "value2");
    post("clusters/" + cluster, ImmutableMap.of("command", "disableMaintenanceMode"),
        Entity.entity("", MediaType.APPLICATION_JSON_TYPE), Response.Status.OK.getStatusCode());
    // Disabling removes the maintenance znode entirely.
    Assert.assertFalse(
        accessor.getBaseDataAccessor().exists(accessor.keyBuilder().maintenance().getPath(), 0));
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
  /**
   * Verifies purgeOfflineParticipants: with a very large "duration" parameter the
   * offline instances survive the purge; after lowering OFFLINE_DURATION_FOR_PURGE_MS
   * in the cluster config, a purge without an explicit duration (falling back to the
   * configured default) drops them. The instances are re-added at the end to restore
   * the shared cluster state for later tests.
   */
  @Test(dependsOnMethods = "testEnableDisableMaintenanceModeWithCustomFields")
  public void testPurgeOfflineParticipants() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String cluster = _clusters.iterator().next();
    HelixDataAccessor accessor = new ZKHelixDataAccessor(cluster, _baseAccessor);
    // NOTE(review): these three instances are assumed to be offline already — set up
    // by earlier tests in this class; confirm if test ordering changes.
    String instance1 = cluster + "localhost_12923";
    String instance2 = cluster + "localhost_12924";
    String instance3 = cluster + "localhost_12926";
    post("clusters/" + cluster,
        ImmutableMap.of("command", "purgeOfflineParticipants", "duration", "100000000"), null,
        Response.Status.OK.getStatusCode());
    //Although the three instances are not in live instances, the timeout is not met, and
    // instances will not be dropped by purging action
    Assert.assertTrue(accessor.getBaseDataAccessor()
        .exists(accessor.keyBuilder().instanceConfig(instance1).getPath(), 0));
    Assert.assertTrue(accessor.getBaseDataAccessor()
        .exists(accessor.keyBuilder().instanceConfig(instance2).getPath(), 0));
    Assert.assertTrue(accessor.getBaseDataAccessor()
        .exists(accessor.keyBuilder().instanceConfig(instance3).getPath(), 0));
    // Lower the cluster-level purge timeout to 100 ms so the next purge takes effect.
    ClusterConfig configDelta = new ClusterConfig(cluster);
    configDelta.getRecord()
        .setSimpleField(ClusterConfig.ClusterConfigProperty.OFFLINE_DURATION_FOR_PURGE_MS.name(),
            "100");
    updateClusterConfigFromRest(cluster, configDelta, Command.update);
    //Purge again without customized timeout, and the action will use default timeout value.
    post("clusters/" + cluster, ImmutableMap.of("command", "purgeOfflineParticipants"), null,
        Response.Status.OK.getStatusCode());
    // All three offline instances should now have been dropped.
    Assert.assertFalse(accessor.getBaseDataAccessor()
        .exists(accessor.keyBuilder().instanceConfig(instance1).getPath(), 0));
    Assert.assertFalse(accessor.getBaseDataAccessor()
        .exists(accessor.keyBuilder().instanceConfig(instance2).getPath(), 0));
    Assert.assertFalse(accessor.getBaseDataAccessor()
        .exists(accessor.keyBuilder().instanceConfig(instance3).getPath(), 0));
    // reset cluster status to previous one
    _gSetupTool.addInstanceToCluster(cluster, instance1);
    _gSetupTool.addInstanceToCluster(cluster, instance2);
    _gSetupTool.addInstanceToCluster(cluster, instance3);
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
@Test(dependsOnMethods = "testEnableDisableMaintenanceModeWithCustomFields")
public void testGetStateModelDef() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String cluster = "TestCluster_1";
String urlBase = "clusters/TestCluster_1/statemodeldefs/";
Map<String, Object> defMap = getMapResponseFromRest(urlBase);
Assert.assertTrue(defMap.size() == 2);
Assert.assertTrue(defMap.get("stateModelDefinitions") instanceof List);
List<String> stateModelNames = (List<String>) defMap.get("stateModelDefinitions");
Assert.assertEquals(stateModelNames.size(), 7);
String oneModel = stateModelNames.get(1);
String twoModel = stateModelNames.get(2);
String oneModelUri = urlBase + oneModel;
String oneResult = get(oneModelUri, null, Response.Status.OK.getStatusCode(), true);
ZNRecord oneRecord = toZNRecord(oneResult);
String twoResult =
get("clusters/" + cluster + "/statemodeldefs/" + twoModel, null, Response.Status.OK.getStatusCode(), true);
ZNRecord twoRecord = toZNRecord(twoResult);
// delete one, expect success
String deleteOneUri = urlBase + oneRecord.getId();
Response deleteOneRsp = target(deleteOneUri).request().delete();
Assert.assertEquals(deleteOneRsp.getStatus(), Response.Status.OK.getStatusCode());
Response queryRsp = target(oneModelUri).request().get();
Assert.assertTrue(queryRsp.getStatus() == Response.Status.BAD_REQUEST.getStatusCode());
// delete one again, expect success
Response deleteOneRsp2 = target(deleteOneUri).request().delete();
Assert.assertTrue(deleteOneRsp2.getStatus() == Response.Status.OK.getStatusCode());
// create the delete one, expect success
Response createOneRsp = target(oneModelUri).request()
.put(Entity.entity(OBJECT_MAPPER.writeValueAsString(oneRecord), MediaType.APPLICATION_JSON_TYPE));
Assert.assertTrue(createOneRsp.getStatus() == Response.Status.OK.getStatusCode());
// create the delete one again, expect failure
Response createOneRsp2 = target(oneModelUri).request()
.put(Entity.entity(OBJECT_MAPPER.writeValueAsString(oneRecord), MediaType.APPLICATION_JSON_TYPE));
Assert.assertTrue(createOneRsp2.getStatus() == Response.Status.BAD_REQUEST.getStatusCode());
// set the delete one with a modification
ZNRecord newRecord = new ZNRecord(twoRecord, oneRecord.getId());
Response setOneRsp = target(oneModelUri).request()
.post(Entity.entity(OBJECT_MAPPER.writeValueAsString(newRecord), MediaType.APPLICATION_JSON_TYPE));
Assert.assertTrue(setOneRsp.getStatus() == Response.Status.OK.getStatusCode());
String oneResult2 = get(oneModelUri, null, Response.Status.OK.getStatusCode(), true);
ZNRecord oneRecord2 = toZNRecord(oneResult2);
Assert.assertEquals(oneRecord2, newRecord);
// set the delete one with original; namely restore the original condition
Response setOneRsp2 = target(oneModelUri).request()
.post(Entity.entity(OBJECT_MAPPER.writeValueAsString(oneRecord), MediaType.APPLICATION_JSON_TYPE));
Assert.assertTrue(setOneRsp2.getStatus() == Response.Status.OK.getStatusCode());
String oneResult3 = get(oneModelUri, null, Response.Status.OK.getStatusCode(), true);
ZNRecord oneRecord3 = toZNRecord(oneResult3);
Assert.assertEquals(oneRecord3, oneRecord);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
  /**
   * End-to-end test of cluster activation on a super (controller-as-a-service)
   * cluster: activates a normal cluster on the super cluster, verifies the resulting
   * FULL_AUTO/WAGED IdealState and that the LEADER replica in the super cluster's
   * external view matches the normal cluster's controller leader, then deactivates
   * and re-activates before tearing everything down.
   */
  @Test(dependsOnMethods = "testGetStateModelDef")
  public void testActivateSuperCluster() throws Exception {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    final String ACTIVATE_SUPER_CLUSTER = "RestSuperClusterActivationTest_SuperCluster";
    final String ACTIVATE_NORM_CLUSTER = "RestSuperClusterActivationTest_NormalCluster";
    // create testCluster
    _gSetupTool.addCluster(ACTIVATE_NORM_CLUSTER, true);
    ClusterConfig clusterConfig = new ClusterConfig(ACTIVATE_NORM_CLUSTER);
    clusterConfig.setFaultZoneType("helixZoneId");
    _configAccessor.setClusterConfig(ACTIVATE_NORM_CLUSTER, clusterConfig);
    Set<String> resources = createResourceConfigs(ACTIVATE_NORM_CLUSTER, 8);
    // create superCluster and start a distributed controller on each of its instances
    _gSetupTool.addCluster(ACTIVATE_SUPER_CLUSTER,true);
    ClusterConfig superClusterConfig = new ClusterConfig(ACTIVATE_SUPER_CLUSTER);
    _configAccessor.setClusterConfig(ACTIVATE_SUPER_CLUSTER, superClusterConfig);
    Set<String> instances = createInstances(ACTIVATE_SUPER_CLUSTER, 4);
    List<ClusterDistributedController> clusterDistributedControllers = new ArrayList<>();
    for (String instance : instances) {
      ClusterDistributedController controllerParticipant =
          new ClusterDistributedController(ZK_ADDR, ACTIVATE_SUPER_CLUSTER, instance);
      clusterDistributedControllers.add(controllerParticipant);
      controllerParticipant.syncStart();
    }
    // activate the normal cluster on the super cluster
    post("clusters/" + ACTIVATE_NORM_CLUSTER,
        ImmutableMap.of("command", "activate", "superCluster", ACTIVATE_SUPER_CLUSTER),
        Entity.entity("", MediaType.APPLICATION_JSON_TYPE), Response.Status.OK .getStatusCode());
    HelixDataAccessor accessor = new ZKHelixDataAccessor(ACTIVATE_SUPER_CLUSTER, _baseAccessor);
    PropertyKey.Builder keyBuilder = accessor.keyBuilder();
    final HelixDataAccessor normalAccessor = new ZKHelixDataAccessor(ACTIVATE_NORM_CLUSTER, _baseAccessor);
    final PropertyKey.Builder normKeyBuilder = normalAccessor.keyBuilder();
    // wait until a controller leader has been elected for the normal cluster
    boolean result = TestHelper.verify(() -> {
      LiveInstance leader = normalAccessor.getProperty(normKeyBuilder.controllerLeader());
      return leader != null;
    }, 12000);
    Assert.assertTrue(result);
    BestPossibleExternalViewVerifier verifier =
        new BestPossibleExternalViewVerifier.Builder(ACTIVATE_SUPER_CLUSTER).setZkAddr(ZK_ADDR)
            .setZkClient(_gZkClient).build();
    Assert.assertTrue(verifier.verifyByPolling());
    // activation creates a FULL_AUTO, WAGED-rebalanced resource for the normal cluster
    IdealState idealState = accessor.getProperty(keyBuilder.idealStates(ACTIVATE_NORM_CLUSTER));
    Assert.assertEquals(idealState.getRebalanceMode(), IdealState.RebalanceMode.FULL_AUTO);
    Assert.assertEquals(idealState.getRebalancerClassName(), WagedRebalancer.class.getName());
    // Note, set expected replicas value to 3, as the same value of DEFAULT_SUPERCLUSTER_REPLICA in ClusterAccessor.
    Assert.assertEquals(idealState.getReplicas(), "3");
    // the LEADER replica in the super cluster's external view must be the normal
    // cluster's controller leader
    ExternalView externalView = accessor.getProperty(keyBuilder.externalView(ACTIVATE_NORM_CLUSTER));
    Map<String, String> extViewMapping = externalView.getRecord().getMapField(ACTIVATE_NORM_CLUSTER);
    String superClusterleader = null;
    for (Map.Entry<String, String> entry: extViewMapping.entrySet()) {
      if (entry.getValue().equals("LEADER")) {
        superClusterleader = entry.getKey();
      }
    }
    LiveInstance leader = normalAccessor.getProperty(normKeyBuilder.controllerLeader());
    Assert.assertEquals(leader.getId(), superClusterleader);
    // deactivate cluster ACTIVATE_NORM_CLUSTER from super cluster ACTIVATE_SUPER_CLUSTER
    post("clusters/" + ACTIVATE_NORM_CLUSTER,
        ImmutableMap.of("command", "deactivate", "superCluster", ACTIVATE_SUPER_CLUSTER),
        Entity.entity("", MediaType.APPLICATION_JSON_TYPE), Response.Status.OK .getStatusCode());
    idealState = accessor.getProperty(keyBuilder.idealStates(ACTIVATE_NORM_CLUSTER));
    Assert.assertNull(idealState);
    // re-activate and expect the same IdealState settings again
    post("clusters/" + ACTIVATE_NORM_CLUSTER,
        ImmutableMap.of("command", "activate", "superCluster", ACTIVATE_SUPER_CLUSTER),
        Entity.entity("", MediaType.APPLICATION_JSON_TYPE), Response.Status.OK .getStatusCode());
    idealState = accessor.getProperty(keyBuilder.idealStates(ACTIVATE_NORM_CLUSTER));
    Assert.assertNotNull(idealState);
    Assert.assertEquals(idealState.getRebalanceMode(), IdealState.RebalanceMode.FULL_AUTO);
    Assert.assertEquals(idealState.getRebalancerClassName(), WagedRebalancer.class.getName());
    Assert.assertEquals(idealState.getReplicas(), "3");
    // clean up by tearing down controllers and delete clusters
    for (ClusterDistributedController dc: clusterDistributedControllers) {
      if (dc != null && dc.isConnected()) {
        dc.syncStop();
      }
    }
    _gSetupTool.deleteCluster(ACTIVATE_NORM_CLUSTER);
    _gSetupTool.deleteCluster(ACTIVATE_SUPER_CLUSTER);
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
@Test(dependsOnMethods = "testActivateSuperCluster")
public void testEnableWagedRebalanceForAllResources() {
String cluster = "TestCluster_2";
post("clusters/" + cluster, ImmutableMap.of("command", "enableWagedRebalanceForAllResources"),
Entity.entity("", MediaType.APPLICATION_JSON_TYPE), Response.Status.OK.getStatusCode());
for (String resource : _gSetupTool.getClusterManagementTool().getResourcesInCluster(cluster)) {
IdealState idealState =
_gSetupTool.getClusterManagementTool().getResourceIdealState(cluster, resource);
Assert.assertEquals(idealState.getRebalancerClassName(), WagedRebalancer.class.getName());
}
}
@Test(dependsOnMethods = "testEnableWagedRebalanceForAllResources")
public void testCreateRESTConfig() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String cluster = _clusters.iterator().next();
RESTConfig restConfigRest = new RESTConfig(cluster);
restConfigRest.set(RESTConfig.SimpleFields.CUSTOMIZED_HEALTH_URL, "http://*:00");
put("clusters/" + cluster + "/restconfig", null, Entity
.entity(OBJECT_MAPPER.writeValueAsString(restConfigRest.getRecord()),
MediaType.APPLICATION_JSON_TYPE), Response.Status.OK.getStatusCode());
RESTConfig restConfigZK = _configAccessor.getRESTConfig(cluster);
Assert.assertEquals(restConfigZK, restConfigRest,
"rest config from response: " + restConfigRest + " vs rest config actually: "
+ restConfigZK);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
  /**
   * Exercises RESTConfig POST commands: updates an entry and verifies persistence,
   * deletes the entry (leaving an empty config), and confirms that updating the
   * RESTConfig of a nonexistent cluster returns 404.
   */
  @Test(dependsOnMethods = "testCreateRESTConfig")
  public void testUpdateRESTConfig() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String cluster = _clusters.iterator().next();
    RESTConfig restConfigRest = new RESTConfig(cluster);
    // Update an entry
    restConfigRest.set(RESTConfig.SimpleFields.CUSTOMIZED_HEALTH_URL, "http://*:01");
    Entity entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(restConfigRest.getRecord()),
        MediaType.APPLICATION_JSON_TYPE);
    post("clusters/" + cluster + "/restconfig", ImmutableMap.of("command", Command.update.name()),
        entity, Response.Status.OK.getStatusCode());
    RESTConfig restConfigZK = _configAccessor.getRESTConfig(cluster);
    Assert.assertEquals(restConfigZK, restConfigRest,
        "rest config from response: " + restConfigRest + " vs rest config actually: "
            + restConfigZK);
    // Delete an entry (a null value removes the field under Command.delete)
    restConfigRest.set(RESTConfig.SimpleFields.CUSTOMIZED_HEALTH_URL, null);
    entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(restConfigRest.getRecord()),
        MediaType.APPLICATION_JSON_TYPE);
    post("clusters/" + cluster + "/restconfig", ImmutableMap.of("command", Command.delete.name()),
        entity, Response.Status.OK.getStatusCode());
    // After the delete the stored config equals a freshly-constructed empty one.
    restConfigZK = _configAccessor.getRESTConfig(cluster);
    Assert.assertEquals(restConfigZK, new RESTConfig(cluster),
        "rest config from response: " + new RESTConfig(cluster) + " vs rest config actually: "
            + restConfigZK);
    // Update a cluster rest config that the cluster does not exist
    String wrongClusterId = "wrong_cluster_id";
    restConfigRest = new RESTConfig(wrongClusterId);
    restConfigRest.set(RESTConfig.SimpleFields.CUSTOMIZED_HEALTH_URL, "http://*:01");
    entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(restConfigRest.getRecord()),
        MediaType.APPLICATION_JSON_TYPE);
    post("clusters/" + wrongClusterId + "/restconfig",
        ImmutableMap.of("command", Command.update.name()), entity,
        Response.Status.NOT_FOUND.getStatusCode());
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
@Test(dependsOnMethods = "testUpdateRESTConfig")
public void testDeleteRESTConfig() {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String cluster = _clusters.iterator().next();
delete("clusters/" + cluster + "/restconfig", Response.Status.OK.getStatusCode());
get("clusters/" + cluster + "/restconfig", null, Response.Status.NOT_FOUND.getStatusCode(), true);
delete("clusters/" + cluster + "/restconfig", Response.Status.OK.getStatusCode());
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testActivateSuperCluster")
public void testAddClusterWithCloudConfig() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
ZNRecord record = new ZNRecord("testZnode");
record.setBooleanField(CloudConfig.CloudConfigProperty.CLOUD_ENABLED.name(), true);
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_ID.name(), "TestCloudID");
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_PROVIDER.name(),
CloudProvider.AZURE.name());
Map<String, String> map = new HashMap<>();
map.put("addCloudConfig", "true");
put("clusters/" + clusterName, map,
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
Response.Status.CREATED.getStatusCode());
// Read CloudConfig from Zookeeper and check the content
ConfigAccessor _configAccessor = new ConfigAccessor(ZK_ADDR);
CloudConfig cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
Assert.assertTrue(cloudConfigFromZk.isCloudEnabled());
Assert.assertEquals(cloudConfigFromZk.getCloudID(), "TestCloudID");
Assert.assertEquals(cloudConfigFromZk.getCloudProvider(), CloudProvider.AZURE.name());
ClusterConfig clusterConfigFromZk = _configAccessor.getClusterConfig(clusterName);
Assert.assertEquals(clusterConfigFromZk.getTopology(), AzureConstants.AZURE_TOPOLOGY);
Assert.assertEquals(clusterConfigFromZk.getFaultZoneType(), AzureConstants.AZURE_FAULT_ZONE_TYPE);
Assert.assertTrue(clusterConfigFromZk.isTopologyAwareEnabled());
}
@Test(dependsOnMethods = "testAddClusterWithCloudConfig")
public void testAddClusterWithInvalidCloudConfig() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
ZNRecord record = new ZNRecord("testZnode");
record.setBooleanField(CloudConfig.CloudConfigProperty.CLOUD_ENABLED.name(), true);
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_ID.name(), "TestCloudID");
Map<String, String> map = new HashMap<>();
map.put("addCloudConfig", "true");
// Cloud Provider has not been defined. Result of this rest call will be BAD_REQUEST.
put("clusters/" + clusterName, map,
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
Response.Status.BAD_REQUEST.getStatusCode());
}
@Test(dependsOnMethods = "testAddClusterWithInvalidCloudConfig")
public void testAddClusterWithInvalidCustomizedCloudConfig() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
ZNRecord record = new ZNRecord("testZnode");
record.setBooleanField(CloudConfig.CloudConfigProperty.CLOUD_ENABLED.name(), true);
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_ID.name(), "TestCloudID");
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_PROVIDER.name(),
CloudProvider.CUSTOMIZED.name());
Map<String, String> map = new HashMap<>();
map.put("addCloudConfig", "true");
// Cloud Provider is customized. CLOUD_INFO_PROCESSOR_NAME and CLOUD_INFO_SOURCE fields are
// required.
put("clusters/" + clusterName, map,
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
Response.Status.BAD_REQUEST.getStatusCode());
}
@Test(dependsOnMethods = "testAddClusterWithInvalidCustomizedCloudConfig")
public void testAddClusterWithValidCustomizedCloudConfig() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
ZNRecord record = new ZNRecord("testZnode");
record.setBooleanField(CloudConfig.CloudConfigProperty.CLOUD_ENABLED.name(), true);
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_ID.name(), "TestCloudID");
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_PROVIDER.name(),
CloudProvider.CUSTOMIZED.name());
List<String> sourceList = new ArrayList<String>();
sourceList.add("TestURL");
record.setListField(CloudConfig.CloudConfigProperty.CLOUD_INFO_SOURCE.name(), sourceList);
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_INFO_PROCESSOR_NAME.name(),
"TestProcessorName");
Map<String, String> map = new HashMap<>();
map.put("addCloudConfig", "true");
// Cloud Provider is customized. CLOUD_INFO_PROCESSOR_NAME and CLOUD_INFO_SOURCE fields are
// required.
put("clusters/" + clusterName, map,
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
Response.Status.CREATED.getStatusCode());
// Read CloudConfig from Zookeeper and check the content
ConfigAccessor _configAccessor = new ConfigAccessor(ZK_ADDR);
CloudConfig cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
Assert.assertTrue(cloudConfigFromZk.isCloudEnabled());
Assert.assertEquals(cloudConfigFromZk.getCloudID(), "TestCloudID");
List<String> listUrlFromZk = cloudConfigFromZk.getCloudInfoSources();
Assert.assertEquals(listUrlFromZk.get(0), "TestURL");
Assert.assertEquals(cloudConfigFromZk.getCloudInfoProcessorName(), "TestProcessorName");
Assert.assertEquals(cloudConfigFromZk.getCloudProvider(), CloudProvider.CUSTOMIZED.name());
}
@Test(dependsOnMethods = "testAddClusterWithValidCustomizedCloudConfig")
public void testAddClusterWithCloudConfigDisabledCloud() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
ZNRecord record = new ZNRecord("testZnode");
record.setBooleanField(CloudConfig.CloudConfigProperty.CLOUD_ENABLED.name(), false);
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_ID.name(), "TestCloudID");
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_PROVIDER.name(),
CloudProvider.AZURE.name());
Map<String, String> map = new HashMap<>();
map.put("addCloudConfig", "true");
put("clusters/" + clusterName, map,
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
Response.Status.CREATED.getStatusCode());
// Read CloudConfig from Zookeeper and check the content
ConfigAccessor _configAccessor = new ConfigAccessor(ZK_ADDR);
CloudConfig cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
Assert.assertFalse(cloudConfigFromZk.isCloudEnabled());
Assert.assertEquals(cloudConfigFromZk.getCloudID(), "TestCloudID");
Assert.assertEquals(cloudConfigFromZk.getCloudProvider(), CloudProvider.AZURE.name());
}
@Test(dependsOnMethods = "testAddClusterWithCloudConfigDisabledCloud")
public void testAddCloudConfigNonExistedCluster() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String urlBase = "clusters/TestCloud/cloudconfig/";
ZNRecord record = new ZNRecord("TestCloud");
record.setBooleanField(CloudConfig.CloudConfigProperty.CLOUD_ENABLED.name(), true);
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_PROVIDER.name(),
CloudProvider.AZURE.name());
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_ID.name(), "TestCloudID");
List<String> testList = new ArrayList<String>();
testList.add("TestURL");
record.setListField(CloudConfig.CloudConfigProperty.CLOUD_INFO_SOURCE.name(), testList);
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_INFO_PROCESSOR_NAME.name(),
"TestProcessor");
// Not found since the cluster is not setup yet.
put(urlBase, null,
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
Response.Status.NOT_FOUND.getStatusCode());
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testAddCloudConfigNonExistedCluster")
public void testAddCloudConfig() throws Exception {
System.out.println("Start test :" + TestHelper.getTestMethodName());
_gSetupTool.addCluster("TestCloud", true);
String urlBase = "clusters/TestCloud/cloudconfig/";
ZNRecord record = new ZNRecord("TestCloud");
record.setBooleanField(CloudConfig.CloudConfigProperty.CLOUD_ENABLED.name(), true);
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_PROVIDER.name(),
CloudProvider.CUSTOMIZED.name());
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_ID.name(), "TestCloudID");
List<String> testList = new ArrayList<String>();
testList.add("TestURL");
record.setListField(CloudConfig.CloudConfigProperty.CLOUD_INFO_SOURCE.name(), testList);
// Bad request since Processor has not been defined.
put(urlBase, null,
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
Response.Status.BAD_REQUEST.getStatusCode());
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_INFO_PROCESSOR_NAME.name(),
"TestProcessorName");
// Now response should be OK since all fields are set
put(urlBase, null,
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
Response.Status.OK.getStatusCode());
// Read CloudConfig from Zookeeper and check the content
ConfigAccessor _configAccessor = new ConfigAccessor(ZK_ADDR);
CloudConfig cloudConfigFromZk = _configAccessor.getCloudConfig("TestCloud");
Assert.assertTrue(cloudConfigFromZk.isCloudEnabled());
Assert.assertEquals(cloudConfigFromZk.getCloudID(), "TestCloudID");
List<String> listUrlFromZk = cloudConfigFromZk.getCloudInfoSources();
Assert.assertEquals(listUrlFromZk.get(0), "TestURL");
Assert.assertEquals(cloudConfigFromZk.getCloudInfoProcessorName(), "TestProcessorName");
Assert.assertEquals(cloudConfigFromZk.getCloudProvider(), CloudProvider.CUSTOMIZED.name());
// Now test the getCloudConfig method.
String body = get(urlBase, null, Response.Status.OK.getStatusCode(), true);
ZNRecord recordFromRest = toZNRecord(body);
CloudConfig cloudConfigRest = new CloudConfig.Builder(recordFromRest).build();
CloudConfig cloudConfigZk = _configAccessor.getCloudConfig("TestCloud");
// Check that the CloudConfig from Zk and REST get method are equal
Assert.assertEquals(cloudConfigRest, cloudConfigZk);
// Check the fields individually
Assert.assertTrue(cloudConfigRest.isCloudEnabled());
Assert.assertEquals(cloudConfigRest.getCloudID(), "TestCloudID");
Assert.assertEquals(cloudConfigRest.getCloudProvider(), CloudProvider.CUSTOMIZED.name());
List<String> listUrlFromRest = cloudConfigRest.getCloudInfoSources();
Assert.assertEquals(listUrlFromRest.get(0), "TestURL");
Assert.assertEquals(cloudConfigRest.getCloudInfoProcessorName(), "TestProcessorName");
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testAddCloudConfig")
public void testDeleteCloudConfig() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
ZNRecord record = new ZNRecord("testZnode");
record.setBooleanField(CloudConfig.CloudConfigProperty.CLOUD_ENABLED.name(), true);
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_ID.name(), "TestCloudID");
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_PROVIDER.name(),
CloudProvider.AZURE.name());
Map<String, String> map = new HashMap<>();
map.put("addCloudConfig", "true");
put("clusters/" + clusterName, map,
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
Response.Status.CREATED.getStatusCode());
// Read CloudConfig from Zookeeper and make sure it has been created
ConfigAccessor _configAccessor = new ConfigAccessor(ZK_ADDR);
CloudConfig cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
Assert.assertNotNull(cloudConfigFromZk);
String urlBase = "clusters/" + clusterName + "/cloudconfig/";
delete(urlBase, Response.Status.OK.getStatusCode());
// Read CloudConfig from Zookeeper and make sure it has been removed
_configAccessor = new ConfigAccessor(ZK_ADDR);
cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
Assert.assertNull(cloudConfigFromZk);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
  /**
   * Verifies that a partial delete of a CloudConfig removes only the fields present
   * in the request body (CLOUD_ID and CLOUD_PROVIDER here) while preserving the
   * remaining fields (CLOUD_ENABLED, CLOUD_INFO_PROCESSOR_NAME).
   */
  @Test(dependsOnMethods = "testDeleteCloudConfig")
  public void testPartialDeleteCloudConfig() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String className = TestHelper.getTestClassName();
    String methodName = TestHelper.getTestMethodName();
    String clusterName = className + "_" + methodName;
    // Full CloudConfig payload used both for cluster setup and the REST PUT below.
    ZNRecord record = new ZNRecord(clusterName);
    record.setBooleanField(CloudConfig.CloudConfigProperty.CLOUD_ENABLED.name(), true);
    record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_PROVIDER.name(),
        CloudProvider.AZURE.name());
    record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_ID.name(), "TestCloudID");
    record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_INFO_PROCESSOR_NAME.name(), "TestProcessor");
    // Cluster is first created directly via the setup tool with the cloud config attached.
    _gSetupTool.addCluster(clusterName, true, new CloudConfig.Builder(record).build());
    String urlBase = "clusters/" + clusterName +"/cloudconfig/";
    Map<String, String> map = new HashMap<>();
    map.put("addCloudConfig", "true");
    // NOTE(review): this PUT targets a cluster that already exists yet expects
    // 201 CREATED — presumably the endpoint treats the re-create as a fresh
    // creation; confirm against ClusterAccessor semantics.
    put("clusters/" + clusterName, map,
        Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
        Response.Status.CREATED.getStatusCode());
    // Read CloudConfig from Zookeeper and make sure it has been created
    ConfigAccessor _configAccessor = new ConfigAccessor(ZK_ADDR);
    CloudConfig cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
    Assert.assertNotNull(cloudConfigFromZk);
    // Build a delete request containing ONLY the fields that should be removed.
    record = new ZNRecord(clusterName);
    Map<String, String> map1 = new HashMap<>();
    map1.put("command", Command.delete.name());
    record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_ID.name(), "TestCloudID");
    record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_PROVIDER.name(), CloudProvider.AZURE.name());
    post(urlBase, map1, Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
        Response.Status.OK.getStatusCode());
    // Read CloudConfig from Zookeeper and make sure it has been removed
    _configAccessor = new ConfigAccessor(ZK_ADDR);
    cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
    // Only the requested fields are gone; the rest of the config survives.
    Assert.assertNull(cloudConfigFromZk.getCloudID());
    Assert.assertNull(cloudConfigFromZk.getCloudProvider());
    Assert.assertTrue(cloudConfigFromZk.isCloudEnabled());
    Assert.assertEquals(cloudConfigFromZk.getCloudInfoProcessorName(),"TestProcessor");
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
@Test(dependsOnMethods = "testPartialDeleteCloudConfig")
public void testUpdateCloudConfig() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
_gSetupTool.addCluster("TestCloud", true);
String urlBase = "clusters/TestCloud/cloudconfig/";
ZNRecord record = new ZNRecord("TestCloud");
record.setBooleanField(CloudConfig.CloudConfigProperty.CLOUD_ENABLED.name(), true);
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_ID.name(), "TestCloudID");
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_PROVIDER.name(),
CloudProvider.AZURE.name());
// Fist add CloudConfig to the cluster
put(urlBase, null,
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
Response.Status.OK.getStatusCode());
// Now get the Cloud Config and make sure the information is correct
String body = get(urlBase, null, Response.Status.OK.getStatusCode(), true);
ZNRecord recordFromRest = toZNRecord(body);
CloudConfig cloudConfigRest = new CloudConfig.Builder(recordFromRest).build();
Assert.assertTrue(cloudConfigRest.isCloudEnabled());
Assert.assertEquals(cloudConfigRest.getCloudID(), "TestCloudID");
Assert.assertEquals(cloudConfigRest.getCloudProvider(), CloudProvider.AZURE.name());
// Now put new information in the ZNRecord
record.setBooleanField(CloudConfig.CloudConfigProperty.CLOUD_ENABLED.name(), true);
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_PROVIDER.name(),
CloudProvider.CUSTOMIZED.name());
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_ID.name(), "TestCloudIdNew");
List<String> testList = new ArrayList<String>();
testList.add("TestURL");
record.setListField(CloudConfig.CloudConfigProperty.CLOUD_INFO_SOURCE.name(), testList);
record.setSimpleField(CloudConfig.CloudConfigProperty.CLOUD_INFO_PROCESSOR_NAME.name(),
"TestProcessorName");
Map<String, String> map1 = new HashMap<>();
map1.put("command", AbstractResource.Command.update.name());
post(urlBase, map1,
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
Response.Status.OK.getStatusCode());
// Now get the Cloud Config and make sure the information has been updated
body = get(urlBase, null, Response.Status.OK.getStatusCode(), true);
recordFromRest = toZNRecord(body);
cloudConfigRest = new CloudConfig.Builder(recordFromRest).build();
Assert.assertTrue(cloudConfigRest.isCloudEnabled());
Assert.assertEquals(cloudConfigRest.getCloudID(), "TestCloudIdNew");
Assert.assertEquals(cloudConfigRest.getCloudProvider(), CloudProvider.CUSTOMIZED.name());
List<String> listUrlFromRest = cloudConfigRest.getCloudInfoSources();
Assert.assertEquals(listUrlFromRest.get(0), "TestURL");
Assert.assertEquals(cloudConfigRest.getCloudInfoProcessorName(), "TestProcessorName");
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testUpdateCloudConfig")
public void testAddCustomizedConfigNonExistedCluster() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String urlBase = "clusters/TestCluster/customizedstateconfig/";
ZNRecord record = new ZNRecord("TestCustomizedStateConfig");
List<String> testList = new ArrayList<String>();
testList.add("mockType1");
record.setListField(
CustomizedStateConfig.CustomizedStateProperty.AGGREGATION_ENABLED_TYPES
.name(),
testList);
// Expecting not found response since the cluster is not setup yet.
put(urlBase, null,
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
Response.Status.NOT_FOUND.getStatusCode());
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testAddCustomizedConfigNonExistedCluster")
public void testAddCustomizedConfig() throws Exception {
System.out.println("Start test :" + TestHelper.getTestMethodName());
_gSetupTool.addCluster("TestClusterCustomized", true);
String urlBase = "clusters/TestClusterCustomized/customized-state-config/";
ZNRecord record = new ZNRecord("TestCustomizedStateConfig");
List<String> testList = new ArrayList<String>();
testList.add("mockType1");
testList.add("mockType2");
record.setListField(
CustomizedStateConfig.CustomizedStateProperty.AGGREGATION_ENABLED_TYPES
.name(),
testList);
put(urlBase, null,
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
Response.Status.OK.getStatusCode());
// Read CustomizedStateConfig from Zookeeper and check the content
ConfigAccessor _configAccessor = new ConfigAccessor(ZK_ADDR);
CustomizedStateConfig customizedConfigFromZk = _configAccessor.getCustomizedStateConfig("TestClusterCustomized");
List<String> listTypesFromZk = customizedConfigFromZk.getAggregationEnabledTypes();
Assert.assertEquals(listTypesFromZk.get(0), "mockType1");
Assert.assertEquals(listTypesFromZk.get(1), "mockType2");
// Now test the getCustomizedStateConfig method.
String body = get(urlBase, null, Response.Status.OK.getStatusCode(), true);
ZNRecord recordFromRest = toZNRecord(body);
CustomizedStateConfig customizedConfigRest = new CustomizedStateConfig.Builder(recordFromRest).build();
CustomizedStateConfig customizedConfigZk = _configAccessor.getCustomizedStateConfig("TestClusterCustomized");
// Check that the CustomizedStateConfig from Zk and REST get method are equal
Assert.assertEquals(customizedConfigRest, customizedConfigZk);
// Check the fields individually
List<String> listUrlFromRest = customizedConfigRest.getAggregationEnabledTypes();
Assert.assertEquals(listUrlFromRest.get(0), "mockType1");
Assert.assertEquals(listUrlFromRest.get(1), "mockType2");
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testAddCustomizedConfig")
public void testDeleteCustomizedConfig() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
_gSetupTool.addCluster("TestClusterCustomized", true);
String urlBase = "clusters/TestClusterCustomized/customized-state-config/";
ZNRecord record = new ZNRecord("TestCustomizedStateConfig");
List<String> testList = new ArrayList<String>();
testList.add("mockType1");
record.setListField(
CustomizedStateConfig.CustomizedStateProperty.AGGREGATION_ENABLED_TYPES
.name(),
testList);
put(urlBase, null,
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
Response.Status.OK.getStatusCode());
// Read CustomizedStateConfig from Zookeeper and make sure it exists
ConfigAccessor _configAccessor = new ConfigAccessor(ZK_ADDR);
CustomizedStateConfig customizedConfigFromZk = _configAccessor.getCustomizedStateConfig("TestClusterCustomized");
Assert.assertNotNull(customizedConfigFromZk);
delete(urlBase, Response.Status.OK.getStatusCode());
customizedConfigFromZk = _configAccessor.getCustomizedStateConfig("TestClusterCustomized");
Assert.assertNull(customizedConfigFromZk);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testDeleteCustomizedConfig")
public void testUpdateCustomizedConfig() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
_gSetupTool.addCluster("TestClusterCustomized", true);
String urlBase = "clusters/TestClusterCustomized/customized-state-config/";
ZNRecord record = new ZNRecord("TestCustomizedStateConfig");
List<String> testList = new ArrayList<String>();
testList.add("mockType1");
record.setListField(
CustomizedStateConfig.CustomizedStateProperty.AGGREGATION_ENABLED_TYPES
.name(),
testList);
put(urlBase, null,
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE),
Response.Status.OK.getStatusCode());
// Read CustomizedStateConfig from Zookeeper and make sure it exists
ConfigAccessor _configAccessor = new ConfigAccessor(ZK_ADDR);
CustomizedStateConfig customizedConfigFromZk = _configAccessor.getCustomizedStateConfig("TestClusterCustomized");
Assert.assertNotNull(customizedConfigFromZk);
// Add new type to CustomizedStateConfig
Map<String, String> map1 = new HashMap<>();
map1.put("command", Command.add.name());
map1.put("type", "mockType2");
post(urlBase, map1, Entity.entity("", MediaType.APPLICATION_JSON_TYPE),
Response.Status.OK.getStatusCode());
customizedConfigFromZk =
_configAccessor.getCustomizedStateConfig("TestClusterCustomized");
List<String> listTypesFromZk = customizedConfigFromZk.getAggregationEnabledTypes();
Assert.assertEquals(listTypesFromZk.get(0), "mockType1");
Assert.assertEquals(listTypesFromZk.get(1), "mockType2");
// Remove a type to CustomizedStateConfig
Map<String, String> map2 = new HashMap<>();
map2.put("command", Command.delete.name());
map2.put("type", "mockType1");
post(urlBase, map2, Entity.entity("", MediaType.APPLICATION_JSON_TYPE),
Response.Status.OK.getStatusCode());
customizedConfigFromZk =
_configAccessor.getCustomizedStateConfig("TestClusterCustomized");
listTypesFromZk = customizedConfigFromZk.getAggregationEnabledTypes();
Assert.assertEquals(listTypesFromZk.get(0), "mockType2");
Assert.assertFalse(listTypesFromZk.contains("mockType1"));
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testUpdateCustomizedConfig")
public void testOnDemandRebalance() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
long currentTime = System.currentTimeMillis();
String cluster = "TestCluster_1";
new JerseyUriRequestBuilder("clusters/{}?command=onDemandRebalance").format(cluster)
.post(this, Entity.entity("{}", MediaType.APPLICATION_JSON_TYPE));
ClusterConfig config = _configAccessor.getClusterConfig(cluster);
long lastOnDemandRebalanceTime = config.getLastOnDemandRebalanceTimestamp();
Assert.assertFalse(lastOnDemandRebalanceTime == -1L,
"The last on-demand rebalance timestamp is not found.");
Assert.assertTrue(lastOnDemandRebalanceTime > currentTime, String.format(
"The last on-demand rebalance timestamp {} is stale. Expect a timestamp that is larger than {}.",
lastOnDemandRebalanceTime, currentTime));
// restore the state
config.setLastOnDemandRebalanceTimestamp(-1L);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
  /**
   * Exercises the cluster management-mode endpoint: freezes the cluster, verifies
   * the pause signal and persisted cluster status, checks the GET response shape
   * (with and without details), then restores NORMAL mode and confirms the pause
   * signal is removed.
   */
  @Test
  public void testClusterFreezeMode() throws Exception {
    String cluster = "TestCluster_0";
    HelixDataAccessor dataAccessor =
        new ZKHelixDataAccessor(cluster, new ZkBaseDataAccessor<>(_gZkClient));
    // Pause not existed
    Assert.assertNull(dataAccessor.getProperty(dataAccessor.keyBuilder().pause()));
    String endpoint = "clusters/" + cluster + "/management-mode";
    // Set cluster pause mode
    ClusterManagementModeRequest request = ClusterManagementModeRequest.newBuilder()
        .withMode(ClusterManagementMode.Type.CLUSTER_FREEZE)
        .withClusterName(cluster).build();
    String payload = OBJECT_MAPPER.writeValueAsString(request);
    post(endpoint, null, Entity.entity(payload, MediaType.APPLICATION_JSON_TYPE),
        Response.Status.OK.getStatusCode());
    // Freezing the cluster writes a cluster-level pause signal.
    PauseSignal pauseSignal = dataAccessor.getProperty(dataAccessor.keyBuilder().pause());
    Assert.assertNotNull(pauseSignal);
    Assert.assertTrue(pauseSignal.isClusterPause());
    Assert.assertFalse(pauseSignal.getCancelPendingST());
    // Wait until cluster status is persisted and equals CLUSTER_FREEZE
    TestHelper.verify(() -> {
      ClusterStatus clusterStatus =
          dataAccessor.getProperty(dataAccessor.keyBuilder().clusterStatus());
      return clusterStatus != null
          && clusterStatus.getManagementMode() == ClusterManagementMode.Type.CLUSTER_FREEZE;
    }, TestHelper.WAIT_DURATION);
    // Verify get cluster status GET request
    String body = get(endpoint, null, Response.Status.OK.getStatusCode(), true);
    Map<String, Object> responseMap = OBJECT_MAPPER.readerFor(Map.class).readValue(body);
    Assert.assertEquals(responseMap.get("mode"), ClusterManagementMode.Type.CLUSTER_FREEZE.name());
    // Depending on timing, it could IN_PROGRESS or COMPLETED.
    // It's just to verify the rest response format is correct
    String status = (String) responseMap.get("status");
    Assert.assertTrue(ClusterManagementMode.Status.IN_PROGRESS.name().equals(status)
        || ClusterManagementMode.Status.COMPLETED.name().equals(status));
    // The detailed view adds a "details" section with per-entity information.
    body = get(endpoint, ImmutableMap.of("showDetails", "true"), Response.Status.OK.getStatusCode(),
        true);
    responseMap = OBJECT_MAPPER.readerFor(Map.class).readValue(body);
    Map<String, Object> detailsMap = (Map<String, Object>) responseMap.get("details");
    status = (String) responseMap.get("status");
    Assert.assertEquals(responseMap.get("cluster"), cluster);
    Assert.assertEquals(responseMap.get("mode"), ClusterManagementMode.Type.CLUSTER_FREEZE.name());
    Assert.assertEquals(responseMap.get("status"), status);
    Assert.assertTrue(responseMap.containsKey("details"));
    Assert.assertTrue(detailsMap.containsKey("cluster"));
    Assert.assertTrue(detailsMap.containsKey("liveInstances"));
    // set normal mode
    request = ClusterManagementModeRequest.newBuilder()
        .withMode(ClusterManagementMode.Type.NORMAL)
        .withClusterName(cluster)
        .build();
    payload = OBJECT_MAPPER.writeValueAsString(request);
    post(endpoint, null, Entity.entity(payload, MediaType.APPLICATION_JSON_TYPE),
        Response.Status.OK.getStatusCode());
    // Pause signal is deleted
    pauseSignal = dataAccessor.getProperty(dataAccessor.keyBuilder().pause());
    Assert.assertNull(pauseSignal);
  }
private ClusterConfig getClusterConfigFromRest(String cluster) throws IOException {
String body = get("clusters/" + cluster + "/configs", null, Response.Status.OK.getStatusCode(), true);
ZNRecord record = toZNRecord(body);
ClusterConfig clusterConfigRest = new ClusterConfig(record);
ClusterConfig clusterConfigZk = _configAccessor.getClusterConfig(cluster);
Assert.assertEquals(clusterConfigZk, clusterConfigRest, "cluster config from response: "
+ clusterConfigRest + " vs cluster config actually: " + clusterConfigZk);
return clusterConfigRest;
}
private void updateClusterConfigFromRest(String cluster, ClusterConfig newConfig, Command command)
throws IOException {
_auditLogger.clearupLogs();
Entity entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(newConfig.getRecord()),
MediaType.APPLICATION_JSON_TYPE);
post("clusters/" + cluster + "/configs", ImmutableMap.of("command", command.name()), entity,
Response.Status.OK.getStatusCode());
validateAuditLogSize(1);
AuditLog auditLog = _auditLogger.getAuditLogs().get(0);
validateAuditLog(auditLog, HTTPMethods.POST.name(), "clusters/" + cluster + "/configs",
Response.Status.OK.getStatusCode(), null);
}
private void validateAuditLogSize(int expected) {
Assert.assertEquals(_auditLogger.getAuditLogs().size(), expected,
"AuditLog:" + _auditLogger.getAuditLogs().toString());
}
private ClusterConfig createClusterConfig(String cluster) {
ClusterConfig clusterConfig = _configAccessor.getClusterConfig(cluster);
clusterConfig.setPersistBestPossibleAssignment(true);
clusterConfig.getRecord().setSimpleField("SimpleField1", "Value1");
clusterConfig.getRecord().setSimpleField("SimpleField2", "Value2");
clusterConfig.getRecord().setListField("ListField1",
Arrays.asList("Value1", "Value2", "Value3"));
clusterConfig.getRecord().setListField("ListField2",
Arrays.asList("Value2", "Value1", "Value3"));
clusterConfig.getRecord().setMapField("MapField1", new HashMap<String, String>() {
{
put("key1", "value1");
put("key2", "value2");
}
});
clusterConfig.getRecord().setMapField("MapField2", new HashMap<String, String>() {
{
put("key3", "value1");
put("key4", "value2");
}
});
return clusterConfig;
}
  /**
   * Asserts that a captured audit log entry matches the expected HTTP method,
   * request path, response code and response entity, and that the bookkeeping
   * fields (client address, timestamps, namespace) were populated by the server.
   */
  private void validateAuditLog(AuditLog auditLog, String httpMethod, String requestPath,
      int statusCode, String responseEntity) {
    Assert.assertEquals(auditLog.getHttpMethod(), httpMethod);
    // Client/timing fields are environment-dependent; only assert presence.
    Assert.assertNotNull(auditLog.getClientIP());
    Assert.assertNotNull(auditLog.getClientHostPort());
    Assert.assertNotNull(auditLog.getCompleteTime());
    Assert.assertNotNull(auditLog.getStartTime());
    Assert.assertEquals(auditLog.getNamespace(), HelixRestNamespace.DEFAULT_NAMESPACE_NAME);
    Assert.assertEquals(auditLog.getRequestPath(), requestPath);
    Assert.assertEquals(auditLog.getResponseCode(), statusCode);
    Assert.assertEquals(auditLog.getResponseEntity(), responseEntity);
  }
private Map<String, Object> getMapResponseFromRest(String uri) throws JsonProcessingException {
String response = get(uri, null, Response.Status.OK.getStatusCode(), true);
return OBJECT_MAPPER.readValue(response, new TypeReference<HashMap<String, Object>>() { });
}
}
| 9,309 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/TestPerInstanceAccessor.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.TestHelper;
import org.apache.helix.constants.InstanceConstants;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.Message;
import org.apache.helix.rest.server.resources.AbstractResource;
import org.apache.helix.rest.server.resources.helix.InstancesAccessor;
import org.apache.helix.rest.server.resources.helix.PerInstanceAccessor;
import org.apache.helix.rest.server.util.JerseyUriRequestBuilder;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Integration tests for the per-instance REST endpoints served by
 * {@code PerInstanceAccessor} (stoppable checks, takeInstance/freeInstance,
 * messages, instance config CRUD and weight validation).
 */
public class TestPerInstanceAccessor extends AbstractTestClass {
  // Cluster fixture shared by all tests in this class.
  private final static String CLUSTER_NAME = "TestCluster_0";
  // Primary instance used by the config-oriented tests below.
  private final static String INSTANCE_NAME = CLUSTER_NAME + "localhost_12918";
@Test
public void testIsInstanceStoppable() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
Map<String, String> params = ImmutableMap.of("client", "espresso");
Entity entity =
Entity.entity(OBJECT_MAPPER.writeValueAsString(params), MediaType.APPLICATION_JSON_TYPE);
Response response = new JerseyUriRequestBuilder(
"clusters/{}/instances/{}/stoppable?skipHealthCheckCategories=CUSTOM_INSTANCE_CHECK,CUSTOM_PARTITION_CHECK").format(
STOPPABLE_CLUSTER, "instance1").post(this, entity);
String stoppableCheckResult = response.readEntity(String.class);
Map<String, Object> actualMap = OBJECT_MAPPER.readValue(stoppableCheckResult, Map.class);
List<String> failedChecks =
Arrays.asList("HELIX:EMPTY_RESOURCE_ASSIGNMENT", "HELIX:INSTANCE_NOT_ENABLED",
"HELIX:INSTANCE_NOT_STABLE");
Map<String, Object> expectedMap =
ImmutableMap.of("stoppable", false, "failedChecks", failedChecks);
Assert.assertEquals(actualMap, expectedMap);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testIsInstanceStoppable")
public void testTakeInstanceNegInput() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
post("clusters/TestCluster_0/instances/instance1/takeInstance", null,
Entity.entity("", MediaType.APPLICATION_JSON_TYPE),
Response.Status.BAD_REQUEST.getStatusCode(), true);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testTakeInstanceNegInput")
public void testTakeInstanceNegInput2() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
Response response = new JerseyUriRequestBuilder("clusters/{}/instances/{}/takeInstance")
.format(STOPPABLE_CLUSTER, "instance1").post(this, Entity.entity("{}", MediaType.APPLICATION_JSON_TYPE));
String takeInstanceResult = response.readEntity(String.class);
Map<String, Object> actualMap = OBJECT_MAPPER.readValue(takeInstanceResult, Map.class);
List<String> errorMsg = Arrays.asList("Invalid input. Please provide at least one health check or operation.");
Map<String, Object> expectedMap =
ImmutableMap.of("successful", false, "messages", errorMsg, "operationResult", "");
Assert.assertEquals(actualMap, expectedMap);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testTakeInstanceNegInput2")
public void testTakeInstanceHealthCheck() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String payload = "{ \"health_check_list\" : [\"HelixInstanceStoppableCheck\", \"CustomInstanceStoppableCheck\"],"
+ "\"health_check_config\" : { \"client\" : \"espresso\" }} ";
Response response = new JerseyUriRequestBuilder("clusters/{}/instances/{}/takeInstance")
.format(STOPPABLE_CLUSTER, "instance1").post(this, Entity.entity(payload, MediaType.APPLICATION_JSON_TYPE));
String takeInstanceResult = response.readEntity(String.class);
Map<String, Object> actualMap = OBJECT_MAPPER.readValue(takeInstanceResult, Map.class);
List<String> errorMsg = Arrays
.asList("HELIX:EMPTY_RESOURCE_ASSIGNMENT", "HELIX:INSTANCE_NOT_ENABLED",
"HELIX:INSTANCE_NOT_STABLE");
Map<String, Object> expectedMap =
ImmutableMap.of("successful", false, "messages", errorMsg, "operationResult", "");
Assert.assertEquals(actualMap, expectedMap);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testTakeInstanceNegInput2")
public void testTakeInstanceNonBlockingCheck() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String payload = "{ \"health_check_list\" : [\"HelixInstanceStoppableCheck\"],"
+ "\"health_check_config\" : { \"client\" : \"espresso\" , "
+ "\"continueOnFailures\" : [\"HELIX:EMPTY_RESOURCE_ASSIGNMENT\", \"HELIX:INSTANCE_NOT_ENABLED\","
+ " \"HELIX:INSTANCE_NOT_STABLE\"]} } ";
Response response = new JerseyUriRequestBuilder("clusters/{}/instances/{}/takeInstance")
.format(STOPPABLE_CLUSTER, "instance1").post(this, Entity.entity(payload, MediaType.APPLICATION_JSON_TYPE));
String takeInstanceResult = response.readEntity(String.class);
Map<String, Object> actualMap = OBJECT_MAPPER.readValue(takeInstanceResult, Map.class);
List<String> errorMsg = Arrays
.asList("HELIX:EMPTY_RESOURCE_ASSIGNMENT", "HELIX:INSTANCE_NOT_ENABLED",
"HELIX:INSTANCE_NOT_STABLE");
Map<String, Object> expectedMap =
ImmutableMap.of("successful", true, "messages", errorMsg, "operationResult", "");
Assert.assertEquals(actualMap, expectedMap);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testTakeInstanceHealthCheck")
public void testTakeInstanceOperationSuccess() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String payload =
"{ \"operation_list\" : [\"org.apache.helix.rest.server.TestOperationImpl\"]} ";
Response response = new JerseyUriRequestBuilder("clusters/{}/instances/{}/takeInstance")
.format(STOPPABLE_CLUSTER, "instance1")
.post(this, Entity.entity(payload, MediaType.APPLICATION_JSON_TYPE));
String takeInstanceResult = response.readEntity(String.class);
Map<String, Object> actualMap = OBJECT_MAPPER.readValue(takeInstanceResult, Map.class);
Map<String, Object> expectedMap = ImmutableMap
.of("successful", true, "messages", new ArrayList<>(), "operationResult", "DummyTakeOperationResult");
Assert.assertEquals(actualMap, expectedMap);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testTakeInstanceOperationSuccess")
public void testFreeInstanceOperationSuccess() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String payload =
"{ \"operation_list\" : [\"org.apache.helix.rest.server.TestOperationImpl\"]} ";
Response response = new JerseyUriRequestBuilder("clusters/{}/instances/{}/freeInstance")
.format(STOPPABLE_CLUSTER, "instance1")
.post(this, Entity.entity(payload, MediaType.APPLICATION_JSON_TYPE));
String takeInstanceResult = response.readEntity(String.class);
Map<String, Object> actualMap = OBJECT_MAPPER.readValue(takeInstanceResult, Map.class);
Map<String, Object> expectedMap = ImmutableMap
.of("successful", true, "messages", new ArrayList<>(), "operationResult",
"DummyFreeOperationResult");
Assert.assertEquals(actualMap, expectedMap);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
  /**
   * Runs the dummy operation with a per-operation config keyed by the operation's
   * class name; with this config the operation check fails, so the overall
   * takeInstance call reports {@code successful == false}.
   */
  @Test(dependsOnMethods = "testFreeInstanceOperationSuccess")
  public void testTakeInstanceOperationCheckFailure() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    // Operation config scoped to the TestOperationImpl class name.
    String payload = "{ \"operation_list\" : [\"org.apache.helix.rest.server.TestOperationImpl\"],"
        + "\"operation_config\": { \"org.apache.helix.rest.server.TestOperationImpl\" :"
        + " {\"instance0\": true, \"instance2\": true, "
        + "\"instance3\": true, \"instance4\": true, \"instance5\": true, "
        + " \"value\" : \"i001\", \"list_value\" : [\"list1\"]}} } ";
    Response response = new JerseyUriRequestBuilder("clusters/{}/instances/{}/takeInstance")
        .format(STOPPABLE_CLUSTER, "instance0")
        .post(this, Entity.entity(payload, MediaType.APPLICATION_JSON_TYPE));
    String takeInstanceResult = response.readEntity(String.class);
    Map<String, Object> actualMap = OBJECT_MAPPER.readValue(takeInstanceResult, Map.class);
    // The operation check is expected to fail for instance0 under this config.
    Assert.assertFalse((boolean)actualMap.get("successful"));
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
  /**
   * Same as {@link #testTakeInstanceOperationCheckFailure()}, but the operation
   * config is supplied under the shared-input key "OperationConfigSharedInput"
   * rather than the operation's class name; the check still fails.
   */
  @Test(dependsOnMethods = "testTakeInstanceOperationCheckFailure")
  public void testTakeInstanceOperationCheckFailureCommonInput() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    // Config under the shared-input key applies to all listed operations.
    String payload = "{ \"operation_list\" : [\"org.apache.helix.rest.server.TestOperationImpl\"],"
        + "\"operation_config\": { \"OperationConfigSharedInput\" :"
        + " {\"instance0\": true, \"instance2\": true, "
        + "\"instance3\": true, \"instance4\": true, \"instance5\": true, "
        + " \"value\" : \"i001\", \"list_value\" : [\"list1\"]}}} ";
    Response response = new JerseyUriRequestBuilder("clusters/{}/instances/{}/takeInstance")
        .format(STOPPABLE_CLUSTER, "instance0")
        .post(this, Entity.entity(payload, MediaType.APPLICATION_JSON_TYPE));
    String takeInstanceResult = response.readEntity(String.class);
    Map<String, Object> actualMap = OBJECT_MAPPER.readValue(takeInstanceResult, Map.class);
    Assert.assertFalse((boolean)actualMap.get("successful"));
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
@Test(dependsOnMethods = "testTakeInstanceOperationCheckFailureCommonInput")
public void testTakeInstanceOperationCheckFailureNonBlocking() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String payload = "{ \"operation_list\" : [\"org.apache.helix.rest.server.TestOperationImpl\"],"
+ "\"operation_config\": { \"org.apache.helix.rest.server.TestOperationImpl\" : "
+ "{\"instance0\": true, \"instance2\": true, "
+ "\"instance3\": true, \"instance4\": true, \"instance5\": true, "
+ "\"continueOnFailures\" : true} } } ";
Response response = new JerseyUriRequestBuilder("clusters/{}/instances/{}/takeInstance")
.format(STOPPABLE_CLUSTER, "instance0")
.post(this, Entity.entity(payload, MediaType.APPLICATION_JSON_TYPE));
String takeInstanceResult = response.readEntity(String.class);
System.out.println("testTakeInstanceOperationCheckFailureNonBlocking" + takeInstanceResult);
Map<String, Object> actualMap = OBJECT_MAPPER.readValue(takeInstanceResult, Map.class);
Assert.assertTrue((boolean)actualMap.get("successful"));
Assert.assertEquals(actualMap.get("operationResult"), "DummyTakeOperationResult");
// The non blocking test should generate msg but won't return failure status
Assert.assertFalse(actualMap.get("messages").equals("[]"));
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testTakeInstanceOperationCheckFailureNonBlocking")
public void testTakeInstanceCheckOnly() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String payload = "{ \"operation_list\" : [\"org.apache.helix.rest.server.TestOperationImpl\"],"
+ "\"operation_config\": {\"performOperation\": false} } ";
Response response = new JerseyUriRequestBuilder("clusters/{}/instances/{}/takeInstance")
.format(STOPPABLE_CLUSTER, "instance1")
.post(this, Entity.entity(payload, MediaType.APPLICATION_JSON_TYPE));
String takeInstanceResult = response.readEntity(String.class);
Map<String, Object> actualMap = OBJECT_MAPPER.readValue(takeInstanceResult, Map.class);
Assert.assertTrue((boolean)actualMap.get("successful"));
Assert.assertTrue(actualMap.get("operationResult").equals(""));
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testTakeInstanceCheckOnly")
public void testGetAllMessages() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String testInstance = CLUSTER_NAME + "localhost_12926"; //Non-live instance
String messageId = "msg1";
Message message = new Message(Message.MessageType.STATE_TRANSITION, messageId);
message.setStateModelDef("MasterSlave");
message.setFromState("OFFLINE");
message.setToState("SLAVE");
message.setResourceName("testResourceName");
message.setPartitionName("testResourceName_1");
message.setTgtName("localhost_3");
message.setTgtSessionId("session_3");
HelixDataAccessor helixDataAccessor = new ZKHelixDataAccessor(CLUSTER_NAME, _baseAccessor);
helixDataAccessor.setProperty(helixDataAccessor.keyBuilder().message(testInstance, messageId), message);
String body = new JerseyUriRequestBuilder("clusters/{}/instances/{}/messages").isBodyReturnExpected(true).format(CLUSTER_NAME, testInstance).get(this);
JsonNode node = OBJECT_MAPPER.readTree(body);
int newMessageCount =
node.get(PerInstanceAccessor.PerInstanceProperties.total_message_count.name()).intValue();
Assert.assertEquals(newMessageCount, 1);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
  /**
   * Verifies the {@code stateModelDef} query filter on the per-instance messages
   * endpoint: a MasterSlave message is counted when filtering by MasterSlave and
   * excluded when filtering by LeaderStandBy.
   */
  @Test(dependsOnMethods = "testGetAllMessages")
  public void testGetMessagesByStateModelDef() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String testInstance = CLUSTER_NAME + "localhost_12926"; //Non-live instance
    String messageId = "msg1";
    // Seed a MasterSlave state-transition message for the instance directly in ZK.
    Message message = new Message(Message.MessageType.STATE_TRANSITION, messageId);
    message.setStateModelDef("MasterSlave");
    message.setFromState("OFFLINE");
    message.setToState("SLAVE");
    message.setResourceName("testResourceName");
    message.setPartitionName("testResourceName_1");
    message.setTgtName("localhost_3");
    message.setTgtSessionId("session_3");
    HelixDataAccessor helixDataAccessor = new ZKHelixDataAccessor(CLUSTER_NAME, _baseAccessor);
    helixDataAccessor.setProperty(helixDataAccessor.keyBuilder().message(testInstance, messageId),
        message);
    // Filtering by the matching state model def must count the message.
    String body =
        new JerseyUriRequestBuilder("clusters/{}/instances/{}/messages?stateModelDef=MasterSlave")
            .isBodyReturnExpected(true).format(CLUSTER_NAME, testInstance).get(this);
    JsonNode node = OBJECT_MAPPER.readTree(body);
    int newMessageCount =
        node.get(PerInstanceAccessor.PerInstanceProperties.total_message_count.name()).intValue();
    Assert.assertEquals(newMessageCount, 1);
    // Filtering by a different state model def must exclude it.
    body =
        new JerseyUriRequestBuilder("clusters/{}/instances/{}/messages?stateModelDef=LeaderStandBy")
            .isBodyReturnExpected(true).format(CLUSTER_NAME, testInstance).get(this);
    node = OBJECT_MAPPER.readTree(body);
    newMessageCount =
        node.get(PerInstanceAccessor.PerInstanceProperties.total_message_count.name()).intValue();
    Assert.assertEquals(newMessageCount, 0);
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
@Test(dependsOnMethods = "testGetMessagesByStateModelDef")
public void testGetAllInstances() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String body = new JerseyUriRequestBuilder("clusters/{}/instances").isBodyReturnExpected(true)
.format(CLUSTER_NAME).get(this);
JsonNode node = OBJECT_MAPPER.readTree(body);
String instancesStr = node.get(InstancesAccessor.InstancesProperties.instances.name()).toString();
Assert.assertNotNull(instancesStr);
Set<String> instances = OBJECT_MAPPER.readValue(instancesStr,
OBJECT_MAPPER.getTypeFactory().constructCollectionType(Set.class, String.class));
Assert.assertEquals(instances, _instancesMap.get(CLUSTER_NAME), "Instances from response: "
+ instances + " vs instances actually: " + _instancesMap.get(CLUSTER_NAME));
System.out.println("End test :" + TestHelper.getTestMethodName());
}
@Test(dependsOnMethods = "testGetAllInstances")
public void testGetInstanceById() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String body = new JerseyUriRequestBuilder("clusters/{}/instances/{}").isBodyReturnExpected(true)
.format(CLUSTER_NAME, INSTANCE_NAME).get(this);
JsonNode node = OBJECT_MAPPER.readTree(body);
String instancesCfg = node.get(PerInstanceAccessor.PerInstanceProperties.config.name()).toString();
Assert.assertNotNull(instancesCfg);
boolean isHealth = node.get("health").booleanValue();
Assert.assertFalse(isHealth);
InstanceConfig instanceConfig = new InstanceConfig(toZNRecord(instancesCfg));
Assert.assertEquals(instanceConfig,
_configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME));
System.out.println("End test :" + TestHelper.getTestMethodName());
}
  /**
   * Adds a new instance config via PUT and verifies it was persisted.
   */
  @Test(dependsOnMethods = "testGetInstanceById")
  public void testAddInstance() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    InstanceConfig instanceConfig = new InstanceConfig(INSTANCE_NAME + "TEST");
    Entity entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(instanceConfig.getRecord()),
        MediaType.APPLICATION_JSON_TYPE);
    // NOTE(review): the URL path uses INSTANCE_NAME while the payload record id is
    // INSTANCE_NAME + "TEST"; the assertion below reads the "TEST" config, so the
    // server appears to key off the record id — confirm the mismatch is intentional.
    new JerseyUriRequestBuilder("clusters/{}/instances/{}").format(CLUSTER_NAME, INSTANCE_NAME)
        .put(this, entity);
    Assert.assertEquals(instanceConfig,
        _configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME + "TEST"));
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
  /**
   * Deletes the instance added by {@link #testAddInstance()}, then reads its config
   * back; the read throws {@code HelixException} (expected by the annotation),
   * proving the config was removed.
   */
  @Test(dependsOnMethods = "testAddInstance", expectedExceptions = HelixException.class)
  public void testDeleteInstance() {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    delete("clusters/" + CLUSTER_NAME + "/instances/" + INSTANCE_NAME + "TEST",
        Response.Status.OK.getStatusCode());
    // Throws HelixException, so the final println below never executes.
    _configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME + "TEST");
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
  /**
   * Exercises the per-instance update commands in sequence: disable/enable (with
   * and without a reason), tag add/remove, partition disable/enable, and
   * setInstanceOperation. Steps are order-dependent: each assertion checks the
   * state left by the preceding command.
   */
  @Test(dependsOnMethods = "testDeleteInstance")
  public void updateInstance() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    // Disable instance with an explicit reason; disabled type falls back to default.
    Entity entity = Entity.entity("", MediaType.APPLICATION_JSON_TYPE);
    new JerseyUriRequestBuilder(
        "clusters/{}/instances/{}?command=disable&instanceDisabledReason=reason1")
        .format(CLUSTER_NAME, INSTANCE_NAME).post(this, entity);
    Assert.assertFalse(
        _configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME).getInstanceEnabled());
    Assert.assertEquals(
        _configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME).getInstanceDisabledType(),
        InstanceConstants.InstanceDisabledType.DEFAULT_INSTANCE_DISABLE_TYPE.toString());
    Assert.assertEquals(
        _configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME).getInstanceDisabledReason(),
        "reason1");
    // Enable instance; the disabled type/reason params are ignored on enable and
    // the stored disabled type/reason reset to their "not disabled" values.
    new JerseyUriRequestBuilder(
        "clusters/{}/instances/{}?command=enable&instanceDisabledType=USER_OPERATION&instanceDisabledReason=reason1")
        .format(CLUSTER_NAME, INSTANCE_NAME).post(this, entity);
    Assert.assertTrue(
        _configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME).getInstanceEnabled());
    Assert.assertEquals(
        _configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME).getInstanceDisabledType(),
        InstanceConstants.INSTANCE_NOT_DISABLED);
    Assert.assertEquals(
        _configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME).getInstanceDisabledReason(),
        "");
    // We should see no instance disable related field written to clusterConfig.
    ClusterConfig cls = _configAccessor.getClusterConfig(CLUSTER_NAME);
    Assert.assertFalse(cls.getRecord().getMapFields()
        .containsKey(ClusterConfig.ClusterConfigProperty.DISABLED_INSTANCES.name()));
    // Disable instance with no reason input, then re-enable.
    new JerseyUriRequestBuilder("clusters/{}/instances/{}?command=disable")
        .format(CLUSTER_NAME, INSTANCE_NAME).post(this, entity);
    Assert.assertFalse(
        _configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME).getInstanceEnabled());
    new JerseyUriRequestBuilder("clusters/{}/instances/{}?command=enable")
        .format(CLUSTER_NAME, INSTANCE_NAME).post(this, entity);
    Assert.assertTrue(
        _configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME).getInstanceEnabled());
    // Disable/enable should still write no disable-related field to clusterConfig.
    cls = _configAccessor.getClusterConfig(CLUSTER_NAME);
    Assert.assertFalse(cls.getRecord().getMapFields()
        .containsKey(ClusterConfig.ClusterConfigProperty.DISABLED_INSTANCES.name()));
    // AddTags: all three tags should be stored in the given order.
    List<String> tagList = ImmutableList.of("tag3", "tag1", "tag2");
    entity = Entity.entity(
        OBJECT_MAPPER.writeValueAsString(ImmutableMap.of(AbstractResource.Properties.id.name(),
            INSTANCE_NAME, PerInstanceAccessor.PerInstanceProperties.instanceTags.name(), tagList)),
        MediaType.APPLICATION_JSON_TYPE);
    new JerseyUriRequestBuilder("clusters/{}/instances/{}?command=addInstanceTag")
        .format(CLUSTER_NAME, INSTANCE_NAME).post(this, entity);
    Assert.assertEquals(_configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME).getTags(),
        tagList);
    // RemoveTags: remove tag3 and tag1 (everything but tag2); only tag2 remains.
    List<String> removeList = new ArrayList<>(tagList);
    removeList.remove("tag2");
    entity = Entity.entity(
        OBJECT_MAPPER.writeValueAsString(ImmutableMap.of(AbstractResource.Properties.id.name(),
            INSTANCE_NAME, PerInstanceAccessor.PerInstanceProperties.instanceTags.name(), removeList)),
        MediaType.APPLICATION_JSON_TYPE);
    new JerseyUriRequestBuilder("clusters/{}/instances/{}?command=removeInstanceTag")
        .format(CLUSTER_NAME, INSTANCE_NAME).post(this, entity);
    Assert.assertEquals(_configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME).getTags(),
        ImmutableList.of("tag2"));
    // Test enable/disable partitions on resource TestCluster_0_db_0.
    String dbName = "_db_0_";
    List<String> partitionsToDisable = Arrays.asList(CLUSTER_NAME + dbName + "0",
        CLUSTER_NAME + dbName + "1", CLUSTER_NAME + dbName + "3");
    entity = Entity.entity(
        OBJECT_MAPPER.writeValueAsString(ImmutableMap.of(AbstractResource.Properties.id.name(),
            INSTANCE_NAME, PerInstanceAccessor.PerInstanceProperties.resource.name(),
            CLUSTER_NAME + dbName.substring(0, dbName.length() - 1),
            PerInstanceAccessor.PerInstanceProperties.partitions.name(), partitionsToDisable)),
        MediaType.APPLICATION_JSON_TYPE);
    new JerseyUriRequestBuilder("clusters/{}/instances/{}?command=disablePartitions")
        .format(CLUSTER_NAME, INSTANCE_NAME).post(this, entity);
    InstanceConfig instanceConfig = _configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME);
    Assert.assertEquals(
        new HashSet<>(instanceConfig.getDisabledPartitionsMap()
            .get(CLUSTER_NAME + dbName.substring(0, dbName.length() - 1))),
        new HashSet<>(partitionsToDisable));
    // Re-enable partition "1"; partitions "0" and "3" stay disabled.
    entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(ImmutableMap
        .of(AbstractResource.Properties.id.name(), INSTANCE_NAME,
            PerInstanceAccessor.PerInstanceProperties.resource.name(),
            CLUSTER_NAME + dbName.substring(0, dbName.length() - 1),
            PerInstanceAccessor.PerInstanceProperties.partitions.name(),
            ImmutableList.of(CLUSTER_NAME + dbName + "1"))), MediaType.APPLICATION_JSON_TYPE);
    new JerseyUriRequestBuilder("clusters/{}/instances/{}?command=enablePartitions")
        .format(CLUSTER_NAME, INSTANCE_NAME).post(this, entity);
    instanceConfig = _configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME);
    Assert.assertEquals(new HashSet<>(instanceConfig.getDisabledPartitionsMap()
        .get(CLUSTER_NAME + dbName.substring(0, dbName.length() - 1))),
        new HashSet<>(Arrays.asList(CLUSTER_NAME + dbName + "0", CLUSTER_NAME + dbName + "3")));
    // Test setInstanceOperation: valid value, invalid value (404), then clear.
    new JerseyUriRequestBuilder("clusters/{}/instances/{}?command=setInstanceOperation&instanceOperation=EVACUATE")
        .format(CLUSTER_NAME, INSTANCE_NAME).post(this, entity);
    instanceConfig = _configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME);
    Assert.assertEquals(
        instanceConfig.getInstanceOperation(), InstanceConstants.InstanceOperation.EVACUATE.toString());
    new JerseyUriRequestBuilder("clusters/{}/instances/{}?command=setInstanceOperation&instanceOperation=INVALIDOP")
        .expectedReturnStatusCode(Response.Status.NOT_FOUND.getStatusCode()).format(CLUSTER_NAME, INSTANCE_NAME).post(this, entity);
    // An empty instanceOperation clears the previously set operation.
    new JerseyUriRequestBuilder("clusters/{}/instances/{}?command=setInstanceOperation&instanceOperation=")
        .format(CLUSTER_NAME, INSTANCE_NAME).post(this, entity);
    instanceConfig = _configAccessor.getInstanceConfig(CLUSTER_NAME, INSTANCE_NAME);
    Assert.assertEquals(
        instanceConfig.getInstanceOperation(), "");
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
  /**
   * Test the "update" command for the updateInstanceConfig endpoint: first adds
   * three keys (k0-k2) to all field types, then modifies their values, verifying
   * the stored config after each step. (instanceName and INSTANCE_NAME refer to
   * the same instance.)
   * @throws IOException if JSON serialization fails
   */
  @Test(dependsOnMethods = "updateInstance")
  public void updateInstanceConfig() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String instanceName = CLUSTER_NAME + "localhost_12918";
    InstanceConfig instanceConfig = _configAccessor.getInstanceConfig(CLUSTER_NAME, instanceName);
    ZNRecord record = instanceConfig.getRecord();
    // Generate a record containing three keys (k0, k1, k2) for all fields
    String value = "value";
    for (int i = 0; i < 3; i++) {
      String key = "k" + i;
      record.getSimpleFields().put(key, value);
      record.getMapFields().put(key, ImmutableMap.of(key, value));
      record.getListFields().put(key, Arrays.asList(key, value));
    }
    // 1. Add these fields by way of "update"
    Entity entity =
        Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE);
    new JerseyUriRequestBuilder("clusters/{}/instances/{}/configs?command=update")
        .format(CLUSTER_NAME, INSTANCE_NAME).post(this, entity);
    // Check that the fields have been added
    Assert.assertEquals(record.getSimpleFields(), _configAccessor
        .getInstanceConfig(CLUSTER_NAME, instanceName).getRecord().getSimpleFields());
    Assert.assertEquals(record.getListFields(),
        _configAccessor.getInstanceConfig(CLUSTER_NAME, instanceName).getRecord().getListFields());
    Assert.assertEquals(record.getMapFields(),
        _configAccessor.getInstanceConfig(CLUSTER_NAME, instanceName).getRecord().getMapFields());
    String newValue = "newValue";
    // 2. Modify the same keys with a new value and update again
    for (int i = 0; i < 3; i++) {
      String key = "k" + i;
      record.getSimpleFields().put(key, newValue);
      record.getMapFields().put(key, ImmutableMap.of(key, newValue));
      record.getListFields().put(key, Arrays.asList(key, newValue));
    }
    entity =
        Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE);
    new JerseyUriRequestBuilder("clusters/{}/instances/{}/configs?command=update")
        .format(CLUSTER_NAME, INSTANCE_NAME).post(this, entity);
    // Check that the fields have been modified
    Assert.assertEquals(record.getSimpleFields(), _configAccessor
        .getInstanceConfig(CLUSTER_NAME, instanceName).getRecord().getSimpleFields());
    Assert.assertEquals(record.getListFields(),
        _configAccessor.getInstanceConfig(CLUSTER_NAME, instanceName).getRecord().getListFields());
    Assert.assertEquals(record.getMapFields(),
        _configAccessor.getInstanceConfig(CLUSTER_NAME, instanceName).getRecord().getMapFields());
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
  /**
   * Test the "delete" command of updateInstanceConfig. Relies on
   * {@code updateInstanceConfig} having previously added keys k0-k2.
   * @throws IOException if JSON serialization fails
   */
  @Test(dependsOnMethods = "updateInstanceConfig")
  public void deleteInstanceConfig() throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    String instanceName = CLUSTER_NAME + "localhost_12918";
    ZNRecord record = new ZNRecord(instanceName);
    // Generate a record containing three keys (k1, k2, k3) for all fields for deletion
    // (k3 was never added server-side; deleting an absent key should be a no-op).
    String value = "value";
    for (int i = 1; i < 4; i++) {
      String key = "k" + i;
      record.getSimpleFields().put(key, value);
      record.getMapFields().put(key, ImmutableMap.of(key, value));
      record.getListFields().put(key, Arrays.asList(key, value));
    }
    // Issue the "delete" command for these keys
    Entity entity =
        Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE);
    new JerseyUriRequestBuilder("clusters/{}/instances/{}/configs?command=delete")
        .format(CLUSTER_NAME, INSTANCE_NAME).post(this, entity);
    // Check that keys k1-k3 have been deleted, and k0 (added by updateInstanceConfig) remains
    for (int i = 0; i < 4; i++) {
      String key = "k" + i;
      if (i == 0) {
        Assert.assertTrue(_configAccessor.getInstanceConfig(CLUSTER_NAME, instanceName).getRecord()
            .getSimpleFields().containsKey(key));
        Assert.assertTrue(_configAccessor.getInstanceConfig(CLUSTER_NAME, instanceName).getRecord()
            .getListFields().containsKey(key));
        Assert.assertTrue(_configAccessor.getInstanceConfig(CLUSTER_NAME, instanceName).getRecord()
            .getMapFields().containsKey(key));
        continue;
      }
      Assert.assertFalse(_configAccessor.getInstanceConfig(CLUSTER_NAME, instanceName).getRecord()
          .getSimpleFields().containsKey(key));
      Assert.assertFalse(_configAccessor.getInstanceConfig(CLUSTER_NAME, instanceName).getRecord()
          .getListFields().containsKey(key));
      Assert.assertFalse(_configAccessor.getInstanceConfig(CLUSTER_NAME, instanceName).getRecord()
          .getMapFields().containsKey(key));
    }
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
/**
* Check that updateInstanceConfig fails when there is no pre-existing InstanceConfig ZNode. This
* is because InstanceConfig should have been created when the instance was added, and this REST
* endpoint is not meant for creation.
*/
@Test(dependsOnMethods = "deleteInstanceConfig")
public void checkUpdateFails() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String instanceName = CLUSTER_NAME + "non_existent_instance";
InstanceConfig instanceConfig = new InstanceConfig(INSTANCE_NAME + "TEST");
ZNRecord record = instanceConfig.getRecord();
record.getSimpleFields().put("TestSimple", "value");
record.getMapFields().put("TestMap", ImmutableMap.of("key", "value"));
record.getListFields().put("TestList", Arrays.asList("e1", "e2", "e3"));
Entity entity =
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE);
new JerseyUriRequestBuilder("clusters/{}/instances/{}/configs")
.expectedReturnStatusCode(Response.Status.NOT_FOUND.getStatusCode())
.format(CLUSTER_NAME, instanceName).post(this, entity);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
  /**
   * Check that validateWeightForInstance() works by
   * 1. First call validate -> We should get "true" because nothing is set in ClusterConfig.
   * 2. Define keys in ClusterConfig and call validate -> We should get BadRequest.
   * 3. Define weight configs in InstanceConfig and call validate -> We should get OK with "true".
   * @throws IOException if JSON parsing of the responses fails
   */
  @Test(dependsOnMethods = "checkUpdateFails")
  public void testValidateWeightForInstance()
      throws IOException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    // Empty out ClusterConfig's weight key setting and InstanceConfig's capacity maps for testing
    ClusterConfig clusterConfig = _configAccessor.getClusterConfig(CLUSTER_NAME);
    clusterConfig.getRecord()
        .setListField(ClusterConfig.ClusterConfigProperty.INSTANCE_CAPACITY_KEYS.name(),
            new ArrayList<>());
    _configAccessor.setClusterConfig(CLUSTER_NAME, clusterConfig);
    List<String> instances =
        _gSetupTool.getClusterManagementTool().getInstancesInCluster(CLUSTER_NAME);
    for (String instance : instances) {
      InstanceConfig instanceConfig = _configAccessor.getInstanceConfig(CLUSTER_NAME, instance);
      instanceConfig.setInstanceCapacityMap(Collections.emptyMap());
      _configAccessor.setInstanceConfig(CLUSTER_NAME, instance, instanceConfig);
    }
    // Get one instance in the cluster
    String selectedInstance =
        _gSetupTool.getClusterManagementTool().getInstancesInCluster(CLUSTER_NAME).iterator()
            .next();
    // Step 1: issue a validate call with no capacity keys configured.
    String body = new JerseyUriRequestBuilder("clusters/{}/instances/{}?command=validateWeight")
        .isBodyReturnExpected(true).format(CLUSTER_NAME, selectedInstance).get(this);
    JsonNode node = OBJECT_MAPPER.readTree(body);
    // Must have the result saying (true) because there's no capacity keys set
    // in ClusterConfig
    node.iterator().forEachRemaining(child -> Assert.assertTrue(child.booleanValue()));
    // Step 2: define capacity keys in ClusterConfig only.
    clusterConfig = _configAccessor.getClusterConfig(CLUSTER_NAME);
    clusterConfig.setInstanceCapacityKeys(Arrays.asList("FOO", "BAR"));
    _configAccessor.setClusterConfig(CLUSTER_NAME, clusterConfig);
    body = new JerseyUriRequestBuilder("clusters/{}/instances/{}?command=validateWeight")
        .isBodyReturnExpected(true).format(CLUSTER_NAME, selectedInstance)
        .expectedReturnStatusCode(Response.Status.BAD_REQUEST.getStatusCode()).get(this);
    node = OBJECT_MAPPER.readTree(body);
    // Since instance does not have weight-related configs, the result should return error
    Assert.assertTrue(node.has("error"));
    // Step 3: now set the matching weight-related config in InstanceConfig.
    InstanceConfig instanceConfig =
        _configAccessor.getInstanceConfig(CLUSTER_NAME, selectedInstance);
    instanceConfig.setInstanceCapacityMap(ImmutableMap.of("FOO", 1000, "BAR", 1000));
    _configAccessor.setInstanceConfig(CLUSTER_NAME, selectedInstance, instanceConfig);
    body = new JerseyUriRequestBuilder("clusters/{}/instances/{}?command=validateWeight")
        .isBodyReturnExpected(true).format(CLUSTER_NAME, selectedInstance)
        .expectedReturnStatusCode(Response.Status.OK.getStatusCode()).get(this);
    node = OBJECT_MAPPER.readTree(body);
    // Must have the results saying they are all valid (true) because capacity keys are set
    // in ClusterConfig
    node.iterator().forEachRemaining(child -> Assert.assertTrue(child.booleanValue()));
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }
/**
 * Test the sanity check when updating the instance config.
 * The config is validated at rest server side: a well-formed DOMAIN update is
 * accepted (200) while a malformed DOMAIN string is rejected (500).
 */
@Test(dependsOnMethods = "testValidateWeightForInstance")
public void testValidateDeltaInstanceConfigForUpdate() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
// Enable Topology aware for the cluster
ClusterConfig clusterConfig = _configAccessor.getClusterConfig(CLUSTER_NAME);
clusterConfig.getRecord()
.setListField(ClusterConfig.ClusterConfigProperty.INSTANCE_CAPACITY_KEYS.name(),
new ArrayList<>());
clusterConfig.setTopologyAwareEnabled(true);
clusterConfig.setTopology("/Rack/Sub-Rack/Host/Instance");
clusterConfig.setFaultZoneType("Host");
_configAccessor.setClusterConfig(CLUSTER_NAME, clusterConfig);
// NOTE(review): presumably this equals the class constant INSTANCE_NAME used in
// the REST calls below — confirm; otherwise the config is read for one instance
// and the update is posted to another.
String instanceName = CLUSTER_NAME + "localhost_12918";
InstanceConfig instanceConfig = _configAccessor.getInstanceConfig(CLUSTER_NAME, instanceName);
// Update InstanceConfig with Topology Info
String domain = "Rack=rack1, Sub-Rack=Sub-Rack1, Host=Host-1";
ZNRecord record = instanceConfig.getRecord();
record.getSimpleFields().put(InstanceConfig.InstanceConfigProperty.DOMAIN.name(), domain);
// Add these fields by way of "update"; doSanityCheck=true makes the server
// validate the resulting config before persisting it.
Entity entity =
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE);
Response response = new JerseyUriRequestBuilder(
"clusters/{}/instances/{}/configs?command=update&doSanityCheck=true")
.format(CLUSTER_NAME, INSTANCE_NAME).post(this, entity);
// Check that the fields have been added
Assert.assertEquals(response.getStatus(), 200);
// Check the instance config was persisted with the new domain
Assert.assertEquals(
_configAccessor.getInstanceConfig(CLUSTER_NAME, instanceName).getDomainAsString(), domain);
// set domain to an invalid value (does not match the topology defined above)
record.getSimpleFields()
.put(InstanceConfig.InstanceConfigProperty.DOMAIN.name(), "InvalidDomainValue");
entity =
Entity.entity(OBJECT_MAPPER.writeValueAsString(record), MediaType.APPLICATION_JSON_TYPE);
// Updating using an invalid domain value should return a non-OK response
new JerseyUriRequestBuilder(
"clusters/{}/instances/{}/configs?command=update&doSanityCheck=true")
.expectedReturnStatusCode(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode())
.format(CLUSTER_NAME, INSTANCE_NAME).post(this, entity);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
/**
 * GET .../instances/{instance}/resources lists the resources hosted on the
 * instance; the per-resource endpoint must then succeed for the first one.
 */
@Test(dependsOnMethods = "testValidateDeltaInstanceConfigForUpdate")
public void testGetResourcesOnInstance() throws JsonProcessingException, InterruptedException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String body = new JerseyUriRequestBuilder("clusters/{}/instances/{}/resources")
    .isBodyReturnExpected(true).format(CLUSTER_NAME, INSTANCE_NAME).get(this);
JsonNode node = OBJECT_MAPPER.readTree(body);
ArrayNode arrayOfResource =
    (ArrayNode) node.get(PerInstanceAccessor.PerInstanceProperties.resources.name());
Assert.assertTrue(arrayOfResource.size() != 0);
// Read the element as a plain string via Jackson's textValue() instead of the
// previous toString()+substring hack that manually stripped the JSON quotes.
String dbName = arrayOfResource.get(0).textValue();
// The below call should successfully return the per-resource view
body = new JerseyUriRequestBuilder("clusters/{}/instances/{}/resources/{}")
    .isBodyReturnExpected(true).format(CLUSTER_NAME, INSTANCE_NAME, dbName).get(this);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
}
| 9,310 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/TestWorkflowAccessor.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.TestHelper;
import org.apache.helix.rest.server.resources.helix.WorkflowAccessor;
import org.apache.helix.task.JobQueue;
import org.apache.helix.task.TargetState;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.TaskExecutionInfo;
import org.apache.helix.task.TaskState;
import org.apache.helix.task.WorkflowConfig;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Integration tests for the workflow REST endpoints under
 * "clusters/{clusterName}/workflows". The methods form a chain via
 * dependsOnMethods: later tests read or delete state created by earlier ones.
 */
public class TestWorkflowAccessor extends AbstractTestClass {
// Cluster and workflow pre-provisioned by AbstractTestClass.
private final static String CLUSTER_NAME = "TestCluster_0";
private final static String WORKFLOW_NAME = WORKFLOW_PREFIX + 0;
// Names for the workflow/queue created here and deleted by testDeleteWorkflow.
private final static String TEST_WORKFLOW_NAME = "TestWorkflow";
private final static String TEST_QUEUE_NAME = TEST_WORKFLOW_NAME + "_JOBQUEUE";
// JSON payload describing a one-time workflow ("Workflow1") with two jobs and
// a Job1 -> Job2 parent/child dependency; used by testCreateWorkflow.
private final static String WORKFLOW_INPUT =
"{\"id\":\"Workflow1\",\"WorkflowConfig\":{\"id\":\"Workflow1\",\"Expiry\":\"43200000\","
+ "\"FailureThreshold\":\"0\",\"IsJobQueue\":\"false\",\"TargetState\":\"START\","
+ "\"Terminable\":\"true\",\"capacity\":\"500\"},\"Jobs\":[{\"id\":\"Job1\","
+ "\"simpleFields\":{\"JobID\":\"Job1\",\"WorkflowID\":\"Workflow1\"},\"mapFields\":"
+ "{\"Task1\":{\"TASK_ID\":\"Task1\",\"TASK_COMMAND\":\"Backup\",\"TASK_TARGET_PARTITION\""
+ ":\"p1\"},\"Task2\":{\"TASK_ID\":\"Task2\",\"TASK_COMMAND\":\"ReIndex\"}},"
+ "\"listFields\":{}},{\"id\":\"Job2\",\"Command\":\"Cleanup\",\"TargetResource\":\"DB2\""
+ "}],\"ParentJobs\":{\"Job1\":[\"Job2\"]}}";
/**
 * GET .../workflows returns exactly the workflows known for the cluster.
 */
@Test
public void testGetWorkflows() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String body =
get("clusters/" + CLUSTER_NAME + "/workflows", null, Response.Status.OK.getStatusCode(), true);
JsonNode node = OBJECT_MAPPER.readTree(body);
String workflowsStr = node.get(WorkflowAccessor.WorkflowProperties.Workflows.name()).toString();
Set<String> workflows = OBJECT_MAPPER.readValue(workflowsStr,
OBJECT_MAPPER.getTypeFactory().constructCollectionType(Set.class, String.class));
Assert.assertEquals(workflows, _workflowMap.get(CLUSTER_NAME).keySet());
System.out.println("End test :" + TestHelper.getTestMethodName());
}
/**
 * GET .../workflows/{workflow} returns config, context and the last scheduled
 * task. Here no task has been scheduled, so the task info is all-unset.
 */
@Test(dependsOnMethods = "testGetWorkflows")
public void testGetWorkflow() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String body = get("clusters/" + CLUSTER_NAME + "/workflows/" + WORKFLOW_NAME, null,
Response.Status.OK.getStatusCode(), true);
JsonNode node = OBJECT_MAPPER.readTree(body);
Assert.assertNotNull(node.get(WorkflowAccessor.WorkflowProperties.WorkflowConfig.name()));
Assert.assertNotNull(node.get(WorkflowAccessor.WorkflowProperties.WorkflowContext.name()));
TaskExecutionInfo lastScheduledTask = OBJECT_MAPPER
.treeToValue(node.get(WorkflowAccessor.WorkflowProperties.LastScheduledTask.name()),
TaskExecutionInfo.class);
// All fields null / TIMESTAMP_NOT_SET means "no task scheduled yet".
Assert.assertTrue(lastScheduledTask
.equals(new TaskExecutionInfo(null, null, null, TaskExecutionInfo.TIMESTAMP_NOT_SET)));
String workflowId =
node.get(WorkflowAccessor.WorkflowProperties.WorkflowConfig.name()).get("WorkflowID")
.textValue();
Assert.assertEquals(workflowId, WORKFLOW_NAME);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
/**
 * GET .../workflows/{workflow}/configs returns the workflow's config record.
 */
@Test(dependsOnMethods = "testGetWorkflow")
public void testGetWorkflowConfig() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String body = get("clusters/" + CLUSTER_NAME + "/workflows/" + WORKFLOW_NAME + "/configs", null,
Response.Status.OK.getStatusCode(), true);
JsonNode node = OBJECT_MAPPER.readTree(body);
String workflowId = node.get("WorkflowID").textValue();
Assert.assertEquals(workflowId, WORKFLOW_NAME);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
/**
 * GET .../workflows/{workflow}/context returns the runtime context; the
 * pre-provisioned workflow is expected to be in FAILED state by now.
 */
@Test(dependsOnMethods = "testGetWorkflowConfig")
public void testGetWorkflowContext() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String body = get("clusters/" + CLUSTER_NAME + "/workflows/" + WORKFLOW_NAME + "/context", null,
Response.Status.OK.getStatusCode(), true);
JsonNode node = OBJECT_MAPPER.readTree(body);
Assert.assertEquals(node.get("STATE").textValue(),
TaskState.FAILED.name());
System.out.println("End test :" + TestHelper.getTestMethodName());
}
/**
 * PUT .../workflows/{workflow} creates both a one-time workflow (from
 * WORKFLOW_INPUT) and an empty job queue; both are verified via TaskDriver.
 */
@Test(dependsOnMethods = "testGetWorkflowContext")
public void testCreateWorkflow() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
TaskDriver driver = getTaskDriver(CLUSTER_NAME);
// Create one time workflow
Entity entity = Entity.entity(WORKFLOW_INPUT, MediaType.APPLICATION_JSON_TYPE);
put("clusters/" + CLUSTER_NAME + "/workflows/" + TEST_WORKFLOW_NAME, null, entity,
Response.Status.OK.getStatusCode());
WorkflowConfig workflowConfig = driver.getWorkflowConfig(TEST_WORKFLOW_NAME);
Assert.assertNotNull(workflowConfig);
// Two jobs (Job1, Job2) were defined in WORKFLOW_INPUT.
Assert.assertEquals(workflowConfig.getJobDag().getAllNodes().size(), 2);
// Create JobQueue
JobQueue.Builder jobQueue = new JobQueue.Builder(TEST_QUEUE_NAME)
.setWorkflowConfig(driver.getWorkflowConfig(TEST_WORKFLOW_NAME));
entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(Collections
.singletonMap(WorkflowAccessor.WorkflowProperties.WorkflowConfig.name(),
jobQueue.build().getWorkflowConfig().getRecord().getSimpleFields())),
MediaType.APPLICATION_JSON_TYPE);
put("clusters/" + CLUSTER_NAME + "/workflows/" + TEST_QUEUE_NAME, null, entity,
Response.Status.OK.getStatusCode());
workflowConfig = driver.getWorkflowConfig(TEST_QUEUE_NAME);
Assert.assertNotNull(workflowConfig);
Assert.assertTrue(workflowConfig.isJobQueue());
// A freshly created queue has no jobs yet.
Assert.assertEquals(workflowConfig.getJobDag().getAllNodes().size(), 0);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
/**
 * POST .../workflows/{workflow}?command=stop|resume toggles the queue's
 * target state between STOP and START.
 */
@Test(dependsOnMethods = "testCreateWorkflow")
public void testUpdateWorkflow() {
System.out.println("Start test :" + TestHelper.getTestMethodName());
TaskDriver driver = getTaskDriver(CLUSTER_NAME);
Entity entity = Entity.entity("", MediaType.APPLICATION_JSON_TYPE);
post("clusters/" + CLUSTER_NAME + "/workflows/" + TEST_QUEUE_NAME,
ImmutableMap.of("command", "stop"), entity, Response.Status.OK.getStatusCode());
Assert
.assertEquals(driver.getWorkflowConfig(TEST_QUEUE_NAME).getTargetState(), TargetState.STOP);
post("clusters/" + CLUSTER_NAME + "/workflows/" + TEST_QUEUE_NAME,
ImmutableMap.of("command", "resume"), entity, Response.Status.OK.getStatusCode());
Assert.assertEquals(driver.getWorkflowConfig(TEST_QUEUE_NAME).getTargetState(),
TargetState.START);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
/**
 * GET/POST .../workflows/{workflow}/userContent: reads the (initially empty)
 * user content map and verifies command=update both adds and overwrites keys.
 */
@Test(dependsOnMethods = "testUpdateWorkflow")
public void testGetAndUpdateWorkflowContentStore() throws IOException, InterruptedException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String workflowName = "Workflow_0";
TaskDriver driver = getTaskDriver(CLUSTER_NAME);
// Wait for workflow to start processing before reading its user content
driver.pollForWorkflowState(workflowName, TaskState.IN_PROGRESS, TaskState.COMPLETED, TaskState.FAILED);
String uri = "clusters/" + CLUSTER_NAME + "/workflows/Workflow_0/userContent";
String body =
get(uri, null, Response.Status.OK.getStatusCode(), true);
Map<String, String> contentStore = OBJECT_MAPPER.readValue(body, new TypeReference<Map<String, String>>() {});
Assert.assertTrue(contentStore.isEmpty());
Map<String, String> map1 = new HashMap<>();
map1.put("k1", "v1");
Entity entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(map1), MediaType.APPLICATION_JSON_TYPE);
post(uri, ImmutableMap.of("command", "update"), entity, Response.Status.OK.getStatusCode());
// update (add items) workflow content store
body = get(uri, null, Response.Status.OK.getStatusCode(), true);
contentStore = OBJECT_MAPPER.readValue(body, new TypeReference<Map<String, String>>() {});
Assert.assertEquals(contentStore, map1);
// modify map1 (overwrite k1, add k2) and verify the store matches again
map1.put("k1", "v2");
map1.put("k2", "v2");
entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(map1), MediaType.APPLICATION_JSON_TYPE);
post(uri, ImmutableMap.of("command", "update"), entity, Response.Status.OK.getStatusCode());
body = get(uri, null, Response.Status.OK.getStatusCode(), true);
contentStore = OBJECT_MAPPER.readValue(body, new TypeReference<Map<String, String>>() {});
Assert.assertEquals(contentStore, map1);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
/**
 * Negative cases for the userContent endpoints: unknown workflow, unsupported
 * command, and a payload that is not a Map of String to String.
 */
@Test(dependsOnMethods = "testGetAndUpdateWorkflowContentStore")
public void testInvalidGetAndUpdateWorkflowContentStore() {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String validURI = "clusters/" + CLUSTER_NAME + "/workflows/Workflow_0/userContent";
String invalidURI = "clusters/" + CLUSTER_NAME + "/workflows/xxx/userContent"; // workflow not exist
Entity validEntity = Entity.entity("{\"k1\":\"v1\"}", MediaType.APPLICATION_JSON_TYPE);
Entity invalidEntity = Entity.entity("{\"k1\":{}}", MediaType.APPLICATION_JSON_TYPE); // not Map<String, String>
Map<String, String> validCmd = ImmutableMap.of("command", "update");
Map<String, String> invalidCmd = ImmutableMap.of("command", "delete"); // cmd not supported
get(invalidURI, null, Response.Status.NOT_FOUND.getStatusCode(), false);
// The following expects a OK because if the usercontent ZNode is not there, it is created
post(invalidURI, validCmd, validEntity, Response.Status.OK.getStatusCode());
post(validURI, invalidCmd, validEntity, Response.Status.BAD_REQUEST.getStatusCode());
post(validURI, validCmd, invalidEntity, Response.Status.BAD_REQUEST.getStatusCode());
System.out.println("End test :" + TestHelper.getTestMethodName());
}
/**
 * DELETE .../workflows/{workflow} removes the workflow and queue created by
 * testCreateWorkflow, shrinking the cluster's workflow count by two.
 */
@Test(dependsOnMethods = "testInvalidGetAndUpdateWorkflowContentStore")
public void testDeleteWorkflow() throws InterruptedException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
TaskDriver driver = getTaskDriver(CLUSTER_NAME);
int currentWorkflowNumbers = driver.getWorkflows().size();
delete("clusters/" + CLUSTER_NAME + "/workflows/" + TEST_WORKFLOW_NAME,
Response.Status.OK.getStatusCode());
delete("clusters/" + CLUSTER_NAME + "/workflows/" + TEST_QUEUE_NAME,
Response.Status.OK.getStatusCode());
// Give the deletions a moment to be processed before re-counting.
Thread.sleep(500);
Assert.assertEquals(driver.getWorkflows().size(), currentWorkflowNumbers - 2);
System.out.println("End test :" + TestHelper.getTestMethodName());
}
}
| 9,311 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/TestPropertyStoreAccessor.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.AccessOption;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.TestHelper;
import org.apache.helix.manager.zk.ByteArraySerializer;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.rest.server.util.JerseyUriRequestBuilder;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.zkclient.exception.ZkMarshallingError;
import org.apache.helix.zookeeper.zkclient.serialize.ZkSerializer;
import org.apache.http.HttpStatus;
import org.junit.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * Integration tests for the property-store REST endpoints under
 * "clusters/{clusterName}/propertyStore". init() seeds ZooKeeper with both a
 * ZNRecord payload and a raw-string payload to exercise both read paths.
 *
 * NOTE(review): this class uses org.junit.Assert while sibling test classes
 * use org.testng.Assert; the two have opposite (expected, actual) argument
 * conventions — consider unifying.
 */
public class TestPropertyStoreAccessor extends AbstractTestClass {
private static final String TEST_CLUSTER = "TestCluster_0";
// Node whose payload is a ZNRecord written via the default serializer.
private static final String ZNRECORD_PATH =
PropertyPathBuilder.propertyStore(TEST_CLUSTER) + "/ZnRecord";
private static final ZNRecord TEST_ZNRECORD = new ZNRecord("TestContent");
// Node whose payload is a raw string written via a custom serializer.
private static final String CUSTOM_PATH =
PropertyPathBuilder.propertyStore(TEST_CLUSTER) + "/NonZnRecord";
// Node created with a null payload (no data).
private static final String EMPTY_PATH =
PropertyPathBuilder.propertyStore(TEST_CLUSTER) + "/EmptyNode";
private static final String TEST_CONTENT = "TestContent";
// JSON key under which non-ZNRecord content is returned by the REST API.
private static final String CONTENT_KEY = "content";
private ZkBaseDataAccessor<String> _customDataAccessor;
@BeforeClass
public void init() {
// Accessor with a plain-string (de)serializer, used to write non-ZNRecord data.
_customDataAccessor = new ZkBaseDataAccessor<>(ZK_ADDR, new ZkSerializer() {
@Override
public byte[] serialize(Object o) throws ZkMarshallingError {
return o.toString().getBytes();
}
@Override
public Object deserialize(byte[] bytes) throws ZkMarshallingError {
return new String(bytes);
}
});
// initially prepare the data in different paths
Assert
.assertTrue(_customDataAccessor.create(CUSTOM_PATH, TEST_CONTENT, AccessOption.PERSISTENT));
Assert.assertTrue(_baseAccessor.create(ZNRECORD_PATH, TEST_ZNRECORD, AccessOption.PERSISTENT));
Assert.assertTrue(_baseAccessor.create(EMPTY_PATH, null, AccessOption.EPHEMERAL));
}
@AfterClass
public void close() {
// Release the custom accessor's ZK connection.
if (_customDataAccessor != null) {
_customDataAccessor.close();
}
}
// A node that exists but holds no data yields 204 NO_CONTENT with an empty body.
@Test
public void testGetPropertyStoreWithEmptyContent() {
String data = new JerseyUriRequestBuilder("clusters/{}/propertyStore/EmptyNode").format(TEST_CLUSTER)
.expectedReturnStatusCode(Response.Status.NO_CONTENT.getStatusCode()).get(this);
Assert.assertTrue(data.isEmpty());
}
// A ZNRecord payload is returned in a form deserializable back into a ZNRecord.
@Test
public void testGetPropertyStoreWithZNRecordData() throws IOException {
String data =
new JerseyUriRequestBuilder("clusters/{}/propertyStore/ZnRecord").format(TEST_CLUSTER)
.isBodyReturnExpected(true).get(this);
ZNRecord record = toZNRecord(data);
Assert.assertEquals(record.getId(), TEST_ZNRECORD.getId());
}
// A non-ZNRecord payload is wrapped in a JSON object under the "content" key.
@Test
public void testGetPropertyStoreWithTestStringData() throws IOException {
String actual = new JerseyUriRequestBuilder("clusters/{}/propertyStore/NonZnRecord").format(TEST_CLUSTER)
.isBodyReturnExpected(true)
.get(this);
JsonNode jsonNode = OBJECT_MAPPER.readTree(actual);
String payLoad = jsonNode.get(CONTENT_KEY).textValue();
Assert.assertEquals(TEST_CONTENT, payLoad);
}
// Reading a path that was never created yields 404 NOT_FOUND.
@Test
public void testGetPropertyStoreWithEmptyDataPath() {
Response response =
new JerseyUriRequestBuilder("clusters/{}/propertyStore/EmptyPath").format(TEST_CLUSTER)
.isBodyReturnExpected(true).getResponse(this);
Assert.assertEquals(response.getStatus(), HttpStatus.SC_NOT_FOUND);
}
// A malformed path (here: trailing slash) is rejected with 400 BAD_REQUEST.
@Test
public void testGetPropertyStoreWithInValidPath() {
String path = "/context/";
Response response =
new JerseyUriRequestBuilder("clusters/{}/propertyStore" + path).format(TEST_CLUSTER)
.getResponse(this);
Assert.assertEquals(response.getStatus(), HttpStatus.SC_BAD_REQUEST);
}
// PUT accepts both raw bytes (isZNRecord=false) and ZNRecord payloads; each
// write is verified by reading ZooKeeper directly through a matching accessor.
@Test
public void testPutPropertyStore() throws IOException {
String path = "/writePath/content";
// First, try to write byte array
String content = TestHelper.getTestMethodName();
put("clusters/" + TEST_CLUSTER + "/propertyStore" + path,
ImmutableMap.of("isZNRecord", "false"),
Entity.entity(OBJECT_MAPPER.writeValueAsBytes(content), MediaType.APPLICATION_JSON_TYPE),
Response.Status.OK.getStatusCode());
// Verify with a raw byte-array accessor, bypassing the REST layer
ZkBaseDataAccessor<byte[]> byteAccessor =
new ZkBaseDataAccessor<>(ZK_ADDR, new ByteArraySerializer());
byte[] data = byteAccessor
.get(PropertyPathBuilder.propertyStore(TEST_CLUSTER) + path, null, AccessOption.PERSISTENT);
byteAccessor.close();
Assert.assertEquals(content, OBJECT_MAPPER.readValue(data, String.class));
// Second, try to write a ZNRecord (overwrites the same path)
ZNRecord contentRecord = new ZNRecord(TestHelper.getTestMethodName());
contentRecord.setSimpleField("testField", TestHelper.getTestMethodName());
put("clusters/" + TEST_CLUSTER + "/propertyStore" + path, null, Entity
.entity(OBJECT_MAPPER.writeValueAsBytes(contentRecord), MediaType.APPLICATION_JSON_TYPE),
Response.Status.OK.getStatusCode());
// Verify
ZNRecord record = _baseAccessor
.get(PropertyPathBuilder.propertyStore(TEST_CLUSTER) + path, null, AccessOption.PERSISTENT);
Assert.assertEquals(contentRecord, record);
}
// DELETE removes the node written by testPutPropertyStore.
@Test(dependsOnMethods = "testPutPropertyStore")
public void testDeletePropertyStore() {
String path = "/writePath/content";
delete("clusters/" + TEST_CLUSTER + "/propertyStore" + path,
Response.Status.OK.getStatusCode());
Assert.assertFalse(_baseAccessor
.exists(PropertyPathBuilder.propertyStore(TEST_CLUSTER) + path, AccessOption.PERSISTENT));
}
}
| 9,312 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/TestMetadataStoreDirectoryAccessor.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import org.apache.helix.msdcommon.constant.MetadataStoreRoutingConstants;
import org.apache.helix.msdcommon.exception.InvalidRoutingDataException;
import org.apache.helix.rest.server.util.JerseyUriRequestBuilder;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestMetadataStoreDirectoryAccessor extends MetadataStoreDirectoryAccessorTestBase {
/*
 * Verifies "GET /namespaces/{namespace}/metadata-store-namespaces" lists every
 * namespace known to the metadata store directory.
 */
@Test
public void testGetAllNamespaces() throws IOException {
  String body = get(TEST_NAMESPACE_URI_PREFIX + "/metadata-store-namespaces", null,
      Response.Status.OK.getStatusCode(), true);
  // The payload is a JSON object; the cast below is known to be safe.
  @SuppressWarnings("unchecked")
  Map<String, Collection<String>> parsed = OBJECT_MAPPER.readValue(body, Map.class);
  // Exactly one top-level key is expected in the response.
  Assert.assertEquals(parsed.keySet(),
      ImmutableSet.of(MetadataStoreRoutingConstants.METADATA_STORE_NAMESPACES));
  // Order is irrelevant, so compare as sets.
  Set<String> actualNamespaces =
      new HashSet<>(parsed.get(MetadataStoreRoutingConstants.METADATA_STORE_NAMESPACES));
  Assert.assertEquals(actualNamespaces, ImmutableSet.of(TEST_NAMESPACE));
}
/*
 * Verifies "GET /metadata-store-realms": an unknown namespace returns 404 and a
 * valid namespace returns every realm registered in it.
 */
@Test(dependsOnMethods = "testGetAllNamespaces")
public void testGetAllMetadataStoreRealms() throws IOException {
  // A namespace that does not exist yields NOT_FOUND.
  get(NON_EXISTING_NAMESPACE_URI_PREFIX + "metadata-store-realms", null,
      Response.Status.NOT_FOUND.getStatusCode(), false);
  String body = get(TEST_NAMESPACE_URI_PREFIX + "/metadata-store-realms", null,
      Response.Status.OK.getStatusCode(), true);
  // The payload is a JSON object; the cast below is known to be safe.
  @SuppressWarnings("unchecked")
  Map<String, Collection<String>> parsed = OBJECT_MAPPER.readValue(body, Map.class);
  // Exactly one top-level key is expected in the response.
  Assert.assertEquals(parsed.keySet(),
      ImmutableSet.of(MetadataStoreRoutingConstants.METADATA_STORE_REALMS));
  // Compare as sets since the ordering of realms is not guaranteed.
  Set<String> actualRealms =
      new HashSet<>(parsed.get(MetadataStoreRoutingConstants.METADATA_STORE_REALMS));
  Assert.assertEquals(actualRealms, ImmutableSet.of(TEST_REALM_1, TEST_REALM_2));
}
/*
 * Tests REST endpoint: "GET /metadata-store-realms?sharding-key={sharding-key}"
 * Covers: unknown namespace (404), a key mapped to no realm (404), a malformed
 * key (400), and a successful realm lookup.
 */
@Test(dependsOnMethods = "testGetAllMetadataStoreRealms")
public void testGetMetadataStoreRealmWithShardingKey() throws IOException {
String shardingKey = TEST_SHARDING_KEYS_1.get(0);
// Unknown namespace -> NOT_FOUND.
new JerseyUriRequestBuilder(
NON_EXISTING_NAMESPACE_URI_PREFIX + "metadata-store-realms?sharding-key=" + shardingKey)
.expectedReturnStatusCode(Response.Status.NOT_FOUND.getStatusCode()).get(this);
// TEST_SHARDING_KEY is presumably not mapped to any realm here -> NOT_FOUND.
new JerseyUriRequestBuilder(
TEST_NAMESPACE_URI_PREFIX + "/metadata-store-realms?sharding-key=" + TEST_SHARDING_KEY)
.expectedReturnStatusCode(Response.Status.NOT_FOUND.getStatusCode()).get(this);
// Malformed sharding key -> BAD_REQUEST.
new JerseyUriRequestBuilder(TEST_NAMESPACE_URI_PREFIX + "/metadata-store-realms?sharding-key="
+ INVALID_TEST_SHARDING_KEY)
.expectedReturnStatusCode(Response.Status.BAD_REQUEST.getStatusCode()).get(this);
// Valid key -> the owning realm and the key are echoed back.
String responseBody = new JerseyUriRequestBuilder(
TEST_NAMESPACE_URI_PREFIX + "/metadata-store-realms?sharding-key=" + shardingKey)
.isBodyReturnExpected(true).get(this);
// It is safe to cast the object and suppress warnings.
@SuppressWarnings("unchecked")
Map<String, String> queriedRealmMap = OBJECT_MAPPER.readValue(responseBody, Map.class);
Map<String, String> expectedRealm = ImmutableMap
.of(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM, TEST_REALM_1,
MetadataStoreRoutingConstants.SINGLE_SHARDING_KEY, shardingKey);
Assert.assertEquals(queriedRealmMap, expectedRealm);
}
/*
 * Verifies "PUT /metadata-store-realms/{realm}": 404 for an unknown namespace,
 * CREATED for a new realm, and CREATED again on a repeated addition.
 */
@Test(dependsOnMethods = "testGetMetadataStoreRealmWithShardingKey")
public void testAddMetadataStoreRealm() throws InvalidRoutingDataException {
  Set<String> realmsBefore = getAllRealms();
  Assert.assertFalse(realmsBefore.contains(TEST_REALM_3),
      "Metadata store directory should not have realm: " + TEST_REALM_3);
  // Adding under a namespace that does not exist is rejected.
  put(NON_EXISTING_NAMESPACE_URI_PREFIX + TEST_REALM_3, null,
      Entity.entity("", MediaType.APPLICATION_JSON_TYPE),
      Response.Status.NOT_FOUND.getStatusCode());
  // A valid namespace accepts the new realm.
  put(TEST_NAMESPACE_URI_PREFIX + "/metadata-store-realms/" + TEST_REALM_3, null,
      Entity.entity("", MediaType.APPLICATION_JSON_TYPE),
      Response.Status.CREATED.getStatusCode());
  // Adding the same realm a second time is also reported as CREATED.
  put(TEST_NAMESPACE_URI_PREFIX + "/metadata-store-realms/" + TEST_REALM_3, null,
      Entity.entity("", MediaType.APPLICATION_JSON_TYPE),
      Response.Status.CREATED.getStatusCode());
  // The realm set should now contain exactly the old realms plus TEST_REALM_3.
  realmsBefore.add(TEST_REALM_3);
  Assert.assertEquals(getAllRealms(), realmsBefore);
}
/*
 * Verifies "DELETE /metadata-store-realms/{realm}": 404 for an unknown
 * namespace, OK for an existing realm, and OK again on a repeated deletion.
 */
@Test(dependsOnMethods = "testAddMetadataStoreRealm")
public void testDeleteMetadataStoreRealm() throws InvalidRoutingDataException {
  Set<String> realmsBefore = getAllRealms();
  // Deleting under a namespace that does not exist is rejected.
  delete(NON_EXISTING_NAMESPACE_URI_PREFIX + TEST_REALM_3,
      Response.Status.NOT_FOUND.getStatusCode());
  // Deleting the realm added by the previous test succeeds.
  delete(TEST_NAMESPACE_URI_PREFIX + "/metadata-store-realms/" + TEST_REALM_3,
      Response.Status.OK.getStatusCode());
  // A repeated deletion is still reported as OK.
  delete(TEST_NAMESPACE_URI_PREFIX + "/metadata-store-realms/" + TEST_REALM_3,
      Response.Status.OK.getStatusCode());
  // Only TEST_REALM_3 should have disappeared from the realm set.
  realmsBefore.remove(TEST_REALM_3);
  Assert.assertEquals(getAllRealms(), realmsBefore);
}
/*
 * Tests REST endpoint: "GET /sharding-keys"
 * A valid namespace returns its name plus every sharding key from all of its
 * realms; an unknown namespace returns 404.
 */
@Test(dependsOnMethods = "testDeleteMetadataStoreRealm")
public void testGetShardingKeysInNamespace() throws IOException {
// Unknown namespace -> NOT_FOUND.
get(NON_EXISTING_NAMESPACE_URI_PREFIX + "sharding-keys", null,
Response.Status.NOT_FOUND.getStatusCode(), true);
String responseBody =
get(TEST_NAMESPACE_URI_PREFIX + "/sharding-keys", null, Response.Status.OK.getStatusCode(),
true);
// It is safe to cast the object and suppress warnings.
@SuppressWarnings("unchecked")
Map<String, Object> queriedShardingKeysMap = OBJECT_MAPPER.readValue(responseBody, Map.class);
// The payload carries exactly the namespace field and the key list.
Assert.assertEquals(queriedShardingKeysMap.keySet(), ImmutableSet
.of(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_NAMESPACE,
MetadataStoreRoutingConstants.SHARDING_KEYS));
Assert.assertEquals(
queriedShardingKeysMap.get(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_NAMESPACE),
TEST_NAMESPACE);
@SuppressWarnings("unchecked")
Set<String> queriedShardingKeys = new HashSet<>((Collection<String>) queriedShardingKeysMap
.get(MetadataStoreRoutingConstants.SHARDING_KEYS));
// Keys from both realms are expected; compared order-insensitively as sets.
Set<String> expectedShardingKeys = new HashSet<>();
expectedShardingKeys.addAll(TEST_SHARDING_KEYS_1);
expectedShardingKeys.addAll(TEST_SHARDING_KEYS_2);
Assert.assertEquals(queriedShardingKeys, expectedShardingKeys);
}
/*
 * Tests REST endpoint: "GET /sharding-keys?prefix={prefix}"
 * Covers: malformed prefix (400), a prefix matching nothing (empty list), and
 * a prefix matching keys from both realms.
 */
@SuppressWarnings("unchecked")
@Test(dependsOnMethods = "testGetShardingKeysInNamespace")
public void testGetShardingKeysUnderPath() throws IOException {
// Malformed prefix -> BAD_REQUEST.
new JerseyUriRequestBuilder(
TEST_NAMESPACE_URI_PREFIX + "/sharding-keys?prefix=" + INVALID_TEST_SHARDING_KEY)
.expectedReturnStatusCode(Response.Status.BAD_REQUEST.getStatusCode()).get(this);
// Test non existed prefix and empty sharding keys in response.
String responseBody = new JerseyUriRequestBuilder(
TEST_NAMESPACE_URI_PREFIX + "/sharding-keys?prefix=/non/Existed/Prefix")
.isBodyReturnExpected(true).get(this);
Map<String, Object> queriedShardingKeysMap = OBJECT_MAPPER.readValue(responseBody, Map.class);
Collection<Map<String, String>> emptyKeysList =
(Collection<Map<String, String>>) queriedShardingKeysMap
.get(MetadataStoreRoutingConstants.SHARDING_KEYS);
Assert.assertTrue(emptyKeysList.isEmpty());
// Success response with non empty sharding keys.
String shardingKeyPrefix = "/sharding/key";
responseBody = new JerseyUriRequestBuilder(
TEST_NAMESPACE_URI_PREFIX + "/sharding-keys?prefix=" + shardingKeyPrefix)
.isBodyReturnExpected(true).get(this);
queriedShardingKeysMap = OBJECT_MAPPER.readValue(responseBody, Map.class);
// Check fields: the prefix and the matching key list.
Assert.assertEquals(queriedShardingKeysMap.keySet(), ImmutableSet
.of(MetadataStoreRoutingConstants.SHARDING_KEY_PATH_PREFIX,
MetadataStoreRoutingConstants.SHARDING_KEYS));
// Check sharding key prefix in json response.
Assert.assertEquals(
queriedShardingKeysMap.get(MetadataStoreRoutingConstants.SHARDING_KEY_PATH_PREFIX),
shardingKeyPrefix);
Collection<Map<String, String>> queriedShardingKeys =
(Collection<Map<String, String>>) queriedShardingKeysMap
.get(MetadataStoreRoutingConstants.SHARDING_KEYS);
Set<Map<String, String>> queriedShardingKeysSet = new HashSet<>(queriedShardingKeys);
// Expected: every key from both realms, each entry tagged with its realm.
Set<Map<String, String>> expectedShardingKeysSet = new HashSet<>();
TEST_SHARDING_KEYS_1.forEach(key -> expectedShardingKeysSet.add(ImmutableMap
.of(MetadataStoreRoutingConstants.SINGLE_SHARDING_KEY, key,
MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM, TEST_REALM_1)));
TEST_SHARDING_KEYS_2.forEach(key -> expectedShardingKeysSet.add(ImmutableMap
.of(MetadataStoreRoutingConstants.SINGLE_SHARDING_KEY, key,
MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM, TEST_REALM_2)));
Assert.assertEquals(queriedShardingKeysSet, expectedShardingKeysSet);
}
/*
 * Tests REST endpoint: "GET /routing-data"
 * Returns the full routing table: for each realm, the sharding keys routed to it.
 */
@Test(dependsOnMethods = "testGetShardingKeysUnderPath")
public void testGetRoutingData() throws IOException {
/*
* Example responseBody:
* {
* "namespace" : "test-namespace",
* "routingData" : [ {
* "realm" : "testRealm2",
* "shardingKeys" : [ "/sharding/key/1/d", "/sharding/key/1/e", "/sharding/key/1/f" ]
* }, {
* "realm" : "testRealm1",
* "shardingKeys" : [ "/sharding/key/1/a", "/sharding/key/1/b", "/sharding/key/1/c" ]
* } ]
* }
*/
String responseBody = new JerseyUriRequestBuilder(TEST_NAMESPACE_URI_PREFIX + "/routing-data")
.isBodyReturnExpected(true).get(this);
// It is safe to cast the object and suppress warnings.
@SuppressWarnings("unchecked")
Map<String, Object> queriedShardingKeysMap = OBJECT_MAPPER.readValue(responseBody, Map.class);
// Check fields: the namespace name plus the routing-data list.
Assert.assertEquals(queriedShardingKeysMap.keySet(), ImmutableSet
.of(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_NAMESPACE,
MetadataStoreRoutingConstants.ROUTING_DATA));
// Check namespace in json response.
Assert.assertEquals(
queriedShardingKeysMap.get(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_NAMESPACE),
TEST_NAMESPACE);
@SuppressWarnings("unchecked")
List<Map<String, Object>> queriedShardingKeys =
(List<Map<String, Object>>) queriedShardingKeysMap
.get(MetadataStoreRoutingConstants.ROUTING_DATA);
// Realm order is not guaranteed, so compare as a set of realm entries.
Set<Map<String, Object>> queriedShardingKeysSet = new HashSet<>(queriedShardingKeys);
Set<Map<String, Object>> expectedShardingKeysSet = ImmutableSet.of(ImmutableMap
.of(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM, TEST_REALM_1,
MetadataStoreRoutingConstants.SHARDING_KEYS, TEST_SHARDING_KEYS_1), ImmutableMap
.of(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM, TEST_REALM_2,
MetadataStoreRoutingConstants.SHARDING_KEYS, TEST_SHARDING_KEYS_2));
Assert.assertEquals(queriedShardingKeysSet, expectedShardingKeysSet);
}
/*
 * Tests REST endpoint: "GET /metadata-store-realms/{realm}/sharding-keys"
 */
@Test(dependsOnMethods = "testGetRoutingData")
public void testGetShardingKeysInRealm() throws IOException {
  // A realm that was never registered must yield 404 NOT_FOUND.
  new JerseyUriRequestBuilder(
      TEST_NAMESPACE_URI_PREFIX + "/metadata-store-realms/nonExistedRealm/sharding-keys")
      .expectedReturnStatusCode(Response.Status.NOT_FOUND.getStatusCode()).get(this);
  // An existing realm returns its full sharding key list.
  String body = new JerseyUriRequestBuilder(
      TEST_NAMESPACE_URI_PREFIX + "/metadata-store-realms/" + TEST_REALM_1 + "/sharding-keys")
      .isBodyReturnExpected(true).get(this);
  verifyRealmShardingKeys(body);
}
/*
 * Tests REST endpoint: "GET /metadata-store-realms/{realm}/sharding-keys?prefix={prefix}"
 */
@SuppressWarnings("unchecked")
@Test(dependsOnMethods = "testGetShardingKeysInRealm")
public void testGetRealmShardingKeysUnderPath() throws IOException {
  String realmKeysUri =
      TEST_NAMESPACE_URI_PREFIX + "/metadata-store-realms/" + TEST_REALM_1 + "/sharding-keys";
  // A prefix that is not an absolute ZK path is rejected with 400 BAD_REQUEST.
  new JerseyUriRequestBuilder(realmKeysUri + "?prefix=" + INVALID_TEST_SHARDING_KEY)
      .expectedReturnStatusCode(Response.Status.BAD_REQUEST.getStatusCode()).get(this);
  // A valid but unknown prefix yields an empty key list, not an error.
  String body = new JerseyUriRequestBuilder(realmKeysUri + "?prefix=/non/Existed/Prefix")
      .isBodyReturnExpected(true).get(this);
  Map<String, Object> parsed = OBJECT_MAPPER.readValue(body, Map.class);
  Collection<Map<String, String>> keys =
      (Collection<Map<String, String>>) parsed.get(MetadataStoreRoutingConstants.SHARDING_KEYS);
  Assert.assertTrue(keys.isEmpty());
  // An unknown realm with a valid prefix also yields an empty key list.
  body = new JerseyUriRequestBuilder(TEST_NAMESPACE_URI_PREFIX
      + "/metadata-store-realms/nonExistedRealm/sharding-keys?prefix=/sharding/key")
      .isBodyReturnExpected(true).get(this);
  parsed = OBJECT_MAPPER.readValue(body, Map.class);
  keys = (Collection<Map<String, String>>) parsed.get(MetadataStoreRoutingConstants.SHARDING_KEYS);
  Assert.assertTrue(keys.isEmpty());
  // A valid prefix with matches returns the prefix, the realm name and the matching keys.
  String shardingKeyPrefix = "/sharding/key/1";
  body = new JerseyUriRequestBuilder(realmKeysUri + "?prefix=" + shardingKeyPrefix)
      .isBodyReturnExpected(true).get(this);
  parsed = OBJECT_MAPPER.readValue(body, Map.class);
  Assert.assertEquals(parsed.keySet(), ImmutableSet
      .of(MetadataStoreRoutingConstants.SHARDING_KEY_PATH_PREFIX,
          MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM,
          MetadataStoreRoutingConstants.SHARDING_KEYS));
  Assert.assertEquals(parsed.get(MetadataStoreRoutingConstants.SHARDING_KEY_PATH_PREFIX),
      shardingKeyPrefix);
  Assert.assertEquals(parsed.get(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM),
      TEST_REALM_1);
  // Compare as sets: key ordering in the response is not guaranteed.
  Assert.assertEquals(
      new HashSet<>((Collection<String>) parsed.get(MetadataStoreRoutingConstants.SHARDING_KEYS)),
      new HashSet<>(TEST_SHARDING_KEYS_1));
}
/*
 * Tests REST endpoint: "PUT /metadata-store-realms/{realm}/sharding-keys/{sharding-key}"
 */
@Test(dependsOnMethods = "testGetRealmShardingKeysUnderPath")
public void testAddShardingKey() throws InvalidRoutingDataException {
  Set<String> expectedKeys = getAllShardingKeysInTestRealm1();
  Assert.assertFalse(expectedKeys.contains(TEST_SHARDING_KEY),
      "Realm does not have sharding key: " + TEST_SHARDING_KEY);
  // All PUTs below carry an empty JSON body.
  Entity<String> emptyEntity = Entity.entity("", MediaType.APPLICATION_JSON_TYPE);
  String realmKeysUri =
      TEST_NAMESPACE_URI_PREFIX + "/metadata-store-realms/" + TEST_REALM_1 + "/sharding-keys";
  // Unknown namespace -> 404 NOT_FOUND.
  put(NON_EXISTING_NAMESPACE_URI_PREFIX + TEST_REALM_1 + "/sharding-keys" + TEST_SHARDING_KEY,
      null, emptyEntity, Response.Status.NOT_FOUND.getStatusCode());
  // Malformed sharding key -> 400 BAD_REQUEST.
  put(realmKeysUri + "//" + INVALID_TEST_SHARDING_KEY, null, emptyEntity,
      Response.Status.BAD_REQUEST.getStatusCode());
  // Successful creation -> 201 CREATED.
  put(realmKeysUri + TEST_SHARDING_KEY, null, emptyEntity,
      Response.Status.CREATED.getStatusCode());
  // Adding the same key to the same realm again is idempotent -> 201 CREATED.
  put(realmKeysUri + TEST_SHARDING_KEY, null, emptyEntity,
      Response.Status.CREATED.getStatusCode());
  // The same key cannot also belong to a second realm -> 400 BAD_REQUEST.
  put(TEST_NAMESPACE_URI_PREFIX + "/metadata-store-realms/" + TEST_REALM_2 + "/sharding-keys"
      + TEST_SHARDING_KEY, null, emptyEntity, Response.Status.BAD_REQUEST.getStatusCode());
  // The realm's key set must now contain exactly the old keys plus the new one.
  expectedKeys.add(TEST_SHARDING_KEY);
  Assert.assertEquals(getAllShardingKeysInTestRealm1(), expectedKeys);
}
/*
 * Tests REST endpoint: "DELETE /metadata-store-realms/{realm}/sharding-keys/{sharding-key}"
 */
@Test(dependsOnMethods = "testAddShardingKey")
public void testDeleteShardingKey() throws InvalidRoutingDataException {
  // Snapshot the realm's keys so the expected post-delete state can be derived from it.
  Set<String> expectedKeys = getAllShardingKeysInTestRealm1();
  // Unknown namespace -> 404 NOT_FOUND.
  delete(NON_EXISTING_NAMESPACE_URI_PREFIX + TEST_REALM_1 + "/sharding-keys" + TEST_SHARDING_KEY,
      Response.Status.NOT_FOUND.getStatusCode());
  String deleteUri = TEST_NAMESPACE_URI_PREFIX + "/metadata-store-realms/" + TEST_REALM_1
      + "/sharding-keys" + TEST_SHARDING_KEY;
  // Successful deletion -> 200 OK.
  delete(deleteUri, Response.Status.OK.getStatusCode());
  // Deleting an already-deleted key is idempotent -> 200 OK.
  delete(deleteUri, Response.Status.OK.getStatusCode());
  expectedKeys.remove(TEST_SHARDING_KEY);
  Assert.assertEquals(getAllShardingKeysInTestRealm1(), expectedKeys);
}
// Tests bulk replacement of all routing data via PUT on the routing-data endpoint.
@Test(dependsOnMethods = "testDeleteShardingKey")
public void testSetRoutingData() throws InvalidRoutingDataException, IOException {
  // Well-formed routing data: realm -> list of sharding keys (realms deliberately swapped
  // relative to the seed data so the replacement is observable).
  Map<String, List<String>> routingData = new HashMap<>();
  routingData.put(TEST_REALM_1, TEST_SHARDING_KEYS_2);
  routingData.put(TEST_REALM_2, TEST_SHARDING_KEYS_1);
  String routingDataString = OBJECT_MAPPER.writeValueAsString(routingData);
  // Malformed routing data: realm -> single string instead of a list of keys.
  Map<String, String> badFormatRoutingData = new HashMap<>();
  badFormatRoutingData.put(TEST_REALM_1, TEST_REALM_2);
  badFormatRoutingData.put(TEST_REALM_2, TEST_REALM_1);
  String badFormatRoutingDataString = OBJECT_MAPPER.writeValueAsString(badFormatRoutingData);
  String routingDataUri = TEST_NAMESPACE_URI_PREFIX
      + MetadataStoreRoutingConstants.MSDS_GET_ALL_ROUTING_DATA_ENDPOINT;
  // Unknown namespace -> 404 NOT_FOUND.
  put("/namespaces/non-existing-namespace"
      + MetadataStoreRoutingConstants.MSDS_GET_ALL_ROUTING_DATA_ENDPOINT, null,
      Entity.entity(routingDataString, MediaType.APPLICATION_JSON_TYPE),
      Response.Status.NOT_FOUND.getStatusCode());
  // Unparseable payload -> 400 BAD_REQUEST.
  put(routingDataUri, null, Entity.entity("?", MediaType.APPLICATION_JSON_TYPE),
      Response.Status.BAD_REQUEST.getStatusCode());
  // Parseable but wrongly shaped payload -> 400 BAD_REQUEST.
  put(routingDataUri, null,
      Entity.entity(badFormatRoutingDataString, MediaType.APPLICATION_JSON_TYPE),
      Response.Status.BAD_REQUEST.getStatusCode());
  // Valid payload -> 201 CREATED and the routing data is fully replaced.
  put(routingDataUri, null, Entity.entity(routingDataString, MediaType.APPLICATION_JSON_TYPE),
      Response.Status.CREATED.getStatusCode());
  Assert.assertEquals(getRawRoutingData(), routingData);
}
// Asserts that the given JSON response body contains exactly testRealm1's name and its full
// set of sharding keys.
private void verifyRealmShardingKeys(String responseBody) throws IOException {
  // Safe cast: the endpoint always returns a JSON object.
  @SuppressWarnings("unchecked")
  Map<String, Object> parsed = OBJECT_MAPPER.readValue(responseBody, Map.class);
  // Only the realm field and the sharding key field may be present.
  Assert.assertEquals(parsed.keySet(), ImmutableSet
      .of(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM,
          MetadataStoreRoutingConstants.SHARDING_KEYS));
  Assert.assertEquals(parsed.get(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM),
      TEST_REALM_1);
  @SuppressWarnings("unchecked")
  Collection<String> keys =
      (Collection<String>) parsed.get(MetadataStoreRoutingConstants.SHARDING_KEYS);
  // Compare as sets: key ordering in the response is not guaranteed.
  Assert.assertEquals(new HashSet<>(keys), new HashSet<>(TEST_SHARDING_KEYS_1));
}
}
| 9,313 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/MetadataStoreDirectoryAccessorTestBase.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.helix.TestHelper;
import org.apache.helix.msdcommon.constant.MetadataStoreRoutingConstants;
import org.apache.helix.msdcommon.exception.InvalidRoutingDataException;
import org.apache.helix.rest.metadatastore.accessor.MetadataStoreRoutingDataReader;
import org.apache.helix.rest.metadatastore.accessor.ZkRoutingDataReader;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.serializer.ZNRecordSerializer;
import org.apache.helix.zookeeper.zkclient.ZkClient;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
// Base class for Metadata Store Directory REST tests. Before the suite runs it seeds every
// routing ZooKeeper with two realms and their sharding keys, and it clears the routing data
// again afterwards. Subclasses read the current routing state through _routingDataReader.
public class MetadataStoreDirectoryAccessorTestBase extends AbstractTestClass {
/*
 * The following are constants to be used for testing.
 */
protected static final String TEST_NAMESPACE_URI_PREFIX = "/namespaces/" + TEST_NAMESPACE;
protected static final String NON_EXISTING_NAMESPACE_URI_PREFIX =
"/namespaces/not-existed-namespace/metadata-store-realms/";
protected static final String TEST_REALM_1 = "testRealm1";
protected static final List<String> TEST_SHARDING_KEYS_1 =
Arrays.asList("/sharding/key/1/a", "/sharding/key/1/b", "/sharding/key/1/c");
protected static final String TEST_REALM_2 = "testRealm2";
protected static final List<String> TEST_SHARDING_KEYS_2 =
Arrays.asList("/sharding/key/1/d", "/sharding/key/1/e", "/sharding/key/1/f");
protected static final String TEST_REALM_3 = "testRealm3";
protected static final String TEST_SHARDING_KEY = "/sharding/key/1/x";
// Missing the leading '/', used by tests that expect a BAD_REQUEST for malformed keys.
protected static final String INVALID_TEST_SHARDING_KEY = "sharding/key/1/x";
// List of all ZK addresses, each of which corresponds to a namespace/routing ZK
protected List<String> _zkList;
protected MetadataStoreRoutingDataReader _routingDataReader;
@BeforeClass
public void beforeClass() throws Exception {
_zkList = new ArrayList<>(ZK_SERVER_MAP.keySet());
// Start from a clean slate in case a previous suite left routing data behind.
clearRoutingData();
// Write dummy mappings in ZK
// Create a node that represents a realm address and add 3 sharding keys to it
ZNRecord znRecord = new ZNRecord("RoutingInfo");
_zkList.forEach(zk -> {
// The serializer must be installed before any writeData call on this client.
ZK_SERVER_MAP.get(zk).getZkClient().setZkSerializer(new ZNRecordSerializer());
// Write first realm and sharding keys pair
znRecord.setListField(MetadataStoreRoutingConstants.ZNRECORD_LIST_FIELD_KEY,
TEST_SHARDING_KEYS_1);
ZK_SERVER_MAP.get(zk).getZkClient()
.createPersistent(MetadataStoreRoutingConstants.ROUTING_DATA_PATH + "/" + TEST_REALM_1,
true);
ZK_SERVER_MAP.get(zk).getZkClient()
.writeData(MetadataStoreRoutingConstants.ROUTING_DATA_PATH + "/" + TEST_REALM_1,
znRecord);
// Create another realm and sharding keys pair
// (znRecord is reused; its list field is overwritten before each write)
znRecord.setListField(MetadataStoreRoutingConstants.ZNRECORD_LIST_FIELD_KEY,
TEST_SHARDING_KEYS_2);
ZK_SERVER_MAP.get(zk).getZkClient()
.createPersistent(MetadataStoreRoutingConstants.ROUTING_DATA_PATH + "/" + TEST_REALM_2,
true);
ZK_SERVER_MAP.get(zk).getZkClient()
.writeData(MetadataStoreRoutingConstants.ROUTING_DATA_PATH + "/" + TEST_REALM_2,
znRecord);
});
_routingDataReader = new ZkRoutingDataReader(TEST_NAMESPACE, _zkAddrTestNS, null);
// Expose this REST server's host/port via system properties so MSDS-aware clients under
// test can locate it.
System.setProperty(MetadataStoreRoutingConstants.MSDS_SERVER_HOSTNAME_KEY,
getBaseUri().getHost());
System.setProperty(MetadataStoreRoutingConstants.MSDS_SERVER_PORT_KEY,
Integer.toString(getBaseUri().getPort()));
}
@AfterClass
public void afterClass() throws Exception {
// Undo the system-property overrides and release the reader before wiping routing data.
System.clearProperty(MetadataStoreRoutingConstants.MSDS_SERVER_HOSTNAME_KEY);
System.clearProperty(MetadataStoreRoutingConstants.MSDS_SERVER_PORT_KEY);
_routingDataReader.close();
clearRoutingData();
}
// Deletes every realm node under the routing data path on all ZKs, retrying until the
// deletions are observed (up to TestHelper.WAIT_DURATION); asserts on success.
protected void clearRoutingData() throws Exception {
Assert.assertTrue(TestHelper.verify(() -> {
for (String zk : _zkList) {
ZkClient zkClient = ZK_SERVER_MAP.get(zk).getZkClient();
if (zkClient.exists(MetadataStoreRoutingConstants.ROUTING_DATA_PATH)) {
for (String zkRealm : zkClient
.getChildren(MetadataStoreRoutingConstants.ROUTING_DATA_PATH)) {
zkClient.delete(MetadataStoreRoutingConstants.ROUTING_DATA_PATH + "/" + zkRealm);
}
}
}
// Re-check: succeed only when every routing path is gone or empty on every ZK.
for (String zk : _zkList) {
ZkClient zkClient = ZK_SERVER_MAP.get(zk).getZkClient();
if (zkClient.exists(MetadataStoreRoutingConstants.ROUTING_DATA_PATH) && !zkClient
.getChildren(MetadataStoreRoutingConstants.ROUTING_DATA_PATH).isEmpty()) {
return false;
}
}
return true;
}, TestHelper.WAIT_DURATION), "Routing data path should be deleted after the tests.");
}
// Uses routingDataReader to get the latest realms in test-namespace; returns a modifiable copy
// because it'll be modified in test cases
protected Set<String> getAllRealms() throws InvalidRoutingDataException {
return new HashSet<>(_routingDataReader.getRoutingData().keySet());
}
// Uses routingDataReader to get the latest sharding keys in test-namespace, testRealm1
protected Set<String> getAllShardingKeysInTestRealm1() throws InvalidRoutingDataException {
return new HashSet<>(_routingDataReader.getRoutingData().get(TEST_REALM_1));
}
// Returns the raw realm-to-sharding-keys mapping as currently stored in ZK.
protected Map<String, List<String>> getRawRoutingData() throws InvalidRoutingDataException {
return _routingDataReader.getRoutingData();
}
}
| 9,314 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/TestDefaultMonitoringMbeans.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.management.ManagementFactory;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.MBeanException;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import javax.ws.rs.core.Response;
import org.apache.helix.TestHelper;
import org.apache.helix.rest.common.HelixRestNamespace;
import org.testng.Assert;
import org.testng.annotations.Test;
// Tests that the REST server's Jersey monitoring MBeans are registered and report correctly.
public class TestDefaultMonitoringMbeans extends AbstractTestClass {
  private static final String DEFAULT_METRIC_DOMAIN = "org.glassfish.jersey";

  // For the entire testing environment we could have 2 - 4 REST servers during testing, so we
  // don't know which REST server got the request and reported the number; we have to loop over
  // all registered MBeans to find the report.
  // This is an unstable test because the getClusters MBean may not yet be registered right
  // after our call, and it is not critical for existing logic, so it is disabled for now.
  // TODO: Make the MBean stably queryable.
  @Test(enabled = false)
  public void testDefaultMonitoringMbeans()
      throws MBeanException, ReflectionException, InstanceNotFoundException, InterruptedException {
    System.out.println("Start test :" + TestHelper.getTestMethodName());
    // Issue a random number of "GET /clusters" requests; the MBean request counter must
    // eventually match that number.
    int listClusters = new Random().nextInt(10);
    for (int i = 0; i < listClusters; i++) {
      get("clusters", null, Response.Status.OK.getStatusCode(), true);
    }
    MBeanServer beanServer = ManagementFactory.getPlatformMBeanServer();
    boolean correctReports = false;
    // It may take a couple of milliseconds to propagate the data to the MBeanServer.
    while (!correctReports) {
      for (ObjectName objectName : beanServer.queryNames(null, null)) {
        if (objectName.toString().contains("getClusters")) {
          // The object name is complicated, so we take each matching one and check whether it
          // has the expected attribute with the value matching our expectation.
          try {
            if (beanServer.getAttribute(objectName, "RequestCount_total")
                .equals(Long.valueOf(listClusters))) {
              correctReports = true;
            }
          } catch (AttributeNotFoundException ignored) {
            // Not every matching MBean exposes this attribute; keep scanning the rest.
          }
        }
      }
      Thread.sleep(50);
    }
    Assert.assertTrue(correctReports);
    System.out.println("End test :" + TestHelper.getTestMethodName());
  }

  // Verifies that each REST namespace registers its Jersey metric MBeans with the namespace
  // name as the MBean "type" key property.
  @Test
  public void testMBeanApplicationName() throws Exception {
    Set<String> namespaces =
        new HashSet<>(Arrays.asList(HelixRestNamespace.DEFAULT_NAMESPACE_NAME, TEST_NAMESPACE));
    MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer();
    // Bug fix: the boolean returned by TestHelper.verify was previously ignored, so this test
    // could never fail. Assert on it (matching how sibling tests use TestHelper.verify) so a
    // missing or wrong application name is actually reported.
    Assert.assertTrue(TestHelper.verify(() -> {
      Set<ObjectName> objectNames =
          mBeanServer.queryNames(new ObjectName(DEFAULT_METRIC_DOMAIN + ":*"), null);
      Set<String> appNames = new HashSet<>();
      for (ObjectName mBeanName : objectNames) {
        appNames.add(mBeanName.getKeyProperty("type"));
      }
      return namespaces.equals(appNames);
    }, TestHelper.WAIT_DURATION), "Jersey metric MBeans should be registered per namespace.");
  }
}
| 9,315 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/TestAuthValidator.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import org.apache.helix.TestHelper;
import org.apache.helix.rest.acl.NoopAclRegister;
import org.apache.helix.rest.common.HelixRestNamespace;
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.server.authValidator.AuthValidator;
import org.apache.helix.rest.server.resources.helix.ClusterAccessor;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Test;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
public class TestAuthValidator extends AbstractTestClass {
private String _mockBaseUri;
private CloseableHttpClient _httpClient;
private static String CLASSNAME_TEST_DEFAULT_AUTH = "testDefaultAuthValidator";
private static String CLASSNAME_TEST_CST_AUTH = "testCustomAuthValidator";
@AfterClass
public void afterClass() {
TestHelper.dropCluster(CLASSNAME_TEST_DEFAULT_AUTH, _gZkClient);
TestHelper.dropCluster(CLASSNAME_TEST_CST_AUTH, _gZkClient);
}
@Test
public void testDefaultAuthValidator() throws JsonProcessingException {
put("clusters/" + CLASSNAME_TEST_DEFAULT_AUTH, null, Entity.entity("", MediaType.APPLICATION_JSON_TYPE),
Response.Status.CREATED.getStatusCode());
String body = get("clusters/", null, Response.Status.OK.getStatusCode(), true);
JsonNode node = OBJECT_MAPPER.readTree(body);
String clustersStr = node.get(ClusterAccessor.ClusterProperties.clusters.name()).toString();
Assert.assertTrue(clustersStr.contains(CLASSNAME_TEST_DEFAULT_AUTH));
}
@Test(dependsOnMethods = "testDefaultAuthValidator")
public void testCustomAuthValidator() throws IOException, InterruptedException {
int newPort = getBaseUri().getPort() + 1;
// Start a second server for testing Distributed Leader Election for writes
_mockBaseUri = HttpConstants.HTTP_PROTOCOL_PREFIX + getBaseUri().getHost() + ":" + newPort;
_httpClient = HttpClients.createDefault();
AuthValidator mockAuthValidatorPass = Mockito.mock(AuthValidator.class);
when(mockAuthValidatorPass.validate(any())).thenReturn(true);
AuthValidator mockAuthValidatorReject = Mockito.mock(AuthValidator.class);
when(mockAuthValidatorReject.validate(any())).thenReturn(false);
List<HelixRestNamespace> namespaces = new ArrayList<>();
namespaces.add(new HelixRestNamespace(HelixRestNamespace.DEFAULT_NAMESPACE_NAME,
HelixRestNamespace.HelixMetadataStoreType.ZOOKEEPER, ZK_ADDR, true));
// Create a server that allows operations based on namespace auth and rejects operations based
// on cluster auth
HelixRestServer server =
new HelixRestServer(namespaces, newPort, getBaseUri().getPath(), Collections.emptyList(),
mockAuthValidatorReject, mockAuthValidatorPass, new NoopAclRegister());
server.start();
HttpUriRequest request =
buildRequest("/clusters/" + CLASSNAME_TEST_CST_AUTH, HttpConstants.RestVerbs.PUT, "");
sendRequestAndValidate(request, Response.Status.CREATED.getStatusCode());
request = buildRequest("/clusters/" + CLASSNAME_TEST_CST_AUTH, HttpConstants.RestVerbs.GET, "");
sendRequestAndValidate(request, Response.Status.FORBIDDEN.getStatusCode());
server.shutdown();
_httpClient.close();
// Create a server that rejects operations based on namespace auth and allows operations based
// on cluster auth
server =
new HelixRestServer(namespaces, newPort, getBaseUri().getPath(), Collections.emptyList(),
mockAuthValidatorPass, mockAuthValidatorReject, new NoopAclRegister());
server.start();
_httpClient = HttpClients.createDefault();
request = buildRequest("/clusters/" + CLASSNAME_TEST_CST_AUTH, HttpConstants.RestVerbs.GET, "");
sendRequestAndValidate(request, Response.Status.OK.getStatusCode());
request = buildRequest("/clusters", HttpConstants.RestVerbs.GET, "");
sendRequestAndValidate(request, Response.Status.FORBIDDEN.getStatusCode());
server.shutdown();
_httpClient.close();
}
private HttpUriRequest buildRequest(String urlSuffix, HttpConstants.RestVerbs requestMethod,
String jsonEntity) {
String url = _mockBaseUri + urlSuffix;
switch (requestMethod) {
case PUT:
HttpPut httpPut = new HttpPut(url);
httpPut.setEntity(new StringEntity(jsonEntity, ContentType.APPLICATION_JSON));
return httpPut;
case DELETE:
return new HttpDelete(url);
case GET:
return new HttpGet(url);
default:
throw new IllegalArgumentException("Unsupported requestMethod: " + requestMethod);
}
}
private void sendRequestAndValidate(HttpUriRequest request, int expectedResponseCode)
throws IllegalArgumentException, IOException {
HttpResponse response = _httpClient.execute(request);
Assert.assertEquals(response.getStatusLine().getStatusCode(), expectedResponseCode);
}
}
| 9,316 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/TestZooKeeperAccessor.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Base64;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.ws.rs.core.Response;
import org.apache.helix.AccessOption;
import org.apache.helix.manager.zk.ZKUtil;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.rest.server.util.JerseyUriRequestBuilder;
import org.apache.helix.zookeeper.zkclient.exception.ZkMarshallingError;
import org.apache.helix.zookeeper.zkclient.serialize.ZkSerializer;
import org.apache.zookeeper.data.Stat;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestZooKeeperAccessor extends AbstractTestClass {
private ZkBaseDataAccessor<byte[]> _testBaseDataAccessor;
// Creates a dedicated raw-byte ZK accessor for the suite: serialize passes byte[] through
// untouched, while deserialize turns bytes into a String.
// NOTE(review): the accessor is typed ZkBaseDataAccessor<byte[]> but reads through this
// serializer would actually yield Strings; the tests here only write/create/remove through
// it, so the mismatch looks benign — confirm before reusing it for typed reads.
@BeforeClass
public void beforeClass() {
_testBaseDataAccessor = new ZkBaseDataAccessor<>(ZK_ADDR, new ZkSerializer() {
@Override
public byte[] serialize(Object o)
throws ZkMarshallingError {
return (byte[]) o;
}
@Override
public Object deserialize(byte[] bytes)
throws ZkMarshallingError {
return new String(bytes);
}
}, ZkBaseDataAccessor.ZkClientType.DEDICATED);
}
// Releases the dedicated ZkClient held by the test accessor.
@AfterClass
public void afterClass() {
_testBaseDataAccessor.close();
}
// Tests "GET zookeeper{path}?command=exists" before and after the ZNode is created.
@Test
public void testExists() throws IOException {
  String path = "/path";
  // No ZNode yet: the endpoint must report exists == false.
  Assert.assertFalse(_testBaseDataAccessor.exists(path, AccessOption.PERSISTENT));
  String body = new JerseyUriRequestBuilder("zookeeper{}?command=exists").format(path)
      .isBodyReturnExpected(true).get(this);
  Map<String, Boolean> parsed = OBJECT_MAPPER.readValue(body, HashMap.class);
  Assert.assertTrue(parsed.containsKey("exists"));
  Assert.assertFalse(parsed.get("exists"));
  // Create the ZNode and expect exists == true from both the accessor and the endpoint.
  String content = "testExists";
  Assert.assertTrue(
      _testBaseDataAccessor.create(path, content.getBytes(), AccessOption.PERSISTENT));
  Assert.assertTrue(_testBaseDataAccessor.exists(path, AccessOption.PERSISTENT));
  body = new JerseyUriRequestBuilder("zookeeper{}?command=exists").format(path)
      .isBodyReturnExpected(true).get(this);
  parsed = OBJECT_MAPPER.readValue(body, HashMap.class);
  Assert.assertTrue(parsed.containsKey("exists"));
  Assert.assertTrue(parsed.get("exists"));
  // Clean up
  _testBaseDataAccessor.remove(path, AccessOption.PERSISTENT);
}
// Tests "GET zookeeper{path}?command=getStringData|getBinaryData": a missing node yields 404,
// an existing node returns its payload plus the ZNode's Stat.
@Test
public void testGetData() throws IOException {
  String path = "/path";
  String content = "testGetData";
  Assert.assertFalse(_testBaseDataAccessor.exists(path, AccessOption.PERSISTENT));
  // Bug fix (comment/code mismatch): the original comment said "Expect BAD_REQUEST" but the
  // endpoint returns NOT_FOUND for a missing ZNode, which is what the code asserts. The
  // response body is irrelevant here, so the previous dead assignment to `data` was dropped.
  new JerseyUriRequestBuilder("zookeeper{}?command=getStringData").format(path)
      .isBodyReturnExpected(false)
      .expectedReturnStatusCode(Response.Status.NOT_FOUND.getStatusCode()).get(this);
  // Now write data and test
  _testBaseDataAccessor.create(path, content.getBytes(), AccessOption.PERSISTENT);
  // Get the stat object to compare against the stat embedded in the responses.
  Stat expectedStat = _testBaseDataAccessor.getStat(path, AccessOption.PERSISTENT);
  String getStatKey = "getStat";
  // Test getStringData
  String getStringDataKey = "getStringData";
  String data = new JerseyUriRequestBuilder("zookeeper{}?command=getStringData").format(path)
      .isBodyReturnExpected(true).get(this);
  Map<String, Object> stringResult = OBJECT_MAPPER.readValue(data, Map.class);
  Assert.assertTrue(stringResult.containsKey(getStringDataKey));
  Assert.assertEquals(stringResult.get(getStringDataKey), content);
  Assert.assertTrue(stringResult.containsKey(getStatKey));
  Assert.assertEquals(stringResult.get(getStatKey), ZKUtil.fromStatToMap(expectedStat));
  // Test getBinaryData
  String getBinaryDataKey = "getBinaryData";
  data = new JerseyUriRequestBuilder("zookeeper{}?command=getBinaryData").format(path)
      .isBodyReturnExpected(true).get(this);
  Map<String, Object> binaryResult = OBJECT_MAPPER.readValue(data, Map.class);
  Assert.assertTrue(binaryResult.containsKey(getBinaryDataKey));
  // Note: The response's byte array is encoded into a String using Base64 (for safety),
  // so the user must decode with Base64 to get the original byte array back
  byte[] decodedBytes = Base64.getDecoder().decode((String) binaryResult.get(getBinaryDataKey));
  Assert.assertEquals(decodedBytes, content.getBytes());
  Assert.assertTrue(binaryResult.containsKey(getStatKey));
  Assert.assertEquals(binaryResult.get(getStatKey), ZKUtil.fromStatToMap(expectedStat));
  // Clean up
  _testBaseDataAccessor.remove(path, AccessOption.PERSISTENT);
}
@Test
public void testGetChildren()
    throws IOException {
  // Create a parent ZNode with several PERSISTENT_SEQUENTIAL children, then verify the
  // getChildren command reports every one of them.
  String parentPath = "/path";
  String childSuffix = "/children";
  int expectedCount = 20;
  int created = 0;
  while (created < expectedCount) {
    _testBaseDataAccessor.create(parentPath + childSuffix, null,
        AccessOption.PERSISTENT_SEQUENTIAL);
    created++;
  }
  // Verify via the REST endpoint
  String resultKey = "getChildren";
  String responseBody = new JerseyUriRequestBuilder("zookeeper{}?command=getChildren")
      .format(parentPath).isBodyReturnExpected(true).get(this);
  Map<String, List<String>> parsed = OBJECT_MAPPER.readValue(responseBody, HashMap.class);
  Assert.assertTrue(parsed.containsKey(resultKey));
  Assert.assertEquals(parsed.get(resultKey).size(), expectedCount);
  // PERSISTENT_SEQUENTIAL creation keeps the "children" prefix in every generated name
  for (String childName : parsed.get(resultKey)) {
    Assert.assertTrue(childName.contains("children"));
  }
  // Clean up
  _testBaseDataAccessor.remove(parentPath, AccessOption.PERSISTENT);
}
@Test
public void testGetStat() throws IOException {
  // Verifies the getStat command: NOT_FOUND for a missing ZNode, and for an existing one a
  // map containing every Stat field plus the requested "path" entry.
  String path = "/path/getStat";
  // Make sure it returns a NOT FOUND if there is no ZNode
  String data = new JerseyUriRequestBuilder("zookeeper{}?command=getStat").format(path)
      .isBodyReturnExpected(false)
      .expectedReturnStatusCode(Response.Status.NOT_FOUND.getStatusCode()).get(this);
  // Create a test ZNode (persistent) and capture its expected Stat fields
  _testBaseDataAccessor.create(path, null, AccessOption.PERSISTENT);
  Stat stat = _testBaseDataAccessor.getStat(path, AccessOption.PERSISTENT);
  Map<String, String> expectedFields = ZKUtil.fromStatToMap(stat);
  // The endpoint also echoes the requested path in its response
  expectedFields.put("path", path);
  // Verify with the REST endpoint
  data = new JerseyUriRequestBuilder("zookeeper{}?command=getStat").format(path)
      .isBodyReturnExpected(true).get(this);
  Map<String, String> result = OBJECT_MAPPER.readValue(data, HashMap.class);
  Assert.assertEquals(result, expectedFields);
  // Clean up
  _testBaseDataAccessor.remove(path, AccessOption.PERSISTENT);
}
@Test
public void testDelete() {
  // Exercises the DELETE verb of the zookeeper endpoint across three node scenarios.
  String basePath = "/path";
  String targetPath = basePath + "/delete";
  try {
    // Case 1: deleting a PERSISTENT node through REST is forbidden; the node must survive.
    _testBaseDataAccessor.create(targetPath, null, AccessOption.PERSISTENT);
    new JerseyUriRequestBuilder("zookeeper{}").format(targetPath)
        .expectedReturnStatusCode(Response.Status.FORBIDDEN.getStatusCode()).delete(this);
    Assert.assertTrue(_testBaseDataAccessor.exists(targetPath, AccessOption.PERSISTENT));
    // Case 2: deleting a path that does not exist returns NOT_FOUND.
    new JerseyUriRequestBuilder("zookeeper{}").format(targetPath + "/foobar")
        .expectedReturnStatusCode(Response.Status.NOT_FOUND.getStatusCode()).delete(this);
    // Case 3: an EPHEMERAL node may be deleted through the endpoint.
    _testBaseDataAccessor.remove(targetPath, AccessOption.PERSISTENT);
    _testBaseDataAccessor.create(targetPath, null, AccessOption.EPHEMERAL);
    new JerseyUriRequestBuilder("zookeeper{}").format(targetPath)
        .expectedReturnStatusCode(Response.Status.OK.getStatusCode()).delete(this);
    Assert.assertFalse(_testBaseDataAccessor.exists(targetPath, AccessOption.PERSISTENT));
  } finally {
    // Clean up
    _testBaseDataAccessor.remove(basePath, AccessOption.PERSISTENT);
  }
}
}
| 9,317 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/util/JerseyUriRequestBuilder.java
|
package org.apache.helix.rest.server.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashMap;
import java.util.Map;
import javax.ws.rs.client.Entity;
import javax.ws.rs.client.WebTarget;
import javax.ws.rs.core.Response;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import org.apache.commons.lang3.StringUtils;
import org.glassfish.jersey.test.JerseyTestNg;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
* Simplify the REST URI construction for Jersey Test Framework
* Example usage:
* new JerseyUriRequestBuilder("clusters/{}/instances/{}?command=disable")
* .format(CLUSTER_NAME, INSTANCE_NAME)
* .post(...);
*/
public class JerseyUriRequestBuilder {
  private static final String PLACE_HOLDER = "{}";

  // URI path portion with "{}" placeholders progressively substituted by format().
  private final StringBuilder _uriBuilder;
  // Parsed query parameters; immutable when produced by Splitter, empty HashMap otherwise.
  private final Map<String, String> _queryParams;
  // Number of "{}" placeholders in the path that format() must fill.
  private final int _requiredParameters;
  // The raw query string (everything after '?'), or "" when none was given.
  private final String _rawQuery;

  // default expected status code and if body returned
  private int _expectedStatusCode = Response.Status.OK.getStatusCode();
  private boolean _isBodyReturnExpected = false;

  /**
   * @param uri a URI template whose path may contain "{}" placeholders and which may carry an
   *            optional "?key=value&key2=value2" query string
   */
  public JerseyUriRequestBuilder(String uri) {
    String[] uris = uri.split("\\?");
    if (uris.length > 1) {
      _queryParams = Splitter.on('&').trimResults().withKeyValueSeparator("=").split(uris[1]);
      _rawQuery = uris[1];
    } else {
      _queryParams = new HashMap<>();
      _rawQuery = "";
    }
    _uriBuilder = new StringBuilder(uris[0]);
    _requiredParameters = StringUtils.countMatches(uris[0], PLACE_HOLDER);
  }

  /**
   * Substitutes the "{}" placeholders, left to right, with the given parameters.
   * @param parameters one value per placeholder in the URI template
   * @return this builder for chaining
   * @throws IllegalArgumentException if the parameter count does not match the placeholder count
   */
  public JerseyUriRequestBuilder format(String... parameters) {
    // Include the expected/actual counts so a mismatch fails with a diagnostic message
    // instead of a bare IllegalArgumentException.
    Preconditions.checkArgument(_requiredParameters == parameters.length,
        "URI template requires %s parameters but %s were supplied", _requiredParameters,
        parameters.length);
    for (String param : parameters) {
      int index = _uriBuilder.indexOf(PLACE_HOLDER);
      _uriBuilder.replace(index, index + PLACE_HOLDER.length(), param);
    }
    return this;
  }

  /**
   * Sets the HTTP status code the request methods will assert against (default: 200 OK).
   */
  public JerseyUriRequestBuilder expectedReturnStatusCode(int expectedStatusCode) {
    _expectedStatusCode = expectedStatusCode;
    return this;
  }

  /**
   * Declares whether get() should assert that a non-null response body is returned.
   */
  public JerseyUriRequestBuilder isBodyReturnExpected(boolean isBodyReturnExpected) {
    _isBodyReturnExpected = isBodyReturnExpected;
    return this;
  }

  /**
   * Execute get request and verify status code and media type.
   * @param container the Jersey test container to issue the request against
   * @return the response body as a string
   */
  public String get(JerseyTestNg.ContainerPerClassTest container) {
    final Response response = buildWebTarget(container).request().get();
    Assert.assertEquals(response.getStatus(), _expectedStatusCode);
    if (_expectedStatusCode != Response.Status.NO_CONTENT.getStatusCode()) {
      // NOT_FOUND and BAD_REQUEST responses carry a text-based HTML body; everything else
      // is expected to be an application/* media type.
      if (_expectedStatusCode != Response.Status.NOT_FOUND.getStatusCode()
          && _expectedStatusCode != Response.Status.BAD_REQUEST.getStatusCode()) {
        Assert.assertEquals(response.getMediaType().getType(), "application");
      } else {
        Assert.assertEquals(response.getMediaType().getType(), "text");
      }
    } // else, NO_CONTENT should not return any content, so no need to check the type.
    String body = response.readEntity(String.class);
    if (_isBodyReturnExpected) {
      Assert.assertNotNull(body);
    }
    return body;
  }

  /**
   * Execute get request without any assertions, returning the raw Response.
   */
  public Response getResponse(JerseyTestNg.ContainerPerClassTest container) {
    return buildWebTarget(container).request().get();
  }

  /**
   * Execute put request and assert the expected status code.
   * @param container the Jersey test container
   * @param entity the request entity to PUT
   */
  public void put(JerseyTestNg.ContainerPerClassTest container, Entity entity) {
    final Response response = buildWebTarget(container).request().put(entity);
    Assert.assertEquals(response.getStatus(), _expectedStatusCode);
  }

  /**
   * Execute post request and assert the expected status code.
   * @param container the Jersey test container
   * @param entity the request entity to POST
   * @return the raw response for further inspection by the caller
   */
  public Response post(JerseyTestNg.ContainerPerClassTest container, Entity entity) {
    final Response response = buildWebTarget(container).request().post(entity);
    Assert.assertEquals(response.getStatus(), _expectedStatusCode);
    return response;
  }

  /**
   * Execute delete request and assert the expected status code.
   * @param container the Jersey test container
   */
  public void delete(JerseyTestNg.ContainerPerClassTest container) {
    final Response response = buildWebTarget(container).request().delete();
    Assert.assertEquals(response.getStatus(), _expectedStatusCode);
  }

  // Builds the WebTarget from the formatted path and re-applies each query parameter.
  private WebTarget buildWebTarget(JerseyTestNg.ContainerPerClassTest container) {
    WebTarget webTarget = container.target(_uriBuilder.toString());
    for (Map.Entry<String, String> entry : _queryParams.entrySet()) {
      webTarget = webTarget.queryParam(entry.getKey(), entry.getValue());
    }
    return webTarget;
  }

  // Reassembles the full path including the raw query string (used by the self-test below).
  private String getPath() {
    if (StringUtils.isEmpty(_rawQuery)) {
      return _uriBuilder.toString();
    } else {
      return _uriBuilder.toString() + "?" + _rawQuery;
    }
  }

  @Test
  public void testUriBuilderGetPath() {
    JerseyUriRequestBuilder uriBuilder =
        new JerseyUriRequestBuilder("clusters/{}/instances/{}?command=disable")
            .format("TEST-CLUSTER", "instance1");
    String path = uriBuilder.getPath();
    Assert.assertEquals(uriBuilder._uriBuilder.toString(),
        "clusters/TEST-CLUSTER/instances/instance1");
    Assert.assertEquals(path, "clusters/TEST-CLUSTER/instances/instance1?command=disable");
    Assert.assertEquals(uriBuilder._queryParams.get("command"), "disable");
  }
}
| 9,318 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/util/TestInstanceValidationUtilInRest.java
|
package org.apache.helix.rest.server.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.PropertyKey;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.MasterSlaveSMD;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.util.InstanceValidationUtil;
import org.junit.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestInstanceValidationUtilInRest{
  private static final String RESOURCE_NAME = "TestResource";
  private static final String TEST_CLUSTER = "TestCluster";

  // NOTE(review): this TestNG class asserts via org.junit.Assert; consider switching to
  // org.testng.Assert for consistency with the rest of the module.

  /**
   * Partition-level health check with h2 as the instance to stop: per the fixture, h1's
   * unhealthy MASTER replica of p1 must not block stopping h2, while stopping h2 (MASTER of
   * p2, with h3's p2 replica unhealthy) must flag p2 as a failed partition.
   */
  @Test
  public void testPartitionLevelCheck() {
    List<ExternalView> externalViews = new ArrayList<>(Arrays.asList(prepareExternalView()));
    Mock mock = new Mock();
    HelixDataAccessor accessor = mock.dataAccessor;
    // Route state-model lookups to the mocked MasterSlave definition whose top state is MASTER.
    when(mock.dataAccessor.keyBuilder())
        .thenReturn(new PropertyKey.Builder(TEST_CLUSTER));
    when(mock.dataAccessor
        .getProperty(new PropertyKey.Builder(TEST_CLUSTER).stateModelDef(MasterSlaveSMD.name)))
        .thenReturn(mock.stateModel);
    when(mock.stateModel.getTopState()).thenReturn("MASTER");
    Map<String, List<String>> failedPartitions = InstanceValidationUtil
        .perPartitionHealthCheck(externalViews, preparePartitionStateMap(), "h2", accessor);
    // Only p2 should fail the check.
    Assert.assertTrue(failedPartitions.keySet().size() == 1);
    Assert.assertEquals(failedPartitions.keySet().iterator().next(), "p2");
  }

  /**
   * Same check, but with h4 holding p1 in the OFFLINE initial state: checking h1 must report
   * PARTITION_INITIAL_STATE_FAIL for p1, and an additional unhealthy replica (h3's p1) must
   * add UNHEALTHY_PARTITION on top of that.
   */
  @Test
  public void testPartitionLevelCheckInitState() {
    List<ExternalView> externalViews = new ArrayList<>(Arrays.asList(prepareExternalViewOffline()));
    Mock mock = new Mock();
    HelixDataAccessor accessor = mock.dataAccessor;
    when(mock.dataAccessor.keyBuilder())
        .thenReturn(new PropertyKey.Builder(TEST_CLUSTER));
    when(mock.dataAccessor
        .getProperty(new PropertyKey.Builder(TEST_CLUSTER).stateModelDef(MasterSlaveSMD.name)))
        .thenReturn(mock.stateModel);
    when(mock.stateModel.getTopState()).thenReturn("MASTER");
    when(mock.stateModel.getInitialState()).thenReturn("OFFLINE");
    // All replicas healthy except h2's p2; h4 holds p1 in the OFFLINE (initial) state.
    Map<String, Map<String, Boolean>> partitionStateMap = new HashMap<>();
    partitionStateMap.put("h1", new HashMap<>());
    partitionStateMap.put("h2", new HashMap<>());
    partitionStateMap.put("h3", new HashMap<>());
    partitionStateMap.put("h4", new HashMap<>());
    partitionStateMap.get("h1").put("p1", true);
    partitionStateMap.get("h2").put("p1", true);
    partitionStateMap.get("h3").put("p1", true);
    partitionStateMap.get("h4").put("p1", true);
    partitionStateMap.get("h1").put("p2", true);
    partitionStateMap.get("h2").put("p2", false);
    partitionStateMap.get("h3").put("p2", true);
    Map<String, List<String>> failedPartitions = InstanceValidationUtil
        .perPartitionHealthCheck(externalViews, partitionStateMap, "h1", accessor);
    Assert.assertEquals(failedPartitions.get("p1").size(), 1);
    Assert.assertEquals(failedPartitions.get("p1").get(0), "PARTITION_INITIAL_STATE_FAIL");
    // Making h3's p1 replica unhealthy should add a second failure reason for p1.
    partitionStateMap.get("h3").put("p1", false);
    failedPartitions = InstanceValidationUtil
        .perPartitionHealthCheck(externalViews, partitionStateMap, "h1", accessor);
    Assert.assertEquals(failedPartitions.get("p1").size(), 2);
    Assert.assertTrue(failedPartitions.get("p1").contains("PARTITION_INITIAL_STATE_FAIL"));
    Assert.assertTrue(failedPartitions.get("p1").contains("UNHEALTHY_PARTITION"));
    Assert.assertEquals(failedPartitions.keySet().size(), 2);
  }

  // Builds a MasterSlave external view: three partitions across h1-h3, masters on h1 (p1)
  // and h2 (p2, p3).
  private ExternalView prepareExternalView() {
    ExternalView externalView = new ExternalView(RESOURCE_NAME);
    externalView.getRecord()
        .setSimpleField(ExternalView.ExternalViewProperty.STATE_MODEL_DEF_REF.toString(),
            MasterSlaveSMD.name);
    externalView.setState("p1", "h1", "MASTER");
    externalView.setState("p1", "h2", "SLAVE");
    externalView.setState("p1", "h3", "SLAVE");
    externalView.setState("p2", "h1", "SLAVE");
    externalView.setState("p2", "h2", "MASTER");
    externalView.setState("p2", "h3", "SLAVE");
    externalView.setState("p3", "h1", "SLAVE");
    externalView.setState("p3", "h2", "MASTER");
    externalView.setState("p3", "h3", "SLAVE");
    return externalView;
  }

  // Per-host partition health used by testPartitionLevelCheck.
  private Map<String, Map<String, Boolean>> preparePartitionStateMap() {
    Map<String, Map<String, Boolean>> partitionStateMap = new HashMap<>();
    partitionStateMap.put("h1", new HashMap<>());
    partitionStateMap.put("h2", new HashMap<>());
    partitionStateMap.put("h3", new HashMap<>());
    // h1 holds master for p1 is unhealthy should not impact decision of shut down h2
    // But h2 holds master for p2, shutdown h2 may cause unhealthy master on h3.
    partitionStateMap.get("h1").put("p1", false);
    partitionStateMap.get("h1").put("p2", true);
    partitionStateMap.get("h1").put("p3", true);
    partitionStateMap.get("h2").put("p1", true);
    partitionStateMap.get("h2").put("p2", true);
    partitionStateMap.get("h2").put("p3", true);
    partitionStateMap.get("h3").put("p1", true);
    partitionStateMap.get("h3").put("p2", false);
    partitionStateMap.get("h3").put("p3", true);
    return partitionStateMap;
  }

  // Like prepareExternalView, but h4 holds p1 in the OFFLINE state to exercise the
  // initial-state failure path.
  private ExternalView prepareExternalViewOffline() {
    ExternalView externalView = new ExternalView(RESOURCE_NAME);
    externalView.getRecord()
        .setSimpleField(ExternalView.ExternalViewProperty.STATE_MODEL_DEF_REF.toString(),
            MasterSlaveSMD.name);
    externalView.setState("p1", "h1", "MASTER");
    externalView.setState("p1", "h2", "SLAVE");
    externalView.setState("p1", "h3", "SLAVE");
    externalView.setState("p1", "h4", "OFFLINE");
    externalView.setState("p2", "h1", "MASTER");
    externalView.setState("p2", "h2", "SLAVE");
    externalView.setState("p2", "h3", "SLAVE");
    return externalView;
  }

  // Container for the Mockito mocks shared by the tests above.
  private final class Mock {
    private HelixDataAccessor dataAccessor = mock(HelixDataAccessor.class);
    private StateModelDefinition stateModel = mock(StateModelDefinition.class);

    Mock() {
    }
  }
}
| 9,319 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/mock/MockMetadataStoreDirectoryAccessor.java
|
package org.apache.helix.rest.server.mock;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import javax.ws.rs.Path;
import org.apache.helix.msdcommon.datamodel.TrieRoutingData;
import org.apache.helix.msdcommon.exception.InvalidRoutingDataException;
import org.apache.helix.rest.metadatastore.MetadataStoreDirectory;
import org.apache.helix.rest.metadatastore.ZkMetadataStoreDirectory;
import org.apache.helix.rest.metadatastore.accessor.MetadataStoreRoutingDataReader;
import org.apache.helix.rest.metadatastore.accessor.MetadataStoreRoutingDataWriter;
import org.apache.helix.rest.metadatastore.accessor.ZkRoutingDataReader;
import org.apache.helix.rest.metadatastore.accessor.ZkRoutingDataWriter;
import org.apache.helix.rest.server.resources.metadatastore.MetadataStoreDirectoryAccessor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An accessor that mocks the MetadataStoreDirectoryAccessor for testing purpose.
*/
@Path("/mock")
public class MockMetadataStoreDirectoryAccessor extends MetadataStoreDirectoryAccessor {
  //TODO: use this class as a template for https://github.com/apache/helix/issues/816
  private static final Logger LOG =
      LoggerFactory.getLogger(MockMetadataStoreDirectoryAccessor.class);

  // A flag that will be modified if the underlying MockZkRoutingDataWriter makes an operation
  // against ZooKeeper
  public static boolean operatedOnZk = false;

  // The instance of mockMSD that's created by this accessor; it's saved here to be closed later
  public static MetadataStoreDirectory _mockMSDInstance;

  /**
   * This method is overridden so that an instance of MockZkMetadataStoreDirectory can be passed in
   */
  @Override
  protected void buildMetadataStoreDirectory(String namespace, String address) {
    try {
      _metadataStoreDirectory = new MockZkMetadataStoreDirectory(namespace, address);
      _mockMSDInstance = _metadataStoreDirectory;
    } catch (InvalidRoutingDataException e) {
      // NOTE(review): the failure is only logged; _metadataStoreDirectory stays unset on error.
      LOG.error("buildMetadataStoreDirectory encountered an exception.", e);
    }
  }

  /**
   * Used to artificially create another instance of ZkMetadataStoreDirectory.
   * ZkMetadataStoreDirectory being a singleton makes it difficult to test it,
   * therefore this is the only way to create another instance.
   */
  class MockZkMetadataStoreDirectory extends ZkMetadataStoreDirectory {
    MockZkMetadataStoreDirectory(String namespace, String zkAddress)
        throws InvalidRoutingDataException {
      super();
      // Manually populate the map so that MockZkRoutingDataWriter can be passed in
      _routingZkAddressMap.put(namespace, zkAddress);
      _routingDataReaderMap.put(namespace, new ZkRoutingDataReader(namespace, zkAddress, this));
      _routingDataWriterMap.put(namespace, new MockZkRoutingDataWriter(namespace, zkAddress));
      _realmToShardingKeysMap.put(namespace, _routingDataReaderMap.get(namespace).getRoutingData());
      _routingDataMap.put(namespace, new TrieRoutingData(_realmToShardingKeysMap.get(namespace)));
    }

    @Override
    public void close() {
      // Close every routing data reader and writer registered for this mock instance.
      _routingDataReaderMap.values().forEach(MetadataStoreRoutingDataReader::close);
      _routingDataWriterMap.values().forEach(MetadataStoreRoutingDataWriter::close);
    }
  }

  /**
   * A mock to ZkRoutingDataWriter. The only purpose is to set the static flag signifying that
   * this writer is used for zookeeper operations.
   */
  class MockZkRoutingDataWriter extends ZkRoutingDataWriter {
    public MockZkRoutingDataWriter(String namespace, String zkAddress) {
      super(namespace, zkAddress);
      // Reset the flag so each test starts from a clean "no ZK operation yet" state.
      operatedOnZk = false;
    }

    // Each override below records that ZooKeeper was touched, then delegates to the real writer.
    @Override
    protected boolean createZkRealm(String realm) {
      operatedOnZk = true;
      return super.createZkRealm(realm);
    }

    @Override
    protected boolean deleteZkRealm(String realm) {
      operatedOnZk = true;
      return super.deleteZkRealm(realm);
    }

    @Override
    protected boolean createZkShardingKey(String realm, String shardingKey) {
      operatedOnZk = true;
      return super.createZkShardingKey(realm, shardingKey);
    }

    @Override
    protected boolean deleteZkShardingKey(String realm, String shardingKey) {
      operatedOnZk = true;
      return super.deleteZkShardingKey(realm, shardingKey);
    }
  }
}
| 9,320 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/json
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/json/cluster/TestClusterInfo.java
|
package org.apache.helix.rest.server.json.cluster;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestClusterInfo {
  @Test
  public void whenSerializingClusterInfo() throws JsonProcessingException {
    // Build a fully-populated ClusterInfo and verify its JSON wire format field by field.
    ClusterInfo info = new ClusterInfo.Builder("cluster0")
        .controller("controller")
        .idealStates(ImmutableList.of("idealState0"))
        .instances(ImmutableList.of("instance0"))
        .maintenance(true)
        .paused(true)
        .liveInstances(ImmutableList.of("instance0"))
        .build();
    ObjectMapper objectMapper = new ObjectMapper();
    String serialized = objectMapper.writeValueAsString(info);
    String expectedJson =
        "{\"id\":\"cluster0\",\"controller\":\"controller\",\"paused\":true,\"maintenance\":true,\"resources\":[\"idealState0\"],\"instances\":[\"instance0\"],\"liveInstances\":[\"instance0\"]}";
    // Compare parsed trees so field ordering in the output does not matter.
    Assert.assertEquals(objectMapper.readTree(serialized), objectMapper.readTree(expectedJson));
  }
}
| 9,321 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/json
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/json/cluster/TestClusterTopology.java
|
package org.apache.helix.rest.server.json.cluster;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestClusterTopology {
  @Test
  public void whenSerializingClusterTopology() throws IOException {
    // One zone containing a single instance, with an empty "allInstances" set.
    ClusterTopology.Instance instance = new ClusterTopology.Instance("instance");
    List<ClusterTopology.Zone> zones =
        ImmutableList.of(new ClusterTopology.Zone("zone", ImmutableList.of(instance)));
    ClusterTopology topology = new ClusterTopology("cluster0", zones, Collections.emptySet());
    ObjectMapper objectMapper = new ObjectMapper();
    String serialized = objectMapper.writeValueAsString(topology);
    String expectedJson =
        "{\"id\":\"cluster0\",\"zones\":[{\"id\":\"zone\",\"instances\":[{\"id\":\"instance\"}]}],\"allInstances\":[]}";
    // Compare parsed trees so field ordering in the output does not matter.
    Assert.assertEquals(objectMapper.readTree(serialized), objectMapper.readTree(expectedJson));
  }
}
| 9,322 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/json
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/json/instance/TestStoppableCheck.java
|
package org.apache.helix.rest.server.json.instance;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestStoppableCheck {
  @Test
  public void whenSerializingStoppableCheck() throws JsonProcessingException {
    // A failed Helix-side check must serialize with the "HELIX:" prefix on the check name.
    StoppableCheck check = new StoppableCheck(false, ImmutableList.of("check"),
        StoppableCheck.Category.HELIX_OWN_CHECK);
    ObjectMapper objectMapper = new ObjectMapper();
    String serialized = objectMapper.writeValueAsString(check);
    String expectedJson = "{\"stoppable\":false,\"failedChecks\":[\"HELIX:check\"]}";
    Assert.assertEquals(objectMapper.readTree(serialized), objectMapper.readTree(expectedJson));
  }

  @Test
  public void testConstructorSortingOrder() {
    // Failed checks must come out alphabetically sorted and category-prefixed.
    StoppableCheck check = new StoppableCheck(
        ImmutableMap.of("a", true, "c", false, "b", false),
        StoppableCheck.Category.HELIX_OWN_CHECK);
    Assert.assertFalse(check.isStoppable());
    Assert.assertEquals(check.getFailedChecks(), ImmutableList.of("HELIX:b", "HELIX:c"));
  }
}
| 9,323 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/service/TestClusterService.java
|
package org.apache.helix.rest.server.service;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import com.google.common.collect.ImmutableList;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixProperty;
import org.apache.helix.PropertyKey;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.rest.server.json.cluster.ClusterTopology;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestClusterService {
  private static final String TEST_CLUSTER = "Test_Cluster";

  @Test
  public void testGetClusterTopology_whenMultiZones() {
    // Two instances in two distinct fault zones -> two zones in the topology.
    List<HelixProperty> instanceConfigs = (List) ImmutableList
        .of(newInstanceConfig("instance0", "zone0"), newInstanceConfig("instance1", "zone1"));
    ClusterTopology clusterTopology = buildClusterTopology(instanceConfigs);
    Assert.assertEquals(clusterTopology.getZones().size(), 2);
    Assert.assertEquals(clusterTopology.getClusterId(), TEST_CLUSTER);
  }

  @Test
  public void testGetClusterTopology_whenZeroZones() {
    // Instances with no domain set -> no zones derived.
    List<HelixProperty> instanceConfigs = (List) ImmutableList
        .of(newInstanceConfig("instance0", null), newInstanceConfig("instance1", null));
    ClusterTopology clusterTopology = buildClusterTopology(instanceConfigs);
    Assert.assertEquals(clusterTopology.getZones().size(), 0);
    Assert.assertEquals(clusterTopology.getClusterId(), TEST_CLUSTER);
  }

  @Test
  public void testGetClusterTopology_whenZoneHasMultiInstances() {
    // Two instances sharing one fault zone -> a single zone containing both.
    List<HelixProperty> instanceConfigs = (List) ImmutableList
        .of(newInstanceConfig("instance0", "zone0"), newInstanceConfig("instance1", "zone0"));
    ClusterTopology clusterTopology = buildClusterTopology(instanceConfigs);
    Assert.assertEquals(clusterTopology.getZones().size(), 1);
    Assert.assertEquals(clusterTopology.getZones().get(0).getInstances().size(), 2);
    Assert.assertEquals(clusterTopology.getClusterId(), TEST_CLUSTER);
  }

  /**
   * Creates an InstanceConfig, optionally tagged with a helixZoneId domain.
   * @param instanceName name of the instance
   * @param zone fault zone id; pass null to leave the domain unset
   */
  private static InstanceConfig newInstanceConfig(String instanceName, String zone) {
    InstanceConfig instanceConfig = new InstanceConfig(instanceName);
    if (zone != null) {
      instanceConfig.setDomain("helixZoneId=" + zone);
    }
    return instanceConfig;
  }

  /**
   * Wires the mocked data accessor to return the given instance configs and asks the
   * cluster service for the resulting topology. Shared by every test above, eliminating
   * the previously triplicated stubbing boilerplate.
   */
  private ClusterTopology buildClusterTopology(List<HelixProperty> instanceConfigs) {
    Mock mock = new Mock();
    when(mock.dataAccessor.keyBuilder()).thenReturn(new PropertyKey.Builder(TEST_CLUSTER));
    when(mock.dataAccessor.getChildValues(any(PropertyKey.class), anyBoolean()))
        .thenReturn(instanceConfigs);
    return mock.clusterService.getClusterTopology(TEST_CLUSTER);
  }

  private final class Mock {
    private HelixDataAccessor dataAccessor = mock(HelixDataAccessor.class);
    private ConfigAccessor configAccessor = mock(ConfigAccessor.class);
    private ClusterService clusterService;

    Mock() {
      // The cluster's fault zone type must match the "helixZoneId" domain key used above.
      ClusterConfig mockConfig = new ClusterConfig(TEST_CLUSTER);
      mockConfig.setFaultZoneType("helixZoneId");
      when(configAccessor.getClusterConfig(TEST_CLUSTER)).thenReturn(mockConfig);
      clusterService = new ClusterServiceImpl(dataAccessor, configAccessor);
    }
  }
}
| 9,324 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/server/service/TestVirtualTopologyGroupService.java
|
package org.apache.helix.rest.server.service;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.cloud.azure.AzureConstants;
import org.apache.helix.cloud.constants.CloudProvider;
import org.apache.helix.model.CloudConfig;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.helix.rest.server.json.cluster.ClusterTopology;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.zkclient.DataUpdater;
import org.testng.Assert;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import static org.apache.helix.cloud.constants.VirtualTopologyGroupConstants.*;
import static org.mockito.Mockito.*;
/**
 * Unit tests for {@code VirtualTopologyGroupService}: virtual group assignment,
 * input validation, maintenance-mode preconditions, and the per-instance
 * {@code InstanceConfig} ZNRecord updaters.
 */
public class TestVirtualTopologyGroupService {
private static final String TEST_CLUSTER = "Test_Cluster";
private static final String TEST_CLUSTER0 = "TestCluster_0";
private static final String TEST_CLUSTER1 = "TestCluster_1";
private final ConfigAccessor _configAccessor = mock(ConfigAccessor.class);
private final HelixDataAccessor _dataAccessor = mock(HelixDataAccessor.class);
private InstanceConfig _instanceConfig0;
private InstanceConfig _instanceConfig1;
private InstanceConfig _instanceConfig2;
// ZK participant-config path -> updater produced by the service; filled in prepare().
private Map<String, DataUpdater<ZNRecord>> _updaterMap;
private HelixAdmin _helixAdmin;
private VirtualTopologyGroupService _service;
// Runs ONCE for the whole class: tests share the stubs set here, so any test
// that changes a stub (see testMaintenanceModeCheckAfter) must restore it.
@BeforeTest
public void prepare() {
Map<String, Set<String>> assignment = new HashMap<>();
_instanceConfig0 = new InstanceConfig("instance_0");
_instanceConfig0.setDomain("helixZoneId=zone0");
_instanceConfig1 = new InstanceConfig("instance_1");
_instanceConfig1.setDomain("helixZoneId=zone0");
_instanceConfig2 = new InstanceConfig("instance_2");
_instanceConfig2.setDomain("helixZoneId=zone1");
// Virtual group assignment under test: zone0's two instances -> group 0, zone1's -> group 1.
assignment.put("virtual_group_0", ImmutableSet.of("instance_0", "instance_1"));
assignment.put("virtual_group_1", ImmutableSet.of("instance_2"));
_updaterMap = VirtualTopologyGroupService.createInstanceConfigUpdater(TEST_CLUSTER, assignment);
ClusterConfig clusterConfig = new ClusterConfig(TEST_CLUSTER0);
clusterConfig.setFaultZoneType(AzureConstants.AZURE_FAULT_ZONE_TYPE);
clusterConfig.setTopology(AzureConstants.AZURE_TOPOLOGY);
clusterConfig.setTopologyAwareEnabled(true);
when(_configAccessor.getClusterConfig(TEST_CLUSTER0)).thenReturn(clusterConfig);
_helixAdmin = mock(HelixAdmin.class);
// Default stub: the cluster is in maintenance mode.
when(_helixAdmin.isInMaintenanceMode(anyString())).thenReturn(true);
boolean[] results = new boolean[2];
results[0] = results[1] = true;
when(_dataAccessor.updateChildren(anyList(), anyList(), anyInt())).thenReturn(results);
ClusterService clusterService = mock(ClusterService.class);
when(clusterService.getClusterTopology(anyString())).thenReturn(prepareClusterTopology());
_service = new VirtualTopologyGroupService(_helixAdmin, clusterService, _configAccessor, _dataAccessor);
}
// A cluster config without topology-aware rebalance enabled must be rejected.
@Test(expectedExceptions = IllegalStateException.class,
expectedExceptionsMessageRegExp = "Topology-aware rebalance is not enabled.*")
public void testTopologyAwareEnabledSetup() {
when(_configAccessor.getClusterConfig(TEST_CLUSTER1)).thenReturn(new ClusterConfig(TEST_CLUSTER1));
_service.addVirtualTopologyGroup(TEST_CLUSTER1, ImmutableMap.of(GROUP_NAME, "test-group", GROUP_NUMBER, "2"));
}
// Happy path: instance configs and the cluster config are each updated exactly once.
@Test
public void testVirtualTopologyGroupService() {
_service.addVirtualTopologyGroup(TEST_CLUSTER0, ImmutableMap.of(
GROUP_NAME, "test-group", GROUP_NUMBER, "2", AUTO_MAINTENANCE_MODE_DISABLED, "true"));
verify(_dataAccessor, times(1)).updateChildren(anyList(), anyList(), anyInt());
verify(_configAccessor, times(1)).updateClusterConfig(anyString(), any());
}
// Without AUTO_MAINTENANCE_MODE_DISABLED the call is rejected because the
// cluster is (per the default stub) already in maintenance mode.
@Test(expectedExceptions = IllegalStateException.class,
expectedExceptionsMessageRegExp = "This operation is not allowed if cluster is already in maintenance mode.*")
public void testMaintenanceModeCheckBeforeApiCall() {
_service.addVirtualTopologyGroup(TEST_CLUSTER0, ImmutableMap.of(GROUP_NAME, "test-group", GROUP_NUMBER, "2"));
}
@Test(expectedExceptions = IllegalStateException.class,
expectedExceptionsMessageRegExp = "Cluster is not in maintenance mode. This is required for virtual topology group setting. "
+ "Please set autoMaintenanceModeDisabled=false.*")
public void testMaintenanceModeCheckAfter() {
try {
// Simulate the cluster failing to be in maintenance mode when required.
when(_helixAdmin.isInMaintenanceMode(anyString())).thenReturn(false);
_service.addVirtualTopologyGroup(TEST_CLUSTER0, ImmutableMap.of(GROUP_NAME, "test-group", GROUP_NUMBER, "2"));
} finally {
// Restore the shared default stub since @BeforeTest only runs once per class.
when(_helixAdmin.isInMaintenanceMode(anyString())).thenReturn(true);
}
}
// Requesting more virtual groups (10) than instances (3 in the prepared topology) is invalid.
@Test(expectedExceptions = IllegalArgumentException.class,
expectedExceptionsMessageRegExp = "Number of virtual groups cannot be greater than the number of instances.*")
public void testNumberOfInstanceCheck() {
_service.addVirtualTopologyGroup(TEST_CLUSTER0, ImmutableMap.of(
GROUP_NAME, "test-group", GROUP_NUMBER, "10", AUTO_MAINTENANCE_MODE_DISABLED, "true"));
}
// Omitting the required GROUP_NAME parameter must fail validation.
@Test(expectedExceptions = IllegalArgumentException.class)
public void testParamValidation() {
_service.addVirtualTopologyGroup(TEST_CLUSTER0, ImmutableMap.of(GROUP_NUMBER, "2"));
}
// Each updater must add the virtual fault zone entry while keeping the existing domain.
@Test(dataProvider = "instanceTestProvider")
public void testInstanceConfigUpdater(String zkPath, InstanceConfig instanceConfig, Map<String, String> expectedDomain) {
ZNRecord update = _updaterMap.get(zkPath).update(instanceConfig.getRecord());
InstanceConfig updatedConfig = new InstanceConfig(update);
Assert.assertEquals(updatedConfig.getDomainAsMap(), expectedDomain);
}
// (zkPath, config, expected domain map) tuples matching the assignment built in prepare().
@DataProvider
public Object[][] instanceTestProvider() {
return new Object[][] {
{computeZkPath("instance_0"), _instanceConfig0,
ImmutableMap.of("helixZoneId", "zone0", VIRTUAL_FAULT_ZONE_TYPE, "virtual_group_0")},
{computeZkPath("instance_1"), _instanceConfig1,
ImmutableMap.of("helixZoneId", "zone0", VIRTUAL_FAULT_ZONE_TYPE, "virtual_group_0")},
{computeZkPath("instance_2"), _instanceConfig2,
ImmutableMap.of("helixZoneId", "zone1", VIRTUAL_FAULT_ZONE_TYPE, "virtual_group_1")}
};
}
// The fault-zone level of the topology path is replaced by the virtual zone level.
@Test
public void testVirtualTopologyString() {
ClusterConfig testConfig = new ClusterConfig("testId");
testConfig.setTopologyAwareEnabled(true);
testConfig.setTopology("/zone/instance");
Assert.assertEquals(VirtualTopologyGroupService.computeVirtualTopologyString(testConfig),
"/virtualZone/instance");
}
// Fixed topology: zone0 -> {instance_0, instance_1}, zone1 -> {instance_2}.
private static ClusterTopology prepareClusterTopology() {
List<ClusterTopology.Zone> zones = ImmutableList.of(
new ClusterTopology.Zone("zone0", ImmutableList.of(
new ClusterTopology.Instance("instance_0"), new ClusterTopology.Instance("instance_1"))),
new ClusterTopology.Zone("zone1", ImmutableList.of(new ClusterTopology.Instance("instance_2"))));
return new ClusterTopology(TEST_CLUSTER0, zones, ImmutableSet.of("instance_0", "instance_1", "instance_2"));
}
// Builds the participant-config ZK path used as the key into _updaterMap.
private static String computeZkPath(String instanceName) {
HelixConfigScope scope = new HelixConfigScopeBuilder(HelixConfigScope.ConfigScopeProperty.PARTICIPANT)
.forCluster(TEST_CLUSTER)
.forParticipant(instanceName)
.build();
return scope.getZkPath();
}
}
| 9,325 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/common/TestHelixDataAccessorWrapper.java
|
package org.apache.helix.rest.common;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.PropertyKey;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.model.HealthStat;
import org.apache.helix.model.RESTConfig;
import org.apache.helix.rest.client.CustomRestClient;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyList;
import static org.mockito.Matchers.anyMap;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.CALLS_REAL_METHODS;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyZeroInteractions;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@code HelixDataAccessorWrapper}'s partition-health reads:
 * a valid cached ZK record short-circuits the custom REST call, while an
 * expired or missing record falls back to querying the REST endpoint.
 */
public class TestHelixDataAccessorWrapper {
private static final String TEST_INSTANCE0 = "host0";
private static final String TEST_INSTANCE1 = "host1";
private static final String TEST_PARTITION = "db0";
private static final String TEST_CLUSTER = "TestCluster";
private static final Map<String, String> CUSTOM_PAY_LOADS = Collections.emptyMap();
private MockHelixDataAccessorWrapper _dataAccessor;
private RESTConfig _restConfig;
private CustomRestClient _restClient;
// Exposes setters for the wrapper's protected rest-client and namespace fields.
private static class MockHelixDataAccessorWrapper extends HelixDataAccessorWrapper {
public MockHelixDataAccessorWrapper(ZKHelixDataAccessor dataAccessor) {
super(dataAccessor);
}
void setRestClient(CustomRestClient restClient) {
_restClient = restClient;
}
void setNamespace(String namespace) {
_namespace = namespace;
}
}
@BeforeMethod
public void beforeMethod() throws IOException {
// Partial mock: real wrapper logic runs; only explicitly stubbed calls are intercepted.
_dataAccessor = mock(MockHelixDataAccessorWrapper.class, CALLS_REAL_METHODS);
_restConfig = mock(RESTConfig.class);
when(_restConfig.getBaseUrl(anyString())).thenReturn("http://localhost:1000")
| 9,326 |
0 |
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/test/java/org/apache/helix/rest/client/TestCustomRestClient.java
|
package org.apache.helix.rest.client;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import org.apache.http.HttpResponse;
import org.apache.http.HttpStatus;
import org.apache.http.StatusLine;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.impl.client.HttpClients;
import org.junit.Assert;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@code CustomRestClientImpl}'s instance/partition stoppable
 * checks, driven through a mocked Apache {@code HttpClient}.
 */
public class TestCustomRestClient {
private static final String HTTP_LOCALHOST = "http://localhost:1000";
@Mock
HttpClient _httpClient;
@BeforeMethod
public void init() {
MockitoAnnotations.initMocks(this);
}
// A 200 response with a flat {check -> "true"/"false"} JSON body parses into booleans.
@Test
public void testGetInstanceStoppableCheck() throws IOException {
MockCustomRestClient customRestClient = new MockCustomRestClient(_httpClient);
String jsonResponse = "{\n" + " \"check1\": \"false\",\n" + " \"check2\": \"true\"\n" + "}";
HttpResponse httpResponse = mock(HttpResponse.class);
StatusLine statusLine = mock(StatusLine.class);
when(statusLine.getStatusCode()).thenReturn(HttpStatus.SC_OK);
when(httpResponse.getStatusLine()).thenReturn(statusLine);
customRestClient.setJsonResponse(jsonResponse);
when(_httpClient.execute(any(HttpPost.class))).thenReturn(httpResponse);
Map<String, Boolean> healthCheck =
customRestClient.getInstanceStoppableCheck(HTTP_LOCALHOST, Collections.emptyMap());
Assert.assertFalse(healthCheck.get("check1"));
Assert.assertTrue(healthCheck.get("check2"));
}
// A non-200 status surfaces as ClientProtocolException.
@Test(expectedExceptions = ClientProtocolException.class)
public void testGetInstanceStoppableCheck_when_url_404() throws IOException {
MockCustomRestClient customRestClient = new MockCustomRestClient(_httpClient);
HttpResponse httpResponse = mock(HttpResponse.class);
StatusLine statusLine = mock(StatusLine.class);
when(statusLine.getStatusCode()).thenReturn(HttpStatus.SC_NOT_FOUND);
when(httpResponse.getStatusLine()).thenReturn(statusLine);
when(_httpClient.execute(any(HttpPost.class))).thenReturn(httpResponse);
customRestClient.getInstanceStoppableCheck(HTTP_LOCALHOST, Collections.emptyMap());
}
// NOTE(review): the stubbed status here is also 404, and ClientProtocolException
// is a subclass of IOException, so this expectation passes whether the client
// fails on the status or on the empty body — consider stubbing SC_OK to pin
// the empty-response path specifically.
@Test(expectedExceptions = IOException.class)
public void testGetInstanceStoppableCheck_when_response_empty() throws IOException {
MockCustomRestClient customRestClient = new MockCustomRestClient(_httpClient);
HttpResponse httpResponse = mock(HttpResponse.class);
StatusLine statusLine = mock(StatusLine.class);
when(statusLine.getStatusCode()).thenReturn(HttpStatus.SC_NOT_FOUND);
when(httpResponse.getStatusLine()).thenReturn(statusLine);
when(_httpClient.execute(any(HttpPost.class))).thenReturn(httpResponse);
customRestClient.setJsonResponse("");
customRestClient.getInstanceStoppableCheck(HTTP_LOCALHOST, Collections.emptyMap());
}
// Per-partition {"IS_HEALTHY": "true"/"false"} entries parse into a boolean map.
@Test
public void testGetPartitionStoppableCheck() throws IOException {
MockCustomRestClient customRestClient = new MockCustomRestClient(_httpClient);
String jsonResponse = "\n" + "{\n" + " \"db1\": {\n" + " \"IS_HEALTHY\": \"false\"\n"
+ " },\n" + " \"db0\": {\n" + " \"IS_HEALTHY\": \"true\"\n" + " }\n" + "}";
HttpResponse httpResponse = mock(HttpResponse.class);
StatusLine statusLine = mock(StatusLine.class);
when(statusLine.getStatusCode()).thenReturn(HttpStatus.SC_OK);
when(httpResponse.getStatusLine()).thenReturn(statusLine);
customRestClient.setJsonResponse(jsonResponse);
when(_httpClient.execute(any(HttpPost.class))).thenReturn(httpResponse);
Map<String, Boolean> partitionHealth = customRestClient.getPartitionStoppableCheck(HTTP_LOCALHOST,
ImmutableList.of("db0", "db1"), Collections.emptyMap());
Assert.assertTrue(partitionHealth.get("db0"));
Assert.assertFalse(partitionHealth.get("db1"));
}
// Non-200 status for the partition check surfaces as ClientProtocolException.
@Test(expectedExceptions = ClientProtocolException.class)
public void testGetPartitionStoppableCheck_when_url_404() throws IOException {
MockCustomRestClient customRestClient = new MockCustomRestClient(_httpClient);
HttpResponse httpResponse = mock(HttpResponse.class);
StatusLine statusLine = mock(StatusLine.class);
when(statusLine.getStatusCode()).thenReturn(HttpStatus.SC_NOT_FOUND);
when(httpResponse.getStatusLine()).thenReturn(statusLine);
when(_httpClient.execute(any(HttpPost.class))).thenReturn(httpResponse);
customRestClient.getPartitionStoppableCheck(HTTP_LOCALHOST,
ImmutableList.of("db0", "db1"), Collections.emptyMap());
}
// NOTE(review): same caveat as the instance-check empty-response test — the
// 404 path alone would already satisfy the IOException expectation.
@Test(expectedExceptions = IOException.class)
public void testGetPartitionStoppableCheck_when_response_empty() throws IOException {
MockCustomRestClient customRestClient = new MockCustomRestClient(_httpClient);
HttpResponse httpResponse = mock(HttpResponse.class);
StatusLine statusLine = mock(StatusLine.class);
when(statusLine.getStatusCode()).thenReturn(HttpStatus.SC_NOT_FOUND);
when(httpResponse.getStatusLine()).thenReturn(statusLine);
when(_httpClient.execute(any(HttpPost.class))).thenReturn(httpResponse);
customRestClient.setJsonResponse("");
customRestClient.getPartitionStoppableCheck(HTTP_LOCALHOST,
ImmutableList.of("db0", "db1"), Collections.emptyMap());
}
// NOTE(review): depends on the external httpbin.org service; the assertions are
// silently skipped when the service is unreachable or returns non-200, so this
// test can pass without verifying anything.
@Test (description = "Validate if the post request has the correct format")
public void testPostRequestFormat() throws IOException {
// a popular echo server that echos all the inputs
// TODO: add a mock rest server
final String echoServer = "http://httpbin.org/post";
CustomRestClientImpl customRestClient = new CustomRestClientImpl(HttpClients.createDefault());
HttpResponse response = customRestClient.post(echoServer, Collections.emptyMap());
if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) {
JsonNode json = customRestClient.getJsonObject(response);
Assert.assertEquals(json.get("headers").get("Accept").asText(), "application/json");
Assert.assertEquals(json.get("data").asText(), "{}");
}
}
// A connect timeout thrown by the HTTP layer must propagate to the caller.
@Test
public void testGetPartitionStoppableCheckWhenTimeout() throws IOException {
MockCustomRestClient customRestClient = new MockCustomRestClient(_httpClient);
HttpResponse httpResponse = mock(HttpResponse.class);
StatusLine statusLine = mock(StatusLine.class);
when(statusLine.getStatusCode()).thenReturn(HttpStatus.SC_OK);
when(httpResponse.getStatusLine()).thenReturn(statusLine);
when(_httpClient.execute(any(HttpPost.class)))
.thenThrow(new ConnectTimeoutException("Timeout Exception Happened!"));
boolean timeoutExceptionHappened = false;
try {
customRestClient.getPartitionStoppableCheck(HTTP_LOCALHOST, ImmutableList.of("db0", "db1"),
Collections.emptyMap());
} catch (ConnectTimeoutException e) {
timeoutExceptionHappened = true;
}
Assert.assertTrue(timeoutExceptionHappened);
}
// Overrides JSON extraction so the canned response string is parsed instead of
// reading a real HTTP entity from the mocked response.
private class MockCustomRestClient extends CustomRestClientImpl {
private String _jsonResponse = "";
MockCustomRestClient(HttpClient mockHttpClient) {
super(mockHttpClient);
}
void setJsonResponse(String response) {
_jsonResponse = response;
}
@Override
protected JsonNode getJsonObject(HttpResponse httpResponse) throws IOException {
return new ObjectMapper().readTree(_jsonResponse);
}
}
}
| 9,327 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/clusterMaintenanceService/MaintenanceManagementService.java
|
package org.apache.helix.rest.clusterMaintenanceService;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.function.Function;
import java.util.stream.Collectors;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.SharedMetricRegistries;
import com.codahale.metrics.Timer;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.RESTConfig;
import org.apache.helix.rest.client.CustomRestClient;
import org.apache.helix.rest.client.CustomRestClientFactory;
import org.apache.helix.rest.clusterMaintenanceService.api.OperationInterface;
import org.apache.helix.rest.common.HelixDataAccessorWrapper;
import org.apache.helix.rest.common.datamodel.RestSnapShot;
import org.apache.helix.rest.server.json.instance.InstanceInfo;
import org.apache.helix.rest.server.json.instance.StoppableCheck;
import org.apache.helix.rest.server.resources.helix.PerInstanceAccessor;
import org.apache.helix.rest.server.service.InstanceService;
import org.apache.helix.util.HelixUtil;
import org.apache.helix.util.InstanceValidationUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Service that performs health checks plus maintenance operation checks and
 * executions (e.g. take/free instance) for Helix clusters, combining
 * Helix-internal checks with custom REST-based checks.
 */
public class MaintenanceManagementService {
private static final Logger LOG = LoggerFactory.getLogger(MaintenanceManagementService.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
// Shared thread pool used to run checks asynchronously.
private static final ExecutorService POOL = Executors.newCachedThreadPool();
// Metric names for custom instance check
private static final String CUSTOM_INSTANCE_CHECK_HTTP_REQUESTS_ERROR_TOTAL =
MetricRegistry.name(InstanceService.class, "custom_instance_check_http_requests_error_total");
private static final String CUSTOM_INSTANCE_CHECK_HTTP_REQUESTS_DURATION =
MetricRegistry.name(InstanceService.class, "custom_instance_check_http_requests_duration");
// Sentinel entry for _nonBlockingHealthChecks meaning ALL health-check
// failures are treated as non-blocking (see continueOnFailure constructors).
public static final String ALL_HEALTH_CHECK_NONBLOCK = "allHealthCheckNonBlock";
public static final String HELIX_INSTANCE_STOPPABLE_CHECK = "HelixInstanceStoppableCheck";
public static final String HELIX_CUSTOM_STOPPABLE_CHECK = "CustomInstanceStoppableCheck";
public static final String OPERATION_CONFIG_SHARED_INPUT = "OperationConfigSharedInput";
// Stoppable-check categories that callers are allowed to skip entirely.
public static final Set<StoppableCheck.Category> SKIPPABLE_HEALTH_CHECK_CATEGORIES =
ImmutableSet.of(StoppableCheck.Category.CUSTOM_INSTANCE_CHECK,
StoppableCheck.Category.CUSTOM_PARTITION_CHECK);
private final ConfigAccessor _configAccessor;
private final CustomRestClient _customRestClient;
private final String _namespace;
// Presumably bypasses health-check results cached in ZK when true —
// TODO(review): confirm against HelixDataAccessorWrapper.
private final boolean _skipZKRead;
private final HelixDataAccessorWrapper _dataAccessor;
// Names of health checks whose failure should not block the operation.
private final Set<String> _nonBlockingHealthChecks;
// Check categories to skip entirely; never null (normalized in the constructor).
private final Set<StoppableCheck.Category> _skipHealthCheckCategories;
/**
 * Creates a service with no non-blocking health checks and no skipped check
 * categories; the custom REST client comes from {@link CustomRestClientFactory}.
 */
public MaintenanceManagementService(ZKHelixDataAccessor dataAccessor,
ConfigAccessor configAccessor, boolean skipZKRead, String namespace) {
this(new HelixDataAccessorWrapper(dataAccessor, CustomRestClientFactory.get(), namespace),
configAccessor, CustomRestClientFactory.get(), skipZKRead, Collections.emptySet(),
Collections.emptySet(), namespace);
}
/**
 * Creates a service with an explicit set of health-check names whose failures
 * are treated as non-blocking; no check categories are skipped.
 */
public MaintenanceManagementService(ZKHelixDataAccessor dataAccessor,
ConfigAccessor configAccessor, boolean skipZKRead, Set<String> nonBlockingHealthChecks,
String namespace) {
this(new HelixDataAccessorWrapper(dataAccessor, CustomRestClientFactory.get(), namespace),
configAccessor, CustomRestClientFactory.get(), skipZKRead, nonBlockingHealthChecks,
Collections.emptySet(), namespace);
}
/**
 * Creates a service where {@code continueOnFailure == true} marks every health
 * check as non-blocking via the {@link #ALL_HEALTH_CHECK_NONBLOCK} sentinel.
 */
public MaintenanceManagementService(ZKHelixDataAccessor dataAccessor,
ConfigAccessor configAccessor, boolean skipZKRead, boolean continueOnFailure,
String namespace) {
this(new HelixDataAccessorWrapper(dataAccessor, CustomRestClientFactory.get(), namespace),
configAccessor, CustomRestClientFactory.get(), skipZKRead,
continueOnFailure ? Collections.singleton(ALL_HEALTH_CHECK_NONBLOCK)
: Collections.emptySet(), Collections.emptySet(), namespace);
}
/**
 * Creates a service with the continue-on-failure behavior of
 * {@link #MaintenanceManagementService(ZKHelixDataAccessor, ConfigAccessor, boolean, boolean, String)}
 * plus a set of stoppable-check categories to skip (null is treated as empty).
 */
public MaintenanceManagementService(ZKHelixDataAccessor dataAccessor,
ConfigAccessor configAccessor, boolean skipZKRead, boolean continueOnFailure,
Set<StoppableCheck.Category> skipHealthCheckCategories, String namespace) {
this(new HelixDataAccessorWrapper(dataAccessor, CustomRestClientFactory.get(), namespace),
configAccessor, CustomRestClientFactory.get(), skipZKRead,
continueOnFailure ? Collections.singleton(ALL_HEALTH_CHECK_NONBLOCK)
: Collections.emptySet(),
skipHealthCheckCategories != null ? skipHealthCheckCategories : Collections.emptySet(),
namespace);
}
/**
 * Designated constructor: all public constructors delegate here. Exposed for
 * tests so a mocked data-accessor wrapper and REST client can be injected.
 */
@VisibleForTesting
MaintenanceManagementService(HelixDataAccessorWrapper dataAccessorWrapper,
ConfigAccessor configAccessor, CustomRestClient customRestClient, boolean skipZKRead,
Set<String> nonBlockingHealthChecks, Set<StoppableCheck.Category> skipHealthCheckCategories,
String namespace) {
_dataAccessor = dataAccessorWrapper;
_configAccessor = configAccessor;
_customRestClient = customRestClient;
_skipZKRead = skipZKRead;
_nonBlockingHealthChecks = nonBlockingHealthChecks;
// Normalize null to an empty set so callers never need a null check.
_skipHealthCheckCategories =
skipHealthCheckCategories != null ? skipHealthCheckCategories : Collections.emptySet();
_namespace = namespace;
}
/**
 * Performs health checks and maintenance operation checks/executions for an
 * instance in one cluster.
 * Users need to implement OperationAbstractClass for custom operation check &
 * execution. This method invokes
 * OperationAbstractClass.operationCheckForTakeSingleInstance and
 * OperationAbstractClass.operationExecForTakeSingleInstance.
 * The checks and operations are executed in the user-provided sequence.
 *
 * @param clusterId The cluster id
 * @param instanceName The instance name
 * @param healthChecks A list of health checks to perform
 * @param healthCheckConfig The input for the health checks
 * @param operations A list of operation checks or operations to execute
 * @param operationConfig A map of config; key is the operation name, value is
 * a JSON representation of a map
 * @param performOperation If this param is set to false, the function will only do a dry run
 * @return MaintenanceManagementInstanceInfo
 * @throws IOException in case of network failure
 */
public MaintenanceManagementInstanceInfo takeInstance(String clusterId, String instanceName,
List<String> healthChecks, Map<String, String> healthCheckConfig, List<String> operations,
Map<String, String> operationConfig, boolean performOperation) throws IOException {
// Reject requests that specify neither a health check nor an operation.
if ((healthChecks == null || healthChecks.isEmpty()) && (operations == null || operations
.isEmpty())) {
MaintenanceManagementInstanceInfo result = new MaintenanceManagementInstanceInfo(
MaintenanceManagementInstanceInfo.OperationalStatus.FAILURE);
result.addMessage("Invalid input. Please provide at least one health check or operation.");
return result;
}
return takeFreeSingleInstanceHelper(clusterId, instanceName, healthChecks, healthCheckConfig,
operations, operationConfig, performOperation, true);
}
/**
 * Perform health check and maintenance operation check and execution for a list of instances in
 * one cluster.
 * Users need to implement OperationInterface for custom operation check & execution.
 * It will invoke OperationInterface.operationCheckForTakeInstances and
 * OperationInterface.operationExecForTakeInstances.
 * The list of checks and operations will be executed in the user-provided sequence.
 *
 * @param clusterId The cluster id
 * @param instances A list of instances
 * @param healthChecks A list of healthChecks to perform
 * @param healthCheckConfig The input for health Checks
 * @param operations A list of operation checks or operations to execute
 * @param operationConfig A map of config. Key is the operation name, value is a JSON
 *          representation of a map.
 * @param performOperation If this param is set to false, the function will only do a dry run
 * @return currently always {@code null} — batch take is not implemented yet
 * @throws IOException in case of network failure
 */
public Map<String, MaintenanceManagementInstanceInfo> takeInstances(String clusterId,
    List<String> instances, List<String> healthChecks, Map<String, String> healthCheckConfig,
    List<String> operations, Map<String, String> operationConfig, boolean performOperation)
    throws IOException {
  // TODO(review): not yet implemented; returning null (rather than an empty map) is
  // intentional here but callers must null-check until an implementation lands.
  return null;
}
/**
 * Runs the requested health checks and then the maintenance operation checks /
 * executions to free (bring back) a single instance in one cluster.
 * Custom operations implement OperationInterface; this path invokes
 * operationCheckForFreeSingleInstance and operationExecForFreeSingleInstance,
 * in the caller-provided order.
 *
 * @param clusterId         the cluster id
 * @param instanceName      the instance name
 * @param healthChecks      health checks to perform
 * @param healthCheckConfig input for the health checks
 * @param operations        operation checks / operations to execute
 * @param operationConfig   per-operation config; each value is a JSON map keyed by
 *                          operation class name
 * @param performOperation  when false, only a dry run is performed
 * @return the aggregated {@link MaintenanceManagementInstanceInfo}
 * @throws IOException in case of network failure
 */
public MaintenanceManagementInstanceInfo freeInstance(String clusterId, String instanceName,
    List<String> healthChecks, Map<String, String> healthCheckConfig, List<String> operations,
    Map<String, String> operationConfig, boolean performOperation) throws IOException {
  // Same pipeline as takeInstance, with the "take" flag turned off.
  final boolean isTakeInstance = false;
  return takeFreeSingleInstanceHelper(clusterId, instanceName, healthChecks, healthCheckConfig,
      operations, operationConfig, performOperation, isTakeInstance);
}
/**
 * Perform health check and maintenance operation check and execution for a list of instances in
 * one cluster.
 * Users need to implement OperationInterface for custom operation check & execution.
 * It will invoke OperationInterface.operationCheckForFreeInstances and
 * OperationInterface.operationExecForFreeInstances.
 * The list of checks and operations will be executed in the user-provided sequence.
 *
 * @param clusterId The cluster id
 * @param instances A list of instances
 * @param healthChecks A list of healthChecks to perform
 * @param healthCheckConfig The input for health Checks
 * @param operations A list of operation checks or operations to execute
 * @param operationConfig A map of config. Key is the operation name, value is a JSON
 *          representation of a map
 * @param performOperation If this param is set to false, the function will only do a dry run
 * @return currently always {@code null} — batch free is not implemented yet
 * @throws IOException in case of network failure
 */
public Map<String, MaintenanceManagementInstanceInfo> freeInstances(String clusterId,
    List<String> instances, List<String> healthChecks, Map<String, String> healthCheckConfig,
    List<String> operations, Map<String, String> operationConfig, boolean performOperation)
    throws IOException {
  // TODO(review): not yet implemented; returning null (rather than an empty map) is
  // intentional here but callers must null-check until an implementation lands.
  return null;
}
/**
 * Assembles an {@link InstanceInfo} for one instance: its config, live-instance
 * record, hosted resources/partitions (when alive), and the results of the given
 * health checks. Missing config or live-instance data is logged and tolerated.
 */
public InstanceInfo getInstanceHealthInfo(String clusterId, String instanceName,
    List<HealthCheck> healthChecks) {
  InstanceInfo.Builder builder = new InstanceInfo.Builder(instanceName);
  InstanceConfig instanceConfig =
      _dataAccessor.getProperty(_dataAccessor.keyBuilder().instanceConfig(instanceName));
  LiveInstance liveInstance =
      _dataAccessor.getProperty(_dataAccessor.keyBuilder().liveInstance(instanceName));

  if (instanceConfig == null) {
    LOG.warn("Missing instance config for {}", instanceName);
  } else {
    builder.instanceConfig(instanceConfig.getRecord());
  }

  if (liveInstance == null) {
    LOG.warn("Missing live instance for {}", instanceName);
  } else {
    builder.liveInstance(liveInstance.getRecord());
    String sessionId = liveInstance.getEphemeralOwner();

    // Resources with a current state under the instance's live session.
    List<String> resourceNames = _dataAccessor
        .getChildNames(_dataAccessor.keyBuilder().currentStates(instanceName, sessionId));
    builder.resources(resourceNames);

    // Collect every partition this instance currently hosts across all resources.
    List<String> partitions = new ArrayList<>();
    for (String resourceName : resourceNames) {
      CurrentState currentState = _dataAccessor.getProperty(
          _dataAccessor.keyBuilder().currentState(instanceName, sessionId, resourceName));
      if (currentState == null || currentState.getPartitionStateMap() == null) {
        LOG.warn(
            "Current state is either null or partitionStateMap is missing. InstanceName: {}, SessionId: {}, ResourceName: {}",
            instanceName, sessionId, resourceName);
      } else {
        partitions.addAll(currentState.getPartitionStateMap().keySet());
      }
    }
    builder.partitions(partitions);
  }

  try {
    builder.healthStatus(getInstanceHealthStatus(clusterId, instanceName, healthChecks));
  } catch (HelixException ex) {
    // Health checks failing to run is reported as unhealthy rather than propagated.
    LOG.error(
        "Exception while getting health status. Cluster: {}, Instance: {}, reporting health status as unHealth",
        clusterId, instanceName, ex);
    builder.healthStatus(false);
  }

  return builder.build();
}
/**
 * Loads and instantiates the user-supplied operation classes by name.
 * Each class must implement {@link OperationInterface} and expose a public
 * no-arg constructor.
 *
 * @param operations fully-qualified class names, in execution order
 * @return instantiated operations, in the same order as {@code operations}
 * @throws HelixException if any class cannot be loaded or instantiated
 */
private List<OperationInterface> getAllOperationClasses(List<String> operations) {
  List<OperationInterface> operationAbstractClassList = new ArrayList<>();
  for (String operationClassName : operations) {
    try {
      LOG.info("Loading class: " + operationClassName);
      // Class.newInstance() is deprecated (it rethrows checked constructor exceptions
      // unchecked); invoke the no-arg constructor explicitly instead.
      OperationInterface userOperation =
          (OperationInterface) HelixUtil.loadClass(getClass(), operationClassName)
              .getDeclaredConstructor().newInstance();
      operationAbstractClassList.add(userOperation);
    } catch (Exception e) {
      LOG.error("No operation class found for: {}. message: ", operationClassName, e);
      throw new HelixException(
          String.format("No operation class found for: %s. message: %s", operationClassName, e));
    }
  }
  return operationAbstractClassList;
}
/**
 * {@inheritDoc}
 * Single instance stoppable check implementation is a special case of
 * {@link #batchGetInstancesStoppableChecks(String, List, String)}
 * <p>
 * Step 1: Perform instance level Helix own health checks
 * Step 2: Perform instance level client side health checks
 * Step 3: Perform partition level (all partitions on the instance) client side health checks
 * <p>
 * Note: if the check fails at one step, the rest steps won't be executed because the instance
 * cannot be stopped
 */
public StoppableCheck getInstanceStoppableCheck(String clusterId, String instanceName,
    String jsonContent) throws IOException {
  Map<String, StoppableCheck> checksByInstance =
      batchGetInstancesStoppableChecks(clusterId, ImmutableList.of(instanceName), jsonContent);
  return checksByInstance.get(instanceName);
}
/**
 * Runs the full stoppable-check pipeline for a batch of instances and returns one
 * {@link StoppableCheck} per instance. Instances that fail the Helix-side checks
 * are recorded but excluded from the client-side (custom) checks.
 */
public Map<String, StoppableCheck> batchGetInstancesStoppableChecks(String clusterId,
    List<String> instances, String jsonContent) throws IOException {
  Map<String, StoppableCheck> finalStoppableChecks = new HashMap<>();
  // Step 1: Helix-side instance checks; survivors proceed to custom checks.
  List<String> instancesForCustomInstanceLevelChecks =
      batchHelixInstanceStoppableCheck(clusterId, instances, finalStoppableChecks);
  // Steps 2/3: client-side instance-level and partition-level checks.
  batchCustomInstanceStoppableCheck(clusterId, instancesForCustomInstanceLevelChecks,
      finalStoppableChecks, getMapFromJsonPayload(jsonContent));
  return finalStoppableChecks;
}
/**
 * Shared implementation for takeInstance/freeInstance on one instance:
 * 1) run the requested health checks; abort on failure,
 * 2) run each operation's check phase (take- or free-flavored per isTakeInstance),
 * 3) if performOperation and everything succeeded so far, run each operation's
 *    execute phase, stopping at the first blocking failure.
 * Any exception is converted into a FAILURE result rather than propagated.
 */
private MaintenanceManagementInstanceInfo takeFreeSingleInstanceHelper(String clusterId,
    String instanceName, List<String> healthChecks, Map<String, String> healthCheckConfig,
    List<String> operations, Map<String, String> operationConfig, boolean performOperation,
    boolean isTakeInstance) {
  // Normalize null inputs so the loops below can run unconditionally.
  if (operations == null) {
    operations = new ArrayList<>();
  }
  if (healthChecks == null) {
    healthChecks = new ArrayList<>();
  }
  try {
    MaintenanceManagementInstanceInfo instanceInfo;
    // Health checks first; a missing entry means no check failed for this instance.
    instanceInfo =
        batchInstanceHealthCheck(clusterId, ImmutableList.of(instanceName), healthChecks,
            healthCheckConfig).getOrDefault(instanceName, new MaintenanceManagementInstanceInfo(
            MaintenanceManagementInstanceInfo.OperationalStatus.SUCCESS));
    if (!instanceInfo.isSuccessful()) {
      return instanceInfo;
    }
    List<OperationInterface> operationAbstractClassList = getAllOperationClasses(operations);
    // Pre-populate the snapshot cache so operations read a consistent view.
    _dataAccessor.populateCache(OperationInterface.PROPERTY_TYPE_LIST);
    RestSnapShot sp = _dataAccessor.getRestSnapShot();
    String continueOnFailuresName =
        PerInstanceAccessor.PerInstanceProperties.continueOnFailures.name();
    // Per-operation resolved config, reused by the execution phase below.
    Map<String, Map<String, String>> operationConfigSet = new HashMap<>();
    // Config shared by all operations; individual operation config takes precedence.
    Map<String, String> commonOperationConfig =
        (operationConfig == null || !operationConfig.containsKey(OPERATION_CONFIG_SHARED_INPUT))
            ? Collections.emptyMap()
            : getMapFromJsonPayload(operationConfig.get(OPERATION_CONFIG_SHARED_INPUT));
    // perform operation check
    for (OperationInterface operationClass : operationAbstractClassList) {
      String operationClassName = operationClass.getClass().getName();
      Map<String, String> singleOperationConfig =
          (operationConfig == null || !operationConfig.containsKey(operationClassName))
              ? Collections.emptyMap()
              : getMapFromJsonPayload(operationConfig.get(operationClassName));
      commonOperationConfig
          .forEach(singleOperationConfig::putIfAbsent);
      operationConfigSet.put(operationClassName, singleOperationConfig);
      // continueOnFailures makes this operation's failure non-blocking.
      boolean continueOnFailures =
          singleOperationConfig.containsKey(continueOnFailuresName) && getBooleanFromJsonPayload(
              singleOperationConfig.get(continueOnFailuresName));
      MaintenanceManagementInstanceInfo checkResult = isTakeInstance ? operationClass
          .operationCheckForTakeSingleInstance(instanceName, singleOperationConfig, sp)
          : operationClass
              .operationCheckForFreeSingleInstance(instanceName, singleOperationConfig, sp);
      instanceInfo.mergeResult(checkResult, continueOnFailures);
    }
    // operation execution
    if (performOperation && instanceInfo.isSuccessful()) {
      for (OperationInterface operationClass : operationAbstractClassList) {
        Map<String, String> singleOperationConfig =
            operationConfigSet.get(operationClass.getClass().getName());
        // NOTE(review): the check phase above parses this flag with
        // getBooleanFromJsonPayload while this phase uses Boolean.parseBoolean;
        // the two disagree for non-standard values (e.g. "True") — confirm intent.
        boolean continueOnFailures =
            singleOperationConfig.containsKey(continueOnFailuresName) && Boolean
                .parseBoolean(singleOperationConfig.get(continueOnFailuresName));
        MaintenanceManagementInstanceInfo newResult = isTakeInstance ? operationClass
            .operationExecForTakeSingleInstance(instanceName, singleOperationConfig, sp)
            : operationClass
                .operationExecForFreeSingleInstance(instanceName, singleOperationConfig, sp);
        instanceInfo.mergeResult(newResult, continueOnFailures);
        if (!instanceInfo.isSuccessful()) {
          LOG.warn("Operation failed for {}, skip all following operations.",
              operationClass.getClass().getName());
          break;
        }
      }
    }
    return instanceInfo;
  } catch (Exception ex) {
    // Best-effort API: surface the failure in the result instead of throwing.
    return new MaintenanceManagementInstanceInfo(
        MaintenanceManagementInstanceInfo.OperationalStatus.FAILURE,
        Collections.singletonList(ex.getMessage()));
  }
}
/**
 * Submits the Helix-side per-instance stoppable checks to the shared pool and
 * returns only the instances that may proceed to the next round of checks.
 */
private List<String> batchHelixInstanceStoppableCheck(String clusterId,
    Collection<String> instances, Map<String, StoppableCheck> finalStoppableChecks) {
  Map<String, Future<StoppableCheck>> helixInstanceChecks =
      instances.stream().collect(Collectors.toMap(Function.identity(),
          instance -> POOL.submit(() -> performHelixOwnInstanceCheck(clusterId, instance))));
  // finalStoppableChecks collects the instances that do not pass this health check.
  return filterInstancesForNextCheck(helixInstanceChecks, finalStoppableChecks);
}
/**
 * Runs the client-side (custom) stoppable checks for the given instances:
 * first the instance-level REST checks (unless that category is skipped), then the
 * partition-level checks (unless skipped). Results are merged into
 * finalStoppableChecks; the returned list contains the instances that passed (or
 * whose failures are all non-blocking) and may continue to further checks.
 */
private List<String> batchCustomInstanceStoppableCheck(String clusterId, List<String> instances,
    Map<String, StoppableCheck> finalStoppableChecks, Map<String, String> customPayLoads) {
  if (instances.isEmpty()) {
    // if all instances failed at previous checks, then all following checks are not required.
    return instances;
  }
  // Custom checks require the cluster's REST endpoint configuration.
  RESTConfig restConfig = _configAccessor.getRESTConfig(clusterId);
  if (restConfig == null) {
    String errorMessage = String.format(
        "The cluster %s hasn't enabled client side health checks yet, "
            + "thus the stoppable check result is inaccurate", clusterId);
    LOG.error(errorMessage);
    throw new HelixException(errorMessage);
  }
  List<String> instancesForCustomPartitionLevelChecks;
  if (!_skipHealthCheckCategories.contains(StoppableCheck.Category.CUSTOM_INSTANCE_CHECK)) {
    // Fan out the instance-level REST checks in parallel, one task per instance.
    Map<String, Future<StoppableCheck>> customInstanceLevelChecks = instances.stream().collect(
        Collectors.toMap(Function.identity(), instance -> POOL.submit(
            () -> performCustomInstanceCheck(clusterId, instance, restConfig.getBaseUrl(instance),
                customPayLoads))));
    instancesForCustomPartitionLevelChecks =
        filterInstancesForNextCheck(customInstanceLevelChecks, finalStoppableChecks);
  } else {
    // Instance-level category skipped: every instance goes to the partition check.
    instancesForCustomPartitionLevelChecks = instances;
  }
  if (!instancesForCustomPartitionLevelChecks.isEmpty() && !_skipHealthCheckCategories.contains(
      StoppableCheck.Category.CUSTOM_PARTITION_CHECK)) {
    // Partition-level results are added to finalStoppableChecks whether stoppable or not.
    Map<String, StoppableCheck> instancePartitionLevelChecks =
        performPartitionsCheck(instancesForCustomPartitionLevelChecks, restConfig,
            customPayLoads);
    List<String> instancesForFollowingChecks = new ArrayList<>();
    for (Map.Entry<String, StoppableCheck> instancePartitionStoppableCheckEntry : instancePartitionLevelChecks.entrySet()) {
      String instance = instancePartitionStoppableCheckEntry.getKey();
      StoppableCheck stoppableCheck = instancePartitionStoppableCheckEntry.getValue();
      addStoppableCheck(finalStoppableChecks, instance, stoppableCheck);
      if (stoppableCheck.isStoppable() || isNonBlockingCheck(stoppableCheck)) {
        // The instance passed this round of checks, or all of its failed checks are
        // non-blocking; it will be examined again in the next round.
        instancesForFollowingChecks.add(instance);
      }
    }
    return instancesForFollowingChecks;
  }
  // Partition-level check was skipped; pass through the instances that survived so far.
  return instancesForCustomPartitionLevelChecks;
}
/**
 * Runs the named health-check groups over a batch of instances and converts the
 * per-instance StoppableCheck results into MaintenanceManagementInstanceInfo.
 * Every input instance appears in the returned map with SUCCESS or FAILURE status;
 * non-blocking failures yield SUCCESS plus the failure messages.
 */
private Map<String, MaintenanceManagementInstanceInfo> batchInstanceHealthCheck(String clusterId,
    List<String> instances, List<String> healthChecks, Map<String, String> healthCheckConfig) {
  List<String> instancesForNext = new ArrayList<>(instances);
  Map<String, MaintenanceManagementInstanceInfo> instanceInfos = new HashMap<>();
  Map<String, StoppableCheck> finalStoppableChecks = new HashMap<>();
  // TODO: Right now user can only choose from HelixInstanceStoppableCheck and
  // CustomInstanceStoppableCheck. We should add finer grain check groups to choose from
  // i.e. HELIX:INSTANCE_NOT_ENABLED, CUSTOM_PARTITION_HEALTH_FAILURE:PARTITION_INITIAL_STATE_FAIL etc.
  for (String healthCheck : healthChecks) {
    if (healthCheck.equals(HELIX_INSTANCE_STOPPABLE_CHECK)) {
      // Helix's own instance-level checks; failures drop instances from the next round.
      instancesForNext =
          batchHelixInstanceStoppableCheck(clusterId, instancesForNext, finalStoppableChecks);
    } else if (healthCheck.equals(HELIX_CUSTOM_STOPPABLE_CHECK)) {
      // Custom checks: client-side instance-level check plus partition-level check.
      instancesForNext =
          batchCustomInstanceStoppableCheck(clusterId, instancesForNext, finalStoppableChecks,
              healthCheckConfig);
    } else {
      throw new UnsupportedOperationException(healthCheck + " is not supported yet!");
    }
  }
  // Assemble the result. The returned map contains every instance with a pass or fail status.
  Set<String> clearedInstance = new HashSet<>(instancesForNext);
  for (String instance : instances) {
    MaintenanceManagementInstanceInfo result = new MaintenanceManagementInstanceInfo(
        clearedInstance.contains(instance)
            ? MaintenanceManagementInstanceInfo.OperationalStatus.SUCCESS
            : MaintenanceManagementInstanceInfo.OperationalStatus.FAILURE);
    if (finalStoppableChecks.containsKey(instance) && !finalStoppableChecks.get(instance)
        .isStoppable()) {
      // If a non-blocking check failed, we have a StoppableCheck with stoppable = false
      // while the instance is still in clearedInstance: report SUCCESS but attach the
      // failure messages so the caller can see what went wrong.
      result.addMessages(finalStoppableChecks.get(instance).getFailedChecks());
    }
    instanceInfos.put(instance, result);
  }
  return instanceInfos;
}
/**
 * Records a check result for an instance, merging it into any result already
 * recorded for that instance.
 */
private void addStoppableCheck(Map<String, StoppableCheck> stoppableChecks, String instance,
    StoppableCheck stoppableCheck) {
  StoppableCheck existing = stoppableChecks.get(instance);
  if (existing == null) {
    stoppableChecks.put(instance, stoppableCheck);
  } else {
    // Merge two checks
    existing.add(stoppableCheck);
  }
}
/**
 * Waits for the submitted per-instance checks, records every result into
 * finalStoppableCheckByInstance, and returns the instances that may proceed
 * to the next round (stoppable, or failed only non-blocking checks).
 *
 * @throws HelixException if retrieving any check result fails; the underlying
 *         failure is attached as the cause
 */
private List<String> filterInstancesForNextCheck(
    Map<String, Future<StoppableCheck>> futureStoppableCheckByInstance,
    Map<String, StoppableCheck> finalStoppableCheckByInstance) {
  List<String> instancesForNextCheck = new ArrayList<>();
  for (Map.Entry<String, Future<StoppableCheck>> entry : futureStoppableCheckByInstance
      .entrySet()) {
    String instance = entry.getKey();
    try {
      StoppableCheck stoppableCheck = entry.getValue().get();
      addStoppableCheck(finalStoppableCheckByInstance, instance, stoppableCheck);
      if (stoppableCheck.isStoppable() || isNonBlockingCheck(stoppableCheck)) {
        // The instance passed this round of checks, or all of its failed checks are
        // non-blocking; it will be examined again in the next round.
        instancesForNextCheck.add(instance);
      }
    } catch (Exception e) {
      if (e instanceof InterruptedException) {
        // Restore the interrupt flag so callers up the stack can observe it.
        Thread.currentThread().interrupt();
      }
      String errorMessage =
          String.format("Failed to get StoppableChecks in parallel. Instance: %s", instance);
      LOG.error(errorMessage, e);
      // Preserve the original failure as the cause instead of dropping it.
      throw new HelixException(errorMessage, e);
    }
  }
  return instancesForNextCheck;
}
/**
 * Returns true when every failed check in the given result is configured as
 * non-blocking (or ALL_HEALTH_CHECK_NONBLOCK is set), meaning the instance may
 * continue to the next round despite the failures.
 */
private boolean isNonBlockingCheck(StoppableCheck stoppableCheck) {
  if (_nonBlockingHealthChecks.isEmpty()) {
    return false;
  }
  if (_nonBlockingHealthChecks.contains(ALL_HEALTH_CHECK_NONBLOCK)) {
    return true;
  }
  for (String failedCheck : stoppableCheck.getFailedChecks()) {
    if (failedCheck.startsWith("CUSTOM_")) {
      // Failed custom checks follow the pattern
      // "CUSTOM_PARTITION_HEALTH_FAILURE:PARTITION_INITIAL_STATE_FAIL:partition_name";
      // keep only the first two parts as the failed check name.
      String[] checks = failedCheck.split(":", 3);
      // Guard against a malformed name with no ':' separator, which previously
      // threw ArrayIndexOutOfBoundsException; such a name is matched as-is.
      if (checks.length >= 2) {
        failedCheck = checks[0] + ":" + checks[1];
      }
    }
    // Helix's own health check names are already in the "HELIX:INSTANCE_NOT_ALIVE"
    // form, so no preprocessing is needed for them.
    if (!_nonBlockingHealthChecks.contains(failedCheck)) {
      return false;
    }
  }
  return true;
}
/**
 * Runs Helix's own full stoppable-check list for one instance and wraps the
 * result under the HELIX_OWN_CHECK category.
 */
private StoppableCheck performHelixOwnInstanceCheck(String clusterId, String instanceName) {
  LOG.info("Perform helix own custom health checks for {}/{}", clusterId, instanceName);
  return new StoppableCheck(
      getInstanceHealthStatus(clusterId, instanceName, HealthCheck.STOPPABLE_CHECK_LIST),
      StoppableCheck.Category.HELIX_OWN_CHECK);
}
/**
 * Calls the participant's client-side REST endpoint for its instance-level
 * stoppable check. A network failure is reported as a failed (non-stoppable)
 * check and counted in the error metric rather than propagated.
 */
private StoppableCheck performCustomInstanceCheck(String clusterId, String instanceName,
    String baseUrl, Map<String, String> customPayLoads) {
  LOG.info("Perform instance level client side health checks for {}/{}", clusterId, instanceName);
  MetricRegistry metricRegistry = SharedMetricRegistries.getOrCreate(_namespace);
  // Total requests metric is included as an attribute(Count) in timers
  try (final Timer.Context timer = metricRegistry
      .timer(CUSTOM_INSTANCE_CHECK_HTTP_REQUESTS_DURATION).time()) {
    return new StoppableCheck(
        _customRestClient.getInstanceStoppableCheck(baseUrl, customPayLoads),
        StoppableCheck.Category.CUSTOM_INSTANCE_CHECK);
  } catch (IOException ex) {
    LOG.error("Custom client side instance level health check for {}/{} failed.", clusterId,
        instanceName, ex);
    metricRegistry.counter(CUSTOM_INSTANCE_CHECK_HTTP_REQUESTS_ERROR_TOTAL).inc();
    return new StoppableCheck(false, Collections.singletonList(instanceName),
        StoppableCheck.Category.CUSTOM_INSTANCE_CHECK);
  }
}
/**
 * Runs the partition-level health check for every given instance and returns a
 * CUSTOM_PARTITION_CHECK result per instance, listing each unhealthy partition
 * as "REASON:partitionName".
 */
private Map<String, StoppableCheck> performPartitionsCheck(List<String> instances,
    RESTConfig restConfig, Map<String, String> customPayLoads) {
  Map<String, Map<String, Boolean>> partitionHealthByInstance =
      _dataAccessor.getAllPartitionsHealthOnLiveInstance(restConfig, customPayLoads, _skipZKRead);
  List<ExternalView> externalViews =
      _dataAccessor.getChildValues(_dataAccessor.keyBuilder().externalViews(), true);
  Map<String, StoppableCheck> instanceStoppableChecks = new HashMap<>();
  for (String instanceName : instances) {
    Map<String, List<String>> unHealthyPartitions = InstanceValidationUtil
        .perPartitionHealthCheck(externalViews, partitionHealthByInstance, instanceName,
            _dataAccessor);
    // Flatten per-partition failure reasons into "REASON:partitionName" entries.
    List<String> failedReasons = new ArrayList<>();
    for (Map.Entry<String, List<String>> partitionEntry : unHealthyPartitions.entrySet()) {
      String partitionName = partitionEntry.getKey();
      for (String reason : partitionEntry.getValue()) {
        failedReasons.add(reason.toUpperCase() + ":" + partitionName);
      }
    }
    instanceStoppableChecks.put(instanceName, new StoppableCheck(failedReasons.isEmpty(),
        failedReasons, StoppableCheck.Category.CUSTOM_PARTITION_CHECK));
  }
  return instanceStoppableChecks;
}
/**
 * Parses a JSON object string into a mutable string-to-string map. Scalar values
 * are rendered via asText(); nested objects/arrays keep their JSON text form.
 * A null input yields an empty map.
 */
public static Map<String, String> getMapFromJsonPayload(String jsonContent) throws IOException {
  if (jsonContent == null) {
    return new HashMap<>();
  }
  // Reuse the JsonNode overload for the actual field extraction.
  return getMapFromJsonPayload(OBJECT_MAPPER.readTree(jsonContent));
}
/**
 * Converts a JSON object node into a mutable string-to-string map. Scalar values
 * are rendered via asText(); nested objects/arrays keep their JSON text form.
 * A null node yields an empty map.
 */
public static Map<String, String> getMapFromJsonPayload(JsonNode jsonNode)
    throws IllegalArgumentException {
  Map<String, String> result = new HashMap<>();
  if (jsonNode == null) {
    return result;
  }
  jsonNode.fields().forEachRemaining(field -> {
    JsonNode value = field.getValue();
    result.put(field.getKey(), value.isValueNode() ? value.asText() : value.toString());
  });
  return result;
}
/**
 * Converts a JSON array node into a list of strings; a null node yields an
 * empty list.
 */
public static List<String> getListFromJsonPayload(JsonNode jsonContent)
    throws IllegalArgumentException {
  if (jsonContent == null) {
    return Collections.emptyList();
  }
  return OBJECT_MAPPER.convertValue(jsonContent, List.class);
}
/**
 * Parses a JSON array string into a list of strings; a null input yields an
 * empty list.
 */
public static List<String> getListFromJsonPayload(String jsonString)
    throws IllegalArgumentException, JsonProcessingException {
  if (jsonString == null) {
    return Collections.emptyList();
  }
  return OBJECT_MAPPER.readValue(jsonString, List.class);
}
/**
 * Parses a JSON value and interprets it as a boolean (Jackson's asBoolean
 * semantics: non-boolean nodes coerce, defaulting to false).
 */
public static boolean getBooleanFromJsonPayload(String jsonString)
    throws IllegalArgumentException, JsonProcessingException {
  JsonNode parsed = OBJECT_MAPPER.readTree(jsonString);
  return parsed.asBoolean();
}
/**
 * Evaluates the requested Helix-side health checks for one instance and returns a
 * map from check name to pass/fail. Each entry's value is true when the instance is
 * healthy with respect to that check (e.g. HAS_ERROR_PARTITION -> true means no
 * error partitions). If the instance config is invalid, the remaining checks are
 * skipped and only INVALID_CONFIG=false is returned.
 */
@VisibleForTesting
protected Map<String, Boolean> getInstanceHealthStatus(String clusterId, String instanceName,
    List<HealthCheck> healthChecks) {
  Map<String, Boolean> healthStatus = new HashMap<>();
  for (HealthCheck healthCheck : healthChecks) {
    switch (healthCheck) {
    case INVALID_CONFIG:
      // Valid config is a prerequisite for every other check.
      boolean validConfig;
      try {
        validConfig =
            InstanceValidationUtil.hasValidConfig(_dataAccessor, clusterId, instanceName);
      } catch (HelixException e) {
        validConfig = false;
        LOG.warn("Cluster {} instance {} doesn't have valid config: {}", clusterId, instanceName,
            e.getMessage());
      }
      // TODO: should add reason to request response
      healthStatus.put(HealthCheck.INVALID_CONFIG.name(), validConfig);
      if (!validConfig) {
        // No need to do remaining health checks.
        return healthStatus;
      }
      break;
    case INSTANCE_NOT_ENABLED:
      // Enabled in both instance config and cluster config.
      healthStatus.put(HealthCheck.INSTANCE_NOT_ENABLED.name(),
          InstanceValidationUtil.isEnabled(_dataAccessor, instanceName));
      break;
    case INSTANCE_NOT_ALIVE:
      healthStatus.put(HealthCheck.INSTANCE_NOT_ALIVE.name(),
          InstanceValidationUtil.isAlive(_dataAccessor, instanceName));
      break;
    case INSTANCE_NOT_STABLE:
      // Stable = ideal state mapping matches the external view.
      boolean isStable = InstanceValidationUtil.isInstanceStable(_dataAccessor, instanceName);
      healthStatus.put(HealthCheck.INSTANCE_NOT_STABLE.name(), isStable);
      break;
    case HAS_ERROR_PARTITION:
      // Negated: true means the instance has no error partitions.
      healthStatus.put(HealthCheck.HAS_ERROR_PARTITION.name(),
          !InstanceValidationUtil.hasErrorPartitions(_dataAccessor, clusterId, instanceName));
      break;
    case HAS_DISABLED_PARTITION:
      // Negated: true means the instance has no disabled partitions.
      healthStatus.put(HealthCheck.HAS_DISABLED_PARTITION.name(),
          !InstanceValidationUtil.hasDisabledPartitions(_dataAccessor, clusterId, instanceName));
      break;
    case EMPTY_RESOURCE_ASSIGNMENT:
      healthStatus.put(HealthCheck.EMPTY_RESOURCE_ASSIGNMENT.name(),
          InstanceValidationUtil.isResourceAssigned(_dataAccessor, instanceName));
      break;
    case MIN_ACTIVE_REPLICA_CHECK_FAILED:
      // True when every resource on this instance still meets min-active-replica
      // after the instance is taken down.
      healthStatus.put(HealthCheck.MIN_ACTIVE_REPLICA_CHECK_FAILED.name(),
          InstanceValidationUtil.siblingNodesActiveReplicaCheck(_dataAccessor, instanceName));
      break;
    default:
      LOG.error("Unsupported health check: {}", healthCheck);
      break;
    }
  }
  return healthStatus;
}
}
| 9,328 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/clusterMaintenanceService/MaintenanceManagementInstanceInfo.java
|
package org.apache.helix.rest.clusterMaintenanceService;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.List;
public class MaintenanceManagementInstanceInfo {
public enum OperationalStatus {
SUCCESS,
FAILURE
}
private String _operationResult;
private OperationalStatus _status;
private List<String> _messages;
public MaintenanceManagementInstanceInfo(OperationalStatus status) {
this._status = status;
this._messages = new ArrayList<>();
this._operationResult = "";
}
public MaintenanceManagementInstanceInfo(OperationalStatus status, List<String> messages) {
this._status = status;
this._messages = messages;
this._operationResult = "";
}
public MaintenanceManagementInstanceInfo(OperationalStatus status, String newOperationResult) {
this._status = status;
this._operationResult = newOperationResult;
this._messages = new ArrayList<>();
}
public List<String> getMessages() {
return _messages;
}
public String getOperationResult() {
return _operationResult;
}
public boolean hasOperationResult() {
return !_operationResult.isEmpty();
}
public void setOperationResult(String result) {
_operationResult = result;
}
public void addMessages(List<String> msg) {
_messages.addAll(msg);
}
public void addMessage(String meg) {
_messages.add(meg);
}
public boolean isSuccessful() {
return _status.equals(OperationalStatus.SUCCESS);
}
public void mergeResult(MaintenanceManagementInstanceInfo info) {
mergeResult(info, false);
}
public void mergeResult(MaintenanceManagementInstanceInfo info, boolean nonBlockingFailure) {
if (info == null) {
return;
}
_messages.addAll(info.getMessages());
_status =
((info.isSuccessful() || nonBlockingFailure) && isSuccessful()) ? OperationalStatus.SUCCESS
: OperationalStatus.FAILURE;
if (info.hasOperationResult()) {
_operationResult =
this.hasOperationResult() ? _operationResult + "," + info.getOperationResult()
: info.getOperationResult();
}
}
}
| 9,329 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/clusterMaintenanceService/HealthCheck.java
|
package org.apache.helix.rest.clusterMaintenanceService;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import com.google.common.collect.ImmutableList;
public enum HealthCheck {
  /**
   * Check if instance is alive
   */
  INSTANCE_NOT_ALIVE,
  /**
   * Check if instance is enabled both in instance config and cluster config
   */
  INSTANCE_NOT_ENABLED,
  /**
   * Check if instance is stable
   * Stable means all the ideal state mapping matches external view (view of current state).
   */
  INSTANCE_NOT_STABLE,
  /**
   * Check if instance has 0 resource assigned
   */
  EMPTY_RESOURCE_ASSIGNMENT,
  /**
   * Check if instance has disabled partitions
   */
  HAS_DISABLED_PARTITION,
  /**
   * Check if instance has valid configuration (pre-requisite for all checks)
   */
  INVALID_CONFIG,
  /**
   * Check if instance has error partitions
   */
  HAS_ERROR_PARTITION,
  /**
   * Check if all resources hosted on the instance can still meet the min active replica
   * constraint if this instance is shutdown
   */
  MIN_ACTIVE_REPLICA_CHECK_FAILED;

  /**
   * Pre-defined list of checks to test if an instance can be stopped at runtime.
   * Declared {@code final} and unmodifiable so the shared constant can be neither
   * reassigned nor mutated by callers (it was previously a non-final, settable list).
   */
  public static final List<HealthCheck> STOPPABLE_CHECK_LIST =
      Collections.unmodifiableList(Arrays.asList(HealthCheck.values()));

  /**
   * Pre-defined list of checks to test if an instance is in healthy running state.
   */
  public static final List<HealthCheck> STARTED_AND_HEALTH_CHECK_LIST =
      Collections.unmodifiableList(Arrays.asList(INVALID_CONFIG, INSTANCE_NOT_ALIVE,
          INSTANCE_NOT_ENABLED, INSTANCE_NOT_STABLE, EMPTY_RESOURCE_ASSIGNMENT));
}
| 9,330 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/clusterMaintenanceService
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/clusterMaintenanceService/api/OperationInterface.java
|
package org.apache.helix.rest.clusterMaintenanceService.api;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.helix.PropertyType;
import org.apache.helix.rest.clusterMaintenanceService.MaintenanceManagementInstanceInfo;
import org.apache.helix.rest.common.datamodel.RestSnapShot;
public interface OperationInterface {
  /**
   * Helix property types whose snapshots are needed to evaluate instance operations.
   * Interface fields are implicitly {@code public static final}, but the list object itself
   * must also be immutable: the previous {@code new ArrayList<>(...)} exposed a mutable shared
   * constant that any caller could corrupt. Wrapped in an unmodifiable view instead.
   */
  List<PropertyType> PROPERTY_TYPE_LIST = Collections.unmodifiableList(Arrays
      .asList(PropertyType.IDEALSTATES, PropertyType.EXTERNALVIEW, PropertyType.STATEMODELDEFS));

  // ---------- operation check: validate whether an operation is safe; no mutation ----------

  /**
   * Checks whether a single instance can be taken offline.
   * @param instanceName name of the instance to check
   * @param operationConfig operation-specific configuration key/values
   * @param sn snapshot of cluster state to evaluate against
   * @return per-instance result describing whether the take operation is permissible
   */
  MaintenanceManagementInstanceInfo operationCheckForTakeSingleInstance(String instanceName,
      Map<String, String> operationConfig, RestSnapShot sn);

  /**
   * Checks whether a single instance can be brought back (freed).
   * @param instanceName name of the instance to check
   * @param operationConfig operation-specific configuration key/values
   * @param sn snapshot of cluster state to evaluate against
   * @return per-instance result describing whether the free operation is permissible
   */
  MaintenanceManagementInstanceInfo operationCheckForFreeSingleInstance(String instanceName,
      Map<String, String> operationConfig, RestSnapShot sn);

  /**
   * Batch variant of {@link #operationCheckForTakeSingleInstance}.
   * @return map of instance name to its check result
   */
  Map<String, MaintenanceManagementInstanceInfo> operationCheckForTakeInstances(
      Collection<String> instances, Map<String, String> operationConfig, RestSnapShot sn);

  /**
   * Batch variant of {@link #operationCheckForFreeSingleInstance}.
   * @return map of instance name to its check result
   */
  Map<String, MaintenanceManagementInstanceInfo> operationCheckForFreeInstances(
      Collection<String> instances, Map<String, String> operationConfig, RestSnapShot sn);

  // ---------- operation execute: actually perform the take/free operation ----------

  /**
   * Executes the take operation for a single instance.
   * @return per-instance result of the executed operation
   */
  MaintenanceManagementInstanceInfo operationExecForTakeSingleInstance(String instanceName,
      Map<String, String> operationConfig, RestSnapShot sn);

  /**
   * Executes the free operation for a single instance.
   * @return per-instance result of the executed operation
   */
  MaintenanceManagementInstanceInfo operationExecForFreeSingleInstance(String instanceName,
      Map<String, String> operationConfig, RestSnapShot sn);

  /**
   * Batch variant of {@link #operationExecForTakeSingleInstance}.
   * @return map of instance name to its execution result
   */
  Map<String, MaintenanceManagementInstanceInfo> operationExecForTakeInstances(
      Collection<String> instances, Map<String, String> operationConfig, RestSnapShot sn);

  /**
   * Batch variant of {@link #operationExecForFreeSingleInstance}.
   * @return map of instance name to its execution result
   */
  Map<String, MaintenanceManagementInstanceInfo> operationExecForFreeInstances(
      Collection<String> instances, Map<String, String> operationConfig, RestSnapShot sn);
}
| 9,331 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/metadatastore/MetadataStoreDirectory.java
|
package org.apache.helix.rest.metadatastore;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
/**
 * MetadataStoreDirectory interface that provides methods that are used to route requests to appropriate metadata store realm.
 *
 * namespace: tied to a namespace used in Helix REST (Metadata Store Directory Service endpoints will be served by Helix REST deployables)
 * realm: a metadata store deployable/ensemble. for example, if an application wishes to use 3 ZK quorums, then each ZK quorum would be considered a realm (ZK realm)
 * metadata store path sharding key: assuming the metadata store uses a file system APIs, this sharding key denotes the key that maps to a particular metadata store realm. an example of a key is a cluster name mapping to a particular ZK realm (ZK address)
 */
public interface MetadataStoreDirectory extends AutoCloseable {
  /**
   * Retrieves all existing namespaces in the routing metadata store.
   * @return collection of all namespace names
   */
  Collection<String> getAllNamespaces();

  /**
   * Returns all metadata store realms in the given namespace.
   * @param namespace namespace in metadata store directory
   * @return collection of all realm names in the namespace
   */
  Collection<String> getAllMetadataStoreRealms(String namespace);

  /**
   * Returns all path-based sharding keys in the given namespace.
   * @param namespace namespace in metadata store directory
   * @return collection of all sharding keys across every realm in the namespace
   */
  Collection<String> getAllShardingKeys(String namespace);

  /**
   * Returns routing data in the given namespace.
   *
   * @param namespace namespace in metadata store directory.
   * @return Routing data map: realm -> List of sharding keys
   */
  Map<String, List<String>> getNamespaceRoutingData(String namespace);

  /**
   * Sets and overwrites routing data in the given namespace.
   *
   * @param namespace namespace in metadata store directory.
   * @param routingData Routing data map: realm -> List of sharding keys
   * @return true if successful; false otherwise.
   */
  boolean setNamespaceRoutingData(String namespace, Map<String, List<String>> routingData);

  /**
   * Returns all path-based sharding keys in the given namespace and the realm.
   * @param namespace namespace in metadata store directory
   * @param realm realm name within the namespace
   * @return collection of sharding keys mapped to the realm
   */
  Collection<String> getAllShardingKeysInRealm(String namespace, String realm);

  /**
   * Returns all sharding keys that have the given path as the prefix substring.
   * E.g) Given that there are sharding keys: /a/b/c, /a/b/d, /a/e,
   * getAllShardingKeysUnderPath(namespace, "/a/b") returns ["/a/b/c": "realm", "/a/b/d": "realm"].
   * @param namespace namespace in metadata store directory
   * @param path path prefix to match sharding keys against
   * @return map of sharding key to realm name for all keys under the path
   */
  Map<String, String> getAllMappingUnderPath(String namespace, String path);

  /**
   * Returns the name of the metadata store realm based on the namespace and the sharding key given.
   * @param namespace namespace in metadata store directory
   * @param shardingKey sharding key to resolve
   * @return realm name the sharding key maps to
   * @throws NoSuchElementException if no realm is found for the key
   */
  String getMetadataStoreRealm(String namespace, String shardingKey)
      throws NoSuchElementException;

  /**
   * Creates a realm. If the namespace does not exist, it creates one.
   * @param namespace namespace in metadata store directory
   * @param realm realm name to create
   * @return true if successful or if the realm already exists. false otherwise.
   */
  boolean addMetadataStoreRealm(String namespace, String realm);

  /**
   * Deletes a realm.
   * @param namespace namespace in metadata store directory
   * @param realm realm name to delete
   * @return true if successful or the realm or namespace does not exist. false otherwise.
   */
  boolean deleteMetadataStoreRealm(String namespace, String realm);

  /**
   * Creates a mapping between the sharding key to the realm in the given namespace.
   * @param namespace namespace in metadata store directory
   * @param realm realm the sharding key should map to
   * @param shardingKey sharding key to add
   * @return false if failed
   */
  boolean addShardingKey(String namespace, String realm, String shardingKey);

  /**
   * Deletes the mapping between the sharding key to the realm in the given namespace.
   * @param namespace namespace in metadata store directory
   * @param realm realm the sharding key currently maps to
   * @param shardingKey sharding key to delete
   * @return false if failed; true if the deletion is successful or the key does not exist.
   */
  boolean deleteShardingKey(String namespace, String realm, String shardingKey);

  /**
   * Close MetadataStoreDirectory.
   */
  void close();
}
| 9,332 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/metadatastore/ZkMetadataStoreDirectory.java
|
package org.apache.helix.rest.metadatastore;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import com.google.common.annotations.VisibleForTesting;
import org.apache.helix.msdcommon.callback.RoutingDataListener;
import org.apache.helix.msdcommon.constant.MetadataStoreRoutingConstants;
import org.apache.helix.msdcommon.datamodel.MetadataStoreRoutingData;
import org.apache.helix.msdcommon.datamodel.TrieRoutingData;
import org.apache.helix.msdcommon.exception.InvalidRoutingDataException;
import org.apache.helix.rest.metadatastore.accessor.MetadataStoreRoutingDataReader;
import org.apache.helix.rest.metadatastore.accessor.MetadataStoreRoutingDataWriter;
import org.apache.helix.rest.metadatastore.accessor.ZkRoutingDataReader;
import org.apache.helix.rest.metadatastore.accessor.ZkRoutingDataWriter;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.serializer.ZNRecordSerializer;
import org.apache.helix.zookeeper.impl.factory.DedicatedZkClientFactory;
import org.apache.helix.zookeeper.zkclient.exception.ZkNodeExistsException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * NOTE: This is a singleton class. DO NOT EXTEND!
 * ZK-based MetadataStoreDirectory that listens on the routing data in routing ZKs with a update
 * callback.
 */
public class ZkMetadataStoreDirectory implements MetadataStoreDirectory, RoutingDataListener {
  private static final Logger LOG = LoggerFactory.getLogger(ZkMetadataStoreDirectory.class);

  // The following maps' keys represent the namespace
  // NOTE: made protected for testing reasons. DO NOT MODIFY!
  protected final Map<String, MetadataStoreRoutingDataReader> _routingDataReaderMap;
  protected final Map<String, MetadataStoreRoutingDataWriter> _routingDataWriterMap;
  // Parsed (trie) routing data per namespace; entry is absent when raw data is empty/invalid.
  protected final Map<String, MetadataStoreRoutingData> _routingDataMap;
  // namespace -> routing ZK connection string; also serves as the "namespace exists" registry
  protected final Map<String, String> _routingZkAddressMap;
  // <namespace, <realm, <list of sharding keys>> mappings
  protected final Map<String, Map<String, List<String>>> _realmToShardingKeysMap;

  // volatile so the double-checked locking in getInstance() publishes the instance safely
  private static volatile ZkMetadataStoreDirectory _zkMetadataStoreDirectoryInstance;

  /**
   * Returns the singleton instance, lazily created via double-checked locking.
   */
  public static ZkMetadataStoreDirectory getInstance() {
    if (_zkMetadataStoreDirectoryInstance == null) {
      synchronized (ZkMetadataStoreDirectory.class) {
        if (_zkMetadataStoreDirectoryInstance == null) {
          _zkMetadataStoreDirectoryInstance = new ZkMetadataStoreDirectory();
        }
      }
    }
    return _zkMetadataStoreDirectoryInstance;
  }

  /**
   * Returns the singleton instance after initializing the given namespace against the
   * given routing ZK address (no-op if the namespace was already initialized).
   * @param namespace namespace to initialize
   * @param zkAddress routing ZK connection string for the namespace
   * @throws InvalidRoutingDataException if the routing data read during init is invalid
   */
  public static ZkMetadataStoreDirectory getInstance(String namespace, String zkAddress)
      throws InvalidRoutingDataException {
    getInstance().init(namespace, zkAddress);
    return _zkMetadataStoreDirectoryInstance;
  }

  /**
   * Note: this is a singleton class. The constructor is made protected for testing. DO NOT EXTEND!
   */
  @VisibleForTesting
  protected ZkMetadataStoreDirectory() {
    _routingDataReaderMap = new ConcurrentHashMap<>();
    _routingDataWriterMap = new ConcurrentHashMap<>();
    _routingZkAddressMap = new ConcurrentHashMap<>();
    _realmToShardingKeysMap = new ConcurrentHashMap<>();
    _routingDataMap = new ConcurrentHashMap<>();
  }

  // Initializes reader/writer and caches for one namespace. Uses check-lock-check on
  // _routingZkAddressMap so concurrent callers initialize each namespace at most once.
  private void init(String namespace, String zkAddress) throws InvalidRoutingDataException {
    if (!_routingZkAddressMap.containsKey(namespace)) {
      synchronized (_routingZkAddressMap) {
        if (!_routingZkAddressMap.containsKey(namespace)) {
          HelixZkClient zkClient = null;
          try {
            // Ensure that ROUTING_DATA_PATH exists in ZK.
            zkClient = DedicatedZkClientFactory.getInstance()
                .buildZkClient(new HelixZkClient.ZkConnectionConfig(zkAddress),
                    new HelixZkClient.ZkClientConfig().setZkSerializer(new ZNRecordSerializer()));
            createRoutingDataPath(zkClient, zkAddress);
          } finally {
            // Temporary bootstrap client; close it regardless of outcome.
            if (zkClient != null && !zkClient.isClosed()) {
              zkClient.close();
            }
          }
          try {
            _routingZkAddressMap.put(namespace, zkAddress);
            _routingDataReaderMap
                .put(namespace, new ZkRoutingDataReader(namespace, zkAddress, this));
            _routingDataWriterMap.put(namespace, new ZkRoutingDataWriter(namespace, zkAddress));
          } catch (IllegalArgumentException | IllegalStateException e) {
            LOG.error("ZkMetadataStoreDirectory: initializing ZkRoutingDataReader/Writer failed!",
                e);
          }
          // Populate realmToShardingKeys with ZkRoutingDataReader
          Map<String, List<String>> rawRoutingData =
              _routingDataReaderMap.get(namespace).getRoutingData();
          _realmToShardingKeysMap.put(namespace, rawRoutingData);
          try {
            _routingDataMap.put(namespace, new TrieRoutingData(rawRoutingData));
          } catch (InvalidRoutingDataException e) {
            // Namespace stays registered but without parsed routing data; lookups on it
            // will surface an IllegalStateException until valid data is written.
            LOG.warn("ZkMetadataStoreDirectory: TrieRoutingData is not created for namespace {}",
                namespace, e);
          }
        }
      }
    }
  }

  @Override
  public Collection<String> getAllNamespaces() {
    return Collections.unmodifiableCollection(_routingZkAddressMap.keySet());
  }

  @Override
  public Collection<String> getAllMetadataStoreRealms(String namespace) {
    if (!_realmToShardingKeysMap.containsKey(namespace)) {
      throw new NoSuchElementException("Namespace " + namespace + " does not exist!");
    }
    return Collections.unmodifiableCollection(_realmToShardingKeysMap.get(namespace).keySet());
  }

  @Override
  public Collection<String> getAllShardingKeys(String namespace) {
    if (!_realmToShardingKeysMap.containsKey(namespace)) {
      throw new NoSuchElementException("Namespace " + namespace + " does not exist!");
    }
    // Flatten the per-realm key lists into a single de-duplicated set.
    Set<String> allShardingKeys = new HashSet<>();
    _realmToShardingKeysMap.get(namespace).values().forEach(allShardingKeys::addAll);
    return allShardingKeys;
  }

  @Override
  public Map<String, List<String>> getNamespaceRoutingData(String namespace) {
    // NOTE(review): returns the internal map directly (not an unmodifiable view, unlike the
    // other getters) — callers are presumably trusted not to mutate it; verify before changing.
    Map<String, List<String>> routingData = _realmToShardingKeysMap.get(namespace);
    if (routingData == null) {
      throw new NoSuchElementException("Namespace " + namespace + " does not exist!");
    }
    return routingData;
  }

  @Override
  public boolean setNamespaceRoutingData(String namespace, Map<String, List<String>> routingData) {
    if (!_routingDataWriterMap.containsKey(namespace)) {
      throw new IllegalArgumentException(
          "Failed to set routing data: Namespace " + namespace + " is not found!");
    }
    // Serialize writer mutations + cache refresh so concurrent writes don't interleave.
    synchronized (this) {
      if (!_routingDataWriterMap.get(namespace).setRoutingData(routingData)) {
        return false;
      }
      refreshRoutingData(namespace);
      return true;
    }
  }

  @Override
  public Collection<String> getAllShardingKeysInRealm(String namespace, String realm) {
    if (!_realmToShardingKeysMap.containsKey(namespace)) {
      throw new NoSuchElementException("Namespace " + namespace + " does not exist!");
    }
    if (!_realmToShardingKeysMap.get(namespace).containsKey(realm)) {
      throw new NoSuchElementException(
          "Realm " + realm + " does not exist in namespace " + namespace);
    }
    return Collections.unmodifiableCollection(_realmToShardingKeysMap.get(namespace).get(realm));
  }

  @Override
  public Map<String, String> getAllMappingUnderPath(String namespace, String path) {
    // Check _routingZkAddressMap first to see if namespace is included
    if (!_routingZkAddressMap.containsKey(namespace)) {
      throw new NoSuchElementException(
          "Failed to get all mapping under path: Namespace " + namespace + " is not found!");
    }
    // If namespace is included but not routing data, it means the routing data is invalid
    if (!_routingDataMap.containsKey(namespace)) {
      throw new IllegalStateException("Failed to get all mapping under path: Namespace " + namespace
          + " contains either empty or invalid routing data!");
    }
    return _routingDataMap.get(namespace).getAllMappingUnderPath(path);
  }

  @Override
  public String getMetadataStoreRealm(String namespace, String shardingKey) {
    // Check _routingZkAddressMap first to see if namespace is included
    if (!_routingZkAddressMap.containsKey(namespace)) {
      throw new NoSuchElementException(
          "Failed to get metadata store realm: Namespace " + namespace + " is not found!");
    }
    // If namespace is included but not routing data, it means the routing data is invalid
    if (!_routingDataMap.containsKey(namespace)) {
      throw new IllegalStateException("Failed to get metadata store realm: Namespace " + namespace
          + " contains either empty or invalid routing data!");
    }
    return _routingDataMap.get(namespace).getMetadataStoreRealm(shardingKey);
  }

  @Override
  public boolean addMetadataStoreRealm(String namespace, String realm) {
    if (!_routingDataWriterMap.containsKey(namespace)) {
      // throwing NoSuchElementException instead of IllegalArgumentException to differentiate the
      // status code in the Accessor level
      throw new NoSuchElementException(
          "Failed to add metadata store realm: Namespace " + namespace + " is not found!");
    }
    synchronized (this) {
      if (!_routingDataWriterMap.get(namespace).addMetadataStoreRealm(realm)) {
        return false;
      }
      refreshRoutingData(namespace);
      return true;
    }
  }

  @Override
  public boolean deleteMetadataStoreRealm(String namespace, String realm) {
    if (!_routingDataWriterMap.containsKey(namespace)) {
      // throwing NoSuchElementException instead of IllegalArgumentException to differentiate the
      // status code in the Accessor level
      throw new NoSuchElementException(
          "Failed to delete metadata store realm: Namespace " + namespace + " is not found!");
    }
    synchronized (this) {
      if (!_routingDataWriterMap.get(namespace).deleteMetadataStoreRealm(realm)) {
        return false;
      }
      refreshRoutingData(namespace);
      return true;
    }
  }

  @Override
  public boolean addShardingKey(String namespace, String realm, String shardingKey) {
    if (!_routingDataWriterMap.containsKey(namespace)) {
      // throwing NoSuchElementException instead of IllegalArgumentException to differentiate the
      // status code in the Accessor level
      throw new NoSuchElementException(
          "Failed to add sharding key: Namespace " + namespace + " is not found!");
    }
    synchronized (this) {
      // Idempotent: the key/realm pair is already mapped, nothing to do.
      if (_routingDataMap.containsKey(namespace) && _routingDataMap.get(namespace)
          .containsKeyRealmPair(shardingKey, realm)) {
        return true;
      }
      // Reject insertions that would make the routing data internally inconsistent.
      if (_routingDataMap.containsKey(namespace) && !_routingDataMap.get(namespace)
          .isShardingKeyInsertionValid(shardingKey)) {
        throw new IllegalArgumentException(
            "Failed to add sharding key: Adding sharding key " + shardingKey
                + " makes routing data invalid!");
      }
      if (!_routingDataWriterMap.get(namespace).addShardingKey(realm, shardingKey)) {
        return false;
      }
      refreshRoutingData(namespace);
      return true;
    }
  }

  @Override
  public boolean deleteShardingKey(String namespace, String realm, String shardingKey) {
    if (!_routingDataWriterMap.containsKey(namespace)) {
      // throwing NoSuchElementException instead of IllegalArgumentException to differentiate the
      // status code in the Accessor level
      throw new NoSuchElementException(
          "Failed to delete sharding key: Namespace " + namespace + " is not found!");
    }
    synchronized (this) {
      if (!_routingDataWriterMap.get(namespace).deleteShardingKey(realm, shardingKey)) {
        return false;
      }
      refreshRoutingData(namespace);
      return true;
    }
  }

  /**
   * Callback for updating the cached routing data.
   * Note: this method should not synchronize on the class or the map. We do not want namespaces
   * blocking each other.
   * Threadsafe map is used for _realmToShardingKeysMap.
   * The global consistency of the in-memory routing data is not a requirement (eventual consistency
   * is enough).
   * @param namespace namespace whose cached routing data should be re-read from ZK
   */
  @Override
  public void refreshRoutingData(String namespace) {
    // Check if namespace exists; otherwise, return as a NOP and log it
    if (!_routingZkAddressMap.containsKey(namespace)) {
      LOG.error(
          "Failed to refresh internally-cached routing data! Namespace not found: " + namespace);
      return;
    }
    Map<String, List<String>> rawRoutingData;
    try {
      rawRoutingData = _routingDataReaderMap.get(namespace).getRoutingData();
    } catch (InvalidRoutingDataException e) {
      // Invalid raw data: clear the caches so lookups fail fast rather than serve stale data.
      LOG.error("Failed to refresh cached routing data for namespace {}", namespace, e);
      _realmToShardingKeysMap.put(namespace, Collections.emptyMap());
      _routingDataMap.remove(namespace);
      return;
    }
    _realmToShardingKeysMap.put(namespace, rawRoutingData);
    TrieRoutingData trieRoutingData;
    try {
      trieRoutingData = new TrieRoutingData(rawRoutingData);
    } catch (InvalidRoutingDataException e) {
      LOG.warn("TrieRoutingData is not created for namespace {}", namespace, e);
      _routingDataMap.remove(namespace);
      return;
    }
    _routingDataMap.put(namespace, trieRoutingData);
  }

  @Override
  public synchronized void close() {
    // Close all readers/writers, drop all cached state, and reset the singleton so a
    // subsequent getInstance() builds a fresh directory.
    _routingDataReaderMap.values().forEach(MetadataStoreRoutingDataReader::close);
    _routingDataWriterMap.values().forEach(MetadataStoreRoutingDataWriter::close);
    _routingDataReaderMap.clear();
    _routingDataWriterMap.clear();
    _routingZkAddressMap.clear();
    _realmToShardingKeysMap.clear();
    _routingDataMap.clear();
    _zkMetadataStoreDirectoryInstance = null;
  }

  /**
   * Make sure the root routing data path exists. Also, register the routing ZK address.
   * @param zkClient ZK client connected to the routing ZK
   * @param zkAddress routing ZK connection string to record under the routing data path
   */
  public static void createRoutingDataPath(HelixZkClient zkClient, String zkAddress) {
    try {
      zkClient.createPersistent(MetadataStoreRoutingConstants.ROUTING_DATA_PATH, true);
    } catch (ZkNodeExistsException e) {
      // The node already exists and it's okay
    }
    // Make sure ROUTING_DATA_PATH is mapped to the routing ZK so that FederatedZkClient used
    // in Helix REST can subscribe to the routing data path
    ZNRecord znRecord = new ZNRecord(MetadataStoreRoutingConstants.ROUTING_DATA_PATH.substring(1));
    znRecord.setListField(MetadataStoreRoutingConstants.ROUTING_ZK_ADDRESS_KEY,
        Collections.singletonList(zkAddress));
    zkClient.writeData(MetadataStoreRoutingConstants.ROUTING_DATA_PATH, znRecord);
  }
}
| 9,333 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/metadatastore
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/metadatastore/accessor/ZkRoutingDataWriter.java
|
package org.apache.helix.rest.metadatastore.accessor;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import javax.ws.rs.core.Response;
import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.helix.msdcommon.constant.MetadataStoreRoutingConstants;
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.metadatastore.ZkMetadataStoreDirectory;
import org.apache.helix.rest.metadatastore.concurrency.ZkDistributedLeaderElection;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.serializer.ZNRecordSerializer;
import org.apache.helix.zookeeper.impl.factory.DedicatedZkClientFactory;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class ZkRoutingDataWriter implements MetadataStoreRoutingDataWriter {
  // Time out for http requests that are forwarded to leader instances measured in milliseconds
  private static final int HTTP_REQUEST_FORWARDING_TIMEOUT = 60 * 1000;
  private static final Logger LOG = LoggerFactory.getLogger(ZkRoutingDataWriter.class);
  private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
  // Simple-field keys written into the leader-election ZNode to advertise this
  // server's REST endpoint (used to build forwarding URLs).
  private static final String SIMPLE_FIELD_KEY_HOSTNAME = "hostname";
  private static final String SIMPLE_FIELD_KEY_PORT = "port";
  private static final String SIMPLE_FIELD_KEY_CONTEXT_URL_PREFIX = "contextUrlPrefix";
  private final String _namespace;
  // Dedicated ZK connection for this writer.
  private final HelixZkClient _zkClient;
  // Only the elected leader writes to ZK directly; followers forward requests over HTTP.
  private final ZkDistributedLeaderElection _leaderElection;
  // HTTP client used to forward write requests to the current leader.
  private final CloseableHttpClient _forwardHttpClient;
  private final String _myHostName;
  /**
   * Creates a routing data writer for the given namespace, connects to the routing ZK,
   * ensures the routing data root path exists, and joins leader election so that exactly
   * one REST instance performs ZK writes (others forward over HTTP).
   * @param namespace namespace this writer serves; must be non-null and non-empty
   * @param zkAddress routing ZK connection string; must be non-null and non-empty
   * @throws IllegalArgumentException if namespace or zkAddress is null/empty
   * @throws IllegalStateException if the MSDS hostname system property is not set
   */
  public ZkRoutingDataWriter(String namespace, String zkAddress) {
    if (namespace == null || namespace.isEmpty()) {
      throw new IllegalArgumentException("namespace cannot be null or empty!");
    }
    _namespace = namespace;
    if (zkAddress == null || zkAddress.isEmpty()) {
      throw new IllegalArgumentException("Zk address cannot be null or empty!");
    }
    _zkClient = DedicatedZkClientFactory.getInstance()
        .buildZkClient(new HelixZkClient.ZkConnectionConfig(zkAddress),
            new HelixZkClient.ZkClientConfig().setZkSerializer(new ZNRecordSerializer()));
    ZkMetadataStoreDirectory.createRoutingDataPath(_zkClient, zkAddress);
    // Get the hostname (REST endpoint) from System property
    String hostName = System.getProperty(MetadataStoreRoutingConstants.MSDS_SERVER_HOSTNAME_KEY);
    if (hostName == null || hostName.isEmpty()) {
      String errMsg =
          "ZkRoutingDataWriter: Hostname is not set or is empty. System.getProperty fails to fetch "
              + MetadataStoreRoutingConstants.MSDS_SERVER_HOSTNAME_KEY;
      LOG.error(errMsg);
      throw new IllegalStateException(errMsg);
    }
    _myHostName = HttpConstants.HTTP_PROTOCOL_PREFIX + hostName;
    // Advertise this server's endpoint (hostname/port/prefix) via the leader-election ZNode.
    ZNRecord myServerInfo = new ZNRecord(hostName);
    myServerInfo.setSimpleField(SIMPLE_FIELD_KEY_HOSTNAME, hostName);
    String port = System.getProperty(MetadataStoreRoutingConstants.MSDS_SERVER_PORT_KEY);
    if (port != null && !port.isEmpty()) {
      myServerInfo.setSimpleField(SIMPLE_FIELD_KEY_PORT, port);
    }
    // One example of context url prefix is "/admin/v2". With the prefix specified, we want to
    // make sure the final url is "/admin/v2/namespaces/NAMESPACE/some/endpoint"; without it
    // being specified, we will skip it and go with "/namespaces/NAMESPACE/some/endpoint".
    String contextUrlPrefix =
        System.getProperty(MetadataStoreRoutingConstants.MSDS_CONTEXT_URL_PREFIX_KEY);
    if (contextUrlPrefix != null && !contextUrlPrefix.isEmpty()) {
      myServerInfo.setSimpleField(SIMPLE_FIELD_KEY_CONTEXT_URL_PREFIX, contextUrlPrefix);
    }
    _leaderElection = new ZkDistributedLeaderElection(_zkClient,
        MetadataStoreRoutingConstants.LEADER_ELECTION_ZNODE, myServerInfo);
    // HTTP client used only for forwarding writes to the leader; all timeouts bounded.
    RequestConfig config = RequestConfig.custom().setConnectTimeout(HTTP_REQUEST_FORWARDING_TIMEOUT)
        .setConnectionRequestTimeout(HTTP_REQUEST_FORWARDING_TIMEOUT)
        .setSocketTimeout(HTTP_REQUEST_FORWARDING_TIMEOUT).build();
    _forwardHttpClient = HttpClientBuilder.create().setDefaultRequestConfig(config).build();
  }
public static String buildEndpointFromLeaderElectionNode(ZNRecord znRecord) {
List<String> urlComponents =
new ArrayList<>(Collections.singletonList(HttpConstants.HTTP_PROTOCOL_PREFIX));
urlComponents.add(znRecord.getSimpleField(SIMPLE_FIELD_KEY_HOSTNAME));
String port = znRecord.getSimpleField(SIMPLE_FIELD_KEY_PORT);
if (port != null && !port.isEmpty()) {
urlComponents.add(":");
urlComponents.add(port);
}
String contextUrlPrefix = znRecord.getSimpleField(SIMPLE_FIELD_KEY_CONTEXT_URL_PREFIX);
if (contextUrlPrefix != null && !contextUrlPrefix.isEmpty()) {
urlComponents.add(contextUrlPrefix);
}
return String.join("", urlComponents);
}
@Override
public synchronized boolean addMetadataStoreRealm(String realm) {
if (_leaderElection.isLeader()) {
if (_zkClient.isClosed()) {
throw new IllegalStateException("ZkClient is closed!");
}
return createZkRealm(realm);
}
String urlSuffix =
constructUrlSuffix(MetadataStoreRoutingConstants.MSDS_GET_ALL_REALMS_ENDPOINT, realm);
return buildAndSendRequestToLeader(urlSuffix, HttpConstants.RestVerbs.PUT,
Response.Status.CREATED.getStatusCode());
}
@Override
public synchronized boolean deleteMetadataStoreRealm(String realm) {
if (_leaderElection.isLeader()) {
if (_zkClient.isClosed()) {
throw new IllegalStateException("ZkClient is closed!");
}
return deleteZkRealm(realm);
}
String urlSuffix =
constructUrlSuffix(MetadataStoreRoutingConstants.MSDS_GET_ALL_REALMS_ENDPOINT, realm);
return buildAndSendRequestToLeader(urlSuffix, HttpConstants.RestVerbs.DELETE,
Response.Status.OK.getStatusCode());
}
@Override
public synchronized boolean addShardingKey(String realm, String shardingKey) {
if (_leaderElection.isLeader()) {
if (_zkClient.isClosed()) {
throw new IllegalStateException("ZkClient is closed!");
}
return createZkShardingKey(realm, shardingKey);
}
String urlSuffix =
constructUrlSuffix(MetadataStoreRoutingConstants.MSDS_GET_ALL_REALMS_ENDPOINT, realm,
MetadataStoreRoutingConstants.MSDS_GET_ALL_SHARDING_KEYS_ENDPOINT, shardingKey);
return buildAndSendRequestToLeader(urlSuffix, HttpConstants.RestVerbs.PUT,
Response.Status.CREATED.getStatusCode());
}
@Override
public synchronized boolean deleteShardingKey(String realm, String shardingKey) {
if (_leaderElection.isLeader()) {
if (_zkClient.isClosed()) {
throw new IllegalStateException("ZkClient is closed!");
}
return deleteZkShardingKey(realm, shardingKey);
}
String urlSuffix =
constructUrlSuffix(MetadataStoreRoutingConstants.MSDS_GET_ALL_REALMS_ENDPOINT, realm,
MetadataStoreRoutingConstants.MSDS_GET_ALL_SHARDING_KEYS_ENDPOINT, shardingKey);
return buildAndSendRequestToLeader(urlSuffix, HttpConstants.RestVerbs.DELETE,
Response.Status.OK.getStatusCode());
}
  /**
   * Replaces ALL routing data for this namespace. The leader deletes every existing realm
   * ZNode and then writes one ZNode per realm from the given map; a non-leader serializes
   * the map to JSON and forwards it to the leader's REST endpoint.
   * NOTE(review): delete-then-write is not atomic — a reader may briefly observe partial
   * routing data between the two phases.
   * @param routingData realm -> list of sharding keys; must not be null (leader path)
   * @return true on success; false if any delete/write/forward step fails
   */
  @Override
  public synchronized boolean setRoutingData(Map<String, List<String>> routingData) {
    if (_leaderElection.isLeader()) {
      if (_zkClient.isClosed()) {
        throw new IllegalStateException("ZkClient is closed!");
      }
      if (routingData == null) {
        throw new IllegalArgumentException("routingData given is null!");
      }
      // Remove existing routing data
      for (String zkRealm : _zkClient
          .getChildren(MetadataStoreRoutingConstants.ROUTING_DATA_PATH)) {
        if (!_zkClient.delete(MetadataStoreRoutingConstants.ROUTING_DATA_PATH + "/" + zkRealm)) {
          LOG.error(
              "Failed to delete existing routing data in setRoutingData()! Namespace: {}, Realm: {}",
              _namespace, zkRealm);
          return false;
        }
      }
      // For each ZkRealm, write the given routing data to ZooKeeper
      for (Map.Entry<String, List<String>> routingDataEntry : routingData.entrySet()) {
        String zkRealm = routingDataEntry.getKey();
        List<String> shardingKeyList = routingDataEntry.getValue();
        ZNRecord znRecord = new ZNRecord(zkRealm);
        znRecord
            .setListField(MetadataStoreRoutingConstants.ZNRECORD_LIST_FIELD_KEY, shardingKeyList);
        String realmPath = MetadataStoreRoutingConstants.ROUTING_DATA_PATH + "/" + zkRealm;
        try {
          if (!_zkClient.exists(realmPath)) {
            _zkClient.createPersistent(realmPath);
          }
          _zkClient.writeData(realmPath, znRecord);
        } catch (Exception e) {
          LOG.error("Failed to write data in setRoutingData()! Namespace: {}, Realm: {}",
              _namespace, zkRealm, e);
          return false;
        }
      }
      return true;
    }
    // Non-leader: serialize the routing data and forward it to the leader via HTTP PUT.
    String url = buildEndpointFromLeaderElectionNode(_leaderElection.getCurrentLeaderInfo())
        + constructUrlSuffix(MetadataStoreRoutingConstants.MSDS_GET_ALL_ROUTING_DATA_ENDPOINT);
    HttpPut httpPut = new HttpPut(url);
    String routingDataJsonString;
    try {
      routingDataJsonString = OBJECT_MAPPER.writeValueAsString(routingData);
    } catch (JsonGenerationException | JsonMappingException e) {
      // Malformed input map: surface as an argument error rather than a silent false.
      throw new IllegalArgumentException(e.getMessage());
    } catch (IOException e) {
      LOG.error(
          "setRoutingData failed before forwarding the request to leader: an exception happened while routingData is converted to json. routingData: {}",
          routingData, e);
      return false;
    }
    httpPut.setEntity(new StringEntity(routingDataJsonString, ContentType.APPLICATION_JSON));
    return sendRequestToLeader(httpPut, Response.Status.CREATED.getStatusCode());
  }
  /**
   * Releases this writer's resources: the dedicated ZK connection and the HTTP client
   * used for forwarding requests to the leader. A failure closing the HTTP client is
   * logged but not propagated.
   */
  @Override
  public synchronized void close() {
    _zkClient.close();
    try {
      _forwardHttpClient.close();
    } catch (IOException e) {
      LOG.error("HttpClient failed to close. ", e);
    }
  }
/**
* Creates a ZK realm ZNode and populates it with an empty ZNRecord if it doesn't exist already.
* @param realm
* @return
*/
protected boolean createZkRealm(String realm) {
if (_zkClient.exists(MetadataStoreRoutingConstants.ROUTING_DATA_PATH + "/" + realm)) {
LOG.warn("createZkRealm() called for realm: {}, but this realm already exists! Namespace: {}",
realm, _namespace);
return true;
}
try {
_zkClient.createPersistent(MetadataStoreRoutingConstants.ROUTING_DATA_PATH + "/" + realm);
_zkClient.writeData(MetadataStoreRoutingConstants.ROUTING_DATA_PATH + "/" + realm,
new ZNRecord(realm));
} catch (Exception e) {
LOG.error("Failed to create ZkRealm: {}, Namespace: {}", realm, _namespace, e);
return false;
}
return true;
}
protected boolean deleteZkRealm(String realm) {
if (!_zkClient.exists(MetadataStoreRoutingConstants.ROUTING_DATA_PATH + "/" + realm)) {
LOG.warn(
"deleteZkRealm() called for realm: {}, but this realm already doesn't exist! Namespace: {}",
realm, _namespace);
return true;
}
return _zkClient.delete(MetadataStoreRoutingConstants.ROUTING_DATA_PATH + "/" + realm);
}
protected boolean createZkShardingKey(String realm, String shardingKey) {
// If the realm does not exist already, then create the realm
String realmPath = MetadataStoreRoutingConstants.ROUTING_DATA_PATH + "/" + realm;
if (!_zkClient.exists(realmPath)) {
// Create the realm
if (!createZkRealm(realm)) {
// Failed to create the realm - log and return false
LOG.error(
"Failed to add sharding key because ZkRealm creation failed! Namespace: {}, Realm: {}, Sharding key: {}",
_namespace, realm, shardingKey);
return false;
}
}
ZNRecord znRecord;
try {
znRecord = _zkClient.readData(realmPath);
} catch (Exception e) {
LOG.error(
"Failed to read the realm ZNRecord in addShardingKey()! Namespace: {}, Realm: {}, ShardingKey: {}",
_namespace, realm, shardingKey, e);
return false;
}
List<String> shardingKeys =
znRecord.getListField(MetadataStoreRoutingConstants.ZNRECORD_LIST_FIELD_KEY);
if (shardingKeys == null || shardingKeys.isEmpty()) {
shardingKeys = new ArrayList<>();
}
shardingKeys.add(shardingKey);
znRecord.setListField(MetadataStoreRoutingConstants.ZNRECORD_LIST_FIELD_KEY, shardingKeys);
try {
_zkClient.writeData(realmPath, znRecord);
} catch (Exception e) {
LOG.error(
"Failed to write the realm ZNRecord in addShardingKey()! Namespace: {}, Realm: {}, ShardingKey: {}",
_namespace, realm, shardingKey, e);
return false;
}
return true;
}
protected boolean deleteZkShardingKey(String realm, String shardingKey) {
ZNRecord znRecord =
_zkClient.readData(MetadataStoreRoutingConstants.ROUTING_DATA_PATH + "/" + realm, true);
if (znRecord == null || !znRecord
.getListField(MetadataStoreRoutingConstants.ZNRECORD_LIST_FIELD_KEY)
.contains(shardingKey)) {
// This realm does not exist or shardingKey doesn't exist. Return true!
return true;
}
znRecord.getListField(MetadataStoreRoutingConstants.ZNRECORD_LIST_FIELD_KEY)
.remove(shardingKey);
// Overwrite this ZNRecord with the sharding key removed
try {
_zkClient.writeData(MetadataStoreRoutingConstants.ROUTING_DATA_PATH + "/" + realm, znRecord);
} catch (Exception e) {
LOG.error(
"Failed to write the data back in deleteShardingKey()! Namespace: {}, Realm: {}, ShardingKey: {}",
_namespace, realm, shardingKey, e);
return false;
}
return true;
}
private String constructUrlSuffix(String... urlParams) {
List<String> allUrlParameters = new ArrayList<>(
Arrays.asList(MetadataStoreRoutingConstants.MSDS_NAMESPACES_URL_PREFIX, "/", _namespace));
for (String urlParam : urlParams) {
if (urlParam.charAt(0) != '/') {
urlParam = "/" + urlParam;
}
allUrlParameters.add(urlParam);
}
return String.join("", allUrlParameters);
}
private boolean buildAndSendRequestToLeader(String urlSuffix,
HttpConstants.RestVerbs requestMethod, int expectedResponseCode)
throws IllegalArgumentException {
String url =
buildEndpointFromLeaderElectionNode(_leaderElection.getCurrentLeaderInfo()) + urlSuffix;
HttpUriRequest request;
switch (requestMethod) {
case PUT:
request = new HttpPut(url);
break;
case DELETE:
request = new HttpDelete(url);
break;
default:
throw new IllegalArgumentException("Unsupported requestMethod: " + requestMethod.name());
}
return sendRequestToLeader(request, expectedResponseCode);
}
// Set to be protected for testing purposes
protected boolean sendRequestToLeader(HttpUriRequest request, int expectedResponseCode) {
try {
HttpResponse response = _forwardHttpClient.execute(request);
if (response.getStatusLine().getStatusCode() != expectedResponseCode) {
HttpEntity respEntity = response.getEntity();
String errorLog = "The forwarded request to leader has failed. Uri: " + request.getURI()
+ ". Error code: " + response.getStatusLine().getStatusCode() + " Current hostname: "
+ _myHostName;
if (respEntity != null) {
errorLog += " Response: " + EntityUtils.toString(respEntity);
}
LOG.error(errorLog);
return false;
}
} catch (IOException e) {
LOG.error(
"The forwarded request to leader raised an exception. Uri: {} Current hostname: {} ",
request.getURI(), _myHostName, e);
return false;
}
return true;
}
}
| 9,334 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/metadatastore
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/metadatastore/accessor/ZkRoutingDataReader.java
|
package org.apache.helix.rest.metadatastore.accessor;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.msdcommon.callback.RoutingDataListener;
import org.apache.helix.msdcommon.constant.MetadataStoreRoutingConstants;
import org.apache.helix.msdcommon.exception.InvalidRoutingDataException;
import org.apache.helix.rest.metadatastore.ZkMetadataStoreDirectory;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.serializer.ZNRecordSerializer;
import org.apache.helix.zookeeper.impl.factory.DedicatedZkClientFactory;
import org.apache.helix.zookeeper.zkclient.IZkChildListener;
import org.apache.helix.zookeeper.zkclient.IZkDataListener;
import org.apache.helix.zookeeper.zkclient.IZkStateListener;
import org.apache.helix.zookeeper.zkclient.exception.ZkNoNodeException;
import org.apache.zookeeper.Watcher;
public class ZkRoutingDataReader implements MetadataStoreRoutingDataReader, IZkDataListener, IZkChildListener, IZkStateListener {
  private final String _namespace;
  private final String _zkAddress;
  private final HelixZkClient _zkClient;
  private final RoutingDataListener _routingDataListener;

  /**
   * Creates a routing data reader that watches the routing data path on the given ZooKeeper.
   * @param namespace namespace this reader serves; must be non-null and non-empty
   * @param zkAddress address of the ZooKeeper storing the routing data; must be non-null and
   *          non-empty
   * @param routingDataListener listener notified when routing data changes; may be null, in
   *          which case no ZK change subscriptions are made
   */
  public ZkRoutingDataReader(String namespace, String zkAddress,
      RoutingDataListener routingDataListener) {
    if (namespace == null || namespace.isEmpty()) {
      throw new IllegalArgumentException("namespace cannot be null or empty!");
    }
    _namespace = namespace;
    if (zkAddress == null || zkAddress.isEmpty()) {
      throw new IllegalArgumentException("Zk address cannot be null or empty!");
    }
    _zkAddress = zkAddress;
    _zkClient = DedicatedZkClientFactory.getInstance()
        .buildZkClient(new HelixZkClient.ZkConnectionConfig(zkAddress),
            new HelixZkClient.ZkClientConfig().setZkSerializer(new ZNRecordSerializer()));
    // Ensure the routing data path exists before reading from / subscribing to it.
    ZkMetadataStoreDirectory.createRoutingDataPath(_zkClient, _zkAddress);
    _routingDataListener = routingDataListener;
    if (_routingDataListener != null) {
      // Subscribe to both child and data changes so any routing data update is observed.
      _zkClient.subscribeRoutingDataChanges(this, this);
    }
  }

  /**
   * Returns (realm, list of ZK path sharding keys) mappings.
   * @return Map <realm, list of ZK path sharding keys>
   * @throws InvalidRoutingDataException - when the node on
   *           MetadataStoreRoutingConstants.ROUTING_DATA_PATH is missing
   */
  public Map<String, List<String>> getRoutingData() throws InvalidRoutingDataException {
    Map<String, List<String>> routingData = new HashMap<>();
    List<String> allRealmAddresses;
    try {
      allRealmAddresses = _zkClient.getChildren(MetadataStoreRoutingConstants.ROUTING_DATA_PATH);
    } catch (ZkNoNodeException e) {
      throw new InvalidRoutingDataException(
          "Routing data directory ZNode " + MetadataStoreRoutingConstants.ROUTING_DATA_PATH
              + " does not exist. Routing ZooKeeper address: " + _zkAddress);
    }
    if (allRealmAddresses != null) {
      for (String realmAddress : allRealmAddresses) {
        // Tolerant read (second arg true): a realm deleted mid-iteration yields null
        // instead of throwing, and is simply skipped.
        ZNRecord record = _zkClient
            .readData(MetadataStoreRoutingConstants.ROUTING_DATA_PATH + "/" + realmAddress, true);
        if (record != null) {
          List<String> shardingKeys =
              record.getListField(MetadataStoreRoutingConstants.ZNRECORD_LIST_FIELD_KEY);
          routingData
              .put(realmAddress, shardingKeys != null ? shardingKeys : Collections.emptyList());
        }
      }
    }
    return routingData;
  }

  public synchronized void close() {
    _zkClient.unsubscribeAll();
    _zkClient.close();
  }

  @Override
  public synchronized void handleDataChange(String s, Object o) {
    refreshIfActive();
  }

  @Override
  public synchronized void handleDataDeleted(String s) {
    // When a child node is deleted, this and handleChildChange will both be triggered, but the
    // behavior is safe
    handleResubscription();
  }

  @Override
  public synchronized void handleChildChange(String s, List<String> list) {
    handleResubscription();
  }

  @Override
  public synchronized void handleStateChanged(Watcher.Event.KeeperState state) {
    refreshIfActive();
  }

  @Override
  public synchronized void handleNewSession(String sessionId) {
    refreshIfActive();
  }

  @Override
  public synchronized void handleSessionEstablishmentError(Throwable error) {
    refreshIfActive();
  }

  /**
   * Notifies the listener to refresh routing data unless the client is already closed.
   * Shared by all callbacks that do not need to resubscribe, so the guard is written once.
   * Always invoked from synchronized callbacks, so it runs under the instance lock.
   */
  private void refreshIfActive() {
    if (_zkClient == null || _zkClient.isClosed()) {
      return;
    }
    _routingDataListener.refreshRoutingData(_namespace);
  }

  /**
   * Renews all ZK subscriptions and refreshes the routing data. Used for events (deletion,
   * child change) that can invalidate existing watches.
   */
  private void handleResubscription() {
    if (_zkClient == null || _zkClient.isClosed()) {
      return;
    }
    // Renew subscription
    _zkClient.unsubscribeAll();
    _zkClient.subscribeRoutingDataChanges(this, this);
    _routingDataListener.refreshRoutingData(_namespace);
  }
}
| 9,335 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/metadatastore
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/metadatastore/accessor/MetadataStoreRoutingDataReader.java
|
package org.apache.helix.rest.metadatastore.accessor;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import java.util.Map;
import org.apache.helix.msdcommon.exception.InvalidRoutingDataException;
/**
 * An interface for a DAO that fetches routing data from a source and return a key-value mapping
 * that represent the said routing data.
 * Note: Each data reader connects to a single namespace.
 */
public interface MetadataStoreRoutingDataReader {
  /**
   * Fetches routing data from the data source.
   * @return a mapping from "metadata store realm addresses" to lists of "metadata store sharding
   *         keys", where the sharding keys in a value list all route to the realm address in the
   *         key
   * @throws InvalidRoutingDataException - when the routing data is malformed in any way that
   *         disallows a meaningful mapping to be returned
   */
  Map<String, List<String>> getRoutingData()
      throws InvalidRoutingDataException;
  /**
   * Closes any stateful resources such as connections or threads.
   * After close() is called, the reader should no longer be used.
   */
  void close();
}
| 9,336 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/metadatastore
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/metadatastore/accessor/MetadataStoreRoutingDataWriter.java
|
package org.apache.helix.rest.metadatastore.accessor;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import java.util.Map;
/**
 * An interface for a DAO that writes to the metadata store that stores routing data.
 * Note: Each data writer connects to a single namespace.
 */
public interface MetadataStoreRoutingDataWriter {
  /**
   * Creates a realm. If the namespace does not exist, it creates one.
   * @param realm name of the realm to create
   * @return true if successful or if the realm already exists. false otherwise.
   */
  boolean addMetadataStoreRealm(String realm);
  /**
   * Deletes a realm.
   * @param realm name of the realm to delete
   * @return true if successful or the realm or namespace does not exist. false otherwise.
   */
  boolean deleteMetadataStoreRealm(String realm);
  /**
   * Creates a mapping between the sharding key to the realm. If realm doesn't exist, it will be created (this call is idempotent).
   * @param realm realm the sharding key should map to
   * @param shardingKey the sharding key to add
   * @return false if failed
   */
  boolean addShardingKey(String realm, String shardingKey);
  /**
   * Deletes the mapping between the sharding key to the realm.
   * @param realm realm the sharding key currently maps to
   * @param shardingKey the sharding key to remove
   * @return false if failed; true if the deletion is successful or the key does not exist.
   */
  boolean deleteShardingKey(String realm, String shardingKey);
  /**
   * Sets (overwrites) the routing data with the given <realm, list of sharding keys> mapping.
   * WARNING: This overwrites all existing routing data. Use with care!
   * @param routingData map from realm name to the full list of sharding keys for that realm
   * @return true if successful; false otherwise.
   */
  boolean setRoutingData(Map<String, List<String>> routingData);
  /**
   * Closes any stateful resources such as connections or threads.
   * After close() is called, the writer should no longer be used.
   */
  void close();
}
| 9,337 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/metadatastore
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/metadatastore/concurrency/ZkDistributedLeaderElection.java
|
package org.apache.helix.rest.metadatastore.concurrency;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.List;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.serializer.ZNRecordSerializer;
import org.apache.helix.zookeeper.zkclient.IZkDataListener;
import org.apache.helix.zookeeper.zkclient.IZkStateListener;
import org.apache.helix.zookeeper.zkclient.exception.ZkNodeExistsException;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A ZooKeeper-based distributed leader election implemented with ephemeral sequential ZNodes:
 * each participant creates an EPHEMERAL_SEQUENTIAL node under a shared base path; the
 * participant owning the lowest-sequenced node is the leader, and every other participant
 * watches only the node immediately preceding its own (avoiding a herd effect on leader loss).
 */
public class ZkDistributedLeaderElection implements IZkDataListener, IZkStateListener {
  private static final Logger LOG = LoggerFactory.getLogger(ZkDistributedLeaderElection.class);
  // Prefix for each participant's ephemeral sequential ZNode name.
  private static final String PREFIX = "MSDS_SERVER_";
  private final HelixZkClient _zkClient;
  private final String _basePath;
  // Arbitrary participant metadata stored in this participant's ephemeral node.
  private final ZNRecord _participantInfo;
  // Info read from the current leader's ZNode; updated on every leadership check.
  private ZNRecord _currentLeaderInfo;
  // Full ZK path of this participant's own ephemeral sequential node.
  private String _myEphemeralSequentialPath;
  // volatile: written by ZK callback threads, read via isLeader().
  private volatile boolean _isLeader;
  public ZkDistributedLeaderElection(HelixZkClient zkClient, String basePath,
      ZNRecord participantInfo) {
    // The synchronized block ensures field writes are published before init() registers
    // ZK callbacks that may fire on other threads.
    synchronized (this) {
      if (zkClient == null || zkClient.isClosed()) {
        throw new IllegalArgumentException("ZkClient cannot be null or closed!");
      }
      _zkClient = zkClient;
      _zkClient.setZkSerializer(new ZNRecordSerializer());
      if (basePath == null || basePath.isEmpty()) {
        throw new IllegalArgumentException("lockBasePath cannot be null or empty!");
      }
      _basePath = basePath;
      _participantInfo = participantInfo;
      _isLeader = false;
    }
    init();
  }
  /**
   * Create the base path if it doesn't exist and create an ephemeral sequential ZNode.
   */
  private void init() {
    try {
      _zkClient.createPersistent(_basePath, true);
    } catch (ZkNodeExistsException e) {
      // Okay if it exists already
    }
    // Create my ephemeral sequential node with my information
    _myEphemeralSequentialPath = _zkClient
        .create(_basePath + "/" + PREFIX, _participantInfo, CreateMode.EPHEMERAL_SEQUENTIAL);
    if (_myEphemeralSequentialPath == null) {
      throw new IllegalStateException(
          "Unable to create ephemeral sequential node at path: " + _basePath);
    }
    tryAcquiringLeadership();
  }
  // Checks whether this participant owns the lowest-sequenced child; if not, watches the
  // ephemeral node immediately preceding its own so it is notified when its turn may come.
  private void tryAcquiringLeadership() {
    List<String> children = _zkClient.getChildren(_basePath);
    // Ephemeral sequential names sort by their sequence suffix, so index 0 is the leader.
    Collections.sort(children);
    String leaderName = children.get(0);
    _currentLeaderInfo = _zkClient.readData(_basePath + "/" + leaderName, true);
    String[] myNameArray = _myEphemeralSequentialPath.split("/");
    String myName = myNameArray[myNameArray.length - 1];
    if (leaderName.equals(myName)) {
      // My turn for leadership
      _isLeader = true;
      LOG.info("{} acquired leadership! Info: {}", myName, _currentLeaderInfo);
    } else {
      // Watch the ephemeral ZNode before me for a deletion event
      String beforeMe = children.get(children.indexOf(myName) - 1);
      _zkClient.subscribeDataChanges(_basePath + "/" + beforeMe, this);
    }
  }
  /** @return whether this participant currently holds leadership. */
  public synchronized boolean isLeader() {
    return _isLeader;
  }
  /** @return the ZNRecord read from the current leader's ZNode (may be null if it vanished). */
  public synchronized ZNRecord getCurrentLeaderInfo() {
    return _currentLeaderInfo;
  }
  @Override
  public synchronized void handleStateChanged(Watcher.Event.KeeperState state) {
    // NOTE(review): re-running init() on every SyncConnected appears to create a new
    // ephemeral sequential node even on a plain reconnect (not just a new session) —
    // confirm whether stale nodes from the same client can accumulate.
    if (state == Watcher.Event.KeeperState.SyncConnected) {
      init();
    }
  }
  @Override
  public void handleNewSession(String sessionId) {
    // No-op: re-registration is handled via handleStateChanged(SyncConnected).
    return;
  }
  @Override
  public void handleSessionEstablishmentError(Throwable error) {
    // No-op: session errors are not acted on here.
    return;
  }
  @Override
  public void handleDataChange(String s, Object o) {
    // No-op: only deletion of the watched predecessor node matters for leadership.
    return;
  }
  @Override
  public void handleDataDeleted(String s) {
    // The node we were watching (our predecessor) is gone; re-evaluate leadership.
    tryAcquiringLeadership();
  }
}
| 9,338 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/metadatastore
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/metadatastore/datamodel/MetadataStoreShardingKeysByRealm.java
|
package org.apache.helix.rest.metadatastore.datamodel;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collection;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
@JsonPropertyOrder({"realm", "shardingKeys"})
public class MetadataStoreShardingKeysByRealm {
  private String realm;
  private Collection<String> shardingKeys;

  /**
   * JSON creator for deserialization.
   * Explicit names on @JsonProperty are required here: a bare @JsonProperty on a
   * constructor parameter cannot be resolved by Jackson unless the code is compiled
   * with -parameters and the parameter-names module is registered.
   */
  @JsonCreator
  public MetadataStoreShardingKeysByRealm(@JsonProperty("realm") String realm,
      @JsonProperty("shardingKeys") Collection<String> shardingKeys) {
    this.realm = realm;
    this.shardingKeys = shardingKeys;
  }

  @JsonProperty
  public String getRealm() {
    return realm;
  }

  @JsonProperty
  public Collection<String> getShardingKeys() {
    return shardingKeys;
  }

  @Override
  public String toString() {
    return "MetadataStoreShardingKeysByRealm{" + "realm='" + realm + '\'' + ", shardingKeys="
        + shardingKeys + '}';
  }
}
| 9,339 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/metadatastore
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/metadatastore/datamodel/MetadataStoreShardingKey.java
|
package org.apache.helix.rest.metadatastore.datamodel;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
 * A POJO class that represents a sharding key can be easily converted to JSON
 * in REST API response. The JSON object for a sharding key looks like:
 * {
 *   "shardingKey": "/sharding/key/10/abc",
 *   "realm": "realm.github.com"
 * }
 */
@JsonAutoDetect
public class MetadataStoreShardingKey {
  private String shardingKey;
  private String realm;

  /**
   * JSON creator for deserialization.
   * Explicit names on @JsonProperty are required: a bare @JsonProperty on a constructor
   * parameter cannot be resolved by Jackson unless the code is compiled with -parameters
   * and the parameter-names module is registered.
   */
  @JsonCreator
  public MetadataStoreShardingKey(@JsonProperty("shardingKey") String shardingKey,
      @JsonProperty("realm") String realm) {
    this.shardingKey = shardingKey;
    this.realm = realm;
  }

  @JsonProperty
  public String getShardingKey() {
    return shardingKey;
  }

  @JsonProperty
  public String getRealm() {
    return realm;
  }

  @Override
  public String toString() {
    return "MetadataStoreShardingKey{" + "shardingKey='" + shardingKey + '\'' + ", realm='" + realm
        + '\'' + '}';
  }
}
| 9,340 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/HelixRestObjectNameFactory.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Hashtable;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import com.codahale.metrics.jmx.ObjectNameFactory;
import org.apache.helix.HelixException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Creates an {@link ObjectName} that has "name", "type" and "namespace" properties
 * for metrics registry in Helix rest service.
 *
 * <p>It is recommended to only be used within Helix REST.
 */
class HelixRestObjectNameFactory implements ObjectNameFactory {
  // NOTE(review): LOG appears unused within this class; kept as-is.
  private static final Logger LOG = LoggerFactory.getLogger(HelixRestObjectNameFactory.class);
  private static final String KEY_NAME = "name";
  private static final String KEY_TYPE = "type";
  private static final String KEY_NAMESPACE = "namespace";
  private final String _namespace;
  HelixRestObjectNameFactory(String nameSpace) {
    _namespace = nameSpace;
  }
  /**
   * Builds an ObjectName of the form {domain}:name={name},type={type},namespace={namespace}.
   * Domain and property values containing JMX pattern characters are quoted so the result
   * is a concrete ObjectName rather than a query pattern.
   * @throws HelixException wrapping MalformedObjectNameException if a valid name cannot be built
   */
  public ObjectName createName(String type, String domain, String name) {
    Hashtable<String, String> properties = new Hashtable<>();
    properties.put(KEY_NAME, name);
    properties.put(KEY_TYPE, type);
    properties.put(KEY_NAMESPACE, _namespace);
    try {
      /*
       * The only way we can find out if we need to quote the properties is by
       * checking an ObjectName that we've constructed. Eg. when regex is used in
       * object name, quoting is needed.
       */
      ObjectName objectName = new ObjectName(domain, properties);
      boolean needQuote = false;
      // A pattern-like domain must be quoted to be treated literally.
      if (objectName.isDomainPattern()) {
        domain = ObjectName.quote(domain);
        needQuote = true;
      }
      if (objectName.isPropertyValuePattern(KEY_NAME)) {
        properties.put(KEY_NAME, ObjectName.quote(name));
        needQuote = true;
      }
      if (objectName.isPropertyValuePattern(KEY_TYPE)) {
        properties.put(KEY_TYPE, ObjectName.quote(type));
        needQuote = true;
      }
      // Rebuild only if something was quoted; otherwise reuse the first construction.
      return needQuote ? new ObjectName(domain, properties) : objectName;
    } catch (MalformedObjectNameException e) {
      throw new HelixException(String
          .format("Unable to register metrics: domain=%s, name=%s, namespace=%s", domain, name,
              _namespace), e);
    }
  }
}
| 9,341 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/HelixRestMain.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.helix.HelixException;
import org.apache.helix.rest.common.HelixRestNamespace;
import org.apache.helix.rest.server.auditlog.AuditLogger;
import org.apache.helix.rest.server.auditlog.auditloggers.FileBasedAuditLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.yaml.snakeyaml.Yaml;
/**
 * Command-line entry point for the Helix REST server. Parses CLI options (ZK address,
 * port, optional namespace manifest file), builds the namespace list, and runs the server.
 */
public class HelixRestMain {
  // Fixed copy-paste bug: logger was previously created with HelixRestServer.class.
  private static Logger LOG = LoggerFactory.getLogger(HelixRestMain.class);
  private static final String HELP = "help";
  private static final String ZKSERVERADDRESS = "zkSvr";
  private static final String NAMESPACE_MANIFEST_FILE = "namespace-manifest-file";
  private static final String PORT = "port";
  private static final int DEFAULT_PORT = 8100;
  private static final String URI_PREFIX = "/admin/v2";

  // Prints CLI usage help for all supported options.
  private static void printUsage(Options cliOptions) {
    HelpFormatter helpFormatter = new HelpFormatter();
    helpFormatter.printHelp("java " + HelixRestServer.class.getName(), cliOptions);
  }

  // Declares the supported command-line options; only --zkSvr is required.
  private static Options constructCommandLineOptions() {
    Option helpOption =
        OptionBuilder.withLongOpt(HELP).withDescription("Prints command-line options info")
            .create();
    helpOption.setArgs(0);
    helpOption.setRequired(false);
    helpOption.setArgName("print help message");
    Option zkServerOption =
        OptionBuilder.withLongOpt(ZKSERVERADDRESS).withDescription("Provide zookeeper address")
            .create();
    zkServerOption.setArgs(1);
    zkServerOption.setRequired(true);
    zkServerOption.setArgName("ZookeeperServerAddress(Required)");
    Option helixRestNamespaceOption = OptionBuilder.withLongOpt(NAMESPACE_MANIFEST_FILE)
        .withDescription("A yaml file describing helix namespace")
        .create();
    helixRestNamespaceOption.setArgs(1);
    helixRestNamespaceOption.setRequired(false);
    helixRestNamespaceOption.setArgName("NamespaceManifestFile(Optional)");
    Option portOption =
        OptionBuilder.withLongOpt(PORT).withDescription("Provide web service port").create();
    portOption.setArgs(1);
    portOption.setRequired(false);
    portOption.setArgName("web service port, default: " + DEFAULT_PORT);
    Options options = new Options();
    options.addOption(helpOption);
    options.addOption(zkServerOption);
    options.addOption(portOption);
    options.addOption(helixRestNamespaceOption);
    return options;
  }

  /**
   * Parses the CLI args, assembles namespaces (the default one from --zkSvr plus any from
   * the manifest file), then starts the REST server and blocks until it exits.
   */
  private static void processCommandLineArgs(String[] cliArgs) throws Exception {
    CommandLineParser cliParser = new GnuParser();
    Options cliOptions = constructCommandLineOptions();
    CommandLine cmd = null;
    try {
      cmd = cliParser.parse(cliOptions, cliArgs);
    } catch (ParseException pe) {
      LOG.error("RestAdminApplication: failed to parse command-line options: " + pe.toString());
      printUsage(cliOptions);
      System.exit(1);
    }
    int port = DEFAULT_PORT;
    String zkAddr;
    List<HelixRestNamespace> namespaces = new ArrayList<>();
    if (cmd.hasOption(HELP)) {
      printUsage(cliOptions);
      return;
    } else {
      if (cmd.hasOption(PORT)) {
        port = Integer.parseInt(cmd.getOptionValue(PORT));
      }
      zkAddr = String.valueOf(cmd.getOptionValue(ZKSERVERADDRESS));
      // The ZK address from the CLI always forms the (default) namespace.
      namespaces.add(new HelixRestNamespace(zkAddr));
      if (cmd.hasOption(NAMESPACE_MANIFEST_FILE)) {
        constructNamespaceFromConfigFile(String.valueOf(cmd.getOptionValue(NAMESPACE_MANIFEST_FILE)), namespaces);
      }
    }
    final HelixRestServer restServer = new HelixRestServer(namespaces, port, URI_PREFIX,
        Arrays.<AuditLogger>asList(new FileBasedAuditLogger()));
    try {
      restServer.start();
      restServer.join();
    } catch (HelixException ex) {
      LOG.error("Failed to start Helix rest server, " + ex);
    } finally {
      restServer.shutdown();
    }
  }

  /**
   * Reads the YAML namespace manifest and appends one HelixRestNamespace per entry.
   * @param filePath path to the YAML manifest file
   * @param namespaces list to append parsed namespaces to
   * @throws IOException if the manifest file cannot be read
   */
  private static void constructNamespaceFromConfigFile(String filePath, List<HelixRestNamespace> namespaces)
      throws IOException {
    Yaml yaml = new Yaml();
    ArrayList<Map<String, String>> configs;
    // try-with-resources: the FileInputStream was previously never closed (resource leak).
    try (FileInputStream fileInputStream = new FileInputStream(new File(filePath))) {
      @SuppressWarnings("unchecked")
      ArrayList<Map<String, String>> parsed =
          (ArrayList<Map<String, String>>) yaml.load(fileInputStream);
      configs = parsed;
    }
    for (Map<String, String> config : configs) {
      // Currently we don't support adding default namespace through yaml manifest so all
      // namespaces created here will not be default
      // TODO: support specifying default namespace from config file
      namespaces.add(new HelixRestNamespace(
          config.get(HelixRestNamespace.HelixRestNamespaceProperty.NAME.name()),
          HelixRestNamespace.HelixMetadataStoreType.valueOf(
              config.get(HelixRestNamespace.HelixRestNamespaceProperty.METADATA_STORE_TYPE.name())),
          config.get(HelixRestNamespace.HelixRestNamespaceProperty.METADATA_STORE_ADDRESS.name()),
          false, Boolean.parseBoolean(
              config.get(HelixRestNamespace.HelixRestNamespaceProperty.MULTI_ZK_ENABLED.name())),
          config.get(HelixRestNamespace.HelixRestNamespaceProperty.MSDS_ENDPOINT.name())));
    }
  }

  /**
   * @param args
   * @throws Exception
   */
  public static void main(String[] args) throws Exception {
    processCommandLineArgs(args);
  }
}
| 9,342 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/ServerContext.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.helix.BaseDataAccessor;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.SystemPropertyKeys;
import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.manager.zk.ZkBucketDataAccessor;
import org.apache.helix.msdcommon.exception.InvalidRoutingDataException;
import org.apache.helix.rest.metadatastore.ZkMetadataStoreDirectory;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.api.client.RealmAwareZkClient;
import org.apache.helix.zookeeper.constant.RoutingDataReaderType;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.serializer.ByteArraySerializer;
import org.apache.helix.zookeeper.datamodel.serializer.ZNRecordSerializer;
import org.apache.helix.zookeeper.impl.client.FederatedZkClient;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.apache.helix.zookeeper.impl.factory.DedicatedZkClientFactory;
import org.apache.helix.zookeeper.impl.factory.SharedZkClientFactory;
import org.apache.helix.zookeeper.routing.RoutingDataManager;
import org.apache.helix.zookeeper.zkclient.IZkChildListener;
import org.apache.helix.zookeeper.zkclient.IZkDataListener;
import org.apache.helix.zookeeper.zkclient.IZkStateListener;
import org.apache.helix.zookeeper.zkclient.serialize.ZkSerializer;
import org.apache.zookeeper.Watcher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Per-namespace server context holding lazily-initialized ZK clients and Helix API objects
 * (admin, cluster setup, config/data accessors, task drivers) used by the REST resources.
 * All lazily-created resources use double-checked locking on {@code this}; they are rebuilt
 * from scratch when a routing data change is observed (multi-ZK mode).
 */
public class ServerContext implements IZkDataListener, IZkChildListener, IZkStateListener {
  private static final Logger LOG = LoggerFactory.getLogger(ServerContext.class);

  private final String _zkAddr;
  private final String _msdsEndpoint;
  private final boolean _isMultiZkEnabled;
  private volatile RealmAwareZkClient _zkClient;
  private volatile RealmAwareZkClient _byteArrayZkClient;
  private volatile ZKHelixAdmin _zkHelixAdmin;
  private volatile ClusterSetup _clusterSetup;
  private volatile ConfigAccessor _configAccessor;
  // A lazily-initialized base data accessor that reads/writes byte array to ZK
  // TODO: Only read (deserialize) is supported at this time. This baseDataAccessor should support write (serialize) as needs arise
  private volatile ZkBaseDataAccessor<byte[]> _byteArrayZkBaseDataAccessor;
  // 1 Cluster name will correspond to 1 helix data accessor
  private final Map<String, HelixDataAccessor> _helixDataAccessorPool;
  // 1 Cluster name will correspond to 1 task driver
  private final Map<String, TaskDriver> _taskDriverPool;
  // Create ZkBucketDataAccessor for ReadOnlyWagedRebalancer.
  private volatile ZkBucketDataAccessor _zkBucketDataAccessor;

  /**
   * Multi-ZK support
   */
  private final ZkMetadataStoreDirectory _zkMetadataStoreDirectory;
  // Create a dedicated ZkClient for listening to data changes in routing data
  private RealmAwareZkClient _zkClientForRoutingDataListener;

  public ServerContext(String zkAddr) {
    this(zkAddr, false, null);
  }

  /**
   * Initializes a ServerContext for this namespace.
   * @param zkAddr routing ZK address (on multi-zk mode)
   * @param isMultiZkEnabled boolean flag for whether multi-zk mode is enabled
   * @param msdsEndpoint if given, this server context will try to read routing data from this MSDS.
   */
  public ServerContext(String zkAddr, boolean isMultiZkEnabled, String msdsEndpoint) {
    _zkAddr = zkAddr;
    _isMultiZkEnabled = isMultiZkEnabled;
    _msdsEndpoint = msdsEndpoint; // only applicable on multi-zk mode

    // We should NOT initiate _zkClient and anything that depends on _zkClient in
    // constructor, as it is reasonable to start up HelixRestServer first and then
    // ZooKeeper. In this case, initializing _zkClient will fail and HelixRestServer
    // cannot be started correctly.
    _helixDataAccessorPool = new ConcurrentHashMap<>();
    _taskDriverPool = new ConcurrentHashMap<>();

    // Initialize the singleton ZkMetadataStoreDirectory instance to allow it to be closed later
    _zkMetadataStoreDirectory = ZkMetadataStoreDirectory.getInstance();
  }

  /**
   * Lazy initialization of RealmAwareZkClient used throughout the REST server.
   * @return the shared ZNRecord-serialized client
   */
  public RealmAwareZkClient getRealmAwareZkClient() {
    if (_zkClient == null) {
      synchronized (this) {
        if (_zkClient == null) {
          _zkClient = createRealmAwareZkClient(new ZNRecordSerializer());
        }
      }
    }
    return _zkClient;
  }

  /**
   * Returns a RealmAWareZkClient with ByteArraySerializer with double-checked locking.
   * @return the shared byte-array-serialized client
   */
  public RealmAwareZkClient getByteArrayRealmAwareZkClient() {
    if (_byteArrayZkClient == null) {
      synchronized (this) {
        if (_byteArrayZkClient == null) {
          _byteArrayZkClient = createRealmAwareZkClient(new ByteArraySerializer());
        }
      }
    }
    return _byteArrayZkClient;
  }

  /**
   * Main creation logic for RealmAwareZkClient.
   * @param zkSerializer the type of ZkSerializer to use
   * @return a FederatedZkClient on multi-ZK mode, otherwise a shared HelixZkClient
   */
  private RealmAwareZkClient createRealmAwareZkClient(ZkSerializer zkSerializer) {
    // If the multi ZK config is enabled, use FederatedZkClient on multi-realm mode
    RealmAwareZkClient realmAwareZkClient;
    if (_isMultiZkEnabled || Boolean
        .parseBoolean(System.getProperty(SystemPropertyKeys.MULTI_ZK_ENABLED))) {
      try {
        initializeZkClientForRoutingData();
        RealmAwareZkClient.RealmAwareZkConnectionConfig.Builder connectionConfigBuilder =
            new RealmAwareZkClient.RealmAwareZkConnectionConfig.Builder();
        // If MSDS endpoint is set for this namespace, use that instead.
        if (_msdsEndpoint != null && !_msdsEndpoint.isEmpty()) {
          connectionConfigBuilder.setRoutingDataSourceEndpoint(_msdsEndpoint)
              .setRoutingDataSourceType(RoutingDataReaderType.HTTP.name());
        }
        realmAwareZkClient = new FederatedZkClient(connectionConfigBuilder.build(),
            new RealmAwareZkClient.RealmAwareZkClientConfig().setZkSerializer(zkSerializer));
        LOG.info("ServerContext: FederatedZkClient created successfully!");
      } catch (InvalidRoutingDataException | IllegalStateException e) {
        throw new HelixException("Failed to create FederatedZkClient!", e);
      }
    } else {
      // If multi ZK config is not set, just connect to the ZK address given
      HelixZkClient.ZkClientConfig clientConfig = new HelixZkClient.ZkClientConfig();
      clientConfig.setZkSerializer(zkSerializer);
      realmAwareZkClient = SharedZkClientFactory.getInstance()
          .buildZkClient(new HelixZkClient.ZkConnectionConfig(_zkAddr), clientConfig);
    }
    return realmAwareZkClient;
  }

  /**
   * Initialization logic for ZkClient for routing data listener.
   * NOTE: The initialization lifecycle of zkClientForRoutingDataListener is tied to the private
   * volatile zkClient.
   */
  private void initializeZkClientForRoutingData() {
    // Make sure the ServerContext is subscribed to routing data change so that it knows
    // when to reset ZkClients and Helix APIs
    if (_zkClientForRoutingDataListener == null) {
      // Routing data is always in the ZNRecord format
      _zkClientForRoutingDataListener = DedicatedZkClientFactory.getInstance()
          .buildZkClient(new HelixZkClient.ZkConnectionConfig(_zkAddr),
              new HelixZkClient.ZkClientConfig().setZkSerializer(new ZNRecordSerializer()));
    }
    // Refresh data subscription
    _zkClientForRoutingDataListener.unsubscribeAll();
    _zkClientForRoutingDataListener.subscribeRoutingDataChanges(this, this);
    LOG.info("ServerContext: subscribed to routing data in routing ZK at {}!", _zkAddr);
  }

  @Deprecated
  public ZkClient getZkClient() {
    return (ZkClient) getRealmAwareZkClient();
  }

  public HelixAdmin getHelixAdmin() {
    if (_zkHelixAdmin == null) {
      synchronized (this) {
        if (_zkHelixAdmin == null) {
          _zkHelixAdmin = new ZKHelixAdmin(getRealmAwareZkClient());
        }
      }
    }
    return _zkHelixAdmin;
  }

  public ClusterSetup getClusterSetup() {
    if (_clusterSetup == null) {
      synchronized (this) {
        if (_clusterSetup == null) {
          _clusterSetup = new ClusterSetup(getRealmAwareZkClient(), getHelixAdmin());
        }
      }
    }
    return _clusterSetup;
  }

  public TaskDriver getTaskDriver(String clusterName) {
    TaskDriver taskDriver = _taskDriverPool.get(clusterName);
    if (taskDriver == null) {
      synchronized (this) {
        if (!_taskDriverPool.containsKey(clusterName)) {
          _taskDriverPool.put(clusterName, new TaskDriver(getRealmAwareZkClient(), clusterName));
        }
        taskDriver = _taskDriverPool.get(clusterName);
      }
    }
    return taskDriver;
  }

  public ConfigAccessor getConfigAccessor() {
    if (_configAccessor == null) {
      synchronized (this) {
        if (_configAccessor == null) {
          _configAccessor = new ConfigAccessor(getRealmAwareZkClient());
        }
      }
    }
    return _configAccessor;
  }

  public HelixDataAccessor getDataAccessor(String clusterName) {
    HelixDataAccessor dataAccessor = _helixDataAccessorPool.get(clusterName);
    if (dataAccessor == null) {
      synchronized (this) {
        if (!_helixDataAccessorPool.containsKey(clusterName)) {
          ZkBaseDataAccessor<ZNRecord> baseDataAccessor =
              new ZkBaseDataAccessor<>(getRealmAwareZkClient());
          _helixDataAccessorPool.put(clusterName,
              new ZKHelixDataAccessor(clusterName, baseDataAccessor));
        }
        dataAccessor = _helixDataAccessorPool.get(clusterName);
      }
    }
    return dataAccessor;
  }

  /**
   * Returns a lazily-instantiated ZkBaseDataAccessor for the byte array type.
   * @return the shared byte-array base data accessor
   */
  public BaseDataAccessor<byte[]> getByteArrayZkBaseDataAccessor() {
    if (_byteArrayZkBaseDataAccessor == null) {
      synchronized (this) {
        if (_byteArrayZkBaseDataAccessor == null) {
          _byteArrayZkBaseDataAccessor = new ZkBaseDataAccessor<>(getByteArrayRealmAwareZkClient());
        }
      }
    }
    return _byteArrayZkBaseDataAccessor;
  }

  public ZkBucketDataAccessor getZkBucketDataAccessor() {
    // ZkBucketDataAccessor constructor will handle realmZK case (when _zkAddr is null)
    if (_zkBucketDataAccessor == null) {
      synchronized (this) {
        if (_zkBucketDataAccessor == null) {
          _zkBucketDataAccessor = new ZkBucketDataAccessor(getByteArrayRealmAwareZkClient());
        }
      }
    }
    return _zkBucketDataAccessor;
  }

  /**
   * Closes every ZK-backed resource this context may have lazily created.
   * FIX: previously only _zkClient was closed, leaking the byte-array client, the byte-array
   * base data accessor, and the bucket data accessor (all of which resetZkResources() closes).
   */
  public void close() {
    if (_zkClient != null) {
      _zkClient.close();
    }
    if (_byteArrayZkClient != null) {
      _byteArrayZkClient.close();
    }
    if (_byteArrayZkBaseDataAccessor != null) {
      _byteArrayZkBaseDataAccessor.close();
    }
    if (_zkBucketDataAccessor != null) {
      _zkBucketDataAccessor.close();
    }
    if (_zkMetadataStoreDirectory != null) {
      _zkMetadataStoreDirectory.close();
    }
    if (_zkClientForRoutingDataListener != null) {
      _zkClientForRoutingDataListener.close();
    }
  }

  @Override
  public void handleChildChange(String parentPath, List<String> currentChilds) {
    if (_zkClientForRoutingDataListener == null || _zkClientForRoutingDataListener.isClosed()) {
      return;
    }
    // Resubscribe
    _zkClientForRoutingDataListener.unsubscribeAll();
    _zkClientForRoutingDataListener.subscribeRoutingDataChanges(this, this);
    resetZkResources();
  }

  @Override
  public void handleDataChange(String dataPath, Object data) {
    if (_zkClientForRoutingDataListener == null || _zkClientForRoutingDataListener.isClosed()) {
      return;
    }
    resetZkResources();
  }

  @Override
  public void handleDataDeleted(String dataPath) {
    if (_zkClientForRoutingDataListener == null || _zkClientForRoutingDataListener.isClosed()) {
      return;
    }
    // Resubscribe
    _zkClientForRoutingDataListener.unsubscribeAll();
    _zkClientForRoutingDataListener.subscribeRoutingDataChanges(this, this);
    resetZkResources();
  }

  @Override
  public void handleStateChanged(Watcher.Event.KeeperState state) {
    if (_zkClientForRoutingDataListener == null || _zkClientForRoutingDataListener.isClosed()) {
      return;
    }
    // Resubscribe
    _zkClientForRoutingDataListener.unsubscribeAll();
    _zkClientForRoutingDataListener.subscribeRoutingDataChanges(this, this);
    resetZkResources();
  }

  @Override
  public void handleNewSession(String sessionId) {
    if (_zkClientForRoutingDataListener == null || _zkClientForRoutingDataListener.isClosed()) {
      return;
    }
    // Resubscribe
    _zkClientForRoutingDataListener.unsubscribeAll();
    _zkClientForRoutingDataListener.subscribeRoutingDataChanges(this, this);
    resetZkResources();
  }

  @Override
  public void handleSessionEstablishmentError(Throwable error) {
    if (_zkClientForRoutingDataListener == null || _zkClientForRoutingDataListener.isClosed()) {
      return;
    }
    // Resubscribe
    _zkClientForRoutingDataListener.unsubscribeAll();
    _zkClientForRoutingDataListener.subscribeRoutingDataChanges(this, this);
    resetZkResources();
  }

  /**
   * Resets all internally cached routing data by closing and nullifying both zkClient and
   * byteArrayZkClient and Helix APIs.
   * This operation is considered costly since it triggers reconnection of all I/O resources, but
   * this is okay because routing data update should be infrequent.
   */
  private void resetZkResources() {
    synchronized (this) {
      LOG.info("ServerContext: Resetting ZK resources due to routing data change! Routing ZK: {}",
          _zkAddr);
      try {
        // Reset RoutingDataManager's cache
        RoutingDataManager.getInstance().reset(true);
        // Close all ZkClients
        if (_zkClient != null && !_zkClient.isClosed()) {
          _zkClient.close();
        }
        if (_byteArrayZkClient != null && !_byteArrayZkClient.isClosed()) {
          _byteArrayZkClient.close();
        }
        _zkClient = null;
        _byteArrayZkClient = null;

        // Close all Helix APIs
        if (_zkHelixAdmin != null) {
          _zkHelixAdmin.close();
          _zkHelixAdmin = null;
        }
        if (_clusterSetup != null) {
          _clusterSetup.close();
          _clusterSetup = null;
        }
        if (_configAccessor != null) {
          _configAccessor.close();
          _configAccessor = null;
        }
        if (_byteArrayZkBaseDataAccessor != null) {
          _byteArrayZkBaseDataAccessor.close();
          _byteArrayZkBaseDataAccessor = null;
        }
        if (_zkBucketDataAccessor != null) {
          _zkBucketDataAccessor.close();
          _zkBucketDataAccessor = null;
        }
        _helixDataAccessorPool.clear();
        _taskDriverPool.clear();
      } catch (Exception e) {
        LOG.error("Failed to reset ZkClient and Helix APIs in ServerContext!", e);
      }
    }
  }
}
| 9,343 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/HelixRestServer.java
|
package org.apache.helix.rest.server;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import javax.net.ssl.SSLContext;
import com.codahale.metrics.Clock;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Reservoir;
import com.codahale.metrics.SharedMetricRegistries;
import com.codahale.metrics.SlidingTimeWindowReservoir;
import com.codahale.metrics.jersey2.InstrumentedResourceMethodApplicationListener;
import com.codahale.metrics.jmx.JmxReporter;
import org.apache.helix.HelixException;
import org.apache.helix.rest.acl.AclRegister;
import org.apache.helix.rest.acl.NoopAclRegister;
import org.apache.helix.rest.common.ContextPropertyKeys;
import org.apache.helix.rest.common.HelixRestNamespace;
import org.apache.helix.rest.common.ServletType;
import org.apache.helix.rest.server.auditlog.AuditLogger;
import org.apache.helix.rest.server.authValidator.AuthValidator;
import org.apache.helix.rest.server.authValidator.NoopAuthValidator;
import org.apache.helix.rest.server.filters.AuditLogFilter;
import org.apache.helix.rest.server.filters.CORSFilter;
import org.apache.helix.rest.server.filters.ClusterAuthFilter;
import org.apache.helix.rest.server.filters.NamespaceAuthFilter;
import org.apache.helix.rest.server.resources.helix.AbstractHelixResource;
import org.apache.helix.rest.server.resources.metadata.NamespacesAccessor;
import org.apache.helix.rest.server.resources.metadatastore.MetadataStoreDirectoryAccessor;
import org.apache.helix.rest.server.resources.zookeeper.ZooKeeperAccessor;
import org.eclipse.jetty.http.HttpVersion;
import org.eclipse.jetty.server.HttpConfiguration;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.SecureRequestCustomizer;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.server.SslConnectionFactory;
import org.eclipse.jetty.servlet.DefaultServlet;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import org.eclipse.jetty.util.ssl.SslContextFactory;
import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.server.ServerProperties;
import org.glassfish.jersey.servlet.ServletContainer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.swagger.jaxrs.config.DefaultJaxrsConfig;
import io.swagger.jaxrs.config.BeanConfig;
import io.swagger.jaxrs.config.DefaultReaderConfig;
import io.swagger.jaxrs.config.ReaderConfig;
import io.swagger.jaxrs.listing.ApiListingResource;
import io.swagger.jaxrs.listing.SwaggerSerializers;
import javax.servlet.ServletContext;
import javax.ws.rs.ApplicationPath;
import javax.ws.rs.core.Context;
/**
 * Embedded Jetty server hosting the Helix REST API. One servlet (and one {@link ServerContext})
 * is created per configured {@link HelixRestNamespace}; per-namespace metrics are exported via
 * JMX. Also wires Swagger for API listing and optional CORS/auth/audit-log filters.
 */
public class HelixRestServer {
  private static final Logger LOG = LoggerFactory.getLogger(HelixRestServer.class);
  private static final String REST_DOMAIN = "org.apache.helix.rest";
  private static final String CORS_ENABLED = "cors.enabled";

  // TODO: consider moving the following static context to ServerContext or any other place
  public static SSLContext REST_SERVER_SSL_CONTEXT;

  private int _port;
  private String _urlPrefix;
  private Server _server;
  private List<JmxReporter> _jmxReporterList;
  private List<HelixRestNamespace> _helixNamespaces;
  private ServletContextHandler _servletContextHandler;
  private List<AuditLogger> _auditLoggers;
  private AuthValidator _clusterAuthValidator;
  private AuthValidator _namespaceAuthValidator;
  private AclRegister _aclRegister;

  // Key is name of namespace, value of the resource config of that namespace
  private Map<String, ResourceConfig> _resourceConfigMap;

  public HelixRestServer(String zkAddr, int port, String urlPrefix) {
    this(zkAddr, port, urlPrefix, Collections.<AuditLogger>emptyList());
  }

  public HelixRestServer(String zkAddr, int port, String urlPrefix, List<AuditLogger> auditLoggers) {
    // Create default namespace using zkAddr
    ArrayList<HelixRestNamespace> namespaces = new ArrayList<>();
    namespaces.add(new HelixRestNamespace(HelixRestNamespace.DEFAULT_NAMESPACE_NAME,
        HelixRestNamespace.HelixMetadataStoreType.ZOOKEEPER, zkAddr, true));
    init(namespaces, port, urlPrefix, auditLoggers);
  }

  public HelixRestServer(List<HelixRestNamespace> namespaces, int port, String urlPrefix,
      List<AuditLogger> auditLoggers) {
    init(namespaces, port, urlPrefix, auditLoggers);
  }

  public HelixRestServer(List<HelixRestNamespace> namespaces, int port, String urlPrefix,
      List<AuditLogger> auditLoggers, AuthValidator clusterAuthValidator,
      AuthValidator namespaceAuthValidator, AclRegister aclRegister) {
    init(namespaces, port, urlPrefix, auditLoggers, clusterAuthValidator, namespaceAuthValidator,
        aclRegister);
  }

  private void init(List<HelixRestNamespace> namespaces, int port, String urlPrefix,
      List<AuditLogger> auditLoggers) {
    init(namespaces, port, urlPrefix, auditLoggers, new NoopAuthValidator(),
        new NoopAuthValidator(), new NoopAclRegister());
  }

  private void init(List<HelixRestNamespace> namespaces, int port, String urlPrefix,
      List<AuditLogger> auditLoggers, AuthValidator clusterAuthValidator,
      AuthValidator namespaceAuthValidator, AclRegister aclRegister) {
    if (namespaces.size() == 0) {
      throw new IllegalArgumentException(
          "No namespace specified! Please provide ZOOKEEPER address or namespace manifest.");
    }
    _port = port;
    _urlPrefix = urlPrefix;
    _server = new Server(_port);
    _jmxReporterList = new ArrayList<>();
    _auditLoggers = auditLoggers;
    _resourceConfigMap = new HashMap<>();
    _servletContextHandler = new ServletContextHandler(_server, _urlPrefix);
    _helixNamespaces = namespaces;
    _clusterAuthValidator = clusterAuthValidator;
    _namespaceAuthValidator = namespaceAuthValidator;
    _aclRegister = aclRegister;

    // Initialize all namespaces.
    // If there is not a default namespace (namespace.isDefault() is false),
    // endpoint "/namespaces" will be disabled.
    try {
      for (HelixRestNamespace namespace : _helixNamespaces) {
        if (namespace.isDefault()) {
          LOG.info("Creating default servlet for default namespace");
          prepareServlet(namespace, ServletType.DEFAULT_SERVLET);
        } else {
          LOG.info("Creating common servlet for namespace {}", namespace.getName());
          prepareServlet(namespace, ServletType.COMMON_SERVLET);
        }
      }
      initializeSwagger(_servletContextHandler.getServletContext());

      // Setup Swagger API
      ServletHolder swaggerServlet = _servletContextHandler.addServlet(DefaultJaxrsConfig.class, "/api/*");
      swaggerServlet.setInitOrder(1);
      swaggerServlet.setInitParameter("com.sun.jersey.config.property.packages",
          "com.api.resources;org.apache.helix.rest.server;org.apache.helix.rest.server.resources;"
              + "org.apache.helix.rest.server.helix;io.swagger.jaxrs.json;io.swagger.jaxrs.listing");
    } catch (Exception e) {
      // Make sure any ServerContext created so far is released before propagating.
      cleanupResourceConfigs();
      throw e;
    }

    // Ensure a clean shutdown (stop Jetty, JMX reporters, ZK clients) on JVM exit.
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
      @Override public void run() {
        shutdown();
      }
    }));
  }

  private void prepareServlet(HelixRestNamespace namespace, ServletType type) {
    String resourceConfigMapKey = getResourceConfigMapKey(type, namespace);
    if (_resourceConfigMap.containsKey(resourceConfigMapKey)) {
      throw new IllegalArgumentException(
          String.format("Duplicated namespace name \"%s\"", namespace.getName()));
    }

    // Prepare resource config
    ResourceConfig config = getResourceConfig(namespace, type);
    _resourceConfigMap.put(resourceConfigMapKey, config);
    initMetricRegistry(config, namespace.getName());

    // Initialize servlet
    initServlet(config, String.format(type.getServletPathSpecTemplate(), namespace.getName()), namespace.isDefault());
  }

  private String getResourceConfigMapKey(ServletType type, HelixRestNamespace namespace) {
    return String.format("%s_%s", type.name(), namespace.getName());
  }

  protected ResourceConfig getResourceConfig(HelixRestNamespace namespace, ServletType type) {
    ResourceConfig cfg = new ResourceConfig();
    cfg.packages(type.getServletPackageArray());
    cfg.setApplicationName(namespace.getName());

    cfg.property(ContextPropertyKeys.SERVER_CONTEXT.name(),
        new ServerContext(namespace.getMetadataStoreAddress(), namespace.isMultiZkEnabled(),
            namespace.getMsdsEndpoint()));
    if (type == ServletType.DEFAULT_SERVLET) {
      cfg.property(ContextPropertyKeys.ALL_NAMESPACES.name(), _helixNamespaces);
    }
    cfg.property(ContextPropertyKeys.METADATA.name(), namespace);
    cfg.property(ContextPropertyKeys.ACL_REGISTER.name(), _aclRegister);

    if (Boolean.getBoolean(CORS_ENABLED)) {
      // NOTE: CORS is disabled by default unless otherwise specified in System Properties.
      cfg.register(new CORSFilter());
    }
    cfg.register(new AuditLogFilter(_auditLoggers));
    cfg.register(new ClusterAuthFilter(_clusterAuthValidator));
    cfg.register(new NamespaceAuthFilter(_namespaceAuthValidator));
    return cfg;
  }

  /*
   * Initialize metric registry and jmx reporter for each namespace.
   */
  private void initMetricRegistry(ResourceConfig cfg, String namespace) {
    MetricRegistry metricRegistry = new MetricRegistry();
    // Set the sliding time window to be 1 minute for now
    Supplier<Reservoir> reservoirSupplier = () -> {
      return new SlidingTimeWindowReservoir(60, TimeUnit.SECONDS);
    };
    cfg.register(
        new InstrumentedResourceMethodApplicationListener(metricRegistry, Clock.defaultClock(),
            false, reservoirSupplier));
    SharedMetricRegistries.add(namespace, metricRegistry);

    // JmxReporter doesn't have an option to specify namespace for each servlet,
    // we use a customized object name factory to get and insert namespace to object name.
    JmxReporter jmxReporter = JmxReporter.forRegistry(metricRegistry)
        .inDomain(REST_DOMAIN)
        .createsObjectNamesWith(new HelixRestObjectNameFactory(namespace))
        .build();
    jmxReporter.start();
    _jmxReporterList.add(jmxReporter);
  }

  private void initServlet(ResourceConfig cfg, String servletPathSpec, boolean isDefault) {
    ServletHolder servlet = new ServletHolder(new ServletContainer(cfg));
    if (isDefault) {
      servlet.setInitOrder(0);
    }
    _servletContextHandler.addServlet(servlet, servletPathSpec);
  }

  /**
   * Starts the underlying Jetty server.
   * @throws HelixException if Jetty fails to start; the original failure is chained as cause
   */
  public void start() throws HelixException, InterruptedException {
    try {
      _server.start();
    } catch (Exception ex) {
      // Pass the exception as the logger throwable argument and chain it as the cause so the
      // original stack trace is not lost (previously only ex.toString() was kept).
      LOG.error("Failed to start Helix rest server", ex);
      throw new HelixException("Failed to start Helix rest server! " + ex, ex);
    }

    LOG.info("Helix rest server started!");
  }

  public void join() {
    if (_server != null) {
      try {
        _server.join();
      } catch (InterruptedException e) {
        LOG.warn("Join on Helix rest server get interrupted!", e);
        // Restore the interrupt status so callers can observe the interruption.
        Thread.currentThread().interrupt();
      }
    }
  }

  public synchronized void shutdown() {
    if (_server != null) {
      try {
        _server.stop();
        LOG.info("Helix rest server stopped!");
      } catch (Exception ex) {
        LOG.error("Failed to stop Helix rest server", ex);
      }
    }
    _jmxReporterList.forEach(JmxReporter::stop);
    _jmxReporterList.clear();
    cleanupResourceConfigs();
  }

  private void cleanupResourceConfigs() {
    for (Map.Entry<String, ResourceConfig> e : _resourceConfigMap.entrySet()) {
      ServerContext ctx = (ServerContext) e.getValue().getProperty(ContextPropertyKeys.SERVER_CONTEXT.name());
      if (ctx == null) {
        LOG.info("Server context for servlet " + e.getKey() + " is null.");
      } else {
        LOG.info("Closing context for servlet " + e.getKey());
        ctx.close();
      }
    }
  }

  public void setupSslServer(int port, SslContextFactory.Server sslContextFactory) {
    if (_server != null && port > 0) {
      try {
        HttpConfiguration https = new HttpConfiguration();
        https.addCustomizer(new SecureRequestCustomizer());
        ServerConnector sslConnector = new ServerConnector(
            _server,
            new SslConnectionFactory(sslContextFactory, HttpVersion.HTTP_1_1.asString()),
            new HttpConnectionFactory(https));
        sslConnector.setPort(port);
        _server.addConnector(sslConnector);
        LOG.info("Helix SSL rest server is ready to start.");
      } catch (Exception ex) {
        LOG.error("Failed to setup Helix SSL rest server", ex);
      }
    }
  }

  private void initializeSwagger(ServletContext servletContext) {
    BeanConfig config = new BeanConfig();
    config.setTitle("Helix REST API");
    config.setVersion("1.0.0"); // FIXME: Pick this info from META-INF
    config.setResourcePackage("org.apache.helix.rest.server");
    config.setPrettyPrint(true);
    config.setScan(true);
  }

  /**
   * Register a SSLContext so that it could be used to create HTTPS clients.
   * @param sslContext
   */
  public void registerServerSSLContext(SSLContext sslContext) {
    REST_SERVER_SSL_CONTEXT = sslContext;
  }
}
| 9,344 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/filters/ClusterAuth.java
|
package org.apache.helix.rest.server.filters;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import javax.ws.rs.NameBinding;
/**
 * JAX-RS name-binding annotation marking resource classes or methods whose requests must pass
 * cluster-level authorization; requests to annotated endpoints are intercepted by the
 * {@code @ClusterAuth}-bound {@code ClusterAuthFilter}.
 */
@NameBinding
@Target({ElementType.TYPE, ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
public @interface ClusterAuth {
}
| 9,345 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/filters/ClusterAuthFilter.java
|
package org.apache.helix.rest.server.filters;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerRequestFilter;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.Provider;
import org.apache.helix.rest.server.authValidator.AuthValidator;
/**
 * Request filter bound to {@code @ClusterAuth} endpoints: consults the configured
 * {@link AuthValidator} and rejects unauthorized requests with HTTP 403 (FORBIDDEN).
 */
@ClusterAuth
@Provider
public class ClusterAuthFilter implements ContainerRequestFilter {
  AuthValidator _authValidator;

  public ClusterAuthFilter(AuthValidator authValidator) {
    _authValidator = authValidator;
  }

  @Override
  public void filter(ContainerRequestContext request) {
    // Let authorized requests pass through untouched; abort everything else with 403.
    boolean authorized = _authValidator.validate(request);
    if (authorized) {
      return;
    }
    request.abortWith(Response.status(Response.Status.FORBIDDEN).build());
  }
}
| 9,346 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/filters/NamespaceAuthFilter.java
|
package org.apache.helix.rest.server.filters;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerRequestFilter;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.Provider;
import org.apache.helix.rest.server.authValidator.AuthValidator;
/**
 * Request filter bound to {@code @NamespaceAuth} endpoints: consults the configured
 * {@link AuthValidator} and rejects unauthorized requests with HTTP 403 (FORBIDDEN).
 */
@NamespaceAuth
@Provider
public class NamespaceAuthFilter implements ContainerRequestFilter {
  AuthValidator _authValidator;

  public NamespaceAuthFilter(AuthValidator authValidator) {
    _authValidator = authValidator;
  }

  @Override
  public void filter(ContainerRequestContext request) {
    // Let authorized requests pass through untouched; abort everything else with 403.
    boolean authorized = _authValidator.validate(request);
    if (authorized) {
      return;
    }
    request.abortWith(Response.status(Response.Status.FORBIDDEN).build());
  }
}
| 9,347 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/filters/NamespaceAuth.java
|
package org.apache.helix.rest.server.filters;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import javax.ws.rs.NameBinding;
/**
 * JAX-RS name-binding annotation marking resource classes or methods that require
 * namespace-level authorization. Resources annotated with this are intercepted by
 * the request filter carrying the same annotation (see {@code NamespaceAuthFilter}).
 */
@NameBinding
@Target({ElementType.TYPE, ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
public @interface NamespaceAuth {
}
| 9,348 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/filters/CORSFilter.java
|
package org.apache.helix.rest.server.filters;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerRequestFilter;
import javax.ws.rs.container.ContainerResponseContext;
import javax.ws.rs.container.ContainerResponseFilter;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.Provider;
@Provider
public class CORSFilter implements ContainerRequestFilter, ContainerResponseFilter {
  /**
   * Short-circuits CORS preflight (OPTIONS) requests with an immediate 200 response
   * that allows whatever methods and headers the caller asked for.
   */
  @Override
  public void filter(ContainerRequestContext request) throws IOException {
    // handle preflight
    if (request.getMethod().equalsIgnoreCase("OPTIONS")) {
      Response.ResponseBuilder builder = Response.ok();
      // NOTE: Allow whichever HTTP Methods requested in the incoming request because the cluster
      // administrator must be able to trigger such operations
      // Helix REST uses GET, PUT, DELETE, POST
      String requestMethods = request.getHeaderString("Access-Control-Request-Method");
      if (requestMethods != null) {
        builder.header("Access-Control-Allow-Methods", requestMethods);
      }
      // NOTE: All headers must be allowed for preflight
      String allowHeaders = request.getHeaderString("Access-Control-Request-Headers");
      if (allowHeaders != null) {
        builder.header("Access-Control-Allow-Headers", allowHeaders);
      }
      request.abortWith(builder.build());
    }
  }

  /**
   * Adds CORS response headers to every response (including aborted preflights).
   */
  @Override
  public void filter(ContainerRequestContext request,
      ContainerResponseContext response) throws IOException {
    // Per the Fetch/CORS specification, a wildcard "Access-Control-Allow-Origin: *"
    // must not be combined with "Access-Control-Allow-Credentials: true": browsers
    // reject credentialed responses in that case. Echo the caller's Origin instead,
    // and fall back to the wildcard for clients that send no Origin header.
    String origin = request.getHeaderString("Origin");
    response.getHeaders().putSingle("Access-Control-Allow-Origin",
        origin != null ? origin : "*");
    if (origin != null) {
      // The response now varies by Origin, so intermediary caches must key on it.
      response.getHeaders().add("Vary", "Origin");
    }
    response.getHeaders().putSingle("Access-Control-Allow-Credentials", "true");
  }
}
| 9,349 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/filters/AuditLogFilter.java
|
package org.apache.helix.rest.server.filters;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerRequestFilter;
import javax.ws.rs.container.ContainerResponseContext;
import javax.ws.rs.container.ContainerResponseFilter;
import javax.ws.rs.container.PreMatching;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.Provider;
import com.google.common.io.CharStreams;
import org.apache.helix.rest.common.HelixRestUtils;
import org.apache.helix.rest.server.auditlog.AuditLog;
import org.apache.helix.rest.server.auditlog.AuditLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Provider
@PreMatching
public class AuditLogFilter implements ContainerRequestFilter, ContainerResponseFilter {
  private static Logger _logger = LoggerFactory.getLogger(AuditLogFilter.class.getName());

  @Context
  private HttpServletRequest _servletRequest;

  // Sinks that receive the completed audit log; may be null (auditing disabled).
  private List<AuditLogger> _auditLoggers;

  public AuditLogFilter(List<AuditLogger> auditLoggers) {
    _auditLoggers = auditLoggers;
  }

  /**
   * Captures request metadata (namespace, path, method, headers, principal, client
   * address, entity body) into an {@link AuditLog.Builder} and stashes it on the
   * request so the response filter can complete and emit it.
   */
  @Override
  public void filter(ContainerRequestContext request) throws IOException {
    AuditLog.Builder auditLogBuilder = new AuditLog.Builder();
    auditLogBuilder.namespace(getNamespace())
        .requestPath(request.getUriInfo().getPath())
        .httpMethod(request.getMethod())
        .startTime(new Date())
        .requestHeaders(getHeaders(request.getHeaders()))
        .principal(_servletRequest.getUserPrincipal())
        .clientIP(_servletRequest.getRemoteAddr())
        .clientHostPort(_servletRequest.getRemoteHost() + ":" + _servletRequest.getRemotePort());
    String entity = getEntity(request.getEntityStream());
    auditLogBuilder.requestEntity(entity);
    // getEntity() returns null when there is no entity stream or reading it failed;
    // guard against it so we do not NPE while restoring the (consumed) stream.
    if (entity != null) {
      InputStream stream = new ByteArrayInputStream(entity.getBytes(StandardCharsets.UTF_8));
      request.setEntityStream(stream);
    }
    request.setProperty(AuditLog.ATTRIBUTE_NAME, auditLogBuilder);
  }

  /**
   * Completes the audit log started in the request filter (end time, status code,
   * response entity) and writes it to every configured {@link AuditLogger}.
   * Any failure here is logged and swallowed so auditing never breaks the response.
   */
  @Override
  public void filter(ContainerRequestContext request, ContainerResponseContext response)
      throws IOException {
    AuditLog.Builder auditLogBuilder;
    try {
      auditLogBuilder = (AuditLog.Builder) request.getProperty(AuditLog.ATTRIBUTE_NAME);
      auditLogBuilder.completeTime(new Date()).responseCode(response.getStatus());
      Object entity = response.getEntity();
      if (entity instanceof String) {
        auditLogBuilder.responseEntity((String) response.getEntity());
      }
      AuditLog auditLog = auditLogBuilder.build();
      if (_auditLoggers != null) {
        for (AuditLogger logger : _auditLoggers) {
          logger.write(auditLog);
        }
      }
    } catch (Exception ex) {
      // Pass the throwable to the logger so the stack trace is preserved.
      _logger.error("Failed to add audit log", ex);
    }
  }

  // Flattens the header map into "name:[values]" strings for the audit record.
  private List<String> getHeaders(MultivaluedMap<String, String> headersMap) {
    List<String> headers = new ArrayList<>();
    for (String key : headersMap.keySet()) {
      headers.add(key + ":" + headersMap.get(key));
    }
    return headers;
  }

  /**
   * Reads the whole entity stream as UTF-8 text.
   * @return the entity body, or {@code null} when the stream is absent or unreadable
   */
  private String getEntity(InputStream entityStream) {
    if (entityStream != null) {
      try {
        return CharStreams.toString(new InputStreamReader(entityStream, StandardCharsets.UTF_8));
      } catch (IOException e) {
        _logger.warn("Failed to parse input entity stream", e);
      }
    }
    return null;
  }

  // Derives the Helix REST namespace from the servlet path of the current request.
  private String getNamespace() {
    String servletPath = _servletRequest.getServletPath();
    return HelixRestUtils.getNamespaceFromServletPath(servletPath);
  }
}
| 9,350 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources/AbstractResource.java
|
package org.apache.helix.rest.server.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.io.StringWriter;
import java.util.HashMap;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.Consumes;
import javax.ws.rs.Produces;
import javax.ws.rs.core.Application;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.SerializationFeature;
import org.apache.helix.HelixException;
import org.apache.helix.rest.common.ContextPropertyKeys;
import org.apache.helix.rest.common.HelixRestNamespace;
import org.apache.helix.rest.server.auditlog.AuditLog;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.introspect.CodehausJacksonIntrospector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Produces({MediaType.APPLICATION_JSON, MediaType.TEXT_PLAIN})
@Consumes({MediaType.APPLICATION_JSON, MediaType.TEXT_PLAIN})
public class AbstractResource {
  private static Logger _logger = LoggerFactory.getLogger(AbstractResource.class.getName());

  /** Common JSON property names used in responses. */
  public enum Properties {
    id,
    disabled,
    history,
    count,
    error
  }

  /** Commands accepted by subclass endpoints via the {@code command} query parameter. */
  public enum Command {
    activate,
    deactivate,
    addInstanceTag,
    addVirtualTopologyGroup,
    expand,
    enable,
    disable,
    enableMaintenanceMode,
    disableMaintenanceMode,
    enablePartitions,
    disablePartitions,
    update,
    add,
    delete,
    stoppable,
    rebalance,
    reset,
    resetPartitions,
    removeInstanceTag,
    addResource,
    addWagedResource,
    getResource,
    validateWeight,
    enableWagedRebalance,
    enableWagedRebalanceForAllResources,
    purgeOfflineParticipants,
    getInstance,
    getAllInstances,
    setInstanceOperation, // TODO: Name is just a place holder, may change in future
    onDemandRebalance
  }

  @Context
  protected Application _application;

  @Context
  protected HttpServletRequest _servletRequest;

  // Lazily resolved from the servlet request attribute set by AuditLogFilter.
  protected AuditLog.Builder _auditLogBuilder;

  /** Records an exception on the current request's audit log. */
  protected void addExceptionToAuditLog(Exception ex) {
    if (_auditLogBuilder == null) {
      _auditLogBuilder =
          (AuditLog.Builder) _servletRequest.getAttribute(AuditLog.ATTRIBUTE_NAME);
    }
    _auditLogBuilder.addException(ex);
  }

  /** @return a bare 500 response. */
  protected Response serverError() {
    return Response.serverError().build();
  }

  /** @return a 500 response whose entity is {@code {"error": errorMsg}}. */
  protected Response serverError(String errorMsg) {
    return Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(errorMsgToJson(errorMsg))
        .build();
  }

  /** Audits the exception and returns a 500 response carrying its message. */
  protected Response serverError(Exception ex) {
    addExceptionToAuditLog(ex);
    return Response.serverError().entity(errorMsgToJson(ex.getMessage())).build();
  }

  /** @return a bare 404 response. */
  protected Response notFound() {
    return Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN).build();
  }

  /** @return a 404 response whose entity is {@code {"error": errorMsg}}. */
  protected Response notFound(String errorMsg) {
    return Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN)
        .entity(errorMsgToJson(errorMsg)).build();
  }

  /** @return a 200 response with the given entity as JSON. */
  protected Response OK(Object entity) {
    return Response.ok(entity, MediaType.APPLICATION_JSON_TYPE).build();
  }

  /** @return a bare 200 response. */
  protected Response OK() {
    return Response.ok().build();
  }

  /** @return a 200 response with an optional extra header (skipped when name is empty). */
  protected Response OKWithHeader(Object entity, String headerName, Object headerValue) {
    if (headerName == null || headerName.isEmpty()) {
      return OK(entity);
    } else {
      return Response.ok(entity).header(headerName, headerValue).build();
    }
  }

  /** @return a bare 201 response. */
  protected Response created() {
    return Response.status(Response.Status.CREATED).build();
  }

  /** @return a 400 response whose entity is {@code {"error": errorMsg}}. */
  protected Response badRequest(String errorMsg) {
    return Response.status(Response.Status.BAD_REQUEST).entity(errorMsgToJson(errorMsg))
        .type(MediaType.TEXT_PLAIN).build();
  }

  // Wraps an error message as {"error": msg}; falls back to the raw message if
  // serialization fails.
  private String errorMsgToJson(String error) {
    try {
      Map<String, String> errorMap = new HashMap<>();
      errorMap.put(Properties.error.name(), error);
      return toJson(errorMap);
    } catch (IOException e) {
      _logger.error("Failed to convert " + error + " to JSON string", e);
      return error;
    }
  }

  protected Response JSONRepresentation(Object entity) {
    return JSONRepresentation(entity, null, null);
  }

  /**
   * Any metadata about the response could be conveyed through the entity headers.
   * More details can be found at 'REST-API-Design-Rulebook' -- Ch4 Metadata Design
   */
  protected Response JSONRepresentation(Object entity, String headerName, Object headerValue) {
    try {
      String jsonStr = toJson(entity);
      return OKWithHeader(jsonStr, headerName, headerValue);
    } catch (IOException e) {
      _logger.error("Failed to convert " + entity + " to JSON response", e);
      return serverError();
    }
  }

  // Shared, pre-configured mapper. INDENT_OUTPUT is enabled once here rather than
  // inside toJson(): re-configuring an ObjectMapper that is already in use is not
  // thread-safe, and the repeated enable() call was redundant after the first use.
  protected static ObjectMapper OBJECT_MAPPER =
      new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT);

  // Needs a separate object reader for ZNRecord annotated with Jackson 1
  // TODO: remove AnnotationIntrospector config once ZNRecord upgrades Jackson
  protected static ObjectReader ZNRECORD_READER = new ObjectMapper()
      .setAnnotationIntrospector(new CodehausJacksonIntrospector())
      .readerFor(ZNRecord.class);

  /** Serializes the object to pretty-printed JSON with a trailing newline. */
  protected static String toJson(Object object)
      throws IOException {
    StringWriter sw = new StringWriter();
    OBJECT_MAPPER.writeValue(sw, object);
    sw.append('\n');
    return sw.toString();
  }

  /**
   * Parses a {@link Command} from its string form.
   * @throws HelixException when the string is null or not a known command
   */
  protected Command getCommand(String commandStr) throws HelixException {
    if (commandStr == null) {
      throw new HelixException("Command string is null!");
    }
    try {
      return Command.valueOf(commandStr);
    } catch (IllegalArgumentException ex) {
      throw new HelixException("Unknown command: " + commandStr);
    }
  }

  /** @return the name of the Helix REST namespace serving this request. */
  protected String getNamespace() {
    HelixRestNamespace namespace =
        (HelixRestNamespace) _application.getProperties().get(ContextPropertyKeys.METADATA.name());
    return namespace.getName();
  }
}
| 9,351 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources/metadatastore/MetadataStoreDirectoryAccessor.java
|
package org.apache.helix.rest.server.resources.metadatastore;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.stream.Collectors;
import javax.annotation.PostConstruct;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import com.codahale.metrics.annotation.ResponseMetered;
import com.codahale.metrics.annotation.Timed;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.msdcommon.constant.MetadataStoreRoutingConstants;
import org.apache.helix.msdcommon.exception.InvalidRoutingDataException;
import org.apache.helix.rest.common.ContextPropertyKeys;
import org.apache.helix.rest.common.HelixRestNamespace;
import org.apache.helix.rest.common.HelixRestUtils;
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.metadatastore.MetadataStoreDirectory;
import org.apache.helix.rest.metadatastore.ZkMetadataStoreDirectory;
import org.apache.helix.rest.metadatastore.datamodel.MetadataStoreShardingKey;
import org.apache.helix.rest.metadatastore.datamodel.MetadataStoreShardingKeysByRealm;
import org.apache.helix.rest.server.filters.NamespaceAuth;
import org.apache.helix.rest.server.resources.AbstractResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Provides REST endpoints for accessing metadata store directory service,
* which responds to read/write requests of metadata store realms, sharding keys, etc..
*/
@NamespaceAuth
@Path("")
@Api (value = "", description = "Helix REST MetadataStoreDirectory APIs")
public class MetadataStoreDirectoryAccessor extends AbstractResource {
  private static final Logger LOG = LoggerFactory.getLogger(MetadataStoreDirectoryAccessor.class);

  // Name of the Helix REST namespace this accessor serves; resolved in postConstruct().
  private String _namespace;
  // Directory service backing all realm/sharding-key reads and writes.
  protected MetadataStoreDirectory _metadataStoreDirectory;

  // Runs after injection: resolves the current namespace and connects the metadata
  // store directory to that namespace's metadata store address.
  @PostConstruct
  private void postConstruct() {
    HelixRestNamespace helixRestNamespace = getHelixNamespace();
    _namespace = helixRestNamespace.getName();
    buildMetadataStoreDirectory(_namespace, helixRestNamespace.getMetadataStoreAddress());
  }

  /**
   * Gets all existing namespaces in the routing metadata store at endpoint:
   * "GET /metadata-store-namespaces"
   *
   * @return Json response of all namespaces.
   */
  @ResponseMetered(name = HttpConstants.READ_REQUEST)
  @Timed(name = HttpConstants.READ_REQUEST)
  @GET
  @Path("/metadata-store-namespaces")
  @ApiOperation (value = "Return list of namespaces", notes = "Helix REST MetadataStoreDirectory Get API")
  public Response getAllNamespaces() {
    Collection<String> namespaces = _metadataStoreDirectory.getAllNamespaces();
    Map<String, Collection<String>> responseMap =
        ImmutableMap.of(MetadataStoreRoutingConstants.METADATA_STORE_NAMESPACES, namespaces);
    return JSONRepresentation(responseMap);
  }

  /**
   * Gets all metadata store realms in a namespace at path: "GET /metadata-store-realms",
   * or gets a metadata store realm with the sharding key at path:
   * "GET /metadata-store-realms?sharding-key={sharding-key}"
   *
   * @return Json representation of all realms.
   */
  @ResponseMetered(name = HttpConstants.READ_REQUEST)
  @Timed(name = HttpConstants.READ_REQUEST)
  @GET
  @Path("/metadata-store-realms")
  @ApiOperation (value = "Return all metadata store realms", notes = "Helix REST MetadataStoreDirectory Get API")
  public Response getAllMetadataStoreRealms(@QueryParam("sharding-key") String shardingKey) {
    try {
      if (shardingKey == null) {
        // Get all realms: "GET /metadata-store-realms"
        Collection<String> realms = _metadataStoreDirectory.getAllMetadataStoreRealms(_namespace);
        Map<String, Collection<String>> responseMap =
            ImmutableMap.of(MetadataStoreRoutingConstants.METADATA_STORE_REALMS, realms);
        return JSONRepresentation(responseMap);
      }
      // Get a single realm filtered by sharding key:
      // "GET /metadata-store-realms?sharding-key={sharding-key}"
      String realm = _metadataStoreDirectory.getMetadataStoreRealm(_namespace, shardingKey);
      return JSONRepresentation(new MetadataStoreShardingKey(shardingKey, realm));
    } catch (NoSuchElementException ex) {
      return notFound(ex.getMessage());
    } catch (IllegalArgumentException e) {
      return badRequest(e.getMessage());
    }
  }

  /**
   * Creates a metadata store realm at endpoint: "PUT /metadata-store-realms/{realm}".
   * Returns 201 on success, 500 when the directory rejects the write.
   * NOTE(review): IllegalArgumentException is mapped to 404 here while the read
   * endpoints above map it to 400 -- confirm whether this asymmetry is intentional.
   */
  @ResponseMetered(name = HttpConstants.WRITE_REQUEST)
  @Timed(name = HttpConstants.WRITE_REQUEST)
  @PUT
  @Path("/metadata-store-realms/{realm}")
  public Response addMetadataStoreRealm(@PathParam("realm") String realm) {
    try {
      if (!_metadataStoreDirectory.addMetadataStoreRealm(_namespace, realm)) {
        return serverError();
      }
    } catch (IllegalArgumentException ex) {
      return notFound(ex.getMessage());
    }
    return created();
  }

  /**
   * Deletes a metadata store realm at endpoint: "DELETE /metadata-store-realms/{realm}".
   * Returns 200 on success, 500 when the directory rejects the delete.
   */
  @ResponseMetered(name = HttpConstants.WRITE_REQUEST)
  @Timed(name = HttpConstants.WRITE_REQUEST)
  @DELETE
  @Path("/metadata-store-realms/{realm}")
  public Response deleteMetadataStoreRealm(@PathParam("realm") String realm) {
    try {
      if (!_metadataStoreDirectory.deleteMetadataStoreRealm(_namespace, realm)) {
        return serverError();
      }
    } catch (IllegalArgumentException ex) {
      return notFound(ex.getMessage());
    }
    return OK();
  }

  /**
   * Gets all sharding keys for following requests:
   * - "HTTP GET /sharding-keys" which returns all sharding keys in a namespace.
   * - "HTTP GET /sharding-keys?prefix={prefix}" which returns sharding keys that have the prefix.
   * -- JSON response example for this path:
   * {
   * "prefix": "/sharding/key",
   * "shardingKeys": [{
   * "realm": "testRealm2",
   * "shardingKey": "/sharding/key/1/f"
   * }, {
   * "realm": "testRealm2",
   * "shardingKey": "/sharding/key/1/e"
   * }, {
   * "realm": "testRealm1",
   * "shardingKey": "/sharding/key/1/b"
   * }, {
   * "realm": "testRealm1",
   * "shardingKey": "/sharding/key/1/a"
   * }]
   * }
   *
   * @param prefix Query param in endpoint path: prefix substring of sharding key.
   * @return Json representation for the sharding keys.
   */
  @ResponseMetered(name = HttpConstants.READ_REQUEST)
  @Timed(name = HttpConstants.READ_REQUEST)
  @GET
  @Path("/sharding-keys")
  @ApiOperation (value = "Return list of sharding-keys", notes = "Helix REST MetadataStoreDirectory Get Sharding Key API")
  public Response getShardingKeys(@QueryParam("prefix") String prefix) {
    try {
      if (prefix == null) {
        // For endpoint: "/sharding-keys" to get all sharding keys in a namespace.
        return getAllShardingKeys();
      }
      // For endpoint: "/sharding-keys?prefix={prefix}"
      return getAllShardingKeysUnderPath(prefix);
    } catch (NoSuchElementException ex) {
      return notFound(ex.getMessage());
    } catch (IllegalArgumentException e) {
      return badRequest(e.getMessage());
    }
  }

  /**
   * Gets routing data in current namespace.
   *
   * - "HTTP GET /routing-data"
   * -- Response example:
   * {
   * "namespace" : "my-namespace",
   * "routingData" : [ {
   * "realm" : "realm-1",
   * "shardingKeys" : [ "/sharding/key/1/d", "/sharding/key/1/e", "/sharding/key/1/f" ]
   * }, {
   * "realm" : "realm-2",
   * "shardingKeys" : [ "/sharding/key/1/a", "/sharding/key/1/b", "/sharding/key/1/c" ]
   * } ]
   * }
   */
  @ResponseMetered(name = HttpConstants.READ_REQUEST)
  @Timed(name = HttpConstants.READ_REQUEST)
  @GET
  @ApiOperation (value = "Get Routing Data", notes = "Helix REST MetadataStoreDirectory Get Routing Data API")
  @Path("/routing-data")
  public Response getRoutingData() {
    Map<String, List<String>> rawRoutingData;
    try {
      rawRoutingData = _metadataStoreDirectory.getNamespaceRoutingData(_namespace);
    } catch (NoSuchElementException ex) {
      return notFound(ex.getMessage());
    }
    // Convert the realm -> sharding-keys map into the documented list-of-objects shape.
    List<MetadataStoreShardingKeysByRealm> shardingKeysByRealm = rawRoutingData.entrySet().stream()
        .map(entry -> new MetadataStoreShardingKeysByRealm(entry.getKey(), entry.getValue()))
        .collect(Collectors.toList());
    Map<String, Object> responseMap = ImmutableMap
        .of(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_NAMESPACE, _namespace,
            MetadataStoreRoutingConstants.ROUTING_DATA, shardingKeysByRealm);
    return JSONRepresentation(responseMap);
  }

  /**
   * Replaces the routing data of the current namespace at endpoint:
   * "PUT /routing-data". The JSON body is a map of realm to list of sharding keys.
   * Returns 201 on success, 400 on malformed input, 500 on write failure.
   */
  @ResponseMetered(name = HttpConstants.WRITE_REQUEST)
  @Timed(name = HttpConstants.WRITE_REQUEST)
  @PUT
  @Path("/routing-data")
  @Consumes(MediaType.APPLICATION_JSON)
  public Response setRoutingData(String jsonContent) {
    try {
      Map<String, List<String>> routingData =
          OBJECT_MAPPER.readValue(jsonContent, new TypeReference<HashMap<String, List<String>>>() {
          });
      if (!_metadataStoreDirectory.setNamespaceRoutingData(_namespace, routingData)) {
        return serverError();
      }
    } catch (JsonMappingException | JsonParseException | IllegalArgumentException e) {
      return badRequest(e.getMessage());
    } catch (IOException e) {
      return serverError(e);
    }
    return created();
  }

  /**
   * Gets all path-based sharding keys for a queried realm at endpoint:
   * "GET /metadata-store-realms/{realm}/sharding-keys"
   * <p>
   * "GET /metadata-store-realms/{realm}/sharding-keys?prefix={prefix}" is also supported,
   * which is helpful when you want to check what sharding keys have the prefix substring.
   *
   * @param realm Queried metadata store realm to get sharding keys.
   * @param prefix Query param in endpoint path: prefix substring of sharding key.
   * @return All path-based sharding keys in the queried realm.
   */
  @ResponseMetered(name = HttpConstants.READ_REQUEST)
  @Timed(name = HttpConstants.READ_REQUEST)
  @GET
  @Path("/metadata-store-realms/{realm}/sharding-keys")
  @ApiOperation (value = "Return all path-based sharding-keys ", notes = "Helix REST MetadataStoreDirectory Get API")
  public Response getRealmShardingKeys(@PathParam("realm") String realm,
      @QueryParam("prefix") String prefix) {
    try {
      if (prefix == null) {
        return getAllShardingKeysInRealm(realm);
      }
      // For "GET /metadata-store-realms/{realm}/sharding-keys?prefix={prefix}"
      return getRealmShardingKeysUnderPath(realm, prefix);
    } catch (NoSuchElementException ex) {
      return notFound(ex.getMessage());
    } catch (IllegalArgumentException e) {
      return badRequest(e.getMessage());
    }
  }

  /**
   * Adds a sharding key to a realm at endpoint:
   * "PUT /metadata-store-realms/{realm}/sharding-keys/{sharding-key}".
   * Returns 201 on success, 500 on write failure.
   */
  @ResponseMetered(name = HttpConstants.WRITE_REQUEST)
  @Timed(name = HttpConstants.WRITE_REQUEST)
  @PUT
  @Path("/metadata-store-realms/{realm}/sharding-keys/{sharding-key: .+}")
  public Response addShardingKey(@PathParam("realm") String realm,
      @PathParam("sharding-key") String shardingKey) {
    // Restore the leading "/" that the path template does not capture.
    shardingKey = "/" + shardingKey;
    try {
      if (!_metadataStoreDirectory.addShardingKey(_namespace, realm, shardingKey)) {
        return serverError();
      }
    } catch (NoSuchElementException ex) {
      return notFound(ex.getMessage());
    } catch (IllegalArgumentException e) {
      return badRequest(e.getMessage());
    }
    return created();
  }

  /**
   * Deletes a sharding key from a realm at endpoint:
   * "DELETE /metadata-store-realms/{realm}/sharding-keys/{sharding-key}".
   * Returns 200 on success, 500 on delete failure.
   * NOTE(review): unlike addShardingKey, IllegalArgumentException here maps to 404
   * rather than 400 -- confirm whether this asymmetry is intentional.
   */
  @ResponseMetered(name = HttpConstants.WRITE_REQUEST)
  @Timed(name = HttpConstants.WRITE_REQUEST)
  @DELETE
  @Path("/metadata-store-realms/{realm}/sharding-keys/{sharding-key: .+}")
  public Response deleteShardingKey(@PathParam("realm") String realm,
      @PathParam("sharding-key") String shardingKey) {
    // Restore the leading "/" that the path template does not capture.
    shardingKey = "/" + shardingKey;
    try {
      if (!_metadataStoreDirectory.deleteShardingKey(_namespace, realm, shardingKey)) {
        return serverError();
      }
    } catch (IllegalArgumentException ex) {
      return notFound(ex.getMessage());
    }
    return OK();
  }

  // Resolves the HelixRestNamespace for the current request, handling both the
  // default servlet (namespace looked up from ALL_NAMESPACES) and common servlets
  // (namespace stored under the METADATA context property).
  private HelixRestNamespace getHelixNamespace() {
    HelixRestNamespace helixRestNamespace = null;
    // A default servlet does not have context property key METADATA, so the namespace
    // is retrieved from property ALL_NAMESPACES.
    if (HelixRestUtils.isDefaultServlet(_servletRequest.getServletPath())) {
      // It is safe to ignore uncheck warnings for this cast.
      @SuppressWarnings("unchecked")
      List<HelixRestNamespace> namespaces = (List<HelixRestNamespace>) _application.getProperties()
          .get(ContextPropertyKeys.ALL_NAMESPACES.name());
      for (HelixRestNamespace ns : namespaces) {
        if (HelixRestNamespace.DEFAULT_NAMESPACE_NAME.equals(ns.getName())) {
          helixRestNamespace = ns;
          break;
        }
      }
    } else {
      // Get namespace from property METADATA for a common servlet.
      helixRestNamespace = (HelixRestNamespace) _application.getProperties()
          .get(ContextPropertyKeys.METADATA.name());
    }
    return helixRestNamespace;
  }

  // Connects _metadataStoreDirectory; on invalid routing data the failure is only
  // logged, leaving the field null (endpoints would then NPE -- presumably callers
  // rely on valid routing data being present; TODO confirm).
  protected void buildMetadataStoreDirectory(String namespace, String address) {
    try {
      _metadataStoreDirectory = ZkMetadataStoreDirectory.getInstance(namespace, address);
    } catch (InvalidRoutingDataException ex) {
      LOG.warn("Unable to create metadata store directory for namespace: {}, ZK address: {}",
          namespace, address, ex);
    }
  }

  // Response body for "GET /sharding-keys" (all keys in the namespace).
  private Response getAllShardingKeys() {
    Collection<String> shardingKeys = _metadataStoreDirectory.getAllShardingKeys(_namespace);
    Map<String, Object> responseMap = ImmutableMap
        .of(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_NAMESPACE, _namespace,
            MetadataStoreRoutingConstants.SHARDING_KEYS, shardingKeys);
    return JSONRepresentation(responseMap);
  }

  // Response body for "GET /metadata-store-realms/{realm}/sharding-keys".
  private Response getAllShardingKeysInRealm(String realm) {
    Collection<String> shardingKeys =
        _metadataStoreDirectory.getAllShardingKeysInRealm(_namespace, realm);
    Map<String, Object> responseMap = ImmutableMap
        .of(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM, realm,
            MetadataStoreRoutingConstants.SHARDING_KEYS, shardingKeys);
    return JSONRepresentation(responseMap);
  }

  // Response body for "GET /sharding-keys?prefix={prefix}": each key paired with its realm.
  private Response getAllShardingKeysUnderPath(String prefix) {
    List<MetadataStoreShardingKey> shardingKeyList =
        _metadataStoreDirectory.getAllMappingUnderPath(_namespace, prefix).entrySet().stream()
            .map(entry -> new MetadataStoreShardingKey(entry.getKey(), entry.getValue()))
            .collect(Collectors.toList());
    Map<String, Object> responseMap = ImmutableMap
        .of(MetadataStoreRoutingConstants.SHARDING_KEY_PATH_PREFIX, prefix,
            MetadataStoreRoutingConstants.SHARDING_KEYS, shardingKeyList);
    return JSONRepresentation(responseMap);
  }

  // Response body for "GET /metadata-store-realms/{realm}/sharding-keys?prefix={prefix}":
  // keys under the prefix filtered down to the requested realm.
  private Response getRealmShardingKeysUnderPath(String realm, String prefix) {
    List<String> shardingKeyList =
        _metadataStoreDirectory.getAllMappingUnderPath(_namespace, prefix).entrySet().stream()
            .filter(entry -> entry.getValue().equals(realm)).map(Map.Entry::getKey)
            .collect(Collectors.toList());
    Map<String, Object> responseMap = ImmutableMap
        .of(MetadataStoreRoutingConstants.SHARDING_KEY_PATH_PREFIX, prefix,
            MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM, realm,
            MetadataStoreRoutingConstants.SHARDING_KEYS, shardingKeyList);
    return JSONRepresentation(responseMap);
  }
}
| 9,352 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources/exceptions/HelixHealthException.java
|
package org.apache.helix.rest.server.resources.exceptions;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Unchecked exception for reporting failures in Helix REST health-related
 * operations (inferred from the name and package; it only forwards to the
 * standard {@link RuntimeException} constructors).
 */
public class HelixHealthException extends RuntimeException {
  /** Creates an exception with a detail message only. */
  public HelixHealthException(String message) {
    super(message);
  }

  /** Creates an exception wrapping an underlying cause. */
  public HelixHealthException(Throwable cause) {
    super(cause);
  }

  /** Creates an exception with both a detail message and an underlying cause. */
  public HelixHealthException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 9,353 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources/zookeeper/ZooKeeperAccessor.java
|
package org.apache.helix.rest.server.resources.zookeeper;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import java.util.Map;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.QueryParam;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.codahale.metrics.annotation.ResponseMetered;
import com.codahale.metrics.annotation.Timed;
import com.google.common.base.Enums;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.AccessOption;
import org.apache.helix.BaseDataAccessor;
import org.apache.helix.manager.zk.ZKUtil;
import org.apache.helix.msdcommon.util.ZkValidationUtil;
import org.apache.helix.rest.common.ContextPropertyKeys;
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.server.ServerContext;
import org.apache.helix.rest.server.filters.NamespaceAuth;
import org.apache.helix.rest.server.resources.AbstractResource;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
/**
 * ZooKeeperAccessor provides methods for accessing ZooKeeper resources (ZNodes).
 * It provides basic ZooKeeper features supported by ZkClient.
 */
@NamespaceAuth
@Path("/zookeeper{path: /.+}")
@Api (value = "", description = "Helix REST zookeeper APIs")
public class ZooKeeperAccessor extends AbstractResource {
  private static final Logger LOG = LoggerFactory.getLogger(ZooKeeperAccessor.class.getName());
  private BaseDataAccessor<byte[]> _zkBaseDataAccessor;

  /** Read commands accepted by the GET endpoint via the "command" query parameter. */
  public enum ZooKeeperCommand {
    exists,
    getBinaryData,
    getStringData,
    getChildren,
    getStat
  }

  /**
   * Dispatches a read-only ZooKeeper operation on the given ZNode path.
   * @param path ZNode path (from the URL), validated before use
   * @param commandStr one of {@link ZooKeeperCommand}; invalid values yield 400
   * @return JSON response for the requested command, or 400 on bad input
   */
  @ResponseMetered(name = HttpConstants.READ_REQUEST)
  @Timed(name = HttpConstants.READ_REQUEST)
  @GET
  @ApiOperation (value = "Return ZNode", notes = "Helix REST Zookeeper Get API")
  public Response get(@PathParam("path") String path, @QueryParam("command") String commandStr) {
    ZooKeeperCommand cmd = getZooKeeperCommandIfPresent(commandStr);
    if (cmd == null) {
      return badRequest("Invalid ZooKeeper command: " + commandStr);
    }
    // Lazily initialize ZkBaseDataAccessor
    ServerContext _serverContext =
        (ServerContext) _application.getProperties().get(ContextPropertyKeys.SERVER_CONTEXT.name());
    _zkBaseDataAccessor = _serverContext.getByteArrayZkBaseDataAccessor();
    // Check that the path supplied is valid
    if (!ZkValidationUtil.isPathValid(path)) {
      String errMsg = "The given path is not a valid ZooKeeper path: " + path;
      LOG.info(errMsg);
      return badRequest(errMsg);
    }
    switch (cmd) {
      case exists:
        return exists(_zkBaseDataAccessor, path);
      case getBinaryData:
        return getBinaryData(_zkBaseDataAccessor, path);
      case getStringData:
        return getStringData(_zkBaseDataAccessor, path);
      case getChildren:
        return getChildren(_zkBaseDataAccessor, path);
      case getStat:
        return getStat(_zkBaseDataAccessor, path);
      default:
        String errMsg = "Unsupported command: " + commandStr;
        LOG.error(errMsg);
        return badRequest(errMsg);
    }
  }

  /**
   * Deletes the ZNode at the given path. Only ephemeral nodes may be deleted
   * (see TODO in the private helper below).
   * @param path ZNode path (from the URL), validated before use
   * @return 200 on success, 404 if absent, 403 for non-ephemeral nodes
   */
  @ResponseMetered(name = HttpConstants.WRITE_REQUEST)
  @Timed(name = HttpConstants.WRITE_REQUEST)
  @DELETE
  public Response delete(@PathParam("path") String path) {
    // Lazily initialize ZkBaseDataAccessor
    ServerContext _serverContext =
        (ServerContext) _application.getProperties().get(ContextPropertyKeys.SERVER_CONTEXT.name());
    _zkBaseDataAccessor = _serverContext.getByteArrayZkBaseDataAccessor();
    // Check that the path supplied is valid
    if (!ZkValidationUtil.isPathValid(path)) {
      String errMsg = "The given path is not a valid ZooKeeper path: " + path;
      LOG.info(errMsg);
      return badRequest(errMsg);
    }
    return delete(_zkBaseDataAccessor, path);
  }

  /**
   * Checks if a ZNode exists in the given path.
   * @param zkBaseDataAccessor
   * @param path
   * @return true if a ZNode exists, false otherwise
   */
  private Response exists(BaseDataAccessor<byte[]> zkBaseDataAccessor, String path) {
    Map<String, Boolean> result = ImmutableMap.of(ZooKeeperCommand.exists.name(),
        zkBaseDataAccessor.exists(path, AccessOption.PERSISTENT));
    return JSONRepresentation(result);
  }

  /**
   * Returns a response containing the binary data and Stat.
   * @param zkBaseDataAccessor
   * @param path
   * @return
   */
  private Response getBinaryData(BaseDataAccessor<byte[]> zkBaseDataAccessor, String path) {
    Stat stat = new Stat();
    byte[] bytes = readBinaryDataFromZK(zkBaseDataAccessor, path, stat);
    Map<String, Object> binaryResult = ImmutableMap
        .of(ZooKeeperCommand.getBinaryData.name(), bytes, ZooKeeperCommand.getStat.name(),
            ZKUtil.fromStatToMap(stat));
    // Note: this serialization (using ObjectMapper) will convert this byte[] into
    // a Base64 String! The REST client (user) must convert the resulting String back into
    // a byte[] using Base64.
    return JSONRepresentation(binaryResult);
  }

  /**
   * Returns a response containing the string data and Stat.
   * @param zkBaseDataAccessor
   * @param path
   * @return
   */
  private Response getStringData(BaseDataAccessor<byte[]> zkBaseDataAccessor, String path) {
    Stat stat = new Stat();
    byte[] bytes = readBinaryDataFromZK(zkBaseDataAccessor, path, stat);
    // NOTE(review): new String(byte[]) decodes with the JVM's platform default charset
    // (pre-Java 18). ZNode payloads are presumably UTF-8 — consider StandardCharsets.UTF_8,
    // but confirm against existing clients before changing the wire behavior.
    Map<String, Object> stringResult = ImmutableMap
        .of(ZooKeeperCommand.getStringData.name(), new String(bytes),
            ZooKeeperCommand.getStat.name(), ZKUtil.fromStatToMap(stat));
    return JSONRepresentation(stringResult);
  }

  /**
   * Returns byte[] from ZooKeeper.
   * @param zkBaseDataAccessor
   * @param path
   * @param stat populated with the node's Stat on a successful read
   * @return the node's raw payload
   * @throws WebApplicationException 404 when the node does not exist
   */
  private byte[] readBinaryDataFromZK(BaseDataAccessor<byte[]> zkBaseDataAccessor, String path,
      Stat stat) {
    if (zkBaseDataAccessor.exists(path, AccessOption.PERSISTENT)) {
      return zkBaseDataAccessor.get(path, stat, AccessOption.PERSISTENT);
    } else {
      throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND)
          .entity(String.format("The ZNode at path %s does not exist!", path))
          .type(MediaType.TEXT_PLAIN).build());
    }
  }

  /**
   * Returns a list of children ZNode names given the path for the parent ZNode.
   * @param zkBaseDataAccessor
   * @param path
   * @return list of child ZNodes
   */
  private Response getChildren(BaseDataAccessor<byte[]> zkBaseDataAccessor, String path) {
    if (zkBaseDataAccessor.exists(path, AccessOption.PERSISTENT)) {
      Map<String, List<String>> result = ImmutableMap.of(ZooKeeperCommand.getChildren.name(),
          zkBaseDataAccessor.getChildNames(path, AccessOption.PERSISTENT));
      return JSONRepresentation(result);
    } else {
      throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND)
          .type(MediaType.TEXT_PLAIN)
          .entity(String.format("The ZNode at path %s does not exist", path)).build());
    }
  }

  /**
   * Returns the ZNode Stat object given the path.
   * @param zkBaseDataAccessor
   * @param path
   * @return JSON map of the Stat fields plus the queried path
   */
  private Response getStat(BaseDataAccessor<byte[]> zkBaseDataAccessor, String path) {
    Stat stat = zkBaseDataAccessor.getStat(path, AccessOption.PERSISTENT);
    if (stat == null) {
      throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND)
          .type(MediaType.TEXT_PLAIN)
          .entity(String.format("The ZNode at path %s does not exist!", path)).build());
    }
    Map<String, String> result = ZKUtil.fromStatToMap(stat);
    result.put("path", path);
    return JSONRepresentation(result);
  }

  /**
   * Delete the ZNode at the given path if exists.
   * @param zkBaseDataAccessor
   * @param path
   * @return The delete result and the operated path.
   */
  private Response delete(BaseDataAccessor<byte[]> zkBaseDataAccessor, String path) {
    Stat stat = zkBaseDataAccessor.getStat(path, AccessOption.PERSISTENT);
    if (stat == null) {
      return notFound();
    } else if (stat.getEphemeralOwner() <= 0) {
      // ephemeralOwner == 0 means the node is persistent; only ephemeral nodes may be removed.
      // TODO: Remove this restriction once we have audit and ACL for the API calls.
      // TODO: This method is added pre-maturely to support removing the live instance of a zombie
      // TODO: instance. It is risky to allow all deleting requests before audit and ACL are done.
      throw new WebApplicationException(Response.status(Response.Status.FORBIDDEN)
          .entity("Deleting a non-ephemeral node is not allowed.").build());
    }
    if (zkBaseDataAccessor.remove(path, AccessOption.PERSISTENT)) {
      return OK();
    } else {
      throw new WebApplicationException(Response.status(Response.Status.INTERNAL_SERVER_ERROR)
          .entity(String.format("Failed to delete %s.", path)).build());
    }
  }

  /** @return the matching command enum constant, or null if {@code command} is not one. */
  private ZooKeeperCommand getZooKeeperCommandIfPresent(String command) {
    return Enums.getIfPresent(ZooKeeperCommand.class, command).orNull();
  }
}
| 9,354 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources/helix/ResourceAssignmentOptimizerAccessor.java
|
package org.apache.helix.rest.server.resources.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.security.InvalidParameterException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.core.Response;
import com.codahale.metrics.annotation.ResponseMetered;
import com.codahale.metrics.annotation.Timed;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.controller.rebalancer.strategy.AutoRebalanceStrategy;
import org.apache.helix.controller.rebalancer.strategy.RebalanceStrategy;
import org.apache.helix.controller.rebalancer.waged.WagedRebalancer;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.ResourceAssignment;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.server.filters.ClusterAuth;
import org.apache.helix.util.HelixUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ClusterAuth
@Path("/clusters/{clusterId}/partitionAssignment")
public class ResourceAssignmentOptimizerAccessor extends AbstractHelixResource {
  private static final Logger LOG =
      LoggerFactory.getLogger(ResourceAssignmentOptimizerAccessor.class.getName());

  public static String RESPONSE_HEADER_KEY = "Setting";
  public static String[] RESPONSE_HEADER_FIELDS =
      new String[]{"instanceFilter", "resourceFilter", "returnFormat"};

  /** Parsed request payload: instance changes plus result filtering/formatting options. */
  private static class InputFields {
    Set<String> activatedInstances = new HashSet<>(); // active = online + enabled.
    Set<String> deactivatedInstances = new HashSet<>(); // deactivate = offline + disabled.
    Set<String> instanceConfigs = new HashSet<>(); // instance configs to be overridden.
    Set<String> instanceFilter = new HashSet<>();
    Set<String> resourceFilter = new HashSet<>();
    AssignmentFormat returnFormat = AssignmentFormat.IdealStateFormat;
  }

  // TODO: We could add a data cache here to avoid read latency.
  /** Cluster snapshot read from ZK, adjusted by the requested instance activations. */
  private static class ClusterState {
    List<InstanceConfig> instanceConfigs = new ArrayList<>();
    ClusterConfig clusterConfig;
    List<String> resources = new ArrayList<>();
    List<String> liveInstances; // cluster LiveInstance + activatedInstances - deactivatedInstances
  }

  // Result format. User can choose from IdealState or CurrentState format,
  // IdealState format : Map of resource -> partition -> instance -> state. (default)
  // CurrentState format : Map of instance -> resource -> partition -> state.
  private static class AssignmentResult extends HashMap<String, Map<String, Map<String, String>>> {
    public AssignmentResult() {
      super();
    }
  }

  /** Top-level JSON request body. */
  private static class InputJsonContent {
    @JsonProperty("InstanceChange")
    InstanceChangeMap instanceChangeMap;
    @JsonProperty("Options")
    OptionsMap optionsMap;
  }

  /** "InstanceChange" section: hypothetical instance additions/removals/overrides. */
  private static class InstanceChangeMap {
    @JsonProperty("InstanceConfigs")
    JsonNode instanceConfigs;
    @JsonProperty("ActivateInstances")
    List<String> activateInstances;
    @JsonProperty("DeactivateInstances")
    List<String> deactivateInstances;
  }

  private enum AssignmentFormat {
    IdealStateFormat,
    CurrentStateFormat
  }

  /** "Options" section: result filters and the requested output format. */
  private static class OptionsMap {
    @JsonProperty("InstanceFilter")
    Set<String> instanceFilter;
    @JsonProperty("ResourceFilter")
    Set<String> resourceFilter;
    @JsonProperty("ReturnFormat")
    AssignmentFormat returnFormat;
  }

  /**
   * Computes the potential partition assignment for the cluster under the hypothetical
   * instance changes described in {@code content}, without mutating cluster state.
   * @param clusterId target cluster
   * @param content JSON request body (see {@link InputJsonContent})
   * @return JSON assignment in the requested format, or 400 on invalid input
   */
  @ResponseMetered(name = HttpConstants.WRITE_REQUEST)
  @Timed(name = HttpConstants.WRITE_REQUEST)
  @POST
  public Response computePotentialAssignment(@PathParam("clusterId") String clusterId,
      String content) {
    InputFields inputFields;
    ClusterState clusterState;
    AssignmentResult result;
    try {
      // 1. Try to parse the content string. If parseable, use it as a KV map. Otherwise, return a REASON String
      inputFields = readInput(content);
      // 2. Read cluster status from ZK.
      clusterState = readClusterStateAndValidateInput(clusterId, inputFields);
      // 3. Call rebalancer tools for each resource.
      result = computeOptimalAssignmentForResources(inputFields, clusterState, clusterId);
      // 4. Serialize result to JSON and return.
      // TODO: We will need to include user input to response header since user may do async call.
      return JSONRepresentation(result, RESPONSE_HEADER_KEY, buildResponseHeaders(inputFields));
    } catch (InvalidParameterException ex) {
      return badRequest(ex.getMessage());
    } catch (JsonProcessingException e) {
      return badRequest("Invalid input: Input can not be parsed into a KV map." + e.getMessage());
    } catch (OutOfMemoryError e) {
      LOG.error("OutOfMemoryError while calling partitionAssignment", e);
      return badRequest(
          "Response size is too large to serialize. Please query by resources or instance filter");
    } catch (Exception e) {
      LOG.error("Failed to compute partition assignment", e);
      return badRequest("Failed to compute partition assignment: " + e);
    }
  }

  /**
   * Parses the JSON request body into {@link InputFields}, applying defaults for
   * missing sections.
   * @throws JsonProcessingException if the body is not valid JSON for the expected schema
   */
  private InputFields readInput(String content)
      throws JsonProcessingException, IllegalArgumentException {
    ObjectMapper objectMapper = new ObjectMapper();
    InputJsonContent inputJsonContent = objectMapper.readValue(content, InputJsonContent.class);
    InputFields inputFields = new InputFields();
    if (inputJsonContent.instanceChangeMap != null) {
      Optional.ofNullable(inputJsonContent.instanceChangeMap.activateInstances)
          .ifPresent(inputFields.activatedInstances::addAll);
      Optional.ofNullable(inputJsonContent.instanceChangeMap.deactivateInstances)
          .ifPresent(inputFields.deactivatedInstances::addAll);
      Optional.ofNullable(inputJsonContent.instanceChangeMap.instanceConfigs).ifPresent(
          configs -> configs.forEach(
              instanceConfig -> inputFields.instanceConfigs.add(instanceConfig.toString())));
    }
    if (inputJsonContent.optionsMap != null) {
      Optional.ofNullable(inputJsonContent.optionsMap.resourceFilter)
          .ifPresent(inputFields.resourceFilter::addAll);
      Optional.ofNullable(inputJsonContent.optionsMap.instanceFilter)
          .ifPresent(inputFields.instanceFilter::addAll);
      inputFields.returnFormat = Optional.ofNullable(inputJsonContent.optionsMap.returnFormat)
          .orElse(AssignmentFormat.IdealStateFormat);
    }
    return inputFields;
  }

  /**
   * Reads the live cluster state from ZK and overlays the user-requested instance changes.
   * @throws InvalidParameterException if the instance-change lists overlap, an instance
   *         config override is malformed, or an activated instance has no InstanceConfig
   */
  private ClusterState readClusterStateAndValidateInput(String clusterId, InputFields inputFields)
      throws InvalidParameterException {
    // One instance can only exist in one of the list in InstanceChange.
    // Validate the intersection is empty.
    validateNoIntxnInstanceChange(inputFields);
    // Add instances to current liveInstances
    ClusterState clusterState = new ClusterState();
    ConfigAccessor cfgAccessor = getConfigAccessor();
    HelixDataAccessor dataAccessor = getDataAccssor(clusterId);
    clusterState.resources = dataAccessor.getChildNames(dataAccessor.keyBuilder().idealStates());
    // Add existing live instances and new instances from user input to instances list.
    Set<String> liveInstancesSet =
        new HashSet<>(dataAccessor.getChildNames(dataAccessor.keyBuilder().liveInstances()));
    liveInstancesSet.addAll(inputFields.activatedInstances);
    liveInstancesSet.removeAll(inputFields.deactivatedInstances);
    Map<String, InstanceConfig> instanceConfigMap =
        dataAccessor.getChildValuesMap(dataAccessor.keyBuilder().instanceConfigs(), true);
    // Override instance config with inputFields.instanceConfigs
    for (String instanceConfig : inputFields.instanceConfigs) {
      try {
        InstanceConfig instanceConfigOverride = new InstanceConfig(toZNRecord(instanceConfig));
        instanceConfigMap.put(instanceConfigOverride.getInstanceName(), instanceConfigOverride);
      } catch (Exception e) {
        throw new InvalidParameterException(
            "instanceConfig: " + instanceConfig + " is not a valid instanceConfig");
      }
    }
    // Read instance and cluster config.
    // Throw exception if there is no instanceConfig for activatedInstances instance.
    for (String instance : inputFields.activatedInstances) {
      if (instanceConfigMap.containsKey(instance)) {
        instanceConfigMap.get(instance).setInstanceEnabled(true);
      } else {
        throw new InvalidParameterException(
            "instance: " + instance + " does not have instanceConfig");
      }
    }
    for (String instance : inputFields.deactivatedInstances) {
      if (instanceConfigMap.containsKey(instance)) {
        instanceConfigMap.get(instance).setInstanceEnabled(false);
      }
    }
    // We will not consider delayed rebalance. The current implementation of
    // 'getIdealAssignmentForFullAuto' for CRUSH-ed resources will not consider delayed rebalancer
    // but `getImmediateAssignmentForWagedFullAuto` will honor current timestamp and delayed
    // rebalance window. We are disabling delayed rebalance for now. Could add a cluster option to
    // honor delayed rebalance window in the future.
    ClusterConfig clusterConfig = cfgAccessor.getClusterConfig(clusterId);
    clusterConfig.setDelayRebalaceEnabled(false);
    clusterState.clusterConfig = clusterConfig;
    clusterState.liveInstances = new ArrayList<>(liveInstancesSet);
    clusterState.instanceConfigs.addAll(instanceConfigMap.values());
    return clusterState;
  }

  /**
   * Computes assignments per resource: FULL_AUTO resources synchronously, WAGED resources
   * batched at the end, SEMI_AUTO resources skipped.
   */
  private AssignmentResult computeOptimalAssignmentForResources(InputFields inputFields,
      ClusterState clusterState, String clusterId) throws Exception {
    AssignmentResult result = new AssignmentResult();
    // Iterate through resources, read resource level info and get potential assignment.
    HelixDataAccessor dataAccessor = getDataAccssor(clusterId);
    List<IdealState> wagedResourceIdealState = new ArrayList<>();
    for (String resource : clusterState.resources) {
      IdealState idealState =
          dataAccessor.getProperty(dataAccessor.keyBuilder().idealStates(resource));
      // The resource may have been deleted between listing and this read; skip it.
      if (idealState == null) {
        continue;
      }
      // Compute all Waged resources in a batch later.
      if (idealState.getRebalancerClassName() != null && idealState.getRebalancerClassName()
          .equals(WagedRebalancer.class.getName())) {
        wagedResourceIdealState.add(idealState);
        continue;
      }
      // For non Waged resources, we don't compute resources not in white list.
      if (!inputFields.resourceFilter.isEmpty() && !inputFields.resourceFilter.contains(resource)) {
        continue;
      }
      // Use getIdealAssignmentForFullAuto for FULL_AUTO resource.
      Map<String, Map<String, String>> partitionAssignments;
      if (idealState.getRebalanceMode() == IdealState.RebalanceMode.FULL_AUTO) {
        String rebalanceStrategy = idealState.getRebalanceStrategy();
        if (rebalanceStrategy == null || rebalanceStrategy
            .equalsIgnoreCase(RebalanceStrategy.DEFAULT_REBALANCE_STRATEGY)) {
          rebalanceStrategy = AutoRebalanceStrategy.class.getName();
        }
        partitionAssignments = new TreeMap<>(HelixUtil
            .getIdealAssignmentForFullAuto(clusterState.clusterConfig, clusterState.instanceConfigs,
                clusterState.liveInstances, idealState, new ArrayList<>(idealState.getPartitionSet()),
                rebalanceStrategy));
        instanceFilter(inputFields, partitionAssignments, resource, result);
      } else if (idealState.getRebalanceMode() == IdealState.RebalanceMode.SEMI_AUTO) {
        LOG.error("Resource {} is in SEMI_AUTO mode. Skip partition assignment computation.",
            resource);
      }
    }
    if (!wagedResourceIdealState.isEmpty()) {
      computeWagedAssignmentResult(wagedResourceIdealState, inputFields, clusterState, clusterId,
          result);
    }
    return updateAssignmentFormat(inputFields, result);
  }

  // IdealState format : Map of resource -> partition -> instance -> state. (default)
  // CurrentState format : Map of instance -> resource -> partition -> state.
  private AssignmentResult updateAssignmentFormat(InputFields inputFields,
      AssignmentResult idealStateFormatResult) {
    if (inputFields.returnFormat.equals(AssignmentFormat.CurrentStateFormat)) {
      AssignmentResult currentStateFormatResult = new AssignmentResult();
      idealStateFormatResult.forEach((resourceKey, partitionMap) -> partitionMap.forEach(
          (partitionKey, instanceMap) -> instanceMap.forEach(
              (instanceKey, instanceState) -> currentStateFormatResult
                  .computeIfAbsent(instanceKey, x -> new HashMap<>())
                  .computeIfAbsent(resourceKey, y -> new HashMap<>())
                  .put(partitionKey, instanceState))));
      return currentStateFormatResult;
    }
    return idealStateFormatResult;
  }

  /*
   * Throws InvalidParameterException if any instance exists in more than one list in
   * InstanceChangeMap.
   */
  private void validateNoIntxnInstanceChange(InputFields inputFields) {
    Set<String> tempSet = new HashSet<>();
    List<Collection<String>> inputs = new ArrayList<>();
    inputs.add(inputFields.activatedInstances);
    inputs.add(inputFields.deactivatedInstances);
    inputs.sort(Comparator.comparingInt(Collection::size));
    for (int i = 0; i < inputs.size() - 1; ++i) {
      for (String s : inputs.get(i)) {
        if (!tempSet.add(s)) {
          throw new InvalidParameterException("Invalid input: instance [" + s
              + "] exist in more than one field in InstanceChange.");
        }
      }
    }
    for (String s : inputs.get(inputs.size() - 1)) {
      if (tempSet.contains(s)) {
        throw new InvalidParameterException(
            "Invalid input: instance [" + s + "] exist in more than one field in InstanceChange.");
      }
    }
  }

  /**
   * Computes the assignment for all WAGED resources in one batch and merges the filtered
   * result into {@code result}.
   * @throws UnsupportedOperationException if the cluster is in maintenance mode
   */
  private void computeWagedAssignmentResult(List<IdealState> wagedResourceIdealState,
      InputFields inputFields, ClusterState clusterState, String clusterId,
      AssignmentResult result) {
    // If the cluster is in Maintenance mode, throw an exception
    // TODO: we should return the partitionAssignment regardless of the cluster is in Maintenance
    // mode or not
    if (getHelixAdmin().isInMaintenanceMode(clusterId)) {
      throw new UnsupportedOperationException(
          "Can not query potential Assignment when cluster is in Maintenance mode.");
    }
    // Use getTargetAssignmentForWagedFullAuto for Waged resources.
    ConfigAccessor cfgAccessor = getConfigAccessor();
    List<ResourceConfig> wagedResourceConfigs = new ArrayList<>();
    for (IdealState idealState : wagedResourceIdealState) {
      ResourceConfig resourceConfig = cfgAccessor.getResourceConfig(clusterId, idealState.getResourceName());
      if (resourceConfig != null) {
        wagedResourceConfigs.add(resourceConfig);
      }
    }
    Map<String, ResourceAssignment> wagedAssignmentResult;
    wagedAssignmentResult = HelixUtil.getTargetAssignmentForWagedFullAuto(getZkBucketDataAccessor(),
        new ZkBaseDataAccessor<>(getRealmAwareZkClient()), clusterState.clusterConfig,
        clusterState.instanceConfigs, clusterState.liveInstances, wagedResourceIdealState,
        wagedResourceConfigs);
    // Convert ResourceAssignment to plain map.
    for (Map.Entry<String, ResourceAssignment> wagedAssignment : wagedAssignmentResult.entrySet()) {
      String resource = wagedAssignment.getKey();
      if (!inputFields.resourceFilter.isEmpty() && !inputFields.resourceFilter.contains(resource)) {
        continue;
      }
      Map<String, Map<String, String>> partitionAssignments = new TreeMap<>();
      wagedAssignment.getValue().getMappedPartitions().forEach(partition -> partitionAssignments
          .put(partition.getPartitionName(), wagedAssignment.getValue().getReplicaMap(partition)));
      instanceFilter(inputFields, partitionAssignments, resource, result);
    }
  }

  /**
   * Drops instances outside {@code inputFields.instanceFilter} from the assignment (and any
   * partitions left empty), then records the resource's assignment into {@code result}.
   */
  private void instanceFilter(InputFields inputFields,
      Map<String, Map<String, String>> partitionAssignments, String resource,
      AssignmentResult result) {
    if (!inputFields.instanceFilter.isEmpty()) {
      for (Iterator<Map.Entry<String, Map<String, String>>> partitionAssignmentIt =
          partitionAssignments.entrySet().iterator(); partitionAssignmentIt.hasNext(); ) {
        Map.Entry<String, Map<String, String>> partitionAssignment = partitionAssignmentIt.next();
        Map<String, String> instanceStates = partitionAssignment.getValue();
        // Only keep instances present in instanceFilter (non-empty is guaranteed above).
        instanceStates.entrySet()
            .removeIf(e -> !inputFields.instanceFilter.contains(e.getKey()));
        if (instanceStates.isEmpty()) {
          partitionAssignmentIt.remove();
        }
      }
    }
    result.put(resource, partitionAssignments);
  }

  /** Echoes the effective filters/format back to the caller in a response header map. */
  private Map<String, Object> buildResponseHeaders(InputFields inputFields) {
    Map<String, Object> headers = new HashMap<>();
    headers.put(RESPONSE_HEADER_FIELDS[0], inputFields.instanceFilter);
    headers.put(RESPONSE_HEADER_FIELDS[1], inputFields.resourceFilter);
    headers.put(RESPONSE_HEADER_FIELDS[2], inputFields.returnFormat.name());
    return headers;
  }
}
| 9,355 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources/helix/ClusterAccessor.java
|
package org.apache.helix.rest.server.resources.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Response;
import com.codahale.metrics.annotation.ResponseMetered;
import com.codahale.metrics.annotation.Timed;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import org.apache.commons.lang3.StringUtils;
import org.apache.helix.AccessOption;
import org.apache.helix.BaseDataAccessor;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyKey;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.api.exceptions.HelixConflictException;
import org.apache.helix.api.status.ClusterManagementMode;
import org.apache.helix.api.status.ClusterManagementModeRequest;
import org.apache.helix.manager.zk.ZKUtil;
import org.apache.helix.model.CloudConfig;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.ControllerHistory;
import org.apache.helix.model.CustomizedStateConfig;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.MaintenanceSignal;
import org.apache.helix.model.Message;
import org.apache.helix.model.RESTConfig;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.helix.rest.acl.AclRegister;
import org.apache.helix.rest.common.ContextPropertyKeys;
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.server.filters.ClusterAuth;
import org.apache.helix.rest.server.filters.NamespaceAuth;
import org.apache.helix.rest.server.json.cluster.ClusterTopology;
import org.apache.helix.rest.server.service.ClusterService;
import org.apache.helix.rest.server.service.ClusterServiceImpl;
import org.apache.helix.rest.server.service.VirtualTopologyGroupService;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.zookeeper.api.client.RealmAwareZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
@Path("/clusters")
@Api (value = "", description = "Helix REST Clusters APIs")
public class ClusterAccessor extends AbstractHelixResource {
private static Logger LOG = LoggerFactory.getLogger(ClusterAccessor.class.getName());
/**
 * JSON property keys used in cluster-level REST responses
 * (e.g. {@code getClusterInfo}, maintenance and controller endpoints).
 */
public enum ClusterProperties {
  controller,
  instances,
  liveInstances,
  resources,
  paused,
  maintenance,
  messages,
  stateModelDefinitions,
  clusters,
  maintenanceSignal,
  maintenanceHistory,
  clusterName
}
@NamespaceAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@ApiOperation (value = "Return list of all clusters", notes = "Helix REST Cluster Get API")
public Response getClusters() {
  // Wrap the full list of cluster names under the "clusters" key.
  Map<String, List<String>> payload = new HashMap<>();
  payload.put(ClusterProperties.clusters.name(), getHelixAdmin().getClusters());
  return JSONRepresentation(payload);
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@ApiOperation (value = "Return information for particular cluster", notes = "Helix REST Cluster Get API")
@Path("{clusterId}")
public Response getClusterInfo(@PathParam("clusterId") String clusterId) {
  if (!doesClusterExist(clusterId)) {
    return notFound();
  }
  HelixDataAccessor accessor = getDataAccssor(clusterId);
  PropertyKey.Builder keys = accessor.keyBuilder();

  // Assemble a summary of the cluster: controller leader, pause/maintenance flags,
  // and the names of resources, configured instances, and live instances.
  Map<String, Object> info = new HashMap<>();
  info.put(Properties.id.name(), clusterId);
  LiveInstance leader = accessor.getProperty(keys.controllerLeader());
  info.put(ClusterProperties.controller.name(),
      leader != null ? leader.getInstanceName() : "No Lead Controller!");
  info.put(ClusterProperties.paused.name(), accessor.getBaseDataAccessor()
      .exists(keys.pause().getPath(), AccessOption.PERSISTENT));
  info.put(ClusterProperties.maintenance.name(), getHelixAdmin().isInMaintenanceMode(clusterId));
  info.put(ClusterProperties.resources.name(), accessor.getChildNames(keys.idealStates()));
  info.put(ClusterProperties.instances.name(), accessor.getChildNames(keys.instanceConfigs()));
  info.put(ClusterProperties.liveInstances.name(), accessor.getChildNames(keys.liveInstances()));
  return JSONRepresentation(info);
}
@NamespaceAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@PUT
@Path("{clusterId}")
// Creates a cluster, optionally recreating an existing one and optionally attaching a
// CloudConfig built from the request body. ACLs are created before the cluster itself.
public Response createCluster(@PathParam("clusterId") String clusterId,
    @DefaultValue("false") @QueryParam("recreate") String recreate,
    @DefaultValue("false") @QueryParam("addCloudConfig") String addCloudConfig,
    String cloudConfigManifest) {
  boolean recreateIfExists = Boolean.parseBoolean(recreate);
  boolean cloudConfigIncluded = Boolean.parseBoolean(addCloudConfig);
  ClusterSetup clusterSetup = getClusterSetup();
  CloudConfig cloudConfig = null;
  if (cloudConfigIncluded) {
    // Validate the manifest up front so a bad payload fails fast with a 400
    // before any ACL or cluster state is created.
    ZNRecord record;
    try {
      record = toZNRecord(cloudConfigManifest);
      cloudConfig = new CloudConfig.Builder(record).build();
    } catch (IOException | HelixException e) {
      String errMsg = "Failed to generate a valid CloudConfig from " + cloudConfigManifest;
      LOG.error(errMsg, e);
      return badRequest(errMsg + " Exception: " + e.getMessage());
    }
  }
  // ACL creation happens first; if it fails we do not create the cluster at all.
  try {
    getAclRegister().createACL(_servletRequest);
  } catch (Exception ex) {
    LOG.error("Failed to create ACL for cluster {}. Exception: {}.", clusterId, ex);
    return serverError(ex);
  }
  try {
    clusterSetup.addCluster(clusterId, recreateIfExists, cloudConfig);
  } catch (Exception ex) {
    LOG.error("Failed to create cluster {}. Exception: {}.", clusterId, ex);
    return serverError(ex);
  }
  return created();
}
@NamespaceAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@DELETE
@Path("{clusterId}")
public Response deleteCluster(@PathParam("clusterId") String clusterId) {
  // Remove the cluster. A HelixException means it is still in use (client error);
  // any other failure is treated as an internal server error.
  try {
    getClusterSetup().deleteCluster(clusterId);
  } catch (HelixException ex) {
    LOG.info("Failed to delete cluster {}, cluster is still in use. Exception: {}.", clusterId,
        ex);
    return badRequest(ex.getMessage());
  } catch (Exception ex) {
    LOG.error("Failed to delete cluster {}. Exception: {}.", clusterId, ex);
    return serverError(ex);
  }
  return OK();
}
/**
 * Applies an administrative command to a cluster. Supported commands: activate/deactivate
 * (link/unlink a super cluster), addVirtualTopologyGroup, expand, enable/disable,
 * enableMaintenanceMode/disableMaintenanceMode, enableWagedRebalanceForAllResources,
 * purgeOfflineParticipants and onDemandRebalance.
 *
 * @param clusterId   cluster to operate on
 * @param commandStr  command name (parsed via getCommand; unknown values yield 400)
 * @param superCluster super-cluster name, required by activate/deactivate
 * @param duration    offline-duration threshold for purgeOfflineParticipants
 * @param content     optional payload whose meaning depends on the command
 */
@ClusterAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("{clusterId}")
public Response updateCluster(@PathParam("clusterId") String clusterId,
    @QueryParam("command") String commandStr, @QueryParam("superCluster") String superCluster,
    @QueryParam("duration") Long duration, String content) {
  Command command;
  try {
    command = getCommand(commandStr);
  } catch (HelixException ex) {
    return badRequest(ex.getMessage());
  }
  ClusterSetup clusterSetup = getClusterSetup();
  HelixAdmin helixAdmin = getHelixAdmin();
  switch (command) {
  case activate:
    if (superCluster == null) {
      return badRequest("Super Cluster name is missing!");
    }
    try {
      clusterSetup.activateCluster(clusterId, superCluster, true);
    } catch (Exception ex) {
      // Pass the exception as the last argument so SLF4J logs the stack trace.
      LOG.error("Failed to add cluster {} to super cluster {}.", clusterId, superCluster, ex);
      return serverError(ex);
    }
    break;
  case deactivate:
    if (superCluster == null) {
      return badRequest("Super Cluster name is missing!");
    }
    try {
      clusterSetup.activateCluster(clusterId, superCluster, false);
    } catch (Exception ex) {
      LOG.error("Failed to deactivate cluster {} from super cluster {}.", clusterId, superCluster,
          ex);
      return serverError(ex);
    }
    break;
  case addVirtualTopologyGroup:
    try {
      addVirtualTopologyGroup(clusterId, content);
    } catch (JsonProcessingException ex) {
      LOG.error("Failed to parse json string: {}", content, ex);
      return badRequest("Invalid payload json body: " + content);
    } catch (IllegalArgumentException ex) {
      LOG.error("Illegal input {} for command {}.", content, command, ex);
      return badRequest(String.format("Illegal input %s for command %s", content, command));
    } catch (Exception ex) {
      LOG.error("Failed to add virtual topology group to cluster {}", clusterId, ex);
      return serverError(ex);
    }
    break;
  case expand:
    try {
      clusterSetup.expandCluster(clusterId);
    } catch (Exception ex) {
      LOG.error("Failed to expand cluster {}.", clusterId, ex);
      return serverError(ex);
    }
    break;
  case enable:
    try {
      helixAdmin.enableCluster(clusterId, true);
    } catch (Exception ex) {
      LOG.error("Failed to enable cluster {}.", clusterId, ex);
      return serverError(ex);
    }
    break;
  case disable:
    try {
      helixAdmin.enableCluster(clusterId, false);
    } catch (Exception ex) {
      LOG.error("Failed to disable cluster {}.", clusterId, ex);
      return serverError(ex);
    }
    break;
  case enableMaintenanceMode:
  case disableMaintenanceMode:
    // Try to parse the content string. If parseable, use it as a KV mapping. Otherwise, treat it
    // as a REASON String
    Map<String, String> customFieldsMap = null;
    try {
      // Try to parse content
      customFieldsMap =
          OBJECT_MAPPER.readValue(content, new TypeReference<HashMap<String, String>>() {
          });
      // content is given as a KV mapping. Nullify content unless (case-insensitive) reason key present in map
      content = null;
      for (Map.Entry<String, String> entry : customFieldsMap.entrySet()) {
        if ("reason".equalsIgnoreCase(entry.getKey())) {
          content = entry.getValue();
        }
      }
    } catch (Exception e) {
      // NOP: content was a plain reason string, not JSON.
    }
    helixAdmin
        .manuallyEnableMaintenanceMode(clusterId, command == Command.enableMaintenanceMode,
            content, customFieldsMap);
    break;
  case enableWagedRebalanceForAllResources:
    // Enable WAGED rebalance for all resources in the cluster
    List<String> resources = helixAdmin.getResourcesInCluster(clusterId);
    try {
      helixAdmin.enableWagedRebalance(clusterId, resources);
    } catch (HelixException e) {
      return badRequest(e.getMessage());
    }
    break;
  case purgeOfflineParticipants:
    // Negative or missing duration means "use the cluster-config default".
    if (duration == null || duration < 0) {
      helixAdmin
          .purgeOfflineInstances(clusterId, ClusterConfig.OFFLINE_DURATION_FOR_PURGE_NOT_SET);
    } else {
      helixAdmin.purgeOfflineInstances(clusterId, duration);
    }
    break;
  case onDemandRebalance:
    try {
      helixAdmin.onDemandRebalance(clusterId);
    } catch (Exception ex) {
      LOG.error(
          "Cannot start on-demand rebalance for cluster: {}, Exception: {}", clusterId, ex);
      return serverError(ex);
    }
    break;
  default:
    // Fixed: the message previously concatenated a literal "{}" placeholder
    // ("Unsupported command {}.<command>") instead of formatting the command.
    return badRequest("Unsupported command " + command);
  }
  return OK();
}
// Parses the request payload as a flat string map and delegates to the
// VirtualTopologyGroupService; JSON errors propagate to the caller.
private void addVirtualTopologyGroup(String clusterId, String content) throws JsonProcessingException {
  ClusterService clusterService =
      new ClusterServiceImpl(getDataAccssor(clusterId), getConfigAccessor());
  VirtualTopologyGroupService groupService = new VirtualTopologyGroupService(
      getHelixAdmin(), clusterService, getConfigAccessor(), getDataAccssor(clusterId));
  Map<String, String> requestFields =
      OBJECT_MAPPER.readValue(content, new TypeReference<HashMap<String, String>>() { });
  groupService.addVirtualTopologyGroup(clusterId, requestFields);
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/management-mode")
public Response getClusterManagementMode(@PathParam("clusterId") String clusterId,
    @QueryParam("showDetails") boolean showDetails) {
  // Report the current management mode; optionally include per-participant progress.
  ClusterManagementMode mode = getHelixAdmin().getClusterManagementMode(clusterId);
  if (mode == null) {
    return notFound("Cluster " + clusterId + " is not in management mode");
  }
  Map<String, Object> payload = new HashMap<>();
  payload.put("cluster", clusterId);
  payload.put("mode", mode.getMode());
  payload.put("status", mode.getStatus());
  if (showDetails) {
    // To show details, query participants that are in progress to management mode.
    payload.put("details", getManagementModeDetails(clusterId, mode));
  }
  return JSONRepresentation(payload);
}
/**
 * Builds the "details" section of the management-mode status: overall cluster status plus
 * the set of live instances that have not yet completed the freeze/unfreeze transition,
 * and (for cluster freeze) whether any instance still has pending state transitions.
 */
private Map<String, Object> getManagementModeDetails(String clusterId,
    ClusterManagementMode mode) {
  Map<String, Object> details = new HashMap<>();
  Map<String, Object> participantDetails = new HashMap<>();
  ClusterManagementMode.Status status = mode.getStatus();
  details.put("cluster", ImmutableMap.of("cluster", clusterId, "status", status.name()));
  boolean hasPendingST = false;
  Set<String> liveInstancesInProgress = new HashSet<>();
  if (ClusterManagementMode.Status.IN_PROGRESS.equals(status)) {
    HelixDataAccessor accessor = getDataAccssor(clusterId);
    PropertyKey.Builder keyBuilder = accessor.keyBuilder();
    List<LiveInstance> liveInstances = accessor.getChildValues(keyBuilder.liveInstances());
    BaseDataAccessor<ZNRecord> baseAccessor = accessor.getBaseDataAccessor();
    if (ClusterManagementMode.Type.CLUSTER_FREEZE.equals(mode.getMode())) {
      // Entering cluster freeze mode, check live instance freeze status and pending ST
      for (LiveInstance liveInstance : liveInstances) {
        String instanceName = liveInstance.getInstanceName();
        if (!LiveInstance.LiveInstanceStatus.FROZEN.equals(liveInstance.getStatus())) {
          liveInstancesInProgress.add(instanceName);
        }
        Stat stat = baseAccessor
            .getStat(keyBuilder.messages(instanceName).getPath(), AccessOption.PERSISTENT);
        // Fixed NPE: getStat returns null when the messages znode is absent.
        if (stat != null && stat.getNumChildren() > 0) {
          hasPendingST = true;
          liveInstancesInProgress.add(instanceName);
        }
      }
    } else if (ClusterManagementMode.Type.NORMAL.equals(mode.getMode())) {
      // Exiting freeze mode, check live instance unfreeze status
      for (LiveInstance liveInstance : liveInstances) {
        if (LiveInstance.LiveInstanceStatus.FROZEN.equals(liveInstance.getStatus())) {
          liveInstancesInProgress.add(liveInstance.getInstanceName());
        }
      }
    }
  }
  participantDetails.put("status", status.name());
  participantDetails.put("liveInstancesInProgress", liveInstancesInProgress);
  if (ClusterManagementMode.Type.CLUSTER_FREEZE.equals(mode.getMode())) {
    // Add pending ST result for cluster freeze mode
    participantDetails.put("hasPendingStateTransition", hasPendingST);
  }
  details.put(ClusterProperties.liveInstances.name(), participantDetails);
  return details;
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("{clusterId}/management-mode")
// Puts the cluster into (or takes it out of) the management mode described by the JSON body.
// A concurrent conflicting request surfaces as HTTP 409.
public Response updateClusterManagementMode(@PathParam("clusterId") String clusterId,
    @DefaultValue("{}") String content) {
  ClusterManagementModeRequest request;
  try {
    request = OBJECT_MAPPER.readerFor(ClusterManagementModeRequest.class).readValue(content);
  } catch (JsonProcessingException e) {
    LOG.warn("Failed to parse json string: {}", content, e);
    return badRequest("Invalid payload json body: " + content);
  }
  // Need to add cluster name
  // Rebuild the request so the path's clusterId overrides any cluster name in the payload;
  // all other fields are copied through unchanged.
  request = ClusterManagementModeRequest.newBuilder()
      .withClusterName(clusterId)
      .withMode(request.getMode())
      .withCancelPendingST(request.isCancelPendingST())
      .withReason(request.getReason())
      .build();
  try {
    getHelixAdmin().setClusterManagementMode(request);
  } catch (HelixConflictException e) {
    // Another management-mode change is in flight.
    return Response.status(Response.Status.CONFLICT).entity(e.getMessage()).build();
  } catch (HelixException e) {
    return serverError(e.getMessage());
  }
  return JSONRepresentation(ImmutableMap.of("acknowledged", true));
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/configs")
public Response getClusterConfig(@PathParam("clusterId") String clusterId) {
  // Fetch the ClusterConfig record; a HelixException means the cluster does not exist,
  // which is reported as 404 rather than an error.
  ClusterConfig config = null;
  try {
    config = getConfigAccessor().getClusterConfig(clusterId);
  } catch (HelixException ex) {
    // cluster not found.
    LOG.info("Failed to get cluster config for cluster {}, cluster not found. Exception: {}.",
        clusterId, ex);
  } catch (Exception ex) {
    LOG.error("Failed to get cluster config for cluster {}. Exception: {}", clusterId, ex);
    return serverError(ex);
  }
  return config == null ? notFound() : JSONRepresentation(config.getRecord());
}
/**
 * Adds a CustomizedStateConfig to an existing cluster. The request body must be a ZNRecord
 * accepted by CustomizedStateConfig.Builder; malformed input yields 400.
 */
@ClusterAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@PUT
@Path("{clusterId}/customized-state-config")
public Response addCustomizedStateConfig(@PathParam("clusterId") String clusterId,
    String content) {
  if (!doesClusterExist(clusterId)) {
    return notFound(String.format("Cluster %s does not exist", clusterId));
  }
  HelixAdmin admin = getHelixAdmin();
  ZNRecord record;
  try {
    record = toZNRecord(content);
  } catch (IOException e) {
    // Fixed typo: "vaild" -> "valid" (consistent with the other config endpoints).
    return badRequest("Input is not a valid ZNRecord!");
  }
  try {
    CustomizedStateConfig customizedStateConfig =
        new CustomizedStateConfig.Builder(record).build();
    admin.addCustomizedStateConfig(clusterId, customizedStateConfig);
  } catch (Exception ex) {
    LOG.error("Cannot add CustomizedStateConfig to cluster: {} Exception: {}",
        clusterId, ex);
    return serverError(ex);
  }
  return OK();
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@DELETE
@Path("{clusterId}/customized-state-config")
public Response removeCustomizedStateConfig(@PathParam("clusterId") String clusterId) {
  // Drop the entire CustomizedStateConfig for the cluster.
  if (!doesClusterExist(clusterId)) {
    return notFound(String.format("Cluster %s does not exist", clusterId));
  }
  try {
    getHelixAdmin().removeCustomizedStateConfig(clusterId);
  } catch (Exception ex) {
    LOG.error(
        "Cannot remove CustomizedStateConfig from cluster: {}, Exception: {}",
        clusterId, ex);
    return serverError(ex);
  }
  return OK();
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/customized-state-config")
public Response getCustomizedStateConfig(@PathParam("clusterId") String clusterId) {
  // Return the cluster's CustomizedStateConfig record, or 404 when absent.
  if (!doesClusterExist(clusterId)) {
    return notFound(String.format("Cluster %s does not exist", clusterId));
  }
  CustomizedStateConfig config = getConfigAccessor().getCustomizedStateConfig(clusterId);
  return config == null ? notFound() : JSONRepresentation(config.getRecord());
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("{clusterId}/customized-state-config")
public Response updateCustomizedStateConfig(@PathParam("clusterId") String clusterId,
    @QueryParam("command") String commandStr, @QueryParam("type") String type) {
  // Add or delete a single customized-state type; a missing command defaults to add.
  if (!doesClusterExist(clusterId)) {
    return notFound(String.format("Cluster %s does not exist", clusterId));
  }
  Command command = Command.add; // Default behavior
  if (commandStr != null && !commandStr.isEmpty()) {
    try {
      command = getCommand(commandStr);
    } catch (HelixException ex) {
      return badRequest(ex.getMessage());
    }
  }
  HelixAdmin admin = getHelixAdmin();
  try {
    switch (command) {
    case delete:
      admin.removeTypeFromCustomizedStateConfig(clusterId, type);
      break;
    case add:
      admin.addTypeToCustomizedStateConfig(clusterId, type);
      break;
    default:
      return badRequest("Unsupported command " + commandStr);
    }
  } catch (Exception ex) {
    LOG.error("Failed to {} CustomizedStateConfig for cluster {} new type: {}, Exception: {}", command, clusterId, type, ex);
    return serverError(ex);
  }
  return OK();
}
/**
 * Returns the cluster topology (zones and instances) as JSON.
 */
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/topology")
public Response getClusterTopology(@PathParam("clusterId") String clusterId) throws IOException {
  //TODO reduce the GC by dependency injection
  ClusterService clusterService =
      new ClusterServiceImpl(getDataAccssor(clusterId), getConfigAccessor());
  ClusterTopology clusterTopology = clusterService.getClusterTopology(clusterId);
  // Reuse the shared (thread-safe) OBJECT_MAPPER instead of allocating a new
  // ObjectMapper on every request — ObjectMapper construction is expensive.
  return OK(OBJECT_MAPPER.writeValueAsString(clusterTopology));
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/topologymap")
public Response getClusterTopologyMap(@PathParam("clusterId") String clusterId) {
  // Expose the topology as a domain -> instance-list mapping.
  try {
    return JSONRepresentation(getHelixAdmin().getClusterTopology(clusterId).getTopologyMap());
  } catch (HelixException ex) {
    return badRequest(ex.getMessage());
  }
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/faultzonemap")
public Response getClusterFaultZoneMap(@PathParam("clusterId") String clusterId) {
  // Expose the topology as a fault-zone -> instance-list mapping.
  try {
    return JSONRepresentation(getHelixAdmin().getClusterTopology(clusterId).getFaultZoneMap());
  } catch (HelixException ex) {
    return badRequest(ex.getMessage());
  }
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("{clusterId}/configs")
// Updates (merges) or deletes fields of the cluster config. The payload ZNRecord id must
// match the cluster name; command=update merges fields, command=delete removes the listed
// fields from the CLUSTER-scoped config.
public Response updateClusterConfig(@PathParam("clusterId") String clusterId,
    @QueryParam("command") String commandStr, String content) {
  Command command;
  try {
    command = getCommand(commandStr);
  } catch (HelixException ex) {
    return badRequest(ex.getMessage());
  }
  ZNRecord record;
  try {
    record = toZNRecord(content);
  } catch (IOException e) {
    LOG.error("Failed to deserialize user's input {}. Exception: {}.", content, e);
    return badRequest("Input is not a valid ZNRecord!");
  }
  // Guard against accidentally applying one cluster's config to another.
  if (!clusterId.equals(record.getId())) {
    return badRequest("ID does not match the cluster name in input!");
  }
  ClusterConfig config = new ClusterConfig(record);
  ConfigAccessor configAccessor = getConfigAccessor();
  try {
    switch (command) {
    case update:
      configAccessor.updateClusterConfig(clusterId, config);
      break;
    case delete: {
      // Remove only the fields present in the posted record from the cluster scope.
      HelixConfigScope clusterScope =
          new HelixConfigScopeBuilder(HelixConfigScope.ConfigScopeProperty.CLUSTER)
              .forCluster(clusterId).build();
      configAccessor.remove(clusterScope, config.getRecord());
    }
      break;
    default:
      return badRequest("Unsupported command " + commandStr);
    }
  } catch (HelixException ex) {
    return notFound(ex.getMessage());
  } catch (Exception ex) {
    LOG
        .error("Failed to {} cluster config, cluster {}, new config: {}. Exception: {}.", command,
            clusterId, content, ex);
    return serverError(ex);
  }
  return OK();
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/controller")
public Response getClusterController(@PathParam("clusterId") String clusterId) {
  // Describe the current lead controller (or note its absence).
  HelixDataAccessor accessor = getDataAccssor(clusterId);
  LiveInstance leader = accessor.getProperty(accessor.keyBuilder().controllerLeader());
  Map<String, Object> info = new HashMap<>();
  info.put(Properties.id.name(), clusterId);
  if (leader == null) {
    info.put(ClusterProperties.controller.name(), "No Lead Controller!");
  } else {
    info.put(ClusterProperties.controller.name(), leader.getInstanceName());
    // Include the leader's LiveInstance simple fields (session, version, ...).
    info.putAll(leader.getRecord().getSimpleFields());
  }
  return JSONRepresentation(info);
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/controller/history")
public Response getClusterControllerLeadershipHistory(@PathParam("clusterId") String clusterId) {
  // Leadership changes recorded in the controller HISTORY znode.
  Map<String, Object> history =
      getControllerHistory(clusterId, ControllerHistory.HistoryType.CONTROLLER_LEADERSHIP);
  return JSONRepresentation(history);
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/controller/maintenanceHistory")
public Response getClusterMaintenanceHistory(@PathParam("clusterId") String clusterId) {
  // Maintenance-mode transitions recorded in the controller HISTORY znode.
  Map<String, Object> history =
      getControllerHistory(clusterId, ControllerHistory.HistoryType.MAINTENANCE);
  return JSONRepresentation(history);
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/controller/maintenanceSignal")
public Response getClusterMaintenanceSignal(@PathParam("clusterId") String clusterId) {
  // The maintenance signal only exists while the cluster is in maintenance mode.
  if (!getHelixAdmin().isInMaintenanceMode(clusterId)) {
    return notFound(String.format("Cluster %s is not in maintenance mode!", clusterId));
  }
  HelixDataAccessor accessor = getDataAccssor(clusterId);
  MaintenanceSignal signal = accessor.getProperty(accessor.keyBuilder().maintenance());
  Map<String, String> info =
      (signal != null) ? signal.getRecord().getSimpleFields() : new HashMap<>();
  info.put(ClusterProperties.clusterName.name(), clusterId);
  return JSONRepresentation(info);
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/controller/messages")
public Response getClusterControllerMessages(@PathParam("clusterId") String clusterId) {
  // List pending controller message ids plus their count.
  HelixDataAccessor accessor = getDataAccssor(clusterId);
  List<String> messageIds =
      accessor.getChildNames(accessor.keyBuilder().controllerMessages());
  Map<String, Object> payload = new HashMap<>();
  payload.put(Properties.id.name(), clusterId);
  payload.put(ClusterProperties.messages.name(), messageIds);
  payload.put(Properties.count.name(), messageIds.size());
  return JSONRepresentation(payload);
}
/**
 * Returns the raw ZNRecord of a single controller message, or 404 when the message
 * does not exist (e.g. already processed and deleted).
 */
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/controller/messages/{messageId}")
public Response getClusterControllerMessages(@PathParam("clusterId") String clusterId,
    @PathParam("messageId") String messageId) {
  HelixDataAccessor dataAccessor = getDataAccssor(clusterId);
  Message message =
      dataAccessor.getProperty(dataAccessor.keyBuilder().controllerMessage(messageId));
  // Fixed NPE (HTTP 500) when the requested message was absent.
  if (message == null) {
    return notFound(
        String.format("Message %s does not exist in cluster %s", messageId, clusterId));
  }
  return JSONRepresentation(message.getRecord());
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/statemodeldefs")
public Response getClusterStateModelDefinitions(@PathParam("clusterId") String clusterId) {
  // List the names of all state model definitions registered for the cluster.
  HelixDataAccessor accessor = getDataAccssor(clusterId);
  List<String> defNames = accessor.getChildNames(accessor.keyBuilder().stateModelDefs());
  Map<String, Object> payload = new HashMap<>();
  payload.put(Properties.id.name(), clusterId);
  payload.put(ClusterProperties.stateModelDefinitions.name(), defNames);
  return JSONRepresentation(payload);
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/statemodeldefs/{statemodel}")
public Response getClusterStateModelDefinition(@PathParam("clusterId") String clusterId,
    @PathParam("statemodel") String statemodel) {
  // Return one state model definition record, or 400 if it is not registered.
  HelixDataAccessor accessor = getDataAccssor(clusterId);
  StateModelDefinition definition =
      accessor.getProperty(accessor.keyBuilder().stateModelDef(statemodel));
  return definition == null
      ? badRequest("Statemodel not found!")
      : JSONRepresentation(definition.getRecord());
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@PUT
@Path("{clusterId}/statemodeldefs/{statemodel}")
// Creates a state model definition znode under the cluster from the posted ZNRecord.
// NOTE(review): the {statemodel} path parameter is never read — the created znode is named
// by the record id in the payload; confirm whether the two should be validated to match.
public Response createClusterStateModelDefinition(@PathParam("clusterId") String clusterId,
    @PathParam("statemodel") String statemodel, String content) {
  ZNRecord record;
  try {
    record = toZNRecord(content);
  } catch (IOException e) {
    LOG.error("Failed to deserialize user's input {}. Exception: {}.", content, e);
    return badRequest("Input is not a valid ZNRecord!");
  }
  RealmAwareZkClient zkClient = getRealmAwareZkClient();
  String path = PropertyPathBuilder.stateModelDef(clusterId);
  try {
    // Creates a child znode (named by the record id) under the stateModelDefs path.
    ZKUtil.createChildren(zkClient, path, record);
  } catch (Exception e) {
    LOG.error("Failed to create zk node with path {}. Exception: {}", path, e);
    return badRequest("Failed to create a Znode for stateModel! " + e);
  }
  return OK();
}
/**
 * Overwrites (or creates) a state model definition from the posted ZNRecord. The target
 * znode is named by the record id, not by the {statemodel} path parameter.
 */
@ClusterAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("{clusterId}/statemodeldefs/{statemodel}")
public Response setClusterStateModelDefinition(@PathParam("clusterId") String clusterId,
    @PathParam("statemodel") String statemodel, String content) {
  ZNRecord record;
  try {
    record = toZNRecord(content);
  } catch (IOException e) {
    LOG.error("Failed to deserialize user's input {}. Exception: {}.", content, e);
    return badRequest("Input is not a valid ZNRecord!");
  }
  StateModelDefinition stateModelDefinition = new StateModelDefinition(record);
  HelixDataAccessor dataAccessor = getDataAccssor(clusterId);
  PropertyKey key = dataAccessor.keyBuilder().stateModelDef(stateModelDefinition.getId());
  boolean succeeded;
  try {
    succeeded = dataAccessor.setProperty(key, stateModelDefinition);
  } catch (Exception e) {
    LOG.error("Failed to set StateModelDefinition key: {}. Exception: {}.", key, e);
    return badRequest("Failed to set the content " + content);
  }
  // Fixed: the write result was previously ignored and OK was returned even when
  // setProperty reported failure.
  if (!succeeded) {
    return badRequest("Failed to set the content " + content);
  }
  return OK();
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@DELETE
@Path("{clusterId}/statemodeldefs/{statemodel}")
public Response removeClusterStateModelDefinition(@PathParam("clusterId") String clusterId,
    @PathParam("statemodel") String statemodel) {
  // Only plain alphanumeric names are accepted, which rules out path tricks like "../".
  if (!StringUtils.isAlphanumeric(statemodel)) {
    return badRequest("Invalid statemodel name!");
  }
  HelixDataAccessor accessor = getDataAccssor(clusterId);
  PropertyKey key = accessor.keyBuilder().stateModelDef(statemodel);
  boolean removed;
  try {
    removed = accessor.removeProperty(key);
  } catch (Exception e) {
    LOG.error("Failed to remove StateModelDefinition key: {}. Exception: {}.", key, e);
    removed = false;
  }
  return removed ? OK() : badRequest("Failed to remove!");
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@PUT
@Path("{clusterId}/restconfig")
public Response createRESTConfig(@PathParam("clusterId") String clusterId,
    String content) {
  // The payload's ZNRecord id must equal the cluster name it is being stored under.
  ZNRecord record;
  try {
    record = toZNRecord(content);
  } catch (IOException e) {
    LOG.error("Failed to deserialize user's input {}. Exception: {}.", content, e);
    return badRequest("Input is not a valid ZNRecord!");
  }
  if (!record.getId().equals(clusterId)) {
    return badRequest("ID does not match the cluster name in input!");
  }
  try {
    getConfigAccessor().setRESTConfig(clusterId, new RESTConfig(record));
  } catch (HelixException ex) {
    // TODO: Could use a more generic error for HelixException
    return notFound(ex.getMessage());
  } catch (Exception ex) {
    LOG.error("Failed to create rest config, cluster {}, new config: {}. Exception: {}.", clusterId, content, ex);
    return serverError(ex);
  }
  return OK();
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("{clusterId}/restconfig")
// Updates (merges) or deletes fields of the cluster's RESTConfig; command=delete removes
// only the fields present in the posted record from the REST scope.
public Response updateRESTConfig(@PathParam("clusterId") String clusterId,
    @QueryParam("command") String commandStr, String content) {
  //TODO: abstract out the logic that is duplicated from cluster config methods
  Command command;
  try {
    command = getCommand(commandStr);
  } catch (HelixException ex) {
    return badRequest(ex.getMessage());
  }
  ZNRecord record;
  try {
    record = toZNRecord(content);
  } catch (IOException e) {
    LOG.error("Failed to deserialize user's input {}. Exception: {}", content, e);
    return badRequest("Input is not a valid ZNRecord!");
  }
  RESTConfig config = new RESTConfig(record);
  ConfigAccessor configAccessor = getConfigAccessor();
  try {
    switch (command) {
    case update:
      configAccessor.updateRESTConfig(clusterId, config);
      break;
    case delete: {
      // Field-level removal within the REST config scope, not full znode deletion.
      HelixConfigScope scope =
          new HelixConfigScopeBuilder(HelixConfigScope.ConfigScopeProperty.REST)
              .forCluster(clusterId).build();
      configAccessor.remove(scope, config.getRecord());
    }
      break;
    default:
      return badRequest("Unsupported command " + commandStr);
    }
  } catch (HelixException ex) {
    return notFound(ex.getMessage());
  } catch (Exception ex) {
    LOG.error(
        "Failed to {} rest config, cluster {}, new config: {}. Exception: {}", command, clusterId, content, ex);
    return serverError(ex);
  }
  return OK();
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/restconfig")
public Response getRESTConfig(@PathParam("clusterId") String clusterId) {
  // A HelixException here means the cluster does not exist; report 404.
  RESTConfig config = null;
  try {
    config = getConfigAccessor().getRESTConfig(clusterId);
  } catch (HelixException ex) {
    LOG.info(
        "Failed to get rest config for cluster {}, cluster not found. Exception: {}.", clusterId, ex);
  } catch (Exception ex) {
    LOG.error("Failed to get rest config for cluster {}. Exception: {}.", clusterId, ex);
    return serverError(ex);
  }
  return config == null ? notFound() : JSONRepresentation(config.getRecord());
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@DELETE
@Path("{clusterId}/restconfig")
public Response deleteRESTConfig(@PathParam("clusterId") String clusterId) {
  // Delete the whole RESTConfig znode; absence is reported as 404.
  try {
    getConfigAccessor().deleteRESTConfig(clusterId);
  } catch (HelixException ex) {
    LOG.info("Failed to delete rest config for cluster {}, cluster rest config is not found. Exception: {}.", clusterId, ex);
    return notFound(ex.getMessage());
  } catch (Exception ex) {
    LOG.error("Failed to delete rest config, cluster {}, Exception: {}.", clusterId, ex);
    return serverError(ex);
  }
  return OK();
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/maintenance")
public Response getClusterMaintenanceMode(@PathParam("clusterId") String clusterId) {
  // Single-field payload: whether the cluster is currently in maintenance mode.
  boolean inMaintenance = getHelixAdmin().isInMaintenanceMode(clusterId);
  return JSONRepresentation(
      ImmutableMap.of(ClusterProperties.maintenance.name(), inMaintenance));
}
// A cluster "exists" only when its full znode layout has been initialized in ZK.
private boolean doesClusterExist(String cluster) {
  return ZKUtil.isClusterSetup(cluster, getRealmAwareZkClient());
}
/**
 * Adds a CloudConfig to an already-set-up cluster. Builder-side validation failures
 * (HelixException) are reported as 400.
 */
@ClusterAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@PUT
@Path("{clusterId}/cloudconfig")
public Response addCloudConfig(@PathParam("clusterId") String clusterId, String content) {
  RealmAwareZkClient zkClient = getRealmAwareZkClient();
  if (!ZKUtil.isClusterSetup(clusterId, zkClient)) {
    return notFound("Cluster is not properly setup!");
  }
  HelixAdmin admin = getHelixAdmin();
  ZNRecord record;
  try {
    record = toZNRecord(content);
  } catch (IOException e) {
    LOG.error("Failed to deserialize user's input " + content + ", Exception: " + e);
    // Fixed typo: "vaild" -> "valid" (consistent with the other config endpoints).
    return badRequest("Input is not a valid ZNRecord!");
  }
  try {
    CloudConfig cloudConfig = new CloudConfig.Builder(record).build();
    admin.addCloudConfig(clusterId, cloudConfig);
  } catch (HelixException ex) {
    LOG.error("Error in adding a CloudConfig to cluster: " + clusterId, ex);
    return badRequest(ex.getMessage());
  } catch (Exception ex) {
    LOG.error("Cannot add CloudConfig to cluster: " + clusterId, ex);
    return serverError(ex);
  }
  return OK();
}
@ClusterAuth
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{clusterId}/cloudconfig")
public Response getCloudConfig(@PathParam("clusterId") String clusterId) {
  // Return the cluster's CloudConfig record, or 404 when the cluster or config is absent.
  RealmAwareZkClient zkClient = getRealmAwareZkClient();
  if (!ZKUtil.isClusterSetup(clusterId, zkClient)) {
    return notFound();
  }
  CloudConfig cloudConfig = new ConfigAccessor(zkClient).getCloudConfig(clusterId);
  return cloudConfig == null ? notFound() : JSONRepresentation(cloudConfig.getRecord());
}
/**
 * Removes the cluster's CloudConfig znode.
 */
@ClusterAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@DELETE
@Path("{clusterId}/cloudconfig")
public Response deleteCloudConfig(@PathParam("clusterId") String clusterId) {
  // Wrap in try/catch for parity with the other config-deletion endpoints; previously a
  // failure here leaked an unhandled exception instead of a structured server error.
  try {
    getHelixAdmin().removeCloudConfig(clusterId);
  } catch (Exception ex) {
    LOG.error("Failed to delete CloudConfig from cluster: {}. Exception: {}.", clusterId, ex);
    return serverError(ex);
  }
  return OK();
}
/**
 * Updates the cluster's CloudConfig. command=update (the default) merges the posted
 * fields; command=delete removes the posted fields from the stored config.
 */
@ClusterAuth
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("{clusterId}/cloudconfig")
public Response updateCloudConfig(@PathParam("clusterId") String clusterId,
    @QueryParam("command") String commandStr, String content) {
  RealmAwareZkClient zkClient = getRealmAwareZkClient();
  if (!ZKUtil.isClusterSetup(clusterId, zkClient)) {
    return notFound();
  }
  ConfigAccessor configAccessor = new ConfigAccessor(zkClient);
  // Here to update cloud config
  Command command;
  if (commandStr == null || commandStr.isEmpty()) {
    command = Command.update; // Default behavior
  } else {
    try {
      command = getCommand(commandStr);
    } catch (HelixException ex) {
      return badRequest(ex.getMessage());
    }
  }
  ZNRecord record;
  CloudConfig cloudConfig;
  try {
    record = toZNRecord(content);
    cloudConfig = new CloudConfig(record);
  } catch (IOException e) {
    LOG.error("Failed to deserialize user's input " + content + ", Exception: " + e);
    // Fixed typo: "vaild" -> "valid" (consistent with the other config endpoints).
    return badRequest("Input is not a valid ZNRecord!");
  }
  try {
    switch (command) {
    case delete: {
      configAccessor.deleteCloudConfigFields(clusterId, cloudConfig);
    }
      break;
    case update: {
      try {
        configAccessor.updateCloudConfig(clusterId, cloudConfig);
      } catch (HelixException ex) {
        LOG.error("Error in updating a CloudConfig to cluster: " + clusterId, ex);
        return badRequest(ex.getMessage());
      } catch (Exception ex) {
        LOG.error("Cannot update CloudConfig for cluster: " + clusterId, ex);
        return serverError(ex);
      }
    }
      break;
    default:
      return badRequest("Unsupported command " + commandStr);
    }
  } catch (Exception ex) {
    LOG.error("Failed to " + command + " cloud config, cluster " + clusterId + " new config: "
        + content + ", Exception: " + ex);
    return serverError(ex);
  }
  return OK();
}
/**
* Reads HISTORY ZNode from the metadata store and generates a Map object that contains the
* pertinent history entries depending on the history type.
* @param clusterId
* @param historyType
* @return
*/
private Map<String, Object> getControllerHistory(String clusterId,
ControllerHistory.HistoryType historyType) {
HelixDataAccessor dataAccessor = getDataAccssor(clusterId);
Map<String, Object> history = new HashMap<>();
history.put(Properties.id.name(), clusterId);
ControllerHistory historyRecord =
dataAccessor.getProperty(dataAccessor.keyBuilder().controllerLeaderHistory());
switch (historyType) {
case CONTROLLER_LEADERSHIP:
history.put(Properties.history.name(),
historyRecord != null ? historyRecord.getHistoryList() : Collections.emptyList());
break;
case MAINTENANCE:
history.put(ClusterProperties.maintenanceHistory.name(),
historyRecord != null ? historyRecord.getMaintenanceHistoryList()
: Collections.emptyList());
break;
}
return history;
}
  /**
   * Looks up the {@code AclRegister} implementation injected into the JAX-RS
   * application context under {@code ContextPropertyKeys.ACL_REGISTER}.
   * @return the registered AclRegister instance
   */
  private AclRegister getAclRegister() {
    return (AclRegister) _application.getProperties().get(ContextPropertyKeys.ACL_REGISTER.name());
  }
}
| 9,356 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources/helix/PropertyStoreAccessor.java
|
package org.apache.helix.rest.server.resources.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.QueryParam;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.codahale.metrics.annotation.ResponseMetered;
import com.codahale.metrics.annotation.Timed;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.helix.AccessOption;
import org.apache.helix.BaseDataAccessor;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.msdcommon.util.ZkValidationUtil;
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.server.filters.ClusterAuth;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.serializer.ZNRecordSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ClusterAuth
@Path("/clusters/{clusterId}/propertyStore")
public class PropertyStoreAccessor extends AbstractHelixResource {
  private static Logger LOG = LoggerFactory.getLogger(PropertyStoreAccessor.class);
  // JSON key used to wrap raw (non-ZNRecord) node content in GET responses.
  private static final String CONTENT_KEY = "content";
  private static final ZNRecordSerializer ZN_RECORD_SERIALIZER = new ZNRecordSerializer();

  /**
   * Sample HTTP URLs:
   * http://<HOST>/clusters/{clusterId}/propertyStore/<PATH>
   * It refers to the /PROPERTYSTORE/<PATH> in Helix metadata store
   * @param clusterId The cluster Id
   * @param path path parameter is like "abc/abc/abc" in the URL
   * @return If the payload is ZNRecord format, return ZnRecord json response;
   *         Otherwise, return json object {<PATH>: raw string}
   */
  @ResponseMetered(name = HttpConstants.READ_REQUEST)
  @Timed(name = HttpConstants.READ_REQUEST)
  @GET
  @Path("{path: .+}")
  public Response getPropertyByPath(@PathParam("clusterId") String clusterId,
      @PathParam("path") String path) {
    path = "/" + path;
    if (!ZkValidationUtil.isPathValid(path)) {
      LOG.info("The propertyStore path {} is invalid for cluster {}", path, clusterId);
      return badRequest(
          "Invalid path string. Valid path strings use slash as the directory separator and names the location of ZNode");
    }
    final String recordPath = PropertyPathBuilder.propertyStore(clusterId) + path;
    BaseDataAccessor<byte[]> propertyStoreDataAccessor = getByteArrayDataAccessor();
    if (!propertyStoreDataAccessor.exists(recordPath, AccessOption.PERSISTENT)) {
      throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND)
          .type(MediaType.TEXT_PLAIN)
          .entity(String.format("The property store path %s doesn't exist", recordPath))
          .build());
    }
    byte[] bytes = propertyStoreDataAccessor.get(recordPath, null, AccessOption.PERSISTENT);
    if (bytes == null) {
      // Node exists but carries no data.
      throw new WebApplicationException(Response.status(Response.Status.NO_CONTENT).build());
    }
    ZNRecord znRecord = (ZNRecord) ZN_RECORD_SERIALIZER.deserialize(bytes);
    // The ZNRecordSerializer returns null when exception occurs in deserialization method
    if (znRecord == null) {
      // If the zk node cannot be deserialized, return the content directly.
      ObjectNode jsonNode = OBJECT_MAPPER.createObjectNode();
      // Decode explicitly as UTF-8; new String(bytes) would depend on the JVM's
      // platform default charset and could corrupt non-ASCII content.
      jsonNode.put(CONTENT_KEY, new String(bytes, java.nio.charset.StandardCharsets.UTF_8));
      return JSONRepresentation(jsonNode);
    } else {
      return JSONRepresentation(znRecord);
    }
  }

  /**
   * Sample HTTP URLs:
   * http://<HOST>/clusters/{clusterId}/propertyStore/<PATH>
   * It refers to the /PROPERTYSTORE/<PATH> in Helix metadata store
   * @param clusterId The cluster Id
   * @param path path parameter is like "abc/abc/abc" in the URL
   * @param isZNRecord true if the content represents a ZNRecord. false means byte array.
   * @param content the payload to store at the path
   * @return 200 on success; 400 when isZNRecord=true and the payload is not a valid
   *         ZNRecord; 500 when the write fails
   */
  @ResponseMetered(name = HttpConstants.WRITE_REQUEST)
  @Timed(name = HttpConstants.WRITE_REQUEST)
  @PUT
  @Path("{path: .+}")
  public Response putPropertyByPath(@PathParam("clusterId") String clusterId,
      @PathParam("path") String path,
      @QueryParam("isZNRecord") @DefaultValue("true") String isZNRecord, String content) {
    path = "/" + path;
    if (!ZkValidationUtil.isPathValid(path)) {
      LOG.info("The propertyStore path {} is invalid for cluster {}", path, clusterId);
      return badRequest(
          "Invalid path string. Valid path strings use slash as the directory separator and names the location of ZNode");
    }
    final String recordPath = PropertyPathBuilder.propertyStore(clusterId) + path;
    try {
      if (Boolean.parseBoolean(isZNRecord)) {
        try {
          ZNRecord record = toZNRecord(content);
          BaseDataAccessor<ZNRecord> propertyStoreDataAccessor =
              getDataAccssor(clusterId).getBaseDataAccessor();
          if (!propertyStoreDataAccessor.set(recordPath, record, AccessOption.PERSISTENT)) {
            return serverError(
                "Failed to set content: " + content + " in PropertyStore path: " + path);
          }
        } catch (IOException e) {
          LOG.error("Failed to deserialize content " + content + " into a ZNRecord!", e);
          return badRequest(
              "Failed to write to path: " + recordPath + "! Content is not a valid ZNRecord!");
        }
      } else {
        BaseDataAccessor<byte[]> propertyStoreDataAccessor = getByteArrayDataAccessor();
        // Encode explicitly as UTF-8 instead of the platform default charset so that
        // reads and writes across differently-configured JVMs agree.
        if (!propertyStoreDataAccessor.set(recordPath,
            content.getBytes(java.nio.charset.StandardCharsets.UTF_8),
            AccessOption.PERSISTENT)) {
          return serverError(
              "Failed to set content: " + content + " in PropertyStore path: " + path);
        }
      }
      return OK();
    } catch (Exception e) {
      return serverError(e);
    }
  }

  /**
   * Recursively deletes the PropertyStore path. If the node does not exist, it returns OK().
   * @param clusterId the cluster whose property store is modified
   * @param path path parameter is like "abc/abc/abc" in the URL
   * @return 200 on success; 400 on an invalid path; 500 when the removal fails
   */
  @ResponseMetered(name = HttpConstants.WRITE_REQUEST)
  @Timed(name = HttpConstants.WRITE_REQUEST)
  @DELETE
  @Path("{path: .+}")
  public Response deletePropertyByPath(@PathParam("clusterId") String clusterId,
      @PathParam("path") String path) {
    path = "/" + path;
    if (!ZkValidationUtil.isPathValid(path)) {
      LOG.info("The propertyStore path {} is invalid for cluster {}", path, clusterId);
      return badRequest(
          "Invalid path string. Valid path strings use slash as the directory separator and names the location of ZNode");
    }
    final String recordPath = PropertyPathBuilder.propertyStore(clusterId) + path;
    BaseDataAccessor<byte[]> propertyStoreDataAccessor = getByteArrayDataAccessor();
    if (!propertyStoreDataAccessor.remove(recordPath, AccessOption.PERSISTENT)) {
      return serverError("Failed to delete PropertyStore record in path: " + path);
    }
    return OK();
  }
}
| 9,357 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources/helix/JobAccessor.java
|
package org.apache.helix.rest.server.resources.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Response;
import com.codahale.metrics.annotation.ResponseMetered;
import com.codahale.metrics.annotation.Timed;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.helix.HelixException;
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.server.filters.ClusterAuth;
import org.apache.helix.task.JobConfig;
import org.apache.helix.task.JobContext;
import org.apache.helix.task.TaskConfig;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.WorkflowConfig;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.zkclient.exception.ZkNoNodeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ClusterAuth
@Path("/clusters/{clusterId}/workflows/{workflowName}/jobs")
public class JobAccessor extends AbstractHelixResource {
  private static Logger _logger = LoggerFactory.getLogger(JobAccessor.class.getName());

  // JSON field names used when serializing job-related responses. TASK_COMMAND is the
  // per-task map-field key identifying the task's command string.
  public enum JobProperties {
    Jobs,
    JobConfig,
    JobContext,
    TASK_COMMAND
  }

  /**
   * Lists the names of all jobs in the given workflow's DAG.
   * @return 200 with {"id": "Jobs", "Jobs": [...]}; 400 when the workflow is unknown
   */
  @ResponseMetered(name = HttpConstants.READ_REQUEST)
  @Timed(name = HttpConstants.READ_REQUEST)
  @GET
  public Response getJobs(@PathParam("clusterId") String clusterId,
      @PathParam("workflowName") String workflowName) {
    TaskDriver driver = getTaskDriver(clusterId);
    WorkflowConfig workflowConfig = driver.getWorkflowConfig(workflowName);
    ObjectNode root = JsonNodeFactory.instance.objectNode();
    if (workflowConfig == null) {
      return badRequest(String.format("Workflow %s is not found!", workflowName));
    }
    Set<String> jobs = workflowConfig.getJobDag().getAllNodes();
    root.put(Properties.id.name(), JobProperties.Jobs.name());
    ArrayNode jobsNode = root.putArray(JobProperties.Jobs.name());
    if (jobs != null) {
      jobsNode.addAll((ArrayNode) OBJECT_MAPPER.valueToTree(jobs));
    }
    return JSONRepresentation(root);
  }

  /**
   * Returns a job's config and context records.
   * The JobContext key is always present in the response; its value is null when the
   * job has not produced a context yet.
   * @return 200 with {"JobConfig": ..., "JobContext": ...}; 400 when the job config
   *         does not exist
   */
  @ResponseMetered(name = HttpConstants.READ_REQUEST)
  @Timed(name = HttpConstants.READ_REQUEST)
  @GET
  @Path("{jobName}")
  public Response getJob(@PathParam("clusterId") String clusterId,
      @PathParam("workflowName") String workflowName, @PathParam("jobName") String jobName) {
    TaskDriver driver = getTaskDriver(clusterId);
    Map<String, ZNRecord> jobMap = new HashMap<>();
    JobConfig jobConfig = driver.getJobConfig(jobName);
    if (jobConfig == null) {
      // Grammar fixed: "does not exists" -> "does not exist".
      return badRequest(String.format("Job config for %s does not exist", jobName));
    }
    jobMap.put(JobProperties.JobConfig.name(), jobConfig.getRecord());
    JobContext jobContext = driver.getJobContext(jobName);
    // Collapses the original put-null-then-overwrite; the serialized map is identical.
    jobMap.put(JobProperties.JobContext.name(),
        jobContext != null ? jobContext.getRecord() : null);
    return JSONRepresentation(jobMap);
  }

  /**
   * Enqueues a job, defined by a ZNRecord payload, into the given workflow/queue.
   * @param content JSON payload that must deserialize into a ZNRecord job config
   * @return 200 on success; 400 when the payload is invalid or the enqueue fails
   */
  @ResponseMetered(name = HttpConstants.WRITE_REQUEST)
  @Timed(name = HttpConstants.WRITE_REQUEST)
  @PUT
  @Path("{jobName}")
  public Response addJob(@PathParam("clusterId") String clusterId,
      @PathParam("workflowName") String workflowName, @PathParam("jobName") String jobName,
      String content) {
    ZNRecord record;
    TaskDriver driver = getTaskDriver(clusterId);
    try {
      record = toZNRecord(content);
      JobConfig.Builder jobConfig = JobAccessor.getJobConfig(record);
      driver.enqueueJob(workflowName, jobName, jobConfig);
    } catch (HelixException e) {
      return badRequest(
          String.format("Failed to enqueue job %s for reason : %s", jobName, e.getMessage()));
    } catch (IOException e) {
      return badRequest(String.format("Invalid input for Job Config of Job : %s", jobName));
    }
    return OK();
  }

  /**
   * Deletes a job from a workflow/queue; force=true bypasses the graceful deletion path.
   * @return 200 on success; 400 when the task framework rejects the deletion
   */
  @ResponseMetered(name = HttpConstants.WRITE_REQUEST)
  @Timed(name = HttpConstants.WRITE_REQUEST)
  @DELETE
  @Path("{jobName}")
  public Response deleteJob(@PathParam("clusterId") String clusterId,
      @PathParam("workflowName") String workflowName, @PathParam("jobName") String jobName,
      @QueryParam("force") @DefaultValue("false") String forceDelete) {
    boolean force = Boolean.parseBoolean(forceDelete);
    TaskDriver driver = getTaskDriver(clusterId);
    try {
      driver.deleteJob(workflowName, jobName, force);
    } catch (Exception e) {
      return badRequest(e.getMessage());
    }
    return OK();
  }

  /**
   * Returns a job's JobConfig record as JSON.
   * @return 200 with the config record; 400 when the job config does not exist
   */
  @ResponseMetered(name = HttpConstants.READ_REQUEST)
  @Timed(name = HttpConstants.READ_REQUEST)
  @GET
  @Path("{jobName}/configs")
  public Response getJobConfig(@PathParam("clusterId") String clusterId,
      @PathParam("workflowName") String workflowName, @PathParam("jobName") String jobName) {
    TaskDriver driver = getTaskDriver(clusterId);
    JobConfig jobConfig = driver.getJobConfig(jobName);
    if (jobConfig != null) {
      return JSONRepresentation(jobConfig.getRecord());
    }
    // Grammar fixed: "does not exists" -> "does not exist".
    return badRequest("Job config for " + jobName + " does not exist");
  }

  /**
   * Returns a job's runtime JobContext record as JSON.
   * @return 200 with the context record; 400 when the job context does not exist
   */
  @ResponseMetered(name = HttpConstants.READ_REQUEST)
  @Timed(name = HttpConstants.READ_REQUEST)
  @GET
  @Path("{jobName}/context")
  public Response getJobContext(@PathParam("clusterId") String clusterId,
      @PathParam("workflowName") String workflowName, @PathParam("jobName") String jobName) {
    TaskDriver driver = getTaskDriver(clusterId);
    JobContext jobContext = driver.getJobContext(jobName);
    if (jobContext != null) {
      return JSONRepresentation(jobContext.getRecord());
    }
    // Grammar fixed: "does not exists" -> "does not exist".
    return badRequest("Job context for " + jobName + " does not exist");
  }

  /**
   * Reads the user-content key/value store of a job.
   * @return 200 with the map; 404 when the workflow/job or its store is missing
   */
  @ResponseMetered(name = HttpConstants.READ_REQUEST)
  @Timed(name = HttpConstants.READ_REQUEST)
  @GET
  @Path("{jobName}/userContent")
  public Response getJobUserContent(@PathParam("clusterId") String clusterId,
      @PathParam("workflowName") String workflowName, @PathParam("jobName") String jobName) {
    TaskDriver taskDriver = getTaskDriver(clusterId);
    try {
      Map<String, String> contentStore =
          taskDriver.getJobUserContentMap(workflowName, jobName);
      if (contentStore == null) {
        return notFound(String.format(
            "Unable to find content store. Workflow (%s) or Job (%s) does not exist.",
            workflowName, jobName));
      }
      return JSONRepresentation(contentStore);
    } catch (ZkNoNodeException e) {
      return notFound("Unable to find content store");
    } catch (Exception e) {
      return serverError(e);
    }
  }

  /**
   * Updates the user-content store of a job. Only the "update" command is supported;
   * a missing or empty command defaults to update.
   * @param content JSON object deserialized into a Map of String to String
   * @return 200 on success; 400 on bad payload/command; 404 when the workflow/job is
   *         missing; 500 on unexpected errors
   */
  @ResponseMetered(name = HttpConstants.WRITE_REQUEST)
  @Timed(name = HttpConstants.WRITE_REQUEST)
  @POST
  @Path("{jobName}/userContent")
  public Response updateJobUserContent(
      @PathParam("clusterId") String clusterId,
      @PathParam("workflowName") String workflowName,
      @PathParam("jobName") String jobName,
      @QueryParam("command") String commandStr,
      String content
  ) {
    Command cmd;
    Map<String, String> contentMap = Collections.emptyMap();
    try {
      contentMap = OBJECT_MAPPER.readValue(content, new TypeReference<Map<String, String>>() {
      });
      cmd = (commandStr == null || commandStr.isEmpty())
          ? Command.update
          : Command.valueOf(commandStr);
    } catch (IOException e) {
      return badRequest(String
          .format("Content %s cannot be deserialized to Map<String, String>. Err: %s", content,
              e.getMessage()));
    } catch (IllegalArgumentException ie) {
      return badRequest(String.format("Invalid command: %s. Err: %s", commandStr, ie.getMessage()));
    }
    TaskDriver driver = getTaskDriver(clusterId);
    try {
      switch (cmd) {
        case update:
          driver.addOrUpdateJobUserContentMap(workflowName, jobName, contentMap);
          return OK();
        default:
          return badRequest(String.format("Command \"%s\" is not supported!", cmd));
      }
    } catch (NullPointerException npe) {
      // ZkCacheBasedDataAccessor would throw npe if workflow or job does not exist
      return notFound(String.format(
          "Unable to find content store. Workflow (%s) or Job (%s) does not exist.",
          workflowName, jobName));
    } catch (Exception e) {
      _logger.error("Failed to update user content store", e);
      return serverError(e);
    }
  }

  /** Builds a JobConfig.Builder from a flat simple-field map. */
  protected static JobConfig.Builder getJobConfig(Map<String, String> cfgMap) {
    return new JobConfig.Builder().fromMap(cfgMap);
  }

  /**
   * Builds a JobConfig.Builder from a ZNRecord: simple fields become job-level config,
   * map fields become per-task configs.
   */
  protected static JobConfig.Builder getJobConfig(ZNRecord record) {
    JobConfig.Builder jobConfig = new JobConfig.Builder().fromMap(record.getSimpleFields());
    jobConfig.addTaskConfigMap(getTaskConfigMap(record.getMapFields()));
    return jobConfig;
  }

  /**
   * Converts raw per-task map fields into TaskConfigs, keyed by each task's generated id.
   * Entries lacking a TASK_COMMAND key are skipped.
   */
  private static Map<String, TaskConfig> getTaskConfigMap(
      Map<String, Map<String, String>> taskConfigs) {
    Map<String, TaskConfig> taskConfigsMap = new HashMap<>();
    if (taskConfigs == null || taskConfigs.isEmpty()) {
      return Collections.emptyMap();
    }
    for (Map<String, String> taskConfigMap : taskConfigs.values()) {
      if (!taskConfigMap.containsKey(JobProperties.TASK_COMMAND.name())) {
        continue;
      }
      TaskConfig taskConfig =
          new TaskConfig(taskConfigMap.get(JobProperties.TASK_COMMAND.name()), taskConfigMap);
      taskConfigsMap.put(taskConfig.getId(), taskConfig);
    }
    return taskConfigsMap;
  }
}
| 9,358 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources/helix/WorkflowAccessor.java
|
package org.apache.helix.rest.server.resources.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Response;
import com.codahale.metrics.annotation.ResponseMetered;
import com.codahale.metrics.annotation.Timed;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import com.fasterxml.jackson.databind.type.TypeFactory;
import org.apache.helix.HelixException;
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.server.filters.ClusterAuth;
import org.apache.helix.task.JobConfig;
import org.apache.helix.task.JobDag;
import org.apache.helix.task.JobQueue;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.Workflow;
import org.apache.helix.task.WorkflowConfig;
import org.apache.helix.task.WorkflowContext;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.zkclient.exception.ZkNoNodeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ClusterAuth
@Path("/clusters/{clusterId}/workflows")
public class WorkflowAccessor extends AbstractHelixResource {
private static Logger _logger = LoggerFactory.getLogger(WorkflowAccessor.class.getName());
  // JSON field names used when serializing workflow-related responses.
  public enum WorkflowProperties {
    Workflows,
    WorkflowConfig,
    WorkflowContext,
    Jobs,
    ParentJobs,
    LastScheduledTask
  }

  // Lifecycle commands accepted by the POST /clusters/{clusterId}/workflows/{workflowId}
  // endpoint ("clean" applies to job queues).
  public enum TaskCommand {
    stop,
    resume,
    clean
  }
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
public Response getWorkflows(@PathParam("clusterId") String clusterId) {
TaskDriver taskDriver = getTaskDriver(clusterId);
Map<String, WorkflowConfig> workflowConfigMap = taskDriver.getWorkflows();
Map<String, List<String>> dataMap = new HashMap<>();
dataMap.put(WorkflowProperties.Workflows.name(), new ArrayList<>(workflowConfigMap.keySet()));
return JSONRepresentation(dataMap);
}
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{workflowId}")
public Response getWorkflow(@PathParam("clusterId") String clusterId,
@PathParam("workflowId") String workflowId) {
TaskDriver taskDriver = getTaskDriver(clusterId);
WorkflowConfig workflowConfig = taskDriver.getWorkflowConfig(workflowId);
WorkflowContext workflowContext = taskDriver.getWorkflowContext(workflowId);
ObjectNode root = JsonNodeFactory.instance.objectNode();
TextNode id = JsonNodeFactory.instance.textNode(workflowId);
root.put(Properties.id.name(), id);
ObjectNode workflowConfigNode = JsonNodeFactory.instance.objectNode();
ObjectNode workflowContextNode = JsonNodeFactory.instance.objectNode();
if (workflowConfig != null) {
getWorkflowConfigNode(workflowConfigNode, workflowConfig.getRecord());
}
if (workflowContext != null) {
getWorkflowContextNode(workflowContextNode, workflowContext.getRecord());
}
root.put(WorkflowProperties.WorkflowConfig.name(), workflowConfigNode);
root.put(WorkflowProperties.WorkflowContext.name(), workflowContextNode);
JobDag jobDag = workflowConfig.getJobDag();
ArrayNode jobs = OBJECT_MAPPER.valueToTree(jobDag.getAllNodes());
ObjectNode parentJobs = OBJECT_MAPPER.valueToTree(jobDag.getChildrenToParents());
root.put(WorkflowProperties.Jobs.name(), jobs);
root.put(WorkflowProperties.ParentJobs.name(), parentJobs);
root.put(WorkflowProperties.LastScheduledTask.name(), OBJECT_MAPPER.valueToTree(taskDriver.getLastScheduledTaskExecutionInfo(workflowId)));
return JSONRepresentation(root);
}
  /**
   * Creates and starts a workflow (or job queue) from a JSON payload of the shape
   * {"WorkflowConfig": {...}, "Jobs": [...], "ParentJobs": {...}}.
   * When the config marks the workflow as a job queue, only the config is used and any
   * submitted jobs are ignored (jobs are enqueued later via the jobs endpoint).
   * @param clusterId the target cluster
   * @param workflowId the id to create the workflow under
   * @param content JSON payload as described above
   * @return 200 on success; 400 when the payload is malformed or the start is rejected
   */
  @ResponseMetered(name = HttpConstants.WRITE_REQUEST)
  @Timed(name = HttpConstants.WRITE_REQUEST)
  @PUT
  @Path("{workflowId}")
  public Response createWorkflow(@PathParam("clusterId") String clusterId,
      @PathParam("workflowId") String workflowId, String content) {
    TaskDriver driver = getTaskDriver(clusterId);
    Map<String, String> cfgMap;
    try {
      JsonNode root = OBJECT_MAPPER.readTree(content);
      // The WorkflowConfig section is a flat String->String map.
      cfgMap = OBJECT_MAPPER
          .readValue(root.get(WorkflowProperties.WorkflowConfig.name()).toString(),
              TypeFactory.defaultInstance()
                  .constructMapType(HashMap.class, String.class, String.class));
      WorkflowConfig workflowConfig = WorkflowConfig.Builder.fromMap(cfgMap).build();
      // Since JobQueue can keep adding jobs, Helix create JobQueue will ignore the jobs
      if (workflowConfig.isJobQueue()) {
        driver.start(new JobQueue.Builder(workflowId).setWorkflowConfig(workflowConfig).build());
        return OK();
      }
      Workflow.Builder workflow = new Workflow.Builder(workflowId);
      workflow.setWorkflowConfig(workflowConfig);
      // Optional "Jobs" array: each element is a job config record.
      if (root.get(WorkflowProperties.Jobs.name()) != null) {
        Map<String, JobConfig.Builder> jobConfigs =
            getJobConfigs((ArrayNode) root.get(WorkflowProperties.Jobs.name()));
        for (Map.Entry<String, JobConfig.Builder> job : jobConfigs.entrySet()) {
          workflow.addJob(job.getKey(), job.getValue());
        }
      }
      // Optional "ParentJobs" map: parent job name -> list of child job names.
      if (root.get(WorkflowProperties.ParentJobs.name()) != null) {
        Map<String, List<String>> parentJobs = OBJECT_MAPPER
            .readValue(root.get(WorkflowProperties.ParentJobs.name()).toString(),
                TypeFactory.defaultInstance()
                    .constructMapType(HashMap.class, String.class, List.class));
        for (Map.Entry<String, List<String>> entry : parentJobs.entrySet()) {
          String parentJob = entry.getKey();
          for (String childJob : entry.getValue()) {
            workflow.addParentChildDependency(parentJob, childJob);
          }
        }
      }
      driver.start(workflow.build());
    } catch (IOException e) {
      return badRequest(String
          .format("Invalid input of Workflow %s for reason : %s", workflowId, e.getMessage()));
    } catch (HelixException e) {
      return badRequest(String
          .format("Failed to create workflow %s for reason : %s", workflowId, e.getMessage()));
    }
    return OK();
  }
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@DELETE
@Path("{workflowId}")
public Response deleteWorkflow(@PathParam("clusterId") String clusterId,
@PathParam("workflowId") String workflowId,
@QueryParam("force") @DefaultValue("false") String forceDelete) {
boolean force = Boolean.valueOf(forceDelete);
TaskDriver driver = getTaskDriver(clusterId);
try {
driver.delete(workflowId, force);
} catch (HelixException e) {
return badRequest(String
.format("Failed to delete workflow %s for reason : %s", workflowId, e.getMessage()));
}
return OK();
}
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("{workflowId}")
public Response updateWorkflow(@PathParam("clusterId") String clusterId,
@PathParam("workflowId") String workflowId, @QueryParam("command") String command) {
TaskDriver driver = getTaskDriver(clusterId);
try {
TaskCommand cmd = TaskCommand.valueOf(command);
switch (cmd) {
case stop:
driver.stop(workflowId);
break;
case resume:
driver.resume(workflowId);
break;
case clean:
driver.cleanupQueue(workflowId);
break;
default:
return badRequest(String.format("Invalid command : %s", command));
}
} catch (HelixException e) {
return badRequest(
String.format("Failed to execute operation %s for reason : %s", command, e.getMessage()));
} catch (Exception e) {
return serverError(e);
}
return OK();
}
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{workflowId}/configs")
public Response getWorkflowConfig(@PathParam("clusterId") String clusterId,
@PathParam("workflowId") String workflowId) {
TaskDriver taskDriver = getTaskDriver(clusterId);
WorkflowConfig workflowConfig = taskDriver.getWorkflowConfig(workflowId);
ObjectNode workflowConfigNode = JsonNodeFactory.instance.objectNode();
if (workflowConfig != null) {
getWorkflowConfigNode(workflowConfigNode, workflowConfig.getRecord());
}
return JSONRepresentation(workflowConfigNode);
}
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("{workflowId}/configs")
public Response updateWorkflowConfig(@PathParam("clusterId") String clusterId,
@PathParam("workflowId") String workflowId, String content) {
ZNRecord record;
TaskDriver driver = getTaskDriver(clusterId);
try {
record = toZNRecord(content);
WorkflowConfig workflowConfig = driver.getWorkflowConfig(workflowId);
if (workflowConfig == null) {
return badRequest(
String.format("WorkflowConfig for workflow %s does not exists!", workflowId));
}
workflowConfig.getRecord().update(record);
driver.updateWorkflow(workflowId, workflowConfig);
} catch (HelixException e) {
return badRequest(
String.format("Failed to update WorkflowConfig for workflow %s", workflowId));
} catch (Exception e) {
return badRequest(String.format("Invalid WorkflowConfig for workflow %s", workflowId));
}
return OK();
}
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{workflowId}/userContent")
public Response getWorkflowUserContent(
@PathParam("clusterId") String clusterId,
@PathParam("workflowId") String workflowId
) {
TaskDriver taskDriver = getTaskDriver(clusterId);
try {
Map<String, String> contentStore =
taskDriver.getWorkflowUserContentMap(workflowId);
if (contentStore == null) {
// ZkCacheBasedDataAccessor would throw npe if workflow or job does not exist
return notFound(String.format(
"Unable to find content store. Workflow (%s) does not exist.",
workflowId));
}
return JSONRepresentation(contentStore);
} catch (ZkNoNodeException e) {
return notFound("Unable to find content store");
} catch (Exception e) {
return serverError(e);
}
}
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("{workflowId}/userContent")
public Response updateWorkflowUserContent(
@PathParam("clusterId") String clusterId,
@PathParam("workflowId") String workflowId,
@QueryParam("command") String commandStr,
String content
) {
Command cmd;
Map<String, String> contentMap = Collections.emptyMap();
try {
contentMap = OBJECT_MAPPER.readValue(content, new TypeReference<Map<String, String>>() {});
cmd = Command.valueOf(commandStr);
} catch (IOException e) {
return badRequest(String.format("Content %s cannot be deserialized to Map<String, String>. Err: %s", content, e.getMessage()));
} catch (IllegalArgumentException ie) {
return badRequest(String.format("Invalid command: %s. Err: %s", commandStr, ie.getMessage()));
} catch (NullPointerException npe) {
cmd = Command.update;
}
TaskDriver driver = getTaskDriver(clusterId);
try {
switch (cmd) {
case update:
driver.addOrUpdateWorkflowUserContentMap(workflowId, contentMap);
return OK();
default:
return badRequest(String.format("Command \"%s\" is not supported!", cmd));
}
} catch (NullPointerException npe) {
// ZkCacheBasedDataAccessor would throw npe if workflow or job does not exist
return notFound(String.format(
"Unable to find content store. Workflow (%s) does not exist.",
workflowId));
} catch (Exception e) {
_logger.error("Failed to update user content store", e);
return serverError(e);
}
}
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{workflowId}/context")
public Response getWorkflowContext(@PathParam("clusterId") String clusterId,
    @PathParam("workflowId") String workflowId) {
  // Serialize the workflow's runtime context as JSON; an unknown workflow (or a
  // workflow that has not run yet) yields an empty JSON object rather than 404.
  ObjectNode contextNode = JsonNodeFactory.instance.objectNode();
  WorkflowContext context = getTaskDriver(clusterId).getWorkflowContext(workflowId);
  if (context != null) {
    getWorkflowContextNode(contextNode, context.getRecord());
  }
  return JSONRepresentation(contextNode);
}
/**
 * Copies the workflow config's simple fields into {@code workflowConfigNode},
 * omitting the DAG (it is exposed through a dedicated representation).
 *
 * Bug fix: the key (a String) was compared with equals() against the enum
 * constant {@code WorkflowConfigProperty.Dag} itself, which is always false,
 * so the Dag field was never actually filtered. Compare against its name().
 */
private void getWorkflowConfigNode(ObjectNode workflowConfigNode, ZNRecord record) {
  for (Map.Entry<String, String> entry : record.getSimpleFields().entrySet()) {
    if (!entry.getKey().equals(WorkflowConfig.WorkflowConfigProperty.Dag.name())) {
      // set() replaces the deprecated ObjectNode.put(String, JsonNode) overload.
      workflowConfigNode.set(entry.getKey(), JsonNodeFactory.instance.textNode(entry.getValue()));
    }
  }
}
/**
 * Copies the workflow context's map fields (as nested JSON objects) and simple
 * fields (as string properties) into {@code workflowContextNode}.
 */
private void getWorkflowContextNode(ObjectNode workflowContextNode, ZNRecord record) {
  if (record.getMapFields() != null) {
    for (String fieldName : record.getMapFields().keySet()) {
      JsonNode node = OBJECT_MAPPER.valueToTree(record.getMapField(fieldName));
      // set() replaces the deprecated ObjectNode.put(String, JsonNode) overload.
      workflowContextNode.set(fieldName, node);
    }
  }
  if (record.getSimpleFields() != null) {
    for (Map.Entry<String, String> entry : record.getSimpleFields().entrySet()) {
      workflowContextNode
          .set(entry.getKey(), JsonNodeFactory.instance.textNode(entry.getValue()));
    }
  }
}
/**
 * Builds a map from job id to its {@link JobConfig.Builder} out of the JSON
 * array of job definitions. Each element is first tried as a serialized
 * ZNRecord; if that fails (or yields no simple fields), it is re-parsed as a
 * flat String-to-String config map.
 *
 * @param root JSON array of job definitions, each carrying an "id" field
 * @return job id mapped to its config builder
 * @throws HelixException if a job config cannot be constructed
 * @throws IOException if a flat-map element cannot be deserialized
 */
private Map<String, JobConfig.Builder> getJobConfigs(ArrayNode root)
    throws HelixException, IOException {
  Map<String, JobConfig.Builder> jobConfigsMap = new HashMap<>();
  for (Iterator<JsonNode> it = root.elements(); it.hasNext(); ) {
    JsonNode job = it.next();
    String jobId = job.get(Properties.id.name()).textValue();
    ZNRecord record = null;
    try {
      record = toZNRecord(job.toString());
    } catch (IOException ignored) {
      // Not a ZNRecord; fall through and treat the payload as simple fields.
    }
    if (record != null && !record.getSimpleFields().isEmpty()) {
      jobConfigsMap.put(jobId, JobAccessor.getJobConfig(record));
    } else {
      Map<String, String> cfgMap = OBJECT_MAPPER.readValue(job.toString(),
          TypeFactory.defaultInstance()
              .constructMapType(HashMap.class, String.class, String.class));
      jobConfigsMap.put(jobId, JobAccessor.getJobConfig(cfgMap));
    }
  }
  return jobConfigsMap;
}
}
| 9,359 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources/helix/AbstractHelixResource.java
|
package org.apache.helix.rest.server.resources.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import org.apache.helix.BaseDataAccessor;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.manager.zk.ZkBucketDataAccessor;
import org.apache.helix.rest.common.ContextPropertyKeys;
import org.apache.helix.rest.server.ServerContext;
import org.apache.helix.rest.server.resources.AbstractResource;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.zookeeper.api.client.RealmAwareZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.impl.client.ZkClient;
/**
* This class provides methods to access Helix specific objects
* such as cluster, instance, job, resource, workflow, etc in
* metadata store.
*/
/**
 * Base resource exposing accessors to Helix constructs (cluster, instance, job,
 * resource, workflow, ...) stored in the metadata store. All accessors are thin
 * delegations to the per-application {@link ServerContext}.
 */
public class AbstractHelixResource extends AbstractResource {

  public RealmAwareZkClient getRealmAwareZkClient() {
    return getServerContext().getRealmAwareZkClient();
  }

  /** @deprecated use {@link #getRealmAwareZkClient()} instead. */
  @Deprecated
  public ZkClient getZkClient() {
    return (ZkClient) getRealmAwareZkClient();
  }

  public HelixAdmin getHelixAdmin() {
    return getServerContext().getHelixAdmin();
  }

  public ClusterSetup getClusterSetup() {
    return getServerContext().getClusterSetup();
  }

  public TaskDriver getTaskDriver(String clusterName) {
    return getServerContext().getTaskDriver(clusterName);
  }

  public ConfigAccessor getConfigAccessor() {
    return getServerContext().getConfigAccessor();
  }

  // Note: the misspelled name is public API relied on by sibling resources.
  public HelixDataAccessor getDataAccssor(String clusterName) {
    return getServerContext().getDataAccessor(clusterName);
  }

  protected BaseDataAccessor<byte[]> getByteArrayDataAccessor() {
    return getServerContext().getByteArrayZkBaseDataAccessor();
  }

  protected ZkBucketDataAccessor getZkBucketDataAccessor() {
    return getServerContext().getZkBucketDataAccessor();
  }

  /** Deserializes a ZNRecord from its JSON string form. */
  protected static ZNRecord toZNRecord(String data)
      throws IOException {
    return ZNRECORD_READER.readValue(data);
  }

  // The ServerContext is installed in the JAX-RS application properties at startup.
  private ServerContext getServerContext() {
    return (ServerContext) _application.getProperties()
        .get(ContextPropertyKeys.SERVER_CONTEXT.name());
  }
}
| 9,360 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources/helix/InstancesAccessor.java
|
package org.apache.helix.rest.server.resources.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Response;
import com.codahale.metrics.annotation.ResponseMetered;
import com.codahale.metrics.annotation.Timed;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.rest.clusterMaintenanceService.MaintenanceManagementService;
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.server.filters.ClusterAuth;
import org.apache.helix.rest.server.json.cluster.ClusterTopology;
import org.apache.helix.rest.server.json.instance.StoppableCheck;
import org.apache.helix.rest.server.resources.exceptions.HelixHealthException;
import org.apache.helix.rest.server.service.ClusterService;
import org.apache.helix.rest.server.service.ClusterServiceImpl;
import org.apache.helix.util.InstanceValidationUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ClusterAuth
@Path("/clusters/{clusterId}/instances")
public class InstancesAccessor extends AbstractHelixResource {
  private final static Logger _logger = LoggerFactory.getLogger(InstancesAccessor.class);

  // This type does not belong to the real HealthCheck failed reasons. Adding it to
  // the HealthCheck enum would also introduce an unnecessary check step, since
  // InstanceServiceImpl loops over all enum values to run the corresponding checks.
  private final static String INSTANCE_NOT_EXIST = "HELIX:INSTANCE_NOT_EXIST";

  // JSON field names used in instance-related REST payloads and responses.
  public enum InstancesProperties {
    instances,
    online,
    disabled,
    selection_base,
    zone_order,
    customized_values,
    instance_stoppable_parallel,
    instance_not_stoppable_with_reasons
  }

  // Granularity at which the stoppable check selects candidate instances.
  public enum InstanceHealthSelectionBase {
    instance_based,
    zone_based
  }

  /**
   * Lists all instances of the cluster (with online/disabled status), or with
   * command=validateWeight validates every instance's config for WAGED rebalance.
   */
  @ResponseMetered(name = HttpConstants.READ_REQUEST)
  @Timed(name = HttpConstants.READ_REQUEST)
  @GET
  public Response getAllInstances(@PathParam("clusterId") String clusterId,
      @DefaultValue("getAllInstances") @QueryParam("command") String command) {
    // Get the command. If not provided, the default would be "getAllInstances".
    Command cmd;
    try {
      cmd = Command.valueOf(command);
    } catch (Exception e) {
      return badRequest("Invalid command : " + command);
    }

    HelixDataAccessor accessor = getDataAccssor(clusterId);
    List<String> instances = accessor.getChildNames(accessor.keyBuilder().instanceConfigs());
    if (instances == null) {
      return notFound();
    }
    switch (cmd) {
      case getAllInstances:
        ObjectNode root = JsonNodeFactory.instance.objectNode();
        // set() replaces the deprecated ObjectNode.put(String, JsonNode) overload.
        root.set(Properties.id.name(), JsonNodeFactory.instance.textNode(clusterId));

        ArrayNode instancesNode =
            root.putArray(InstancesAccessor.InstancesProperties.instances.name());
        instancesNode.addAll((ArrayNode) OBJECT_MAPPER.valueToTree(instances));
        ArrayNode onlineNode = root.putArray(InstancesAccessor.InstancesProperties.online.name());
        ArrayNode disabledNode =
            root.putArray(InstancesAccessor.InstancesProperties.disabled.name());

        List<String> liveInstances = accessor.getChildNames(accessor.keyBuilder().liveInstances());
        ClusterConfig clusterConfig = accessor.getProperty(accessor.keyBuilder().clusterConfig());

        for (String instanceName : instances) {
          InstanceConfig instanceConfig =
              accessor.getProperty(accessor.keyBuilder().instanceConfig(instanceName));
          if (instanceConfig != null) {
            if (!InstanceValidationUtil.isInstanceEnabled(instanceConfig, clusterConfig)) {
              disabledNode.add(JsonNodeFactory.instance.textNode(instanceName));
            }
            if (liveInstances.contains(instanceName)) {
              onlineNode.add(JsonNodeFactory.instance.textNode(instanceName));
            }
          }
        }
        return JSONRepresentation(root);
      case validateWeight:
        // Validate all instances for WAGED rebalance
        HelixAdmin admin = getHelixAdmin();
        Map<String, Boolean> validationResultMap;
        try {
          validationResultMap = admin.validateInstancesForWagedRebalance(clusterId, instances);
        } catch (HelixException e) {
          return badRequest(e.getMessage());
        }
        return JSONRepresentation(validationResultMap);
      default:
        _logger.error("Unsupported command :" + command);
        return badRequest("Unsupported command :" + command);
    }
  }

  /**
   * Enables or disables a batch of instances, or with command=stoppable runs the
   * parallel stoppable health checks on the instances listed in the request body.
   */
  @ResponseMetered(name = HttpConstants.WRITE_REQUEST)
  @Timed(name = HttpConstants.WRITE_REQUEST)
  @POST
  public Response instancesOperations(@PathParam("clusterId") String clusterId,
      @QueryParam("command") String command,
      @QueryParam("continueOnFailures") boolean continueOnFailures,
      @QueryParam("skipZKRead") boolean skipZKRead,
      @QueryParam("skipHealthCheckCategories") String skipHealthCheckCategories, String content) {
    Command cmd;
    try {
      cmd = Command.valueOf(command);
    } catch (Exception e) {
      return badRequest("Invalid command : " + command);
    }

    // Parse and validate the comma-separated list of check categories to skip.
    Set<StoppableCheck.Category> skipHealthCheckCategorySet;
    try {
      skipHealthCheckCategorySet = skipHealthCheckCategories != null
          ? StoppableCheck.Category.categorySetFromCommaSeperatedString(skipHealthCheckCategories)
          : Collections.emptySet();
      if (!MaintenanceManagementService.SKIPPABLE_HEALTH_CHECK_CATEGORIES.containsAll(
          skipHealthCheckCategorySet)) {
        throw new IllegalArgumentException(
            "Some of the provided skipHealthCheckCategories are not skippable. The supported skippable categories are: "
                + MaintenanceManagementService.SKIPPABLE_HEALTH_CHECK_CATEGORIES);
      }
    } catch (Exception e) {
      return badRequest("Invalid skipHealthCheckCategories: " + skipHealthCheckCategories + "\n"
          + e.getMessage());
    }

    HelixAdmin admin = getHelixAdmin();
    try {
      JsonNode node = null;
      if (content.length() != 0) {
        node = OBJECT_MAPPER.readTree(content);
      }
      if (node == null) {
        return badRequest("Invalid input for content : " + content);
      }
      List<String> enableInstances = OBJECT_MAPPER
          .readValue(node.get(InstancesAccessor.InstancesProperties.instances.name()).toString(),
              OBJECT_MAPPER.getTypeFactory().constructCollectionType(List.class, String.class));
      switch (cmd) {
      case enable:
        admin.enableInstance(clusterId, enableInstances, true);
        break;
      case disable:
        admin.enableInstance(clusterId, enableInstances, false);
        break;
      case stoppable:
        return batchGetStoppableInstances(clusterId, node, skipZKRead, continueOnFailures,
            skipHealthCheckCategorySet);
      default:
        _logger.error("Unsupported command :" + command);
        return badRequest("Unsupported command :" + command);
      }
    } catch (HelixHealthException e) {
      _logger
          .error(String.format("Current cluster %s has issue with health checks!", clusterId), e);
      return serverError(e);
    } catch (Exception e) {
      _logger.error("Failed in updating instances : " + content, e);
      return badRequest(e.getMessage());
    }
    return OK();
  }

  /**
   * Runs stoppable checks for the instances in the payload, grouped by the
   * requested selection base (currently only zone_based is supported), and returns
   * which instances can be stopped in parallel plus the failure reasons for the rest.
   */
  private Response batchGetStoppableInstances(String clusterId, JsonNode node, boolean skipZKRead,
      boolean continueOnFailures, Set<StoppableCheck.Category> skipHealthCheckCategories)
      throws IOException {
    try {
      // TODO: Process input data from the content
      InstancesAccessor.InstanceHealthSelectionBase selectionBase =
          InstancesAccessor.InstanceHealthSelectionBase.valueOf(
              node.get(InstancesAccessor.InstancesProperties.selection_base.name()).textValue());
      List<String> instances = OBJECT_MAPPER.readValue(
          node.get(InstancesAccessor.InstancesProperties.instances.name()).toString(),
          OBJECT_MAPPER.getTypeFactory().constructCollectionType(List.class, String.class));

      List<String> orderOfZone = null;
      String customizedInput = null;
      if (node.get(InstancesAccessor.InstancesProperties.customized_values.name()) != null) {
        customizedInput =
            node.get(InstancesAccessor.InstancesProperties.customized_values.name()).toString();
      }

      if (node.get(InstancesAccessor.InstancesProperties.zone_order.name()) != null) {
        orderOfZone = OBJECT_MAPPER.readValue(
            node.get(InstancesAccessor.InstancesProperties.zone_order.name()).toString(),
            OBJECT_MAPPER.getTypeFactory().constructCollectionType(List.class, String.class));
      }

      // Prepare output result
      ObjectNode result = JsonNodeFactory.instance.objectNode();
      ArrayNode stoppableInstances =
          result.putArray(InstancesAccessor.InstancesProperties.instance_stoppable_parallel.name());
      ObjectNode failedStoppableInstances = result.putObject(
          InstancesAccessor.InstancesProperties.instance_not_stoppable_with_reasons.name());
      MaintenanceManagementService maintenanceService =
          new MaintenanceManagementService((ZKHelixDataAccessor) getDataAccssor(clusterId),
              getConfigAccessor(), skipZKRead, continueOnFailures, skipHealthCheckCategories,
              getNamespace());
      ClusterService clusterService =
          new ClusterServiceImpl(getDataAccssor(clusterId), getConfigAccessor());
      ClusterTopology clusterTopology = clusterService.getClusterTopology(clusterId);
      switch (selectionBase) {
      case zone_based:
        List<String> zoneBasedInstance =
            getZoneBasedInstances(instances, orderOfZone, clusterTopology.toZoneMapping());
        Map<String, StoppableCheck> instancesStoppableChecks =
            maintenanceService.batchGetInstancesStoppableChecks(clusterId, zoneBasedInstance,
                customizedInput);
        for (Map.Entry<String, StoppableCheck> instanceStoppableCheck : instancesStoppableChecks.entrySet()) {
          String instance = instanceStoppableCheck.getKey();
          StoppableCheck stoppableCheck = instanceStoppableCheck.getValue();
          if (!stoppableCheck.isStoppable()) {
            ArrayNode failedReasonsNode = failedStoppableInstances.putArray(instance);
            for (String failedReason : stoppableCheck.getFailedChecks()) {
              failedReasonsNode.add(JsonNodeFactory.instance.textNode(failedReason));
            }
          } else {
            stoppableInstances.add(instance);
          }
        }
        // Adding following logic to check whether instances exist or not. An instance
        // may be absent because:
        // 1. Instance got dropped. (InstanceConfig is gone.)
        // 2. Instance name has typo.
        // Without this check, a non-existent instance would silently disappear from the
        // result, since Helix skips instances not in the selected zone. Report it
        // explicitly so the user is not confused by the output.
        Set<String> nonSelectedInstances = new HashSet<>(instances);
        nonSelectedInstances.removeAll(clusterTopology.getAllInstances());
        for (String nonSelectedInstance : nonSelectedInstances) {
          ArrayNode failedReasonsNode = failedStoppableInstances.putArray(nonSelectedInstance);
          failedReasonsNode.add(JsonNodeFactory.instance.textNode(INSTANCE_NOT_EXIST));
        }
        break;
      case instance_based:
      default:
        throw new UnsupportedOperationException("instance_based selection is not supported yet!");
      }
      return JSONRepresentation(result);
    } catch (HelixException e) {
      _logger
          .error(String.format("Current cluster %s has issue with health checks!", clusterId), e);
      throw new HelixHealthException(e);
    } catch (Exception e) {
      _logger.error(String.format(
          "Failed to get parallel stoppable instances for cluster %s with a HelixException!",
          clusterId), e);
      throw e;
    }
  }

  /**
   * Get instances belonging to the first non-empty zone. If a zone is already empty,
   * Helix iterates zones in order until it finds one containing instances.
   *
   * The order of zones can come directly from user input. If the user did not specify
   * it, zones are ordered alphabetically.
   *
   * @param instances candidate instance names
   * @param orderedZones user-specified zone order, or null for alphabetical
   * @param zoneMapping zone name to the instances it contains
   * @return instances of the first zone with a non-empty intersection, else empty list
   */
  private List<String> getZoneBasedInstances(List<String> instances, List<String> orderedZones,
      Map<String, Set<String>> zoneMapping) {
    // If the orderedZones is not specified, order all zones alphabetically.
    if (orderedZones == null) {
      orderedZones = new ArrayList<>(zoneMapping.keySet());
      Collections.sort(orderedZones);
    }
    if (orderedZones.isEmpty()) {
      return orderedZones;
    }
    for (String zone : orderedZones) {
      Set<String> instanceSet = new TreeSet<>(instances);
      // A user-supplied zone name may not exist in the topology; treat it as an
      // empty zone instead of letting zoneMapping.get(zone) NPE.
      instanceSet.retainAll(zoneMapping.getOrDefault(zone, Collections.emptySet()));
      if (!instanceSet.isEmpty()) {
        return new ArrayList<>(instanceSet);
      }
    }
    // Typed emptyList() instead of the raw Collections.EMPTY_LIST constant.
    return Collections.emptyList();
  }
}
| 9,361 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources/helix/PerInstanceAccessor.java
|
package org.apache.helix.rest.server.resources.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.codahale.metrics.annotation.ResponseMetered;
import com.codahale.metrics.annotation.Timed;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.constants.InstanceConstants;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.Error;
import org.apache.helix.model.HealthStat;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;
import org.apache.helix.model.ParticipantHistory;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.helix.rest.clusterMaintenanceService.HealthCheck;
import org.apache.helix.rest.clusterMaintenanceService.MaintenanceManagementService;
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.server.filters.ClusterAuth;
import org.apache.helix.rest.server.json.instance.InstanceInfo;
import org.apache.helix.rest.server.json.instance.StoppableCheck;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.eclipse.jetty.util.StringUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ClusterAuth
@Path("/clusters/{clusterId}/instances/{instanceName}")
public class PerInstanceAccessor extends AbstractHelixResource {
private final static Logger LOG = LoggerFactory.getLogger(PerInstanceAccessor.class);
// JSON field names used in per-instance REST request/response payloads.
// NOTE: do not reorder — external payloads reference these by name, and adding
// in the middle risks accidental ordinal-based assumptions elsewhere.
public enum PerInstanceProperties {
  config,
  liveInstance,
  resource,
  resources,
  partitions,
  errors,
  new_messages,
  read_messages,
  total_message_count,
  read_message_count,
  healthreports,
  instanceTags,
  // Keys recognized inside the takeInstance/freeInstance JSON payloads:
  health_check_list,
  health_check_config,
  operation_list,
  operation_config,
  // Meta-keys that may be embedded inside health_check_config / operation_config:
  continueOnFailures,
  skipZKRead,
  performOperation
}
// Parsed form of the takeInstance/freeInstance JSON payload.
private static class MaintenanceOpInputFields {
  List<String> healthChecks = null;             // health checks to run (may be null if omitted)
  Map<String, String> healthCheckConfig = null; // config forwarded to custom health checks
  List<String> operations = null;               // maintenance operations to run (may be null)
  Map<String, String> operationConfig = null;   // config forwarded to operations
  Set<String> nonBlockingHelixCheck = new HashSet<>(); // checks whose failure does not block
  boolean skipZKRead = false;                   // skip reading health data from ZK
  boolean performOperation = true;              // false = dry-run (check only)

  @Override
  public String toString() {
    // Used by readMaintenanceInputFromJson's debug log; without this override the
    // log line only printed the default Object identity string.
    return "MaintenanceOpInputFields{healthChecks=" + healthChecks
        + ", healthCheckConfig=" + healthCheckConfig
        + ", operations=" + operations
        + ", operationConfig=" + operationConfig
        + ", nonBlockingHelixCheck=" + nonBlockingHelixCheck
        + ", skipZKRead=" + skipZKRead
        + ", performOperation=" + performOperation
        + '}';
  }
}
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
public Response getInstanceById(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName, @QueryParam("skipZKRead") String skipZKRead,
    @DefaultValue("getInstance") @QueryParam("command") String command) {
  // Resolve the command; "getInstance" is the JAX-RS-supplied default.
  Command cmd;
  try {
    cmd = Command.valueOf(command);
  } catch (Exception e) {
    return badRequest("Invalid command : " + command);
  }

  switch (cmd) {
    case getInstance:
      // TODO reduce GC by dependency injection
      MaintenanceManagementService service = new MaintenanceManagementService(
          (ZKHelixDataAccessor) getDataAccssor(clusterId), getConfigAccessor(),
          Boolean.parseBoolean(skipZKRead), getNamespace());
      InstanceInfo instanceInfo = service.getInstanceHealthInfo(clusterId, instanceName,
          HealthCheck.STARTED_AND_HEALTH_CHECK_LIST);
      try {
        return OK(OBJECT_MAPPER.writeValueAsString(instanceInfo));
      } catch (JsonProcessingException e) {
        return serverError(e);
      }
    case validateWeight:
      // Validate this single instance's config for WAGED rebalance.
      try {
        return JSONRepresentation(getHelixAdmin().validateInstancesForWagedRebalance(clusterId,
            Collections.singletonList(instanceName)));
      } catch (HelixException e) {
        return badRequest(e.getMessage());
      }
    default:
      LOG.error("Unsupported command :" + command);
      return badRequest("Unsupported command :" + command);
  }
}
/**
 * Runs the stoppable health checks for a single instance.
 *
 * @param jsonContent json payload; may carry customized_values for custom checks
 * @param clusterId cluster id
 * @param instanceName instance to evaluate
 * @param skipZKRead when true, skip reading health data from the zk server
 * @param continueOnFailures whether subsequent checks still run after a previous
 *                           check fails; when false, a failed Helix-owned check
 *                           short-circuits the custom checks
 * @param skipHealthCheckCategories comma-separated StoppableCheck categories to skip
 * @return json response describing whether the queried instance is stoppable
 * @throws IOException if there is any IO/network error
 */
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("stoppable")
@Consumes(MediaType.APPLICATION_JSON)
public Response isInstanceStoppable(String jsonContent, @PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName, @QueryParam("skipZKRead") boolean skipZKRead,
    @QueryParam("continueOnFailures") boolean continueOnFailures,
    @QueryParam("skipHealthCheckCategories") String skipHealthCheckCategories)
    throws IOException {
  // Parse and validate the categories the caller wants to skip.
  Set<StoppableCheck.Category> categoriesToSkip;
  try {
    categoriesToSkip = skipHealthCheckCategories == null
        ? Collections.emptySet()
        : StoppableCheck.Category.categorySetFromCommaSeperatedString(skipHealthCheckCategories);
    if (!MaintenanceManagementService.SKIPPABLE_HEALTH_CHECK_CATEGORIES.containsAll(
        categoriesToSkip)) {
      throw new IllegalArgumentException(
          "Some of the provided skipHealthCheckCategories are not skippable. The supported skippable categories are: "
              + MaintenanceManagementService.SKIPPABLE_HEALTH_CHECK_CATEGORIES);
    }
  } catch (Exception e) {
    return badRequest("Invalid skipHealthCheckCategories: " + skipHealthCheckCategories + "\n"
        + e.getMessage());
  }

  MaintenanceManagementService maintenanceService =
      new MaintenanceManagementService((ZKHelixDataAccessor) getDataAccssor(clusterId),
          getConfigAccessor(), skipZKRead, continueOnFailures, categoriesToSkip, getNamespace());

  StoppableCheck stoppableCheck;
  try {
    JsonNode root = jsonContent.length() == 0 ? null : OBJECT_MAPPER.readTree(jsonContent);
    if (root == null) {
      return badRequest("Invalid input for content : " + jsonContent);
    }
    JsonNode customizedNode =
        root.get(InstancesAccessor.InstancesProperties.customized_values.name());
    String customizedInput = customizedNode == null ? null : customizedNode.toString();
    stoppableCheck =
        maintenanceService.getInstanceStoppableCheck(clusterId, instanceName, customizedInput);
  } catch (HelixException e) {
    LOG.error("Current cluster: {}, instance: {} has issue with health checks!", clusterId,
        instanceName, e);
    return serverError(e);
  }
  return OK(OBJECT_MAPPER.writeValueAsString(stoppableCheck));
}
/**
 * Runs health checks plus the user-specified operation checks and executions
 * to take an instance out of service.
 *
 * @param jsonContent json payload describing the checks and operations
 * @param clusterId cluster id
 * @param instanceName instance to take
 * @return json response describing the result of the take attempt
 */
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("takeInstance")
@Consumes(MediaType.APPLICATION_JSON)
public Response takeSingleInstance(
    String jsonContent,
    @PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName){
  try {
    MaintenanceOpInputFields fields = readMaintenanceInputFromJson(jsonContent);
    if (fields == null) {
      return badRequest("Invalid input for content : " + jsonContent);
    }
    MaintenanceManagementService service =
        new MaintenanceManagementService((ZKHelixDataAccessor) getDataAccssor(clusterId),
            getConfigAccessor(), fields.skipZKRead, fields.nonBlockingHelixCheck, getNamespace());
    return JSONRepresentation(
        service.takeInstance(clusterId, instanceName, fields.healthChecks,
            fields.healthCheckConfig, fields.operations, fields.operationConfig,
            fields.performOperation));
  } catch (Exception e) {
    LOG.error("Failed to takeInstances:", e);
    return badRequest("Failed to takeInstances: " + e.getMessage());
  }
}
/**
 * Runs health checks plus the user-specified operation checks and executions
 * to bring an instance back into service (the inverse of takeInstance).
 *
 * Note: unlike takeInstance, any user-supplied health checks are ignored here;
 * a warning is logged when the payload includes them.
 *
 * @param jsonContent json payload describing the checks and operations
 * @param clusterId cluster id
 * @param instanceName instance to free
 * @return json response describing the result of the free attempt
 */
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("freeInstance")
@Consumes(MediaType.APPLICATION_JSON)
public Response freeSingleInstance(
    String jsonContent,
    @PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName){
  try {
    MaintenanceOpInputFields inputFields = readMaintenanceInputFromJson(jsonContent);
    if (inputFields == null) {
      return badRequest("Invalid input for content : " + jsonContent);
    }
    // healthChecks may be null when the payload omits health_check_list; the old
    // "size() != 0" check threw NPE in that case.
    if (inputFields.healthChecks != null && !inputFields.healthChecks.isEmpty()) {
      LOG.warn("freeSingleInstance won't perform user passed health check.");
    }
    MaintenanceManagementService maintenanceManagementService =
        new MaintenanceManagementService((ZKHelixDataAccessor) getDataAccssor(clusterId),
            getConfigAccessor(), inputFields.skipZKRead, inputFields.nonBlockingHelixCheck,
            getNamespace());
    return JSONRepresentation(maintenanceManagementService
        .freeInstance(clusterId, instanceName, inputFields.healthChecks,
            inputFields.healthCheckConfig,
            inputFields.operations,
            inputFields.operationConfig, inputFields.performOperation));
  } catch (Exception e) {
    // Fixed copy-paste from takeSingleInstance: messages previously said "takeInstances".
    LOG.error("Failed to freeInstance:", e);
    return badRequest("Failed to freeInstance: " + e.getMessage());
  }
}
/**
 * Parses the takeInstance/freeInstance JSON payload into a MaintenanceOpInputFields.
 *
 * Recognized keys: health_check_list, health_check_config (which may embed the
 * meta-keys continueOnFailures and skipZKRead — they are stripped before the map
 * is forwarded to custom checks), operation_list and operation_config (which may
 * embed performOperation).
 *
 * @param jsonContent raw request body
 * @return parsed fields, or null when the body is empty
 * @throws IOException if the body is not valid JSON
 */
private MaintenanceOpInputFields readMaintenanceInputFromJson(String jsonContent) throws IOException {
  JsonNode node = null;
  if (jsonContent.length() != 0) {
    node = OBJECT_MAPPER.readTree(jsonContent);
  }
  if (node == null) {
    return null;
  }
  MaintenanceOpInputFields inputFields = new MaintenanceOpInputFields();
  String continueOnFailuresName = PerInstanceProperties.continueOnFailures.name();
  String skipZKReadName = PerInstanceProperties.skipZKRead.name();
  String performOperation = PerInstanceProperties.performOperation.name();

  inputFields.healthChecks = MaintenanceManagementService
      .getListFromJsonPayload(node.get(PerInstanceProperties.health_check_list.name()));
  inputFields.healthCheckConfig = MaintenanceManagementService
      .getMapFromJsonPayload(node.get(PerInstanceProperties.health_check_config.name()));

  if (inputFields.healthCheckConfig != null) {
    if (inputFields.healthCheckConfig.containsKey(continueOnFailuresName)) {
      inputFields.nonBlockingHelixCheck = new HashSet<>(MaintenanceManagementService
          .getListFromJsonPayload(inputFields.healthCheckConfig.get(continueOnFailuresName)));
      // healthCheckConfig is passed to the customer's health check directly, so
      // strip the unrelated kv pairs first.
      inputFields.healthCheckConfig.remove(continueOnFailuresName);
    }
    if (inputFields.healthCheckConfig.containsKey(skipZKReadName)) {
      inputFields.skipZKRead =
          Boolean.parseBoolean(inputFields.healthCheckConfig.get(skipZKReadName));
      inputFields.healthCheckConfig.remove(skipZKReadName);
    }
  }

  inputFields.operations = MaintenanceManagementService
      .getListFromJsonPayload(node.get(PerInstanceProperties.operation_list.name()));
  inputFields.operationConfig = MaintenanceManagementService
      .getMapFromJsonPayload(node.get(PerInstanceProperties.operation_config.name()));
  if (inputFields.operationConfig != null && inputFields.operationConfig
      .containsKey(performOperation)) {
    inputFields.performOperation =
        Boolean.parseBoolean(inputFields.operationConfig.get(performOperation));
  }
  // Parameterized logging: the old string concatenation built the message even
  // when debug logging was disabled.
  LOG.debug("Input fields for take/free Instance {}", inputFields);
  return inputFields;
}
/**
 * Adds a new instance to the cluster. The request body must be a JSON-serialized
 * {@link ZNRecord} representing the instance's {@link InstanceConfig}.
 *
 * @param clusterId cluster to add the instance to
 * @param instanceName name of the instance (used for error reporting)
 * @param content JSON-serialized ZNRecord of the InstanceConfig
 * @return 200 OK on success, 400 on malformed input, 500 on a Helix failure
 */
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@PUT
public Response addInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName, String content) {
  HelixAdmin admin = getHelixAdmin();
  ZNRecord record;
  try {
    record = toZNRecord(content);
  } catch (IOException e) {
    // Log the exception as a throwable rather than concatenating it into the message.
    LOG.error("Failed to deserialize user's input " + content, e);
    // Fixed typo in the user-facing message ("vaild" -> "valid").
    return badRequest("Input is not a valid ZNRecord!");
  }
  try {
    admin.addInstance(clusterId, new InstanceConfig(record));
  } catch (Exception ex) {
    LOG.error("Error in adding an instance: " + instanceName, ex);
    return serverError(ex);
  }
  return OK();
}
/**
 * Applies a command to an instance. Supported commands: enable, disable, reset,
 * resetPartitions, setInstanceOperation, addInstanceTag, removeInstanceTag,
 * enablePartitions, disablePartitions. Several commands read additional arguments
 * (resource name, partition list, tag list) from the JSON request body, which must
 * also carry an "id" matching the instance name (see validInstance).
 *
 * @param clusterId cluster containing the instance
 * @param instanceName target instance
 * @param command the operation to perform (query parameter)
 * @param state instance operation to set (only used by setInstanceOperation)
 * @param disabledType optional InstanceDisabledType name (only used by disable)
 * @param disabledReason optional free-form reason (only used by disable)
 * @param content JSON body carrying per-command arguments; may be empty
 * @return 200 OK on success, 400 on an invalid command/arguments or a Helix error
 */
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
public Response updateInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName, @QueryParam("command") String command,
    @QueryParam("instanceOperation") InstanceConstants.InstanceOperation state,
    @QueryParam("instanceDisabledType") String disabledType,
    @QueryParam("instanceDisabledReason") String disabledReason, String content) {
  Command cmd;
  try {
    cmd = Command.valueOf(command);
  } catch (Exception e) {
    return badRequest("Invalid command : " + command);
  }
  HelixAdmin admin = getHelixAdmin();
  try {
    // The body is optional; commands that need it parse fields out of this node.
    JsonNode node = null;
    if (content.length() != 0) {
      node = OBJECT_MAPPER.readTree(content);
    }
    switch (cmd) {
    case enable:
      admin.enableInstance(clusterId, instanceName, true);
      break;
    case disable:
      // disabledType, when supplied, must be a valid InstanceDisabledType enum name.
      InstanceConstants.InstanceDisabledType disabledTypeEnum = null;
      if (disabledType != null) {
        try {
          disabledTypeEnum = InstanceConstants.InstanceDisabledType.valueOf(disabledType);
        } catch (IllegalArgumentException ex) {
          return badRequest("Invalid instanceDisabledType!");
        }
      }
      admin.enableInstance(clusterId, instanceName, false, disabledTypeEnum, disabledReason);
      break;
    case reset:
    case resetPartitions:
      // Body must name this instance and carry "resource" plus a "partitions" list.
      if (!validInstance(node, instanceName)) {
        return badRequest("Instance names are not match!");
      }
      admin.resetPartition(clusterId, instanceName,
          node.get(PerInstanceProperties.resource.name()).textValue(),
          (List<String>) OBJECT_MAPPER
              .readValue(node.get(PerInstanceProperties.partitions.name()).toString(),
                  OBJECT_MAPPER.getTypeFactory()
                      .constructCollectionType(List.class, String.class)));
      break;
    case setInstanceOperation:
      admin.setInstanceOperation(clusterId, instanceName, state);
      break;
    case addInstanceTag:
      // Body must carry an "instanceTags" list; each tag is added individually.
      if (!validInstance(node, instanceName)) {
        return badRequest("Instance names are not match!");
      }
      for (String tag : (List<String>) OBJECT_MAPPER
          .readValue(node.get(PerInstanceProperties.instanceTags.name()).toString(),
              OBJECT_MAPPER.getTypeFactory().constructCollectionType(List.class, String.class))) {
        admin.addInstanceTag(clusterId, instanceName, tag);
      }
      break;
    case removeInstanceTag:
      if (!validInstance(node, instanceName)) {
        return badRequest("Instance names are not match!");
      }
      for (String tag : (List<String>) OBJECT_MAPPER
          .readValue(node.get(PerInstanceProperties.instanceTags.name()).toString(),
              OBJECT_MAPPER.getTypeFactory().constructCollectionType(List.class, String.class))) {
        admin.removeInstanceTag(clusterId, instanceName, tag);
      }
      break;
    case enablePartitions:
      admin.enablePartition(true, clusterId, instanceName,
          node.get(PerInstanceProperties.resource.name()).textValue(),
          (List<String>) OBJECT_MAPPER
              .readValue(node.get(PerInstanceProperties.partitions.name()).toString(),
                  OBJECT_MAPPER.getTypeFactory()
                      .constructCollectionType(List.class, String.class)));
      break;
    case disablePartitions:
      admin.enablePartition(false, clusterId, instanceName,
          node.get(PerInstanceProperties.resource.name()).textValue(),
          (List<String>) OBJECT_MAPPER
              .readValue(node.get(PerInstanceProperties.partitions.name()).toString(),
                  OBJECT_MAPPER.getTypeFactory().constructCollectionType(List.class, String.class)));
      break;
    default:
      LOG.error("Unsupported command :" + command);
      return badRequest("Unsupported command :" + command);
    }
  } catch (Exception e) {
    // Any parse failure or Helix error surfaces as a 400 with the exception message.
    LOG.error("Failed in updating instance : " + instanceName, e);
    return badRequest(e.getMessage());
  }
  return OK();
}
/**
 * Drops an instance from the cluster. Returns 400 with the Helix error message when the
 * lookup or drop fails with a HelixException.
 */
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@DELETE
public Response deleteInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName) {
  HelixAdmin helixAdmin = getHelixAdmin();
  try {
    // dropInstance requires the full InstanceConfig, so fetch it first.
    InstanceConfig configToDrop = helixAdmin.getInstanceConfig(clusterId, instanceName);
    helixAdmin.dropInstance(clusterId, configToDrop);
    return OK();
  } catch (HelixException e) {
    return badRequest(e.getMessage());
  }
}
/**
 * Returns the persisted InstanceConfig of the given instance, or 404 when it does not exist.
 */
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("configs")
public Response getInstanceConfig(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName) throws IOException {
  HelixDataAccessor dataAccessor = getDataAccssor(clusterId);
  InstanceConfig config =
      dataAccessor.getProperty(dataAccessor.keyBuilder().instanceConfig(instanceName));
  return config == null ? notFound() : JSONRepresentation(config.getRecord());
}
/**
 * Updates (merge) or deletes fields of an instance's InstanceConfig. The command defaults
 * to "update" when absent. Both paths first validate that the resulting topology setting
 * is still consistent with the cluster config.
 *
 * @param clusterId cluster containing the instance
 * @param instanceName target instance
 * @param commandStr "update" (default) or "delete"
 * @param content JSON-serialized ZNRecord delta to merge or remove
 * @return 200 OK on success; 400 on bad input; 404 when the config is missing;
 *         500 on validation or unexpected failures
 */
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("configs")
public Response updateInstanceConfig(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName, @QueryParam("command") String commandStr,
    String content) {
  Command command;
  if (commandStr == null || commandStr.isEmpty()) {
    command = Command.update; // Default behavior to keep it backward-compatible
  } else {
    try {
      command = getCommand(commandStr);
    } catch (HelixException ex) {
      return badRequest(ex.getMessage());
    }
  }
  ZNRecord record;
  try {
    record = toZNRecord(content);
  } catch (IOException e) {
    LOG.error("Failed to deserialize user's input " + content, e);
    // Fixed typo in the user-facing message ("vaild" -> "valid").
    return badRequest("Input is not a valid ZNRecord!");
  }
  InstanceConfig instanceConfig = new InstanceConfig(record);
  ConfigAccessor configAccessor = getConfigAccessor();
  try {
    switch (command) {
    case update:
      /*
       * The new instanceConfig will be merged with existing one.
       * Even if the instance is disabled, non-valid instance topology config will cause rebalance
       * failure. We are doing the check whenever user updates InstanceConfig.
       */
      validateDeltaTopologySettingInInstanceConfig(clusterId, instanceName, configAccessor,
          instanceConfig, command);
      configAccessor.updateInstanceConfig(clusterId, instanceName, instanceConfig);
      break;
    case delete:
      // The topology must also remain valid after removing the listed simple fields.
      validateDeltaTopologySettingInInstanceConfig(clusterId, instanceName, configAccessor,
          instanceConfig, command);
      HelixConfigScope instanceScope =
          new HelixConfigScopeBuilder(HelixConfigScope.ConfigScopeProperty.PARTICIPANT)
              .forCluster(clusterId).forParticipant(instanceName).build();
      configAccessor.remove(instanceScope, record);
      break;
    default:
      return badRequest(String.format("Unsupported command: %s", command));
    }
  } catch (IllegalArgumentException ex) {
    // Fixed placeholder bug: the original used an SLF4J-style "{}" inside String.format,
    // so the instance name never appeared in the log line.
    LOG.error(String.format("Invalid topology setting for instance: %s. Fail the config update",
        instanceName), ex);
    return serverError(ex);
  } catch (HelixException ex) {
    return notFound(ex.getMessage());
  } catch (Exception ex) {
    LOG.error(String.format("Error in update instance config for instance: %s", instanceName),
        ex);
    return serverError(ex);
  }
  return OK();
}
/**
 * Lists the resources (regular and task-framework) that currently have states on a live
 * instance, scoped to its active session.
 *
 * Fixed: the original returned a bare {@code null} Response when the instance was not in
 * the live-instances list, which is not a valid JAX-RS response; it now returns 404,
 * consistent with the sibling per-resource endpoint.
 *
 * @return JSON object with the instance id and a "resources" array; 404 when not live
 */
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("resources")
public Response getResourcesOnInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName) throws IOException {
  HelixDataAccessor accessor = getDataAccssor(clusterId);
  List<String> liveInstances = accessor.getChildNames(accessor.keyBuilder().liveInstances());
  if (!liveInstances.contains(instanceName)) {
    return notFound();
  }
  ObjectNode root = JsonNodeFactory.instance.objectNode();
  root.put(Properties.id.name(), instanceName);
  ArrayNode resourcesNode = root.putArray(PerInstanceProperties.resources.name());
  LiveInstance liveInstance =
      accessor.getProperty(accessor.keyBuilder().liveInstance(instanceName));
  // Current states live under the instance's active session.
  String currentSessionId = liveInstance.getEphemeralOwner();
  List<String> resources =
      accessor.getChildNames(accessor.keyBuilder().currentStates(instanceName, currentSessionId));
  resources.addAll(accessor
      .getChildNames(accessor.keyBuilder().taskCurrentStates(instanceName, currentSessionId)));
  if (!resources.isEmpty()) {
    resourcesNode.addAll((ArrayNode) OBJECT_MAPPER.valueToTree(resources));
  }
  return JSONRepresentation(root);
}
/**
 * Returns the current state of one resource on a live instance. Falls back to the
 * task-framework current-state path when the regular path has no entry; 404 when the
 * instance is not live or neither path has a record.
 */
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET @Path("resources/{resourceName}")
public Response getResourceOnInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName,
    @PathParam("resourceName") String resourceName) throws IOException {
  HelixDataAccessor dataAccessor = getDataAccssor(clusterId);
  List<String> aliveInstances =
      dataAccessor.getChildNames(dataAccessor.keyBuilder().liveInstances());
  if (!aliveInstances.contains(instanceName)) {
    return notFound();
  }
  LiveInstance live =
      dataAccessor.getProperty(dataAccessor.keyBuilder().liveInstance(instanceName));
  // Current states are scoped to the instance's active session.
  String sessionId = live.getEphemeralOwner();
  CurrentState state = dataAccessor.getProperty(
      dataAccessor.keyBuilder().currentState(instanceName, sessionId, resourceName));
  if (state == null) {
    // Not a regular resource; it may be managed by the task framework.
    state = dataAccessor.getProperty(
        dataAccessor.keyBuilder().taskCurrentState(instanceName, sessionId, resourceName));
  }
  return state == null ? notFound() : JSONRepresentation(state.getRecord());
}
/**
 * Lists state-transition errors recorded for an instance, grouped first by session id and
 * then by resource, with a partition-name array per resource.
 *
 * @return JSON object {id, errors: {sessionId: {resourceName: [partition, ...]}}};
 *         404 when the instance has no error sessions at all
 */
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("errors")
public Response getErrorsOnInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName) throws IOException {
  HelixDataAccessor accessor = getDataAccssor(clusterId);
  ObjectNode root = JsonNodeFactory.instance.objectNode();
  root.put(Properties.id.name(), instanceName);
  ObjectNode errorsNode = JsonNodeFactory.instance.objectNode();
  List<String> sessionIds = accessor.getChildNames(accessor.keyBuilder().errors(instanceName));
  if (sessionIds == null || sessionIds.size() == 0) {
    return notFound();
  }
  for (String sessionId : sessionIds) {
    List<String> resources =
        accessor.getChildNames(accessor.keyBuilder().errors(instanceName, sessionId));
    if (resources != null) {
      ObjectNode resourcesNode = JsonNodeFactory.instance.objectNode();
      for (String resourceName : resources) {
        List<String> partitions = accessor
            .getChildNames(accessor.keyBuilder().errors(instanceName, sessionId, resourceName));
        if (partitions != null) {
          ArrayNode partitionsNode = resourcesNode.putArray(resourceName);
          partitionsNode.addAll((ArrayNode) OBJECT_MAPPER.valueToTree(partitions));
        }
      }
      errorsNode.put(sessionId, resourcesNode);
    }
  }
  root.put(PerInstanceProperties.errors.name(), errorsNode);
  return JSONRepresentation(root);
}
/**
 * Returns the state-transition error record for one partition of a resource within a
 * given session, or 404 when no such error exists.
 */
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("errors/{sessionId}/{resourceName}/{partitionName}")
public Response getErrorsOnInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName, @PathParam("sessionId") String sessionId,
    @PathParam("resourceName") String resourceName,
    @PathParam("partitionName") String partitionName) throws IOException {
  HelixDataAccessor dataAccessor = getDataAccssor(clusterId);
  Error transitionError = dataAccessor.getProperty(dataAccessor.keyBuilder()
      .stateTransitionError(instanceName, sessionId, resourceName, partitionName));
  return transitionError == null ? notFound() : JSONRepresentation(transitionError.getRecord());
}
/**
 * Returns the participant history (session/offline records) of an instance, or 404 when
 * no history node exists.
 */
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("history")
public Response getHistoryOnInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName) throws IOException {
  HelixDataAccessor dataAccessor = getDataAccssor(clusterId);
  ParticipantHistory participantHistory =
      dataAccessor.getProperty(dataAccessor.keyBuilder().participantHistory(instanceName));
  return participantHistory == null ? notFound()
      : JSONRepresentation(participantHistory.getRecord());
}
/**
 * Lists message names pending on an instance, split into NEW and READ buckets and
 * optionally filtered by state model definition. Also reports total and read counts.
 *
 * @param stateModelDef when non-blank, only messages whose state model matches are counted
 * @return JSON object with new_messages, read_messages, total_message_count and
 *         read_message_count; 404 when the instance has no messages
 */
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("messages")
public Response getMessagesOnInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName,
    @QueryParam("stateModelDef") String stateModelDef) {
  HelixDataAccessor accessor = getDataAccssor(clusterId);
  ObjectNode root = JsonNodeFactory.instance.objectNode();
  root.put(Properties.id.name(), instanceName);
  ArrayNode newMessages = root.putArray(PerInstanceProperties.new_messages.name());
  ArrayNode readMessages = root.putArray(PerInstanceProperties.read_messages.name());
  List<String> messageNames =
      accessor.getChildNames(accessor.keyBuilder().messages(instanceName));
  if (messageNames == null || messageNames.size() == 0) {
    LOG.warn("Unable to get any messages on instance: " + instanceName);
    return notFound();
  }
  for (String messageName : messageNames) {
    Message message = accessor.getProperty(accessor.keyBuilder().message(instanceName, messageName));
    if (message == null) {
      // The message may be consumed/deleted between listing and reading; skip it.
      // Fixed log call: the original had no "{}" placeholder, so the name was never printed.
      LOG.warn("Message is deleted given message name: {}", messageName);
      continue;
    }
    // if stateModelDef is valid, keep messages with StateModelDef equals to the parameter
    if (StringUtil.isNotBlank(stateModelDef) && !stateModelDef.equals(message.getStateModelDef())) {
      continue;
    }
    if (Message.MessageState.NEW.equals(message.getMsgState())) {
      newMessages.add(messageName);
    } else if (Message.MessageState.READ.equals(message.getMsgState())) {
      readMessages.add(messageName);
    }
  }
  root.put(PerInstanceProperties.total_message_count.name(),
      newMessages.size() + readMessages.size());
  root.put(PerInstanceProperties.read_message_count.name(), readMessages.size());
  return JSONRepresentation(root);
}
/**
 * Returns one message on an instance by its id, or 404 when it does not exist.
 */
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("messages/{messageId}")
public Response getMessageOnInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName,
    @PathParam("messageId") String messageId) throws IOException {
  HelixDataAccessor dataAccessor = getDataAccssor(clusterId);
  Message msg = dataAccessor.getProperty(dataAccessor.keyBuilder().message(instanceName, messageId));
  return msg == null ? notFound() : JSONRepresentation(msg.getRecord());
}
/**
 * Lists the names of all health reports published by an instance. The list may be empty;
 * this endpoint never returns 404.
 */
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("healthreports")
public Response getHealthReportsOnInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName) throws IOException {
  HelixDataAccessor dataAccessor = getDataAccssor(clusterId);
  List<String> reportNames =
      dataAccessor.getChildNames(dataAccessor.keyBuilder().healthReports(instanceName));
  ObjectNode root = JsonNodeFactory.instance.objectNode();
  root.put(Properties.id.name(), instanceName);
  ArrayNode reportsNode = root.putArray(PerInstanceProperties.healthreports.name());
  if (reportNames != null && !reportNames.isEmpty()) {
    reportsNode.addAll((ArrayNode) OBJECT_MAPPER.valueToTree(reportNames));
  }
  return JSONRepresentation(root);
}
/**
 * Returns one health report of an instance by report name, or 404 when it does not exist.
 */
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("healthreports/{reportName}")
public Response getHealthReportsOnInstance(
    @PathParam("clusterId") String clusterId, @PathParam("instanceName") String instanceName,
    @PathParam("reportName") String reportName) throws IOException {
  HelixDataAccessor dataAccessor = getDataAccssor(clusterId);
  HealthStat stat =
      dataAccessor.getProperty(dataAccessor.keyBuilder().healthReport(instanceName, reportName));
  return stat == null ? notFound() : JSONRepresentation(stat);
}
/**
 * Checks that the JSON payload's "id" field matches the instance name from the URL.
 *
 * Null-safe: a missing payload or a payload without an "id" field is treated as a
 * mismatch (returns false) instead of throwing an NPE, so callers produce a clean
 * 400 "Instance names are not match!" rather than a generic error message.
 *
 * @param node parsed request body; may be null when the body was empty
 * @param instanceName the instance name from the request path
 * @return true iff the payload identifies the same instance
 */
private boolean validInstance(JsonNode node, String instanceName) {
  if (node == null) {
    return false;
  }
  JsonNode idNode = node.get(Properties.id.name());
  return idNode != null && instanceName.equals(idNode.textValue());
}
/**
 * Validates that applying the given delta to the instance's current InstanceConfig still
 * yields a valid topology setting with respect to the cluster config. For a delete
 * command the delta's simple fields are removed from a copy of the current config; for
 * any other command the delta is merged in. Works entirely on an in-memory copy; nothing
 * is persisted here.
 *
 * @param clusterName cluster whose ClusterConfig governs the topology rules
 * @param instanceName instance whose config is being changed
 * @param configAccessor accessor used to read the current configs
 * @param newInstanceConfig the delta supplied by the caller
 * @param command delete (remove fields) vs. anything else (merge)
 * @return true when the resulting config passes topology validation
 */
private boolean validateDeltaTopologySettingInInstanceConfig(String clusterName,
    String instanceName, ConfigAccessor configAccessor, InstanceConfig newInstanceConfig,
    Command command) {
  InstanceConfig originalInstanceConfigCopy =
      configAccessor.getInstanceConfig(clusterName, instanceName);
  if (command == Command.delete) {
    // Simulate the delete by dropping each simple field named in the delta.
    for (Map.Entry<String, String> entry : newInstanceConfig.getRecord().getSimpleFields()
        .entrySet()) {
      originalInstanceConfigCopy.getRecord().getSimpleFields().remove(entry.getKey());
    }
  } else {
    // Simulate the update by merging the delta into the current record.
    originalInstanceConfigCopy.getRecord().update(newInstanceConfig.getRecord());
  }
  return originalInstanceConfigCopy
      .validateTopologySettingInInstanceConfig(configAccessor.getClusterConfig(clusterName),
          instanceName);
}
}
| 9,362 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources/helix/ResourceAccessor.java
|
package org.apache.helix.rest.server.resources.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Response;
import com.codahale.metrics.annotation.ResponseMetered;
import com.codahale.metrics.annotation.Timed;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.model.CustomizedView;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.server.filters.ClusterAuth;
import org.apache.helix.zookeeper.api.client.RealmAwareZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ClusterAuth
@Path("/clusters/{clusterId}/resources")
public class ResourceAccessor extends AbstractHelixResource {
private final static Logger _logger = LoggerFactory.getLogger(ResourceAccessor.class);
/**
 * Keys used to identify resource sub-records in JSON payloads and responses
 * (e.g. the map returned by getResource and the body expected by addWagedResource).
 */
public enum ResourceProperties {
  idealState,
  idealStates,
  externalView,
  externalViews,
  resourceConfig,
}
/**
 * Aggregated health states reported for resources and partitions by the
 * health endpoints of this accessor.
 */
public enum HealthStatus {
  HEALTHY,
  PARTIAL_HEALTHY,
  UNHEALTHY
}
/**
 * Lists resource names in a cluster: all IdealState children plus all ExternalView
 * children. Returns 404 when the cluster's IdealState parent node is missing.
 */
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
public Response getResources(@PathParam("clusterId") String clusterId) {
  RealmAwareZkClient zkClient = getRealmAwareZkClient();
  List<String> idealStateNames = zkClient.getChildren(PropertyPathBuilder.idealState(clusterId));
  List<String> externalViewNames =
      zkClient.getChildren(PropertyPathBuilder.externalView(clusterId));
  if (idealStateNames == null) {
    // A missing IdealState path means the cluster is not set up (or does not exist).
    return notFound();
  }
  ObjectNode root = JsonNodeFactory.instance.objectNode();
  root.put(Properties.id.name(), JsonNodeFactory.instance.textNode(clusterId));
  ArrayNode idealStatesNode = root.putArray(ResourceProperties.idealStates.name());
  idealStatesNode.addAll((ArrayNode) OBJECT_MAPPER.valueToTree(idealStateNames));
  ArrayNode externalViewsNode = root.putArray(ResourceProperties.externalViews.name());
  if (externalViewNames != null) {
    externalViewsNode.addAll((ArrayNode) OBJECT_MAPPER.valueToTree(externalViewNames));
  }
  return JSONRepresentation(root);
}
/**
 * Returns the health profile of all resources in the cluster.
 *
 * A resource is UNHEALTHY when it is missing from ExternalView, has no partitions, or
 * has at least one UNHEALTHY partition; PARTIAL_HEALTHY when it has no UNHEALTHY
 * partition but at least one partially healthy one; HEALTHY otherwise.
 *
 * @param clusterId cluster to inspect
 * @return JSON map of resource name to {@link HealthStatus} name
 */
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("health")
public Response getResourceHealth(@PathParam("clusterId") String clusterId) {
  RealmAwareZkClient zkClient = getRealmAwareZkClient();
  List<String> resourcesInIdealState =
      zkClient.getChildren(PropertyPathBuilder.idealState(clusterId));
  List<String> resourcesInExternalView =
      zkClient.getChildren(PropertyPathBuilder.externalView(clusterId));
  Map<String, String> resourceHealthResult = new HashMap<>();
  for (String resourceName : resourcesInIdealState) {
    if (resourcesInExternalView.contains(resourceName)) {
      Map<String, String> partitionHealth = computePartitionHealth(clusterId, resourceName);
      if (partitionHealth.isEmpty()
          || partitionHealth.values().contains(HealthStatus.UNHEALTHY.name())) {
        // No partitions for a resource or there exists one or more UNHEALTHY partitions in this
        // resource, UNHEALTHY
        resourceHealthResult.put(resourceName, HealthStatus.UNHEALTHY.name());
      } else if (partitionHealth.values().contains(HealthStatus.PARTIAL_HEALTHY.name())) {
        // No UNHEALTHY partition, but one or more partially healthy partitions, resource is
        // partially healthy
        resourceHealthResult.put(resourceName, HealthStatus.PARTIAL_HEALTHY.name());
      } else {
        // No UNHEALTHY or partially healthy partitions and non-empty, resource is healthy
        resourceHealthResult.put(resourceName, HealthStatus.HEALTHY.name());
      }
    } else {
      // If a resource is not in ExternalView, then it is UNHEALTHY
      resourceHealthResult.put(resourceName, HealthStatus.UNHEALTHY.name());
    }
  }
  return JSONRepresentation(resourceHealthResult);
}
/**
 * Returns the health profile of every partition of the given resource.
 *
 * @param clusterId cluster containing the resource
 * @param resourceName resource to inspect
 * @return JSON map of partition name to health status
 */
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{resourceName}/health")
public Response getPartitionHealth(@PathParam("clusterId") String clusterId,
    @PathParam("resourceName") String resourceName) {
  Map<String, String> partitionHealth = computePartitionHealth(clusterId, resourceName);
  return JSONRepresentation(partitionHealth);
}
/**
 * Gets a resource or validates its weight configuration, depending on "command":
 * - getResource (default): returns a map with the IdealState (404 when missing),
 *   plus the ResourceConfig and ExternalView when present (null otherwise).
 * - validateWeight: validates the resource's configuration for WAGED rebalance and
 *   returns a per-check boolean map; a HelixException becomes a 400.
 *
 * @param clusterId cluster containing the resource
 * @param resourceName resource to read or validate
 * @param command "getResource" or "validateWeight"
 */
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{resourceName}")
public Response getResource(@PathParam("clusterId") String clusterId,
    @PathParam("resourceName") String resourceName,
    @DefaultValue("getResource") @QueryParam("command") String command) {
  // Get the command. If not provided, the default would be "getResource"
  Command cmd;
  try {
    cmd = Command.valueOf(command);
  } catch (Exception e) {
    return badRequest("Invalid command : " + command);
  }
  ConfigAccessor accessor = getConfigAccessor();
  HelixAdmin admin = getHelixAdmin();
  switch (cmd) {
  case getResource:
    ResourceConfig resourceConfig = accessor.getResourceConfig(clusterId, resourceName);
    IdealState idealState = admin.getResourceIdealState(clusterId, resourceName);
    ExternalView externalView = admin.getResourceExternalView(clusterId, resourceName);
    Map<String, ZNRecord> resourceMap = new HashMap<>();
    // A resource without an IdealState is considered nonexistent.
    if (idealState != null) {
      resourceMap.put(ResourceProperties.idealState.name(), idealState.getRecord());
    } else {
      return notFound();
    }
    // Always include the keys so clients see explicit nulls when the records are absent.
    resourceMap.put(ResourceProperties.resourceConfig.name(), null);
    resourceMap.put(ResourceProperties.externalView.name(), null);
    if (resourceConfig != null) {
      resourceMap.put(ResourceProperties.resourceConfig.name(), resourceConfig.getRecord());
    }
    if (externalView != null) {
      resourceMap.put(ResourceProperties.externalView.name(), externalView.getRecord());
    }
    return JSONRepresentation(resourceMap);
  case validateWeight:
    // Validate ResourceConfig for WAGED rebalance
    Map<String, Boolean> validationResultMap;
    try {
      validationResultMap = admin.validateResourcesForWagedRebalance(clusterId,
          Collections.singletonList(resourceName));
    } catch (HelixException e) {
      return badRequest(e.getMessage());
    }
    return JSONRepresentation(validationResultMap);
  default:
    _logger.error("Unsupported command :" + command);
    return badRequest("Unsupported command :" + command);
  }
}
/**
 * Creates a resource. Two modes, selected via "command":
 * - addResource (default): the body may carry an IdealState ZNRecord; when the body is
 *   empty the resource is created from the query parameters (numPartitions,
 *   stateModelRef, rebalancerMode, rebalanceStrategy, bucketSize,
 *   maxPartitionsPerInstance).
 * - addWagedResource: the body must carry BOTH an "idealState" and a "resourceConfig"
 *   ZNRecord; the resource is added with weight via addResourceWithWeight.
 *
 * @return 200 OK on success; 400 on malformed/missing input; 500 on Helix failures
 */
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@PUT
@Path("{resourceName}")
public Response addResource(@PathParam("clusterId") String clusterId,
    @PathParam("resourceName") String resourceName,
    @DefaultValue("-1") @QueryParam("numPartitions") int numPartitions,
    @DefaultValue("") @QueryParam("stateModelRef") String stateModelRef,
    @DefaultValue("SEMI_AUTO") @QueryParam("rebalancerMode") String rebalancerMode,
    @DefaultValue("DEFAULT") @QueryParam("rebalanceStrategy") String rebalanceStrategy,
    @DefaultValue("0") @QueryParam("bucketSize") int bucketSize,
    @DefaultValue("-1") @QueryParam("maxPartitionsPerInstance") int maxPartitionsPerInstance,
    @DefaultValue("addResource") @QueryParam("command") String command, String content) {
  // Get the command. If not provided, the default would be "addResource"
  Command cmd;
  try {
    cmd = Command.valueOf(command);
  } catch (Exception e) {
    return badRequest("Invalid command : " + command);
  }
  HelixAdmin admin = getHelixAdmin();
  try {
    switch (cmd) {
    case addResource:
      if (content.length() != 0) {
        // Body takes precedence over the query parameters when present.
        ZNRecord record;
        try {
          record = toZNRecord(content);
        } catch (IOException e) {
          _logger.error("Failed to deserialize user's input " + content + ", Exception: " + e);
          return badRequest("Input is not a valid ZNRecord!");
        }
        if (record.getSimpleFields() != null) {
          admin.addResource(clusterId, resourceName, new IdealState(record));
        }
      } else {
        admin.addResource(clusterId, resourceName, numPartitions, stateModelRef, rebalancerMode,
            rebalanceStrategy, bucketSize, maxPartitionsPerInstance);
      }
      break;
    case addWagedResource:
      // Check if content is valid
      if (content == null || content.length() == 0) {
        _logger.error("Input is null or empty!");
        return badRequest("Input is null or empty!");
      }
      Map<String, ZNRecord> input;
      // Content must supply both IdealState and ResourceConfig
      try {
        TypeReference<Map<String, ZNRecord>> typeRef =
            new TypeReference<Map<String, ZNRecord>>() {
            };
        input = ZNRECORD_READER.forType(typeRef).readValue(content);
      } catch (IOException e) {
        _logger.error("Failed to deserialize user's input {}, Exception: {}", content, e);
        return badRequest("Input is not a valid map of String-ZNRecord pairs!");
      }
      // Check if the map contains both IdealState and ResourceConfig
      ZNRecord idealStateRecord =
          input.get(ResourceAccessor.ResourceProperties.idealState.name());
      ZNRecord resourceConfigRecord =
          input.get(ResourceAccessor.ResourceProperties.resourceConfig.name());
      if (idealStateRecord == null || resourceConfigRecord == null) {
        _logger.error("Input does not contain both IdealState and ResourceConfig!");
        return badRequest("Input does not contain both IdealState and ResourceConfig!");
      }
      // Add using HelixAdmin API
      try {
        admin.addResourceWithWeight(clusterId, new IdealState(idealStateRecord),
            new ResourceConfig(resourceConfigRecord));
      } catch (HelixException e) {
        String errMsg = String.format("Failed to add resource %s with weight in cluster %s!",
            idealStateRecord.getId(), clusterId);
        _logger.error(errMsg, e);
        return badRequest(errMsg);
      }
      break;
    default:
      _logger.error("Unsupported command :" + command);
      return badRequest("Unsupported command :" + command);
    }
  } catch (Exception e) {
    _logger.error("Error in adding a resource: " + resourceName, e);
    return serverError(e);
  }
  return OK();
}
/**
 * Applies a command to a resource: enable, disable, rebalance (requires "replicas";
 * "keyPrefix" defaults to the resource name), or enableWagedRebalance.
 *
 * @param clusterId cluster containing the resource
 * @param resourceName target resource
 * @param command the operation to perform
 * @param replicas replica count for rebalance; -1 means "not provided"
 * @param keyPrefix partition key prefix for rebalance; empty means use resourceName
 * @param group node-group name for rebalance; may be empty
 * @return 200 OK on success, 400 on an invalid command/arguments or a Helix error
 */
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("{resourceName}")
public Response updateResource(@PathParam("clusterId") String clusterId,
    @PathParam("resourceName") String resourceName, @QueryParam("command") String command,
    @DefaultValue("-1") @QueryParam("replicas") int replicas,
    @DefaultValue("") @QueryParam("keyPrefix") String keyPrefix,
    @DefaultValue("") @QueryParam("group") String group) {
  Command cmd;
  try {
    cmd = Command.valueOf(command);
  } catch (Exception e) {
    return badRequest("Invalid command : " + command);
  }
  HelixAdmin admin = getHelixAdmin();
  try {
    switch (cmd) {
    case enable:
      admin.enableResource(clusterId, resourceName, true);
      break;
    case disable:
      admin.enableResource(clusterId, resourceName, false);
      break;
    case rebalance:
      if (replicas == -1) {
        return badRequest("Number of replicas is needed for rebalancing!");
      }
      keyPrefix = keyPrefix.length() == 0 ? resourceName : keyPrefix;
      admin.rebalance(clusterId, resourceName, replicas, keyPrefix, group);
      break;
    case enableWagedRebalance:
      try {
        admin.enableWagedRebalance(clusterId, Collections.singletonList(resourceName));
      } catch (HelixException e) {
        return badRequest(e.getMessage());
      }
      break;
    default:
      _logger.error("Unsupported command :" + command);
      return badRequest("Unsupported command :" + command);
    }
  } catch (Exception e) {
    _logger.error("Failed in updating resource : " + resourceName, e);
    return badRequest(e.getMessage());
  }
  return OK();
}
/**
 * Drops a resource from the cluster. Returns 500 when Helix fails the drop.
 */
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@DELETE
@Path("{resourceName}")
public Response deleteResource(@PathParam("clusterId") String clusterId,
    @PathParam("resourceName") String resourceName) {
  try {
    getHelixAdmin().dropResource(clusterId, resourceName);
    return OK();
  } catch (Exception e) {
    _logger.error("Error in deleting a resource: " + resourceName, e);
    return serverError();
  }
}
/**
 * Returns the persisted ResourceConfig of a resource, or 404 when it does not exist.
 */
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{resourceName}/configs")
public Response getResourceConfig(@PathParam("clusterId") String clusterId,
    @PathParam("resourceName") String resourceName) {
  ResourceConfig config = getConfigAccessor().getResourceConfig(clusterId, resourceName);
  return config == null ? notFound() : JSONRepresentation(config.getRecord());
}
/**
 * Updates (merge) or deletes fields of a ResourceConfig. The record id in the body must
 * match the resourceName path parameter. The command defaults to "update" when absent.
 *
 * @param clusterId cluster containing the resource
 * @param resourceName target resource
 * @param commandStr "update" (default) or "delete"
 * @param content JSON-serialized ZNRecord delta to merge or remove
 * @return 200 OK on success; 400 on bad input; 404 when the config is missing;
 *         500 on unexpected failures
 */
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("{resourceName}/configs")
public Response updateResourceConfig(@PathParam("clusterId") String clusterId,
    @PathParam("resourceName") String resourceName, @QueryParam("command") String commandStr,
    String content) {
  Command command;
  if (commandStr == null || commandStr.isEmpty()) {
    command = Command.update; // Default behavior to keep it backward-compatible
  } else {
    try {
      command = getCommand(commandStr);
    } catch (HelixException ex) {
      return badRequest(ex.getMessage());
    }
  }
  ZNRecord record;
  try {
    record = toZNRecord(content);
  } catch (IOException e) {
    _logger.error("Failed to deserialize user's input " + content + ", Exception: " + e);
    return badRequest("Input is not a valid ZNRecord!");
  }
  // Guard against the body targeting a different resource than the URL.
  if (!resourceName.equals(record.getId())) {
    return badRequest("ID does not match the resourceName name in input!");
  }
  ResourceConfig resourceConfig = new ResourceConfig(record);
  ConfigAccessor configAccessor = getConfigAccessor();
  try {
    switch (command) {
    case update:
      configAccessor.updateResourceConfig(clusterId, resourceName, resourceConfig);
      break;
    case delete:
      // Remove only the fields present in the supplied record from the resource scope.
      HelixConfigScope resourceScope =
          new HelixConfigScopeBuilder(HelixConfigScope.ConfigScopeProperty.RESOURCE)
              .forCluster(clusterId).forResource(resourceName).build();
      configAccessor.remove(resourceScope, record);
      break;
    default:
      return badRequest(String.format("Unsupported command: %s", command));
    }
  } catch (HelixException ex) {
    return notFound(ex.getMessage());
  } catch (Exception ex) {
    _logger.error(String.format("Error in update resource config for resource: %s", resourceName),
        ex);
    return serverError(ex);
  }
  return OK();
}
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{resourceName}/idealState")
/**
 * Returns the IdealState of the given resource as JSON, or 404 when it does not exist.
 *
 * @param clusterId    the cluster the resource belongs to
 * @param resourceName the resource whose IdealState is requested
 * @return 200 with the IdealState's ZNRecord, or 404 if absent
 */
public Response getResourceIdealState(@PathParam("clusterId") String clusterId,
    @PathParam("resourceName") String resourceName) {
  IdealState idealState = getHelixAdmin().getResourceIdealState(clusterId, resourceName);
  return idealState == null ? notFound() : JSONRepresentation(idealState.getRecord());
}
@ResponseMetered(name = HttpConstants.WRITE_REQUEST)
@Timed(name = HttpConstants.WRITE_REQUEST)
@POST
@Path("{resourceName}/idealState")
/**
 * Updates the IdealState of a resource, or removes the supplied fields from it.
 *
 * @param clusterId    the cluster the resource belongs to
 * @param resourceName the resource whose IdealState is modified
 * @param commandStr   "update" (default when absent) or "delete"
 * @param content      serialized ZNRecord of the IdealState (or of the fields to remove)
 * @return 200 on success; 400 for bad input; 404 if the IdealState is missing; 500 otherwise
 */
public Response updateResourceIdealState(@PathParam("clusterId") String clusterId,
    @PathParam("resourceName") String resourceName, @QueryParam("command") String commandStr,
    String content) {
  // No command means update, matching the historical default of this endpoint.
  Command command = Command.update;
  if (commandStr != null && !commandStr.isEmpty()) {
    try {
      command = getCommand(commandStr);
    } catch (HelixException ex) {
      return badRequest(ex.getMessage());
    }
  }

  ZNRecord record;
  try {
    record = toZNRecord(content);
  } catch (IOException e) {
    _logger.error("Failed to deserialize user's input " + content + ", Exception: " + e);
    return badRequest("Input is not a valid ZNRecord!");
  }

  IdealState idealState = new IdealState(record);
  HelixAdmin helixAdmin = getHelixAdmin();
  try {
    if (command == Command.update) {
      helixAdmin.updateIdealState(clusterId, resourceName, idealState);
    } else if (command == Command.delete) {
      helixAdmin.removeFromIdealState(clusterId, resourceName, idealState);
    } else {
      return badRequest(String.format("Unsupported command: %s", command));
    }
  } catch (HelixException ex) {
    // HelixAdmin raises HelixException when the IdealState does not already exist.
    return notFound(ex.getMessage());
  } catch (Exception ex) {
    _logger.error(String.format("Failed to update the IdealState for resource: %s", resourceName),
        ex);
    return serverError(ex);
  }
  return OK();
}
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{resourceName}/externalView")
/**
 * Returns the ExternalView of the given resource as JSON, or 404 when it does not exist.
 *
 * @param clusterId    the cluster the resource belongs to
 * @param resourceName the resource whose ExternalView is requested
 * @return 200 with the ExternalView's ZNRecord, or 404 if absent
 */
public Response getResourceExternalView(@PathParam("clusterId") String clusterId,
    @PathParam("resourceName") String resourceName) {
  ExternalView externalView = getHelixAdmin().getResourceExternalView(clusterId, resourceName);
  return externalView == null ? notFound() : JSONRepresentation(externalView.getRecord());
}
@ResponseMetered(name = HttpConstants.READ_REQUEST)
@Timed(name = HttpConstants.READ_REQUEST)
@GET
@Path("{resourceName}/{customizedStateType}/customizedView")
/**
 * Returns the CustomizedView of the given resource for one customized state type,
 * or 404 when it does not exist.
 *
 * @param clusterId           the cluster the resource belongs to
 * @param resourceName        the resource whose CustomizedView is requested
 * @param customizedStateType the customized state type aggregated into the view
 * @return 200 with the CustomizedView's ZNRecord, or 404 if absent
 */
public Response getResourceCustomizedView(@PathParam("clusterId") String clusterId,
    @PathParam("resourceName") String resourceName,
    @PathParam("customizedStateType") String customizedStateType) {
  CustomizedView customizedView =
      getHelixAdmin().getResourceCustomizedView(clusterId, resourceName, customizedStateType);
  return customizedView == null ? notFound() : JSONRepresentation(customizedView.getRecord());
}
/**
 * Computes a per-partition health verdict for a resource by comparing its ExternalView
 * against the expected state counts derived from the IdealState and state model.
 * <p>
 * For each partition: the top-priority state count must match exactly (else UNHEALTHY);
 * lower "active" states falling short mark it PARTIAL_HEALTHY; and a total active-replica
 * count below minActiveReplicas forces UNHEALTHY.
 * <p>
 * NOTE(review): assumes the IdealState, ExternalView and preference lists all exist —
 * a missing IdealState or a null preference list would NPE here; confirm callers guard.
 *
 * @param clusterId    the cluster the resource belongs to
 * @param resourceName the resource to evaluate
 * @return map from partition name to a HealthStatus name (HEALTHY / PARTIAL_HEALTHY / UNHEALTHY)
 */
private Map<String, String> computePartitionHealth(String clusterId, String resourceName) {
  HelixAdmin admin = getHelixAdmin();
  IdealState idealState = admin.getResourceIdealState(clusterId, resourceName);
  ExternalView externalView = admin.getResourceExternalView(clusterId, resourceName);
  StateModelDefinition stateModelDef =
      admin.getStateModelDef(clusterId, idealState.getStateModelDefRef());
  String initialState = stateModelDef.getInitialState();
  List<String> statesPriorityList = stateModelDef.getStatesPriorityList();
  // Trim the priority list so that only states above the initial state ("active"
  // states) remain; the initial state itself is excluded.
  statesPriorityList = statesPriorityList.subList(0, statesPriorityList.indexOf(initialState));
  int minActiveReplicas = idealState.getMinActiveReplicas();

  // Start the logic that determines the health status of each partition
  Map<String, String> partitionHealthResult = new HashMap<>();
  Set<String> allPartitionNames = idealState.getPartitionSet();
  if (!allPartitionNames.isEmpty()) {
    for (String partitionName : allPartitionNames) {
      // NOTE(review): getPreferenceList may be null for partitions without an
      // assignment — TODO confirm this cannot happen for resources reaching here.
      int replicaCount =
          idealState.getReplicaCount(idealState.getPreferenceList(partitionName).size());
      // Simplify expectedStateCountMap by assuming that all instances are available to reduce
      // computation load on this REST endpoint
      LinkedHashMap<String, Integer> expectedStateCountMap =
          stateModelDef.getStateCountMap(replicaCount, replicaCount);
      // Extract all states into Collections from ExternalView
      Map<String, String> stateMapInExternalView = externalView.getStateMap(partitionName);
      Collection<String> allReplicaStatesInExternalView =
          (stateMapInExternalView != null && !stateMapInExternalView.isEmpty())
              ? stateMapInExternalView.values()
              : Collections.<String> emptyList();
      int numActiveReplicasInExternalView = 0;
      HealthStatus status = HealthStatus.HEALTHY;
      // Go through all states that are "active" states (higher priority than InitialState)
      for (int statePriorityIndex = 0; statePriorityIndex < statesPriorityList
          .size(); statePriorityIndex++) {
        String currentState = statesPriorityList.get(statePriorityIndex);
        int currentStateCountInIdealState = expectedStateCountMap.get(currentState);
        int currentStateCountInExternalView =
            Collections.frequency(allReplicaStatesInExternalView, currentState);
        numActiveReplicasInExternalView += currentStateCountInExternalView;
        // Top state counts must match, if not, unhealthy
        if (statePriorityIndex == 0
            && currentStateCountInExternalView != currentStateCountInIdealState) {
          status = HealthStatus.UNHEALTHY;
          break;
        } else if (currentStateCountInExternalView < currentStateCountInIdealState) {
          // For non-top states, if count in ExternalView is less than count in IdealState,
          // partially healthy
          status = HealthStatus.PARTIAL_HEALTHY;
        }
      }
      if (numActiveReplicasInExternalView < minActiveReplicas) {
        // If this partition does not satisfy the number of minimum active replicas, unhealthy
        status = HealthStatus.UNHEALTHY;
      }
      partitionHealthResult.put(partitionName, status.name());
    }
  }
  return partitionHealthResult;
}
}
| 9,363 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources/helix/TaskAccessor.java
|
package org.apache.helix.rest.server.resources.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Response;
import com.codahale.metrics.annotation.ResponseMetered;
import com.codahale.metrics.annotation.Timed;
import com.fasterxml.jackson.core.type.TypeReference;
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.server.filters.ClusterAuth;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.zookeeper.zkclient.exception.ZkNoNodeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ClusterAuth
@Path("/clusters/{clusterId}/workflows/{workflowName}/jobs/{jobName}/tasks")
public class TaskAccessor extends AbstractHelixResource {
  private static Logger _logger = LoggerFactory.getLogger(TaskAccessor.class.getName());

  @ResponseMetered(name = HttpConstants.READ_REQUEST)
  @Timed(name = HttpConstants.READ_REQUEST)
  @GET
  @Path("{taskPartitionId}/userContent")
  /**
   * Reads the user content store of a single task partition.
   *
   * @param clusterId       the cluster hosting the workflow
   * @param workflowName    the workflow containing the job
   * @param jobName         the job containing the task
   * @param taskPartitionId the task partition whose content store is read
   * @return 200 with the content map; 404 when the workflow/job/store is absent; 500 otherwise
   */
  public Response getTaskUserContent(
      @PathParam("clusterId") String clusterId,
      @PathParam("workflowName") String workflowName,
      @PathParam("jobName") String jobName,
      @PathParam("taskPartitionId") String taskPartitionId
  ) {
    TaskDriver driver = getTaskDriver(clusterId);
    try {
      Map<String, String> contentMap =
          driver.getTaskUserContentMap(workflowName, jobName, taskPartitionId);
      if (contentMap == null) {
        return notFound(String.format(
            "Unable to find content store. Workflow (%s) or Job (%s) or Task content store (%s) not created yet.",
            workflowName, jobName, taskPartitionId));
      }
      return JSONRepresentation(contentMap);
    } catch (ZkNoNodeException e) {
      // The workflow/job znode itself is missing.
      return notFound(String.format(
          "Unable to find content store. Workflow (%s) or Job (%s) not created yet.",
          workflowName, jobName));
    } catch (Exception e) {
      return serverError(e);
    }
  }

  @ResponseMetered(name = HttpConstants.WRITE_REQUEST)
  @Timed(name = HttpConstants.WRITE_REQUEST)
  @POST
  @Path("{taskPartitionId}/userContent")
  /**
   * Adds or updates entries in a task partition's user content store.
   * The payload must deserialize to a {@code Map<String, String>}.
   *
   * @param clusterId       the cluster hosting the workflow
   * @param workflowName    the workflow containing the job
   * @param jobName         the job containing the task
   * @param taskPartitionId the task partition whose content store is modified
   * @param commandStr      only "update" is supported; absent/empty defaults to update
   * @param content         JSON map of entries to add or overwrite
   * @return 200 on success; 400 for bad input; 404 when the workflow/job is absent; 500 otherwise
   */
  public Response updateTaskUserContent(
      @PathParam("clusterId") String clusterId,
      @PathParam("workflowName") String workflowName,
      @PathParam("jobName") String jobName,
      @PathParam("taskPartitionId") String taskPartitionId,
      @QueryParam("command") String commandStr,
      String content
  ) {
    // Deserialize the payload first so malformed bodies fail fast.
    Map<String, String> updates;
    try {
      updates = OBJECT_MAPPER.readValue(content, new TypeReference<Map<String, String>>() {
      });
    } catch (IOException e) {
      return badRequest(String
          .format("Content %s cannot be deserialized to Map<String, String>. Err: %s", content,
              e.getMessage()));
    }
    Command command;
    try {
      command = (commandStr == null || commandStr.isEmpty())
          ? Command.update
          : Command.valueOf(commandStr);
    } catch (IllegalArgumentException ie) {
      return badRequest(String.format("Invalid command: %s. Err: %s", commandStr, ie.getMessage()));
    }
    TaskDriver driver = getTaskDriver(clusterId);
    try {
      if (command == Command.update) {
        driver.addOrUpdateTaskUserContentMap(workflowName, jobName, taskPartitionId, updates);
        return OK();
      }
      return badRequest(String.format("Command \"%s\" is not supported!", command));
    } catch (NullPointerException npe) {
      // ZkCacheBasedDataAccessor would throw npe if workflow or job does not exist
      return notFound(
          String.format("Workflow (%s) or job (%s) does not exist", workflowName, jobName));
    } catch (Exception e) {
      _logger.error("Failed to update user content store", e);
      return serverError(e);
    }
  }
}
| 9,364 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources/helix/MetadataAccessor.java
|
package org.apache.helix.rest.server.resources.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Response;
import com.codahale.metrics.annotation.ResponseMetered;
import com.codahale.metrics.annotation.Timed;
import org.apache.helix.rest.common.ContextPropertyKeys;
import org.apache.helix.rest.common.HelixRestNamespace;
import org.apache.helix.rest.common.HelixRestUtils;
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.server.filters.NamespaceAuth;
import org.apache.helix.rest.server.resources.AbstractResource;
@NamespaceAuth
@Path("")
public class MetadataAccessor extends AbstractResource {
  @ResponseMetered(name = HttpConstants.READ_REQUEST)
  @Timed(name = HttpConstants.READ_REQUEST)
  @GET
  /**
   * Returns the REST metadata of the namespace this servlet serves.
   * The default servlet root ({@code /admin/v2/}) intentionally keeps returning 404
   * so its behavior stays unchanged.
   *
   * @return 200 with the namespace's REST info, or 404 on the default servlet
   */
  public Response getMetadata() {
    // Keep /admin/v2/ behaving as before: no metadata is exposed at the default root.
    if (HelixRestUtils.isDefaultServlet(_servletRequest.getServletPath())) {
      return notFound();
    }
    // Namespaced servlet roots report the namespace they were configured with.
    HelixRestNamespace currentNamespace =
        (HelixRestNamespace) _application.getProperties().get(
            ContextPropertyKeys.METADATA.name());
    return JSONRepresentation(currentNamespace.getRestInfo());
  }
}
| 9,365 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/resources/metadata/NamespacesAccessor.java
|
package org.apache.helix.rest.server.resources.metadata;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Response;
import com.codahale.metrics.annotation.ResponseMetered;
import com.codahale.metrics.annotation.Timed;
import org.apache.helix.rest.common.ContextPropertyKeys;
import org.apache.helix.rest.common.HelixRestNamespace;
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.server.filters.NamespaceAuth;
import org.apache.helix.rest.server.resources.AbstractResource;
@NamespaceAuth
@Path("/namespaces")
public class NamespacesAccessor extends AbstractResource {
  @ResponseMetered(name = HttpConstants.READ_REQUEST)
  @Timed(name = HttpConstants.READ_REQUEST)
  @GET
  /**
   * Lists the REST info of every namespace configured on this server.
   *
   * @return 200 with one info map per namespace
   */
  public Response getHelixRestNamespaces() {
    @SuppressWarnings("unchecked")
    List<HelixRestNamespace> allNamespaces =
        (List<HelixRestNamespace>) _application.getProperties()
            .get(ContextPropertyKeys.ALL_NAMESPACES.name());
    List<Map<String, String>> namespaceInfos = new ArrayList<>(allNamespaces.size());
    for (HelixRestNamespace ns : allNamespaces) {
      namespaceInfos.add(ns.getRestInfo());
    }
    return JSONRepresentation(namespaceInfos);
  }
}
| 9,366 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/json
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/json/cluster/ClusterInfo.java
|
package org.apache.helix.rest.server.json.cluster;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
 * Immutable JSON view of a cluster's high-level state: its controller, pause/maintenance
 * flags, resources, and (live) instances. Build instances via {@link Builder}.
 */
public class ClusterInfo {
  @JsonProperty("id")
  private final String id;
  @JsonProperty("controller")
  private final String controller;
  @JsonProperty("paused")
  private final boolean paused;
  @JsonProperty("maintenance")
  private final boolean maintenance;
  // Serialized as "resources": the names of the cluster's IdealStates.
  @JsonProperty("resources")
  private final List<String> idealStates;
  @JsonProperty("instances")
  private final List<String> instances;
  @JsonProperty("liveInstances")
  private final List<String> liveInstances;

  private ClusterInfo(Builder builder) {
    this.id = builder.id;
    this.controller = builder.controller;
    this.paused = builder.paused;
    this.maintenance = builder.maintenance;
    this.idealStates = builder.idealStates;
    this.instances = builder.instances;
    this.liveInstances = builder.liveInstances;
  }

  /** Fluent builder for {@link ClusterInfo}; only the cluster id is mandatory. */
  public static final class Builder {
    private String id;
    private String controller;
    private boolean paused;
    private boolean maintenance;
    private List<String> idealStates;
    private List<String> instances;
    private List<String> liveInstances;

    public Builder(String id) {
      this.id = id;
    }

    public Builder controller(String controller) {
      this.controller = controller;
      return this;
    }

    public Builder paused(boolean paused) {
      this.paused = paused;
      return this;
    }

    public Builder maintenance(boolean maintenance) {
      this.maintenance = maintenance;
      return this;
    }

    public Builder idealStates(List<String> idealStates) {
      this.idealStates = idealStates;
      return this;
    }

    public Builder instances(List<String> instances) {
      this.instances = instances;
      return this;
    }

    public Builder liveInstances(List<String> liveInstances) {
      this.liveInstances = liveInstances;
      return this;
    }

    /** Materializes the accumulated fields into an immutable {@link ClusterInfo}. */
    public ClusterInfo build() {
      return new ClusterInfo(this);
    }
  }
}
| 9,367 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/json
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/json/cluster/ClusterTopology.java
|
package org.apache.helix.rest.server.json.cluster;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
 * JSON-serializable view of a cluster's physical hierarchy:
 * Cluster -> Zone -> (optional Rack) -> Instance.
 * Each layer carries its id plus metadata.
 */
public class ClusterTopology {
  @JsonProperty("id")
  private final String clusterId;
  @JsonProperty("zones")
  private List<Zone> zones;
  @JsonProperty("allInstances")
  private Set<String> allInstances;

  public ClusterTopology(String clusterId, List<Zone> zones, Set<String> allInstances) {
    this.clusterId = clusterId;
    this.zones = zones;
    this.allInstances = allInstances;
  }

  public String getClusterId() {
    return clusterId;
  }

  public List<Zone> getZones() {
    return zones;
  }

  public Set<String> getAllInstances() {
    return allInstances;
  }

  /** One fault zone and the instances assigned to it. */
  public static final class Zone {
    @JsonProperty("id")
    private final String id;
    @JsonProperty("instances")
    private List<Instance> instances;

    public Zone(String id) {
      this.id = id;
    }

    public Zone(String id, List<Instance> instances) {
      this.id = id;
      this.instances = instances;
    }

    public List<Instance> getInstances() {
      return instances;
    }

    public void setInstances(List<Instance> instances) {
      this.instances = instances;
    }

    public String getId() {
      return id;
    }
  }

  /** A single instance, identified by its id. */
  public static final class Instance {
    @JsonProperty("id")
    private final String id;

    public Instance(String id) {
      this.id = id;
    }

    public String getId() {
      return id;
    }
  }

  /**
   * Flattens the topology into zone-id -> instance-id set.
   * A zone with no instances maps to an empty set; a null zone list yields an empty map.
   *
   * @return mutable map from zone id to its member instance ids
   */
  public Map<String, Set<String>> toZoneMapping() {
    if (zones == null) {
      return Collections.emptyMap();
    }
    Map<String, Set<String>> mapping = new HashMap<>();
    for (Zone zone : zones) {
      Set<String> members = new HashSet<>();
      if (zone.getInstances() != null) {
        for (Instance instance : zone.getInstances()) {
          members.add(instance.getId());
        }
      }
      mapping.put(zone.getId(), members);
    }
    return mapping;
  }
}
| 9,368 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/json
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/json/instance/StoppableCheck.java
|
package org.apache.helix.rest.server.json.instance;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.collect.Maps;
/**
 * JSON result of an instance-stoppable evaluation: whether the instance can be stopped,
 * plus the names of the checks that failed, each prefixed with the category it came from.
 */
public class StoppableCheck {
  /** Identifies which evaluation step produced a failed check. */
  public enum Category {
    HELIX_OWN_CHECK("HELIX:"),
    CUSTOM_INSTANCE_CHECK("CUSTOM_INSTANCE_HEALTH_FAILURE:"),
    CUSTOM_PARTITION_CHECK("CUSTOM_PARTITION_HEALTH_FAILURE:");

    String prefix;

    Category(String prefix) {
      this.prefix = prefix;
    }

    public String getPrefix() {
      return prefix;
    }

    /**
     * Parses a comma-separated list of category names into a set.
     *
     * @throws IllegalArgumentException if any token is not a valid category name
     */
    public static Set<Category> categorySetFromCommaSeperatedString(String categories)
        throws IllegalArgumentException {
      return Arrays.stream(categories.split(",")).map(Category::valueOf)
          .collect(Collectors.toSet());
    }
  }

  @JsonProperty("stoppable")
  private boolean isStoppable;
  // Kept sorted at construction time so serialized output (and tests) are deterministic.
  @JsonProperty("failedChecks")
  private List<String> failedChecks;

  public StoppableCheck(boolean isStoppable, List<String> failedChecks, Category category) {
    this.isStoppable = isStoppable;
    // Must stay a mutable list: add() appends to it later.
    this.failedChecks = failedChecks.stream()
        .sorted()
        .map(checkName -> category.getPrefix() + checkName)
        .collect(Collectors.toList());
  }

  public StoppableCheck(Map<String, Boolean> checks, Category category) {
    // A check failed iff its value is exactly Boolean.FALSE.
    this.failedChecks = checks.entrySet().stream()
        .filter(entry -> Boolean.FALSE.equals(entry.getValue()))
        .map(Map.Entry::getKey)
        .sorted()
        .map(checkName -> category.getPrefix() + checkName)
        .collect(Collectors.toList());
    this.isStoppable = this.failedChecks.isEmpty();
  }

  public boolean isStoppable() {
    return isStoppable;
  }

  public List<String> getFailedChecks() {
    return failedChecks;
  }

  /**
   * Merges another result into this one and recomputes stoppability.
   * NOTE(review): merged entries keep arrival order; the combined list is not re-sorted.
   */
  public void add(StoppableCheck other) {
    failedChecks.addAll(other.getFailedChecks());
    isStoppable = failedChecks.isEmpty();
  }
}
| 9,369 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/json
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/json/instance/InstanceInfo.java
|
package org.apache.helix.rest.server.json.instance;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@JsonInclude(JsonInclude.Include.NON_EMPTY)
/**
 * Immutable JSON view of a single instance: its live-instance and config records, the
 * partitions/resources it hosts, and its health-check outcome. Built via {@link Builder}.
 */
public class InstanceInfo {
  private static final Logger _logger = LoggerFactory.getLogger(InstanceInfo.class);

  @JsonProperty("id")
  private final String id;
  @JsonProperty("liveInstance")
  private final ZNRecord liveInstance;
  @JsonProperty("config")
  private final ZNRecord instanceConfig;
  @JsonProperty("partitions")
  private final List<String> partitions;
  @JsonInclude(JsonInclude.Include.NON_EMPTY)
  @JsonProperty("resources")
  private final List<String> resources;
  @JsonProperty("health")
  private final boolean isHealth;
  @JsonProperty("failedHealthChecks")
  private final List<String> failedHealthChecks;

  private InstanceInfo(Builder builder) {
    this.id = builder.id;
    this.liveInstance = builder.liveInstance;
    this.instanceConfig = builder.instanceConfig;
    this.partitions = builder.partitions;
    this.resources = builder.resources;
    this.isHealth = builder.isHealth;
    this.failedHealthChecks = builder.failedHealthChecks;
  }

  /** Fluent builder for {@link InstanceInfo}; only the instance id is mandatory. */
  public static final class Builder {
    private String id;
    private ZNRecord liveInstance;
    private ZNRecord instanceConfig;
    private List<String> partitions;
    private List<String> resources;
    private boolean isHealth;
    private List<String> failedHealthChecks;

    public Builder(String id) {
      this.id = id;
    }

    public Builder liveInstance(ZNRecord liveInstance) {
      this.liveInstance = liveInstance;
      return this;
    }

    public Builder instanceConfig(ZNRecord instanceConfig) {
      this.instanceConfig = instanceConfig;
      return this;
    }

    public Builder partitions(List<String> partitions) {
      this.partitions = partitions;
      return this;
    }

    public Builder resources(List<String> resources) {
      this.resources = resources;
      return this;
    }

    /**
     * Records the outcome of a batch of health checks; the instance is healthy iff
     * every check passed. Each failure is logged.
     */
    public Builder healthStatus(Map<String, Boolean> healthChecks) {
      this.failedHealthChecks = new ArrayList<>();
      for (Map.Entry<String, Boolean> check : healthChecks.entrySet()) {
        if (!check.getValue()) {
          _logger.warn("Health Check {} failed", check.getKey());
          this.failedHealthChecks.add(check.getKey());
        }
      }
      this.isHealth = this.failedHealthChecks.isEmpty();
      return this;
    }

    public Builder healthStatus(boolean isHealth) {
      this.isHealth = isHealth;
      return this;
    }

    public InstanceInfo build() {
      return new InstanceInfo(this);
    }
  }
}
| 9,370 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/authValidator/AuthValidator.java
|
package org.apache.helix.rest.server.authValidator;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import javax.ws.rs.container.ContainerRequestContext;
/**
 * Pluggable authorization hook for the REST server. Implementations inspect the incoming
 * request and decide whether it may proceed; returning {@code false} rejects the request.
 */
public interface AuthValidator {
  // Returns true to admit the request, false to reject it.
  boolean validate(ContainerRequestContext request);
}
| 9,371 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/authValidator/NoopAuthValidator.java
|
package org.apache.helix.rest.server.authValidator;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import javax.ws.rs.container.ContainerRequestContext;
public class NoopAuthValidator implements AuthValidator {
public boolean validate(ContainerRequestContext request) {
return true;
}
}
| 9,372 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/service/VirtualTopologyGroupService.java
|
package org.apache.helix.rest.server.service;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang3.StringUtils;
import org.apache.helix.AccessOption;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.cloud.constants.VirtualTopologyGroupConstants;
import org.apache.helix.cloud.topology.FifoVirtualGroupAssignmentAlgorithm;
import org.apache.helix.cloud.topology.VirtualGroupAssignmentAlgorithm;
import org.apache.helix.model.CloudConfig;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.ClusterTopologyConfig;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.rest.server.json.cluster.ClusterTopology;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.zkclient.DataUpdater;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Service for virtual topology group.
 * It's a virtualization layer on top of physical fault domain and topology in cloud environments.
 * The service computes the mapping from virtual group to instances based on the current cluster topology and update the
 * information to cluster and all instances in the cluster.
 */
public class VirtualTopologyGroupService {
  private static final Logger LOG = LoggerFactory.getLogger(VirtualTopologyGroupService.class);

  private final HelixAdmin _helixAdmin;
  private final ClusterService _clusterService;
  private final ConfigAccessor _configAccessor;
  private final HelixDataAccessor _dataAccessor;
  // Singleton assignment algorithm used to map virtual groups to physical instances.
  private final VirtualGroupAssignmentAlgorithm _assignmentAlgorithm;

  public VirtualTopologyGroupService(HelixAdmin helixAdmin, ClusterService clusterService,
      ConfigAccessor configAccessor, HelixDataAccessor dataAccessor) {
    _helixAdmin = helixAdmin;
    _clusterService = clusterService;
    _configAccessor = configAccessor;
    _dataAccessor = dataAccessor;
    _assignmentAlgorithm = FifoVirtualGroupAssignmentAlgorithm.getInstance();
  }

  /**
   * Add virtual topology group for a cluster.
   * This includes calculating the virtual group assignment for all instances in the cluster then update instance config
   * and cluster config. We override {@link ClusterConfig.ClusterConfigProperty#TOPOLOGY} and
   * {@link ClusterConfig.ClusterConfigProperty#FAULT_ZONE_TYPE} for cluster config, and add new field to
   * {@link InstanceConfig.InstanceConfigProperty#DOMAIN} that contains virtual topology group information.
   * This is only supported for cloud environments. Cluster is expected to be in maintenance mode during config change.
   * @param clusterName the cluster name.
   * @param customFields custom fields, {@link VirtualTopologyGroupConstants#GROUP_NAME}
   *                     and {@link VirtualTopologyGroupConstants#GROUP_NUMBER} are required,
   *                     {@link VirtualTopologyGroupConstants#AUTO_MAINTENANCE_MODE_DISABLED} is optional.
   *                     -- if set true, the cluster will NOT automatically enter/exit maintenance mode during this API call;
   *                     -- if set false or not set, the cluster will automatically enter maintenance mode and exit after
   *                     the call succeeds. It won't proceed if the cluster is already in maintenance mode.
   *                     In either case, the cluster must be in maintenance mode before config change.
   */
  public void addVirtualTopologyGroup(String clusterName, Map<String, String> customFields) {
    // validation: topology-aware rebalance must already be enabled, and both the group
    // name and a positive integer group count must be supplied by the caller.
    ClusterConfig clusterConfig = _configAccessor.getClusterConfig(clusterName);
    Preconditions.checkState(clusterConfig.isTopologyAwareEnabled(),
        "Topology-aware rebalance is not enabled in cluster " + clusterName);
    String groupName = customFields.get(VirtualTopologyGroupConstants.GROUP_NAME);
    String groupNumberStr = customFields.get(VirtualTopologyGroupConstants.GROUP_NUMBER);
    Preconditions.checkArgument(!StringUtils.isEmpty(groupName), "virtualTopologyGroupName cannot be empty!");
    Preconditions.checkArgument(!StringUtils.isEmpty(groupNumberStr), "virtualTopologyGroupNumber cannot be empty!");
    int numGroups = 0;
    try {
      numGroups = Integer.parseInt(groupNumberStr);
      Preconditions.checkArgument(numGroups > 0, "Number of virtual groups should be positive.");
    } catch (NumberFormatException ex) {
      throw new IllegalArgumentException("virtualTopologyGroupNumber " + groupNumberStr + " is not an integer.", ex);
    }
    LOG.info("Computing virtual topology group for cluster {} with param {}", clusterName, customFields);

    // compute group assignment: each virtual group must map to at least one instance,
    // so the group count cannot exceed the instance count.
    ClusterTopology clusterTopology = _clusterService.getClusterTopology(clusterName);
    Preconditions.checkArgument(numGroups <= clusterTopology.getAllInstances().size(),
        "Number of virtual groups cannot be greater than the number of instances.");
    Map<String, Set<String>> assignment =
        _assignmentAlgorithm.computeAssignment(numGroups, groupName, clusterTopology.toZoneMapping());

    boolean autoMaintenanceModeDisabled = Boolean.parseBoolean(
        customFields.getOrDefault(VirtualTopologyGroupConstants.AUTO_MAINTENANCE_MODE_DISABLED, "false"));
    // if auto mode is NOT disabled, let service enter maintenance mode and exit after the API succeeds.
    // Refuses to proceed if the cluster is already in maintenance mode, so this call never
    // accidentally exits a maintenance window that someone else opened.
    if (!autoMaintenanceModeDisabled) {
      Preconditions.checkState(!_helixAdmin.isInMaintenanceMode(clusterName),
          "This operation is not allowed if cluster is already in maintenance mode before the API call. "
              + "Please set autoMaintenanceModeDisabled=true if this is intended.");
      _helixAdmin.manuallyEnableMaintenanceMode(clusterName, true,
          "Enable maintenanceMode for virtual topology group change.", customFields);
    }
    // Regardless of who enabled it, the cluster MUST be in maintenance mode before configs change.
    Preconditions.checkState(_helixAdmin.isInMaintenanceMode(clusterName),
        "Cluster is not in maintenance mode. This is required for virtual topology group setting. "
            + "Please set autoMaintenanceModeDisabled=false (default) to let the cluster enter maintenance mode automatically, "
            + "or use autoMaintenanceModeDisabled=true and control cluster maintenance mode in client side.");

    updateConfigs(clusterName, clusterConfig, assignment);
    if (!autoMaintenanceModeDisabled) {
      _helixAdmin.manuallyEnableMaintenanceMode(clusterName, false,
          "Disable maintenanceMode after virtual topology group change.", customFields);
    }
  }

  /**
   * Persists the computed assignment: first all instance configs (fail-fast on any
   * failed update), then the cluster config's topology and fault-zone fields.
   */
  private void updateConfigs(String clusterName, ClusterConfig clusterConfig, Map<String, Set<String>> assignment) {
    List<String> zkPaths = new ArrayList<>();
    List<DataUpdater<ZNRecord>> updaters = new ArrayList<>();
    createInstanceConfigUpdater(clusterName, assignment).forEach((zkPath, updater) -> {
      zkPaths.add(zkPath);
      updaters.add(updater);
    });
    // update instance config
    // NOTE(review): AccessOption.EPHEMERAL looks surprising here — instance-config znodes
    // are normally persistent. Presumably the option only matters if a node is created;
    // confirm this is intended.
    boolean[] results = _dataAccessor.updateChildren(zkPaths, updaters, AccessOption.EPHEMERAL);
    for (int i = 0; i < results.length; i++) {
      if (!results[i]) {
        throw new HelixException("Failed to update instance config for path " + zkPaths.get(i));
      }
    }
    // update cluster config: switch topology string and fault-zone type to the virtual group view.
    String virtualTopologyString = computeVirtualTopologyString(clusterConfig);
    clusterConfig.setTopology(virtualTopologyString);
    clusterConfig.setFaultZoneType(VirtualTopologyGroupConstants.VIRTUAL_FAULT_ZONE_TYPE);
    _configAccessor.updateClusterConfig(clusterName, clusterConfig);
    LOG.info("Successfully update instance and cluster config for {}", clusterName);
  }

  /**
   * Builds the new cluster topology string. The leading empty segment makes the result
   * start with the path splitter, e.g. "/virtualZone/endNodeType".
   */
  @VisibleForTesting
  static String computeVirtualTopologyString(ClusterConfig clusterConfig) {
    ClusterTopologyConfig clusterTopologyConfig = ClusterTopologyConfig.createFromClusterConfig(clusterConfig);
    String endNodeType = clusterTopologyConfig.getEndNodeType();
    String[] splits = new String[] {"", VirtualTopologyGroupConstants.VIRTUAL_FAULT_ZONE_TYPE, endNodeType};
    return String.join(VirtualTopologyGroupConstants.PATH_NAME_SPLITTER, splits);
  }

  /**
   * Create updater for instance config for async update.
   * Each updater adds (or overwrites) the virtual fault-zone entry in the instance's DOMAIN map.
   * @param clusterName cluster name of the instances.
   * @param assignment virtual group assignment.
   * @return a map from instance zkPath to its {@link DataUpdater} to update.
   */
  @VisibleForTesting
  static Map<String, DataUpdater<ZNRecord>> createInstanceConfigUpdater(
      String clusterName, Map<String, Set<String>> assignment) {
    Map<String, DataUpdater<ZNRecord>> updaters = new HashMap<>();
    for (Map.Entry<String, Set<String>> entry : assignment.entrySet()) {
      String virtualGroup = entry.getKey();
      for (String instanceName : entry.getValue()) {
        String path = PropertyPathBuilder.instanceConfig(clusterName, instanceName);
        updaters.put(path, currentData -> {
          InstanceConfig instanceConfig = new InstanceConfig(currentData);
          Map<String, String> domainMap = instanceConfig.getDomainAsMap();
          domainMap.put(VirtualTopologyGroupConstants.VIRTUAL_FAULT_ZONE_TYPE, virtualGroup);
          instanceConfig.setDomain(domainMap);
          return instanceConfig.getRecord();
        });
      }
    }
    return updaters;
  }
}
| 9,373 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/service/InstanceServiceImpl.java
|
package org.apache.helix.rest.server.service;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.rest.clusterMaintenanceService.HealthCheck;
import org.apache.helix.rest.clusterMaintenanceService.MaintenanceManagementService;
import org.apache.helix.rest.common.HelixRestNamespace;
import org.apache.helix.rest.server.json.instance.InstanceInfo;
import org.apache.helix.rest.server.json.instance.StoppableCheck;
/**
 * Default {@link InstanceService} implementation that delegates all instance health and
 * stoppable checks to {@link MaintenanceManagementService}.
 */
public class InstanceServiceImpl implements InstanceService {
  // Delegate performing the actual health/stoppable check work; assigned once in the
  // constructor and never reassigned, hence final.
  private final MaintenanceManagementService _maintenanceManagementService;

  @Deprecated
  public InstanceServiceImpl(ZKHelixDataAccessor dataAccessor, ConfigAccessor configAccessor) {
    this(dataAccessor, configAccessor, false);
  }

  @Deprecated
  public InstanceServiceImpl(ZKHelixDataAccessor dataAccessor, ConfigAccessor configAccessor,
      boolean skipZKRead) {
    this(dataAccessor, configAccessor, skipZKRead, HelixRestNamespace.DEFAULT_NAMESPACE_NAME);
  }

  public InstanceServiceImpl(ZKHelixDataAccessor dataAccessor, ConfigAccessor configAccessor,
      boolean skipZKRead, String namespace) {
    this(dataAccessor, configAccessor, skipZKRead, false, namespace);
  }

  // TODO: too many params, convert to builder pattern
  public InstanceServiceImpl(ZKHelixDataAccessor dataAccessor, ConfigAccessor configAccessor,
      boolean skipZKRead, boolean continueOnFailures, String namespace) {
    _maintenanceManagementService =
        new MaintenanceManagementService(dataAccessor, configAccessor, skipZKRead,
            continueOnFailures, namespace);
  }

  @Override
  public InstanceInfo getInstanceInfo(String clusterId, String instanceName,
      List<HealthCheck> healthChecks) {
    return _maintenanceManagementService.getInstanceHealthInfo(clusterId, instanceName, healthChecks);
  }

  /**
   * {@inheritDoc}
   * Single instance stoppable check implementation is a special case of
   * {@link #batchGetInstancesStoppableChecks(String, List, String)}
   * <p>
   * Step 1: Perform instance level Helix own health checks
   * Step 2: Perform instance level client side health checks
   * Step 3: Perform partition level (all partitions on the instance) client side health checks
   * <p>
   * Note: if the check fails at one step, the rest steps won't be executed because the instance
   * cannot be stopped
   */
  @Override
  public StoppableCheck getInstanceStoppableCheck(String clusterId, String instanceName,
      String jsonContent) throws IOException {
    // Run the batch path for a single instance and extract its result.
    return batchGetInstancesStoppableChecks(clusterId, ImmutableList.of(instanceName), jsonContent)
        .get(instanceName);
  }

  @Override
  public Map<String, StoppableCheck> batchGetInstancesStoppableChecks(String clusterId,
      List<String> instances, String jsonContent) throws IOException {
    return _maintenanceManagementService
        .batchGetInstancesStoppableChecks(clusterId, instances, jsonContent);
  }
}
| 9,374 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/service/ClusterServiceImpl.java
|
package org.apache.helix.rest.server.service;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.helix.AccessOption;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.PropertyKey;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.rest.server.json.cluster.ClusterInfo;
import org.apache.helix.rest.server.json.cluster.ClusterTopology;
/**
 * Default {@link ClusterService} implementation backed by a {@link HelixDataAccessor}
 * (for ZK reads) and a {@link ConfigAccessor} (for cluster config reads).
 */
public class ClusterServiceImpl implements ClusterService {
  private final HelixDataAccessor _dataAccessor;
  private final ConfigAccessor _configAccessor;

  public ClusterServiceImpl(HelixDataAccessor dataAccessor, ConfigAccessor configAccessor) {
    _dataAccessor = dataAccessor;
    _configAccessor = configAccessor;
  }

  /**
   * {@inheritDoc}
   * Groups instances into zones keyed by the cluster's configured fault-zone type.
   * Instances whose DOMAIN lacks the fault-zone key are excluded from zone grouping but
   * still appear in the returned all-instances set.
   */
  @Override
  public ClusterTopology getClusterTopology(String cluster) {
    String zoneField = _configAccessor.getClusterConfig(cluster).getFaultZoneType();
    PropertyKey.Builder keyBuilder = _dataAccessor.keyBuilder();
    List<InstanceConfig> instanceConfigs =
        _dataAccessor.getChildValues(keyBuilder.instanceConfigs(), true);
    if (instanceConfigs == null) {
      // The original code null-checked before the loop yet dereferenced the list in the
      // final stream, which would NPE; normalize to an empty list once instead.
      instanceConfigs = new ArrayList<>();
    }
    Map<String, List<ClusterTopology.Instance>> instanceMapByZone = new HashMap<>();
    for (InstanceConfig instanceConfig : instanceConfigs) {
      Map<String, String> domain = instanceConfig.getDomainAsMap();
      if (!domain.containsKey(zoneField)) {
        continue; // no fault-zone info for this instance
      }
      String zoneId = domain.get(zoneField);
      instanceMapByZone.computeIfAbsent(zoneId, k -> new ArrayList<>())
          .add(new ClusterTopology.Instance(instanceConfig.getInstanceName()));
    }
    List<ClusterTopology.Zone> zones = new ArrayList<>();
    for (Map.Entry<String, List<ClusterTopology.Instance>> entry : instanceMapByZone.entrySet()) {
      ClusterTopology.Zone zone = new ClusterTopology.Zone(entry.getKey());
      zone.setInstances(entry.getValue());
      zones.add(zone);
    }
    // Include every instance name in the topology, zoned or not.
    return new ClusterTopology(cluster, zones,
        instanceConfigs.stream().map(InstanceConfig::getInstanceName).collect(Collectors.toSet()));
  }

  /**
   * {@inheritDoc}
   * Reads the controller leader (or a placeholder when no leader exists), the pause and
   * maintenance flags, and the ideal-state / instance / live-instance child names.
   */
  @Override
  public ClusterInfo getClusterInfo(String clusterId) {
    ClusterInfo.Builder builder = new ClusterInfo.Builder(clusterId);
    PropertyKey.Builder keyBuilder = _dataAccessor.keyBuilder();
    LiveInstance controller =
        _dataAccessor.getProperty(_dataAccessor.keyBuilder().controllerLeader());
    if (controller != null) {
      builder.controller(controller.getInstanceName());
    } else {
      builder.controller("No Lead Controller");
    }
    return builder
        .paused(_dataAccessor.getBaseDataAccessor().exists(keyBuilder.pause().getPath(),
            AccessOption.PERSISTENT))
        .maintenance(_dataAccessor.getBaseDataAccessor().exists(keyBuilder.maintenance().getPath(),
            AccessOption.PERSISTENT))
        .idealStates(_dataAccessor.getChildNames(keyBuilder.idealStates()))
        .instances(_dataAccessor.getChildNames(keyBuilder.instances()))
        .liveInstances(_dataAccessor.getChildNames(keyBuilder.liveInstances())).build();
  }
}
| 9,375 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/service/InstanceService.java
|
package org.apache.helix.rest.server.service;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.helix.rest.clusterMaintenanceService.HealthCheck;
import org.apache.helix.rest.server.json.instance.InstanceInfo;
import org.apache.helix.rest.server.json.instance.StoppableCheck;
/**
 * REST-facing service contract for per-instance health information and
 * "is it safe to stop this instance" checks.
 */
public interface InstanceService {
  /**
   * Get the overall status of the instance
   *
   * @param clusterId The cluster id
   * @param instanceName The instance name
   * @param healthChecks The list of health checks to evaluate for the instance
   * @return An instance of {@link InstanceInfo} easily convertible to JSON
   */
  InstanceInfo getInstanceInfo(String clusterId, String instanceName,
      List<HealthCheck> healthChecks);

  /**
   * Get the current instance stoppable checks
   *
   * @param clusterId The cluster id
   * @param instanceName The instance name
   * @param jsonContent The json payloads from client side (e.g. custom check parameters)
   * @return An instance of {@link StoppableCheck} easily convertible to JSON
   * @throws IOException in case of network failure
   */
  StoppableCheck getInstanceStoppableCheck(String clusterId, String instanceName,
      String jsonContent) throws IOException;

  /**
   * Batch get StoppableCheck results for a list of instances in one cluster
   *
   * @param clusterId The cluster id
   * @param instances The list of instances
   * @param jsonContent The json payloads from client side
   * @return A map contains the instance as key and the StoppableCheck as the value
   * @throws IOException in case of network failure
   */
  Map<String, StoppableCheck> batchGetInstancesStoppableChecks(String clusterId, List<String> instances, String jsonContent)
      throws IOException;
}
| 9,376 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/service/ClusterService.java
|
package org.apache.helix.rest.server.service;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.rest.server.json.cluster.ClusterInfo;
import org.apache.helix.rest.server.json.cluster.ClusterTopology;
/**
* A rest wrapper service that provides information about cluster
* TODO add more business logic and simplify the workload on ClusterAccessor
*/
public interface ClusterService {
  /**
   * Get cluster topology
   *
   * @param cluster the cluster name
   * @return the cluster's zone/instance topology view
   */
  ClusterTopology getClusterTopology(String cluster);

  /**
   * Get cluster basic information
   *
   * @param clusterId the cluster name
   * @return basic cluster state (controller, pause/maintenance flags, resource and instance names)
   */
  ClusterInfo getClusterInfo(String clusterId);
}
| 9,377 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/auditlog/AuditLog.java
|
package org.apache.helix.rest.server.auditlog;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.security.Principal;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class AuditLog {
private static Logger _logger = LoggerFactory.getLogger(AuditLog.class.getName());
public static final String ATTRIBUTE_NAME = "AUDIT_LOG";
private Date _startTime;
private Date _completeTime;
private Principal _principal;
private String _clientIP;
private String _clientHostPort;
private String _namespace;
private String _requestPath;
private String _httpMethod;
private List<String> _requestHeaders;
private String _requestEntity;
private int _responseCode;
private String _responseEntity;
private List<Exception> _exceptions;
private String _additionalInfo;
public AuditLog(Date startTime, Date completeTime, Principal principal, String clientIP,
String clientHostPort, String namespace, String requestPath, String httpMethod, List<String> requestHeaders,
String requestEntity, int responseCode, String responseEntity,
String additionalInfo, List<Exception> exceptions) {
_startTime = startTime;
_completeTime = completeTime;
_principal = principal;
_clientIP = clientIP;
_clientHostPort = clientHostPort;
_namespace = namespace;
_requestPath = requestPath;
_httpMethod = httpMethod;
_requestHeaders = requestHeaders;
_requestEntity = requestEntity;
_responseCode = responseCode;
_responseEntity = responseEntity;
_additionalInfo = additionalInfo;
_exceptions = exceptions;
}
@Override
public String toString() {
return "AuditLog{" +
"_startTime=" + _startTime +
", _completeTime=" + _completeTime +
", _principal=" + _principal +
", _clientIP='" + _clientIP + '\'' +
", _clientHostPort='" + _clientHostPort + '\'' +
", _namespace='" + _namespace + '\'' +
", _requestPath='" + _requestPath + '\'' +
", _httpMethod='" + _httpMethod + '\'' +
", _requestHeaders=" + _requestHeaders +
", _requestEntity='" + _requestEntity + '\'' +
", _responseCode=" + _responseCode +
", _responseEntity='" + _responseEntity + '\'' +
", _exceptions=" + _exceptions +
", _additionalInfo='" + _additionalInfo + '\'' +
'}';
}
public Date getStartTime() {
return _startTime;
}
public Date getCompleteTime() {
return _completeTime;
}
public Principal getPrincipal() {
return _principal;
}
public String getClientIP() {
return _clientIP;
}
public String getClientHostPort() {
return _clientHostPort;
}
public String getNamespace() {
return _namespace;
}
public String getRequestPath() {
return _requestPath;
}
public String getHttpMethod() {
return _httpMethod;
}
public List<String> getRequestHeaders() {
return _requestHeaders;
}
public String getRequestEntity() {
return _requestEntity;
}
public int getResponseCode() {
return _responseCode;
}
public String getResponseEntity() {
return _responseEntity;
}
public List<Exception> getExceptions() {
return _exceptions;
}
public String getAdditionalInfo() {
return _additionalInfo;
}
public static class Builder {
private Date _startTime;
private Date _completeTime;
private Principal _principal;
private String _clientIP;
private String _clientHostPort;
private String _namespace;
private String _requestPath;
private String _httpMethod;
private List<String> _requestHeaders;
private String _requestEntity;
private int _responseCode;
private String _responseEntity;
private List<Exception> _exceptions;
private String _additionalInfo;
public Date getStartTime() {
return _startTime;
}
public Builder startTime(Date startTime) {
_startTime = startTime;
return this;
}
public Date getCompleteTime() {
return _completeTime;
}
public Builder completeTime(Date completeTime) {
_completeTime = completeTime;
return this;
}
public Principal getPrincipal() {
return _principal;
}
public Builder principal(Principal principal) {
_principal = principal;
return this;
}
public String getClientIP() {
return _clientIP;
}
public Builder clientIP(String clientIP) {
_clientIP = clientIP;
return this;
}
public String getClientHostPort() {
return _clientHostPort;
}
public Builder clientHostPort(String clientHostPort) {
_clientHostPort = clientHostPort;
return this;
}
public String getNamespace() {
return _namespace;
}
public Builder namespace(String namespace) {
_namespace = namespace;
return this;
}
public String getRequestPath() {
return _requestPath;
}
public Builder requestPath(String requestPath) {
_requestPath = requestPath;
return this;
}
public String getHttpMethod() {
return _httpMethod;
}
public Builder httpMethod(String httpMethod) {
_httpMethod = httpMethod;
return this;
}
public String getRequestEntity() {
return _requestEntity;
}
public Builder requestEntity(String requestEntity) {
_requestEntity = requestEntity;
return this;
}
public List<String> getRequestHeaders() {
return _requestHeaders;
}
public Builder requestHeaders(List<String> requestHeaders) {
_requestHeaders = requestHeaders;
return this;
}
public int getResponseCode() {
return _responseCode;
}
public Builder responseCode(int responseCode) {
_responseCode = responseCode;
return this;
}
public String getResponseEntity() {
return _responseEntity;
}
public Builder responseEntity(String responseEntity) {
_responseEntity = responseEntity;
return this;
}
public List<Exception> getExceptions() {
return _exceptions;
}
public Builder exceptions(List<Exception> exceptions) {
_exceptions = exceptions;
return this;
}
public Builder addException(Exception ex) {
if (_exceptions == null) {
_exceptions = new ArrayList<>();
}
_exceptions.add(ex);
return this;
}
public String getAdditionalInfo() {
return _additionalInfo;
}
public Builder additionalInfo(String additionalInfo) {
_additionalInfo = additionalInfo;
return this;
}
public AuditLog build() {
return new AuditLog(_startTime, _completeTime, _principal, _clientIP, _clientHostPort,
_namespace, _requestPath, _httpMethod, _requestHeaders, _requestEntity, _responseCode,
_responseEntity, _additionalInfo, _exceptions);
}
}
}
| 9,378 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/auditlog/AuditLogger.java
|
package org.apache.helix.rest.server.auditlog;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Define an interface for Audit Log persistence.
 */
public interface AuditLogger {

  /**
   * Persist one completed audit record (e.g. to a local log file or a remote sink).
   *
   * @param auditLog the audit record describing a single REST request/response
   */
  void write(AuditLog auditLog);
}
| 9,379 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/auditlog
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/server/auditlog/auditloggers/FileBasedAuditLogger.java
|
package org.apache.helix.rest.server.auditlog.auditloggers;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.sun.research.ws.wadl.HTTPMethods;
import org.apache.helix.rest.server.auditlog.AuditLog;
import org.apache.helix.rest.server.auditlog.AuditLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Default AuditLogger implementation that log the AuditLog into local log file.
*/
/**
 * Default AuditLogger implementation that log the AuditLog into local log file.
 * When {@code logReadOperations} is false (default), only mutating verbs
 * (PUT/POST/DELETE) are logged.
 */
public class FileBasedAuditLogger implements AuditLogger {
  private static Logger _logger = LoggerFactory.getLogger(FileBasedAuditLogger.class.getName());

  // When true, every request (including GETs) is logged.
  private final boolean _logReadOperations;

  public FileBasedAuditLogger() {
    this(false);
  }

  public FileBasedAuditLogger(boolean logReadOperations) {
    _logReadOperations = logReadOperations;
  }

  @Override
  public void write(AuditLog auditLog) {
    if (_logReadOperations) {
      _logger.info(auditLog.toString());
    } else if (HTTPMethods.PUT.name().equals(auditLog.getHttpMethod())
        || HTTPMethods.POST.name().equals(auditLog.getHttpMethod())
        // BUG FIX: the original compared strings with ==, which tests reference
        // identity; mutating requests could silently go unlogged. Constant-first
        // equals() also stays safe when the HTTP method is null.
        || HTTPMethods.DELETE.name().equals(auditLog.getHttpMethod())) {
      _logger.info(auditLog.toString());
    }
  }
}
| 9,380 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/common/HttpConstants.java
|
package org.apache.helix.rest.common;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Shared HTTP-related constants for the Helix REST service.
 */
public class HttpConstants {
  /** REST verbs accepted by the service. */
  public enum RestVerbs {
    GET,
    POST,
    PUT,
    DELETE
  }

  public static final String HTTP_PROTOCOL_PREFIX = "http://";
  /** Default request timeout in milliseconds (60 seconds). */
  public static final int DEFAULT_HTTP_REQUEST_TIMEOUT = 60 * 1000;
  /** REST request categorized as read. Can be used to categorize metric names */
  public static final String READ_REQUEST = "read";
  /** REST request categorized as write. Can be used to categorize metric names */
  public static final String WRITE_REQUEST = "write";

  private HttpConstants() {
    // Constants holder: prevent instantiation.
  }
}
| 9,381 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/common/ServletType.java
|
package org.apache.helix.rest.common;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.rest.server.resources.helix.AbstractHelixResource;
import org.apache.helix.rest.server.resources.metadata.NamespacesAccessor;
import org.apache.helix.rest.server.resources.metadatastore.MetadataStoreDirectoryAccessor;
import org.apache.helix.rest.server.resources.zookeeper.ZooKeeperAccessor;
/**
 * Enumerates the servlet flavors hosted by the Helix REST server, each carrying its
 * path-spec template and the packages Jersey scans for JAX-RS resources.
 */
public enum ServletType {
  /**
   * Servlet serving default API endpoints (/admin/v2/clusters/...)
   */
  DEFAULT_SERVLET(HelixRestNamespace.DEFAULT_NAMESPACE_PATH_SPEC,
      new String[] {
          AbstractHelixResource.class.getPackage().getName(),
          NamespacesAccessor.class.getPackage().getName(),
          ZooKeeperAccessor.class.getPackage().getName(),
          MetadataStoreDirectoryAccessor.class.getPackage().getName(),
          "com.api.resources",
          "io.swagger.jaxrs.json",
          "io.swagger.jaxrs.listing",
          "org.apache.helix.rest.server",
          "org.apache.helix.rest.server.resources"
      }),

  /**
   * Servlet serving namespaced API endpoints (/admin/v2/namespaces/{namespaceName})
   */
  COMMON_SERVLET("/namespaces/%s/*",
      new String[] {
          AbstractHelixResource.class.getPackage().getName(),
          ZooKeeperAccessor.class.getPackage().getName(),
          MetadataStoreDirectoryAccessor.class.getPackage().getName()
      });

  // Path spec template; COMMON_SERVLET's template is formatted with the namespace name.
  private final String _servletPathSpecTemplate;
  // Packages scanned for JAX-RS resource classes by this servlet.
  private final String[] _servletPackageArray;

  ServletType(String servletPathSpecTemplate, String[] servletPackageArray) {
    _servletPathSpecTemplate = servletPathSpecTemplate;
    // Defensive copy: keeps the enum's state independent of the array passed in.
    _servletPackageArray = servletPackageArray.clone();
  }

  /** @return the path-spec template for this servlet type. */
  public String getServletPathSpecTemplate() {
    return _servletPathSpecTemplate;
  }

  /**
   * @return the packages this servlet type scans for resources. A copy is returned
   * because enum constants are global singletons: handing out the internal array would
   * let any caller mutate state shared by the whole process.
   */
  public String[] getServletPackageArray() {
    return _servletPackageArray.clone();
  }
}
| 9,382 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/common/HelixRestNamespace.java
|
package org.apache.helix.rest.common;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashMap;
import java.util.Map;
/**
 * Describes a single Helix REST namespace: its name, the metadata store backing it,
 * and the flags controlling default-namespace and multi-zk behavior.
 */
public class HelixRestNamespace {

  /** Types of metadata store that may back a namespace. */
  public enum HelixMetadataStoreType {
    ZOOKEEPER,
    NO_METADATA_STORE
  }

  /** Property names used when a namespace is rendered as a key/value map. */
  public enum HelixRestNamespaceProperty {
    NAME,
    METADATA_STORE_TYPE,
    METADATA_STORE_ADDRESS,
    IS_DEFAULT,
    MULTI_ZK_ENABLED,
    MSDS_ENDPOINT
  }

  /**
   * Namespaced object will have path such as /url_prefix/namespaces/{namespace_name}/clusters/...
   * We are going to have path /url_prefix/clusters/... point to default namespace if there is one
   */
  public static final String DEFAULT_NAMESPACE_PATH_SPEC = "/*";

  public static final String DEFAULT_NAMESPACE_NAME = "default";

  // Name of this Helix namespace.
  private final String _name;
  // Type of the metadata store that backs this namespace.
  private final HelixMetadataStoreType _metadataStoreType;
  // Metadata store address: "[ip-address]:[port]" or "[dns-name]:[port]".
  private final String _metadataStoreAddress;
  // Whether this namespace is the default one.
  private final boolean _isDefault;
  // Whether the multi-zk feature is enabled for this namespace.
  private final boolean _isMultiZkEnabled;
  // MSDS endpoint used by this namespace (may be null).
  private final String _msdsEndpoint;

  /**
   * Builds the default ZooKeeper-backed namespace pointing at the given address.
   */
  public HelixRestNamespace(String metadataStoreAddress) throws IllegalArgumentException {
    this(DEFAULT_NAMESPACE_NAME, HelixMetadataStoreType.ZOOKEEPER, metadataStoreAddress, true);
  }

  /**
   * Builds a namespace with multi-zk disabled and no MSDS endpoint.
   */
  public HelixRestNamespace(String name, HelixMetadataStoreType metadataStoreType,
      String metadataStoreAddress, boolean isDefault) throws IllegalArgumentException {
    this(name, metadataStoreType, metadataStoreAddress, isDefault, false, null);
  }

  /**
   * Fully-specified constructor; validates the arguments before returning.
   *
   * @throws IllegalArgumentException when the name is missing, or when a metadata store
   *         address is required but absent.
   */
  public HelixRestNamespace(String name, HelixMetadataStoreType metadataStoreType,
      String metadataStoreAddress, boolean isDefault, boolean isMultiZkEnabled,
      String msdsEndpoint) {
    _name = name;
    _metadataStoreType = metadataStoreType;
    _metadataStoreAddress = metadataStoreAddress;
    _isDefault = isDefault;
    _isMultiZkEnabled = isMultiZkEnabled;
    _msdsEndpoint = msdsEndpoint;
    validate();
  }

  private void validate() throws IllegalArgumentException {
    // TODO: add more strict validation for NAME as this will be part of URL
    if (_name == null || _name.isEmpty()) {
      throw new IllegalArgumentException("Name of namespace not provided");
    }
    // Only NO_METADATA_STORE namespaces may omit the store address.
    boolean addressRequired = _metadataStoreType != HelixMetadataStoreType.NO_METADATA_STORE;
    if (addressRequired && (_metadataStoreAddress == null || _metadataStoreAddress.isEmpty())) {
      throw new IllegalArgumentException(
          String.format("Metadata store address \"%s\" is not valid for namespace %s", _metadataStoreAddress, _name));
    }
  }

  public boolean isDefault() {
    return _isDefault;
  }

  public String getName() {
    return _name;
  }

  public String getMetadataStoreAddress() {
    return _metadataStoreAddress;
  }

  /**
   * Renders this namespace as a string map for REST responses.
   */
  public Map<String, String> getRestInfo() {
    // In REST APIs we currently don't expose metadata store information
    Map<String, String> info = new HashMap<>();
    info.put(HelixRestNamespaceProperty.NAME.name(), _name);
    info.put(HelixRestNamespaceProperty.IS_DEFAULT.name(), String.valueOf(_isDefault));
    info.put(HelixRestNamespaceProperty.MULTI_ZK_ENABLED.name(), String.valueOf(_isMultiZkEnabled));
    info.put(HelixRestNamespaceProperty.MSDS_ENDPOINT.name(), String.valueOf(_msdsEndpoint));
    return info;
  }

  public boolean isMultiZkEnabled() {
    return _isMultiZkEnabled;
  }

  public String getMsdsEndpoint() {
    return _msdsEndpoint;
  }
}
| 9,383 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/common/RestSystemPropertyKeys.java
|
package org.apache.helix.rest.common;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * JVM system property names recognized by the Helix REST server.
 */
public class RestSystemPropertyKeys {
  /** System property controlling the REST HTTP request timeout, in milliseconds. */
  public static final String REST_HTTP_TIMEOUT_MS = "rest.http.timeout.ms";
}
| 9,384 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/common/HelixDataAccessorWrapper.java
|
package org.apache.helix.rest.common;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.stream.Collectors;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.SharedMetricRegistries;
import com.codahale.metrics.Timer;
import org.apache.helix.HelixProperty;
import org.apache.helix.PropertyKey;
import org.apache.helix.PropertyType;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.RESTConfig;
import org.apache.helix.rest.client.CustomRestClient;
import org.apache.helix.rest.client.CustomRestClientFactory;
import org.apache.helix.rest.common.datamodel.RestSnapShot;
import org.apache.helix.rest.server.service.InstanceService;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This is a wrapper for {@link ZKHelixDataAccessor} that caches the result of the batch reads it
 * performs.
 * Note that the usage of this object is valid for one REST request.
 */
public class HelixDataAccessorWrapper extends ZKHelixDataAccessor {
  private static final Logger LOG = LoggerFactory.getLogger(HelixDataAccessorWrapper.class);
  // Shared pool used to fan out per-instance health queries in parallel. A cached pool is
  // used so idle threads are reclaimed between REST requests.
  private static final ExecutorService POOL = Executors.newCachedThreadPool();

  // Name of the per-instance ZK health report that carries partition-health entries.
  public static final String PARTITION_HEALTH_KEY = "PARTITION_HEALTH";
  // Key, within a partition's map field, holding the boolean health flag.
  public static final String IS_HEALTHY_KEY = "IS_HEALTHY";
  // Key, within a partition's map field, holding the entry's expiry time in epoch millis.
  public static final String EXPIRY_KEY = "EXPIRE";

  // Metric names for custom partition check
  private static final String CUSTOM_PARTITION_CHECK_HTTP_REQUESTS_ERROR_TOTAL = MetricRegistry
      .name(InstanceService.class, "custom_partition_check_http_requests_error_total");
  private static final String CUSTOM_PARTITION_CHECK_HTTP_REQUESTS_DURATION =
      MetricRegistry.name(InstanceService.class, "custom_partition_check_http_requests_duration");

  // Namespace used to select the shared metric registry for this request.
  protected String _namespace;
  // Client used to call the participants' custom health-check REST endpoints.
  protected CustomRestClient _restClient;
  // Per-request cache of properties and child names read through this accessor.
  private RestSnapShotSimpleImpl _restSnapShot;

  /**
   * @deprecated Because a namespace is required, please use the other constructors.
   *
   * @param dataAccessor Zk Helix data accessor used to access ZK.
   */
  @Deprecated
  public HelixDataAccessorWrapper(ZKHelixDataAccessor dataAccessor) {
    this(dataAccessor, CustomRestClientFactory.get(), HelixRestNamespace.DEFAULT_NAMESPACE_NAME);
  }

  /**
   * @deprecated Because a namespace is required, please use the constructor that takes a
   * namespace. This overload falls back to the default namespace.
   */
  @Deprecated
  public HelixDataAccessorWrapper(ZKHelixDataAccessor dataAccessor,
      CustomRestClient customRestClient) {
    this(dataAccessor, customRestClient, HelixRestNamespace.DEFAULT_NAMESPACE_NAME);
  }

  /**
   * @param dataAccessor underlying ZK data accessor that reads are delegated to
   * @param customRestClient client used for participants' custom health endpoints
   * @param namespace Helix REST namespace name, used to scope metrics
   */
  public HelixDataAccessorWrapper(ZKHelixDataAccessor dataAccessor,
      CustomRestClient customRestClient, String namespace) {
    super(dataAccessor);
    _restClient = customRestClient;
    _namespace = namespace;
    _restSnapShot = new RestSnapShotSimpleImpl(_clusterName);
  }

  /**
   * Same as {@link #getAllPartitionsHealthOnLiveInstance(RESTConfig, Map, boolean)} with
   * ZK reads enabled (skipZKRead = false).
   */
  public Map<String, Map<String, Boolean>> getAllPartitionsHealthOnLiveInstance(
      RESTConfig restConfig, Map<String, String> customPayLoads) {
    return getAllPartitionsHealthOnLiveInstance(restConfig, customPayLoads, false);
  }

  /**
   * Retrieve partition health status for each live instances combined with reading health partition report from ZK
   * and customized REST API call.
   *
   * @param restConfig restConfig for the cluster contains customize REST API endpoint
   * @param customPayLoads User passed in customized payloads
   * @param skipZKRead Query the participant end point directly rather than fetch for
   *                   partition health from ZK if this flag is true.
   * @return A map of instance -> partition -> healthy or not (boolean).
   */
  public Map<String, Map<String, Boolean>> getAllPartitionsHealthOnLiveInstance(
      RESTConfig restConfig, Map<String, String> customPayLoads, boolean skipZKRead) {
    // Only checks the instances are online with valid reports
    List<String> liveInstances = getChildNames(keyBuilder().liveInstances());
    // Make a parallel batch call for getting all healthreports from ZK.
    List<HelixProperty> zkHealthReports;
    if (!skipZKRead) {
      zkHealthReports = getProperty(liveInstances.stream()
          .map(instance -> keyBuilder().healthReport(instance, PARTITION_HEALTH_KEY))
          .collect(Collectors.toList()), false);
    } else {
      // Placeholder properties with empty records; downstream this triggers a full
      // refresh from the participants' REST endpoints instead of using ZK data.
      zkHealthReports =
          liveInstances.stream().map(instance -> new HelixProperty(instance)).collect(Collectors.toList());
    }
    // Fan out one health-resolution task per live instance.
    Map<String, Future<Map<String, Boolean>>> parallelTasks = new HashMap<>();
    for (int i = 0; i < liveInstances.size(); i++) {
      String liveInstance = liveInstances.get(i);
      Optional<ZNRecord> maybeHealthRecord = Optional.ofNullable(zkHealthReports.get(i)).map(HelixProperty::getRecord);
      parallelTasks.put(liveInstance, POOL.submit(() -> maybeHealthRecord.map(
          record -> getPartitionsHealthFromCustomAPI(liveInstance, record, restConfig, customPayLoads, skipZKRead))
          .orElseGet(
              () -> getHealthStatusFromRest(liveInstance, Collections.emptyList(), restConfig, customPayLoads))));
    }
    // Collect the futures; a failed or interrupted fetch degrades to "no data" for that
    // instance rather than failing the whole request.
    Map<String, Map<String, Boolean>> result = new HashMap<>();
    for (Map.Entry<String, Future<Map<String, Boolean>>> instanceToFuturePartitionHealth : parallelTasks
        .entrySet()) {
      String instance = instanceToFuturePartitionHealth.getKey();
      try {
        result.put(instance, instanceToFuturePartitionHealth.getValue().get());
      } catch (InterruptedException | ExecutionException e) {
        LOG.error("Failed to get partition health for instance {}", instance, e);
        result.put(instance, Collections.emptyMap());
      }
    }
    return result;
  }

  /**
   * Get the partition health status from custom API. When we skip reading data from ZK, partitionHealthRecord will be
   * empty. We need a full refresh of all the partitions. If we pass the empty set of partition to be refresh, custom
   * API will return nothing.
   *
   * @param instance instance to query
   * @param partitionHealthRecord Retrieved partition health data from ZK. Could be emptry if we skip reading from ZK.
   * @param restConfig restConfig for the cluster contains custom API endpoint
   * @param customPayLoads User passed in customized payloads
   * @param requireFullRead Get all the partition status from custom API endpoint if it is true. It should skip
   *                        the payload of "PARTITION : list of partition need to be fetch" in REST call.
   * @return A map of instance -> partition -> healthy or not (boolean).
   */
  private Map<String, Boolean> getPartitionsHealthFromCustomAPI(String instance, ZNRecord partitionHealthRecord,
      RESTConfig restConfig, Map<String, String> customPayLoads, boolean requireFullRead) {
    Map<String, Boolean> result = new HashMap<>();
    List<String> expiredPartitions = new ArrayList<>();
    for (String partitionName : partitionHealthRecord.getMapFields().keySet()) {
      Map<String, String> healthMap = partitionHealthRecord.getMapField(partitionName);
      // NOTE(review): assumes every non-null health map contains EXPIRY_KEY; a missing
      // value would make Long.parseLong throw NumberFormatException - TODO confirm the
      // report format guarantees this.
      if (healthMap == null
          || Long.parseLong(healthMap.get(EXPIRY_KEY)) < System.currentTimeMillis()) {
        // Clean all the existing checks. If we do not clean it, when we do the customized
        // check,
        // Helix may think these partitions are only partitions holding on the instance.
        // But it could potentially have some partitions are unhealthy for expired ones.
        // It could problem for shutting down instances.
        expiredPartitions.add(partitionName);
        continue;
      }
      result.put(partitionName, Boolean.valueOf(healthMap.get(IS_HEALTHY_KEY)));
    }
    if (requireFullRead) {
      // Passing null partitions asks the endpoint for the status of every partition.
      result.putAll(getHealthStatusFromRest(instance, null, restConfig, customPayLoads));
    } else if (!expiredPartitions.isEmpty()) {
      // Only re-query the partitions whose cached entries have expired.
      result.putAll(getHealthStatusFromRest(instance, expiredPartitions, restConfig, customPayLoads));
    }
    return result;
  }

  /**
   * Queries the instance's custom REST endpoint for the health of the given partitions.
   * Failures are logged, counted in the error metric, and reported as an empty map.
   */
  private Map<String, Boolean> getHealthStatusFromRest(String instance, List<String> partitions,
      RESTConfig restConfig, Map<String, String> customPayLoads) {
    MetricRegistry metrics = SharedMetricRegistries.getOrCreate(_namespace);
    // Total requests metric is included as an attribute(Count) in timers
    try (final Timer.Context timer = metrics.timer(CUSTOM_PARTITION_CHECK_HTTP_REQUESTS_DURATION)
        .time()) {
      return _restClient.getPartitionStoppableCheck(restConfig.getBaseUrl(instance), partitions,
          customPayLoads);
    } catch (IOException e) {
      LOG.error("Failed to get partition status on instance {}, partitions: {}", instance,
          partitions, e);
      metrics.counter(CUSTOM_PARTITION_CHECK_HTTP_REQUESTS_ERROR_TOTAL).inc();
      return Collections.emptyMap();
    }
  }

  /** @return the per-request snapshot accumulated by this accessor's reads. */
  public RestSnapShot getRestSnapShot() {
    return _restSnapShot;
  }

  /**
   * Reads a property through the per-request snapshot cache, falling back to ZK on a miss.
   * Note: a null result is stored but effectively not cached - the snapshot returns null
   * for it, so a missing property is re-read from ZK on every call.
   */
  @Override
  public <T extends HelixProperty> T getProperty(PropertyKey key) {
    T property = _restSnapShot.getProperty(key);
    if (property == null) {
      property = super.getProperty(key);
      _restSnapShot.updateValue(key, property);
    }
    return property;
  }

  /**
   * Reads child names through the per-request snapshot cache, falling back to ZK on a miss.
   */
  @Override
  public List<String> getChildNames(PropertyKey key) {
    List<String> names = _restSnapShot.getChildNames(key);
    if (names == null) {
      names = super.getChildNames(key);
      _restSnapShot.updateChildNames(key, names);
    }
    return names;
  }

  /**
   * Eagerly loads every resource's ideal state, external view, and the state model
   * definition referenced by the external view into the snapshot, then marks those
   * property types as populated.
   */
  public void fetchIdealStatesExternalViewStateModel() {
    PropertyKey.Builder propertyKeyBuilder = this.keyBuilder();
    List<String> resources = getChildNames(propertyKeyBuilder.idealStates());
    for (String resourceName : resources) {
      getProperty(propertyKeyBuilder.idealStates(resourceName));
      ExternalView externalView = getProperty(propertyKeyBuilder.externalView(resourceName));
      if (externalView != null) {
        String stateModeDef = externalView.getStateModelDefRef();
        getProperty(propertyKeyBuilder.stateModelDef(stateModeDef));
      }
    }
    _restSnapShot.addPropertyType(PropertyType.IDEALSTATES);
    _restSnapShot.addPropertyType(PropertyType.EXTERNALVIEW);
    _restSnapShot.addPropertyType(PropertyType.STATEMODELDEFS);
  }

  /**
   * Ensures the requested property types are present in the snapshot. The three supported
   * types (ideal states, external views, state model defs) are fetched together; any other
   * type is rejected.
   *
   * @throws UnsupportedOperationException for property types other than the three above
   */
  public void populateCache(List<PropertyType> propertyTypes) {
    for (PropertyType propertyType : propertyTypes) {
      switch (propertyType) {
        case IDEALSTATES:
        case EXTERNALVIEW:
        case STATEMODELDEFS: {
          if (!_restSnapShot.containsProperty(propertyType)) {
            fetchIdealStatesExternalViewStateModel();
          }
          break;
        }
        default:
          throw new UnsupportedOperationException("type selection is not supported yet!");
      }
    }
  }
}
| 9,385 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/common/RestSnapShotSimpleImpl.java
|
package org.apache.helix.rest.common;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.PropertyKey;
import org.apache.helix.rest.common.datamodel.RestSnapShot;
/**
 * Basic {@link RestSnapShot} implementation that additionally caches the child-node
 * names read for each {@link PropertyKey}, so repeated lookups within a single REST
 * request only hit the metadata store once.
 */
public class RestSnapShotSimpleImpl extends RestSnapShot {
  private final Map<PropertyKey, List<String>> _childNodesCache = new HashMap<>();

  public RestSnapShotSimpleImpl(String clusterName) {
    super(clusterName);
  }

  /**
   * @return the cached child names for the key, or null when nothing has been cached yet.
   */
  public List<String> getChildNames(PropertyKey key) {
    // A direct lookup is equivalent to a containsKey/get pair here: absent keys map to null.
    return _childNodesCache.get(key);
  }

  /** Caches the child names read for the given key. */
  public void updateChildNames(PropertyKey key, List<String> children) {
    _childNodesCache.put(key, children);
  }
}
| 9,386 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/common/ContextPropertyKeys.java
|
package org.apache.helix.rest.common;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Keys under which shared objects are stashed in the servlet context, so that
 * resources and filters can retrieve them at request time.
 */
public enum ContextPropertyKeys {
  /** The server's shared context object. */
  SERVER_CONTEXT,
  /** Namespace metadata for the servlet handling the request. */
  METADATA,
  /** The full list of configured namespaces. */
  ALL_NAMESPACES,
  /** Registry of ACLs used for authorization. */
  ACL_REGISTER,
}
| 9,387 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/common/HelixRestUtils.java
|
package org.apache.helix.rest.common;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Small helpers for interpreting servlet paths in the Helix REST server.
 */
public class HelixRestUtils {
  /**
   * Extract namespace information from servlet path. There are 3 cases:
   * 1. /namespaces/namespaceName -> return namespaceName
   * 2. /namespaces -> return ""
   * 3. this is special servlet for default namespace -> return the reserved name for default namespace
   * @param servletPath servletPath
   * @return Namespace name retrieved from servlet spec.
   */
  public static String getNamespaceFromServletPath(String servletPath) {
    if (isDefaultServlet(servletPath)) {
      return HelixRestNamespace.DEFAULT_NAMESPACE_NAME;
    }
    // Drop the "/namespaces" prefix; whatever remains (minus slashes) is the namespace.
    String remainder = servletPath.replace("/namespaces", "");
    return (remainder.isEmpty() || remainder.equals("/")) ? "" : remainder.replace("/", "");
  }

  /**
   * @return true when the path belongs to the special default-namespace servlet.
   */
  public static boolean isDefaultServlet(String servletPath) {
    // Special servlet for default namespace has path spec "/*", so servletPath is empty
    return servletPath == null || servletPath.isEmpty();
  }
}
| 9,388 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/common
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/common/datamodel/RestSnapShot.java
|
package org.apache.helix.rest.common.datamodel;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashSet;
import java.util.Set;
import org.apache.helix.HelixProperty;
import org.apache.helix.PropertyKey;
import org.apache.helix.PropertyType;
import org.apache.helix.datamodel.Snapshot;
/* This Snapshot can extend Snapshot from common/core module
* once there is more generic snapshot.
* An Snapshot object should contain all the Helix related info that an implementation of
* OperationAbstractClass would need.
*/
// TODO: Future: Support hierarchical Snapshot type for other services besides cluster MaintenanceService.
/**
 * Per-request snapshot of Helix metadata, keyed by {@link PropertyKey}, that also
 * tracks which {@link PropertyType}s have already been fetched in bulk.
 */
public class RestSnapShot extends Snapshot<PropertyKey, HelixProperty> {
  // Property types whose data has been bulk-loaded into this snapshot.
  private final Set<PropertyType> _propertyTypes = new HashSet<>();
  private final String _clusterName;

  public RestSnapShot(String clusterName) {
    _clusterName = clusterName;
  }

  /** Marks the given property type as already loaded into this snapshot. */
  public void addPropertyType(PropertyType propertyType) {
    _propertyTypes.add(propertyType);
  }

  /** @return true when the given property type has already been loaded. */
  public boolean containsProperty(PropertyType propertyType) {
    return _propertyTypes.contains(propertyType);
  }

  /**
   * @return the cached property for the key, or null when the key has not been cached.
   */
  @SuppressWarnings("unchecked")
  public <T extends HelixProperty> T getProperty(PropertyKey key) {
    return containsKey(key) ? (T) getValue(key) : null;
  }

  public String getClusterName() {
    return _clusterName;
  }
}
| 9,389 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/client/CustomRestClientImpl.java
|
package org.apache.helix.rest.client;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.HttpStatus;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Default {@link CustomRestClient} implementation that calls a participant's custom
 * health-check REST endpoints over HTTP and converts the JSON responses into
 * name -> healthy maps.
 */
class CustomRestClientImpl implements CustomRestClient {
  private static final Logger LOG = LoggerFactory.getLogger(CustomRestClientImpl.class);
  // postfix used to append at the end of base url
  private static final String INSTANCE_HEALTH_STATUS = "/instanceHealthStatus";
  private static final String PARTITION_HEALTH_STATUS = "/partitionHealthStatus";
  // JSON field within each partition's object that carries the boolean health flag.
  private static final String IS_HEALTHY_FIELD = "IS_HEALTHY";
  // Payload key under which the list of partitions to check is sent.
  private static final String PARTITIONS = "partitions";
  private static final String ACCEPT_CONTENT_TYPE = "application/json";
  // A configured ObjectMapper is thread-safe, so one shared instance suffices.
  private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
  private HttpClient _httpClient;

  // Strategy for turning an endpoint's JSON response into a name -> healthy map.
  private interface JsonConverter {
    Map<String, Boolean> convert(JsonNode jsonNode);
  }

  public CustomRestClientImpl(HttpClient httpClient) {
    _httpClient = httpClient;
  }

  /**
   * Queries the instance-level health endpoint and returns a check-name -> healthy map.
   * The response is expected to be a flat JSON object of boolean-valued fields.
   */
  @Override
  public Map<String, Boolean> getInstanceStoppableCheck(String baseUrl,
      Map<String, String> customPayloads) throws IOException {
    // example url: http://<baseUrl>/instanceHealthStatus, assuming the base url already directly
    // queries at the instance
    String url = baseUrl + INSTANCE_HEALTH_STATUS;
    JsonConverter jsonConverter = jsonNode -> {
      Map<String, Boolean> result = new HashMap<>();
      jsonNode.fields().forEachRemaining(kv -> result.put(kv.getKey(), kv.getValue().asBoolean()));
      return result;
    };
    return handleResponse(post(url, Collections.unmodifiableMap(customPayloads)), jsonConverter);
  }

  /**
   * Queries the partition-level health endpoint for the given partitions and returns a
   * partition -> healthy map. A null partitions list asks the endpoint for all partitions.
   */
  @Override
  public Map<String, Boolean> getPartitionStoppableCheck(String baseUrl, List<String> partitions,
      Map<String, String> customPayloads) throws IOException {
    /*
     * example url: http://<baseUrl>/partitionHealthStatus -d {
     * "partitions" : ["p1", "p3", "p9"],
     * "<key>": "<value>",
     * ...
     * }
     */
    String url = baseUrl + PARTITION_HEALTH_STATUS;
    // To avoid ImmutableMap as parameter
    Map<String, Object> payLoads = new HashMap<>(customPayloads);
    // Add the entry: "partitions" : ["p1", "p3", "p9"]
    if (partitions != null) {
      payLoads.put(PARTITIONS, partitions);
    }
    JsonConverter jsonConverter = jsonNode -> {
      Map<String, Boolean> result = new HashMap<>();
      jsonNode.fields().forEachRemaining(
          kv -> result.put(kv.getKey(), kv.getValue().get(IS_HEALTHY_FIELD).asBoolean()));
      return result;
    };
    return handleResponse(post(url, payLoads), jsonConverter);
  }

  /**
   * Reads the response entity fully and parses it as JSON. Consuming the entity via
   * EntityUtils.toString also releases the underlying connection.
   */
  @VisibleForTesting
  protected JsonNode getJsonObject(HttpResponse httpResponse) throws IOException {
    HttpEntity httpEntity = httpResponse.getEntity();
    String str = EntityUtils.toString(httpEntity);
    LOG.info("Converting Response Content {} to JsonNode", str);
    return OBJECT_MAPPER.readTree(str);
  }

  /**
   * Converts a 200 response via the given converter; any other status is reported as a
   * ClientProtocolException after the entity has been drained so the connection is freed.
   */
  private Map<String, Boolean> handleResponse(HttpResponse httpResponse,
      JsonConverter jsonConverter) throws IOException {
    int status = httpResponse.getStatusLine().getStatusCode();
    if (status == HttpStatus.SC_OK) {
      LOG.info("Expected HttpResponse statusCode: {}", HttpStatus.SC_OK);
      return jsonConverter.convert(getJsonObject(httpResponse));
    } else {
      // Ensure entity is fully consumed so stream is closed.
      EntityUtils.consumeQuietly(httpResponse.getEntity());
      throw new ClientProtocolException("Unexpected response status: " + status + ", reason: "
          + httpResponse.getStatusLine().getReasonPhrase());
    }
  }

  /**
   * POSTs the payload map as JSON to the given URL and returns the raw response.
   * On transport failure the request's connection is released before rethrowing, so it
   * can be reused and does not leak.
   */
  @VisibleForTesting
  protected HttpResponse post(String url, Map<String, Object> payloads) throws IOException {
    HttpPost postRequest = new HttpPost(url);
    try {
      postRequest.setHeader("Accept", ACCEPT_CONTENT_TYPE);
      StringEntity entity = new StringEntity(OBJECT_MAPPER.writeValueAsString(payloads),
          ContentType.APPLICATION_JSON);
      postRequest.setEntity(entity);
      LOG.info("Executing request: {}, headers: {}, entity: {}", postRequest.getRequestLine(),
          postRequest.getAllHeaders(), postRequest.getEntity());
      HttpResponse response = _httpClient.execute(postRequest);
      int status = response.getStatusLine().getStatusCode();
      if (status != HttpStatus.SC_OK) {
        LOG.warn("Received non-200 status code: {}, payloads: {}", status, payloads);
      }
      return response;
    } catch (IOException e) {
      // Release connection to be reused and avoid connection leakage.
      postRequest.releaseConnection();
      throw e;
    }
  }
}
| 9,390 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/client/CustomRestClientFactory.java
|
package org.apache.helix.rest.client;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.rest.common.HttpConstants;
import org.apache.helix.rest.common.RestSystemPropertyKeys;
import org.apache.helix.rest.server.HelixRestServer;
import org.apache.helix.util.HelixUtil;
import org.apache.http.client.HttpClient;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.client.HttpClients;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The memory efficient factory to create instances for {@link CustomRestClient}
*/
/**
 * The memory efficient factory to create instances for {@link CustomRestClient}.
 * Lazily builds a single shared client configured with the REST HTTP timeout and,
 * when available, the server's SSL context.
 */
public class CustomRestClientFactory {
  private static final Logger LOG = LoggerFactory.getLogger(CustomRestClientFactory.class);
  // volatile is required for correct double-checked locking: without it, a thread could
  // observe a non-null INSTANCE reference before the object's fields are fully published
  // (Java Memory Model; see JSR-133).
  private static volatile CustomRestClient INSTANCE = null;

  // Here int has been used for timeout value because setConnectTimeout,
  // setConnectionRequestTimeout and setSocketTimeout are getting int as input
  private static final int HTTP_REQUEST_TIMEOUT = HelixUtil.getSystemPropertyAsInt(
      RestSystemPropertyKeys.REST_HTTP_TIMEOUT_MS, HttpConstants.DEFAULT_HTTP_REQUEST_TIMEOUT);

  private CustomRestClientFactory() {
  }

  /**
   * Returns the process-wide {@link CustomRestClient}, creating it on first use.
   * Double-checked locking keeps the common path lock-free once initialized.
   *
   * NOTE(review): if client construction throws, the error is logged and null is
   * returned; callers should tolerate a null result until a later call succeeds.
   *
   * @return the shared client, or null if initialization failed
   */
  public static CustomRestClient get() {
    if (INSTANCE == null) {
      synchronized (CustomRestClientFactory.class) {
        if (INSTANCE == null) {
          try {
            HttpClient httpClient;
            // Apply the configured timeout to connect, connection-request and socket waits.
            RequestConfig config = RequestConfig.custom().setConnectTimeout(HTTP_REQUEST_TIMEOUT)
                .setConnectionRequestTimeout(HTTP_REQUEST_TIMEOUT)
                .setSocketTimeout(HTTP_REQUEST_TIMEOUT).build();
            if (HelixRestServer.REST_SERVER_SSL_CONTEXT != null) {
              // Reuse the REST server's SSL context; hostname verification is disabled
              // (NoopHostnameVerifier) for participant endpoints.
              httpClient =
                  HttpClients.custom().setSSLContext(HelixRestServer.REST_SERVER_SSL_CONTEXT)
                      .setSSLSocketFactory(new SSLConnectionSocketFactory(
                          HelixRestServer.REST_SERVER_SSL_CONTEXT, new NoopHostnameVerifier()))
                      .setDefaultRequestConfig(config).build();
            } else {
              httpClient = HttpClientBuilder.create().setDefaultRequestConfig(config).build();
            }
            INSTANCE = new CustomRestClientImpl(httpClient);
            return INSTANCE;
          } catch (Exception e) {
            LOG.error("Exception when initializing CustomRestClient", e);
          }
        }
      }
    }
    return INSTANCE;
  }
}
| 9,391 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/client/CustomRestClient.java
|
package org.apache.helix.rest.client;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
 * Client for interacting with the participant side to query its custom health checks.
 * Helix acts purely as a proxy: payloads supplied by the caller are forwarded untouched.
 */
public interface CustomRestClient {
  /**
   * Get stoppable check result on instance
   * @param baseUrl the base url of the participant
   * @param customPayloads generic payloads required from client side and helix only works as proxy
   * @return a map where key is custom stoppable check name and boolean value indicates if the check
   *         succeeds
   * @throws IOException if the HTTP call to the participant fails or the response cannot be read
   */
  Map<String, Boolean> getInstanceStoppableCheck(String baseUrl, Map<String, String> customPayloads)
      throws IOException;

  /**
   * Get stoppable check result on a list of partitions on the instance
   *
   * @param baseUrl the base url of the participant
   * @param partitions a list of partitions maintained by the participant
   * @param customPayloads generic payloads required from client side and helix only works as proxy
   * @return a map where key is partition name and boolean value indicates if the partition is
   *         healthy
   * @throws IOException if the HTTP call to the participant fails or the response cannot be read
   */
  Map<String, Boolean> getPartitionStoppableCheck(String baseUrl, List<String> partitions,
      Map<String, String> customPayloads) throws IOException;
}
| 9,392 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/acl/AclRegister.java
|
package org.apache.helix.rest.acl;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import javax.servlet.http.HttpServletRequest;
/**
 * Pluggable hook for registering ACL entries for incoming REST requests.
 */
public interface AclRegister {
  /**
   * Create an ACL entry based on the request.
   *
   * @param request the incoming HTTP request to derive the ACL entry from
   */
  void createACL(HttpServletRequest request);
}
| 9,393 |
0 |
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest
|
Create_ds/helix/helix-rest/src/main/java/org/apache/helix/rest/acl/NoopAclRegister.java
|
package org.apache.helix.rest.acl;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import javax.servlet.http.HttpServletRequest;
/**
 * No-op {@link AclRegister} implementation: used when ACL registration is disabled.
 */
public class NoopAclRegister implements AclRegister {
  // Intentionally does nothing — no ACL entry is created for any request.
  public void createACL(HttpServletRequest request) {
  }
}
| 9,394 |
0 |
Create_ds/helix/helix-agent/src/test/java/org/apache/helix
|
Create_ds/helix/helix-agent/src/test/java/org/apache/helix/agent/TestHelixAgent.java
|
package org.apache.helix.agent;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.File;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.ExternalCommand;
import org.apache.helix.ScriptTestHelper;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.tools.ClusterStateVerifier;
import org.apache.helix.tools.ClusterStateVerifier.BestPossAndExtViewZkVerifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
/**
 * Integration test for the Helix agent: drives MasterSlave state transitions whose
 * transition commands are shell invocations of simpleHttpClient.py against a local
 * python HTTP server, then verifies the server received each expected transition request.
 */
public class TestHelixAgent extends ZkUnitTestBase {
  private final static Logger LOG = LoggerFactory.getLogger(TestHelixAgent.class);
  // Directory containing the integration scripts (simpleHttpServer.py / simpleHttpClient.py).
  final String workingDir = ScriptTestHelper.getPrefix() + ScriptTestHelper.INTEGRATION_SCRIPT_DIR;
  // Handle to the HTTP server process started per test method; inspected in afterMethod.
  ExternalCommand serverCmd = null;

  @BeforeMethod
  public void beforeMethod() throws Exception {
    // Start the local HTTP server that records each transition request it receives.
    serverCmd = ExternalCommand.start(workingDir + "/simpleHttpServer.py");
  }

  @AfterMethod
  public void afterMethod() throws Exception {
    if (serverCmd != null) {
      // shutdown server
      ExternalCommand.execute(new File(workingDir), "simpleHttpClient.py", "exit");
      // System.out.println("simpleHttpServer output: \n" + serverCmd.getStringOutput());
      // check server has received all the requests
      // Assertions search the server log in order: each transition must appear after the
      // previous one, matching the expected O->S->M->S->O lifecycle.
      String serverOutput = serverCmd.getStringOutput();
      int idx = serverOutput.indexOf("requestPath: /OFFLINE-SLAVE");
      Assert.assertTrue(idx > 0, "server should receive OFFINE->SLAVE transition");
      idx = serverOutput.indexOf("requestPath: /SLAVE-MASTER", idx);
      Assert.assertTrue(idx > 0, "server should receive SLAVE-MASTER transition");
      idx = serverOutput.indexOf("requestPath: /MASTER-SLAVE", idx);
      Assert.assertTrue(idx > 0, "server should receive MASTER-SLAVE transition");
      idx = serverOutput.indexOf("requestPath: /SLAVE-OFFLINE", idx);
      Assert.assertTrue(idx > 0, "server should receive SLAVE-OFFLINE transition");
    }
  }

  @Test
  public void test() throws Exception {
    String className = TestHelper.getTestClassName();
    String methodName = TestHelper.getTestMethodName();
    final String clusterName = className + "_" + methodName;
    final int n = 1;
    final String zkAddr = ZK_ADDR;
    System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
    TestHelper.setupCluster(clusterName, zkAddr, 12918, // participant port
        "localhost", // participant name prefix
        "TestDB", // resource name prefix
        1, // resources
        1, // partitions per resource
        n, // number of nodes
        1, // replicas
        "MasterSlave", true); // do rebalance
    // set cluster config
    HelixConfigScope scope =
        new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(clusterName).build();
    ConfigAccessor configAccessor = new ConfigAccessor(_gZkClient);
    // String pidFile = ScriptTestHelper.getPrefix() + ScriptTestHelper.INTEGRATION_LOG_DIR +
    // "/default/foo_{PARTITION_NAME}_pid.txt";
    // the pid file path for the first partition
    // delete it if exists
    // String pidFileFirstPartition = ScriptTestHelper.getPrefix() +
    // ScriptTestHelper.INTEGRATION_LOG_DIR + "/default/foo_TestDB0_0_pid.txt";
    // File file = new File(pidFileFirstPartition);
    // if (file.exists()) {
    // file.delete();
    // }
    // set commands for state-transitions
    // Each transition is mapped to a shell command stored in cluster-scope config; the agent's
    // AgentStateModel looks these up and executes them when a transition message arrives.
    CommandConfig.Builder builder = new CommandConfig.Builder();
    CommandConfig cmdConfig =
        builder.setTransition("SLAVE", "MASTER").setCommand("simpleHttpClient.py SLAVE-MASTER")
            .setCommandWorkingDir(workingDir).setCommandTimeout("0")
            // .setPidFile(pidFile)
            .build();
    configAccessor.set(scope, cmdConfig.toKeyValueMap());
    builder = new CommandConfig.Builder();
    cmdConfig =
        builder.setTransition("OFFLINE", "SLAVE").setCommand("simpleHttpClient.py OFFLINE-SLAVE")
            .setCommandWorkingDir(workingDir).build();
    configAccessor.set(scope, cmdConfig.toKeyValueMap());
    builder = new CommandConfig.Builder();
    cmdConfig =
        builder.setTransition("MASTER", "SLAVE").setCommand("simpleHttpClient.py MASTER-SLAVE")
            .setCommandWorkingDir(workingDir).build();
    configAccessor.set(scope, cmdConfig.toKeyValueMap());
    builder = new CommandConfig.Builder();
    cmdConfig =
        builder.setTransition("SLAVE", "OFFLINE").setCommand("simpleHttpClient.py SLAVE-OFFLINE")
            .setCommandWorkingDir(workingDir).build();
    configAccessor.set(scope, cmdConfig.toKeyValueMap());
    builder = new CommandConfig.Builder();
    // OFFLINE->DROPPED is a NOP: no external command should run for it.
    cmdConfig =
        builder.setTransition("OFFLINE", "DROPPED").setCommand(CommandAttribute.NOP.getName())
            .build();
    configAccessor.set(scope, cmdConfig.toKeyValueMap());
    // start controller
    ClusterControllerManager controller = new ClusterControllerManager(zkAddr, clusterName, "controller_0");
    controller.syncStart();
    // start helix-agent
    Map<String, Thread> agents = new HashMap<String, Thread>();
    for (int i = 0; i < n; i++) {
      final String instanceName = "localhost_" + (12918 + i);
      // HelixAgentMain.main blocks (joins) until interrupted, so each agent runs on its own
      // thread; the threads are interrupted in the cleanup section below.
      Thread agentThread = new Thread() {
        @Override
        public void run() {
          try {
            HelixAgentMain.main(new String[] {
                "--zkSvr", zkAddr, "--cluster", clusterName, "--instanceName", instanceName,
                "--stateModel", "MasterSlave"
            });
          } catch (Exception e) {
            LOG.error("Exception start helix-agent", e);
          }
        }
      };
      agents.put(instanceName, agentThread);
      agentThread.start();
      // wait participant thread to start
      Thread.sleep(100);
    }
    boolean result =
        ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
            clusterName));
    Assert.assertTrue(result);
    // read the pid file should get current process id
    // String readPid = SystemUtil.getPidFromFile(new File(pidFileFirstPartition));
    // Assert.assertNotNull(readPid, "readPid is the pid for foo_test.py. should NOT be null");
    // String name = ManagementFactory.getRuntimeMXBean().getName();
    // String currentPid = name.substring(0,name.indexOf("@"));
    // System.out.println("read-pid: " + readPid + ", current-pid: " + currentPid);
    // drop resource will trigger M->S and S->O transitions
    ClusterSetup.processCommandLineArgs(new String[] {
        "--zkSvr", ZK_ADDR, "--dropResource", clusterName, "TestDB0"
    });
    result =
        ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
            clusterName));
    Assert.assertTrue(result);
    // clean up
    controller.syncStop();
    for (Thread agentThread : agents.values()) {
      agentThread.interrupt();
    }
    System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
  }
}
| 9,395 |
0 |
Create_ds/helix/helix-agent/src/main/java/org/apache/helix
|
Create_ds/helix/helix-agent/src/main/java/org/apache/helix/agent/AgentStateModel.java
|
package org.apache.helix.agent;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.File;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.helix.ExternalCommand;
import org.apache.helix.HelixManager;
import org.apache.helix.NotificationContext;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.model.Message;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.helix.participant.statemachine.StateModel;
import org.apache.helix.participant.statemachine.StateModelInfo;
import org.apache.helix.participant.statemachine.Transition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * State model that executes externally-configured shell commands for every state transition.
 * Command lookup order: the transition message itself, then resource-scope config, then
 * cluster-scope config. Config keys have the form "{from}-{to}.{attribute}".
 */
@StateModelInfo(initialState = "OFFLINE", states = {})
public class AgentStateModel extends StateModel {
  private static final Logger _logger = LoggerFactory.getLogger(AgentStateModel.class);
  // Matches "{...}" placeholders (non-greedy), e.g. "{PARTITION_NAME}" in a pid-file path.
  private static Pattern pattern = Pattern.compile("(\\{.+?\\})");

  // Builds a config key like "OFFLINE-SLAVE.command" for the given transition and attribute.
  private static String buildKey(String fromState, String toState, CommandAttribute attribute) {
    return fromState + "-" + toState + "." + attribute.getName();
  }

  // Replaces every "{ATTR}" placeholder in the string with the corresponding message
  // attribute value, e.g. "{PARTITION_NAME}" -> the partition name carried by the message.
  // NOTE(review): if the message does not carry the attribute, getAttribute may return null
  // and String.replace would throw — presumably placeholders are always valid; verify.
  private static String instantiateByMessage(String string, Message message) {
    Matcher matcher = pattern.matcher(string);
    String result = string;
    while (matcher.find()) {
      String var = matcher.group();
      result =
          result.replace(var,
              message.getAttribute(Message.Attributes.valueOf(var.substring(1, var.length() - 1))));
    }
    return result;
  }

  /**
   * Generic handler for every transition (to="*", from="*"): resolves the configured command
   * for the (fromState, toState) pair, runs it with the configured working dir and timeout,
   * and optionally monitors the spawned process via its pid file.
   *
   * @param message the transition message (may itself carry the command attributes)
   * @param context notification context providing the HelixManager
   * @throws Exception if no command is configured for the transition, or execution fails
   */
  @Transition(to = "*", from = "*")
  public void genericStateTransitionHandler(Message message, NotificationContext context)
      throws Exception {
    // first try get command from message
    String cmd = message.getRecord().getSimpleField(CommandAttribute.COMMAND.getName());
    String workingDir = message.getRecord().getSimpleField(CommandAttribute.WORKING_DIR.getName());
    String timeout = message.getRecord().getSimpleField(CommandAttribute.TIMEOUT.getName());
    String pidFile = message.getRecord().getSimpleField(CommandAttribute.PID_FILE.getName());
    HelixManager manager = context.getManager();
    String clusterName = manager.getClusterName();
    String fromState = message.getFromState();
    String toState = message.getToState();
    // construct keys for command-config
    String cmdKey = buildKey(fromState, toState, CommandAttribute.COMMAND);
    String workingDirKey = buildKey(fromState, toState, CommandAttribute.WORKING_DIR);
    String timeoutKey = buildKey(fromState, toState, CommandAttribute.TIMEOUT);
    String pidFileKey = buildKey(fromState, toState, CommandAttribute.PID_FILE);
    List<String> cmdConfigKeys = Arrays.asList(cmdKey, workingDirKey, timeoutKey, pidFileKey);
    // read command from resource-scope configures
    if (cmd == null) {
      HelixConfigScope resourceScope =
          new HelixConfigScopeBuilder(ConfigScopeProperty.RESOURCE).forCluster(clusterName)
              .forResource(message.getResourceName()).build();
      Map<String, String> cmdKeyValueMap =
          manager.getConfigAccessor().get(resourceScope, cmdConfigKeys);
      if (cmdKeyValueMap != null) {
        cmd = cmdKeyValueMap.get(cmdKey);
        workingDir = cmdKeyValueMap.get(workingDirKey);
        timeout = cmdKeyValueMap.get(timeoutKey);
        pidFile = cmdKeyValueMap.get(pidFileKey);
      }
    }
    // if resource-scope doesn't contain command, fall back to cluster-scope configures
    if (cmd == null) {
      HelixConfigScope clusterScope =
          new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(clusterName).build();
      Map<String, String> cmdKeyValueMap =
          manager.getConfigAccessor().get(clusterScope, cmdConfigKeys);
      if (cmdKeyValueMap != null) {
        cmd = cmdKeyValueMap.get(cmdKey);
        workingDir = cmdKeyValueMap.get(workingDirKey);
        timeout = cmdKeyValueMap.get(timeoutKey);
        pidFile = cmdKeyValueMap.get(pidFileKey);
      }
    }
    if (cmd == null) {
      throw new Exception("Unable to find command for transition from:" + message.getFromState()
          + " to:" + message.getToState());
    }
    _logger.info("Executing command: " + cmd + ", using workingDir: " + workingDir + ", timeout: "
        + timeout + ", on " + manager.getInstanceName());
    // skip nop command
    if (cmd.equals(CommandAttribute.NOP.getName())) {
      return;
    }
    // split the cmd to actual cmd and args[]
    String cmdSplits[] = cmd.trim().split("\\s+");
    String cmdValue = cmdSplits[0];
    String args[] = Arrays.copyOfRange(cmdSplits, 1, cmdSplits.length);
    // get the command-execution timeout
    long timeoutValue = 0; // 0 means wait for ever
    if (timeout != null) {
      try {
        timeoutValue = Long.parseLong(timeout);
      } catch (NumberFormatException e) {
        // OK to use 0
      }
    }
    ExternalCommand externalCmd =
        ExternalCommand.executeWithTimeout(new File(workingDir), cmdValue, timeoutValue, args);
    int exitValue = externalCmd.exitValue();
    // debug
    // System.out.println("command: " + cmd + ", exitValue: " + exitValue
    // + " output:\n" + externalCmd.getStringOutput());
    _logger.debug("command: {}, exitValue: {} output: {}\n", cmd, exitValue,
        externalCmd.getStringOutput());
    // monitor pid if pidFile exists
    if (pidFile == null) {
      // no pid to monitor
      return;
    }
    // Resolve "{...}" placeholders in the pid-file path, read the pid from it, and start a
    // background thread that monitors the spawned process.
    String pidFileValue = instantiateByMessage(pidFile, message);
    String pid = SystemUtil.getPidFromFile(new File(pidFileValue));
    if (pid != null) {
      new ProcessMonitorThread(pid).start();
    }
  }
}
| 9,396 |
0 |
Create_ds/helix/helix-agent/src/main/java/org/apache/helix
|
Create_ds/helix/helix-agent/src/main/java/org/apache/helix/agent/SystemUtil.java
|
package org.apache.helix.agent;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import org.apache.helix.ExternalCommand;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * OS-level helpers for inspecting external processes (via {@code ps}) and reading pid files.
 * Only Mac OS X and Linux are supported for process-state queries.
 */
public class SystemUtil {
  public static final String OS_NAME = System.getProperty("os.name");
  private static Logger LOG = LoggerFactory.getLogger(SystemUtil.class);

  /**
   * PROCESS STATE CODES
   */
  public static enum ProcessStateCode {
    // Here are the different values that the s, stat and state output specifiers (header "STAT" or
    // "S")
    // will display to describe the state of a process.
    D("Uninterruptible sleep (usually IO)"),
    R("Running or runnable (on run queue)"),
    S("Interruptible sleep (waiting for an event to complete)"),
    T("Stopped, either by a job control signal or because it is being traced."),
    W("paging (not valid since the 2.6.xx kernel)"),
    X("dead (should never be seen)"),
    Z("Defunct (\"zombie\") process, terminated but not reaped by its parent.");

    private final String _description;

    private ProcessStateCode(String description) {
      _description = description;
    }

    public String getDescription() {
      return _description;
    }
  }

  /**
   * Query the state of a process by running {@code ps <pid>} and parsing the STAT/S column.
   *
   * @param processId the pid to query
   * @return the process state code, or {@code null} if the process does not exist or the
   *         state column cannot be located in the ps output
   * @throws Exception if the ps command fails
   * @throws UnsupportedOperationException on operating systems other than Mac OS X and Linux
   */
  public static ProcessStateCode getProcessState(String processId) throws Exception {
    if (OS_NAME.equals("Mac OS X") || OS_NAME.equals("Linux")) {
      ExternalCommand cmd = ExternalCommand.start("ps", processId);
      cmd.waitFor();
      // split by new lines
      // should return 2 lines for an existing process, or 1 line for a non-existing process
      String lines[] = cmd.getStringOutput().split("[\\r\\n]+");
      if (lines.length != 2) {
        LOG.info("process: " + processId + " not exist");
        return null;
      }
      // split by whitespace, 1st line is attributes, 2nd line is actual values
      // should be parallel arrays
      String attributes[] = lines[0].trim().split("\\s+");
      String values[] = lines[1].trim().split("\\s+");
      Character processStateCodeChar = null;
      for (int i = 0; i < attributes.length; i++) {
        String attribute = attributes[i];
        // header "STAT" or "S"
        if ("STAT".equals(attribute) || "S".equals(attribute)) {
          // first character should be major process state code
          processStateCodeChar = values[i].charAt(0);
          break;
        }
      }
      // Guard against ps output without a STAT/S column: previously this unboxed a null
      // Character and threw NullPointerException.
      if (processStateCodeChar == null) {
        LOG.warn("no STAT/S column found in ps output for process: " + processId);
        return null;
      }
      return ProcessStateCode.valueOf(Character.toString(processStateCodeChar));
    } else {
      throw new UnsupportedOperationException("Not supported OS: " + OS_NAME);
    }
  }

  /**
   * Read the first line (the pid) from a pid file.
   *
   * @param file the pid file to read
   * @return the first line of the file, or {@code null} if the file cannot be read
   */
  public static String getPidFromFile(File file) {
    // try-with-resources replaces the manual close-in-finally; the reader is closed on all paths.
    try (BufferedReader br = new BufferedReader(new FileReader(file))) {
      return br.readLine();
    } catch (IOException e) {
      LOG.warn("fail to read pid from pidFile: " + file + ". will not monitor");
      return null;
    }
  }
}
| 9,397 |
0 |
Create_ds/helix/helix-agent/src/main/java/org/apache/helix
|
Create_ds/helix/helix-agent/src/main/java/org/apache/helix/agent/AgentStateModelFactory.java
|
package org.apache.helix.agent;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.participant.statemachine.StateModelFactory;
/**
 * Factory producing one {@link AgentStateModel} per partition for the Helix agent.
 */
public class AgentStateModelFactory extends StateModelFactory<AgentStateModel> {
  /**
   * Create a fresh state model for the given resource/partition pair.
   * A new, independent instance is returned on every call.
   */
  @Override
  public AgentStateModel createNewStateModel(String resourceName, String partitionKey) {
    return new AgentStateModel();
  }
}
| 9,398 |
0 |
Create_ds/helix/helix-agent/src/main/java/org/apache/helix
|
Create_ds/helix/helix-agent/src/main/java/org/apache/helix/agent/HelixAgentMain.java
|
package org.apache.helix.agent;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.helix.HelixManager;
import org.apache.helix.InstanceType;
import org.apache.helix.manager.zk.ZKHelixManager;
import org.apache.helix.participant.StateMachineEngine;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Command-line entry point for the Helix agent: connects a PARTICIPANT manager to the
 * cluster and registers {@link AgentStateModelFactory} for the requested state model.
 * Required options: --zkSvr, --cluster, --instanceName, --stateModel.
 */
public class HelixAgentMain {
  private static Logger LOG = LoggerFactory.getLogger(HelixAgentMain.class);

  public static final String zkAddr = "zkSvr";
  public static final String cluster = "cluster";
  public static final String help = "help";
  public static final String instanceName = "instanceName";
  public static final String stateModel = "stateModel";

  // hack: OptionalBuilder is not thread safe
  @SuppressWarnings("static-access")
  synchronized private static Options constructCommandLineOptions() {
    Option helpOption =
        OptionBuilder.withLongOpt(help).withDescription("Prints command-line options info")
            .create();
    Option zkAddrOption =
        OptionBuilder.withLongOpt(zkAddr).hasArgs(1).isRequired(true)
            .withArgName("ZookeeperServerAddress(Required)")
            .withDescription("Provide zookeeper address").create();
    Option clusterOption =
        OptionBuilder.withLongOpt(cluster).hasArgs(1).isRequired(true)
            .withArgName("Cluster name (Required)").withDescription("Provide cluster name")
            .create();
    Option instanceNameOption =
        OptionBuilder.withLongOpt(instanceName).hasArgs(1).isRequired(true)
            .withArgName("Helix agent name (Required)").withDescription("Provide Helix agent name")
            .create();
    Option stateModelOption =
        OptionBuilder.withLongOpt(stateModel).hasArgs(1).isRequired(true)
            .withArgName("State model name (Required)").withDescription("Provide state model name")
            .create();
    Options options = new Options();
    options.addOption(helpOption);
    options.addOption(zkAddrOption);
    options.addOption(clusterOption);
    options.addOption(instanceNameOption);
    options.addOption(stateModelOption);
    return options;
  }

  public static void printUsage(Options cliOptions) {
    HelpFormatter helpFormatter = new HelpFormatter();
    helpFormatter.setWidth(1000);
    helpFormatter.printHelp("java " + HelixAgentMain.class.getName(), cliOptions);
  }

  /**
   * Parse command-line args; on parse failure print usage and exit(1).
   *
   * @param cliArgs raw command-line arguments
   * @return the parsed command line (never returns normally on parse failure)
   */
  public static CommandLine processCommandLineArgs(String[] cliArgs) throws Exception {
    CommandLineParser cliParser = new GnuParser();
    Options cliOptions = constructCommandLineOptions();
    try {
      return cliParser.parse(cliOptions, cliArgs);
    } catch (ParseException pe) {
      LOG.error("fail to parse command-line options. cliArgs: " + Arrays.toString(cliArgs), pe);
      printUsage(cliOptions);
      System.exit(1);
    }
    return null;
  }

  // NOT working for kill -9, working for kill -2/-15
  static class HelixAgentShutdownHook extends Thread {
    final HelixManager _manager;

    HelixAgentShutdownHook(HelixManager manager) {
      _manager = manager;
    }

    @Override
    public void run() {
      // Null-check before any dereference: the original logged _manager.getInstanceName()
      // unconditionally, defeating the null check that followed.
      if (_manager != null && _manager.isConnected()) {
        LOG.info("HelixAgentShutdownHook invoked. agent: " + _manager.getInstanceName());
        _manager.disconnect();
      }
    }
  }

  public static void main(String[] args) throws Exception {
    CommandLine cmd = processCommandLineArgs(args);
    String zkAddress = cmd.getOptionValue(zkAddr);
    String clusterName = cmd.getOptionValue(cluster);
    String instance = cmd.getOptionValue(instanceName);
    String stateModelName = cmd.getOptionValue(stateModel);
    HelixManager manager =
        new ZKHelixManager(clusterName, instance, InstanceType.PARTICIPANT, zkAddress);
    StateMachineEngine stateMach = manager.getStateMachineEngine();
    stateMach.registerStateModelFactory(stateModelName, new AgentStateModelFactory());
    Runtime.getRuntime().addShutdownHook(new HelixAgentShutdownHook(manager));
    try {
      manager.connect();
      // Block forever; the shutdown hook (kill -2/-15) handles disconnection.
      Thread.currentThread().join();
    } catch (Exception e) {
      // Pass the throwable so the stack trace is preserved (e.toString() discarded it).
      LOG.error("Exception running helix-agent", e);
    } finally {
      if (manager != null && manager.isConnected()) {
        manager.disconnect();
      }
    }
  }
}
| 9,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.