index
int64 0
0
| repo_id
stringlengths 9
205
| file_path
stringlengths 31
246
| content
stringlengths 1
12.2M
| __index_level_0__
int64 0
10k
|
---|---|---|---|---|
0 |
Create_ds/camel-k/e2e/common/config
|
Create_ds/camel-k/e2e/common/config/files/TimerKameletIntegrationNamedConfiguration.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.lang.Exception;
import java.lang.Override;
import org.apache.camel.builder.RouteBuilder;
public class TimerKameletIntegrationNamedConfiguration extends RouteBuilder {

    /**
     * Declares one route: consume events produced by the timer-source Kamelet
     * using its "mynamedconfig" named configuration and write them to the log.
     */
    @Override
    public void configure() throws Exception {
        // Kamelet endpoint addressed via a named configuration (the path segment).
        final String source = "kamelet:iconfig-test-timer-source/mynamedconfig";
        final String sink = "log:info";
        from(source).to(sink);
    }
}
| 9,100 |
0 |
Create_ds/camel-k/e2e/common/cli
|
Create_ds/camel-k/e2e/common/cli/files/JavaDuplicateParams.java
|
// camel-k: language=java trait=telemetry.enabled=false trait=affinity.enabled=true property=prop1=false property=foo=bar
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.camel.builder.RouteBuilder;
public class JavaDuplicateParams extends RouteBuilder {

    /**
     * Builds a single route: on each timer tick, store a constant in header "m",
     * compose the message body from that header, and log the resulting body.
     */
    @Override
    public void configure() throws Exception {
        final String timer = "timer:tick";
        from(timer)
                .setHeader("m").constant("string!")
                .setBody().simple("Magic${header.m}")
                .log("${body}");
    }
}
| 9,101 |
0 |
Create_ds/camel-k/e2e/common/cli
|
Create_ds/camel-k/e2e/common/cli/files/Java.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.camel.builder.RouteBuilder;
public class Java extends RouteBuilder {

    /**
     * Single route definition: each timer tick sets header "m" to a constant,
     * derives the body from that header via a simple expression, and logs it.
     */
    @Override
    public void configure() throws Exception {
        final String tickSource = "timer:tick";
        from(tickSource)
                .setHeader("m").constant("string!")
                .setBody().simple("Magic${header.m}")
                .log("${body}");
    }
}
| 9,102 |
0 |
Create_ds/camel-k/e2e/common/languages
|
Create_ds/camel-k/e2e/common/languages/files/Prop.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.camel.builder.RouteBuilder;
public class Prop extends RouteBuilder {

    /**
     * Single route: on each timer tick, build the body from the "myproperty"
     * placeholder (resolved from configured properties) and log it.
     */
    @Override
    public void configure() throws Exception {
        final String tickSource = "timer:tick";
        from(tickSource)
                // {{myproperty}} is a property placeholder resolved at runtime.
                .setBody().simple("Magic{{myproperty}}")
                .log("${body}");
    }
}
| 9,103 |
0 |
Create_ds/camel-k/e2e/common/languages
|
Create_ds/camel-k/e2e/common/languages/files/Java.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.camel.builder.RouteBuilder;
public class Java extends RouteBuilder {

    /**
     * Declares one route: a periodic timer whose messages get header "m" set
     * to a constant, a body composed from that header, and a log of the body.
     */
    @Override
    public void configure() throws Exception {
        final String source = "timer:tick";
        from(source)
                .setHeader("m").constant("string!")
                .setBody().simple("Magic${header.m}")
                .log("${body}");
    }
}
| 9,104 |
0 |
Create_ds/camel-k/e2e/commonwithcustominstall
|
Create_ds/camel-k/e2e/commonwithcustominstall/files/TimerCustomKameletIntegration.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.lang.Exception;
import java.lang.Override;
import org.apache.camel.builder.RouteBuilder;
public class TimerCustomKameletIntegration extends RouteBuilder {

    /**
     * Declares one route: consume messages emitted by the custom timer-source
     * Kamelet (message text "hello world", URL-encoded) and send them to the log.
     */
    @Override
    public void configure() throws Exception {
        final String kameletSource = "kamelet:timer-custom-source?message=hello%20world";
        final String logSink = "log:info";
        from(kameletSource).to(logSink);
    }
}
| 9,105 |
0 |
Create_ds/camel-k/e2e/commonwithcustominstall
|
Create_ds/camel-k/e2e/commonwithcustominstall/files/Java.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.camel.builder.RouteBuilder;
public class Java extends RouteBuilder {

    /**
     * One route: timer ticks set header "m" to a fixed string, the body is
     * built from that header with a simple expression, and the body is logged.
     */
    @Override
    public void configure() throws Exception {
        final String timerEndpoint = "timer:tick";
        from(timerEndpoint)
                .setHeader("m").constant("string!")
                .setBody().simple("Magic${header.m}")
                .log("${body}");
    }
}
| 9,106 |
0 |
Create_ds/camel-k/e2e/yaks/common
|
Create_ds/camel-k/e2e/yaks/common/knative-sinkbinding-http/Rest2Channel.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.camel.builder.RouteBuilder;
public class Rest2Channel extends RouteBuilder {
public void configure() throws Exception {
rest("/")
.put("/foo/new")
.to("knative:channel/messages");
}
}
| 9,107 |
0 |
Create_ds/camel-k/e2e/yaks/openshift
|
Create_ds/camel-k/e2e/yaks/openshift/monitoring/Metrics.java
|
// camel-k: language=java trait=prometheus.enabled=true
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.camel.Exchange;
import org.apache.camel.LoggingLevel;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.micrometer.MicrometerConstants;
import javax.enterprise.context.ApplicationScoped;
/**
* This example registers the following metrics:
* <ul>
* <li>{@code camel-k-example-metrics-attempt}</code>: meters the number of calls
* made to the service to process incoming events</li>
* <li>{@code camel-k-example-metrics-error}</code>: meters the number of errors
* corresponding to the number of events that haven't been processed</li>
* <li>{@code camel-k-example-metrics-generated}</code>: meters the number of events to be processed</li>
* <li>{@code camel-k-example-metrics-redelivery}</code>: meters the number of retries
* made to process the events</li>
* <li>{@code camel-k-example-metrics-success}</code>: meters the number of events successfully processed</li>
* </ul>
* The invariant being: {@code attempt = redelivery - success - error}.
* <p> In addition, a ratio gauge {@code success-ratio = success / generated} is registered.
*
*/
@ApplicationScoped
public class Metrics extends RouteBuilder {
@Override
public void configure() {
// Global error handling: handle the failure, retry up to 2 times, and keep
// the logs quiet (no stack traces, no "exhausted" messages).
onException()
.handled(true)
.maximumRedeliveries(2)
.logStackTrace(false)
.logExhausted(false)
.log(LoggingLevel.ERROR, "Failed processing ${body}")
// Count both redelivery attempts at once when the exchange is exhausted
// (increment=2 matches maximumRedeliveries above).
.to("micrometer:counter:camel-k-example-metrics-redelivery?increment=2")
// The 'error' meter
.to("micrometer:counter:camel-k-example-metrics-error");
// Main route: generate an event every second and push it through the
// (deliberately unreliable) "service" bean, metering each stage.
from("timer:stream?period=1000")
.routeId("unreliable-service")
// Body becomes e.g. "event #42" from the timer's counter header.
.setBody(header(Exchange.TIMER_COUNTER).prepend("event #"))
.log("Processing ${body}...")
// The 'generated' meter
.to("micrometer:counter:camel-k-example-metrics-generated")
// TODO: replace with lookup by type as soon as CAMEL-15217 gets fixed
// The 'attempt' meter via @Counted interceptor
.bean("service")
// Only exchanges that were redelivered enter this filter block.
.filter(header(Exchange.REDELIVERED))
.log(LoggingLevel.WARN, "Processed ${body} after ${header.CamelRedeliveryCounter} retries")
// Increment the counter by the number of redeliveries actually performed.
.setHeader(MicrometerConstants.HEADER_COUNTER_INCREMENT, header(Exchange.REDELIVERY_COUNTER))
// The 'redelivery' meter
.to("micrometer:counter:camel-k-example-metrics-redelivery")
.end()
.log("Successfully processed ${body}")
// The 'success' meter
.to("micrometer:counter:camel-k-example-metrics-success");
}
}
| 9,108 |
0 |
Create_ds/camel-k/e2e/yaks/openshift/monitoring/app/src/main/java/org/apache/camel
|
Create_ds/camel-k/e2e/yaks/openshift/monitoring/app/src/main/java/org/apache/camel/integration/Service.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.integration;
import java.util.Random;
import io.micrometer.core.annotation.Counted;
import org.apache.camel.Exchange;
import org.apache.camel.RuntimeExchangeException;
import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.inject.Produces;
import javax.inject.Named;
@Named("service")
@ApplicationScoped
// TODO: to be removed as soon as it's possible to add `quarkus.arc.remove-unused-beans=framework` to Quarkus build configuration in Camel K
@io.quarkus.arc.Unremovable
public class Service {

    // Reused PRNG: avoids constructing (and re-seeding) a new Random on every
    // invocation. java.util.Random is safe for concurrent use.
    private final Random random = new Random();

    /**
     * Simulates an unreliable service call: fails roughly half of the time.
     *
     * @param exchange the Camel exchange being processed; attached to the
     *                 exception so the error handler has full context
     * @throws RuntimeExchangeException on the simulated random failure
     */
    @Counted(value = "camel-k-example-metrics-attempt")
    public void attempt(Exchange exchange) {
        if (random.nextDouble() < 0.5) {
            throw new RuntimeExchangeException("Random failure", exchange);
        }
    }
}
| 9,109 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api/repository/DataProductRepository.java
|
package org.apache.airavata.datacatalog.api.repository;
import org.apache.airavata.datacatalog.api.model.DataProductEntity;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.transaction.annotation.Transactional;
import java.util.Optional;
// Spring Data repository for DataProductEntity. Read methods run in
// read-only transactions; the delete method opens a read-write transaction.
@Transactional(readOnly = true)
public interface DataProductRepository extends JpaRepository<DataProductEntity, Long> {
// Look up a data product by its externally-assigned identifier (unique column).
Optional<DataProductEntity> findByExternalId(String externalId);
// Derived delete query; needs a read-write transaction, hence the override.
@Transactional
void deleteByExternalId(String externalId);
}
| 9,110 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api/repository/MetadataSchemaRepository.java
|
package org.apache.airavata.datacatalog.api.repository;
import org.apache.airavata.datacatalog.api.model.MetadataSchemaEntity;
import org.springframework.data.jpa.repository.JpaRepository;
// Spring Data repository for MetadataSchemaEntity.
public interface MetadataSchemaRepository extends JpaRepository<MetadataSchemaEntity, Long> {
// Derived query on schema_name; returns null when no schema matches.
// NOTE(review): schema_name has no unique constraint yet (see TODO on the
// entity), so a duplicate name would make this query fail — confirm intent.
MetadataSchemaEntity findBySchemaName(String schemaName);
}
| 9,111 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api/repository/MetadataSchemaFieldRepository.java
|
package org.apache.airavata.datacatalog.api.repository;
import java.util.List;
import org.apache.airavata.datacatalog.api.model.MetadataSchemaFieldEntity;
import org.springframework.data.jpa.repository.JpaRepository;
// Spring Data repository for MetadataSchemaFieldEntity.
public interface MetadataSchemaFieldRepository extends JpaRepository<MetadataSchemaFieldEntity, Long> {
// All fields belonging to the schema with the given name (traverses the
// metadataSchema association's schemaName property).
List<MetadataSchemaFieldEntity> findByMetadataSchema_SchemaName(String schemaName);
// Single field identified by its name within a named schema; null if absent.
MetadataSchemaFieldEntity findByFieldNameAndMetadataSchema_SchemaName(String fieldName, String schemaName);
}
| 9,112 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api/repository/TenantRepository.java
|
package org.apache.airavata.datacatalog.api.repository;
import java.util.Optional;
import org.apache.airavata.datacatalog.api.model.TenantEntity;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.transaction.annotation.Transactional;
// Spring Data repository for TenantEntity.
// NOTE(review): sibling DataProductRepository uses @Transactional(readOnly = true)
// at the type level; this repository only declares a read method, so readOnly
// may be intended here too — confirm before changing transaction semantics.
@Transactional
public interface TenantRepository extends JpaRepository<TenantEntity, Long> {
// Look up a tenant by its externally-assigned identifier (unique column).
Optional<TenantEntity> findByExternalId(String externalId);
}
| 9,113 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api/repository/UserRepository.java
|
package org.apache.airavata.datacatalog.api.repository;
import java.util.Optional;
import org.apache.airavata.datacatalog.api.model.TenantEntity;
import org.apache.airavata.datacatalog.api.model.UserEntity;
import org.springframework.data.jpa.repository.JpaRepository;
// Spring Data repository for UserEntity. Users are unique per (tenant, external_id).
public interface UserRepository extends JpaRepository<UserEntity, Long> {
// Look up a user by external id within an already-resolved tenant entity.
Optional<UserEntity> findByExternalIdAndTenant(String externalId, TenantEntity tenantEntity);
// Same lookup, but by the tenant's external id (joins through the tenant association).
Optional<UserEntity> findByExternalIdAndTenant_ExternalId(String externalId, String tenantExternalId);
}
| 9,114 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api/sharing/SharingManager.java
|
package org.apache.airavata.datacatalog.api.sharing;
import org.apache.airavata.datacatalog.api.DataProduct;
import org.apache.airavata.datacatalog.api.GroupInfo;
import org.apache.airavata.datacatalog.api.Permission;
import org.apache.airavata.datacatalog.api.UserInfo;
import org.apache.airavata.datacatalog.api.model.UserEntity;
import org.apache.airavata.datacatalog.api.sharing.exception.SharingException;
public interface SharingManager {
/**
 * Initialize the sharing manager as necessary. In general this would be called
 * once when a tenant is first created and also once for each tenant on startup
 * (in case initialization needs to be redone or new steps have been added to
 * initialization).
 *
 * @param tenantId external identifier of the tenant to initialize
 * @throws SharingException if initialization fails
 */
void initialize(String tenantId) throws SharingException;
/**
 * Get or create a {@link UserEntity}.
 *
 * @param userInfo identifying information for the user to resolve
 * @return the existing or newly created user entity
 * @throws SharingException if the user cannot be resolved or created
 */
UserEntity resolveUser(UserInfo userInfo) throws SharingException;
/**
 * Return true if the user has access to the data product with the given
 * permission.
 *
 * @param userInfo    the user whose access is being checked
 * @param dataProduct the data product being accessed
 * @param permission  the level of access required
 * @return true if the user holds the permission, false otherwise
 * @throws SharingException if the access check fails
 */
boolean userHasAccess(UserInfo userInfo, DataProduct dataProduct, Permission permission) throws SharingException;
/**
 * Return the name of the database view that includes sharing information
 * for each data product. The view should contain the following columns:
 * data_product_id, user_id, and permission_id where the permission_id
 * should be a number as defined in the {@link Permission} enum.
 *
 * @return the name of the sharing view
 */
String getDataProductSharingView();
/**
 * Grant permission to the user for the given data product.
 *
 * @param userInfo     the user being granted the permission
 * @param dataProduct  the data product being shared
 * @param permission   the level of access being granted
 * @param sharedByUser optional (nullable), the user who is granting the
 *                     permission
 * @throws SharingException if the grant fails
 */
void grantPermissionToUser(UserInfo userInfo, DataProduct dataProduct, Permission permission, UserInfo sharedByUser)
throws SharingException;
/**
 * Revoke permission from the user for the given data product.
 *
 * @param userInfo    the user whose permission is being revoked
 * @param dataProduct the data product being unshared
 * @param permission  the level of access being revoked
 * @throws SharingException if the revocation fails
 */
void revokePermissionFromUser(UserInfo userInfo, DataProduct dataProduct, Permission permission)
throws SharingException;
/**
 * Grant permission to the group for the given data product.
 *
 * @param groupInfo    the group being granted the permission
 * @param dataProduct  the data product being shared
 * @param permission   the level of access being granted
 * @param sharedByUser optional (nullable), the user who is granting the
 *                     permission
 * @throws SharingException if the grant fails
 */
void grantPermissionToGroup(GroupInfo groupInfo, DataProduct dataProduct, Permission permission,
UserInfo sharedByUser)
throws SharingException;
/**
 * Revoke permission from the group for the given data product.
 *
 * @param groupInfo   the group whose permission is being revoked
 * @param dataProduct the data product being unshared
 * @param permission  the level of access being revoked
 * @throws SharingException if the revocation fails
 */
void revokePermissionFromGroup(GroupInfo groupInfo, DataProduct dataProduct, Permission permission)
throws SharingException;
/**
 * Return true if public access at the given permission is granted for the
 * given data product. Public access means anonymous access; no user information
 * provided in the API request.
 *
 * @param dataProduct the data product being accessed
 * @param permission  the level of access required
 * @return true if anonymous users hold the permission, false otherwise
 * @throws SharingException if the access check fails
 */
boolean hasPublicAccess(DataProduct dataProduct, Permission permission) throws SharingException;
/**
 * Grant public access to the given data product.
 *
 * @param dataProduct the data product being made publicly accessible
 * @param permission  the level of access being granted to anonymous users
 * @throws SharingException if the grant fails
 */
void grantPublicAccess(DataProduct dataProduct, Permission permission) throws SharingException;
/**
 * Revoke public access from the given data product.
 *
 * @param dataProduct the data product whose public access is being removed
 * @param permission  the level of access being revoked from anonymous users
 * @throws SharingException if the revocation fails
 */
void revokePublicAccess(DataProduct dataProduct, Permission permission) throws SharingException;
}
| 9,115 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api/sharing
|
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api/sharing/exception/SharingException.java
|
package org.apache.airavata.datacatalog.api.sharing.exception;
/**
 * Checked exception thrown when a sharing-manager operation fails.
 * Mirrors the standard {@link Exception} constructor set.
 */
public class SharingException extends Exception {

    /** Creates a sharing exception with neither detail message nor cause. */
    public SharingException() {
        super();
    }

    /**
     * Creates a sharing exception with a detail message.
     *
     * @param message description of the failure
     */
    public SharingException(String message) {
        super(message);
    }

    /**
     * Creates a sharing exception wrapping an underlying cause.
     *
     * @param cause the underlying failure
     */
    public SharingException(Throwable cause) {
        super(cause);
    }

    /**
     * Creates a sharing exception with a detail message and an underlying cause.
     *
     * @param message description of the failure
     * @param cause   the underlying failure
     */
    public SharingException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * Full-control constructor, mirroring
     * {@link Exception#Exception(String, Throwable, boolean, boolean)}.
     *
     * @param message            description of the failure
     * @param cause              the underlying failure
     * @param enableSuppression  whether suppressed exceptions are recorded
     * @param writableStackTrace whether the stack trace is captured
     */
    public SharingException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) {
        super(message, cause, enableSuppression, writableStackTrace);
    }
}
| 9,116 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api/model/TenantEntity.java
|
package org.apache.airavata.datacatalog.api.model;
import jakarta.persistence.Basic;
import jakarta.persistence.Column;
import jakarta.persistence.Entity;
import jakarta.persistence.GeneratedValue;
import jakarta.persistence.GenerationType;
import jakarta.persistence.Id;
import jakarta.persistence.SequenceGenerator;
import jakarta.persistence.Table;
import jakarta.persistence.UniqueConstraint;
/**
 * JPA entity for a tenant. Rows are unique on external_id; entity identity
 * (equals/hashCode) is based solely on the surrogate primary key.
 */
@Entity
@Table(name = "tenant", uniqueConstraints = { @UniqueConstraint(columnNames = { "external_id" }) })
public class TenantEntity {

    // Surrogate primary key, assigned from the tenant_tenant_id_seq sequence.
    @Id
    @SequenceGenerator(name = "tenant_tenant_id_seq", sequenceName = "tenant_tenant_id_seq", allocationSize = 1)
    @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "tenant_tenant_id_seq")
    @Column(name = "tenant_id")
    private Long tenantId;

    /**
     * The identifier for the tenant. This external identifier comes from the tenant
     * management service that data catalog is configured to use.
     */
    @Basic
    @Column(name = "external_id", nullable = false)
    private String externalId;

    // Required display name of the tenant.
    @Basic
    @Column(name = "name", nullable = false)
    private String name;

    public Long getTenantId() {
        return tenantId;
    }

    public void setTenantId(Long tenantId) {
        this.tenantId = tenantId;
    }

    public String getExternalId() {
        return externalId;
    }

    public void setExternalId(String externalId) {
        this.externalId = externalId;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    // Same value as the conventional 31 * 1 + hash expansion, keyed on the id only.
    @Override
    public int hashCode() {
        return 31 + (tenantId == null ? 0 : tenantId.hashCode());
    }

    // Two tenants are equal iff they are exactly the same class and share tenantId
    // (both-null ids also compare equal, matching the generated-equals convention).
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        TenantEntity other = (TenantEntity) obj;
        return tenantId == null ? other.tenantId == null : tenantId.equals(other.tenantId);
    }
}
| 9,117 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api/model/UserEntity.java
|
package org.apache.airavata.datacatalog.api.model;
import jakarta.persistence.Basic;
import jakarta.persistence.Column;
import jakarta.persistence.Entity;
import jakarta.persistence.GeneratedValue;
import jakarta.persistence.GenerationType;
import jakarta.persistence.Id;
import jakarta.persistence.JoinColumn;
import jakarta.persistence.ManyToOne;
import jakarta.persistence.SequenceGenerator;
import jakarta.persistence.Table;
import jakarta.persistence.UniqueConstraint;
/**
 * JPA entity for a user. Users are unique per (tenant_id, external_id);
 * entity identity (equals/hashCode) is based solely on the surrogate key.
 */
@Entity
// 'user' is a reserved word, so naming this table 'user_table'
@Table(name = "user_table", uniqueConstraints = { @UniqueConstraint(columnNames = { "tenant_id", "external_id" }) })
public class UserEntity {

    // Surrogate primary key, assigned from the user_user_id_seq sequence.
    @Id
    @SequenceGenerator(name = "user_user_id_seq", sequenceName = "user_user_id_seq", allocationSize = 1)
    @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "user_user_id_seq")
    @Column(name = "user_id")
    private Long userId;

    /**
     * The identifier for the user. This external identifier comes from the user
     * management service that data catalog is configured to use.
     */
    @Basic
    @Column(name = "external_id", nullable = false)
    private String externalId;

    // Required display name of the user.
    @Basic
    @Column(name = "name", nullable = false)
    private String name;

    // Owning tenant; mandatory and immutable once the row exists.
    @ManyToOne(optional = false)
    @JoinColumn(name = "tenant_id", referencedColumnName = "tenant_id", nullable = false, updatable = false)
    private TenantEntity tenant;

    public Long getUserId() {
        return userId;
    }

    public void setUserId(Long userId) {
        this.userId = userId;
    }

    public String getExternalId() {
        return externalId;
    }

    public void setExternalId(String externalId) {
        this.externalId = externalId;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public TenantEntity getTenant() {
        return tenant;
    }

    public void setTenant(TenantEntity tenant) {
        this.tenant = tenant;
    }

    // Same value as the conventional 31 * 1 + hash expansion, keyed on the id only.
    @Override
    public int hashCode() {
        return 31 + (userId == null ? 0 : userId.hashCode());
    }

    // Two users are equal iff they are exactly the same class and share userId
    // (both-null ids also compare equal, matching the generated-equals convention).
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        UserEntity other = (UserEntity) obj;
        return userId == null ? other.userId == null : userId.equals(other.userId);
    }
}
| 9,118 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api/model/DataProductEntity.java
|
package org.apache.airavata.datacatalog.api.model;
import java.util.HashSet;
import java.util.Set;
import org.hibernate.annotations.Type;
import com.fasterxml.jackson.databind.JsonNode;
import io.hypersistence.utils.hibernate.type.json.JsonType;
import jakarta.persistence.Basic;
import jakarta.persistence.Column;
import jakarta.persistence.Entity;
import jakarta.persistence.GeneratedValue;
import jakarta.persistence.GenerationType;
import jakarta.persistence.Id;
import jakarta.persistence.JoinColumn;
import jakarta.persistence.JoinTable;
import jakarta.persistence.ManyToMany;
import jakarta.persistence.ManyToOne;
import jakarta.persistence.SequenceGenerator;
import jakarta.persistence.Table;
import jakarta.persistence.UniqueConstraint;
// JPA entity for a data product: a node in a parent/child hierarchy, owned by a
// user, carrying free-form jsonb metadata and links to zero or more metadata
// schemas. Entity identity (equals/hashCode) is based solely on the surrogate key.
@Entity
@Table(name = "data_product", uniqueConstraints = { @UniqueConstraint(columnNames = { "external_id" }) })
public class DataProductEntity {
// Surrogate primary key, assigned from the data_product_data_product_id_seq sequence.
@Id
@SequenceGenerator(name = "data_product_data_product_id_seq", sequenceName = "data_product_data_product_id_seq", allocationSize = 1)
@GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "data_product_data_product_id_seq")
@Column(name = "data_product_id")
private Long dataProductId;
// Optional self-reference forming a hierarchy; null for root data products.
@ManyToOne(optional = true)
@JoinColumn(name = "parent_data_product_id", referencedColumnName = "data_product_id", nullable = true)
private DataProductEntity parentDataProductEntity;
// Externally-assigned identifier (unique per the table constraint above).
@Basic
@Column(name = "external_id", nullable = false)
private String externalId;
@Basic
@Column(name = "name", nullable = false)
private String name;
// Free-form metadata persisted as a PostgreSQL jsonb column via hypersistence JsonType.
@Type(JsonType.class)
@Column(name = "metadata", columnDefinition = "jsonb")
private JsonNode metadata;
// Schemas this product's metadata conforms to, via the join table
// data_product_metadata_schema; initialized so add/remove never sees null.
@ManyToMany
@JoinTable(name = "data_product_metadata_schema", joinColumns = @JoinColumn(name = "data_product_id"), inverseJoinColumns = @JoinColumn(name = "metadata_schema_id"))
private Set<MetadataSchemaEntity> metadataSchemas = new HashSet<>();
// Owning user; mandatory and immutable once the row exists.
@ManyToOne(optional = false)
@JoinColumn(name = "owner_id", referencedColumnName = "user_id", nullable = false, updatable = false)
private UserEntity owner;
public Long getDataProductId() {
return dataProductId;
}
public void setDataProductId(Long dataProductId) {
this.dataProductId = dataProductId;
}
public DataProductEntity getParentDataProductEntity() {
return parentDataProductEntity;
}
public void setParentDataProductEntity(DataProductEntity parentDataProductEntity) {
this.parentDataProductEntity = parentDataProductEntity;
}
public String getExternalId() {
return externalId;
}
public void setExternalId(String externalId) {
this.externalId = externalId;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public JsonNode getMetadata() {
return metadata;
}
public void setMetadata(JsonNode metadata) {
this.metadata = metadata;
}
// NOTE(review): exposes the internal mutable set directly; callers can modify
// the association without going through add/removeMetadataSchema — confirm intended.
public Set<MetadataSchemaEntity> getMetadataSchemas() {
return metadataSchemas;
}
public void setMetadataSchemas(Set<MetadataSchemaEntity> metadataSchemas) {
this.metadataSchemas = metadataSchemas;
}
// Convenience mutator: associate one schema with this data product.
public void addMetadataSchema(MetadataSchemaEntity metadataSchema) {
this.metadataSchemas.add(metadataSchema);
}
// Convenience mutator: dissociate one schema from this data product.
public void removeMetadataSchema(MetadataSchemaEntity metadataSchema) {
this.metadataSchemas.remove(metadataSchema);
}
public UserEntity getOwner() {
return owner;
}
public void setOwner(UserEntity owner) {
this.owner = owner;
}
// Identity hash keyed on the primary key only, matching equals below.
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((dataProductId == null) ? 0 : dataProductId.hashCode());
return result;
}
// Exact-class equality on dataProductId; both-null ids compare equal.
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
DataProductEntity other = (DataProductEntity) obj;
if (dataProductId == null) {
if (other.dataProductId != null)
return false;
} else if (!dataProductId.equals(other.dataProductId))
return false;
return true;
}
}
| 9,119 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api/model/MetadataSchemaEntity.java
|
package org.apache.airavata.datacatalog.api.model;
import java.util.Set;
import jakarta.persistence.Basic;
import jakarta.persistence.Column;
import jakarta.persistence.Entity;
import jakarta.persistence.GeneratedValue;
import jakarta.persistence.GenerationType;
import jakarta.persistence.Id;
import jakarta.persistence.OneToMany;
import jakarta.persistence.SequenceGenerator;
import jakarta.persistence.Table;
@Entity
// TODO: unique constraint on schema_name, tenant
@Table(name = "metadata_schema")
public class MetadataSchemaEntity {

    /** Database-generated primary key. */
    @Id
    @SequenceGenerator(name = "metadata_schema_metadata_schema_id_seq", sequenceName = "metadata_schema_metadata_schema_id_seq", allocationSize = 1)
    @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "metadata_schema_metadata_schema_id_seq")
    @Column(name = "metadata_schema_id")
    private Long metadataSchemaId;

    /** Human-readable schema name. */
    @Basic
    @Column(name = "schema_name", nullable = false)
    private String schemaName;

    /** Fields declared by this schema; the owning side is the field entity. */
    @OneToMany(mappedBy = "metadataSchema")
    private Set<MetadataSchemaFieldEntity> metadataSchemaFields;

    /** Returns the database-generated primary key. */
    public Long getMetadataSchemaId() {
        return this.metadataSchemaId;
    }

    /** Sets the database-generated primary key. */
    public void setMetadataSchemaId(Long metadataSchemaId) {
        this.metadataSchemaId = metadataSchemaId;
    }

    /** Returns the schema name. */
    public String getSchemaName() {
        return this.schemaName;
    }

    /** Sets the schema name. */
    public void setSchemaName(String schemaName) {
        this.schemaName = schemaName;
    }

    /** Returns the fields declared by this schema. */
    public Set<MetadataSchemaFieldEntity> getMetadataSchemaFields() {
        return this.metadataSchemaFields;
    }

    /** Replaces the fields declared by this schema. */
    public void setMetadataSchemaFields(Set<MetadataSchemaFieldEntity> metadataSchemaFields) {
        this.metadataSchemaFields = metadataSchemaFields;
    }

    /** Hash code derived solely from the primary key, consistent with equals. */
    @Override
    public int hashCode() {
        // Equivalent to the conventional prime-accumulator form: 31 * 1 + idHash.
        return 31 + (metadataSchemaId == null ? 0 : metadataSchemaId.hashCode());
    }

    /** Entities are equal when their primary keys are equal (or both null). */
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        MetadataSchemaEntity other = (MetadataSchemaEntity) obj;
        return metadataSchemaId == null
                ? other.metadataSchemaId == null
                : metadataSchemaId.equals(other.metadataSchemaId);
    }
}
| 9,120 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/core/src/main/java/org/apache/airavata/datacatalog/api/model/MetadataSchemaFieldEntity.java
|
package org.apache.airavata.datacatalog.api.model;
import org.apache.airavata.datacatalog.api.FieldValueType;
import jakarta.persistence.Basic;
import jakarta.persistence.Column;
import jakarta.persistence.Entity;
import jakarta.persistence.EnumType;
import jakarta.persistence.Enumerated;
import jakarta.persistence.GeneratedValue;
import jakarta.persistence.GenerationType;
import jakarta.persistence.Id;
import jakarta.persistence.JoinColumn;
import jakarta.persistence.ManyToOne;
import jakarta.persistence.SequenceGenerator;
import jakarta.persistence.Table;
import jakarta.persistence.UniqueConstraint;
@Entity
@Table(name = "metadata_schema_field", uniqueConstraints = {
        @UniqueConstraint(columnNames = { "field_name", "metadata_schema_id" }) })
public class MetadataSchemaFieldEntity {

    /** Database-generated primary key. */
    @Id
    @SequenceGenerator(name = "metadata_schema_field_metadata_schema_field_id", sequenceName = "metadata_schema_field_metadata_schema_field_id", allocationSize = 1)
    @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "metadata_schema_field_metadata_schema_field_id")
    @Column(name = "metadata_schema_field_id")
    private Long metadataSchemaFieldId;

    /** Field name; unique within a schema (see table-level unique constraint). */
    @Basic
    @Column(name = "field_name", nullable = false)
    private String fieldName;

    /** JSONPath expression locating this field inside the product metadata. */
    @Basic
    @Column(name = "json_path", nullable = false)
    private String jsonPath;

    /** Value type of the field, stored as the enum constant name. */
    @Enumerated(EnumType.STRING)
    @Column(nullable = false)
    private FieldValueType fieldValueType;

    /** Owning schema; immutable after creation (updatable = false). */
    @ManyToOne(optional = false)
    @JoinColumn(name = "metadata_schema_id", nullable = false, updatable = false)
    private MetadataSchemaEntity metadataSchema;

    /** Returns the database-generated primary key. */
    public Long getMetadataSchemaFieldId() {
        return this.metadataSchemaFieldId;
    }

    /** Sets the database-generated primary key. */
    public void setMetadataSchemaFieldId(Long metadataSchemaFieldId) {
        this.metadataSchemaFieldId = metadataSchemaFieldId;
    }

    /** Returns the field name. */
    public String getFieldName() {
        return this.fieldName;
    }

    /** Sets the field name. */
    public void setFieldName(String fieldName) {
        this.fieldName = fieldName;
    }

    /** Returns the JSONPath expression for this field. */
    public String getJsonPath() {
        return this.jsonPath;
    }

    /** Sets the JSONPath expression for this field. */
    public void setJsonPath(String jsonPath) {
        this.jsonPath = jsonPath;
    }

    /** Returns the value type of this field. */
    public FieldValueType getFieldValueType() {
        return this.fieldValueType;
    }

    /** Sets the value type of this field. */
    public void setFieldValueType(FieldValueType fieldValueType) {
        this.fieldValueType = fieldValueType;
    }

    /** Returns the schema that declares this field. */
    public MetadataSchemaEntity getMetadataSchema() {
        return this.metadataSchema;
    }

    /** Sets the schema that declares this field. */
    public void setMetadataSchema(MetadataSchemaEntity metadataSchema) {
        this.metadataSchema = metadataSchema;
    }

    /** Hash code derived solely from the primary key, consistent with equals. */
    @Override
    public int hashCode() {
        // Equivalent to the conventional prime-accumulator form: 31 * 1 + idHash.
        return 31 + (metadataSchemaFieldId == null ? 0 : metadataSchemaFieldId.hashCode());
    }

    /** Entities are equal when their primary keys are equal (or both null). */
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        MetadataSchemaFieldEntity other = (MetadataSchemaFieldEntity) obj;
        return metadataSchemaFieldId == null
                ? other.metadataSchemaFieldId == null
                : metadataSchemaFieldId.equals(other.metadataSchemaFieldId);
    }
}
| 9,121 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/custos-sharing/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/custos-sharing/src/main/java/org/apache/airavata/datacatalog/api/sharing/CustosTestDataBootstrap.java
|
package org.apache.airavata.datacatalog.api.sharing;
import java.io.IOException;
import org.apache.custos.clients.CustosClientProvider;
import org.apache.custos.iam.service.FindUsersResponse;
import org.apache.custos.user.management.client.UserManagementClient;
/**
 * One-off bootstrap utility that ensures a demo user exists in a Custos tenant
 * so the data catalog can be exercised locally.
 *
 * <p>Reads the tenant client id/secret from the {@code CUSTOS_CLIENT_ID} and
 * {@code CUSTOS_CLIENT_SEC} environment variables and talks to a Custos server
 * on localhost:7000 over plain text (local development only).
 */
public class CustosTestDataBootstrap {

    public static void main(String[] args) throws IOException {

        // Super tenant
        // String clientId = System.getenv("CUSTOS_SUPER_CLIENT_ID");
        // String clientSec = System.getenv("CUSTOS_SUPER_CLIENT_SEC");
        // String childClientId = System.getenv("CUSTOS_CLIENT_ID");
        String clientId = System.getenv("CUSTOS_CLIENT_ID");
        String clientSec = System.getenv("CUSTOS_CLIENT_SEC");

        CustosClientProvider custosClientProvider = new CustosClientProvider.Builder().setServerHost("localhost")
                .setServerPort(7000)
                .setClientId(clientId) // client Id generated from above step or any active tenant id
                .setClientSec(clientSec)
                .usePlainText(true) // Don't use this in production setup
                .build();

        // try-with-resources so the client (and its underlying channel) is
        // closed on exit — matches how UserManagementClient is used elsewhere
        // in this code base; previously the client was never closed.
        try (UserManagementClient userManagementClient = custosClientProvider.getUserManagementClient()) {

            String testUsername = "demouser";
            FindUsersResponse findUsersResponse = userManagementClient.findUser(testUsername, null, null, null, 0, 1);
            if (findUsersResponse.getUsersCount() == 0) {

                // Create and enable a demo user
                System.out.println("Creating " + testUsername + " user in client " + clientId);
                userManagementClient.registerUser(testUsername, "Demo", "User",
                        "testpassword", "[email protected]", false);
                userManagementClient.enableUser(testUsername);

                // Should find it now
                findUsersResponse = userManagementClient.findUser(testUsername, null, null, null, 0, 1);
            }

            // Super tenant user query
            // FindUsersResponse findUsersResponse =
            // userManagementClient.findUsers(childClientId, null, testUsername, null,
            // null, null, 0, 1);
            System.out.println("findUsersResponse=" + findUsersResponse);
        }
    }
}
| 9,122 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/custos-sharing/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/custos-sharing/src/main/java/org/apache/airavata/datacatalog/api/sharing/SharingManagerImpl.java
|
package org.apache.airavata.datacatalog.api.sharing;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import org.apache.airavata.datacatalog.api.DataProduct;
import org.apache.airavata.datacatalog.api.GroupInfo;
import org.apache.airavata.datacatalog.api.Permission;
import org.apache.airavata.datacatalog.api.UserInfo;
import org.apache.airavata.datacatalog.api.model.TenantEntity;
import org.apache.airavata.datacatalog.api.model.UserEntity;
import org.apache.airavata.datacatalog.api.repository.TenantRepository;
import org.apache.airavata.datacatalog.api.repository.UserRepository;
import org.apache.airavata.datacatalog.api.sharing.exception.SharingException;
import org.apache.custos.clients.CustosClientProvider;
import org.apache.custos.group.management.client.GroupManagementClient;
import org.apache.custos.iam.service.FindUsersResponse;
import org.apache.custos.iam.service.UserRepresentation;
import org.apache.custos.sharing.core.Entity;
import org.apache.custos.sharing.core.EntityType;
import org.apache.custos.sharing.core.PermissionType;
import org.apache.custos.sharing.core.exceptions.CustosSharingException;
import org.apache.custos.sharing.core.impl.SharingImpl;
import org.apache.custos.sharing.core.utils.Constants;
import org.apache.custos.user.management.client.UserManagementClient;
import org.apache.custos.user.profile.service.Group;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import jakarta.annotation.PostConstruct;
public class SharingManagerImpl implements SharingManager {
private static final Logger logger = LoggerFactory.getLogger(SharingManagerImpl.class);
private static final String DATA_PRODUCT_ENTITY_TYPE_ID = "DATA_PRODUCT";
@Autowired
SharingImpl custosSharingImpl;
@Autowired
TenantRepository tenantRepository;
@Autowired
UserRepository userRepository;
CustosClientProvider custosClientProvider;
private static final String PUBLIC_ACCESS_GROUP = "public_access_group";
@Value("${identity.server.hostname}")
String hostname;
@Value("${identity.server.port}")
int port;
@Value("${identity.server.clientId}")
String clientId;
@Value("${identity.server.clientSec}")
String clientSec;
@Value("${identity.server.usePlainText:false}")
boolean usePlainText;
@PostConstruct
public void initializeTenants() throws SharingException {
logger.info("Initializing all tenants");
List<TenantEntity> tenants = tenantRepository.findAll();
for (TenantEntity tenant : tenants) {
this.initialize(tenant.getExternalId());
}
logger.info("Initializing Custos client provider");
custosClientProvider = new CustosClientProvider.Builder()
.setServerHost(hostname)
.setServerPort(port)
.setClientId(clientId)
.setClientSec(clientSec)
.usePlainText(usePlainText)
.build();
}
@Override
public void initialize(String tenantId) throws SharingException {
logger.info("Initializing tenant {}", tenantId);
// Create DataProduct entity type
EntityType entityType = EntityType.newBuilder()
.setId(DATA_PRODUCT_ENTITY_TYPE_ID)
.setName("Data Product")
.build();
try {
Optional<EntityType> existingEntityType = custosSharingImpl.getEntityType(tenantId, entityType.getId());
if (!existingEntityType.isPresent()) {
custosSharingImpl.createEntityType(tenantId, entityType);
}
} catch (CustosSharingException e) {
throw new SharingException(e);
}
Set<Permission> allPermissions = new HashSet<>(Arrays.asList(Permission.values()));
allPermissions.remove(Permission.UNRECOGNIZED);
// Create permission types for all permissions
for (Permission permission : allPermissions) {
PermissionType permissionType = PermissionType.newBuilder()
.setId(permission.name())
.setName(permission.name())
.build();
try {
Optional<PermissionType> existingPermissionType = custosSharingImpl.getPermissionType(tenantId,
permissionType.getId());
if (!existingPermissionType.isPresent()) {
custosSharingImpl.createPermissionType(permissionType, tenantId);
}
} catch (CustosSharingException e) {
throw new SharingException(e);
}
}
}
@Override
public UserEntity resolveUser(UserInfo userInfo) throws SharingException {
Optional<UserEntity> maybeUserEntity = userRepository.findByExternalIdAndTenant_ExternalId(userInfo.getUserId(),
userInfo.getTenantId());
if (maybeUserEntity.isPresent()) {
return maybeUserEntity.get();
} else {
try (UserManagementClient userManagementClient = custosClientProvider.getUserManagementClient()) {
FindUsersResponse findUsersResponse = userManagementClient.findUsers(userInfo.getTenantId(),
null, userInfo.getUserId(), null, null, null, 0, 1);
if (!findUsersResponse.getUsersList().isEmpty()) {
UserRepresentation userProfile = findUsersResponse.getUsersList().get(0);
TenantEntity tenantEntity = resolveTenant(userInfo);
UserEntity userEntity = new UserEntity();
userEntity.setExternalId(userProfile.getUsername());
userEntity.setName(userProfile.getUsername());
userEntity.setTenant(tenantEntity);
return userRepository.save(userEntity);
} else {
throw new SharingException("User " + userInfo.getUserId() + " in tenant "
+ userInfo.getTenantId() + " not found in Identity Sever ");
}
} catch (IOException e) {
throw new SharingException("Error occurred while resolving user " + userInfo.getUserId()
+ " tenant " + userInfo.getTenantId(), e);
}
}
}
@Override
public boolean userHasAccess(UserInfo userInfo, DataProduct dataProduct, Permission permission)
throws SharingException {
try {
return custosSharingImpl.userHasAccess(userInfo.getTenantId(), dataProduct.getDataProductId(),
permission.name(),
userInfo.getUserId());
} catch (CustosSharingException e) {
throw new SharingException(e);
}
}
@Override
public String getDataProductSharingView() {
return "custos_data_product_sharing_view";
}
@Override
public void grantPermissionToUser(UserInfo userInfo, DataProduct dataProduct, Permission permission,
UserInfo sharedByUser)
throws SharingException {
List<String> userIds = new ArrayList<>();
userIds.add(userInfo.getUserId());
String sharedByUserId = sharedByUser != null ? sharedByUser.getUserId() : null;
try {
createDataProductEntityIfMissing(dataProduct);
// OWNER permission can't be assigned but it is granted when the data product is
// created
if (permission != Permission.OWNER) {
custosSharingImpl.shareEntity(userInfo.getTenantId(),
dataProduct.getDataProductId(), permission.name(), userIds, true, Constants.USER,
sharedByUserId);
}
} catch (CustosSharingException e) {
throw new SharingException(e);
}
}
@Override
public void revokePermissionFromUser(UserInfo userInfo, DataProduct dataProduct, Permission permission)
throws SharingException {
List<String> userIds = new ArrayList<>();
userIds.add(userInfo.getUserId());
try {
custosSharingImpl.revokePermission(userInfo.getTenantId(),
dataProduct.getDataProductId(), permission.name(), userIds);
} catch (CustosSharingException e) {
throw new SharingException(e);
}
}
@Override
public void grantPermissionToGroup(GroupInfo groupInfo, DataProduct dataProduct, Permission permission,
UserInfo sharedByUser)
throws SharingException {
List<String> userIds = new ArrayList<>();
userIds.add(groupInfo.getGroupId());
String sharedByUserId = sharedByUser != null ? sharedByUser.getUserId() : null;
try {
custosSharingImpl.shareEntity(groupInfo.getTenantId(),
dataProduct.getDataProductId(), permission.name(), userIds, true, Constants.GROUP, sharedByUserId);
} catch (CustosSharingException e) {
throw new SharingException(e);
}
}
@Override
public void revokePermissionFromGroup(GroupInfo groupInfo, DataProduct dataProduct, Permission permission)
throws SharingException {
List<String> userIds = new ArrayList<>();
userIds.add(groupInfo.getGroupId());
try {
custosSharingImpl.revokePermission(groupInfo.getTenantId(),
dataProduct.getDataProductId(), permission.name(), userIds);
} catch (CustosSharingException e) {
throw new SharingException(e);
}
}
@Override
public boolean hasPublicAccess(DataProduct dataProduct, Permission permission) throws SharingException {
try {
return custosSharingImpl.userHasAccess(dataProduct.getOwner().getTenantId(), dataProduct.getDataProductId(),
permission.name(),
PUBLIC_ACCESS_GROUP);
} catch (CustosSharingException e) {
throw new SharingException(e);
}
}
@Override
public void grantPublicAccess(DataProduct dataProduct, Permission permission) throws SharingException {
// TODO: create PUBLIC GROUP If not exists
List<String> userIds = new ArrayList<>();
userIds.add(PUBLIC_ACCESS_GROUP);
try {
custosSharingImpl.shareEntity(dataProduct.getOwner().getTenantId(),
dataProduct.getDataProductId(), permission.name(), userIds, true, Constants.GROUP, null);
} catch (CustosSharingException e) {
throw new SharingException(e);
}
}
@Override
public void revokePublicAccess(DataProduct dataProduct, Permission permission) throws SharingException {
List<String> userIds = new ArrayList<>();
userIds.add(PUBLIC_ACCESS_GROUP);
try {
custosSharingImpl.revokePermission(dataProduct.getOwner().getTenantId(),
dataProduct.getDataProductId(), permission.name(), userIds);
} catch (CustosSharingException e) {
throw new SharingException(e);
}
}
private void createDataProductEntityIfMissing(DataProduct dataProduct) throws CustosSharingException {
Entity dataProductEntity = Entity.newBuilder()
.setId(dataProduct.getDataProductId())
.setParentId(dataProduct.getParentDataProductId())
.setName(dataProduct.getName())
.setType(DATA_PRODUCT_ENTITY_TYPE_ID)
.setOwnerId(dataProduct.getOwner().getUserId())
.build();
String tenantId = dataProduct.getOwner().getTenantId();
if (!custosSharingImpl.isEntityExists(tenantId, dataProduct.getDataProductId())) {
custosSharingImpl.createEntity(dataProductEntity, tenantId);
}
}
private TenantEntity resolveTenant(UserInfo userInfo) throws SharingException {
Optional<TenantEntity> maybeTenantEntity = tenantRepository.findByExternalId(userInfo.getTenantId());
if (maybeTenantEntity.isPresent()) {
return maybeTenantEntity.get();
} else {
TenantEntity newTenantEntity = new TenantEntity();
newTenantEntity.setExternalId(userInfo.getTenantId());
newTenantEntity.setName(userInfo.getTenantId());
newTenantEntity = tenantRepository.save(newTenantEntity);
initialize(newTenantEntity.getExternalId());
return newTenantEntity;
}
}
private void createPublicGroup(String tenantId) throws SharingException {
try (GroupManagementClient groupManagementClient = custosClientProvider.getGroupManagementClient()) {
// TODO: but how do I specify the tenantId to search in?
Group findGroupsResponse = groupManagementClient.findGroup(clientId, PUBLIC_ACCESS_GROUP, null);
} catch (IOException e) {
throw new SharingException("Error occurred while resolving public group " + PUBLIC_ACCESS_GROUP
+ " tenant " + tenantId, e);
}
}
}
| 9,123 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/test/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/test/java/org/apache/airavata/datacatalog/api/sharing/SimpleSharingManagerImplTest.java
|
package org.apache.airavata.datacatalog.api.sharing;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Optional;
import java.util.Set;
import java.util.UUID;
import org.apache.airavata.datacatalog.api.DataProduct;
import org.apache.airavata.datacatalog.api.GroupInfo;
import org.apache.airavata.datacatalog.api.Permission;
import org.apache.airavata.datacatalog.api.UserInfo;
import org.apache.airavata.datacatalog.api.model.DataProductEntity;
import org.apache.airavata.datacatalog.api.model.TenantEntity;
import org.apache.airavata.datacatalog.api.model.UserEntity;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimpleGroupEntity;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimpleTenantEntity;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimpleUserEntity;
import org.apache.airavata.datacatalog.api.repository.DataProductRepository;
import org.apache.airavata.datacatalog.api.repository.TenantRepository;
import org.apache.airavata.datacatalog.api.repository.UserRepository;
import org.apache.airavata.datacatalog.api.repository.sharing.simple.SimpleGroupRepository;
import org.apache.airavata.datacatalog.api.repository.sharing.simple.SimpleTenantRepository;
import org.apache.airavata.datacatalog.api.repository.sharing.simple.SimpleUserRepository;
import org.apache.airavata.datacatalog.api.sharing.SimpleSharingManagerImplTest.MyConfiguration;
import org.apache.airavata.datacatalog.api.sharing.exception.SharingException;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.domain.EntityScan;
import org.springframework.boot.test.autoconfigure.jdbc.AutoConfigureTestDatabase;
import org.springframework.boot.test.autoconfigure.jdbc.AutoConfigureTestDatabase.Replace;
import org.springframework.boot.test.autoconfigure.orm.jpa.DataJpaTest;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.jpa.repository.config.EnableJpaRepositories;
import org.springframework.test.context.ContextConfiguration;
@DataJpaTest
@AutoConfigureTestDatabase(replace = Replace.NONE)
@ContextConfiguration(classes = MyConfiguration.class)
public class SimpleSharingManagerImplTest {
/**
 * Minimal Spring context for the test: data-catalog JPA repositories and
 * entities plus the SimpleSharingManagerImpl bean under test.
 */
@Configuration
@EnableJpaRepositories("org.apache.airavata.datacatalog.api.repository")
@EntityScan("org.apache.airavata.datacatalog.api.model")
public static class MyConfiguration {
    @Bean
    public SharingManager getSharingManager() {
        return new SimpleSharingManagerImpl();
    }
}
// Bean under test, plus the repositories used to arrange and verify test data.
@Autowired
SimpleSharingManagerImpl simpleSharingManagerImpl;

@Autowired
UserRepository userRepository;

@Autowired
TenantRepository tenantRepository;

@Autowired
SimpleUserRepository simpleUserRepository;

@Autowired
SimpleGroupRepository simpleGroupRepository;

@Autowired
SimpleTenantRepository simpleTenantRepository;

@Autowired
DataProductRepository dataProductRepository;
@Test
void testResolveUserCreatesUserAndTenantIfMissing() throws SharingException {
Optional<UserEntity> maybeUserEntity = userRepository.findByExternalIdAndTenant_ExternalId("userId",
"tenantId");
assertFalse(maybeUserEntity.isPresent());
Optional<TenantEntity> maybeTenantEntity = tenantRepository.findByExternalId("tenantId");
assertFalse(maybeTenantEntity.isPresent());
UserInfo userInfo = UserInfo.newBuilder().setUserId("userId").setTenantId("tenantId").build();
UserEntity userEntity = this.simpleSharingManagerImpl.resolveUser(userInfo);
assertEquals(userEntity.getExternalId(), "userId");
assertEquals(userEntity.getName(), "userId");
assertEquals(userEntity.getTenant().getExternalId(), "tenantId");
maybeUserEntity = userRepository.findByExternalIdAndTenant_ExternalId("userId", "tenantId");
assertTrue(maybeUserEntity.isPresent());
maybeTenantEntity = tenantRepository.findByExternalId("tenantId");
assertTrue(maybeTenantEntity.isPresent());
}
@Test
void testResolveUserFindsExistingUserAndTenant() throws SharingException {
String userId = "userId";
String tenantId = "tenantId";
TenantEntity tenantEntity = new TenantEntity();
tenantEntity.setExternalId(tenantId);
tenantEntity.setName("tenant name");
tenantRepository.save(tenantEntity);
SimpleTenantEntity simpleTenantEntity = new SimpleTenantEntity();
simpleTenantEntity.setExternalId(tenantId);
simpleTenantEntity.setName(tenantId);
simpleTenantEntity.setTenant(tenantEntity);
simpleTenantRepository.save(simpleTenantEntity);
UserEntity testUserEntity = new UserEntity();
testUserEntity.setExternalId(userId);
testUserEntity.setName("user name");
testUserEntity.setTenant(tenantEntity);
UserEntity savedUserEntity = userRepository.save(testUserEntity);
SimpleUserEntity simpleUserEntity = new SimpleUserEntity();
simpleUserEntity.setExternalId(userId);
simpleUserEntity.setName(userId);
simpleUserEntity.setUser(testUserEntity);
simpleUserEntity.setSimpleTenant(simpleTenantEntity);
simpleUserRepository.save(simpleUserEntity);
UserInfo userInfo = UserInfo.newBuilder().setUserId(userId).setTenantId(tenantId).build();
UserEntity userEntity = this.simpleSharingManagerImpl.resolveUser(userInfo);
assertEquals(userEntity.getExternalId(), userId);
assertEquals(userEntity.getName(), "user name");
assertEquals(userEntity.getTenant().getExternalId(), tenantId);
// Double check same database record
assertEquals(userEntity.getUserId(), savedUserEntity.getUserId());
}
@Test
public void testUserHasAccess() throws SharingException {
UserInfo userA = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userA").build();
UserEntity testUserA = simpleSharingManagerImpl.resolveUser(userA);
UserInfo userB = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userB").build();
// UserEntity testUserB = simpleSharingManagerImpl
// .resolveUser(userB);
DataProductEntity dataProductEntity = new DataProductEntity();
dataProductEntity.setExternalId(UUID.randomUUID().toString());
dataProductEntity.setOwner(testUserA);
dataProductEntity.setName("test data product");
dataProductRepository.save(dataProductEntity);
// Check that userB doesn't have READ access to the data product
DataProduct dataProduct = DataProduct.newBuilder()
.setDataProductId(dataProductEntity.getExternalId()) // only need the data product id
.build();
boolean hasAccess = simpleSharingManagerImpl.userHasAccess(userB, dataProduct, Permission.READ);
assertFalse(hasAccess);
// Grant READ access to userB for the data product
simpleSharingManagerImpl.grantPermissionToUser(userB, dataProduct, Permission.READ, userA);
// Check that userB does now have READ access to the data product
hasAccess = simpleSharingManagerImpl.userHasAccess(userB, dataProduct, Permission.READ);
assertTrue(hasAccess);
}
@Test
public void testRevokePermissionFromUser() throws SharingException {
UserInfo userA = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userA").build();
UserEntity testUserA = simpleSharingManagerImpl.resolveUser(userA);
UserInfo userB = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userB").build();
DataProductEntity dataProductEntity = new DataProductEntity();
dataProductEntity.setExternalId(UUID.randomUUID().toString());
dataProductEntity.setOwner(testUserA);
dataProductEntity.setName("test data product");
dataProductRepository.save(dataProductEntity);
DataProduct dataProduct = DataProduct.newBuilder()
.setDataProductId(dataProductEntity.getExternalId()) // only need the data product id
.build();
// Grant READ access to userB for the data product
simpleSharingManagerImpl.grantPermissionToUser(userB, dataProduct, Permission.READ, userA);
// Check that userB does have READ access to the data product
assertTrue(simpleSharingManagerImpl.userHasAccess(userB, dataProduct, Permission.READ));
// Revoke READ access from userB
simpleSharingManagerImpl.revokePermissionFromUser(userB, dataProduct, Permission.READ);
// Check that userB does not now have READ access
assertFalse(simpleSharingManagerImpl.userHasAccess(userB, dataProduct, Permission.READ));
}
@Test
public void testUserHasAccessViaGroupMembership() throws SharingException {
UserInfo userA = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userA").build();
UserEntity testUserA = simpleSharingManagerImpl.resolveUser(userA);
UserInfo userB = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userB").build();
UserInfo userC = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userC").build();
GroupInfo testGroup = GroupInfo.newBuilder().setGroupId("groupId").setTenantId("tenantId").build();
// Create a data product
DataProductEntity dataProductEntity = new DataProductEntity();
dataProductEntity.setExternalId(UUID.randomUUID().toString());
dataProductEntity.setOwner(testUserA);
dataProductEntity.setName("test data product");
dataProductRepository.save(dataProductEntity);
// Add users B and C to the testGroup
simpleSharingManagerImpl.resolveUser(userB);
simpleSharingManagerImpl.resolveUser(userC);
Optional<SimpleUserEntity> userBEntity = simpleUserRepository
.findByExternalIdAndSimpleTenant_ExternalId(userB.getUserId(), userB.getTenantId());
assertTrue(userBEntity.isPresent());
Optional<SimpleUserEntity> userCEntity = simpleUserRepository
.findByExternalIdAndSimpleTenant_ExternalId(userC.getUserId(), userC.getTenantId());
assertTrue(userCEntity.isPresent());
SimpleGroupEntity testGroupEntity = new SimpleGroupEntity();
testGroupEntity.setName(testGroup.getGroupId());
testGroupEntity.setExternalId(testGroup.getGroupId());
testGroupEntity.getMemberUsers().addAll(Arrays.asList(userBEntity.get(), userCEntity.get()));
testGroupEntity.setSimpleTenant(userBEntity.get().getSimpleTenant());
simpleGroupRepository.save(testGroupEntity);
// Check that users B and C doesn't have READ access to the data product
DataProduct dataProduct = DataProduct.newBuilder()
.setDataProductId(dataProductEntity.getExternalId()) // only need the data product id
.build();
assertFalse(simpleSharingManagerImpl.userHasAccess(userB, dataProduct, Permission.READ));
assertFalse(simpleSharingManagerImpl.userHasAccess(userC, dataProduct, Permission.READ));
// Grant READ access to testGroup for the data product
simpleSharingManagerImpl.grantPermissionToGroup(testGroup, dataProduct, Permission.READ, userA);
// Check that users B and C now have READ access to the data product
assertTrue(simpleSharingManagerImpl.userHasAccess(userB, dataProduct, Permission.READ));
assertTrue(simpleSharingManagerImpl.userHasAccess(userC, dataProduct, Permission.READ));
}
@Test
public void testRevokePermissionFromGroup() throws SharingException {
UserInfo userA = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userA").build();
UserEntity testUserA = simpleSharingManagerImpl.resolveUser(userA);
UserInfo userB = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userB").build();
UserInfo userC = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userC").build();
GroupInfo testGroup = GroupInfo.newBuilder().setGroupId("groupId").setTenantId("tenantId").build();
// Create a data product
DataProductEntity dataProductEntity = new DataProductEntity();
dataProductEntity.setExternalId(UUID.randomUUID().toString());
dataProductEntity.setOwner(testUserA);
dataProductEntity.setName("test data product");
dataProductRepository.save(dataProductEntity);
// Add users B and C to the testGroup
simpleSharingManagerImpl.resolveUser(userB);
simpleSharingManagerImpl.resolveUser(userC);
Optional<SimpleUserEntity> userBEntity = simpleUserRepository
.findByExternalIdAndSimpleTenant_ExternalId(userB.getUserId(), userB.getTenantId());
assertTrue(userBEntity.isPresent());
Optional<SimpleUserEntity> userCEntity = simpleUserRepository
.findByExternalIdAndSimpleTenant_ExternalId(userC.getUserId(), userC.getTenantId());
assertTrue(userCEntity.isPresent());
SimpleGroupEntity testGroupEntity = new SimpleGroupEntity();
testGroupEntity.setName(testGroup.getGroupId());
testGroupEntity.setExternalId(testGroup.getGroupId());
testGroupEntity.getMemberUsers().addAll(Arrays.asList(userBEntity.get(), userCEntity.get()));
testGroupEntity.setSimpleTenant(userBEntity.get().getSimpleTenant());
simpleGroupRepository.save(testGroupEntity);
DataProduct dataProduct = DataProduct.newBuilder()
.setDataProductId(dataProductEntity.getExternalId()) // only need the data product id
.build();
// Grant READ access to testGroup for the data product
simpleSharingManagerImpl.grantPermissionToGroup(testGroup, dataProduct, Permission.READ, userA);
// Check that users B and C now have READ access to the data product
assertTrue(simpleSharingManagerImpl.userHasAccess(userB, dataProduct, Permission.READ));
assertTrue(simpleSharingManagerImpl.userHasAccess(userC, dataProduct, Permission.READ));
// Revoke READ access from testGroup
simpleSharingManagerImpl.revokePermissionFromGroup(testGroup, dataProduct, Permission.READ);
// Check that users B and C don't have READ access to the data product
assertFalse(simpleSharingManagerImpl.userHasAccess(userB, dataProduct, Permission.READ));
assertFalse(simpleSharingManagerImpl.userHasAccess(userC, dataProduct, Permission.READ));
}
@Test
public void testGrantPublicAccess() throws SharingException {
    // Resolve an owner user, then persist a data product owned by them.
    UserInfo ownerInfo = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userA").build();
    UserEntity owner = simpleSharingManagerImpl.resolveUser(ownerInfo);

    DataProductEntity productEntity = new DataProductEntity();
    productEntity.setExternalId(UUID.randomUUID().toString());
    productEntity.setOwner(owner);
    productEntity.setName("test data product");
    dataProductRepository.save(productEntity);

    // Only the external id is needed to reference the product through the API.
    DataProduct product = DataProduct.newBuilder()
            .setDataProductId(productEntity.getExternalId())
            .build();

    // Public READ access is absent until explicitly granted.
    assertFalse(simpleSharingManagerImpl.hasPublicAccess(product, Permission.READ));
    simpleSharingManagerImpl.grantPublicAccess(product, Permission.READ);
    assertTrue(simpleSharingManagerImpl.hasPublicAccess(product, Permission.READ));
}
@Test
public void testRevokePublicAccess() throws SharingException {
    // Resolve an owner user, then persist a data product owned by them.
    UserInfo ownerInfo = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userA").build();
    UserEntity owner = simpleSharingManagerImpl.resolveUser(ownerInfo);

    DataProductEntity productEntity = new DataProductEntity();
    productEntity.setExternalId(UUID.randomUUID().toString());
    productEntity.setOwner(owner);
    productEntity.setName("test data product");
    dataProductRepository.save(productEntity);

    // Only the external id is needed to reference the product through the API.
    DataProduct product = DataProduct.newBuilder()
            .setDataProductId(productEntity.getExternalId())
            .build();

    // Grant public READ, confirm it, then revoke and confirm it is gone.
    simpleSharingManagerImpl.grantPublicAccess(product, Permission.READ);
    assertTrue(simpleSharingManagerImpl.hasPublicAccess(product, Permission.READ));
    simpleSharingManagerImpl.revokePublicAccess(product, Permission.READ);
    assertFalse(simpleSharingManagerImpl.hasPublicAccess(product, Permission.READ));
}
/**
 * Verifies that a permission granted on a parent data product cascades to
 * all of its descendants: READ granted on dp1 must also apply to dp2 and dp3.
 */
@Test
public void testUserHasAccessViaCascade() throws SharingException {
    // Owner user A and an unrelated user B in the same tenant.
    UserInfo userA = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userA").build();
    UserEntity testUserA = simpleSharingManagerImpl.resolveUser(userA);
    UserInfo userB = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userB").build();
    // Build a three-level parent/child chain: dp1 <- dp2 <- dp3.
    DataProductEntity dp1 = new DataProductEntity();
    dp1.setExternalId(UUID.randomUUID().toString());
    dp1.setOwner(testUserA);
    dp1.setName("test data product 1");
    dataProductRepository.save(dp1);
    DataProductEntity dp2 = new DataProductEntity();
    dp2.setExternalId(UUID.randomUUID().toString());
    dp2.setOwner(testUserA);
    dp2.setName("test data product 2");
    dp2.setParentDataProductEntity(dp1);
    dataProductRepository.save(dp2);
    DataProductEntity dp3 = new DataProductEntity();
    dp3.setExternalId(UUID.randomUUID().toString());
    dp3.setOwner(testUserA);
    dp3.setName("test data product 3");
    dp3.setParentDataProductEntity(dp2);
    dataProductRepository.save(dp3);
    // Check that userB doesn't have READ access to the data products 1, 2 or 3
    DataProduct dataProduct = DataProduct.newBuilder()
            .setDataProductId(dp1.getExternalId()) // only need the data product id
            .build();
    assertFalse(simpleSharingManagerImpl.userHasAccess(userB, dataProduct, Permission.READ));
    // Grant READ access to userB for the root data product only
    simpleSharingManagerImpl.grantPermissionToUser(userB, dataProduct, Permission.READ, userA);
    // Check that userB now has READ access to data products 1, 2 and 3
    // (the grant on the parent cascades down to its descendants).
    assertTrue(simpleSharingManagerImpl.userHasAccess(userB, dataProduct, Permission.READ));
    assertTrue(simpleSharingManagerImpl.userHasAccess(userB,
            DataProduct.newBuilder().setDataProductId(dp2.getExternalId()).build(), Permission.READ));
    assertTrue(simpleSharingManagerImpl.userHasAccess(userB,
            DataProduct.newBuilder().setDataProductId(dp3.getExternalId()).build(), Permission.READ));
}
/**
 * Verifies the combination of group-based sharing and parent/child cascade:
 * READ granted to a group on dp1 must apply, via group membership, to dp1,
 * dp2 and dp3 for a user who is only a member of the group.
 */
@Test
public void testUserHasAccessViaGroupMembershipAndCascade() throws SharingException {
    // Owner user A, member-to-be user B, and a group in the same tenant.
    UserInfo userA = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userA").build();
    UserEntity testUserA = simpleSharingManagerImpl.resolveUser(userA);
    UserInfo userB = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userB").build();
    GroupInfo testGroup = GroupInfo.newBuilder().setGroupId("groupId").setTenantId("tenantId").build();
    // Create data products in a parent/child chain: dp1 <- dp2 <- dp3
    DataProductEntity dp1 = new DataProductEntity();
    dp1.setExternalId(UUID.randomUUID().toString());
    dp1.setOwner(testUserA);
    dp1.setName("test data product 1");
    dataProductRepository.save(dp1);
    DataProductEntity dp2 = new DataProductEntity();
    dp2.setExternalId(UUID.randomUUID().toString());
    dp2.setOwner(testUserA);
    dp2.setName("test data product 2");
    dp2.setParentDataProductEntity(dp1);
    dataProductRepository.save(dp2);
    DataProductEntity dp3 = new DataProductEntity();
    dp3.setExternalId(UUID.randomUUID().toString());
    dp3.setOwner(testUserA);
    dp3.setName("test data product 3");
    dp3.setParentDataProductEntity(dp2);
    dataProductRepository.save(dp3);
    // Add user B to the testGroup (resolveUser first so the record exists)
    simpleSharingManagerImpl.resolveUser(userB);
    Optional<SimpleUserEntity> userBEntity = simpleUserRepository
            .findByExternalIdAndSimpleTenant_ExternalId(userB.getUserId(), userB.getTenantId());
    assertTrue(userBEntity.isPresent());
    SimpleGroupEntity testGroupEntity = new SimpleGroupEntity();
    testGroupEntity.setName(testGroup.getGroupId());
    testGroupEntity.setExternalId(testGroup.getGroupId());
    testGroupEntity.getMemberUsers().addAll(Arrays.asList(userBEntity.get()));
    testGroupEntity.setSimpleTenant(userBEntity.get().getSimpleTenant());
    simpleGroupRepository.save(testGroupEntity);
    // Check that user B doesn't have READ access to the root data product
    DataProduct dataProduct = DataProduct.newBuilder()
            .setDataProductId(dp1.getExternalId()) // only need the data product id
            .build();
    assertFalse(simpleSharingManagerImpl.userHasAccess(userB, dataProduct, Permission.READ));
    // Grant READ access to testGroup for the root data product
    simpleSharingManagerImpl.grantPermissionToGroup(testGroup, dataProduct, Permission.READ, userA);
    // Check that user B now has READ access to the data products 1, 2, and 3
    // (via group membership, cascading from the parent grant).
    assertTrue(simpleSharingManagerImpl.userHasAccess(userB, dataProduct, Permission.READ));
    assertTrue(simpleSharingManagerImpl.userHasAccess(userB,
            DataProduct.newBuilder().setDataProductId(dp2.getExternalId()).build(), Permission.READ));
    assertTrue(simpleSharingManagerImpl.userHasAccess(userB,
            DataProduct.newBuilder().setDataProductId(dp3.getExternalId()).build(), Permission.READ));
}
/**
 * Verifies that holding the OWNER permission implies every other permission
 * on the data product.
 */
@Test
public void testUserHasAccessOwnerHasAllPermissions() throws SharingException {
    // Register the owner and persist a data product owned by them.
    UserInfo ownerInfo = UserInfo.newBuilder().setTenantId("tenantId").setUserId("userA").build();
    UserEntity owner = simpleSharingManagerImpl.resolveUser(ownerInfo);

    DataProductEntity productEntity = new DataProductEntity();
    productEntity.setExternalId(UUID.randomUUID().toString());
    productEntity.setOwner(owner);
    productEntity.setName("test data product");
    dataProductRepository.save(productEntity);

    DataProduct product = DataProduct.newBuilder()
            .setDataProductId(productEntity.getExternalId()) // only need the data product id
            .build();

    // Grant the OWNER permission; it should subsume all other permissions.
    simpleSharingManagerImpl.grantPermissionToUser(ownerInfo, product, Permission.OWNER, ownerInfo);

    // The owner must pass an access check for every real permission value.
    for (Permission permission : Permission.values()) {
        if (permission == Permission.UNRECOGNIZED) {
            continue; // protobuf sentinel value, not an actual permission
        }
        assertTrue(simpleSharingManagerImpl.userHasAccess(ownerInfo, product, permission),
                permission.toString());
    }
}
}
| 9,124 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/repository/sharing
|
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/repository/sharing/simple/SimpleGroupSharingRepository.java
|
package org.apache.airavata.datacatalog.api.repository.sharing.simple;
import java.util.Optional;
import org.apache.airavata.datacatalog.api.Permission;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimpleGroupSharingEntity;
import org.springframework.data.jpa.repository.JpaRepository;
/**
 * Spring Data repository for group-to-data-product permission grants in the
 * simple sharing backend.
 */
public interface SimpleGroupSharingRepository extends JpaRepository<SimpleGroupSharingEntity, Long> {

    /** Looks up the single grant row for (group, data product, permission), if any. */
    Optional<SimpleGroupSharingEntity> findBySimpleGroup_SimpleGroupIdAndDataProduct_DataProductIdAndPermission(
            long groupId, long dataProductId, Permission permission);
}
| 9,125 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/repository/sharing
|
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/repository/sharing/simple/SimpleUserSharingRepository.java
|
package org.apache.airavata.datacatalog.api.repository.sharing.simple;
import java.util.Optional;
import org.apache.airavata.datacatalog.api.Permission;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimpleUserSharingEntity;
import org.springframework.data.jpa.repository.JpaRepository;
/**
 * Spring Data repository for user-to-data-product permission grants in the
 * simple sharing backend.
 */
public interface SimpleUserSharingRepository extends JpaRepository<SimpleUserSharingEntity, Long> {

    /** Looks up the single grant row for (user, data product, permission), if any. */
    Optional<SimpleUserSharingEntity> findBySimpleUser_SimpleUserIdAndDataProduct_DataProductIdAndPermission(
            long userId, long dataProductId, Permission permission);
}
| 9,126 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/repository/sharing
|
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/repository/sharing/simple/SimpleGroupRepository.java
|
package org.apache.airavata.datacatalog.api.repository.sharing.simple;
import java.util.Optional;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimpleGroupEntity;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimpleTenantEntity;
import org.springframework.data.jpa.repository.JpaRepository;
/** Spring Data repository for simple-sharing group records. */
public interface SimpleGroupRepository extends JpaRepository<SimpleGroupEntity, Long> {

    /** Finds a group by its external id within the given tenant entity. */
    Optional<SimpleGroupEntity> findByExternalIdAndSimpleTenant(String externalId, SimpleTenantEntity simpleTenant);
}
| 9,127 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/repository/sharing
|
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/repository/sharing/simple/SimpleUserRepository.java
|
package org.apache.airavata.datacatalog.api.repository.sharing.simple;
import java.util.Optional;
import org.apache.airavata.datacatalog.api.model.UserEntity;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimpleTenantEntity;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimpleUserEntity;
import org.springframework.data.jpa.repository.JpaRepository;
/** Spring Data repository for simple-sharing user records. */
public interface SimpleUserRepository extends JpaRepository<SimpleUserEntity, Long> {

    /** Finds a user by its external id within the given (already resolved) tenant entity. */
    Optional<SimpleUserEntity> findByExternalIdAndSimpleTenant(String externalId, SimpleTenantEntity simpleTenant);

    /** Finds a user by its external id and the tenant's external id. */
    Optional<SimpleUserEntity> findByExternalIdAndSimpleTenant_ExternalId(String externalId,
            String tenantId);

    /**
     * Returns the simple-sharing record wrapping the given core user.
     * NOTE(review): unlike the Optional-returning finders above, this returns
     * null when no record exists — callers should guard against that.
     */
    SimpleUserEntity findByUser(UserEntity user);
}
| 9,128 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/repository/sharing
|
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/repository/sharing/simple/SimpleTenantRepository.java
|
package org.apache.airavata.datacatalog.api.repository.sharing.simple;
import java.util.Optional;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimpleTenantEntity;
import org.springframework.data.jpa.repository.JpaRepository;
/** Spring Data repository for simple-sharing tenant records. */
public interface SimpleTenantRepository extends JpaRepository<SimpleTenantEntity, Long> {

    /** Finds a tenant by its external id, if one exists. */
    Optional<SimpleTenantEntity> findByExternalId(String externalId);
}
| 9,129 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/repository/sharing
|
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/repository/sharing/simple/SimplePublicSharingRepository.java
|
package org.apache.airavata.datacatalog.api.repository.sharing.simple;
import java.util.Optional;
import org.apache.airavata.datacatalog.api.Permission;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimplePublicSharingEntity;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimpleTenantEntity;
import org.springframework.data.jpa.repository.JpaRepository;
/**
 * Spring Data repository for public (tenant-wide) data-product permission
 * grants in the simple sharing backend.
 */
public interface SimplePublicSharingRepository extends JpaRepository<SimplePublicSharingEntity, Long> {

    /** Looks up the single public grant row for (tenant, data product, permission), if any. */
    Optional<SimplePublicSharingEntity> findBySimpleTenantAndDataProduct_DataProductIdAndPermission(
            SimpleTenantEntity simpleTenant, Long dataProductId, Permission permission);
}
| 9,130 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/sharing/SimpleSharingManagerImpl.java
|
package org.apache.airavata.datacatalog.api.sharing;
import java.util.Arrays;
import java.util.Optional;
import org.apache.airavata.datacatalog.api.DataProduct;
import org.apache.airavata.datacatalog.api.GroupInfo;
import org.apache.airavata.datacatalog.api.Permission;
import org.apache.airavata.datacatalog.api.UserInfo;
import org.apache.airavata.datacatalog.api.model.DataProductEntity;
import org.apache.airavata.datacatalog.api.model.TenantEntity;
import org.apache.airavata.datacatalog.api.model.UserEntity;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimpleGroupEntity;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimpleGroupSharingEntity;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimplePublicSharingEntity;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimpleTenantEntity;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimpleUserEntity;
import org.apache.airavata.datacatalog.api.model.sharing.simple.SimpleUserSharingEntity;
import org.apache.airavata.datacatalog.api.repository.DataProductRepository;
import org.apache.airavata.datacatalog.api.repository.TenantRepository;
import org.apache.airavata.datacatalog.api.repository.UserRepository;
import org.apache.airavata.datacatalog.api.repository.sharing.simple.SimpleGroupRepository;
import org.apache.airavata.datacatalog.api.repository.sharing.simple.SimpleGroupSharingRepository;
import org.apache.airavata.datacatalog.api.repository.sharing.simple.SimplePublicSharingRepository;
import org.apache.airavata.datacatalog.api.repository.sharing.simple.SimpleTenantRepository;
import org.apache.airavata.datacatalog.api.repository.sharing.simple.SimpleUserRepository;
import org.apache.airavata.datacatalog.api.repository.sharing.simple.SimpleUserSharingRepository;
import org.apache.airavata.datacatalog.api.sharing.exception.SharingException;
import org.springframework.beans.factory.annotation.Autowired;
import jakarta.persistence.EntityManager;
import jakarta.persistence.PersistenceContext;
import jakarta.persistence.Query;
/**
 * {@link SharingManager} implementation backed by plain database tables (the
 * "simple" sharing backend) rather than an external sharing service.
 *
 * <p>Tenants, users and groups are created lazily the first time an id is
 * seen (see the private resolve* helpers). Effective access checks are
 * delegated to the database view named by
 * {@link #getDataProductSharingView()}, which is expected to expose
 * (user_id, data_product_id, permission_id) rows.
 */
public class SimpleSharingManagerImpl implements SharingManager {

    @Autowired
    private TenantRepository tenantRepository;

    @Autowired
    private UserRepository userRepository;

    @Autowired
    private DataProductRepository dataProductRepository;

    @Autowired
    private SimpleUserSharingRepository simpleUserSharingRepository;

    @Autowired
    private SimpleGroupSharingRepository simpleGroupSharingRepository;

    @Autowired
    private SimplePublicSharingRepository simplePublicSharingRepository;

    @Autowired
    private SimpleTenantRepository simpleTenantRepository;

    @Autowired
    private SimpleUserRepository simpleUserRepository;

    @Autowired
    private SimpleGroupRepository simpleGroupRepository;

    // Used for native queries against the sharing view.
    @PersistenceContext
    private EntityManager entityManager;

    /** No per-tenant setup is required for the simple backend. */
    @Override
    public void initialize(String tenantId) throws SharingException {
        // Nothing to do
    }

    /**
     * Returns the core user entity for the given user info, creating the
     * tenant and user records on first use.
     */
    @Override
    public UserEntity resolveUser(UserInfo userInfo) throws SharingException {
        SimpleUserEntity simpleUser = resolveSimpleUser(userInfo);
        return simpleUser.getUser();
    }

    /**
     * Checks effective access by querying the sharing view directly. OWNER is
     * treated as a superset of every other permission, hence the
     * IN (requested, OWNER) parameter below.
     *
     * @throws SharingException if the data product does not exist
     */
    @Override
    public boolean userHasAccess(UserInfo userInfo, DataProduct dataProduct, Permission permission)
            throws SharingException {
        UserEntity user = resolveUser(userInfo);
        DataProductEntity dataProductEntity = resolveDataProduct(dataProduct);
        Query query = entityManager.createNativeQuery("select 1 from " + getDataProductSharingView()
                + " where user_id = :user_id and data_product_id = :data_product_id and permission_id in :permission_id");
        query.setParameter("user_id", user.getUserId());
        query.setParameter("data_product_id", dataProductEntity.getDataProductId());
        query.setParameter("permission_id", Arrays.asList(permission.getNumber(), Permission.OWNER.getNumber()));
        return query.getResultList().size() > 0;
    }

    /** Name of the database view that flattens all sharing rules per user. */
    @Override
    public String getDataProductSharingView() {
        return "simple_data_product_sharing_view";
    }

    /**
     * Grants {@code permission} on the data product to a single user,
     * creating user/tenant records as needed. Idempotent: an existing
     * identical grant is left untouched.
     *
     * @param sharedByUser user recorded as the grantor; may be null
     */
    @Override
    public void grantPermissionToUser(UserInfo userInfo, DataProduct dataProduct, Permission permission,
            UserInfo sharedByUser) throws SharingException {
        SimpleUserEntity simpleUser = resolveSimpleUser(userInfo);
        DataProductEntity dataProductEntity = resolveDataProduct(dataProduct);
        SimpleUserEntity sharedByUserEntity = sharedByUser != null ? resolveSimpleUser(sharedByUser) : null;
        Optional<SimpleUserSharingEntity> maybeSimpleUserSharingEntity = simpleUserSharingRepository
                .findBySimpleUser_SimpleUserIdAndDataProduct_DataProductIdAndPermission(simpleUser.getSimpleUserId(),
                        dataProductEntity.getDataProductId(), permission);
        if (maybeSimpleUserSharingEntity.isEmpty()) {
            SimpleUserSharingEntity simpleUserSharingEntity = new SimpleUserSharingEntity();
            simpleUserSharingEntity.setDataProduct(dataProductEntity);
            simpleUserSharingEntity.setPermission(permission);
            simpleUserSharingEntity.setSimpleUser(simpleUser);
            simpleUserSharingEntity.setSharedByUser(sharedByUserEntity);
            simpleUserSharingRepository.save(simpleUserSharingEntity);
        }
    }

    /**
     * Removes a user's direct grant of {@code permission} on the data
     * product, if one exists. No-op when there is no matching grant.
     */
    @Override
    public void revokePermissionFromUser(UserInfo userInfo, DataProduct dataProduct, Permission permission)
            throws SharingException {
        SimpleUserEntity simpleUser = resolveSimpleUser(userInfo);
        DataProductEntity dataProductEntity = resolveDataProduct(dataProduct);
        Optional<SimpleUserSharingEntity> maybeSimpleUserSharingEntity = simpleUserSharingRepository
                .findBySimpleUser_SimpleUserIdAndDataProduct_DataProductIdAndPermission(simpleUser.getSimpleUserId(),
                        dataProductEntity.getDataProductId(), permission);
        maybeSimpleUserSharingEntity.ifPresent(simpleUserSharingEntity -> {
            simpleUserSharingRepository.delete(simpleUserSharingEntity);
        });
    }

    /**
     * Grants {@code permission} on the data product to a group, creating the
     * group record if needed. Idempotent: an existing identical grant is
     * left untouched.
     *
     * @param sharedByUser user recorded as the grantor (resolved in the
     *                     group's tenant); may be null
     */
    @Override
    public void grantPermissionToGroup(GroupInfo groupInfo, DataProduct dataProduct, Permission permission,
            UserInfo sharedByUser) throws SharingException {
        SimpleGroupEntity groupEntity = resolveGroup(groupInfo);
        DataProductEntity dataProductEntity = resolveDataProduct(dataProduct);
        SimpleUserEntity sharedByUserEntity = sharedByUser != null
                ? resolveSimpleUser(sharedByUser, groupEntity.getSimpleTenant())
                : null;
        Optional<SimpleGroupSharingEntity> maybeSimpleGroupSharingEntity = simpleGroupSharingRepository
                .findBySimpleGroup_SimpleGroupIdAndDataProduct_DataProductIdAndPermission(
                        groupEntity.getSimpleGroupId(),
                        dataProductEntity.getDataProductId(), permission);
        if (maybeSimpleGroupSharingEntity.isEmpty()) {
            SimpleGroupSharingEntity simpleGroupSharingEntity = new SimpleGroupSharingEntity();
            simpleGroupSharingEntity.setDataProduct(dataProductEntity);
            simpleGroupSharingEntity.setPermission(permission);
            simpleGroupSharingEntity.setSimpleGroup(groupEntity);
            simpleGroupSharingEntity.setSharedByUser(sharedByUserEntity);
            simpleGroupSharingRepository.save(simpleGroupSharingEntity);
        }
    }

    /**
     * Removes a group's grant of {@code permission} on the data product, if
     * one exists. No-op when there is no matching grant.
     */
    @Override
    public void revokePermissionFromGroup(GroupInfo groupInfo, DataProduct dataProduct, Permission permission)
            throws SharingException {
        SimpleGroupEntity groupEntity = resolveGroup(groupInfo);
        DataProductEntity dataProductEntity = resolveDataProduct(dataProduct);
        Optional<SimpleGroupSharingEntity> maybeSimpleGroupSharingEntity = simpleGroupSharingRepository
                .findBySimpleGroup_SimpleGroupIdAndDataProduct_DataProductIdAndPermission(
                        groupEntity.getSimpleGroupId(), dataProductEntity.getDataProductId(), permission);
        maybeSimpleGroupSharingEntity.ifPresent(simpleUserSharingEntity -> {
            simpleGroupSharingRepository.delete(simpleUserSharingEntity);
        });
    }

    /**
     * Returns whether the data product is shared publicly (within the
     * owner's tenant) at the given permission level.
     *
     * NOTE(review): simpleUserRepository.findByUser(...) returns null when
     * the owner has no simple-sharing record, which would NPE on the
     * getSimpleTenant() call below — confirm owners always go through
     * resolveUser first.
     */
    @Override
    public boolean hasPublicAccess(DataProduct dataProduct, Permission permission) throws SharingException {
        DataProductEntity dataProductEntity = resolveDataProduct(dataProduct);
        SimpleUserEntity simpleUser = simpleUserRepository.findByUser(dataProductEntity.getOwner());
        SimpleTenantEntity simpleTenant = simpleUser.getSimpleTenant();
        Optional<SimplePublicSharingEntity> maybeSimplePublicSharingEntity = simplePublicSharingRepository
                .findBySimpleTenantAndDataProduct_DataProductIdAndPermission(simpleTenant,
                        dataProductEntity.getDataProductId(), permission);
        return maybeSimplePublicSharingEntity.isPresent();
    }

    /**
     * Makes the data product publicly accessible at the given permission
     * level within the owner's tenant. Idempotent.
     *
     * NOTE(review): same potential null from findByUser as in
     * hasPublicAccess — confirm owners are always resolved beforehand.
     */
    @Override
    public void grantPublicAccess(DataProduct dataProduct, Permission permission) throws SharingException {
        DataProductEntity dataProductEntity = resolveDataProduct(dataProduct);
        SimpleUserEntity simpleUser = simpleUserRepository.findByUser(dataProductEntity.getOwner());
        SimpleTenantEntity simpleTenant = simpleUser.getSimpleTenant();
        Optional<SimplePublicSharingEntity> maybeSimplePublicSharingEntity = simplePublicSharingRepository
                .findBySimpleTenantAndDataProduct_DataProductIdAndPermission(simpleTenant,
                        dataProductEntity.getDataProductId(), permission);
        if (maybeSimplePublicSharingEntity.isEmpty()) {
            SimplePublicSharingEntity simplePublicSharingEntity = new SimplePublicSharingEntity();
            simplePublicSharingEntity.setDataProduct(dataProductEntity);
            simplePublicSharingEntity.setPermission(permission);
            simplePublicSharingEntity.setSimpleTenant(simpleTenant);
            simplePublicSharingRepository.save(simplePublicSharingEntity);
        }
    }

    /**
     * Removes the public grant of {@code permission} on the data product, if
     * one exists. No-op otherwise.
     */
    @Override
    public void revokePublicAccess(DataProduct dataProduct, Permission permission) throws SharingException {
        DataProductEntity dataProductEntity = resolveDataProduct(dataProduct);
        SimpleUserEntity simpleUser = simpleUserRepository.findByUser(dataProductEntity.getOwner());
        SimpleTenantEntity simpleTenant = simpleUser.getSimpleTenant();
        Optional<SimplePublicSharingEntity> maybeSimplePublicSharingEntity = simplePublicSharingRepository
                .findBySimpleTenantAndDataProduct_DataProductIdAndPermission(simpleTenant,
                        dataProductEntity.getDataProductId(), permission);
        maybeSimplePublicSharingEntity.ifPresent(simplePublicSharingEntity -> {
            simplePublicSharingRepository.delete(simplePublicSharingEntity);
        });
    }

    /**
     * Returns the group record for the given info, creating the group (and
     * its tenant) if missing. Falls back to the "default" tenant when the
     * info carries no tenant id.
     */
    private SimpleGroupEntity resolveGroup(GroupInfo groupInfo) {
        final String tenantId = groupInfo.hasTenantId() ? groupInfo.getTenantId() : "default";
        SimpleTenantEntity tenant = resolveTenant(tenantId);
        // Create the group if missing
        Optional<SimpleGroupEntity> maybeSimpleGroup = simpleGroupRepository
                .findByExternalIdAndSimpleTenant(groupInfo.getGroupId(), tenant);
        SimpleGroupEntity simpleGroup = maybeSimpleGroup.orElseGet(() -> {
            SimpleGroupEntity newGroup = new SimpleGroupEntity();
            newGroup.setExternalId(groupInfo.getGroupId());
            // Group id doubles as the display name when auto-created
            newGroup.setName(groupInfo.getGroupId());
            newGroup.setSimpleTenant(tenant);
            return simpleGroupRepository.save(newGroup);
        });
        return simpleGroup;
    }

    /**
     * Returns the simple-tenant record for the given external id, creating
     * both the core tenant and the simple-tenant record if missing.
     */
    private SimpleTenantEntity resolveTenant(String tenantId) {
        Optional<SimpleTenantEntity> maybeSimpleTenant = simpleTenantRepository.findByExternalId(tenantId);
        return maybeSimpleTenant.orElseGet(() -> {
            TenantEntity newTenant = new TenantEntity();
            newTenant.setExternalId(tenantId);
            newTenant.setName(tenantId);
            newTenant = tenantRepository.save(newTenant);
            SimpleTenantEntity newSimpleTenant = new SimpleTenantEntity();
            newSimpleTenant.setExternalId(tenantId);
            newSimpleTenant.setName(tenantId);
            newSimpleTenant.setTenant(newTenant);
            return simpleTenantRepository.save(newSimpleTenant);
        });
    }

    /**
     * Resolves (and lazily creates) the simple-user record, defaulting the
     * tenant to "default" when the user info carries none.
     */
    private SimpleUserEntity resolveSimpleUser(UserInfo userInfo) {
        final String tenantId = userInfo.hasTenantId() ? userInfo.getTenantId() : "default";
        SimpleTenantEntity tenant = resolveTenant(tenantId);
        return resolveSimpleUser(userInfo, tenant);
    }

    /**
     * Resolves the simple-user record within an already-resolved tenant,
     * creating both the core user and the simple-user record if missing.
     */
    private SimpleUserEntity resolveSimpleUser(UserInfo userInfo, SimpleTenantEntity tenant) {
        Optional<SimpleUserEntity> maybeSimpleUser = simpleUserRepository
                .findByExternalIdAndSimpleTenant(userInfo.getUserId(), tenant);
        SimpleUserEntity simpleUser = maybeSimpleUser.orElseGet(() -> {
            UserEntity newUser = new UserEntity();
            newUser.setExternalId(userInfo.getUserId());
            // User id doubles as the display name when auto-created
            newUser.setName(userInfo.getUserId());
            newUser.setTenant(tenant.getTenant());
            newUser = userRepository.save(newUser);
            SimpleUserEntity newSimpleUser = new SimpleUserEntity();
            newSimpleUser.setExternalId(userInfo.getUserId());
            newSimpleUser.setName(userInfo.getUserId());
            newSimpleUser.setSimpleTenant(tenant);
            newSimpleUser.setUser(newUser);
            return simpleUserRepository.save(newSimpleUser);
        });
        return simpleUser;
    }

    /**
     * Looks up the data product entity by the message's external id.
     *
     * @throws SharingException if no data product exists with that id
     */
    private DataProductEntity resolveDataProduct(DataProduct dataProduct) throws SharingException {
        Optional<DataProductEntity> maybeDataProduct = dataProductRepository
                .findByExternalId(dataProduct.getDataProductId());
        DataProductEntity dataProductEntity = maybeDataProduct.orElseThrow(() -> {
            return new SharingException("No data product exists with id " + dataProduct.getDataProductId());
        });
        return dataProductEntity;
    }
}
| 9,131 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/model/sharing
|
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/model/sharing/simple/SimpleUserSharingEntity.java
|
package org.apache.airavata.datacatalog.api.model.sharing.simple;
import org.apache.airavata.datacatalog.api.Permission;
import org.apache.airavata.datacatalog.api.model.DataProductEntity;
import jakarta.persistence.Column;
import jakarta.persistence.Entity;
import jakarta.persistence.EnumType;
import jakarta.persistence.Enumerated;
import jakarta.persistence.ForeignKey;
import jakarta.persistence.GeneratedValue;
import jakarta.persistence.GenerationType;
import jakarta.persistence.Id;
import jakarta.persistence.JoinColumn;
import jakarta.persistence.ManyToOne;
import jakarta.persistence.SequenceGenerator;
import jakarta.persistence.Table;
import jakarta.persistence.UniqueConstraint;
/**
 * JPA entity recording that a single user was granted a specific
 * {@link Permission} on a data product (simple sharing backend). The
 * (user, data product, permission) triple is unique.
 */
@Entity
@Table(name = "simple_user_sharing", uniqueConstraints = {
        @UniqueConstraint(columnNames = { "simple_user_id", "data_product_id", "permission_id" }) })
public class SimpleUserSharingEntity {

    // Surrogate primary key, assigned from a dedicated sequence.
    @Id
    @SequenceGenerator(name = "simple_user_sharing_sharing_id", sequenceName = "simple_user_sharing_sharing_id", allocationSize = 1)
    @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "simple_user_sharing_sharing_id")
    @Column(name = "sharing_id")
    private Long sharingId;

    // The user receiving the permission.
    @ManyToOne(optional = false)
    @JoinColumn(name = "simple_user_id", nullable = false, updatable = false)
    private SimpleUserEntity simpleUser;

    // The data product being shared.
    @ManyToOne(optional = false)
    @JoinColumn(name = "data_product_id", nullable = false, updatable = false, foreignKey = @ForeignKey(name = "fk_simple_user_sharing_data_product_id"))
    private DataProductEntity dataProduct;

    // Permission level granted, stored by enum name.
    @Column(name = "permission_id", nullable = false)
    @Enumerated(EnumType.STRING)
    private Permission permission;

    // User who created the grant; optional.
    @ManyToOne
    @JoinColumn(name = "shared_by_user_id")
    private SimpleUserEntity sharedByUser;

    public Long getSharingId() {
        return sharingId;
    }

    public void setSharingId(Long sharingId) {
        this.sharingId = sharingId;
    }

    public SimpleUserEntity getSimpleUser() {
        return simpleUser;
    }

    public void setSimpleUser(SimpleUserEntity simpleUser) {
        this.simpleUser = simpleUser;
    }

    public DataProductEntity getDataProduct() {
        return dataProduct;
    }

    public void setDataProduct(DataProductEntity dataProduct) {
        this.dataProduct = dataProduct;
    }

    public Permission getPermission() {
        return permission;
    }

    public void setPermission(Permission permission) {
        this.permission = permission;
    }

    public SimpleUserEntity getSharedByUser() {
        return sharedByUser;
    }

    public void setSharedByUser(SimpleUserEntity sharedByUser) {
        this.sharedByUser = sharedByUser;
    }

    // Identity is based solely on the surrogate key.
    @Override
    public int hashCode() {
        // Equivalent to 31 * 1 + (sharingId == null ? 0 : sharingId.hashCode()).
        return 31 + (sharingId == null ? 0 : sharingId.hashCode());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        SimpleUserSharingEntity that = (SimpleUserSharingEntity) obj;
        return sharingId == null ? that.sharingId == null : sharingId.equals(that.sharingId);
    }
}
| 9,132 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/model/sharing
|
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/model/sharing/simple/SimplePublicSharingEntity.java
|
package org.apache.airavata.datacatalog.api.model.sharing.simple;
import org.apache.airavata.datacatalog.api.Permission;
import org.apache.airavata.datacatalog.api.model.DataProductEntity;
import jakarta.persistence.Column;
import jakarta.persistence.Entity;
import jakarta.persistence.EnumType;
import jakarta.persistence.Enumerated;
import jakarta.persistence.ForeignKey;
import jakarta.persistence.GeneratedValue;
import jakarta.persistence.GenerationType;
import jakarta.persistence.Id;
import jakarta.persistence.JoinColumn;
import jakarta.persistence.ManyToOne;
import jakarta.persistence.SequenceGenerator;
import jakarta.persistence.Table;
import jakarta.persistence.UniqueConstraint;
/**
 * JPA entity marking a data product as publicly accessible, within a tenant,
 * at a given {@link Permission} level (simple sharing backend). The
 * (data product, permission, tenant) triple is unique.
 */
@Entity
@Table(name = "simple_public_sharing", uniqueConstraints = {
        @UniqueConstraint(columnNames = { "data_product_id", "permission_id", "simple_tenant_id" }) })
public class SimplePublicSharingEntity {

    // Surrogate primary key, assigned from a dedicated sequence.
    @Id
    @SequenceGenerator(name = "simple_public_sharing_sharing_id", sequenceName = "simple_public_sharing_sharing_id", allocationSize = 1)
    @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "simple_public_sharing_sharing_id")
    @Column(name = "sharing_id")
    private Long sharingId;

    // The data product being made public.
    @ManyToOne(optional = false)
    @JoinColumn(name = "data_product_id", referencedColumnName = "data_product_id", nullable = false, updatable = false, foreignKey = @ForeignKey(name = "fk_simple_public_sharing_data_product_id"))
    private DataProductEntity dataProduct;

    // Permission level granted to the public, stored by enum name.
    @Column(name = "permission_id")
    @Enumerated(EnumType.STRING)
    private Permission permission;

    // Tenant in which the public grant applies.
    @ManyToOne(optional = false)
    @JoinColumn(name = "simple_tenant_id", referencedColumnName = "simple_tenant_id", nullable = false, updatable = false)
    private SimpleTenantEntity simpleTenant;

    public Long getSharingId() {
        return sharingId;
    }

    public void setSharingId(Long sharingId) {
        this.sharingId = sharingId;
    }

    public DataProductEntity getDataProduct() {
        return dataProduct;
    }

    public void setDataProduct(DataProductEntity dataProduct) {
        this.dataProduct = dataProduct;
    }

    public Permission getPermission() {
        return permission;
    }

    public void setPermission(Permission permission) {
        this.permission = permission;
    }

    public SimpleTenantEntity getSimpleTenant() {
        return simpleTenant;
    }

    public void setSimpleTenant(SimpleTenantEntity simpleTenant) {
        this.simpleTenant = simpleTenant;
    }

    // Identity is based solely on the surrogate key.
    @Override
    public int hashCode() {
        // Equivalent to 31 * 1 + (sharingId == null ? 0 : sharingId.hashCode()).
        return 31 + (sharingId == null ? 0 : sharingId.hashCode());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        SimplePublicSharingEntity that = (SimplePublicSharingEntity) obj;
        return sharingId == null ? that.sharingId == null : sharingId.equals(that.sharingId);
    }
}
| 9,133 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/model/sharing
|
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/model/sharing/simple/SimpleTenantEntity.java
|
package org.apache.airavata.datacatalog.api.model.sharing.simple;
import org.apache.airavata.datacatalog.api.model.TenantEntity;
import jakarta.persistence.Basic;
import jakarta.persistence.Column;
import jakarta.persistence.Entity;
import jakarta.persistence.GeneratedValue;
import jakarta.persistence.GenerationType;
import jakarta.persistence.Id;
import jakarta.persistence.JoinColumn;
import jakarta.persistence.ManyToOne;
import jakarta.persistence.SequenceGenerator;
import jakarta.persistence.Table;
import jakarta.persistence.UniqueConstraint;
/**
 * JPA entity for a tenant as seen by the simple sharing implementation.
 * Links to the catalog-level {@code TenantEntity} and carries the external
 * identifier callers use to name the tenant (unique across the table).
 * Identity (equals/hashCode) is based solely on the primary key.
 */
@Entity
@Table(name = "simple_tenant", uniqueConstraints = { @UniqueConstraint(columnNames = { "external_id" }) })
public class SimpleTenantEntity {

    // Surrogate primary key, assigned from a dedicated database sequence.
    @Id
    @SequenceGenerator(name = "simple_tenant_simple_tenant_id_seq", sequenceName = "simple_tenant_simple_tenant_id_seq", allocationSize = 1)
    @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "simple_tenant_simple_tenant_id_seq")
    @Column(name = "simple_tenant_id")
    private Long simpleTenantId;

    // Caller-supplied identifier; unique per the table constraint.
    @Basic
    @Column(name = "external_id", nullable = false)
    private String externalId;

    @Basic
    @Column(name = "name", nullable = false)
    private String name;

    // Backing catalog tenant; fixed at creation time (updatable=false).
    @ManyToOne(optional = false)
    @JoinColumn(name = "tenant_id", nullable = false, updatable = false)
    private TenantEntity tenant;

    public Long getSimpleTenantId() {
        return simpleTenantId;
    }

    public void setSimpleTenantId(Long simpleTenantId) {
        this.simpleTenantId = simpleTenantId;
    }

    public String getExternalId() {
        return externalId;
    }

    public void setExternalId(String externalId) {
        this.externalId = externalId;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public TenantEntity getTenant() {
        return tenant;
    }

    public void setTenant(TenantEntity tenant) {
        this.tenant = tenant;
    }

    @Override
    public int hashCode() {
        // Same value as the classic 31 * 1 + hash(simpleTenantId) expansion.
        return 31 + (simpleTenantId == null ? 0 : simpleTenantId.hashCode());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        SimpleTenantEntity other = (SimpleTenantEntity) obj;
        return simpleTenantId == null ? other.simpleTenantId == null
                : simpleTenantId.equals(other.simpleTenantId);
    }
}
| 9,134 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/model/sharing
|
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/model/sharing/simple/SimpleUserEntity.java
|
package org.apache.airavata.datacatalog.api.model.sharing.simple;
import org.apache.airavata.datacatalog.api.model.UserEntity;
import jakarta.persistence.Basic;
import jakarta.persistence.Column;
import jakarta.persistence.Entity;
import jakarta.persistence.GeneratedValue;
import jakarta.persistence.GenerationType;
import jakarta.persistence.Id;
import jakarta.persistence.JoinColumn;
import jakarta.persistence.ManyToOne;
import jakarta.persistence.SequenceGenerator;
import jakarta.persistence.Table;
import jakarta.persistence.UniqueConstraint;
/**
 * JPA entity for a user as seen by the simple sharing implementation.
 * Links to both the owning {@code SimpleTenantEntity} and the catalog-level
 * {@code UserEntity}; the external id is unique within a tenant.
 * Identity (equals/hashCode) is based solely on the primary key.
 */
@Entity
@Table(name = "simple_user", uniqueConstraints = {
        @UniqueConstraint(columnNames = { "simple_tenant_id", "external_id" }) })
public class SimpleUserEntity {

    // Surrogate primary key, assigned from a dedicated database sequence.
    @Id
    @SequenceGenerator(name = "simple_user_simple_user_id_seq", sequenceName = "simple_user_simple_user_id_seq", allocationSize = 1)
    @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "simple_user_simple_user_id_seq")
    @Column(name = "simple_user_id")
    private Long simpleUserId;

    // Caller-supplied identifier; unique within a tenant (see table constraint).
    @Basic
    @Column(name = "external_id", nullable = false)
    private String externalId;

    @Basic
    @Column(name = "name", nullable = false)
    private String name;

    // Owning tenant; fixed at creation time (updatable=false).
    @ManyToOne(optional = false)
    @JoinColumn(name = "simple_tenant_id", referencedColumnName = "simple_tenant_id", nullable = false, updatable = false)
    private SimpleTenantEntity simpleTenant;

    // Backing catalog user; fixed at creation time.
    @ManyToOne(optional = false)
    @JoinColumn(name = "user_id", nullable = false, updatable = false)
    private UserEntity user;

    public Long getSimpleUserId() {
        return simpleUserId;
    }

    public void setSimpleUserId(Long simpleUserId) {
        this.simpleUserId = simpleUserId;
    }

    public String getExternalId() {
        return externalId;
    }

    public void setExternalId(String externalId) {
        this.externalId = externalId;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public SimpleTenantEntity getSimpleTenant() {
        return simpleTenant;
    }

    public void setSimpleTenant(SimpleTenantEntity tenant) {
        this.simpleTenant = tenant;
    }

    public UserEntity getUser() {
        return user;
    }

    public void setUser(UserEntity user) {
        this.user = user;
    }

    @Override
    public int hashCode() {
        // Same value as the classic 31 * 1 + hash(simpleUserId) expansion.
        return 31 + (simpleUserId == null ? 0 : simpleUserId.hashCode());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        SimpleUserEntity other = (SimpleUserEntity) obj;
        return simpleUserId == null ? other.simpleUserId == null : simpleUserId.equals(other.simpleUserId);
    }
}
| 9,135 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/model/sharing
|
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/model/sharing/simple/SimpleGroupSharingEntity.java
|
package org.apache.airavata.datacatalog.api.model.sharing.simple;
import org.apache.airavata.datacatalog.api.Permission;
import org.apache.airavata.datacatalog.api.model.DataProductEntity;
import jakarta.persistence.Column;
import jakarta.persistence.Entity;
import jakarta.persistence.EnumType;
import jakarta.persistence.Enumerated;
import jakarta.persistence.ForeignKey;
import jakarta.persistence.GeneratedValue;
import jakarta.persistence.GenerationType;
import jakarta.persistence.Id;
import jakarta.persistence.JoinColumn;
import jakarta.persistence.ManyToOne;
import jakarta.persistence.SequenceGenerator;
import jakarta.persistence.Table;
import jakarta.persistence.UniqueConstraint;
/**
 * JPA entity recording that a data product is shared with a group at a given
 * permission level — one row per (group, data product, permission), enforced
 * by the unique constraint. Identity (equals/hashCode) is based solely on
 * the primary key.
 */
@Entity
@Table(name = "simple_group_sharing", uniqueConstraints = {
        @UniqueConstraint(columnNames = { "simple_group_id", "data_product_id", "permission_id" }) })
public class SimpleGroupSharingEntity {

    // Surrogate primary key, assigned from a dedicated database sequence.
    @Id
    @SequenceGenerator(name = "simple_group_sharing_sharing_id", sequenceName = "simple_group_sharing_sharing_id", allocationSize = 1)
    @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "simple_group_sharing_sharing_id")
    @Column(name = "sharing_id")
    private Long sharingId;

    // The group receiving the grant; fixed at creation time (updatable=false).
    @ManyToOne(optional = false)
    @JoinColumn(name = "simple_group_id", referencedColumnName = "simple_group_id", nullable = false, updatable = false)
    private SimpleGroupEntity simpleGroup;

    // The shared data product; fixed at creation time.
    @ManyToOne(optional = false)
    @JoinColumn(name = "data_product_id", referencedColumnName = "data_product_id", nullable = false, updatable = false, foreignKey = @ForeignKey(name = "fk_simple_group_sharing_data_product_id"))
    private DataProductEntity dataProduct;

    // Granted permission, stored by enum name.
    @Column(name = "permission_id")
    @Enumerated(EnumType.STRING)
    private Permission permission;

    // User who created the grant, when recorded (optional association).
    // Bug fix: referencedColumnName previously pointed at "user_id", which is
    // a non-unique foreign-key column on simple_user; the primary key of
    // SimpleUserEntity is mapped to "simple_user_id", and JPA requires the
    // referenced column to uniquely identify the target row.
    @ManyToOne
    @JoinColumn(name = "shared_by_user_id", referencedColumnName = "simple_user_id")
    private SimpleUserEntity sharedByUser;

    public Long getSharingId() {
        return sharingId;
    }

    public void setSharingId(Long sharingId) {
        this.sharingId = sharingId;
    }

    public SimpleGroupEntity getSimpleGroup() {
        return simpleGroup;
    }

    public void setSimpleGroup(SimpleGroupEntity simpleGroup) {
        this.simpleGroup = simpleGroup;
    }

    public DataProductEntity getDataProduct() {
        return dataProduct;
    }

    public void setDataProduct(DataProductEntity dataProduct) {
        this.dataProduct = dataProduct;
    }

    public Permission getPermission() {
        return permission;
    }

    public void setPermission(Permission permission) {
        this.permission = permission;
    }

    public SimpleUserEntity getSharedByUser() {
        return sharedByUser;
    }

    public void setSharedByUser(SimpleUserEntity sharedByUser) {
        this.sharedByUser = sharedByUser;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((sharingId == null) ? 0 : sharingId.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        SimpleGroupSharingEntity other = (SimpleGroupSharingEntity) obj;
        if (sharingId == null) {
            if (other.sharingId != null)
                return false;
        } else if (!sharingId.equals(other.sharingId))
            return false;
        return true;
    }
}
| 9,136 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/model/sharing
|
Create_ds/airavata-data-catalog/data-catalog-api/server/simple-sharing/src/main/java/org/apache/airavata/datacatalog/api/model/sharing/simple/SimpleGroupEntity.java
|
package org.apache.airavata.datacatalog.api.model.sharing.simple;
import java.util.HashSet;
import java.util.Set;
import jakarta.persistence.Basic;
import jakarta.persistence.Column;
import jakarta.persistence.Entity;
import jakarta.persistence.GeneratedValue;
import jakarta.persistence.GenerationType;
import jakarta.persistence.Id;
import jakarta.persistence.JoinColumn;
import jakarta.persistence.JoinTable;
import jakarta.persistence.ManyToMany;
import jakarta.persistence.ManyToOne;
import jakarta.persistence.SequenceGenerator;
import jakarta.persistence.Table;
import jakarta.persistence.UniqueConstraint;
/**
 * JPA entity for a user group as seen by the simple sharing implementation.
 * Belongs to one tenant; external id is unique within that tenant. Group
 * membership is a many-to-many association through simple_group_membership.
 * Identity (equals/hashCode) is based solely on the primary key.
 */
@Entity
@Table(name = "simple_group", uniqueConstraints = {
        @UniqueConstraint(columnNames = { "simple_tenant_id", "external_id" }) })
public class SimpleGroupEntity {

    // Surrogate primary key, assigned from a dedicated database sequence.
    @Id
    @SequenceGenerator(name = "simple_group_simple_group_id", sequenceName = "simple_group_simple_group_id", allocationSize = 1)
    @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "simple_group_simple_group_id")
    @Column(name = "simple_group_id")
    private Long simpleGroupId;

    // Caller-supplied identifier; unique within a tenant (see table constraint).
    @Basic
    @Column(name = "external_id", nullable = false)
    private String externalId;

    @Basic
    @Column(name = "name", nullable = false)
    private String name;

    // Owning tenant; fixed at creation time (updatable=false).
    @ManyToOne(optional = false)
    @JoinColumn(name = "simple_tenant_id", referencedColumnName = "simple_tenant_id", nullable = false, updatable = false)
    private SimpleTenantEntity simpleTenant;

    // Users belonging to this group, via the simple_group_membership join table.
    @ManyToMany
    @JoinTable(name = "simple_group_membership", joinColumns = @JoinColumn(name = "simple_group_id"), inverseJoinColumns = @JoinColumn(name = "simple_user_id"))
    private Set<SimpleUserEntity> memberUsers = new HashSet<>();

    public Long getSimpleGroupId() {
        return simpleGroupId;
    }

    public void setSimpleGroupId(Long simpleGroupId) {
        this.simpleGroupId = simpleGroupId;
    }

    public String getExternalId() {
        return externalId;
    }

    public void setExternalId(String externalId) {
        this.externalId = externalId;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public SimpleTenantEntity getSimpleTenant() {
        return simpleTenant;
    }

    public void setSimpleTenant(SimpleTenantEntity simpleTenant) {
        this.simpleTenant = simpleTenant;
    }

    public Set<SimpleUserEntity> getMemberUsers() {
        return memberUsers;
    }

    public void setMemberUsers(Set<SimpleUserEntity> memberUsers) {
        this.memberUsers = memberUsers;
    }

    @Override
    public int hashCode() {
        // Same value as the classic 31 * 1 + hash(simpleGroupId) expansion.
        return 31 + (simpleGroupId == null ? 0 : simpleGroupId.hashCode());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        SimpleGroupEntity other = (SimpleGroupEntity) obj;
        return simpleGroupId == null ? other.simpleGroupId == null : simpleGroupId.equals(other.simpleGroupId);
    }
}
| 9,137 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/test/java/org/apache/airavata/datacatalog
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/test/java/org/apache/airavata/datacatalog/api/DataCatalogApiServerApplicationTests.java
|
package org.apache.airavata.datacatalog.api;
import org.junit.jupiter.api.Test;
import org.springframework.boot.test.context.SpringBootTest;
@SpringBootTest
class DataCatalogApiServerApplicationTests {

    /**
     * Smoke test: passes when the Spring application context starts without
     * throwing, i.e. all beans can be created and wired. The empty body is
     * intentional — context startup itself is the assertion.
     */
    @Test
    void contextLoads() {
    }
}
| 9,138 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/DataCatalogApiServiceApplication.java
|
package org.apache.airavata.datacatalog.api;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import org.apache.airavata.datacatalog.api.sharing.SharingManager;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.domain.EntityScan;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Primary;
import org.springframework.data.jpa.repository.config.EnableJpaAuditing;
import org.springframework.data.jpa.repository.config.EnableJpaRepositories;
@ComponentScan(basePackages = { "org.apache.airavata.datacatalog.api", "org.apache.custos.sharing.core" })
@SpringBootApplication
@EnableJpaRepositories({ "org.apache.custos.sharing.core.persistance.repository",
        "org.apache.airavata.datacatalog.api.repository" })
@EnableJpaAuditing
@EntityScan(basePackages = { "org.apache.airavata.datacatalog.api.model",
        "org.apache.custos.sharing.core.persistance.model" })
public class DataCatalogApiServiceApplication {

    /**
     * Boots the service with {@code WebApplicationType.NONE}: no embedded
     * HTTP server is started (the API is exposed over gRPC elsewhere).
     */
    public static void main(String[] args) {
        new SpringApplicationBuilder(DataCatalogApiServiceApplication.class).web(WebApplicationType.NONE).run(args);
    }

    /**
     * Builds the {@link SharingManager} named by the
     * {@code sharing.manager.class.name} property (defaulting to the simple
     * sharing implementation) via reflection, using its public no-arg
     * constructor.
     *
     * @param sharingManagerClassName fully qualified class name to instantiate
     * @return the configured sharing manager
     * @throws RuntimeException if the class cannot be loaded, lacks a no-arg
     *                          constructor, or fails to instantiate
     */
    @Bean
    @Primary
    public SharingManager getSharingManager(
            @Value("${sharing.manager.class.name:org.apache.airavata.datacatalog.api.sharing.SimpleSharingManagerImpl}") String sharingManagerClassName) {
        try {
            // Class.forName already returns Class<?>; the previous explicit
            // cast was redundant. The nested try/catch is flattened into a
            // single multi-catch chain for readability.
            Class<?> sharingManagerClass = Class.forName(sharingManagerClassName);
            Constructor<?> constructor = sharingManagerClass.getConstructor();
            return (SharingManager) constructor.newInstance();
        } catch (ClassNotFoundException e) {
            throw new RuntimeException("Unable to load the sharing manager class " + sharingManagerClassName, e);
        } catch (NoSuchMethodException | SecurityException e) {
            throw new RuntimeException("Failed to find no-arg constructor for " + sharingManagerClassName, e);
        } catch (InstantiationException | IllegalAccessException | IllegalArgumentException
                | InvocationTargetException e) {
            // Fixed message typo: "Failed to instantiated" -> "Failed to instantiate".
            throw new RuntimeException("Failed to instantiate sharing manager " + sharingManagerClassName, e);
        }
    }
}
| 9,139 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/mapper/MetadataSchemaFieldMapper.java
|
package org.apache.airavata.datacatalog.api.mapper;
import org.apache.airavata.datacatalog.api.MetadataSchemaField;
import org.apache.airavata.datacatalog.api.model.MetadataSchemaEntity;
import org.apache.airavata.datacatalog.api.model.MetadataSchemaFieldEntity;
import org.apache.airavata.datacatalog.api.repository.MetadataSchemaRepository;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
/**
 * Copies field definitions between the protobuf {@code MetadataSchemaField}
 * message and the {@code MetadataSchemaFieldEntity} JPA entity.
 */
@Component
public class MetadataSchemaFieldMapper {

    @Autowired
    MetadataSchemaRepository metadataSchemaRepository;

    /** Copies the message onto the entity, resolving the owning schema by name. */
    public void mapModelToEntity(MetadataSchemaField metadataSchemaField,
            MetadataSchemaFieldEntity metadataSchemaFieldEntity) {
        // Resolve the owning schema first, then copy the scalar attributes.
        // NOTE(review): findBySchemaName presumably returns null for an
        // unknown schema name — confirm whether this should fail fast instead.
        MetadataSchemaEntity owningSchema = metadataSchemaRepository
                .findBySchemaName(metadataSchemaField.getSchemaName());
        metadataSchemaFieldEntity.setMetadataSchema(owningSchema);
        metadataSchemaFieldEntity.setFieldName(metadataSchemaField.getFieldName());
        metadataSchemaFieldEntity.setFieldValueType(metadataSchemaField.getValueType());
        metadataSchemaFieldEntity.setJsonPath(metadataSchemaField.getJsonPath());
    }

    /** Mirror of {@link #mapModelToEntity}: copies the entity onto the builder. */
    public void mapEntityToModel(MetadataSchemaFieldEntity metadataSchemaFieldEntity,
            MetadataSchemaField.Builder metadataSchemaFieldBuilder) {
        metadataSchemaFieldBuilder.setSchemaName(metadataSchemaFieldEntity.getMetadataSchema().getSchemaName());
        metadataSchemaFieldBuilder.setFieldName(metadataSchemaFieldEntity.getFieldName());
        metadataSchemaFieldBuilder.setJsonPath(metadataSchemaFieldEntity.getJsonPath());
        metadataSchemaFieldBuilder.setValueType(metadataSchemaFieldEntity.getFieldValueType());
    }
}
| 9,140 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/mapper/MetadataSchemaMapper.java
|
package org.apache.airavata.datacatalog.api.mapper;
import org.apache.airavata.datacatalog.api.MetadataSchema;
import org.apache.airavata.datacatalog.api.model.MetadataSchemaEntity;
import org.springframework.stereotype.Component;
/**
 * Copies the schema name between the protobuf {@code MetadataSchema} message
 * and the {@code MetadataSchemaEntity} JPA entity. Schema fields are handled
 * separately by {@code MetadataSchemaFieldMapper}.
 */
@Component
public class MetadataSchemaMapper {
    public void mapModelToEntity(MetadataSchema metadataSchema, MetadataSchemaEntity metadataSchemaEntity) {
        metadataSchemaEntity.setSchemaName(metadataSchema.getSchemaName());
    }
    public void mapEntityToModel(MetadataSchemaEntity metadataSchemaEntity,
            MetadataSchema.Builder metadataSchemaBuilder) {
        metadataSchemaBuilder.setSchemaName(metadataSchemaEntity.getSchemaName());
    }
}
| 9,141 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/mapper/DataProductMapper.java
|
package org.apache.airavata.datacatalog.api.mapper;
import org.apache.airavata.datacatalog.api.DataProduct;
import org.apache.airavata.datacatalog.api.exception.EntityNotFoundException;
import org.apache.airavata.datacatalog.api.model.DataProductEntity;
import org.apache.airavata.datacatalog.api.model.MetadataSchemaEntity;
import org.apache.airavata.datacatalog.api.repository.DataProductRepository;
import org.apache.airavata.datacatalog.api.repository.MetadataSchemaRepository;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
 * Map to/from
 * {@link org.apache.airavata.datacatalog.api.model.DataProductEntity}
 * <-> {@link org.apache.airavata.datacatalog.api.DataProduct}
 */
@Component
public class DataProductMapper {

    // Jackson's ObjectMapper is thread-safe once configured and relatively
    // expensive to construct; share one instance instead of allocating a new
    // mapper on every call (the previous code built one per invocation).
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    @Autowired
    DataProductRepository dataProductRepository;

    @Autowired
    MetadataSchemaRepository metadataSchemaRepository;

    @Autowired
    UserInfoMapper userInfoMapper;

    /**
     * Copies the protobuf {@code DataProduct} onto the JPA entity: name,
     * optional parent link, optional JSON metadata, and the attached metadata
     * schemas (the entity's schema list is replaced wholesale).
     *
     * @throws EntityNotFoundException if a referenced parent data product
     *                                 does not exist
     * @throws RuntimeException        if the metadata string is not valid JSON
     */
    public void mapModelToEntity(DataProduct dataProduct, DataProductEntity dataProductEntity) {
        dataProductEntity.setName(dataProduct.getName());
        if (dataProduct.hasParentDataProductId() && !dataProduct.getParentDataProductId().isEmpty()) {
            DataProductEntity parentDataProductEntity = dataProductRepository
                    .findByExternalId(dataProduct.getParentDataProductId())
                    .orElseThrow(
                            () -> new EntityNotFoundException("Could not find the parent data product with the ID: "
                                    + dataProduct.getParentDataProductId()));
            dataProductEntity.setParentDataProductEntity(parentDataProductEntity);
        }
        if (dataProduct.hasMetadata()) {
            try {
                JsonNode metadata = OBJECT_MAPPER.readTree(dataProduct.getMetadata());
                dataProductEntity.setMetadata(metadata);
            } catch (JsonProcessingException e) {
                // Include context so the failure can be traced to a record.
                throw new RuntimeException(
                        "Invalid metadata JSON for data product " + dataProduct.getName(), e);
            }
        }
        // Synchronize the list of metadata schemas: clear, then re-add.
        if (dataProductEntity.getMetadataSchemas() != null) {
            dataProductEntity.getMetadataSchemas().clear();
        }
        for (String metadataSchemaName : dataProduct.getMetadataSchemasList()) {
            // TODO: handle metadata schema not found
            // NOTE(review): findBySchemaName presumably returns null for an
            // unknown name, which would then be added as-is — confirm and
            // consider failing fast here.
            MetadataSchemaEntity metadataSchema = metadataSchemaRepository.findBySchemaName(metadataSchemaName);
            dataProductEntity.addMetadataSchema(metadataSchema);
        }
    }

    /**
     * Copies the JPA entity onto the protobuf builder. External IDs (not
     * database surrogate keys) are exposed through the API.
     *
     * @throws RuntimeException if the stored metadata cannot be serialized
     */
    public void mapEntityToModel(DataProductEntity dataProductEntity, DataProduct.Builder dataProductBuilder) {
        dataProductBuilder
                .setDataProductId(dataProductEntity.getExternalId())
                .setName(dataProductEntity.getName());
        if (dataProductEntity.getParentDataProductEntity() != null) {
            dataProductBuilder.setParentDataProductId(dataProductEntity.getParentDataProductEntity().getExternalId());
        }
        if (dataProductEntity.getMetadataSchemas() != null) {
            for (MetadataSchemaEntity metadataSchema : dataProductEntity.getMetadataSchemas()) {
                dataProductBuilder.addMetadataSchemas(metadataSchema.getSchemaName());
            }
        }
        if (dataProductEntity.getMetadata() != null) {
            try {
                dataProductBuilder.setMetadata(OBJECT_MAPPER.writeValueAsString(dataProductEntity.getMetadata()));
            } catch (JsonProcessingException e) {
                throw new RuntimeException(
                        "Failed to serialize metadata for data product " + dataProductEntity.getExternalId(), e);
            }
        }
        userInfoMapper.mapEntityToModel(dataProductEntity.getOwner(), dataProductBuilder.getOwnerBuilder());
    }
}
| 9,142 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/mapper/UserInfoMapper.java
|
package org.apache.airavata.datacatalog.api.mapper;
import org.apache.airavata.datacatalog.api.UserInfo;
import org.apache.airavata.datacatalog.api.model.UserEntity;
import org.springframework.stereotype.Component;
/**
* Map from {@link org.apache.airavata.datacatalog.api.model.UserEntity} to
* {@link org.apache.airavata.datacatalog.api.UserInfo}. For the reverse, see
* {@link org.apache.airavata.datacatalog.api.sharing.SharingManager#resolveUser(UserInfo)}
*/
@Component
public class UserInfoMapper {
    /**
     * Copies the user's external identifiers onto the protobuf builder.
     * Only external IDs are exposed over the API; the database surrogate
     * keys stay server-side.
     */
    public void mapEntityToModel(UserEntity userEntity, UserInfo.Builder userInfoBuilder) {
        userInfoBuilder
                .setUserId(userEntity.getExternalId())
                .setTenantId(userEntity.getTenant().getExternalId());
    }
}
| 9,143 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/service/DataCatalogAPIService.java
|
package org.apache.airavata.datacatalog.api.service;
import java.util.List;
import org.apache.airavata.datacatalog.api.DataCatalogAPIServiceGrpc;
import org.apache.airavata.datacatalog.api.DataProduct;
import org.apache.airavata.datacatalog.api.DataProductAddToMetadataSchemaRequest;
import org.apache.airavata.datacatalog.api.DataProductAddToMetadataSchemaResponse;
import org.apache.airavata.datacatalog.api.DataProductCreateRequest;
import org.apache.airavata.datacatalog.api.DataProductCreateResponse;
import org.apache.airavata.datacatalog.api.DataProductDeleteRequest;
import org.apache.airavata.datacatalog.api.DataProductDeleteResponse;
import org.apache.airavata.datacatalog.api.DataProductGetRequest;
import org.apache.airavata.datacatalog.api.DataProductGetResponse;
import org.apache.airavata.datacatalog.api.DataProductRemoveFromMetadataSchemaRequest;
import org.apache.airavata.datacatalog.api.DataProductRemoveFromMetadataSchemaResponse;
import org.apache.airavata.datacatalog.api.DataProductSearchRequest;
import org.apache.airavata.datacatalog.api.DataProductSearchResponse;
import org.apache.airavata.datacatalog.api.DataProductUpdateRequest;
import org.apache.airavata.datacatalog.api.DataProductUpdateResponse;
import org.apache.airavata.datacatalog.api.MetadataSchema;
import org.apache.airavata.datacatalog.api.MetadataSchemaCreateRequest;
import org.apache.airavata.datacatalog.api.MetadataSchemaCreateResponse;
import org.apache.airavata.datacatalog.api.MetadataSchemaDeleteRequest;
import org.apache.airavata.datacatalog.api.MetadataSchemaDeleteResponse;
import org.apache.airavata.datacatalog.api.MetadataSchemaField;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldCreateRequest;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldCreateResponse;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldDeleteRequest;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldDeleteResponse;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldGetRequest;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldGetResponse;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldListRequest;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldListResponse;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldUpdateRequest;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldUpdateResponse;
import org.apache.airavata.datacatalog.api.MetadataSchemaGetRequest;
import org.apache.airavata.datacatalog.api.MetadataSchemaGetResponse;
import org.apache.airavata.datacatalog.api.MetadataSchemaListRequest;
import org.apache.airavata.datacatalog.api.MetadataSchemaListResponse;
import org.apache.airavata.datacatalog.api.Permission;
import org.apache.airavata.datacatalog.api.UserInfo;
import org.apache.airavata.datacatalog.api.exception.EntityNotFoundException;
import org.apache.airavata.datacatalog.api.exception.MetadataSchemaSqlParseException;
import org.apache.airavata.datacatalog.api.exception.MetadataSchemaSqlValidateException;
import org.apache.airavata.datacatalog.api.query.MetadataSchemaQueryResult;
import org.apache.airavata.datacatalog.api.sharing.SharingManager;
import org.apache.airavata.datacatalog.api.sharing.exception.SharingException;
import org.lognet.springboot.grpc.GRpcService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import io.grpc.Status;
import io.grpc.stub.StreamObserver;
@GRpcService
public class DataCatalogAPIService extends DataCatalogAPIServiceGrpc.DataCatalogAPIServiceImplBase {
private static final Logger logger = LoggerFactory.getLogger(DataCatalogAPIService.class);
@Autowired
DataCatalogService dataCatalogService;
@Autowired
SharingManager sharingManager;
@Override
public void createDataProduct(DataProductCreateRequest request,
        StreamObserver<DataProductCreateResponse> responseObserver) {
    logger.info("Creating data product {}", request.getDataProduct());
    // Set the owner as the requesting user (any owner supplied in the
    // request body is deliberately overwritten with the caller's identity).
    DataProduct dataProduct = request.getDataProduct().toBuilder().setOwner(request.getUserInfo()).build();
    DataProduct result;
    try {
        result = dataCatalogService.createDataProduct(dataProduct);
        responseObserver.onNext(DataProductCreateResponse.newBuilder().setDataProduct(result).build());
        responseObserver.onCompleted();
    } catch (SharingException e) {
        // NOTE(review): SharingException presumably originates from
        // registering ownership with the sharing backend — confirm in
        // DataCatalogService. Reported to the client as INTERNAL.
        logger.error("Sharing error when trying to create data product", e);
        responseObserver.onError(Status.INTERNAL.withDescription(e.getMessage()).asException());
    }
}
@Override
public void updateDataProduct(DataProductUpdateRequest request,
        StreamObserver<DataProductUpdateResponse> responseObserver) {
    // check that user has access to update data product record.
    // NOTE(review): checkHasPermission (defined elsewhere in this class)
    // presumably reports the authorization failure on responseObserver
    // itself, so a false return means the RPC is already terminated — confirm.
    if (!checkHasPermission(request.getUserInfo(), request.getDataProduct(), Permission.WRITE_METADATA,
            responseObserver)) {
        return;
    }
    try {
        DataProduct savedDataProduct = dataCatalogService.updateDataProduct(request.getDataProduct());
        responseObserver.onNext(DataProductUpdateResponse.newBuilder().setDataProduct(savedDataProduct).build());
        responseObserver.onCompleted();
    } catch (EntityNotFoundException e) {
        // Unknown data product id -> gRPC NOT_FOUND with the service message.
        responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).asException());
    }
}
/**
 * Fetches a data product by id, enforcing READ_METADATA access for the
 * requesting user before returning it.
 */
@Override
public void getDataProduct(DataProductGetRequest request, StreamObserver<DataProductGetResponse> responseObserver) {
    try {
        DataProduct dataProduct = dataCatalogService.getDataProduct(request.getDataProductId());
        // check that user has READ_METADATA access on data product record
        if (!checkHasPermission(request.getUserInfo(), dataProduct, Permission.READ_METADATA, responseObserver)) {
            return;
        }
        responseObserver.onNext(DataProductGetResponse.newBuilder().setDataProduct(dataProduct).build());
        responseObserver.onCompleted();
    } catch (EntityNotFoundException e) {
        // Consistency fix: include the exception message in the status
        // description, as updateDataProduct and the metadata-schema
        // endpoints in this class already do.
        responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).asException());
    }
}
/**
 * Deletes a data product by id, enforcing WRITE_METADATA access for the
 * requesting user before removal.
 */
@Override
public void deleteDataProduct(DataProductDeleteRequest request,
        StreamObserver<DataProductDeleteResponse> responseObserver) {
    try {
        DataProduct dataProduct = dataCatalogService.getDataProduct(request.getDataProductId());
        // check that user has WRITE_METADATA access on data product record
        if (!checkHasPermission(request.getUserInfo(), dataProduct, Permission.WRITE_METADATA, responseObserver)) {
            return;
        }
        dataCatalogService.deleteDataProduct(request.getDataProductId());
        responseObserver.onNext(DataProductDeleteResponse.newBuilder().build());
        responseObserver.onCompleted();
    } catch (EntityNotFoundException e) {
        // Consistency fix: include the exception message in the status
        // description, matching the other NOT_FOUND paths in this class.
        responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).asException());
    }
}
/**
 * Attaches a metadata schema to a data product. Requires WRITE_METADATA
 * access, since attaching a schema modifies the record.
 */
@Override
public void addDataProductToMetadataSchema(DataProductAddToMetadataSchemaRequest request,
        StreamObserver<DataProductAddToMetadataSchemaResponse> responseObserver) {
    String dataProductId = request.getDataProductId();
    String schemaName = request.getSchemaName();
    try {
        // Load the record first so the permission check can run against it.
        DataProduct existing = dataCatalogService.getDataProduct(dataProductId);
        if (!checkHasPermission(request.getUserInfo(), existing, Permission.WRITE_METADATA,
                responseObserver)) {
            return;
        }
        DataProduct updated = dataCatalogService.addDataProductToMetadataSchema(dataProductId, schemaName);
        responseObserver
                .onNext(DataProductAddToMetadataSchemaResponse.newBuilder().setDataProduct(updated).build());
        responseObserver.onCompleted();
    } catch (EntityNotFoundException e) {
        // Unknown data product or schema -> NOT_FOUND with the service message.
        responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).asException());
    }
}
/**
 * Detaches a metadata schema from a data product. Requires WRITE_METADATA
 * access, since detaching a schema modifies the record.
 */
@Override
public void removeDataProductFromMetadataSchema(DataProductRemoveFromMetadataSchemaRequest request,
        StreamObserver<DataProductRemoveFromMetadataSchemaResponse> responseObserver) {
    String dataProductId = request.getDataProductId();
    String schemaName = request.getSchemaName();
    try {
        // Load the record first so the permission check can run against it.
        DataProduct existing = dataCatalogService.getDataProduct(dataProductId);
        if (!checkHasPermission(request.getUserInfo(), existing, Permission.WRITE_METADATA,
                responseObserver)) {
            return;
        }
        DataProduct updated = dataCatalogService.removeDataProductFromMetadataSchema(dataProductId, schemaName);
        responseObserver
                .onNext(DataProductRemoveFromMetadataSchemaResponse.newBuilder().setDataProduct(updated)
                        .build());
        responseObserver.onCompleted();
    } catch (EntityNotFoundException e) {
        // Unknown data product or schema -> NOT_FOUND with the service message.
        responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).asException());
    }
}
/**
 * Runs a metadata-schema SQL search on behalf of the requesting user and
 * streams back the matching data products. Parse and validation failures are
 * reported as INVALID_ARGUMENT.
 */
@Override
public void searchDataProducts(DataProductSearchRequest request,
        StreamObserver<DataProductSearchResponse> responseObserver) {
    try {
        MetadataSchemaQueryResult result = dataCatalogService.searchDataProducts(request.getUserInfo(),
                request.getSql());
        DataProductSearchResponse response = DataProductSearchResponse.newBuilder()
                .addAllDataProducts(result.dataProducts())
                .build();
        responseObserver.onNext(response);
        responseObserver.onCompleted();
    } catch (MetadataSchemaSqlParseException e) {
        responseObserver
                .onError(Status.INVALID_ARGUMENT.withDescription("Failed to parse SQL query.").asException());
    } catch (MetadataSchemaSqlValidateException e) {
        responseObserver
                .onError(Status.INVALID_ARGUMENT.withDescription("Failed to validate SQL query.").asException());
    }
}
/**
 * Returns the metadata schema with the requested name, or NOT_FOUND
 * (including the lookup failure message) if no such schema exists.
 */
@Override
public void getMetadataSchema(MetadataSchemaGetRequest request,
        StreamObserver<MetadataSchemaGetResponse> responseObserver) {
    try {
        MetadataSchema metadataSchema = dataCatalogService.getMetadataSchema(request.getSchemaName());
        responseObserver.onNext(MetadataSchemaGetResponse.newBuilder().setMetadataSchema(metadataSchema).build());
        responseObserver.onCompleted();
    } catch (EntityNotFoundException e) {
        // Include the exception message so clients see why the lookup failed,
        // consistent with the other NOT_FOUND handlers in this service.
        responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).asException());
    }
}
/** Streams back every registered metadata schema. */
@Override
public void getMetadataSchemas(MetadataSchemaListRequest request,
        StreamObserver<MetadataSchemaListResponse> responseObserver) {
    List<MetadataSchema> schemas = dataCatalogService.getMetadataSchemas();
    MetadataSchemaListResponse response = MetadataSchemaListResponse.newBuilder()
            .addAllMetadataSchemas(schemas)
            .build();
    responseObserver.onNext(response);
    responseObserver.onCompleted();
}
/** Persists a new metadata schema and echoes back the saved model. */
@Override
public void createMetadataSchema(MetadataSchemaCreateRequest request,
        StreamObserver<MetadataSchemaCreateResponse> responseObserver) {
    MetadataSchema saved = dataCatalogService.createMetadataSchema(request.getMetadataSchema());
    MetadataSchemaCreateResponse response = MetadataSchemaCreateResponse.newBuilder()
            .setMetadataSchema(saved)
            .build();
    responseObserver.onNext(response);
    responseObserver.onCompleted();
}
/**
 * Returns a single field of a metadata schema, identified by schema name and
 * field name, or NOT_FOUND (including the lookup failure message) if either
 * does not exist.
 */
@Override
public void getMetadataSchemaField(MetadataSchemaFieldGetRequest request,
        StreamObserver<MetadataSchemaFieldGetResponse> responseObserver) {
    try {
        MetadataSchemaField metadataSchemaField = dataCatalogService.getMetadataSchemaField(request.getSchemaName(),
                request.getFieldName());
        responseObserver.onNext(
                MetadataSchemaFieldGetResponse.newBuilder().setMetadataSchemaField(metadataSchemaField).build());
        responseObserver.onCompleted();
    } catch (EntityNotFoundException e) {
        // Include the exception message so clients see why the lookup failed,
        // consistent with the other NOT_FOUND handlers in this service.
        responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).asException());
    }
}
/** Persists a new metadata schema field and echoes back the saved model. */
@Override
public void createMetadataSchemaField(MetadataSchemaFieldCreateRequest request,
        StreamObserver<MetadataSchemaFieldCreateResponse> responseObserver) {
    MetadataSchemaField saved = dataCatalogService.createMetadataSchemaField(request.getMetadataSchemaField());
    MetadataSchemaFieldCreateResponse response = MetadataSchemaFieldCreateResponse.newBuilder()
            .setMetadataSchemaField(saved)
            .build();
    responseObserver.onNext(response);
    responseObserver.onCompleted();
}
/** Deletes a metadata schema (and, via the service layer, its fields). */
@Override
public void deleteMetadataSchema(MetadataSchemaDeleteRequest request,
        StreamObserver<MetadataSchemaDeleteResponse> responseObserver) {
    // TODO: check that user has write access on metadata schema
    dataCatalogService.deleteMetadataSchema(request.getMetadataSchema());
    MetadataSchemaDeleteResponse response = MetadataSchemaDeleteResponse.newBuilder().build();
    responseObserver.onNext(response);
    responseObserver.onCompleted();
}
/** Deletes a single field from a metadata schema. */
@Override
public void deleteMetadataSchemaField(MetadataSchemaFieldDeleteRequest request,
        StreamObserver<MetadataSchemaFieldDeleteResponse> responseObserver) {
    // TODO: check that user has write access on metadata schema field
    dataCatalogService.deleteMetadataSchemaField(request.getMetadataSchemaField());
    MetadataSchemaFieldDeleteResponse response = MetadataSchemaFieldDeleteResponse.newBuilder().build();
    responseObserver.onNext(response);
    responseObserver.onCompleted();
}
/** Streams back every field declared by the named metadata schema. */
@Override
public void getMetadataSchemaFields(MetadataSchemaFieldListRequest request,
        StreamObserver<MetadataSchemaFieldListResponse> responseObserver) {
    List<MetadataSchemaField> schemaFields = dataCatalogService.getMetadataSchemaFields(request.getSchemaName());
    MetadataSchemaFieldListResponse response = MetadataSchemaFieldListResponse.newBuilder()
            .addAllMetadataSchemaFields(schemaFields)
            .build();
    responseObserver.onNext(response);
    responseObserver.onCompleted();
}
/** Applies an update to an existing metadata schema field. */
@Override
public void updateMetadataSchemaField(MetadataSchemaFieldUpdateRequest request,
        StreamObserver<MetadataSchemaFieldUpdateResponse> responseObserver) {
    // TODO: check that user has write access on metadata schema field
    MetadataSchemaField saved = dataCatalogService.updateMetadataSchemaField(request.getMetadataSchemaField());
    MetadataSchemaFieldUpdateResponse response = MetadataSchemaFieldUpdateResponse.newBuilder()
            .setMetadataSchemaField(saved)
            .build();
    responseObserver.onNext(response);
    responseObserver.onCompleted();
}
/**
 * Returns true when the user holds {@code permission} on the data product.
 * On denial, emits PERMISSION_DENIED on the observer and returns false; if
 * the sharing layer itself fails, emits INTERNAL and returns false. Callers
 * must not write to the observer after a false return.
 */
private <T> boolean checkHasPermission(UserInfo userInfo, DataProduct dataProduct, Permission permission,
        StreamObserver<T> responseObserver) {
    try {
        if (sharingManager.userHasAccess(userInfo, dataProduct, permission)) {
            return true;
        }
        responseObserver.onError(Status.PERMISSION_DENIED
                .withDescription("user does not have " + permission + " permission")
                .asException());
    } catch (SharingException e) {
        responseObserver.onError(Status.INTERNAL.withDescription(e.getMessage()).asException());
    }
    return false;
}
}
| 9,144 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/service/DataCatalogService.java
|
package org.apache.airavata.datacatalog.api.service;
import java.util.List;
import org.apache.airavata.datacatalog.api.DataProduct;
import org.apache.airavata.datacatalog.api.MetadataSchema;
import org.apache.airavata.datacatalog.api.MetadataSchemaField;
import org.apache.airavata.datacatalog.api.UserInfo;
import org.apache.airavata.datacatalog.api.exception.MetadataSchemaSqlParseException;
import org.apache.airavata.datacatalog.api.exception.MetadataSchemaSqlValidateException;
import org.apache.airavata.datacatalog.api.query.MetadataSchemaQueryResult;
import org.apache.airavata.datacatalog.api.sharing.exception.SharingException;
/**
 * Transactional service layer for CRUD operations on data catalog database.
 *
 * <p>Implementations convert between persistence entities and the protobuf
 * models used by the API layer. Methods that look up a record by id/name may
 * raise an unchecked EntityNotFoundException when the record is missing (see
 * the default implementation).
 */
public interface DataCatalogService {

    /**
     * Creates a data product owned by {@code dataProduct.getOwner()}.
     *
     * @throws SharingException if the owner cannot be resolved or the initial
     *                          owner permission cannot be granted
     */
    DataProduct createDataProduct(DataProduct dataProduct) throws SharingException;

    /** Updates an existing data product, identified by its data product id. */
    DataProduct updateDataProduct(DataProduct dataProduct);

    /** Returns the data product with the given external id. */
    DataProduct getDataProduct(String dataProductId);

    /** Deletes the data product with the given external id. */
    void deleteDataProduct(String dataProductId);

    /** Associates the data product with the named metadata schema. */
    DataProduct addDataProductToMetadataSchema(String dataProductId, String schemaName);

    /** Returns the metadata schema with the given name. */
    MetadataSchema getMetadataSchema(String schemaName);

    /** Returns all registered metadata schemas. */
    List<MetadataSchema> getMetadataSchemas();

    /** Persists a new metadata schema. */
    MetadataSchema createMetadataSchema(MetadataSchema metadataSchema);

    /** Returns a single field of the named schema. */
    MetadataSchemaField getMetadataSchemaField(String schemaName, String fieldName);

    /** Persists a new field on a metadata schema. */
    MetadataSchemaField createMetadataSchemaField(MetadataSchemaField metadataSchemaField);

    /** Deletes a metadata schema together with its fields. */
    void deleteMetadataSchema(MetadataSchema metadataSchema);

    /** Deletes a single field from a metadata schema. */
    void deleteMetadataSchemaField(MetadataSchemaField metadataSchemaField);

    /** Returns all fields declared by the named schema. */
    List<MetadataSchemaField> getMetadataSchemaFields(String schemaName);

    /** Dissociates the data product from the named metadata schema. */
    DataProduct removeDataProductFromMetadataSchema(String dataProductId, String schemaName);

    /** Applies an update to an existing metadata schema field. */
    MetadataSchemaField updateMetadataSchemaField(MetadataSchemaField metadataSchemaField);

    /**
     * Executes a metadata-schema SQL search on behalf of the user.
     *
     * @throws MetadataSchemaSqlParseException    if the SQL cannot be parsed
     * @throws MetadataSchemaSqlValidateException if the SQL fails validation
     */
    MetadataSchemaQueryResult searchDataProducts(UserInfo userInfo, String sql)
            throws MetadataSchemaSqlParseException, MetadataSchemaSqlValidateException;
}
| 9,145 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/service
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/service/impl/DataCatalogServiceImpl.java
|
package org.apache.airavata.datacatalog.api.service.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import org.apache.airavata.datacatalog.api.DataProduct;
import org.apache.airavata.datacatalog.api.MetadataSchema;
import org.apache.airavata.datacatalog.api.MetadataSchemaField;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldListResponse;
import org.apache.airavata.datacatalog.api.Permission;
import org.apache.airavata.datacatalog.api.UserInfo;
import org.apache.airavata.datacatalog.api.exception.EntityNotFoundException;
import org.apache.airavata.datacatalog.api.exception.MetadataSchemaSqlParseException;
import org.apache.airavata.datacatalog.api.exception.MetadataSchemaSqlValidateException;
import org.apache.airavata.datacatalog.api.mapper.DataProductMapper;
import org.apache.airavata.datacatalog.api.mapper.MetadataSchemaFieldMapper;
import org.apache.airavata.datacatalog.api.mapper.MetadataSchemaMapper;
import org.apache.airavata.datacatalog.api.model.DataProductEntity;
import org.apache.airavata.datacatalog.api.model.MetadataSchemaEntity;
import org.apache.airavata.datacatalog.api.model.MetadataSchemaFieldEntity;
import org.apache.airavata.datacatalog.api.model.UserEntity;
import org.apache.airavata.datacatalog.api.query.MetadataSchemaQueryExecutor;
import org.apache.airavata.datacatalog.api.query.MetadataSchemaQueryResult;
import org.apache.airavata.datacatalog.api.repository.DataProductRepository;
import org.apache.airavata.datacatalog.api.repository.MetadataSchemaFieldRepository;
import org.apache.airavata.datacatalog.api.repository.MetadataSchemaRepository;
import org.apache.airavata.datacatalog.api.service.DataCatalogService;
import org.apache.airavata.datacatalog.api.sharing.SharingManager;
import org.apache.airavata.datacatalog.api.sharing.exception.SharingException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
/**
 * Transactional implementation of {@link DataCatalogService}. Performs CRUD
 * operations on data products, metadata schemas and schema fields, converting
 * between JPA entities and protobuf models via the injected mappers.
 */
@Service
@Transactional
public class DataCatalogServiceImpl implements DataCatalogService {

    // Repositories
    @Autowired
    DataProductRepository dataProductRepository;

    @Autowired
    MetadataSchemaRepository metadataSchemaRepository;

    @Autowired
    MetadataSchemaFieldRepository metadataSchemaFieldRepository;

    // Mappers (JPA entity <-> protobuf model)
    @Autowired
    DataProductMapper dataProductMapper;

    @Autowired
    MetadataSchemaMapper metadataSchemaMapper;

    @Autowired
    MetadataSchemaFieldMapper metadataSchemaFieldMapper;

    @Autowired
    MetadataSchemaQueryExecutor metadataSchemaQueryExecutor;

    @Autowired
    SharingManager sharingManager;

    /**
     * Creates a data product with a freshly generated external id and grants
     * the owner the OWNER permission on the new record.
     */
    @Override
    public DataProduct createDataProduct(DataProduct dataProduct) throws SharingException {
        UserEntity owner = sharingManager.resolveUser(dataProduct.getOwner());
        DataProductEntity dataProductEntity = new DataProductEntity();
        dataProductEntity.setExternalId(UUID.randomUUID().toString());
        dataProductEntity.setOwner(owner);
        dataProductMapper.mapModelToEntity(dataProduct, dataProductEntity);
        DataProductEntity savedDataProductEntity = dataProductRepository.save(dataProductEntity);
        DataProduct savedDataProduct = toDataProduct(savedDataProductEntity);
        sharingManager.grantPermissionToUser(dataProduct.getOwner(), savedDataProduct, Permission.OWNER,
                dataProduct.getOwner());
        return savedDataProduct;
    }

    /**
     * Updates the data product identified by {@code dataProduct.getDataProductId()}.
     *
     * @throws EntityNotFoundException if no such data product exists
     */
    @Override
    public DataProduct updateDataProduct(DataProduct dataProduct) {
        DataProductEntity dataProductEntity = findDataProductEntity(dataProduct.getDataProductId());
        dataProductMapper.mapModelToEntity(dataProduct, dataProductEntity);
        DataProductEntity savedDataProductEntity = dataProductRepository.save(dataProductEntity);
        return toDataProduct(savedDataProductEntity);
    }

    /**
     * Returns the data product with the given external id.
     *
     * @throws EntityNotFoundException if no such data product exists
     */
    @Override
    public DataProduct getDataProduct(String dataProductId) {
        DataProductEntity dataProductEntity = findDataProductEntity(dataProductId);
        return toDataProduct(dataProductEntity);
    }

    /** Deletes the data product with the given external id. */
    @Override
    public void deleteDataProduct(String dataProductId) {
        dataProductRepository.deleteByExternalId(dataProductId);
    }

    /**
     * Associates the data product with the named metadata schema.
     *
     * @throws EntityNotFoundException if the data product does not exist
     */
    @Override
    public DataProduct addDataProductToMetadataSchema(String dataProductId, String schemaName) {
        DataProductEntity dataProduct = findDataProductEntity(dataProductId);
        // TODO: handle metadata schema not found
        MetadataSchemaEntity metadataSchemaEntity = metadataSchemaRepository.findBySchemaName(schemaName);
        dataProduct.addMetadataSchema(metadataSchemaEntity);
        DataProductEntity savedDataProductEntity = dataProductRepository.save(dataProduct);
        return toDataProduct(savedDataProductEntity);
    }

    /**
     * Dissociates the data product from the named metadata schema.
     *
     * @throws EntityNotFoundException if the data product does not exist
     */
    @Override
    public DataProduct removeDataProductFromMetadataSchema(String dataProductId, String schemaName) {
        DataProductEntity dataProduct = findDataProductEntity(dataProductId);
        // TODO: handle metadata schema not found
        MetadataSchemaEntity metadataSchemaEntity = metadataSchemaRepository.findBySchemaName(schemaName);
        dataProduct.removeMetadataSchema(metadataSchemaEntity);
        DataProductEntity savedDataProductEntity = dataProductRepository.save(dataProduct);
        return toDataProduct(savedDataProductEntity);
    }

    /**
     * Executes a metadata-schema SQL search on behalf of the user via the
     * query executor.
     */
    @Override
    public MetadataSchemaQueryResult searchDataProducts(UserInfo userInfo, String sql)
            throws MetadataSchemaSqlParseException, MetadataSchemaSqlValidateException {
        try {
            UserEntity userEntity = sharingManager.resolveUser(userInfo);
            return metadataSchemaQueryExecutor.execute(userEntity, sql);
        } catch (SharingException e) {
            throw new RuntimeException("Unable to resolve " + userInfo, e);
        }
    }

    /**
     * Returns the metadata schema with the given name.
     *
     * @throws EntityNotFoundException if no such schema exists
     */
    @Override
    public MetadataSchema getMetadataSchema(String schemaName) {
        MetadataSchemaEntity metadataSchemaEntity = metadataSchemaRepository.findBySchemaName(schemaName);
        if (metadataSchemaEntity == null) {
            throw new EntityNotFoundException("No schema found with name " + schemaName);
        }
        return toMetadataSchema(metadataSchemaEntity);
    }

    /** Returns all registered metadata schemas. */
    @Override
    public List<MetadataSchema> getMetadataSchemas() {
        return metadataSchemaRepository.findAll().stream()
                .map(this::toMetadataSchema)
                .toList();
    }

    /** Persists a new metadata schema. */
    @Override
    public MetadataSchema createMetadataSchema(MetadataSchema metadataSchema) {
        MetadataSchemaEntity metadataSchemaEntity = new MetadataSchemaEntity();
        metadataSchemaMapper.mapModelToEntity(metadataSchema, metadataSchemaEntity);
        MetadataSchemaEntity savedMetadataSchemaEntity = metadataSchemaRepository.save(metadataSchemaEntity);
        return toMetadataSchema(savedMetadataSchemaEntity);
    }

    /**
     * Returns a single field of the named schema.
     *
     * @throws EntityNotFoundException if the schema or field does not exist
     */
    @Override
    public MetadataSchemaField getMetadataSchemaField(String schemaName, String fieldName) {
        MetadataSchemaFieldEntity metadataSchemaFieldEntity = metadataSchemaFieldRepository
                .findByFieldNameAndMetadataSchema_SchemaName(fieldName, schemaName);
        if (metadataSchemaFieldEntity == null) {
            throw new EntityNotFoundException("No field found in schema " + schemaName + " with name " + fieldName);
        }
        return toMetadataSchemaField(metadataSchemaFieldEntity);
    }

    /** Persists a new field on a metadata schema. */
    @Override
    public MetadataSchemaField createMetadataSchemaField(MetadataSchemaField metadataSchemaField) {
        MetadataSchemaFieldEntity metadataSchemaFieldEntity = new MetadataSchemaFieldEntity();
        metadataSchemaFieldMapper.mapModelToEntity(metadataSchemaField, metadataSchemaFieldEntity);
        MetadataSchemaFieldEntity savedMetadataSchemaFieldEntity = metadataSchemaFieldRepository
                .save(metadataSchemaFieldEntity);
        return toMetadataSchemaField(savedMetadataSchemaFieldEntity);
    }

    /** Deletes a metadata schema together with all of its fields. */
    @Override
    public void deleteMetadataSchema(MetadataSchema metadataSchema) {
        // TODO: check that user has write access on metadata schema
        // TODO: handle metadata schema not found
        MetadataSchemaEntity metadataSchemaEntity = metadataSchemaRepository
                .findBySchemaName(metadataSchema.getSchemaName());
        // Delete the fields first so no orphaned field rows remain.
        metadataSchemaFieldRepository.deleteAll(metadataSchemaEntity.getMetadataSchemaFields());
        metadataSchemaRepository.delete(metadataSchemaEntity);
    }

    /** Deletes a single field from a metadata schema. */
    @Override
    public void deleteMetadataSchemaField(MetadataSchemaField metadataSchemaField) {
        // TODO: handle metadata schema field not found
        MetadataSchemaFieldEntity metadataSchemaFieldEntity = metadataSchemaFieldRepository
                .findByFieldNameAndMetadataSchema_SchemaName(metadataSchemaField.getFieldName(),
                        metadataSchemaField.getSchemaName());
        metadataSchemaFieldRepository.delete(metadataSchemaFieldEntity);
    }

    /** Returns all fields declared by the named schema. */
    @Override
    public List<MetadataSchemaField> getMetadataSchemaFields(String schemaName) {
        // TODO: handle case where schema doesn't exist
        // Note: the previous version also created an unused
        // MetadataSchemaFieldListResponse.Builder here; that dead local has
        // been removed and the mapping now mirrors getMetadataSchemas().
        return metadataSchemaFieldRepository
                .findByMetadataSchema_SchemaName(schemaName)
                .stream()
                .map(this::toMetadataSchemaField)
                .toList();
    }

    /** Applies an update to an existing metadata schema field. */
    @Override
    public MetadataSchemaField updateMetadataSchemaField(MetadataSchemaField metadataSchemaField) {
        // TODO: handle metadata schema field not found
        MetadataSchemaFieldEntity metadataSchemaFieldEntity = metadataSchemaFieldRepository
                .findByFieldNameAndMetadataSchema_SchemaName(metadataSchemaField.getFieldName(),
                        metadataSchemaField.getSchemaName());
        metadataSchemaFieldMapper.mapModelToEntity(metadataSchemaField, metadataSchemaFieldEntity);
        MetadataSchemaFieldEntity savedMetadataSchemaFieldEntity = metadataSchemaFieldRepository
                .save(metadataSchemaFieldEntity);
        return toMetadataSchemaField(savedMetadataSchemaFieldEntity);
    }

    /** Converts a persisted data product entity to its protobuf model. */
    private DataProduct toDataProduct(DataProductEntity savedDataProductEntity) {
        DataProduct.Builder builder = DataProduct.newBuilder();
        dataProductMapper.mapEntityToModel(savedDataProductEntity, builder);
        return builder.build();
    }

    /** Converts a metadata schema entity to its protobuf model. */
    private MetadataSchema toMetadataSchema(MetadataSchemaEntity metadataSchemaEntity) {
        MetadataSchema.Builder builder = MetadataSchema.newBuilder();
        metadataSchemaMapper.mapEntityToModel(metadataSchemaEntity, builder);
        return builder.build();
    }

    /** Converts a metadata schema field entity to its protobuf model. */
    private MetadataSchemaField toMetadataSchemaField(MetadataSchemaFieldEntity metadataSchemaFieldEntity) {
        MetadataSchemaField.Builder builder = MetadataSchemaField.newBuilder();
        metadataSchemaFieldMapper.mapEntityToModel(metadataSchemaFieldEntity, builder);
        return builder.build();
    }

    /**
     * Looks up a data product entity by its external id.
     *
     * @throws EntityNotFoundException if no such data product exists
     */
    private DataProductEntity findDataProductEntity(String dataProductId) {
        return dataProductRepository
                .findByExternalId(dataProductId)
                .orElseThrow(() -> new EntityNotFoundException(
                        "Could not find a data product with the ID: " + dataProductId));
    }
}
| 9,146 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/query/MetadataSchemaQueryResult.java
|
package org.apache.airavata.datacatalog.api.query;
import java.util.List;
import org.apache.airavata.datacatalog.api.DataProduct;
/**
 * Result of a metadata-schema search query.
 *
 * @param dataProducts the data products matched by the query
 */
public record MetadataSchemaQueryResult(List<DataProduct> dataProducts) {
}
| 9,147 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/query/MetadataSchemaQueryExecutor.java
|
package org.apache.airavata.datacatalog.api.query;
import org.apache.airavata.datacatalog.api.exception.MetadataSchemaSqlParseException;
import org.apache.airavata.datacatalog.api.exception.MetadataSchemaSqlValidateException;
import org.apache.airavata.datacatalog.api.model.UserEntity;
/** Executes metadata-schema SQL queries and returns the matching data products. */
public interface MetadataSchemaQueryExecutor {

    /**
     * Parses, validates and executes the given SQL on behalf of the user.
     *
     * @param userEntity the user issuing the query
     * @param sql        the SQL text to execute
     * @return the matching data products
     * @throws MetadataSchemaSqlParseException    if the SQL cannot be parsed
     * @throws MetadataSchemaSqlValidateException if the SQL fails validation
     */
    MetadataSchemaQueryResult execute(UserEntity userEntity, String sql)
            throws MetadataSchemaSqlParseException, MetadataSchemaSqlValidateException;
}
| 9,148 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/query/MetadataSchemaQueryWriter.java
|
package org.apache.airavata.datacatalog.api.query;
import java.util.Collection;
import java.util.Map;
import org.apache.airavata.datacatalog.api.model.MetadataSchemaEntity;
import org.apache.airavata.datacatalog.api.model.UserEntity;
import org.apache.calcite.sql.SqlNode;
/** Rewrites a validated Calcite query tree into executable native SQL. */
public interface MetadataSchemaQueryWriter {
    /**
     * Rewrite the query as needed to filter against metadata schema fields.
     *
     * @param userEntity      the user on whose behalf the query runs
     *                        (presumably used for sharing/access filtering —
     *                        confirm against the implementation)
     * @param sqlNode         the validated query tree
     * @param metadataSchemas the schemas whose fields may appear in the query
     * @param tableAliases    mapping of SQL table alias to actual table
     *                        (metadata schema) name
     * @return the rewritten SQL text, executed by the caller as a native
     *         database query
     */
    String rewriteQuery(UserEntity userEntity, SqlNode sqlNode, Collection<MetadataSchemaEntity> metadataSchemas,
            Map<String, String> tableAliases);
}
| 9,149 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/query
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/query/impl/MetadataSchemaQueryExecutorImpl.java
|
package org.apache.airavata.datacatalog.api.query.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.airavata.datacatalog.api.DataProduct;
import org.apache.airavata.datacatalog.api.FieldValueType;
import org.apache.airavata.datacatalog.api.exception.MetadataSchemaSqlParseException;
import org.apache.airavata.datacatalog.api.exception.MetadataSchemaSqlValidateException;
import org.apache.airavata.datacatalog.api.mapper.DataProductMapper;
import org.apache.airavata.datacatalog.api.model.DataProductEntity;
import org.apache.airavata.datacatalog.api.model.MetadataSchemaEntity;
import org.apache.airavata.datacatalog.api.model.MetadataSchemaFieldEntity;
import org.apache.airavata.datacatalog.api.model.UserEntity;
import org.apache.airavata.datacatalog.api.query.MetadataSchemaQueryExecutor;
import org.apache.airavata.datacatalog.api.query.MetadataSchemaQueryResult;
import org.apache.airavata.datacatalog.api.query.MetadataSchemaQueryWriter;
import org.apache.airavata.datacatalog.api.repository.MetadataSchemaRepository;
import org.apache.calcite.avatica.util.Casing;
import org.apache.calcite.config.CalciteConnectionConfig;
import org.apache.calcite.jdbc.CalciteSchema;
import org.apache.calcite.prepare.CalciteCatalogReader;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rel.type.RelDataTypeFactory.Builder;
import org.apache.calcite.runtime.CalciteContextException;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.schema.impl.AbstractTable;
import org.apache.calcite.sql.SqlCall;
import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlSelect;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.parser.SqlParseException;
import org.apache.calcite.sql.parser.SqlParser;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.calcite.sql.util.SqlShuttle;
import org.apache.calcite.sql.validate.SqlValidator;
import org.apache.calcite.sql.validate.SqlValidatorUtil;
import org.apache.calcite.tools.FrameworkConfig;
import org.apache.calcite.tools.Frameworks;
import org.apache.calcite.tools.Planner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import jakarta.persistence.EntityManager;
/**
 * Calcite-based implementation of {@link MetadataSchemaQueryExecutor}.
 *
 * <p>Builds an in-memory Calcite schema describing the data_product table and
 * every registered metadata schema, parses and validates the incoming SQL
 * against it, lets the {@link MetadataSchemaQueryWriter} rewrite the query
 * into native SQL, and finally runs that SQL through the JPA entity manager.
 */
@Component
public class MetadataSchemaQueryExecutorImpl implements MetadataSchemaQueryExecutor {

    private static final Logger logger = LoggerFactory.getLogger(MetadataSchemaQueryExecutorImpl.class);

    @Autowired
    MetadataSchemaRepository metadataSchemaRepository;

    @Autowired
    MetadataSchemaQueryWriter metadataSchemaQueryWriter;

    @Autowired
    EntityManager entityManager;

    @Autowired
    DataProductMapper dataProductMapper;

    /**
     * Parses, validates, rewrites and executes the given SQL.
     *
     * @param userEntity the user issuing the query (passed through to the
     *                   query writer)
     * @param sql        the SQL text to execute
     * @return the data products returned by the rewritten native query
     * @throws MetadataSchemaSqlParseException    if Calcite cannot parse the SQL
     * @throws MetadataSchemaSqlValidateException if validation fails
     */
    @Override
    public MetadataSchemaQueryResult execute(UserEntity userEntity, String sql)
            throws MetadataSchemaSqlParseException, MetadataSchemaSqlValidateException {

        // Create a schema that contains the data_product table and all of the metadata
        // schemas
        SchemaPlus schema = Frameworks.createRootSchema(true);
        schema.add("data_product", new AbstractTable() {
            @Override
            public RelDataType getRowType(RelDataTypeFactory typeFactory) {
                Builder builder = (Builder) typeFactory.builder();
                // Columns shared by every data product record.
                return builder
                        .add("data_product_id", SqlTypeName.INTEGER)
                        .add("parent_data_product_id", SqlTypeName.INTEGER)
                        .add("external_id", SqlTypeName.VARCHAR)
                        .add("metadata", SqlTypeName.OTHER)
                        .build();
            }
        });

        // Register each metadata schema as a virtual table whose columns are
        // the common data product columns plus the schema's declared fields.
        // TODO: limit by tenant id
        List<MetadataSchemaEntity> metadataSchemas = metadataSchemaRepository.findAll();
        for (MetadataSchemaEntity metadataSchema : metadataSchemas) {
            schema.add(metadataSchema.getSchemaName(), new AbstractTable() {
                @Override
                public RelDataType getRowType(RelDataTypeFactory typeFactory) {
                    Builder builder = (Builder) typeFactory.builder();
                    // Add all of the common fields
                    builder.add("data_product_id", SqlTypeName.INTEGER)
                            .add("parent_data_product_id", SqlTypeName.INTEGER)
                            .add("external_id", SqlTypeName.VARCHAR)
                            .add("metadata", SqlTypeName.OTHER);
                    // Add all of the schema specific metadata fields
                    for (MetadataSchemaFieldEntity metadataSchemaField : metadataSchema.getMetadataSchemaFields()) {
                        builder.add(metadataSchemaField.getFieldName(),
                                getSqlTypeName(metadataSchemaField.getFieldValueType()));
                    }
                    return builder.build();
                }
            });
        }

        // Unquoted identifiers are lowercased so they match the column names
        // registered above.
        FrameworkConfig config = Frameworks.newConfigBuilder()
                .defaultSchema(schema)
                .parserConfig(SqlParser.Config.DEFAULT.withUnquotedCasing(Casing.TO_LOWER))
                .build();
        Planner planner = Frameworks.getPlanner(config);
        SqlNode sqlNode = parse(planner, sql);
        SqlValidator validator = getValidator(schema, config, planner);
        // Validate the query
        SqlNode validatedSqlNode = validate(validator, sqlNode);

        // create a mapping of table aliases to actual tables (metadata schemas)
        // For example, if query is of the form "select * from smilesdb as sm", then
        // create a mapping from sm -> smilesdb
        // TODO: may not be a SqlSelect, might be an OrderBy for example
        Map<String, String> tableAliases = new HashMap<>();
        validatedSqlNode.accept(new SqlShuttle() {
            @Override
            public SqlNode visit(SqlCall call) {
                if (call.getKind() == SqlKind.UNION) {
                    // If there are multiple UNIONS
                    for (SqlNode operand : call.getOperandList()) {
                        operand.accept(this);
                    }
                } else if (call instanceof SqlSelect) {
                    // if it's a plain select statement, visit it directly
                    visitSelect((SqlSelect) call);
                }
                return super.visit(call);
            }

            private void visitSelect(SqlSelect selectNode) {
                // Only the FROM clause can introduce table aliases.
                selectNode.getFrom().accept(new SqlShuttle() {
                    @Override
                    public SqlNode visit(SqlCall call) {
                        if (call.isA(Collections.singleton(SqlKind.AS))) {
                            // Operand 0 is the table name, operand 1 the alias.
                            SqlIdentifier first = call.operand(0);
                            SqlIdentifier second = call.operand(1);
                            tableAliases.put(second.getSimple(), first.getSimple());
                        }
                        return super.visit(call);
                    }
                });
            }
        });

        // Rewrite into native SQL and run it, mapping entity rows back to
        // protobuf models.
        String finalSql = metadataSchemaQueryWriter.rewriteQuery(userEntity, validatedSqlNode, metadataSchemas,
                tableAliases);
        logger.debug("Metadata schema query final sql: {}", finalSql);
        List<DataProductEntity> dataProductEntities = entityManager.createNativeQuery(finalSql, DataProductEntity.class)
                .getResultList();
        List<DataProduct> dataProducts = new ArrayList<>();
        for (DataProductEntity dataProductEntity : dataProductEntities) {
            org.apache.airavata.datacatalog.api.DataProduct.Builder dpBuilder = DataProduct.newBuilder();
            dataProductMapper.mapEntityToModel(dataProductEntity, dpBuilder);
            dataProducts.add(dpBuilder.build());
        }
        return new MetadataSchemaQueryResult(dataProducts);
    }

    /**
     * Builds a validator over the given schema. Identifier expansion is
     * disabled so the rewritten SQL keeps the user's original identifiers.
     */
    private SqlValidator getValidator(SchemaPlus schema, FrameworkConfig config, Planner planner) {
        CalciteConnectionConfig connectionConfig = CalciteConnectionConfig.DEFAULT;
        CalciteCatalogReader catalogReader = new CalciteCatalogReader(CalciteSchema.from(schema),
                CalciteSchema.from(schema).path(null),
                planner.getTypeFactory(), connectionConfig);
        SqlValidator validator = SqlValidatorUtil.newValidator(SqlStdOperatorTable.instance(),
                catalogReader, planner.getTypeFactory(),
                config.getSqlValidatorConfig().withIdentifierExpansion(false));
        return validator;
    }

    /** Parses the SQL, translating Calcite parse failures to the API exception. */
    SqlNode parse(Planner planner, String sql) throws MetadataSchemaSqlParseException {
        try {
            return planner.parse(sql);
        } catch (SqlParseException e) {
            throw new MetadataSchemaSqlParseException(e);
        }
    }

    /** Validates the parsed tree, translating Calcite failures to the API exception. */
    SqlNode validate(SqlValidator validator, SqlNode sqlNode) throws MetadataSchemaSqlValidateException {
        try {
            return validator.validate(sqlNode);
        } catch (CalciteContextException e) {
            throw new MetadataSchemaSqlValidateException(e);
        }
    }

    /**
     * Maps an API field value type to the Calcite SQL type used for the
     * corresponding virtual column. DATESTRING maps to TIMESTAMP.
     */
    private SqlTypeName getSqlTypeName(FieldValueType fieldValueType) {
        switch (fieldValueType) {
            case BOOLEAN:
                return SqlTypeName.BOOLEAN;
            case FLOAT:
                return SqlTypeName.FLOAT;
            case INTEGER:
                return SqlTypeName.INTEGER;
            case STRING:
                return SqlTypeName.VARCHAR;
            case DATESTRING:
                return SqlTypeName.TIMESTAMP;
            default:
                throw new RuntimeException(
                        "Unexpected fieldValueType, unable to convert to SqlTypeName: " + fieldValueType);
        }
    }
}
| 9,150 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/query
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/query/impl/PostgresqlMetadataSchemaQueryWriterImpl.java
|
package org.apache.airavata.datacatalog.api.query.impl;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Deque;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.airavata.datacatalog.api.Permission;
import org.apache.airavata.datacatalog.api.model.MetadataSchemaEntity;
import org.apache.airavata.datacatalog.api.model.MetadataSchemaFieldEntity;
import org.apache.airavata.datacatalog.api.model.UserEntity;
import org.apache.airavata.datacatalog.api.query.MetadataSchemaQueryWriter;
import org.apache.airavata.datacatalog.api.sharing.SharingManager;
import org.apache.calcite.sql.SqlBasicCall;
import org.apache.calcite.sql.SqlCall;
import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlSelect;
import org.apache.calcite.sql.dialect.PostgresqlSqlDialect;
import org.apache.calcite.sql.util.SqlShuttle;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
@Component
public class PostgresqlMetadataSchemaQueryWriterImpl implements MetadataSchemaQueryWriter {
@Autowired
SharingManager sharingManager;
private static final class MetadataSchemaFieldFilterRewriter extends SqlShuttle {
final Collection<MetadataSchemaEntity> metadataSchemas;
final Map<String, String> tableAliases;
final StringBuilder sql = new StringBuilder();
// Maintain queue of binary logical operators so we know when to
// open/close parentheses and when to add "AND" and "OR" to the query
Deque<SqlCall> binaryLogicalOperatorNodes = new ArrayDeque<>();
/** Captures the known schemas and the alias mapping used during the rewrite. */
MetadataSchemaFieldFilterRewriter(Collection<MetadataSchemaEntity> metadataSchemas,
        Map<String, String> tableAliases) {
    this.tableAliases = tableAliases;
    this.metadataSchemas = metadataSchemas;
}
/**
 * Maps a SQL identifier — either qualified ("table.field") or bare
 * ("field") — to the matching metadata schema field entity, or null when the
 * identifier does not name a declared schema field.
 */
MetadataSchemaFieldEntity resolveMetadataSchemaField(SqlIdentifier sqlIdentifier) {
    final MetadataSchemaEntity metadataSchema;
    final String fieldName;
    switch (sqlIdentifier.names.size()) {
        case 2:
            // Qualified reference: resolve the table (or alias) part first.
            metadataSchema = resolveMetadataSchema(sqlIdentifier.names.get(0));
            fieldName = sqlIdentifier.names.get(1);
            break;
        case 1:
            // TODO: just pick the first one, but in general we would need
            // to look through all of the metadata schemas to find the one
            // that this field belongs to
            metadataSchema = this.metadataSchemas.iterator().next();
            fieldName = sqlIdentifier.names.get(0);
            break;
        default:
            throw new RuntimeException("Unexpected sqlIdentifier: " + sqlIdentifier);
    }
    for (MetadataSchemaFieldEntity candidate : metadataSchema.getMetadataSchemaFields()) {
        if (candidate.getFieldName().equals(fieldName)) {
            return candidate;
        }
    }
    // If none matched, must not be a metadata schema field
    return null;
}
MetadataSchemaEntity resolveMetadataSchema(String tableOrAliasName) {
String tableName = tableOrAliasName;
if (this.tableAliases.containsKey(tableOrAliasName)) {
tableName = this.tableAliases.get(tableOrAliasName);
}
return findMetadataSchema(tableName);
}
MetadataSchemaEntity findMetadataSchema(String schemaName) {
for (MetadataSchemaEntity metadataSchema : this.metadataSchemas) {
if (metadataSchema.getSchemaName().equals(schemaName)) {
return metadataSchema;
}
}
return null;
}
public String finalizeSql() {
while (!this.binaryLogicalOperatorNodes.isEmpty()) {
this.binaryLogicalOperatorNodes.pop();
this.sql.append(" ) ");
}
return this.sql.toString();
}
@Override
public SqlNode visit(SqlCall call) {
SqlCall currentOperator = this.binaryLogicalOperatorNodes.peek();
while (currentOperator != null
&& !call.getParserPosition().overlaps(currentOperator.getParserPosition())) {
this.binaryLogicalOperatorNodes.remove();
currentOperator = this.binaryLogicalOperatorNodes.peek();
this.sql.append(" ) ");
this.sql.append(currentOperator.getOperator().toString());
this.sql.append(" ");
}
if (call.getKind() == SqlKind.NOT) {
this.sql.append(" NOT ");
} else if (call.getKind() == SqlKind.AND || call.getKind() == SqlKind.OR) {
this.binaryLogicalOperatorNodes.push(call);
this.sql.append("( ");
} else {
SqlNode sqlNode = call.getOperandList().get(0);
// TODO: this assumes that there would only ever be one metadata schema field
// and that it comes first and the second operand is a literal
if (sqlNode.isA(Set.of(SqlKind.IDENTIFIER))) {
SqlIdentifier sqlIdentifier = (SqlIdentifier) sqlNode;
MetadataSchemaFieldEntity metadataSchemaField = resolveMetadataSchemaField(sqlIdentifier);
if (metadataSchemaField != null) {
// TODO: assuming an alias
sql.append(sqlIdentifier.names.get(0));
sql.append(".");
sql.append("metadata @@ '");
sql.append(metadataSchemaField.getJsonPath());
sql.append(" ");
switch (call.getOperator().kind) {
case EQUALS:
sql.append(" == ");
break;
default:
sql.append(call.getOperator().kind.sql);
break;
}
sql.append(call.getOperandList().get(1).toSqlString(new PostgresqlSqlDialect(
PostgresqlSqlDialect.DEFAULT_CONTEXT.withLiteralQuoteString("\""))));
sql.append("'");
} else {
sql.append(call.toSqlString(PostgresqlSqlDialect.DEFAULT));
}
}
if (currentOperator != null && !(call.getParserPosition().getEndColumnNum() == currentOperator
.getParserPosition().getEndColumnNum()
&& call.getParserPosition().getEndLineNum() == currentOperator.getParserPosition()
.getEndLineNum())) {
sql.append(" ");
sql.append(currentOperator.getOperator().toString());
sql.append(" ");
}
}
return super.visit(call);
}
}
@Override
public String rewriteQuery(UserEntity userEntity, SqlNode sqlNode, Collection<MetadataSchemaEntity> metadataSchemas,
Map<String, String> tableAliases) {
return writeCommonTableExpressions(userEntity, metadataSchemas) + buildSelectStatement(sqlNode, metadataSchemas, tableAliases);
}
private String buildSelectStatement(SqlNode sqlNode, Collection<MetadataSchemaEntity> metadataSchemas,
Map<String, String> tableAliases) {
StringBuilder sb = new StringBuilder();
if (sqlNode instanceof SqlSelect) {
sb.append(" SELECT * FROM ");
sb.append(((SqlSelect) sqlNode).getFrom().toSqlString(PostgresqlSqlDialect.DEFAULT));
if (((SqlSelect) sqlNode).getWhere() != null) {
sb.append(" WHERE ");
sb.append(rewriteWhereClauseFilters(sqlNode, metadataSchemas, tableAliases));
}
} else if (sqlNode instanceof SqlBasicCall unionNode &&
((SqlBasicCall) sqlNode).getOperator().getKind() == SqlKind.UNION) {
for (int i = 0; i < unionNode.getOperandList().size(); i++) {
if (i > 0) {
sb.append(unionNode.getOperator().getName());
}
sb.append(buildSelectStatement(unionNode.getOperandList().get(i), metadataSchemas, tableAliases));
}
}
return sb.toString();
}
private String rewriteWhereClauseFilters(SqlNode sqlNode, Collection<MetadataSchemaEntity> metadataSchemas,
Map<String, String> tableAliases) {
MetadataSchemaFieldFilterRewriter filterRewriter = new MetadataSchemaFieldFilterRewriter(metadataSchemas,
tableAliases);
sqlNode.accept(filterRewriter);
return filterRewriter.finalizeSql();
}
String writeCommonTableExpressions(UserEntity userEntity, Collection<MetadataSchemaEntity> metadataSchemas) {
StringBuilder sb = new StringBuilder();
List<String> commonTableExpressions = new ArrayList<>();
for (MetadataSchemaEntity metadataSchema : metadataSchemas) {
commonTableExpressions.add(writeCommonTableExpression(userEntity, metadataSchema));
}
sb.append("WITH ");
sb.append(String.join(", ", commonTableExpressions));
return sb.toString();
}
String writeCommonTableExpression(UserEntity userEntity, MetadataSchemaEntity metadataSchemaEntity) {
StringBuilder sb = new StringBuilder();
sb.append(metadataSchemaEntity.getSchemaName());
sb.append(" AS (");
sb.append(
"select dp_.data_product_id, dp_.parent_data_product_id, dp_.external_id, dp_.name, dp_.metadata, dp_.owner_id ");
// for (MetadataSchemaFieldEntity field :
// metadataSchemaEntity.getMetadataSchemaFields()) {
// TODO: include each field as well?
// }
sb.append("from data_product dp_ ");
sb.append("inner join data_product_metadata_schema dpms_ on dpms_.data_product_id = dp_.data_product_id ");
sb.append("inner join metadata_schema ms_ on ms_.metadata_schema_id = dpms_.metadata_schema_id ");
sb.append("inner join ");
sb.append(sharingManager.getDataProductSharingView());
sb.append(" dpsv_ on dpsv_.data_product_id = dp_.data_product_id ");
sb.append("and dpsv_.user_id = ");
// TODO: change these to be bound parameters
sb.append(userEntity.getUserId());
sb.append(" and dpsv_.permission_id in (");
sb.append(Permission.OWNER.getNumber());
sb.append(",");
sb.append(Permission.READ_METADATA.getNumber());
sb.append(") ");
sb.append("where ms_.metadata_schema_id = " + metadataSchemaEntity.getMetadataSchemaId());
sb.append(")");
return sb.toString();
}
}
| 9,151 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/exception/MetadataSchemaSqlValidateException.java
|
package org.apache.airavata.datacatalog.api.exception;
/**
 * Checked exception signaling that a metadata-schema search SQL statement
 * failed validation.
 */
public class MetadataSchemaSqlValidateException extends Exception {

    // Declared explicitly for serialization stability, matching the convention
    // used by other exception types in this codebase.
    private static final long serialVersionUID = 1L;

    public MetadataSchemaSqlValidateException() {
    }

    public MetadataSchemaSqlValidateException(String message) {
        super(message);
    }

    public MetadataSchemaSqlValidateException(Throwable cause) {
        super(cause);
    }

    public MetadataSchemaSqlValidateException(String message, Throwable cause) {
        super(message, cause);
    }

    public MetadataSchemaSqlValidateException(String message, Throwable cause, boolean enableSuppression,
            boolean writableStackTrace) {
        super(message, cause, enableSuppression, writableStackTrace);
    }
}
| 9,152 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/exception/MetadataSchemaSqlParseException.java
|
package org.apache.airavata.datacatalog.api.exception;
/**
 * Checked exception signaling that a metadata-schema search SQL statement
 * could not be parsed.
 */
public class MetadataSchemaSqlParseException extends Exception {

    // Declared explicitly for serialization stability, matching the convention
    // used by other exception types in this codebase.
    private static final long serialVersionUID = 1L;

    public MetadataSchemaSqlParseException() {
    }

    public MetadataSchemaSqlParseException(String message) {
        super(message);
    }

    public MetadataSchemaSqlParseException(Throwable cause) {
        super(cause);
    }

    public MetadataSchemaSqlParseException(String message, Throwable cause) {
        super(message, cause);
    }

    public MetadataSchemaSqlParseException(String message, Throwable cause, boolean enableSuppression,
            boolean writableStackTrace) {
        super(message, cause, enableSuppression, writableStackTrace);
    }
}
| 9,153 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/server/service/src/main/java/org/apache/airavata/datacatalog/api/exception/EntityNotFoundException.java
|
package org.apache.airavata.datacatalog.api.exception;
/**
 * Unchecked exception signaling that a requested entity could not be found.
 */
public class EntityNotFoundException extends RuntimeException {

    // Declared explicitly for serialization stability, matching the convention
    // used by other exception types in this codebase.
    private static final long serialVersionUID = 1L;

    public EntityNotFoundException() {
    }

    public EntityNotFoundException(String message) {
        super(message);
    }

    public EntityNotFoundException(Throwable cause) {
        super(cause);
    }

    public EntityNotFoundException(String message, Throwable cause) {
        super(message, cause);
    }

    public EntityNotFoundException(String message, Throwable cause, boolean enableSuppression,
            boolean writableStackTrace) {
        super(message, cause, enableSuppression, writableStackTrace);
    }
}
| 9,154 |
0 |
Create_ds/airavata-data-catalog/data-catalog-api/client/src/main/java/org/apache/airavata/datacatalog/api
|
Create_ds/airavata-data-catalog/data-catalog-api/client/src/main/java/org/apache/airavata/datacatalog/api/client/DataCatalogAPIClient.java
|
package org.apache.airavata.datacatalog.api.client;
import java.text.MessageFormat;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.airavata.datacatalog.api.DataCatalogAPIServiceGrpc;
import org.apache.airavata.datacatalog.api.DataCatalogAPIServiceGrpc.DataCatalogAPIServiceBlockingStub;
import org.apache.airavata.datacatalog.api.DataProduct;
import org.apache.airavata.datacatalog.api.DataProductAddToMetadataSchemaRequest;
import org.apache.airavata.datacatalog.api.DataProductAddToMetadataSchemaResponse;
import org.apache.airavata.datacatalog.api.DataProductCreateRequest;
import org.apache.airavata.datacatalog.api.DataProductCreateResponse;
import org.apache.airavata.datacatalog.api.DataProductDeleteRequest;
import org.apache.airavata.datacatalog.api.DataProductGetRequest;
import org.apache.airavata.datacatalog.api.DataProductGetResponse;
import org.apache.airavata.datacatalog.api.DataProductRemoveFromMetadataSchemaRequest;
import org.apache.airavata.datacatalog.api.DataProductRemoveFromMetadataSchemaResponse;
import org.apache.airavata.datacatalog.api.DataProductSearchRequest;
import org.apache.airavata.datacatalog.api.DataProductSearchResponse;
import org.apache.airavata.datacatalog.api.DataProductUpdateRequest;
import org.apache.airavata.datacatalog.api.DataProductUpdateResponse;
import org.apache.airavata.datacatalog.api.FieldValueType;
import org.apache.airavata.datacatalog.api.MetadataSchema;
import org.apache.airavata.datacatalog.api.MetadataSchemaCreateRequest;
import org.apache.airavata.datacatalog.api.MetadataSchemaCreateResponse;
import org.apache.airavata.datacatalog.api.MetadataSchemaField;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldCreateRequest;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldCreateResponse;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldGetRequest;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldGetResponse;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldListRequest;
import org.apache.airavata.datacatalog.api.MetadataSchemaFieldListResponse;
import org.apache.airavata.datacatalog.api.MetadataSchemaGetRequest;
import org.apache.airavata.datacatalog.api.MetadataSchemaGetResponse;
import org.apache.airavata.datacatalog.api.MetadataSchemaListRequest;
import org.apache.airavata.datacatalog.api.MetadataSchemaListResponse;
import org.apache.airavata.datacatalog.api.UserInfo;
import io.grpc.Channel;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;;
/**
 * Blocking gRPC client for the Data Catalog API, plus a {@link #main} method
 * demonstrating data product, metadata schema, and search operations.
 */
public class DataCatalogAPIClient {

    private final DataCatalogAPIServiceBlockingStub blockingStub;

    // Demo identity; tenant id may be overridden via the CUSTOS_CLIENT_ID env var.
    String tenantId = System.getenv("CUSTOS_CLIENT_ID") != null ? System.getenv("CUSTOS_CLIENT_ID") : "demotenant";
    String userId = "demouser";
    private final UserInfo userInfo = UserInfo.newBuilder()
            .setUserId(userId)
            .setTenantId(tenantId)
            .build();

    public DataCatalogAPIClient(Channel channel) {
        blockingStub = DataCatalogAPIServiceGrpc.newBlockingStub(channel);
    }

    /** Creates a data product and returns the server-populated result. */
    public DataProduct createDataProduct(DataProduct dataProduct) {
        DataProductCreateRequest request = DataProductCreateRequest.newBuilder().setDataProduct(dataProduct)
                .setUserInfo(userInfo).build();
        DataProductCreateResponse response = blockingStub.createDataProduct(request);
        return response.getDataProduct();
    }

    /** Updates a data product and returns the updated result. */
    public DataProduct updateDataProduct(DataProduct dataProduct) {
        DataProductUpdateRequest request = DataProductUpdateRequest.newBuilder().setDataProduct(dataProduct)
                .setUserInfo(userInfo).build();
        DataProductUpdateResponse response = blockingStub.updateDataProduct(request);
        return response.getDataProduct();
    }

    /** Fetches a data product by id. */
    public DataProduct getDataProduct(String dataProductId) {
        DataProductGetRequest request = DataProductGetRequest.newBuilder().setDataProductId(dataProductId)
                .setUserInfo(userInfo).build();
        DataProductGetResponse response = blockingStub.getDataProduct(request);
        return response.getDataProduct();
    }

    /** Deletes a data product by id. */
    public void deleteDataProduct(String dataProductId) {
        DataProductDeleteRequest request = DataProductDeleteRequest.newBuilder().setDataProductId(dataProductId)
                .setUserInfo(userInfo).build();
        blockingStub.deleteDataProduct(request);
    }

    /** Creates a metadata schema and returns the server-populated result. */
    public MetadataSchema createMetadataSchema(MetadataSchema metadataSchema) {
        MetadataSchemaCreateRequest request = MetadataSchemaCreateRequest.newBuilder().setMetadataSchema(metadataSchema)
                .build();
        MetadataSchemaCreateResponse response = blockingStub.createMetadataSchema(request);
        return response.getMetadataSchema();
    }

    /**
     * Fetches a metadata schema by name.
     *
     * @return the schema, or {@code null} if no schema with that name exists
     */
    public MetadataSchema getMetadataSchema(String schemaName) {
        MetadataSchemaGetRequest request = MetadataSchemaGetRequest.newBuilder().setSchemaName(schemaName).build();
        try {
            MetadataSchemaGetResponse response = blockingStub.getMetadataSchema(request);
            return response.getMetadataSchema();
        } catch (StatusRuntimeException e) {
            // BUGFIX: gRPC Status does not override equals and getStatus() may
            // carry a description, so reference comparison with the
            // Status.NOT_FOUND constant fails; compare status codes instead.
            if (e.getStatus().getCode() == Status.Code.NOT_FOUND) {
                return null;
            }
            throw e;
        }
    }

    /** Lists all metadata schemas visible to the demo user. */
    public List<MetadataSchema> getMetadataSchemas() {
        MetadataSchemaListRequest request = MetadataSchemaListRequest.newBuilder().setUserInfo(userInfo).build();
        MetadataSchemaListResponse response = blockingStub.getMetadataSchemas(request);
        return response.getMetadataSchemasList();
    }

    /**
     * Fetches a metadata schema field by schema and field name.
     *
     * @return the field, or {@code null} if it does not exist
     */
    public MetadataSchemaField getMetadataSchemaField(String schemaName, String fieldName) {
        MetadataSchemaFieldGetRequest request = MetadataSchemaFieldGetRequest.newBuilder().setSchemaName(schemaName)
                .setFieldName(fieldName).build();
        try {
            MetadataSchemaFieldGetResponse response = blockingStub.getMetadataSchemaField(request);
            return response.getMetadataSchemaField();
        } catch (StatusRuntimeException e) {
            // BUGFIX: compare status codes, not Status instances (see
            // getMetadataSchema for rationale).
            if (e.getStatus().getCode() == Status.Code.NOT_FOUND) {
                return null;
            }
            throw e;
        }
    }

    /** Creates a metadata schema field and returns the server-populated result. */
    public MetadataSchemaField createMetadataSchemaField(MetadataSchemaField metadataSchemaField) {
        MetadataSchemaFieldCreateRequest request = MetadataSchemaFieldCreateRequest.newBuilder()
                .setMetadataSchemaField(metadataSchemaField).build();
        MetadataSchemaFieldCreateResponse response = blockingStub.createMetadataSchemaField(request);
        return response.getMetadataSchemaField();
    }

    /** Lists the fields declared for the given metadata schema. */
    public List<MetadataSchemaField> getMetadataSchemaFields(String schemaName) {
        MetadataSchemaFieldListRequest request = MetadataSchemaFieldListRequest.newBuilder().setSchemaName(schemaName)
                .build();
        MetadataSchemaFieldListResponse response = blockingStub.getMetadataSchemaFields(request);
        return response.getMetadataSchemaFieldsList();
    }

    /** Associates a data product with a metadata schema. */
    public DataProduct addDataProductToMetadataSchema(String dataProductId, String schemaName) {
        DataProductAddToMetadataSchemaRequest request = DataProductAddToMetadataSchemaRequest.newBuilder()
                .setDataProductId(dataProductId).setSchemaName(schemaName).setUserInfo(userInfo).build();
        DataProductAddToMetadataSchemaResponse response = blockingStub.addDataProductToMetadataSchema(request);
        return response.getDataProduct();
    }

    /** Removes a data product's association with a metadata schema. */
    public DataProduct removeDataProductFromMetadataSchema(String dataProductId, String schemaName) {
        DataProductRemoveFromMetadataSchemaRequest request = DataProductRemoveFromMetadataSchemaRequest.newBuilder()
                .setDataProductId(dataProductId).setSchemaName(schemaName).setUserInfo(userInfo).build();
        DataProductRemoveFromMetadataSchemaResponse response = blockingStub
                .removeDataProductFromMetadataSchema(request);
        return response.getDataProduct();
    }

    /** Runs a SQL search over metadata-schema CTEs and returns matching products. */
    public List<DataProduct> searchDataProducts(String sql) {
        DataProductSearchRequest request = DataProductSearchRequest.newBuilder().setSql(sql).setUserInfo(userInfo)
                .build();
        DataProductSearchResponse response = blockingStub.searchDataProducts(request);
        return response.getDataProductsList();
    }

    /** Demo driver exercising the API against a local server on port 6565. */
    public static void main(String[] args) throws InterruptedException {
        String target = "localhost:6565";
        ManagedChannel channel = ManagedChannelBuilder.forTarget(target).usePlaintext().build();
        try {
            DataCatalogAPIClient client = new DataCatalogAPIClient(channel);
            DataProduct parentDataProduct = DataProduct.newBuilder().setName("parent dp").build();
            DataProduct parentResult = client.createDataProduct(parentDataProduct);
            DataProduct dataProduct = DataProduct.newBuilder().setName("testing").setMetadata("{\"foo\": \"bar\"}")
                    .setParentDataProductId(parentResult.getDataProductId())
                    .build();
            DataProduct result = client.createDataProduct(dataProduct);
            System.out.println(MessageFormat.format("Created data product with id [{0}]", result.getDataProductId()));
            DataProduct updatedDataProduct = result.toBuilder().setName("updated name").build();
            result = client.updateDataProduct(updatedDataProduct);
            System.out.println(MessageFormat.format("Updated data product with id [{0}] to have name [{1}]",
                    result.getDataProductId(), result.getName()));
            DataProduct retrievedDataProduct = client.getDataProduct(result.getDataProductId());
            System.out.println(MessageFormat.format("Retrieved data product with id [{0}] to have name [{1}]",
                    retrievedDataProduct.getDataProductId(), retrievedDataProduct.getName()));
            DataProduct dataProduct2 = DataProduct.newBuilder().setName("testing 2").setMetadata("{\"foo\": \"bar\"}")
                    .build();
            DataProduct result2 = client.createDataProduct(dataProduct2);
            System.out.println(
                    MessageFormat.format("Created second data product [{0}]", result2));
            client.deleteDataProduct(result2.getDataProductId());
            System.out.println(
                    MessageFormat.format("Deleted data product with id [{0}]", result2.getDataProductId()));
            // First check if metadata schema exists
            MetadataSchema metadataSchema = client.getMetadataSchema("my_schema");
            if (metadataSchema == null) {
                metadataSchema = MetadataSchema.newBuilder().setSchemaName("my_schema").build();
                metadataSchema = client.createMetadataSchema(metadataSchema);
                System.out.println(
                        MessageFormat.format("Created metadata schema with name [{0}]",
                                metadataSchema.getSchemaName()));
            } else {
                System.out.println(
                        MessageFormat.format("Found metadata schema with name [{0}]",
                                metadataSchema.getSchemaName()));
            }
            MetadataSchemaField field1 = MetadataSchemaField.newBuilder().setFieldName("field1")
                    .setJsonPath("$.field1").setValueType(FieldValueType.FLOAT)
                    .setSchemaName(metadataSchema.getSchemaName()).build();
            MetadataSchemaField field1Exists = client.getMetadataSchemaField(field1.getSchemaName(),
                    field1.getFieldName());
            if (field1Exists == null) {
                field1 = client.createMetadataSchemaField(field1);
                System.out.println(MessageFormat.format("Created metadata schema field [{0}] in schema [{1}]",
                        field1.getFieldName(), field1.getSchemaName()));
            } else {
                field1 = field1Exists;
                System.out.println(MessageFormat.format("Found metadata schema field [{0}] in schema [{1}]",
                        field1.getFieldName(), field1.getSchemaName()));
            }
            MetadataSchemaField field2 = MetadataSchemaField.newBuilder().setFieldName("field2")
                    .setJsonPath("$.field2").setValueType(FieldValueType.FLOAT)
                    .setSchemaName(metadataSchema.getSchemaName()).build();
            MetadataSchemaField field2Exists = client.getMetadataSchemaField(field2.getSchemaName(),
                    field2.getFieldName());
            if (field2Exists == null) {
                field2 = client.createMetadataSchemaField(field2);
                System.out.println(MessageFormat.format("Created metadata schema field [{0}] in schema [{1}]",
                        field2.getFieldName(), field2.getSchemaName()));
            } else {
                field2 = field2Exists;
                System.out.println(MessageFormat.format("Found metadata schema field [{0}] in schema [{1}]",
                        field2.getFieldName(), field2.getSchemaName()));
            }
            MetadataSchemaField field3 = MetadataSchemaField.newBuilder().setFieldName("field3")
                    .setJsonPath("$.field3").setValueType(FieldValueType.STRING)
                    .setSchemaName(metadataSchema.getSchemaName()).build();
            MetadataSchemaField field3Exists = client.getMetadataSchemaField(field3.getSchemaName(),
                    field3.getFieldName());
            if (field3Exists == null) {
                field3 = client.createMetadataSchemaField(field3);
                System.out.println(MessageFormat.format("Created metadata schema field [{0}] in schema [{1}]",
                        field3.getFieldName(), field3.getSchemaName()));
            } else {
                field3 = field3Exists;
                System.out.println(MessageFormat.format("Found metadata schema field [{0}] in schema [{1}]",
                        field3.getFieldName(), field3.getSchemaName()));
            }
            List<MetadataSchemaField> fields = client.getMetadataSchemaFields(metadataSchema.getSchemaName());
            System.out.println(MessageFormat.format("Found {0} fields for schema {1}", fields.size(),
                    metadataSchema.getSchemaName()));
            for (MetadataSchemaField field : fields) {
                System.out.println(MessageFormat.format("-> field {0}", field.getFieldName()));
            }
            result = client.addDataProductToMetadataSchema(result.getDataProductId(), metadataSchema.getSchemaName());
            System.out.println(MessageFormat.format("Added data product [{0}] to metadata schema [{1}]",
                    result.getDataProductId(), metadataSchema.getSchemaName()));
            result = client.removeDataProductFromMetadataSchema(result.getDataProductId(),
                    metadataSchema.getSchemaName());
            System.out.println(MessageFormat.format("Removed data product [{0}] from metadata schema [{1}]",
                    result.getDataProductId(), metadataSchema.getSchemaName()));
            // Create data product that belongs to my_schema schema
            DataProduct dataProduct3 = DataProduct.newBuilder()
                    .setName("testing 3")
                    .setMetadata("{\"field3\": \"bar\", \"field1\": 10}")
                    .addMetadataSchemas("my_schema")
                    .build();
            DataProduct result3 = client.createDataProduct(dataProduct3);
            System.out.println(
                    MessageFormat.format("Created third data product [{0}], supporting schemas [{1}]",
                            result3.getDataProductId(), result3.getMetadataSchemasList()));
            // Create another data product that belongs to my_schema schema, but with
            // different "field3" and "field1" values
            DataProduct dataProduct4 = DataProduct.newBuilder()
                    .setName("testing 4")
                    .setMetadata("{\"field3\": \"baz\", \"field1\": 2}")
                    .addMetadataSchemas("my_schema")
                    .build();
            client.createDataProduct(dataProduct4);
            List<DataProduct> searchResults = client.searchDataProducts("""
                    select * from my_schema where field3 = 'bar'
                    """);
            System.out.println(searchResults);
            searchResults = client.searchDataProducts("""
                    select * from my_schema where (field1 < 5 or field3 = 'bar') and field1 > 0
                    and external_id = 'fff'
                    """);
            // searchResults = client.searchDataProducts("""
            // select * from my_schema where not (field1 < 5 or field3 = 'bar')
            // """);
            System.out.println("Shouldn't match anything: " + searchResults);
            // MetadataSchemas retrieval
            MetadataSchema exp_schema = client.getMetadataSchema("exp_schema");
            if (exp_schema == null) {
                exp_schema = MetadataSchema.newBuilder().setSchemaName("exp_schema").build();
                exp_schema = client.createMetadataSchema(exp_schema);
                System.out.println(MessageFormat.format("Created metadata schema with name [{0}]",
                        exp_schema.getSchemaName()));
            }
            List<MetadataSchema> metadataSchemas = client.getMetadataSchemas();
            System.out.println("Metadata schema list: " + metadataSchemas);
            // Retrieve data products belonging to different schemas
            // Create data product that belongs to both my_schema and exp_schema
            DataProduct dataProduct5 = DataProduct.newBuilder()
                    .setName("exp-schema testing5")
                    .setMetadata("{\"field3\": \"bar\", \"field1\": 10}")
                    .addMetadataSchemas("my_schema")
                    .addMetadataSchemas("exp_schema")
                    .build();
            client.createDataProduct(dataProduct5);
            // Create data product that belongs to exp_schema
            DataProduct dataProduct6 = DataProduct.newBuilder()
                    .setName("exp-schema testing6")
                    .setMetadata("{\"field3\": \"bar\", \"field1\": 10}")
                    .addMetadataSchemas("exp_schema")
                    .build();
            // BUGFIX: previously re-submitted dataProduct5 here, leaving
            // dataProduct6 unused and the exp_schema-only product uncreated.
            client.createDataProduct(dataProduct6);
            // Get the *distinct* data products that belong to both 'my_schema' and 'exp_schema'
            List<DataProduct> searchResultsUnion = client.searchDataProducts("""
                    select data_product_id from my_schema union distinct select data_product_id from exp_schema
                    """);
            // Get the data products that belong to both 'my_schema' and 'exp_schema'
            List<DataProduct> searchResultsUnionAll = client.searchDataProducts("""
                    select data_product_id from my_schema union all select data_product_id from exp_schema
                    """);
            System.out.println(MessageFormat.format("UNION search result count: [{0}], UNION ALL search result count: [{1}]. Should be different",
                    searchResultsUnion.size(), searchResultsUnionAll.size()));
        } finally {
            channel.shutdownNow().awaitTermination(5, TimeUnit.SECONDS);
        }
    }
}
| 9,155 |
0 |
Create_ds/helix/helix-common/src/main/java/org/apache
|
Create_ds/helix/helix-common/src/main/java/org/apache/helix/ZNRecord.java
|
package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
/**
 * @deprecated
 * Please use {@link org.apache.helix.zookeeper.datamodel.ZNRecord}
 * in zookeeper-api instead.
 * <p>
 * Generic Record Format to store data at a Node This can be used to store
 * simpleFields mapFields listFields
 */
@Deprecated
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
public class ZNRecord extends org.apache.helix.zookeeper.datamodel.ZNRecord {
  /** Creates a record with the given id; delegates to the zookeeper-api implementation. */
  public ZNRecord(String id) {
    super(id);
  }

  /** Copy constructor; delegates to the zookeeper-api implementation. */
  public ZNRecord(org.apache.helix.zookeeper.datamodel.ZNRecord record) {
    super(record);
  }

  /** Copy constructor that assigns a new id; delegates to the zookeeper-api implementation. */
  public ZNRecord(org.apache.helix.zookeeper.datamodel.ZNRecord record, String id) {
    super(record, id);
  }
}
| 9,156 |
0 |
Create_ds/helix/helix-common/src/main/java/org/apache
|
Create_ds/helix/helix-common/src/main/java/org/apache/helix/ZNRecordDelta.java
|
package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * @deprecated
 * Please use {@code ZNRecordDelta} in zookeeper-api instead.
 * <p>
 * A ZNRecord container that specifies how it should be merged with another ZNRecord.
 */
@Deprecated
public class ZNRecordDelta {
  // Body is empty in this module; the functional implementation lives in
  // zookeeper-api (see class Javadoc).
}
| 9,157 |
0 |
Create_ds/helix/helix-common/src/main/java/org/apache
|
Create_ds/helix/helix-common/src/main/java/org/apache/helix/HelixException.java
|
package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Base class for an exception thrown by Helix due to inconsistencies caught by Helix itself.
 */
public class HelixException extends RuntimeException {
  private static final long serialVersionUID = 6558251214364526257L;

  /**
   * Create a HelixException with a detail message.
   * @param message the detail message
   */
  public HelixException(String message) {
    super(message);
  }

  /**
   * Create a HelixException wrapping an underlying cause.
   * @param cause the underlying cause
   */
  public HelixException(Throwable cause) {
    super(cause);
  }

  /**
   * Create a HelixException with a detail message and an underlying cause.
   * @param message the detail message
   * @param cause the underlying cause
   */
  public HelixException(String message, Throwable cause) {
    super(message, cause);
  }

  /**
   * Create a HelixException that can optionally turn off stack trace. Its other characteristics are
   * the same as a HelixException with a message. Suppression is disabled for this variant.
   * @param message the detail message
   * @param writableStackTrace whether or not the stack trace should be writable
   */
  public HelixException(String message, boolean writableStackTrace) {
    super(message, null, false, writableStackTrace);
  }
}
| 9,158 |
0 |
Create_ds/helix/helix-common/src/main/java/org/apache
|
Create_ds/helix/helix-common/src/main/java/org/apache/helix/SystemPropertyKeys.java
|
package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.msdcommon.constant.MetadataStoreRoutingConstants;
/**
 * Names of the JVM system properties and classpath property files that Helix
 * components read for configuration. Constants only; not instantiable.
 */
public class SystemPropertyKeys {

  // Utility holder of constants; prevent instantiation.
  private SystemPropertyKeys() {
  }

  // Used to compose default values in HelixManagerProperty
  public static final String HELIX_MANAGER_PROPERTIES = "helix-manager.properties";

  public static final String HELIX_MANAGER_VERSION = "clustermanager.version";

  // Used to compose default values in HelixCloudProperty when cloud provider is Azure
  public static final String AZURE_CLOUD_PROPERTIES = "azure-cloud.properties";

  // Task Driver
  public static final String TASK_CONFIG_LIMITATION = "helixTask.configsLimitation";

  // Task executor threadpool reset timeout in ms
  public static final String TASK_THREADPOOL_RESET_TIMEOUT = "helixTask.threadpool.resetTimeout";

  // ZKHelixManager
  public static final String CLUSTER_MANAGER_VERSION = "cluster-manager-version.properties";

  // soft constraints weight definitions
  public static final String SOFT_CONSTRAINT_WEIGHTS = "soft-constraint-weight.properties";

  public static final String FLAPPING_TIME_WINDOW = "helixmanager.flappingTimeWindow";

  // max disconnect count during the flapping time window to trigger HelixManager flapping handling
  public static final String MAX_DISCONNECT_THRESHOLD = "helixmanager.maxDisconnectThreshold";

  public static final String ZK_SESSION_TIMEOUT = "zk.session.timeout";

  public static final String ZK_CONNECTION_TIMEOUT = "zk.connection.timeout";

  @Deprecated
  public static final String ZK_REESTABLISHMENT_CONNECTION_TIMEOUT =
      "zk.connectionReEstablishment.timeout";

  public static final String ZK_WAIT_CONNECTED_TIMEOUT = "helixmanager.waitForConnectedTimeout";

  public static final String PARTICIPANT_HEALTH_REPORT_LATENCY =
      "helixmanager.participantHealthReport.reportLatency";

  // Indicate monitoring level of the HelixManager metrics
  public static final String MONITOR_LEVEL = "helixmanager.monitorLevel";

  // CallbackHandler
  public static final String ASYNC_BATCH_MODE_ENABLED = "helix.callbackhandler.isAsyncBatchModeEnabled";

  public static final String LEGACY_ASYNC_BATCH_MODE_ENABLED = "isAsyncBatchModeEnabled";

  // Controller
  public static final String CONTROLLER_MESSAGE_PURGE_DELAY = "helix.controller.stages.MessageGenerationPhase.messagePurgeDelay";

  // Message
  public static final String MESSAGE_EXPECTED_COMPLETION_PERIOD = "helix.controller.message.ExpectMessageCompletionPeriod";

  // MBean monitor for helix.
  public static final String HELIX_MONITOR_TIME_WINDOW_LENGTH_MS = "helix.monitor.slidingTimeWindow.ms";

  // Multi-ZK mode enable/disable flag
  public static final String MULTI_ZK_ENABLED = "helix.multiZkEnabled";

  // System Property Metadata Store Directory Server endpoint key
  public static final String MSDS_SERVER_ENDPOINT_KEY =
      MetadataStoreRoutingConstants.MSDS_SERVER_ENDPOINT_KEY;

  public static final String STATEUPDATEUTIL_ERROR_PERSISTENCY_ENABLED = "helix.StateUpdateUtil.errorLog.enabled";

  public static final String TASK_CURRENT_STATE_PATH_DISABLED =
      "helix.taskCurrentStatePathDisabled";
}
| 9,159 |
0 |
Create_ds/helix/helix-common/src/main/java/org/apache/helix
|
Create_ds/helix/helix-common/src/main/java/org/apache/helix/constants/InstanceConstants.java
|
package org.apache.helix.constants;
/**
 * Constants describing per-instance disable reasons and administrative
 * operations for Helix cluster instances. Pure constant holder.
 */
public class InstanceConstants {
  /** Sentinel value meaning the instance is not disabled. */
  public static final String INSTANCE_NOT_DISABLED = "INSTANCE_NOT_DISABLED";

  /** Reason an instance was disabled. */
  public enum InstanceDisabledType {
    CLOUD_EVENT,
    USER_OPERATION,
    DEFAULT_INSTANCE_DISABLE_TYPE
  }

  /** Administrative operation requested on an instance. */
  public enum InstanceOperation {
    EVACUATE, // Node will be removed after a period of time
    SWAP_IN, // New node joining for swap operation
    SWAP_OUT // Existing Node to be removed for swap operation
  }

  private InstanceConstants() {
    // Constants-only holder; prevent instantiation.
  }
}
| 9,160 |
0 |
Create_ds/helix/helix-common/src/main/java/org/apache/helix/manager/zk
|
Create_ds/helix/helix-common/src/main/java/org/apache/helix/manager/zk/serializer/PayloadSerializer.java
|
package org.apache.helix.manager.zk.serializer;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Deprecated; please use {@code PayloadSerializer} in zookeeper-api instead.
 *
 * Interface for converting back and forth between raw bytes and generic objects.
 * This type is kept only as a backward-compatible alias: it declares no members
 * of its own and simply extends the relocated interface so that existing
 * references to the old package continue to compile.
 */
@Deprecated
public interface PayloadSerializer extends org.apache.helix.zookeeper.datamodel.serializer.PayloadSerializer {
}
| 9,161 |
0 |
Create_ds/helix/helix-common/src/main/java/org/apache/helix/manager/zk
|
Create_ds/helix/helix-common/src/main/java/org/apache/helix/manager/zk/serializer/JacksonPayloadSerializer.java
|
package org.apache.helix.manager.zk.serializer;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Deprecated; please use {@code JacksonPayloadSerializer} in zookeeper-api instead.
 *
 * Serializes and deserializes data of a generic type using Jackson.
 * This class is kept only as a backward-compatible alias: it adds no behavior
 * and simply extends the relocated implementation so that existing references
 * to the old package continue to compile.
 */
@Deprecated
public class JacksonPayloadSerializer extends org.apache.helix.zookeeper.datamodel.serializer.JacksonPayloadSerializer {
}
| 9,162 |
0 |
Create_ds/helix/helix-common/src/main/java/org/apache/helix
|
Create_ds/helix/helix-common/src/main/java/org/apache/helix/datamodel/Snapshot.java
|
package org.apache.helix.datamodel;
import java.util.HashMap;
import java.util.Map;
/**
 * Base class for a simple in-memory key-value snapshot.
 *
 * Values are held in a {@code HashMap}-backed cache that subclasses may access
 * directly via the protected {@code _valueCache} field. Access is not
 * synchronized (plain {@code HashMap} backing).
 *
 * @param <K> key type of the cached entries
 * @param <V> value type of the cached entries
 */
public abstract class Snapshot<K, V> {
  /** Backing cache holding the snapshot's key-value pairs. */
  protected Map<K, V> _valueCache = new HashMap<>();

  /** Returns the cached value for {@code key}, or {@code null} if absent. */
  public V getValue(K key) {
    return _valueCache.get(key);
  }

  /** Inserts or overwrites the cached value for {@code key}. */
  public void updateValue(K key, V value) {
    _valueCache.put(key, value);
  }

  /** Returns whether the cache currently holds an entry for {@code key}. */
  public boolean containsKey(K key) {
    return _valueCache.containsKey(key);
  }
}
| 9,163 |
0 |
Create_ds/helix/metadata-store-directory-common/src/test/java/org/apache/helix/msdcommon
|
Create_ds/helix/metadata-store-directory-common/src/test/java/org/apache/helix/msdcommon/mock/TestMockMetadataStoreDirectoryServer.java
|
package org.apache.helix.msdcommon.mock;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.helix.msdcommon.constant.MetadataStoreRoutingConstants;
import org.apache.helix.msdcommon.constant.TestConstants;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestMockMetadataStoreDirectoryServer {
  /**
   * End-to-end check of the mock MSDS REST surface: all-routing-data endpoint,
   * all-realms endpoint, per-realm sharding-key endpoint, and rejection of an
   * unsupported HTTP verb. Each HTTP response is closed via try-with-resources
   * so the underlying connection is released back to the client.
   */
  @Test
  public void testMockMetadataStoreDirectoryServer() throws IOException {
    // Start MockMSDS
    String host = "localhost";
    int port = 11000;
    String endpoint = "http://" + host + ":" + port;
    String namespace = "MY-HELIX-NAMESPACE";
    MockMetadataStoreDirectoryServer server =
        new MockMetadataStoreDirectoryServer(host, port, namespace,
            TestConstants.FAKE_ROUTING_DATA);
    server.startServer();
    try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
      // Send a GET request for all routing data
      HttpGet routingDataRequest = new HttpGet(
          endpoint + MockMetadataStoreDirectoryServer.REST_PREFIX + namespace
              + MetadataStoreRoutingConstants.MSDS_GET_ALL_ROUTING_DATA_ENDPOINT);
      try (CloseableHttpResponse routingDataResponse = httpClient.execute(routingDataRequest)) {
        Map<String, Object> resultMap = MockMetadataStoreDirectoryServer.OBJECT_MAPPER
            .readValue(routingDataResponse.getEntity().getContent(), Map.class);
        List<Map<String, Object>> routingDataList =
            (List<Map<String, Object>>) resultMap.get(MetadataStoreRoutingConstants.ROUTING_DATA);
        // The set of realms in the response must match the fixture's realms.
        Collection<String> allRealms = routingDataList.stream().map(mapEntry -> (String) mapEntry
            .get(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM))
            .collect(Collectors.toSet());
        Assert.assertEquals(new HashSet<>(allRealms), TestConstants.FAKE_ROUTING_DATA.keySet());
        // Rebuild <realm -> sharding keys> from the response and compare to the fixture.
        Map<String, List<String>> retrievedRoutingData = routingDataList.stream().collect(Collectors
            .toMap(mapEntry -> (String) mapEntry
                .get(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM),
                mapEntry -> (List<String>) mapEntry
                    .get(MetadataStoreRoutingConstants.SHARDING_KEYS)));
        Assert.assertEquals(retrievedRoutingData, TestConstants.FAKE_ROUTING_DATA);
      }
      // Send a GET request for all realms
      HttpGet allRealmsRequest = new HttpGet(
          endpoint + MockMetadataStoreDirectoryServer.REST_PREFIX + namespace
              + MockMetadataStoreDirectoryServer.ZK_REALM_ENDPOINT);
      try (CloseableHttpResponse allRealmsResponse = httpClient.execute(allRealmsRequest)) {
        Map<String, Collection<String>> allRealmsMap = MockMetadataStoreDirectoryServer.OBJECT_MAPPER
            .readValue(allRealmsResponse.getEntity().getContent(), Map.class);
        Assert.assertTrue(
            allRealmsMap.containsKey(MetadataStoreRoutingConstants.METADATA_STORE_REALMS));
        Collection<String> allRealms =
            allRealmsMap.get(MetadataStoreRoutingConstants.METADATA_STORE_REALMS);
        Assert.assertEquals(allRealms, TestConstants.FAKE_ROUTING_DATA.keySet());
      }
      // Send a GET request for testZkRealm
      String testZkRealm = "zk-0";
      HttpGet shardingKeysRequest = new HttpGet(
          endpoint + MockMetadataStoreDirectoryServer.REST_PREFIX + namespace
              + MockMetadataStoreDirectoryServer.ZK_REALM_ENDPOINT + "/" + testZkRealm);
      try (CloseableHttpResponse shardingKeysResponse = httpClient.execute(shardingKeysRequest)) {
        Map<String, Object> shardingKeysMap = MockMetadataStoreDirectoryServer.OBJECT_MAPPER
            .readValue(shardingKeysResponse.getEntity().getContent(), Map.class);
        Assert.assertTrue(
            shardingKeysMap.containsKey(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM));
        Assert.assertTrue(shardingKeysMap.containsKey(MetadataStoreRoutingConstants.SHARDING_KEYS));
        String zkRealm =
            (String) shardingKeysMap.get(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM);
        Collection<String> shardingKeyList =
            (Collection) shardingKeysMap.get(MetadataStoreRoutingConstants.SHARDING_KEYS);
        Assert.assertEquals(zkRealm, testZkRealm);
        Assert.assertEquals(shardingKeyList, TestConstants.FAKE_ROUTING_DATA.get(testZkRealm));
      }
      // Try sending a POST request (not supported)
      HttpPost postRequest = new HttpPost(
          endpoint + MockMetadataStoreDirectoryServer.REST_PREFIX + namespace
              + MockMetadataStoreDirectoryServer.ZK_REALM_ENDPOINT + "/" + testZkRealm);
      try (CloseableHttpResponse postResponse = httpClient.execute(postRequest)) {
        // The mock only implements GET; anything else must be rejected with 501.
        Assert.assertEquals(postResponse.getStatusLine().getStatusCode(),
            MockMetadataStoreDirectoryServer.NOT_IMPLEMENTED);
      }
    } finally {
      // Shutdown
      server.stopServer();
    }
  }
}
| 9,164 |
0 |
Create_ds/helix/metadata-store-directory-common/src/test/java/org/apache/helix/msdcommon
|
Create_ds/helix/metadata-store-directory-common/src/test/java/org/apache/helix/msdcommon/constant/TestConstants.java
|
package org.apache.helix.msdcommon.constant;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collection;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
/**
 * Constants to be used for testing.
 */
public class TestConstants {
  // Fake routing data fixture: each ZK realm ("zk-0".."zk-2") maps to three
  // ZK-path sharding keys. Built with Guava immutable collections so tests
  // cannot mutate the shared fixture.
  public static final Map<String, Collection<String>> FAKE_ROUTING_DATA = ImmutableMap.of(
      "zk-0", ImmutableList.of("/sharding-key-0", "/sharding-key-1", "/sharding-key-2"),
      "zk-1", ImmutableList.of("/sharding-key-3", "/sharding-key-4", "/sharding-key-5"),
      "zk-2", ImmutableList.of("/sharding-key-6", "/sharding-key-7", "/sharding-key-8"));
}
| 9,165 |
0 |
Create_ds/helix/metadata-store-directory-common/src/test/java/org/apache/helix/msdcommon
|
Create_ds/helix/metadata-store-directory-common/src/test/java/org/apache/helix/msdcommon/datamodel/TestTrieRoutingData.java
|
package org.apache.helix.msdcommon.datamodel;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import org.apache.helix.msdcommon.exception.InvalidRoutingDataException;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Unit tests for {@code TrieRoutingData}: construction-time validation of routing
 * data, and lookups (mapping enumeration, realm resolution, sharding-key
 * resolution, insertion validity, key/realm containment) on a trie built by
 * {@link #testConstructionNormal()}. Lookup tests share the {@code _trie} field
 * and are ordered via TestNG's {@code dependsOnMethods}.
 */
public class TestTrieRoutingData {
  // Trie shared by the lookup tests; populated once in testConstructionNormal().
  private TrieRoutingData _trie;

  // Construction must reject null/empty routing data and realms without sharding keys.
  @Test
  public void testConstructionMissingRoutingData() {
    try {
      new TrieRoutingData(null);
      Assert.fail("Expecting InvalidRoutingDataException");
    } catch (InvalidRoutingDataException e) {
      Assert.assertTrue(e.getMessage().contains("routingData cannot be null or empty"));
    }
    try {
      new TrieRoutingData(Collections.emptyMap());
      Assert.fail("Expecting InvalidRoutingDataException");
    } catch (InvalidRoutingDataException e) {
      Assert.assertTrue(e.getMessage().contains("routingData cannot be null or empty"));
    }
    Map<String, List<String>> routingData = new HashMap<>();
    routingData.put("realmAddress", Collections.emptyList());
    try {
      new TrieRoutingData(routingData);
      Assert.fail("Expecting InvalidRoutingDataException");
    } catch (InvalidRoutingDataException e) {
      Assert.assertTrue(e.getMessage().contains("routingData needs at least 1 sharding key"));
    }
  }

  /**
   * This test case is for the situation when there's only one sharding key and it's root.
   */
  @Test
  public void testConstructionSpecialCase() {
    Map<String, List<String>> routingData = new HashMap<>();
    routingData.put("realmAddress", Collections.singletonList("/"));
    TrieRoutingData trie;
    try {
      trie = new TrieRoutingData(routingData);
      Map<String, String> result = trie.getAllMappingUnderPath("/");
      Assert.assertEquals(result.size(), 1);
      Assert.assertEquals(result.get("/"), "realmAddress");
    } catch (InvalidRoutingDataException e) {
      Assert.fail("Not expecting InvalidRoutingDataException");
    }
  }

  // A sharding key that does not start with "/" is not a valid ZK path.
  @Test
  public void testConstructionShardingKeyNoLeadingSlash() {
    Map<String, List<String>> routingData = new HashMap<>();
    routingData.put("realmAddress1", Arrays.asList("/g", "/h/i", "/h/j"));
    routingData.put("realmAddress2", Arrays.asList("b/c/d", "/b/f"));
    routingData.put("realmAddress3", Collections.singletonList("/b/c/e"));
    try {
      new TrieRoutingData(routingData);
      Assert.fail("Expecting InvalidRoutingDataException");
    } catch (InvalidRoutingDataException e) {
      Assert
          .assertTrue(e.getMessage().contains("Sharding key is not a valid Zookeeper path: b/c/d"));
    }
  }

  // Root may only be a sharding key when it is the ONLY sharding key.
  @Test
  public void testConstructionRootAsShardingKeyInvalid() {
    Map<String, List<String>> routingData = new HashMap<>();
    routingData.put("realmAddress1", Arrays.asList("/a/b", "/"));
    try {
      new TrieRoutingData(routingData);
      Assert.fail("Expecting InvalidRoutingDataException");
    } catch (InvalidRoutingDataException e) {
      Assert.assertTrue(e.getMessage()
          .contains("There exist other sharding keys. Root cannot be a sharding key."));
    }
  }

  // A sharding key cannot be nested under another sharding key (parent listed first).
  @Test
  public void testConstructionShardingKeyContainsAnother() {
    Map<String, List<String>> routingData = new HashMap<>();
    routingData.put("realmAddress1", Arrays.asList("/a/b", "/a/b/c"));
    try {
      new TrieRoutingData(routingData);
      Assert.fail("Expecting InvalidRoutingDataException");
    } catch (InvalidRoutingDataException e) {
      Assert.assertTrue(e.getMessage().contains(
          "/a/b/c cannot be a sharding key because /a/b is its parent key and is also a sharding key."));
    }
  }

  // Same conflict detected when the child key is listed before the parent key.
  @Test
  public void testConstructionShardingKeyIsAPartOfAnother() {
    Map<String, List<String>> routingData = new HashMap<>();
    routingData.put("realmAddress1", Arrays.asList("/a/b/c", "/a/b"));
    try {
      new TrieRoutingData(routingData);
      Assert.fail("Expecting InvalidRoutingDataException");
    } catch (InvalidRoutingDataException e) {
      Assert.assertTrue(e.getMessage().contains(
          "/a/b cannot be a sharding key because it is a parent key to another sharding key."));
    }
  }

  /**
   * Constructing a trie that will also be reused for other tests
   * -----<empty>
   * ------/-|--\
   * -----b--g--h
   * ----/-\---/-\
   * ---c--f--i--j
   * --/-\
   * -d--e
   * Note: "g", "i", "j" lead to "realmAddress1"; "d", "f" lead to "realmAddress2"; "e" leads to
   * "realmAddress3"
   */
  @Test
  public void testConstructionNormal() {
    Map<String, List<String>> routingData = new HashMap<>();
    routingData.put("realmAddress1", Arrays.asList("/g", "/h/i", "/h/j"));
    routingData.put("realmAddress2", Arrays.asList("/b/c/d", "/b/f"));
    routingData.put("realmAddress3", Collections.singletonList("/b/c/e"));
    try {
      _trie = new TrieRoutingData(routingData);
    } catch (InvalidRoutingDataException e) {
      Assert.fail("Not expecting InvalidRoutingDataException");
    }
  }

  // Empty string is not a valid ZK path argument for getAllMappingUnderPath.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testGetAllMappingUnderPathEmptyPath() {
    try {
      _trie.getAllMappingUnderPath("");
      Assert.fail("Expecting IllegalArgumentException");
    } catch (IllegalArgumentException e) {
      Assert.assertTrue(e.getMessage().contains("Provided path is not a valid Zookeeper path: "));
    }
  }

  // A query path without a leading "/" must be rejected.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testGetAllMappingUnderPathNoLeadingSlash() {
    try {
      _trie.getAllMappingUnderPath("test");
      Assert.fail("Expecting IllegalArgumentException");
    } catch (IllegalArgumentException e) {
      Assert
          .assertTrue(e.getMessage().contains("Provided path is not a valid Zookeeper path: test"));
    }
  }

  // Querying from root returns every sharding key -> realm mapping in the trie.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testGetAllMappingUnderPathFromRoot() {
    Map<String, String> result = _trie.getAllMappingUnderPath("/");
    Assert.assertEquals(result.size(), 6);
    Assert.assertEquals(result.get("/b/c/d"), "realmAddress2");
    Assert.assertEquals(result.get("/b/c/e"), "realmAddress3");
    Assert.assertEquals(result.get("/b/f"), "realmAddress2");
    Assert.assertEquals(result.get("/g"), "realmAddress1");
    Assert.assertEquals(result.get("/h/i"), "realmAddress1");
    Assert.assertEquals(result.get("/h/j"), "realmAddress1");
  }

  // Querying an interior node returns only the mappings in its subtree.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testGetAllMappingUnderPathFromSecondLevel() {
    Map<String, String> result = _trie.getAllMappingUnderPath("/b");
    Assert.assertEquals(result.size(), 3);
    Assert.assertEquals(result.get("/b/c/d"), "realmAddress2");
    Assert.assertEquals(result.get("/b/c/e"), "realmAddress3");
    Assert.assertEquals(result.get("/b/f"), "realmAddress2");
  }

  // Querying a leaf (an exact sharding key) returns just that one mapping.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testGetAllMappingUnderPathFromLeaf() {
    Map<String, String> result = _trie.getAllMappingUnderPath("/b/c/d");
    Assert.assertEquals(result.size(), 1);
    Assert.assertEquals(result.get("/b/c/d"), "realmAddress2");
  }

  // Querying below a leaf yields no mappings (empty result, no exception).
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testGetAllMappingUnderPathWrongPath() {
    Map<String, String> result = _trie.getAllMappingUnderPath("/b/c/d/g");
    Assert.assertEquals(result.size(), 0);
  }

  // getMetadataStoreRealm validates the path before resolving; empty path is rejected.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testGetMetadataStoreRealmEmptyPath() {
    try {
      Assert.assertEquals(_trie.getMetadataStoreRealm(""), "realmAddress2");
      Assert.fail("Expecting IllegalArgumentException");
    } catch (IllegalArgumentException e) {
      Assert.assertTrue(e.getMessage().contains("Provided path is not a valid Zookeeper path: "));
    }
  }

  // Paths without a leading "/" are rejected before resolution.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testGetMetadataStoreRealmNoSlash() {
    try {
      Assert.assertEquals(_trie.getMetadataStoreRealm("b/c/d/x/y/z"), "realmAddress2");
      Assert.fail("Expecting IllegalArgumentException");
    } catch (IllegalArgumentException e) {
      Assert.assertTrue(
          e.getMessage().contains("Provided path is not a valid Zookeeper path: b/c/d/x/y/z"));
    }
  }

  // A path below a sharding key resolves to that key's realm.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testGetMetadataStoreRealm() {
    try {
      Assert.assertEquals(_trie.getMetadataStoreRealm("/b/c/d/x/y/z"), "realmAddress2");
    } catch (NoSuchElementException e) {
      Assert.fail("Not expecting NoSuchElementException");
    }
  }

  // A path outside the trie has no sharding key and cannot be resolved.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testGetMetadataStoreRealmWrongPath() {
    try {
      _trie.getMetadataStoreRealm("/x/y/z");
      Assert.fail("Expecting NoSuchElementException");
    } catch (NoSuchElementException e) {
      Assert.assertTrue(
          e.getMessage().contains("No sharding key found within the provided path. Path: /x/y/z"));
    }
  }

  // A path that stops at an interior node (not a leaf) has no sharding key either.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testGetMetadataStoreRealmNoLeaf() {
    try {
      _trie.getMetadataStoreRealm("/b/c");
      Assert.fail("Expecting NoSuchElementException");
    } catch (NoSuchElementException e) {
      Assert.assertTrue(
          e.getMessage().contains("No sharding key found within the provided path. Path: /b/c"));
    }
  }

  // getShardingKeyInPath returns the sharding-key prefix of a longer path.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testGetShardingKeyInPath() {
    try {
      Assert.assertEquals(_trie.getShardingKeyInPath("/b/c/d/x/y/z"), "/b/c/d");
    } catch (NoSuchElementException e) {
      Assert.fail("Not expecting NoSuchElementException");
    }
  }

  @Test(dependsOnMethods = "testConstructionNormal")
  public void testGetShardingKeyInPathWrongPath() {
    try {
      _trie.getShardingKeyInPath("/x/y/z");
      Assert.fail("Expecting NoSuchElementException");
    } catch (NoSuchElementException e) {
      Assert.assertTrue(
          e.getMessage().contains("No sharding key found within the provided path. Path: /x/y/z"));
    }
  }

  @Test(dependsOnMethods = "testConstructionNormal")
  public void testGetShardingKeyInPathNoLeaf() {
    try {
      _trie.getShardingKeyInPath("/b/c");
      Assert.fail("Expecting NoSuchElementException");
    } catch (NoSuchElementException e) {
      Assert.assertTrue(
          e.getMessage().contains("No sharding key found within the provided path. Path: /b/c"));
    }
  }

  // Insertion validity: candidate keys must be valid ZK paths.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testIsShardingKeyInsertionValidNoSlash() {
    try {
      _trie.isShardingKeyInsertionValid("x/y/z");
      Assert.fail("Expecting IllegalArgumentException");
    } catch (IllegalArgumentException e) {
      Assert.assertTrue(
          e.getMessage().contains("Provided shardingKey is not a valid Zookeeper path: x/y/z"));
    }
  }

  // Root cannot be inserted once other sharding keys exist.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testIsShardingKeyInsertionValidSlashOnly() {
    Assert.assertFalse(_trie.isShardingKeyInsertionValid("/"));
  }

  // A path disjoint from all existing keys is insertable.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testIsShardingKeyInsertionValidNormal() {
    Assert.assertTrue(_trie.isShardingKeyInsertionValid("/x/y/z"));
  }

  // A parent of an existing sharding key is not insertable.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testIsShardingKeyInsertionValidParentKey() {
    Assert.assertFalse(_trie.isShardingKeyInsertionValid("/b/c"));
  }

  // An already-present sharding key is not insertable.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testIsShardingKeyInsertionValidSameKey() {
    Assert.assertFalse(_trie.isShardingKeyInsertionValid("/h/i"));
  }

  // A child of an existing sharding key is not insertable.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testIsShardingKeyInsertionValidChildKey() {
    Assert.assertFalse(_trie.isShardingKeyInsertionValid("/h/i/k"));
  }

  // containsKeyRealmPair requires both the exact key and the exact realm to match.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testContainsKeyRealmPair() {
    Assert.assertTrue(_trie.containsKeyRealmPair("/h/i", "realmAddress1"));
  }

  @Test(dependsOnMethods = "testConstructionNormal")
  public void testContainsKeyRealmPairNoKey() {
    Assert.assertFalse(_trie.containsKeyRealmPair("/h/i/k", "realmAddress1"));
  }

  @Test(dependsOnMethods = "testConstructionNormal")
  public void testContainsKeyRealmPairNoRealm() {
    Assert.assertFalse(_trie.containsKeyRealmPair("/h/i", "realmAddress0"));
  }
}
| 9,166 |
0 |
Create_ds/helix/metadata-store-directory-common/src/main/java/org/apache/helix/msdcommon
|
Create_ds/helix/metadata-store-directory-common/src/main/java/org/apache/helix/msdcommon/util/ZkValidationUtil.java
|
package org.apache.helix.msdcommon.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Utility methods for validating ZooKeeper path strings.
 */
public class ZkValidationUtil {
  /**
   * Validates whether a given path string is a valid ZK path.
   *
   * Valid matches:
   * /
   * /abc
   * /abc/abc/abc/abc
   * /abc/localhost:1234
   * /abc/def.hil
   * Invalid matches:
   * null or empty string
   * /abc/
   * /abc/abc/abc/abc/
   *
   * @param path candidate ZooKeeper path; may be null
   * @return true if and only if the path is a valid ZooKeeper path
   **/
  public static boolean isPathValid(String path) {
    // Explicit null guard: String.matches would throw NullPointerException on a
    // null receiver, but the documented contract treats null as an invalid match.
    return path != null && path.matches("^/|(/[\\w?[$&+,:;=?@#|'<>.^*()%!-]-]+)+$");
  }

  private ZkValidationUtil() {
    // Utility class; prevent instantiation.
  }
}
| 9,167 |
0 |
Create_ds/helix/metadata-store-directory-common/src/main/java/org/apache/helix/msdcommon
|
Create_ds/helix/metadata-store-directory-common/src/main/java/org/apache/helix/msdcommon/mock/MockMetadataStoreDirectoryServer.java
|
package org.apache.helix.msdcommon.mock;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.stream.Collectors;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;
import org.apache.helix.msdcommon.constant.MetadataStoreRoutingConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Mock HTTP server that serves GET of metadata store routing data only.
* Helix applications may use this to write unit/integration tests without having to set up the routing ZooKeeper and creating routing data ZNodes.
*/
public class MockMetadataStoreDirectoryServer {
private static final Logger LOG = LoggerFactory.getLogger(MockMetadataStoreDirectoryServer.class);
protected static final String REST_PREFIX = "/admin/v2/namespaces/";
protected static final String ZK_REALM_ENDPOINT =
MetadataStoreRoutingConstants.MSDS_GET_ALL_REALMS_ENDPOINT;
protected static final int NOT_IMPLEMENTED = 501;
protected static final int OK = 200;
protected static final int STOP_WAIT_SEC = 10;
protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
protected final String _hostname;
protected final int _mockServerPort;
protected final Map<String, Collection<String>> _routingDataMap;
protected final String _namespace;
protected HttpServer _server;
protected final ThreadPoolExecutor _executor =
(ThreadPoolExecutor) Executors.newFixedThreadPool(10);
protected enum SupportedHttpVerbs {
GET
}
/**
* Constructs a Mock MSDS.
* A sample GET might look like the following:
* curl localhost:11000/admin/v2/namespaces/MY-HELIX-NAMESPACE/metadata-store-realms/zk-1
* @param hostname hostname for the REST server. E.g.) "localhost"
* @param port port to use. E.g.) 11000
* @param namespace the Helix REST namespace to mock. E.g.) "MY-HELIX-NAMESPACE"
* @param routingData <ZK realm, List of ZK path sharding keys>
*/
public MockMetadataStoreDirectoryServer(String hostname, int port, String namespace,
Map<String, Collection<String>> routingData) {
if (hostname == null || hostname.isEmpty()) {
throw new IllegalArgumentException("hostname cannot be null or empty!");
}
if (port < 0 || port > 65535) {
throw new IllegalArgumentException("port is not a valid port!");
}
if (namespace == null || namespace.isEmpty()) {
throw new IllegalArgumentException("namespace cannot be null or empty!");
}
if (routingData == null || routingData.isEmpty()) {
throw new IllegalArgumentException("routingData cannot be null or empty!");
}
_hostname = hostname;
_mockServerPort = port;
_namespace = namespace;
_routingDataMap = routingData;
}
public void startServer() throws IOException {
_server = HttpServer.create(new InetSocketAddress(_hostname, _mockServerPort), 0);
generateContexts();
_server.setExecutor(_executor);
_server.start();
LOG.info(
"Started MockMetadataStoreDirectoryServer at " + _hostname + ":" + _mockServerPort + "!");
}
public void stopServer() {
if (_server != null) {
_server.stop(STOP_WAIT_SEC);
}
_executor.shutdown();
LOG.info(
"Stopped MockMetadataStoreDirectoryServer at " + _hostname + ":" + _mockServerPort + "!");
}
public String getEndpoint() {
return "http://" + _hostname + ":" + _mockServerPort + REST_PREFIX + _namespace;
}
/**
* Dynamically generates HTTP server contexts based on the routing data given.
*/
private void generateContexts() {
// Get all routing data endpoint
// Get the result to be in the MetadataStoreShardingKeysByRealm format
List<Map<String, Object>> result = _routingDataMap.entrySet().stream().map(entry -> ImmutableMap
.of(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM, entry.getKey(),
MetadataStoreRoutingConstants.SHARDING_KEYS, entry.getValue()))
.collect(Collectors.toList());
_server.createContext(
REST_PREFIX + _namespace + MetadataStoreRoutingConstants.MSDS_GET_ALL_ROUTING_DATA_ENDPOINT,
createHttpHandler(ImmutableMap
.of(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_NAMESPACE, _namespace,
MetadataStoreRoutingConstants.ROUTING_DATA, result)));
// Get all realms endpoint
_server.createContext(REST_PREFIX + _namespace + ZK_REALM_ENDPOINT, createHttpHandler(
ImmutableMap
.of(MetadataStoreRoutingConstants.METADATA_STORE_REALMS, _routingDataMap.keySet())));
// Get all sharding keys for a realm endpoint
_routingDataMap.forEach((zkRealm, shardingKeyList) -> _server
.createContext(REST_PREFIX + _namespace + ZK_REALM_ENDPOINT + "/" + zkRealm,
createHttpHandler(ImmutableMap
.of(MetadataStoreRoutingConstants.SINGLE_METADATA_STORE_REALM, zkRealm,
MetadataStoreRoutingConstants.SHARDING_KEYS, shardingKeyList))));
}
/**
 * Builds an {@link HttpHandler} that serves the given key-value pairs as a JSON document.
 * Only GET is supported; any other verb receives a 501 (NOT_IMPLEMENTED) plain-text message.
 * <p>
 * Fix: the original passed {@code htmlResponse.length()} (a char count) to
 * {@code sendResponseHeaders} while writing {@code htmlResponse.getBytes()} (platform-default
 * charset). For any non-ASCII content the declared Content-Length and the body length disagree,
 * and the encoding is platform-dependent. Encode once with UTF-8 and use the byte length.
 * @param keyValuePairs the payload to serialize as JSON on a GET request
 * @return a handler serving the payload
 */
private HttpHandler createHttpHandler(Map<String, Object> keyValuePairs) {
  return httpExchange -> {
    String htmlResponse;
    int statusCode;
    if (SupportedHttpVerbs.GET.name().equals(httpExchange.getRequestMethod())) {
      htmlResponse = OBJECT_MAPPER.writeValueAsString(keyValuePairs);
      statusCode = OK;
    } else {
      htmlResponse = httpExchange.getRequestMethod() + " is not supported!\n";
      statusCode = NOT_IMPLEMENTED;
    }
    // Encode once; header length must match the exact number of bytes written.
    byte[] payload = htmlResponse.getBytes(java.nio.charset.StandardCharsets.UTF_8);
    httpExchange.sendResponseHeaders(statusCode, payload.length);
    // try-with-resources closes (and thereby flushes) the body stream even if write fails.
    try (OutputStream outputStream = httpExchange.getResponseBody()) {
      outputStream.write(payload);
    }
  };
}
}
| 9,168 |
0 |
Create_ds/helix/metadata-store-directory-common/src/main/java/org/apache/helix/msdcommon
|
Create_ds/helix/metadata-store-directory-common/src/main/java/org/apache/helix/msdcommon/constant/MetadataStoreRoutingConstants.java
|
package org.apache.helix.msdcommon.constant;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Constants shared by the Metadata Store Directory Service (MSDS) and its clients:
 * ZooKeeper paths, JSON field names used in REST responses, REST endpoint fragments,
 * and system-property keys.
 * <p>
 * This is a non-instantiable constants holder.
 */
public class MetadataStoreRoutingConstants {
  public static final String ROUTING_DATA_PATH = "/METADATA_STORE_ROUTING_DATA";

  public static final String ROUTING_ZK_ADDRESS_KEY = "ROUTING_ZK_ADDRESS";

  // For ZK only
  public static final String ZNRECORD_LIST_FIELD_KEY = "ZK_PATH_SHARDING_KEYS";

  // Leader election ZNode for ZkRoutingDataWriter
  public static final String LEADER_ELECTION_ZNODE = "/_ZK_ROUTING_DATA_WRITER_LEADER";

  /** Field name in JSON REST response of getting all metadata store namespaces. */
  public static final String METADATA_STORE_NAMESPACES = "namespaces";

  /** Field name in JSON REST response of getting all sharding keys in a single namespace. */
  public static final String SINGLE_METADATA_STORE_NAMESPACE = "namespace";

  /** Field name in JSON REST response of getting metadata store realms in one namespace. */
  public static final String METADATA_STORE_REALMS = "realms";

  /** Field name in JSON REST response of getting sharding keys in one realm. */
  public static final String SINGLE_METADATA_STORE_REALM = "realm";

  /** Field name in JSON REST response of getting sharding keys. */
  public static final String SHARDING_KEYS = "shardingKeys";

  /** Field name in JSON REST response of getting routing data. */
  public static final String ROUTING_DATA = "routingData";

  /** Field name in JSON REST response related to one single sharding key. */
  public static final String SINGLE_SHARDING_KEY = "shardingKey";

  /**
   * Field name in JSON response of the REST endpoint getting sharding keys with prefix:
   * "GET /sharding-keys?prefix={prefix}"
   * It is used in below response as an example:
   * {
   *   "prefix": "/sharding/key",
   *   "shardingKeys": [{
   *     "realm": "testRealm2",
   *     "shardingKey": "/sharding/key/1/f"
   *   }]
   * }
   */
  public static final String SHARDING_KEY_PATH_PREFIX = "prefix";

  // System Property Metadata Store Directory Server endpoint key
  public static final String MSDS_SERVER_ENDPOINT_KEY = "metadataStoreDirectoryServerEndpoint";

  // Prefix to MSDS resource endpoints
  public static final String MSDS_NAMESPACES_URL_PREFIX = "/namespaces";

  // MSDS resource getAllRealms endpoint string
  public static final String MSDS_GET_ALL_REALMS_ENDPOINT = "/metadata-store-realms";

  // MSDS resource get all routing data endpoint string
  public static final String MSDS_GET_ALL_ROUTING_DATA_ENDPOINT = "/routing-data";

  // MSDS resource get all sharding keys endpoint string
  public static final String MSDS_GET_ALL_SHARDING_KEYS_ENDPOINT = "/sharding-keys";

  // The key for system properties that contains the hostname of the
  // MetadataStoreDirectoryService server instance
  public static final String MSDS_SERVER_HOSTNAME_KEY = "msds_hostname";

  // The key for system properties that contains the port of the
  // MetadataStoreDirectoryService server instance
  public static final String MSDS_SERVER_PORT_KEY = "msds_port";

  // This is added for helix-rest 2.0. For example, without this value, the url will be
  // "localhost:9998"; with this value, the url will be "localhost:9998/admin/v2" if this
  // value is "/admin/v2".
  public static final String MSDS_CONTEXT_URL_PREFIX_KEY = "msds_context_url_prefix";

  // Utility/constants class: prevent instantiation.
  private MetadataStoreRoutingConstants() {
  }
}
| 9,169 |
0 |
Create_ds/helix/metadata-store-directory-common/src/main/java/org/apache/helix/msdcommon
|
Create_ds/helix/metadata-store-directory-common/src/main/java/org/apache/helix/msdcommon/callback/RoutingDataListener.java
|
package org.apache.helix.msdcommon.callback;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Callback interface notified when routing data changes, so implementers can refresh any
 * internally-cached routing data for the affected namespace.
 * <p>
 * Single abstract method; may be implemented with a lambda.
 */
@FunctionalInterface
public interface RoutingDataListener {

  /**
   * Callback for updating the internally-cached routing data.
   * @param namespace the metadata store namespace whose routing data should be refreshed
   */
  void refreshRoutingData(String namespace);
}
| 9,170 |
0 |
Create_ds/helix/metadata-store-directory-common/src/main/java/org/apache/helix/msdcommon
|
Create_ds/helix/metadata-store-directory-common/src/main/java/org/apache/helix/msdcommon/datamodel/MetadataStoreRoutingData.java
|
package org.apache.helix.msdcommon.datamodel;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Map;
import java.util.NoSuchElementException;
/**
 * Read-only view of metadata store routing data: a mapping from ZooKeeper-path-style
 * "sharding keys" to the "realm addresses" (metadata store endpoints) that own them.
 * Implementations (e.g. a trie) resolve which realm a given ZK path belongs to and
 * validate prospective sharding-key insertions.
 */
public interface MetadataStoreRoutingData {
  /**
   * Given a path, return all the "metadata store sharding key-metadata store realm address" pairs
   * where the sharding keys contain the given path. For example, given "/a/b", return {"/a/b/c":
   * "realm.address.c.com:1234", "/a/b/d": "realm.address.d.com:1234"} where "a/b/c" and "a/b/d" are
   * sharding keys and the urls are realm addresses. If the path is invalid, returns an empty
   * mapping.
   * @param path - the path where the search is conducted
   * @return all "sharding key-realm address" pairs where the sharding keys contain the given
   *         path if the path is valid; empty mapping otherwise
   * @throws IllegalArgumentException - when the path is invalid
   */
  Map<String, String> getAllMappingUnderPath(String path) throws IllegalArgumentException;

  /**
   * Given a path, return the realm address corresponding to the sharding key contained in the
   * path. If the path doesn't contain a sharding key, throw NoSuchElementException.
   * @param path - the path where the search is conducted
   * @return the realm address corresponding to the sharding key contained in the path
   * @throws IllegalArgumentException - when the path is invalid
   * @throws NoSuchElementException - when the path doesn't contain a sharding key
   */
  String getMetadataStoreRealm(String path) throws IllegalArgumentException, NoSuchElementException;

  /**
   * Given a path, return the sharding key contained in the path. If the path doesn't contain a
   * sharding key, throw NoSuchElementException.
   * @param path - the path that may contain a sharding key
   * @return the sharding key contained in the path
   * @throws IllegalArgumentException - when the path is invalid
   * @throws NoSuchElementException - when the path doesn't contain a sharding key
   */
  String getShardingKeyInPath(String path) throws IllegalArgumentException, NoSuchElementException;

  /**
   * Check if the provided sharding key can be inserted to the routing data. The insertion is
   * invalid if: 1. the sharding key is a parent key to an existing sharding key; 2. the sharding
   * key has a parent key that is an existing sharding key; 3. the sharding key already exists. In
   * any of these cases, inserting the sharding key will cause ambiguity among 2 sharding keys,
   * rendering the routing data invalid.
   * @param shardingKey - the sharding key to be inserted
   * @return true if the sharding key could be inserted, false otherwise
   */
  boolean isShardingKeyInsertionValid(String shardingKey);

  /**
   * Check if the provided sharding key and realm address pair exists in the routing data.
   * @param shardingKey - the sharding key checked
   * @param realmAddress - the realm address corresponding to the key
   * @return true if the sharding key and realm address pair exist in the routing data
   */
  boolean containsKeyRealmPair(String shardingKey, String realmAddress);
}
| 9,171 |
0 |
Create_ds/helix/metadata-store-directory-common/src/main/java/org/apache/helix/msdcommon
|
Create_ds/helix/metadata-store-directory-common/src/main/java/org/apache/helix/msdcommon/datamodel/TrieRoutingData.java
|
package org.apache.helix.msdcommon.datamodel;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayDeque;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import org.apache.helix.msdcommon.exception.InvalidRoutingDataException;
import org.apache.helix.msdcommon.util.ZkValidationUtil;
/**
* This is a class that uses a data structure similar to trie to represent metadata store routing
* data. It is not exactly a trie because it in essence stores a mapping (from sharding keys to
* realm addresses) instead of pure text information; also, only the terminal nodes store meaningful
* information (realm addresses).
*/
public class TrieRoutingData implements MetadataStoreRoutingData {
private static final String DELIMITER = "/";
private final TrieNode _rootNode;
public TrieRoutingData(Map<String, List<String>> routingData)
throws InvalidRoutingDataException {
if (routingData == null || routingData.isEmpty()) {
throw new InvalidRoutingDataException("routingData cannot be null or empty");
}
if (!containsShardingKey(routingData)) {
throw new InvalidRoutingDataException("routingData needs at least 1 sharding key");
}
if (isRootShardingKey(routingData)) {
Map.Entry<String, List<String>> entry = routingData.entrySet().iterator().next();
_rootNode = new TrieNode(Collections.emptyMap(), "/", true, entry.getKey());
} else {
_rootNode = new TrieNode(new HashMap<>(), "/", false, "");
constructTrie(routingData);
}
}
public Map<String, String> getAllMappingUnderPath(String path)
throws IllegalArgumentException {
if (!ZkValidationUtil.isPathValid(path)) {
throw new IllegalArgumentException("Provided path is not a valid Zookeeper path: " + path);
}
TrieNode curNode = getLongestPrefixNodeAlongPath(path);
if (!curNode.getPath().equals(path)) {
return Collections.emptyMap();
}
Map<String, String> resultMap = new HashMap<>();
Deque<TrieNode> nodeStack = new ArrayDeque<>();
nodeStack.push(curNode);
while (!nodeStack.isEmpty()) {
curNode = nodeStack.pop();
if (curNode.isShardingKey()) {
resultMap.put(curNode.getPath(), curNode.getRealmAddress());
} else {
for (TrieNode child : curNode.getChildren().values()) {
nodeStack.push(child);
}
}
}
return resultMap;
}
public String getMetadataStoreRealm(String path)
throws IllegalArgumentException, NoSuchElementException {
if (!ZkValidationUtil.isPathValid(path)) {
throw new IllegalArgumentException("Provided path is not a valid Zookeeper path: " + path);
}
TrieNode node = getLongestPrefixNodeAlongPath(path);
if (!node.isShardingKey()) {
throw new NoSuchElementException(
"No sharding key found within the provided path. Path: " + path);
}
return node.getRealmAddress();
}
public String getShardingKeyInPath(String path)
throws IllegalArgumentException, NoSuchElementException {
if (!ZkValidationUtil.isPathValid(path)) {
throw new IllegalArgumentException("Provided path is not a valid Zookeeper path: " + path);
}
TrieNode node = getLongestPrefixNodeAlongPath(path);
if (!node.isShardingKey()) {
throw new NoSuchElementException(
"No sharding key found within the provided path. Path: " + path);
}
return node.getPath();
}
public boolean isShardingKeyInsertionValid(String shardingKey) {
if (!ZkValidationUtil.isPathValid(shardingKey)) {
throw new IllegalArgumentException(
"Provided shardingKey is not a valid Zookeeper path: " + shardingKey);
}
TrieNode node = getLongestPrefixNodeAlongPath(shardingKey);
return !node.isShardingKey() && !node.getPath().equals(shardingKey);
}
public boolean containsKeyRealmPair(String shardingKey, String realmAddress) {
if (!ZkValidationUtil.isPathValid(shardingKey)) {
throw new IllegalArgumentException(
"Provided shardingKey is not a valid Zookeeper path: " + shardingKey);
}
TrieNode node = getLongestPrefixNodeAlongPath(shardingKey);
return node.getPath().equals(shardingKey) && node.getRealmAddress().equals(realmAddress);
}
/*
* Given a path, find a trie node that represents the longest prefix of the path. For example,
* given "/a/b/c", the method starts at "/", and attempts to reach "/a", then attempts to reach
* "/a/b", then ends on "/a/b/c"; if any of the node doesn't exist, the traversal terminates and
* the last seen existing node is returned.
* Note:
* 1. When the returned TrieNode is a sharding key, it is the only sharding key along the
* provided path (the path points to this sharding key);
* 2. When the returned TrieNode is not a sharding key but it represents the provided path, the
* provided path is a prefix(parent) to a sharding key;
* 3. When the returned TrieNode is not a sharding key and it does not represent the provided
* path (meaning the traversal ended before the last node of the path is reached), the provided
* path is not associated with any sharding key and can be added as a sharding key without
* creating ambiguity cases among sharding keys.
* @param path - the path where the search is conducted
* @return a TrieNode that represents the longest prefix of the path
*/
private TrieNode getLongestPrefixNodeAlongPath(String path) {
if (path.equals(DELIMITER)) {
return _rootNode;
}
TrieNode curNode = _rootNode;
TrieNode nextNode;
for (String pathSection : path.substring(1).split(DELIMITER, 0)) {
nextNode = curNode.getChildren().get(pathSection);
if (nextNode == null) {
return curNode;
}
curNode = nextNode;
}
return curNode;
}
/*
* Checks if there is any sharding key in the routing data
* @param routingData - a mapping from "sharding keys" to "realm addresses" to be parsed into a
* trie
* @return whether there is any sharding key
*/
private boolean containsShardingKey(Map<String, List<String>> routingData) {
for (Map.Entry<String, List<String>> entry : routingData.entrySet()) {
if (entry.getValue().size() > 0) {
return true;
}
}
return false;
}
/*
* Checks for the edge case when the only sharding key in provided routing data is the delimiter.
* When this is the case, the trie is valid and contains only one node, which
* is the root node, and the root node is a leaf node with a realm address associated with it.
* @param routingData - a mapping from "sharding keys" to "realm addresses" to be parsed into a
* trie
* @return whether the edge case is true
*/
private boolean isRootShardingKey(Map<String, List<String>> routingData) {
if (routingData.size() == 1) {
for (List<String> shardingKeys : routingData.values()) {
return shardingKeys.size() == 1 && shardingKeys.get(0).equals(DELIMITER);
}
}
return false;
}
/*
* Constructs a trie based on the provided routing data. It loops through all sharding keys and
* constructs the trie in a top down manner.
* @param routingData- a mapping from "sharding keys" to "realm addresses" to be parsed into a
* trie
* @throws InvalidRoutingDataException - when there is an empty sharding key (edge case that
* always renders the routing data invalid); when there is a sharding key which already
* contains a sharding key (invalid); when there is a sharding key that is a part of
* another sharding key (invalid); when a sharding key doesn't have a leading delimiter
*/
private void constructTrie(Map<String, List<String>> routingData)
throws InvalidRoutingDataException {
for (Map.Entry<String, List<String>> entry : routingData.entrySet()) {
for (String shardingKey : entry.getValue()) {
// Missing leading delimiter is invalid
if (!ZkValidationUtil.isPathValid(shardingKey)) {
throw new InvalidRoutingDataException(
"Sharding key is not a valid Zookeeper path: " + shardingKey);
}
// Root can only be a sharding key if it's the only sharding key. Since this method is
// running, the special case has already been checked, therefore it's definitely invalid
if (shardingKey.equals(DELIMITER)) {
throw new InvalidRoutingDataException(
"There exist other sharding keys. Root cannot be a sharding key.");
}
// Locate the next delimiter
int nextDelimiterIndex = shardingKey.indexOf(DELIMITER, 1);
int prevDelimiterIndex = 0;
String keySection = shardingKey.substring(prevDelimiterIndex + 1,
nextDelimiterIndex > 0 ? nextDelimiterIndex : shardingKey.length());
TrieNode curNode = _rootNode;
TrieNode nextNode = curNode.getChildren().get(keySection);
// If the key section is not the last section yet, go in the loop; if the key section is the
// last section, exit
while (nextDelimiterIndex > 0) {
// If the node is already a leaf node, the current sharding key is invalid; if the node
// doesn't exist, construct a node and continue
if (nextNode != null && nextNode.isShardingKey()) {
throw new InvalidRoutingDataException(
shardingKey + " cannot be a sharding key because " + shardingKey
.substring(0, nextDelimiterIndex)
+ " is its parent key and is also a sharding key.");
} else if (nextNode == null) {
nextNode =
new TrieNode(new HashMap<>(), shardingKey.substring(0, nextDelimiterIndex), false,
"");
curNode.addChild(keySection, nextNode);
}
prevDelimiterIndex = nextDelimiterIndex;
nextDelimiterIndex = shardingKey.indexOf(DELIMITER, prevDelimiterIndex + 1);
keySection = shardingKey.substring(prevDelimiterIndex + 1,
nextDelimiterIndex > 0 ? nextDelimiterIndex : shardingKey.length());
curNode = nextNode;
nextNode = curNode.getChildren().get(keySection);
}
// If the last node already exists, it's a part of another sharding key, making the current
// sharding key invalid
if (nextNode != null) {
throw new InvalidRoutingDataException(shardingKey
+ " cannot be a sharding key because it is a parent key to another sharding key.");
}
nextNode = new TrieNode(new HashMap<>(), shardingKey, true, entry.getKey());
curNode.addChild(keySection, nextNode);
}
}
}
private static class TrieNode {
/*
* This field is a mapping between trie key and children nodes. For example, node "a" has
* children "ab" and "ac", therefore the keys are "b" and "c" respectively.
*/
private Map<String, TrieNode> _children;
/*
* This field states whether the path represented by the node is a sharding key
*/
private final boolean _isShardingKey;
/*
* This field contains the complete path/prefix leading to the current node. For example, the
* name of root node is "/", then the name of its child node
* is "/a", and the name of the child's child node is "/a/b".
*/
private final String _path;
/*
* This field represents the data contained in a node(which represents a path), and is only
* available to the terminal nodes.
*/
private final String _realmAddress;
TrieNode(Map<String, TrieNode> children, String path, boolean isShardingKey,
String realmAddress) {
_children = children;
_isShardingKey = isShardingKey;
_path = path;
_realmAddress = realmAddress;
}
public Map<String, TrieNode> getChildren() {
return _children;
}
public boolean isShardingKey() {
return _isShardingKey;
}
public String getPath() {
return _path;
}
public String getRealmAddress() {
return _realmAddress;
}
public void addChild(String key, TrieNode node) {
_children.put(key, node);
}
}
}
| 9,172 |
0 |
Create_ds/helix/metadata-store-directory-common/src/main/java/org/apache/helix/msdcommon
|
Create_ds/helix/metadata-store-directory-common/src/main/java/org/apache/helix/msdcommon/exception/InvalidRoutingDataException.java
|
package org.apache.helix.msdcommon.exception;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Thrown when routing data being read or parsed is malformed and is therefore invalid
 * (e.g. sharding keys that are not valid ZooKeeper paths or that are ambiguous prefixes
 * of one another).
 */
public class InvalidRoutingDataException extends Exception {
  public InvalidRoutingDataException(String info) {
    super(info);
  }

  /**
   * Variant that preserves the underlying cause so the original stack trace is not lost
   * when wrapping a lower-level failure.
   * @param info description of why the routing data is invalid
   * @param cause the underlying exception being wrapped
   */
  public InvalidRoutingDataException(String info, Throwable cause) {
    super(info, cause);
  }
}
| 9,173 |
0 |
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix
|
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix/webapp/TestHelixAdminScenariosRest.java
|
package org.apache.helix.webapp;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.ClusterDistributedController;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.manager.zk.ZKUtil;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.IdealState.IdealStateProperty;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.tools.ClusterStateVerifier;
import org.apache.helix.tools.ClusterStateVerifier.BestPossAndExtViewZkVerifier;
import org.apache.helix.tools.ClusterStateVerifier.MasterNbInExtViewVerifier;
import org.apache.helix.webapp.resources.ClusterRepresentationUtil;
import org.apache.helix.webapp.resources.InstancesResource.ListInstancesWrapper;
import org.apache.helix.webapp.resources.JsonParameters;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.restlet.Component;
import org.restlet.Request;
import org.restlet.Response;
import org.restlet.data.MediaType;
import org.restlet.data.Method;
import org.restlet.data.Reference;
import org.restlet.data.Status;
import org.restlet.representation.Representation;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
* Simulate all the admin tasks needed by using command line tool
*/
public class TestHelixAdminScenariosRest extends AdminTestBase {
// Maximum number of attempts the POST helper makes before asserting failure.
private static final int MAX_RETRIES = 5;
// NOTE(review): _adminApp and _component appear unused in the scenarios visible in this
// excerpt — confirm against the rest of the class before removing.
RestAdminApplication _adminApp;
Component _component;
// Instance-group tag values (arbitrary test data); their usage is outside this excerpt.
String _tag1 = "tag1123";
String _tag2 = "tag212334";
/**
 * Serializes the given object to pretty-printed (indented) JSON.
 * @param object the object to serialize
 * @return the indented JSON text
 */
public static String ObjectToJson(Object object)
    throws JsonGenerationException, JsonMappingException, IOException {
  ObjectMapper mapper = new ObjectMapper();
  mapper.enable(SerializationFeature.INDENT_OUTPUT);
  // writeValueAsString produces the same output as writing through a StringWriter.
  return mapper.writeValueAsString(object);
}
/**
 * Deserializes the given JSON text into an instance of the requested class.
 * @param clazz target type
 * @param jsonString JSON document to parse
 * @return the deserialized instance
 */
public static <T extends Object> T JsonToObject(Class<T> clazz, String jsonString)
    throws JsonParseException, JsonMappingException, IOException {
  // Jackson accepts the String directly; no intermediate StringReader needed.
  return new ObjectMapper().readValue(jsonString, clazz);
}
/**
 * POSTs {@code jsonParameters} to {@code url} with no extra form fields; see the
 * four-argument overload for the retry and assertion semantics.
 */
static String assertSuccessPostOperation(String url, Map<String, String> jsonParameters,
    boolean hasException) throws IOException {
  return assertSuccessPostOperation(url, jsonParameters, null, hasException);
}
/**
 * POSTs the JSON parameters (plus optional extra form fields) to the given URL, retrying up
 * to MAX_RETRIES times until a 2xx status is seen. Asserts the final attempt succeeded and
 * that the response body mentions "exception" exactly when {@code hasException} is true.
 * @return the response body of the successful (or final) attempt
 */
static String assertSuccessPostOperation(String url, Map<String, String> jsonParameters,
    Map<String, String> extraForm, boolean hasException) throws IOException {
  Reference resourceRef = new Reference(url);

  for (int attempt = 0; attempt <= MAX_RETRIES; attempt++) {
    Request request = new Request(Method.POST, resourceRef);

    // Base entity is always the serialized JSON parameters; extra form fields are appended.
    String entity =
        JsonParameters.JSON_PARAMETERS + "=" + ClusterRepresentationUtil.ObjectToJson(jsonParameters);
    if (extraForm != null) {
      for (String key : extraForm.keySet()) {
        entity = entity + "&" + (key + "=" + extraForm.get(key));
      }
    }
    request.setEntity(entity, MediaType.APPLICATION_ALL);

    Response response = _gClient.handle(request);
    Representation result = response.getEntity();
    StringWriter sw = new StringWriter();
    if (result != null) {
      result.write(sw);
    }

    int code = response.getStatus().getCode();
    boolean successCode =
        code == Status.SUCCESS_NO_CONTENT.getCode() || code == Status.SUCCESS_OK.getCode();
    // Return on the first success, or on the last attempt (where the asserts then fail).
    if (successCode || attempt == MAX_RETRIES) {
      Assert.assertTrue(successCode);
      Assert.assertTrue(hasException == sw.toString().toLowerCase().contains("exception"));
      return sw.toString();
    }
  }
  Assert.fail("Request failed after all retries");
  return null;
}
/**
 * Issues an HTTP DELETE against the given URL and asserts that the response body mentions
 * "exception" exactly when {@code hasException} is true.
 */
void deleteUrl(String url, boolean hasException) throws IOException {
  Response response = _gClient.handle(new Request(Method.DELETE, new Reference(url)));
  StringWriter body = new StringWriter();
  response.getEntity().write(body);
  Assert.assertTrue(hasException == body.toString().toLowerCase().contains("exception"));
}
/**
 * Issues an HTTP GET against the given URL and returns the response body as a string.
 */
String getUrl(String url) throws IOException {
  Response response = _gClient.handle(new Request(Method.GET, new Reference(url)));
  StringWriter body = new StringWriter();
  response.getEntity().write(body);
  return body.toString();
}
/** Returns the admin REST URL for the given cluster. */
String getClusterUrl(String cluster) {
  return "http://localhost:" + ADMIN_PORT + "/clusters/" + cluster;
}
/** Returns the admin REST URL for an instance within the given cluster. */
String getInstanceUrl(String cluster, String instance) {
  return getClusterUrl(cluster) + "/instances/" + instance;
}
/** Returns the admin REST URL for a resource group within the given cluster. */
String getResourceUrl(String cluster, String resourceGroup) {
  return getClusterUrl(cluster) + "/resourceGroups/" + resourceGroup;
}
/**
 * Runs the given ClusterSetup command line and asserts that it throws an exception.
 */
void assertClusterSetupException(String command) {
  boolean sawException = false;
  try {
    ClusterSetup.processCommandLineArgs(command.split(" "));
  } catch (Exception e) {
    sawException = true;
  }
  Assert.assertTrue(sawException);
}
/** Builds the JSON parameter map for a ClusterSetup.addCluster management command. */
private Map<String, String> addClusterCmd(String clusterName) {
  Map<String, String> parameters = new HashMap<>();
  parameters.put(JsonParameters.CLUSTER_NAME, clusterName);
  parameters.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addCluster);
  return parameters;
}
/** Creates a cluster via the REST API and verifies the response echoes its name. */
private void addCluster(String clusterName) throws IOException {
  String clustersUrl = "http://localhost:" + ADMIN_PORT + "/clusters";
  String body = assertSuccessPostOperation(clustersUrl, addClusterCmd(clusterName), false);
  Assert.assertTrue(body.contains(clusterName));
}
@Test
public void testAddCluster() throws Exception {
  String url = "http://localhost:" + ADMIN_PORT + "/clusters";
  // Normal add: response should echo the new cluster name.
  String response = assertSuccessPostOperation(url, addClusterCmd("clusterTest"), false);
  Assert.assertTrue(response.contains("clusterTest"));

  // Malformed cluster name (leading '/') must be rejected with an exception in the body.
  response = assertSuccessPostOperation(url, addClusterCmd("/ClusterTest"), true);

  // Add the grand cluster.
  response = assertSuccessPostOperation(url, addClusterCmd("Klazt3rz"), false);
  Assert.assertTrue(response.contains("Klazt3rz"));

  // A leading backslash, unlike a slash, is accepted as part of a cluster name.
  response = assertSuccessPostOperation(url, addClusterCmd("\\ClusterTest"), false);
  Assert.assertTrue(response.contains("\\ClusterTest"));

  // Adding an already-existing cluster is a no-op success (no exception expected).
  response = assertSuccessPostOperation(url, addClusterCmd("clusterTest"), false);

  // Verify all three clusters are fully set up in ZK before deleting.
  Assert.assertTrue(ZKUtil.isClusterSetup("Klazt3rz", _gZkClient));
  Assert.assertTrue(ZKUtil.isClusterSetup("clusterTest", _gZkClient));
  Assert.assertTrue(ZKUtil.isClusterSetup("\\ClusterTest", _gZkClient));

  // Delete clusters without resources or instances; deletes of nonexistent clusters
  // (e.g. "clusterTest1" at this point) are expected to succeed silently.
  String clusterUrl = getClusterUrl("\\ClusterTest");
  deleteUrl(clusterUrl, false);
  String clustersUrl = "http://localhost:" + ADMIN_PORT + "/clusters";
  response = getUrl(clustersUrl);

  clusterUrl = getClusterUrl("clusterTest1");
  deleteUrl(clusterUrl, false);
  response = getUrl(clustersUrl);
  Assert.assertFalse(response.contains("clusterTest1"));

  clusterUrl = getClusterUrl("clusterTest");
  deleteUrl(clusterUrl, false);
  response = getUrl(clustersUrl);
  Assert.assertFalse(response.contains("clusterTest"));

  clusterUrl = getClusterUrl("clusterTestOK");
  deleteUrl(clusterUrl, false);

  // Confirm the ZK roots for the deleted/nonexistent clusters are gone.
  Assert.assertFalse(_gZkClient.exists("/clusterTest"));
  Assert.assertFalse(_gZkClient.exists("/clusterTest1"));
  Assert.assertFalse(_gZkClient.exists("/clusterTestOK"));

  // Re-adding after deletion works and the cluster shows up in the listing again.
  response = assertSuccessPostOperation(url, addClusterCmd("clusterTest1"), false);
  response = getUrl(clustersUrl);
  Assert.assertTrue(response.contains("clusterTest1"));
}
/**
 * Builds the form-parameter map for a ClusterSetup {@code addResource} admin command.
 *
 * @param resourceName  name of the resource group to create
 * @param stateModelDef state model definition reference (e.g. "MasterSlave")
 * @param partition     number of partitions for the resource
 * @return parameters understood by the admin REST endpoint
 */
private Map<String, String> addResourceCmd(String resourceName, String stateModelDef,
    int partition) {
  Map<String, String> cmd = new HashMap<String, String>();
  cmd.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addResource);
  cmd.put(JsonParameters.RESOURCE_GROUP_NAME, resourceName);
  cmd.put(JsonParameters.STATE_MODEL_DEF_REF, stateModelDef);
  cmd.put(JsonParameters.PARTITIONS, String.valueOf(partition));
  return cmd;
}
/**
 * Adds a MasterSlave resource with the given partition count to a cluster via the
 * admin REST API and asserts the response echoes the resource name.
 *
 * @param clusterName  target cluster
 * @param resourceName resource to create
 * @param partitions   number of partitions
 * @throws IOException if the REST call fails
 */
private void addResource(String clusterName, String resourceName, int partitions)
    throws IOException {
  String resourcesUrl =
      "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/resourceGroups";
  String response = assertSuccessPostOperation(resourcesUrl,
      addResourceCmd(resourceName, "MasterSlave", partitions), false);
  Assert.assertTrue(response.contains(resourceName));
}
// Verifies resource-group add/duplicate-add/drop/re-add through the REST API.
@Test
public void testAddResource() throws Exception {
final String clusterName = "clusterTestAddResource";
addCluster(clusterName);
String reourcesUrl =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/resourceGroups";
String response =
assertSuccessPostOperation(reourcesUrl, addResourceCmd("db_22", "MasterSlave", 144), false);
Assert.assertTrue(response.contains("db_22"));
response =
assertSuccessPostOperation(reourcesUrl, addResourceCmd("db_11", "MasterSlave", 44), false);
Assert.assertTrue(response.contains("db_11"));
// Add duplicate resource — must fail even with a different state model / partition count
response =
assertSuccessPostOperation(reourcesUrl, addResourceCmd("db_22", "OnlineOffline", 55), true);
// drop resource now, then confirm its ideal state znode is removed
String resourceUrl = getResourceUrl(clusterName, "db_11");
deleteUrl(resourceUrl, false);
Assert.assertFalse(_gZkClient.exists("/" + clusterName + "/IDEALSTATES/db_11"));
// re-adding a dropped resource must succeed and recreate the znode
response =
assertSuccessPostOperation(reourcesUrl, addResourceCmd("db_11", "MasterSlave", 44), false);
Assert.assertTrue(response.contains("db_11"));
Assert.assertTrue(_gZkClient.exists("/" + clusterName + "/IDEALSTATES/db_11"));
response =
assertSuccessPostOperation(reourcesUrl, addResourceCmd("db_33", "MasterSlave", 44), false);
Assert.assertTrue(response.contains("db_33"));
response =
assertSuccessPostOperation(reourcesUrl, addResourceCmd("db_44", "MasterSlave", 44), false);
Assert.assertTrue(response.contains("db_44"));
}
/**
 * Builds the form-parameter map for a ClusterSetup {@code activateCluster} command.
 *
 * @param grandClusterName controller ("grand") cluster that manages the target cluster
 * @param enabled          true to activate, false to deactivate
 * @return parameters understood by the admin REST endpoint
 */
private Map<String, String> activateClusterCmd(String grandClusterName, boolean enabled) {
  Map<String, String> cmd = new HashMap<String, String>();
  cmd.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.activateCluster);
  cmd.put(JsonParameters.GRAND_CLUSTER, grandClusterName);
  cmd.put(JsonParameters.ENABLED, String.valueOf(enabled));
  return cmd;
}
// End-to-end activate/deactivate of a cluster under a distributed-controller
// ("grand") cluster: after deactivation the grand cluster must drop the managed
// cluster's ideal state and the leader znode must disappear; the cluster itself
// can only be deleted once its live participants are stopped.
@Test
public void testDeactivateCluster() throws Exception {
final String clusterName = "clusterTestDeactivateCluster";
final String controllerClusterName = "controllerClusterTestDeactivateCluster";
Map<String, MockParticipantManager> participants =
new HashMap<String, MockParticipantManager>();
Map<String, ClusterDistributedController> distControllers =
new HashMap<String, ClusterDistributedController>();
// setup cluster
addCluster(clusterName);
addInstancesToCluster(clusterName, "localhost:123", 6, null);
addResource(clusterName, "db_11", 16);
rebalanceResource(clusterName, "db_11");
addCluster(controllerClusterName);
addInstancesToCluster(controllerClusterName, "controller_900", 2, null);
// start mock nodes
for (int i = 0; i < 6; i++) {
String instanceName = "localhost_123" + i;
MockParticipantManager participant =
new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participant.syncStart();
participants.put(instanceName, participant);
}
// start controller nodes
for (int i = 0; i < 2; i++) {
String controllerName = "controller_900" + i;
ClusterDistributedController distController =
new ClusterDistributedController(ZK_ADDR, controllerClusterName, controllerName);
distController.syncStart();
distControllers.put(controllerName, distController);
}
String clusterUrl = getClusterUrl(clusterName);
// activate cluster: hand management of clusterName over to the controller cluster
assertSuccessPostOperation(clusterUrl, activateClusterCmd(controllerClusterName, true), false);
boolean verifyResult =
ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
controllerClusterName));
Assert.assertTrue(verifyResult);
verifyResult =
ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
clusterName));
Assert.assertTrue(verifyResult);
// deactivate cluster
assertSuccessPostOperation(clusterUrl, activateClusterCmd(controllerClusterName, false), false);
// give the controllers time to react before checking ZK state
Thread.sleep(6000);
Assert.assertFalse(_gZkClient.exists("/" + controllerClusterName + "/IDEALSTATES/"
+ clusterName));
HelixDataAccessor accessor = participants.get("localhost_1231").getHelixDataAccessor();
String path = accessor.keyBuilder().controllerLeader().getPath();
Assert.assertFalse(_gZkClient.exists(path));
// delete must fail while participants are still live (expectError = true)
deleteUrl(clusterUrl, true);
Assert.assertTrue(_gZkClient.exists("/" + clusterName));
// leader node should be gone
for (MockParticipantManager participant : participants.values()) {
participant.syncStop();
}
// with all participants stopped, the delete must now succeed
deleteUrl(clusterUrl, false);
Assert.assertFalse(_gZkClient.exists("/" + clusterName));
// clean up
for (ClusterDistributedController controller : distControllers.values()) {
controller.syncStop();
}
for (MockParticipantManager participant : participants.values()) {
participant.syncStop();
}
}
/**
 * Builds the form-parameter map for a ClusterSetup {@code addIdealState} command.
 * The ideal-state payload itself is supplied separately via an extra form field.
 *
 * @return parameters understood by the admin REST endpoint
 */
private Map<String, String> addIdealStateCmd() {
  Map<String, String> cmd = new HashMap<String, String>();
  cmd.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addIdealState);
  return cmd;
}
// Drops a rebalanced resource, re-adds it, and restores its previously saved
// ideal state via the addIdealState REST command; the restored record must be
// identical to the original.
@Test
public void testDropAddResource() throws Exception {
final String clusterName = "clusterTestDropAddResource";
// setup cluster
addCluster(clusterName);
addResource(clusterName, "db_11", 22);
addInstancesToCluster(clusterName, "localhost_123", 6, null);
rebalanceResource(clusterName, "db_11");
// snapshot the rebalanced ideal state as JSON
ZNRecord record =
_gSetupTool.getClusterManagementTool().getResourceIdealState(clusterName, "db_11")
.getRecord();
String x = ObjectToJson(record);
// NOTE(review): writes the snapshot to a hard-coded /tmp/temp.log — debugging
// leftover, not used by the test afterwards.
FileWriter fos = new FileWriter("/tmp/temp.log");
PrintWriter pw = new PrintWriter(fos);
pw.write(x);
pw.close();
ClusterControllerManager controller =
new ClusterControllerManager(ZK_ADDR, clusterName, "controller_9900");
controller.syncStart();
// start mock nodes
Map<String, MockParticipantManager> participants =
new HashMap<String, MockParticipantManager>();
for (int i = 0; i < 6; i++) {
String instanceName = "localhost_123" + i;
MockParticipantManager participant =
new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participant.syncStart();
participants.put(instanceName, participant);
}
boolean verifyResult =
ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
clusterName));
Assert.assertTrue(verifyResult);
// drop the resource; the cluster must converge again without it
String resourceUrl = getResourceUrl(clusterName, "db_11");
deleteUrl(resourceUrl, false);
verifyResult =
ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
clusterName));
Assert.assertTrue(verifyResult);
// re-add and push the saved ideal state back through the REST API
addResource(clusterName, "db_11", 22);
String idealStateUrl = getResourceUrl(clusterName, "db_11") + "/idealState";
Map<String, String> extraform = new HashMap<String, String>();
extraform.put(JsonParameters.NEW_IDEAL_STATE, x);
assertSuccessPostOperation(idealStateUrl, addIdealStateCmd(), extraform, false);
verifyResult =
ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
clusterName));
Assert.assertTrue(verifyResult);
// restored ideal state must equal the snapshot taken before the drop
ZNRecord record2 =
_gSetupTool.getClusterManagementTool().getResourceIdealState(clusterName, "db_11")
.getRecord();
Assert.assertTrue(record2.equals(record));
// clean up
controller.syncStop();
for (MockParticipantManager participant : participants.values()) {
participant.syncStop();
}
}
/**
 * Builds the form-parameter map for a ClusterSetup {@code addInstance} command.
 *
 * @param instances semicolon-separated list of host:port instance names
 * @return parameters understood by the admin REST endpoint
 */
private Map<String, String> addInstanceCmd(String instances) {
  Map<String, String> cmd = new HashMap<String, String>();
  cmd.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addInstance);
  cmd.put(JsonParameters.INSTANCE_NAMES, instances);
  return cmd;
}
/**
 * Builds the form-parameter map for a ClusterSetup {@code expandCluster} command,
 * which rebalances existing resources onto newly added instances.
 *
 * @return parameters understood by the admin REST endpoint
 */
private Map<String, String> expandClusterCmd() {
  Map<String, String> cmd = new HashMap<String, String>();
  cmd.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.expandCluster);
  return cmd;
}
// Adds four new instances to a converged 6-node cluster, runs expandCluster, then
// starts the new participants and checks the cluster converges again.
@Test
public void testExpandCluster() throws Exception {
final String clusterName = "clusterTestExpandCluster";
// setup cluster
addCluster(clusterName);
addInstancesToCluster(clusterName, "localhost:123", 6, null);
addResource(clusterName, "db_11", 22);
rebalanceResource(clusterName, "db_11");
ClusterControllerManager controller =
new ClusterControllerManager(ZK_ADDR, clusterName, "controller_9900");
controller.syncStart();
// start mock nodes
Map<String, MockParticipantManager> participants =
new HashMap<String, MockParticipantManager>();
for (int i = 0; i < 6; i++) {
String instanceName = "localhost_123" + i;
MockParticipantManager participant =
new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participant.syncStart();
participants.put(instanceName, participant);
}
boolean verifyResult =
ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
clusterName));
Assert.assertTrue(verifyResult);
String clusterUrl = getClusterUrl(clusterName);
String instancesUrl = clusterUrl + "/instances";
// register four additional instances; ':' in names becomes '_' in the response
String instances = "localhost:12331;localhost:12341;localhost:12351;localhost:12361";
String response = assertSuccessPostOperation(instancesUrl, addInstanceCmd(instances), false);
String[] hosts = instances.split(";");
for (String host : hosts) {
Assert.assertTrue(response.contains(host.replace(':', '_')));
}
// expand the cluster so the resource is rebalanced onto the new instances
response = assertSuccessPostOperation(clusterUrl, expandClusterCmd(), false);
// start participants for the four new instances (localhost_12331 .. localhost_12361)
for (int i = 3; i <= 6; i++) {
String instanceName = "localhost_123" + i + "1";
MockParticipantManager participant =
new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participant.syncStart();
participants.put(instanceName, participant);
}
verifyResult =
ClusterStateVerifier
.verifyByZkCallback(new MasterNbInExtViewVerifier(ZK_ADDR, clusterName));
Assert.assertTrue(verifyResult);
verifyResult =
ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
clusterName));
Assert.assertTrue(verifyResult);
// clean up
controller.syncStop();
for (MockParticipantManager participant : participants.values()) {
participant.syncStop();
}
}
/**
 * Builds the form-parameter map for a ClusterSetup {@code enablePartition} command.
 *
 * @param resourceName resource owning the partitions
 * @param partitions   semicolon-separated partition names
 * @param enabled      true to enable, false to disable
 * @return parameters understood by the admin REST endpoint
 */
private Map<String, String> enablePartitionCmd(String resourceName, String partitions,
    boolean enabled) {
  Map<String, String> cmd = new HashMap<String, String>();
  cmd.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.enablePartition);
  cmd.put(JsonParameters.RESOURCE, resourceName);
  cmd.put(JsonParameters.PARTITION, partitions);
  cmd.put(JsonParameters.ENABLED, String.valueOf(enabled));
  return cmd;
}
// Disables two partitions on one instance, checks they transition to OFFLINE there,
// then re-enables them and checks they return to MASTER/SLAVE.
@Test
public void testEnablePartitions() throws IOException, InterruptedException {
final String clusterName = "clusterTestEnablePartitions";
// setup cluster
addCluster(clusterName);
addInstancesToCluster(clusterName, "localhost:123", 6, null);
addResource(clusterName, "db_11", 22);
rebalanceResource(clusterName, "db_11");
ClusterControllerManager controller =
new ClusterControllerManager(ZK_ADDR, clusterName, "controller_9900");
controller.syncStart();
// start mock nodes
Map<String, MockParticipantManager> participants =
new HashMap<String, MockParticipantManager>();
for (int i = 0; i < 6; i++) {
String instanceName = "localhost_123" + i;
MockParticipantManager participant =
new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participant.syncStart();
participants.put(instanceName, participant);
}
HelixDataAccessor accessor = participants.get("localhost_1231").getHelixDataAccessor();
// target instance whose partitions get disabled
String hostName = "localhost_1231";
String instanceUrl = getInstanceUrl(clusterName, hostName);
ExternalView ev = accessor.getProperty(accessor.keyBuilder().externalView("db_11"));
// disable two partitions; the response must list them under DISABLED_PARTITION
String response =
assertSuccessPostOperation(instanceUrl,
enablePartitionCmd("db_11", "db_11_0;db_11_11", false), false);
Assert.assertTrue(response.contains("DISABLED_PARTITION"));
Assert.assertTrue(response.contains("db_11_0"));
Assert.assertTrue(response.contains("db_11_11"));
boolean verifyResult =
ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
clusterName));
Assert.assertTrue(verifyResult);
// disabled partitions must show as OFFLINE on the target instance
ev = accessor.getProperty(accessor.keyBuilder().externalView("db_11"));
Assert.assertEquals(ev.getStateMap("db_11_0").get(hostName), "OFFLINE");
Assert.assertEquals(ev.getStateMap("db_11_11").get(hostName), "OFFLINE");
// re-enable; the response must no longer list the partitions as disabled
response =
assertSuccessPostOperation(instanceUrl,
enablePartitionCmd("db_11", "db_11_0;db_11_11", true), false);
Assert.assertFalse(response.contains("db_11_0"));
Assert.assertFalse(response.contains("db_11_11"));
verifyResult =
ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
clusterName));
Assert.assertTrue(verifyResult);
// partitions resume their rebalanced states on the instance
ev = accessor.getProperty(accessor.keyBuilder().externalView("db_11"));
Assert.assertEquals(ev.getStateMap("db_11_0").get(hostName), "MASTER");
Assert.assertEquals(ev.getStateMap("db_11_11").get(hostName), "SLAVE");
// clean up
controller.syncStop();
for (MockParticipantManager participant : participants.values()) {
participant.syncStop();
}
}
/**
 * Builds the form-parameter map for a ClusterSetup {@code enableInstance} command.
 *
 * @param enabled true to enable the instance, false to disable it
 * @return parameters understood by the admin REST endpoint
 */
private Map<String, String> enableInstanceCmd(boolean enabled) {
  Map<String, String> cmd = new HashMap<String, String>();
  cmd.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.enableInstance);
  cmd.put(JsonParameters.ENABLED, String.valueOf(enabled));
  return cmd;
}
/**
 * Builds the form-parameter map for a ClusterSetup {@code swapInstance} command.
 *
 * @param oldInstance instance to be replaced
 * @param newInstance replacement instance
 * @return parameters understood by the admin REST endpoint
 */
private Map<String, String> swapInstanceCmd(String oldInstance, String newInstance) {
  Map<String, String> cmd = new HashMap<String, String>();
  cmd.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.swapInstance);
  cmd.put(JsonParameters.OLD_INSTANCE, oldInstance);
  cmd.put(JsonParameters.NEW_INSTANCE, newInstance);
  return cmd;
}
// Covers instance-level operations: drop of a live instance must fail, drop/swap of
// a merely-disabled-but-connected instance must fail, and swap succeeds only after
// the old instance is disabled AND disconnected.
@Test
public void testInstanceOperations() throws Exception {
final String clusterName = "clusterTestInstanceOperations";
// setup cluster
addCluster(clusterName);
addInstancesToCluster(clusterName, "localhost:123", 6, null);
addResource(clusterName, "db_11", 8);
rebalanceResource(clusterName, "db_11");
ClusterControllerManager controller =
new ClusterControllerManager(ZK_ADDR, clusterName, "controller_9900");
controller.syncStart();
// start mock nodes
Map<String, MockParticipantManager> participants =
new HashMap<String, MockParticipantManager>();
for (int i = 0; i < 6; i++) {
String instanceName = "localhost_123" + i;
MockParticipantManager participant =
new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participant.syncStart();
participants.put(instanceName, participant);
}
HelixDataAccessor accessor;
// drop node should fail as not disabled
String instanceUrl = getInstanceUrl(clusterName, "localhost_1232");
deleteUrl(instanceUrl, true);
// disabled node
String response = assertSuccessPostOperation(instanceUrl, enableInstanceCmd(false), false);
Assert.assertTrue(response.contains("false"));
// Cannot drop / swap while the instance is still connected
deleteUrl(instanceUrl, true);
String instancesUrl = getClusterUrl(clusterName) + "/instances";
response =
assertSuccessPostOperation(instancesUrl,
swapInstanceCmd("localhost_1232", "localhost_12320"), true);
// disconnect the node
participants.get("localhost_1232").syncStop();
// add new node then swap instance
response = assertSuccessPostOperation(instancesUrl, addInstanceCmd("localhost_12320"), false);
Assert.assertTrue(response.contains("localhost_12320"));
// swap instance. The instance get swapped out should not exist anymore
response =
assertSuccessPostOperation(instancesUrl,
swapInstanceCmd("localhost_1232", "localhost_12320"), false);
Assert.assertTrue(response.contains("localhost_12320"));
// the trailing quote distinguishes "localhost_1232" from "localhost_12320" in the JSON
Assert.assertFalse(response.contains("localhost_1232\""));
accessor = participants.get("localhost_1231").getHelixDataAccessor();
String path = accessor.keyBuilder().instanceConfig("localhost_1232").getPath();
Assert.assertFalse(_gZkClient.exists(path));
// bring up the replacement and verify the cluster converges
MockParticipantManager newParticipant =
new MockParticipantManager(ZK_ADDR, clusterName, "localhost_12320");
newParticipant.syncStart();
participants.put("localhost_12320", newParticipant);
boolean verifyResult =
ClusterStateVerifier
.verifyByZkCallback(new MasterNbInExtViewVerifier(ZK_ADDR, clusterName));
Assert.assertTrue(verifyResult);
// clean up
controller.syncStop();
for (MockParticipantManager participant : participants.values()) {
participant.syncStop();
}
}
// Activates a cluster under a distributed-controller cluster, exercising the error
// paths (non-existent grand cluster / non-existent cluster), then verifies both a
// controller-cluster leader and a managed-cluster leader are elected and the
// cluster converges.
@Test
public void testStartCluster() throws Exception {
final String clusterName = "clusterTestStartCluster";
final String controllerClusterName = "controllerClusterTestStartCluster";
Map<String, MockParticipantManager> participants =
new HashMap<String, MockParticipantManager>();
Map<String, ClusterDistributedController> distControllers =
new HashMap<String, ClusterDistributedController>();
// setup cluster
addCluster(clusterName);
addInstancesToCluster(clusterName, "localhost:123", 6, null);
addResource(clusterName, "db_11", 8);
rebalanceResource(clusterName, "db_11");
addCluster(controllerClusterName);
addInstancesToCluster(controllerClusterName, "controller_900", 2, null);
// start mock nodes
for (int i = 0; i < 6; i++) {
String instanceName = "localhost_123" + i;
MockParticipantManager participant =
new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participant.syncStart();
participants.put(instanceName, participant);
}
// start controller nodes
for (int i = 0; i < 2; i++) {
String controllerName = "controller_900" + i;
ClusterDistributedController distController =
new ClusterDistributedController(ZK_ADDR, controllerClusterName, controllerName);
distController.syncStart();
distControllers.put(controllerName, distController);
}
Thread.sleep(100);
// activate clusters
// wrong grand clustername — expected to fail
String clusterUrl = getClusterUrl(clusterName);
assertSuccessPostOperation(clusterUrl, activateClusterCmd("nonExistCluster", true), true);
// wrong cluster name — expected to fail
clusterUrl = getClusterUrl("nonExistCluster");
assertSuccessPostOperation(clusterUrl, activateClusterCmd(controllerClusterName, true), true);
// correct activation
clusterUrl = getClusterUrl(clusterName);
assertSuccessPostOperation(clusterUrl, activateClusterCmd(controllerClusterName, true), false);
Thread.sleep(500);
// delete must fail while the cluster is active with live participants
deleteUrl(clusterUrl, true);
// verify leader node
HelixDataAccessor accessor = distControllers.get("controller_9001").getHelixDataAccessor();
LiveInstance controllerLeader = accessor.getProperty(accessor.keyBuilder().controllerLeader());
Assert.assertTrue(controllerLeader.getInstanceName().startsWith("controller_900"));
accessor = participants.get("localhost_1232").getHelixDataAccessor();
LiveInstance leader = accessor.getProperty(accessor.keyBuilder().controllerLeader());
// poll up to ~5s for the managed cluster's leader to appear
for (int i = 0; i < 5; i++) {
if (leader != null) {
break;
}
Thread.sleep(1000);
leader = accessor.getProperty(accessor.keyBuilder().controllerLeader());
}
Assert.assertTrue(leader.getInstanceName().startsWith("controller_900"));
boolean verifyResult =
ClusterStateVerifier
.verifyByZkCallback(new MasterNbInExtViewVerifier(ZK_ADDR, clusterName));
Assert.assertTrue(verifyResult);
verifyResult =
ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
clusterName));
Assert.assertTrue(verifyResult);
Thread.sleep(1000);
// clean up
for (ClusterDistributedController controller : distControllers.values()) {
controller.syncStop();
}
for (MockParticipantManager participant : participants.values()) {
participant.syncStop();
}
}
/**
 * Builds the form-parameter map for a ClusterSetup {@code rebalance} command.
 *
 * @param replicas replica count to rebalance with
 * @param prefix   optional resource-key prefix (partition name alias); null to omit
 * @param tag      optional instance group tag restricting placement; null to omit
 * @return parameters understood by the admin REST endpoint
 */
private Map<String, String> rebalanceCmd(int replicas, String prefix, String tag) {
  Map<String, String> cmd = new HashMap<String, String>();
  cmd.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.rebalance);
  cmd.put(JsonParameters.REPLICAS, String.valueOf(replicas));
  if (prefix != null) {
    cmd.put(JsonParameters.RESOURCE_KEY_PREFIX, prefix);
  }
  if (tag != null) {
    cmd.put(ClusterSetup.instanceGroupTag, tag);
  }
  return cmd;
}
/**
 * Rebalances a resource with 3 replicas through its idealState REST sub-resource.
 *
 * @param clusterName  cluster owning the resource
 * @param resourceName resource to rebalance
 * @throws IOException if the REST call fails
 */
private void rebalanceResource(String clusterName, String resourceName) throws IOException {
  String idealStateUrl = getResourceUrl(clusterName, resourceName) + "/idealState";
  assertSuccessPostOperation(idealStateUrl, rebalanceCmd(3, null, null), false);
}
// Exercises the rebalance REST command in four flavors: plain, after drop/re-add,
// with a resource-key prefix, with an instance group tag, and with both; tagged
// rebalances must only place partitions on the first three (tagged) instances.
@Test
public void testRebalanceResource() throws Exception {
// add a normal cluster
final String clusterName = "clusterTestRebalanceResource";
addCluster(clusterName);
// only instances 0..2 carry _tag1; 3..5 are referenced in assertions below
addInstancesToCluster(clusterName, "localhost:123", 3, _tag1);
addResource(clusterName, "db_11", 44);
String resourceUrl = getResourceUrl(clusterName, "db_11");
String idealStateUrl = resourceUrl + "/idealState";
String response = assertSuccessPostOperation(idealStateUrl, rebalanceCmd(3, null, null), false);
ZNRecord record = JsonToObject(ZNRecord.class, response);
Assert.assertTrue(record.getId().equalsIgnoreCase("db_11"));
// each partition must have 3 replicas in both preference list and state map
Assert.assertEquals(record.getListField("db_11_0").size(), 3);
Assert.assertEquals(record.getMapField("db_11_0").size(), 3);
deleteUrl(resourceUrl, false);
// re-add and rebalance
final String reourcesUrl =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/resourceGroups";
response = getUrl(reourcesUrl);
Assert.assertFalse(response.contains("db_11"));
addResource(clusterName, "db_11", 48);
idealStateUrl = resourceUrl + "/idealState";
response = assertSuccessPostOperation(idealStateUrl, rebalanceCmd(3, null, null), false);
record = JsonToObject(ZNRecord.class, response);
Assert.assertTrue(record.getId().equalsIgnoreCase("db_11"));
Assert.assertEquals(record.getListField("db_11_0").size(), 3);
Assert.assertEquals(record.getMapField("db_11_0").size(), 3);
// rebalance with key prefix: partitions are named "alias_N" instead of "db_22_N"
addResource(clusterName, "db_22", 55);
resourceUrl = getResourceUrl(clusterName, "db_22");
idealStateUrl = resourceUrl + "/idealState";
response = assertSuccessPostOperation(idealStateUrl, rebalanceCmd(2, "alias", null), false);
record = JsonToObject(ZNRecord.class, response);
Assert.assertTrue(record.getId().equalsIgnoreCase("db_22"));
Assert.assertEquals(record.getListField("alias_0").size(), 2);
Assert.assertEquals(record.getMapField("alias_0").size(), 2);
Assert.assertTrue((((String) (record.getMapFields().keySet().toArray()[0])))
.startsWith("alias_"));
Assert.assertFalse(response.contains(IdealStateProperty.INSTANCE_GROUP_TAG.toString()));
// rebalance restricted to tagged instances only
addResource(clusterName, "db_33", 44);
resourceUrl = getResourceUrl(clusterName, "db_33");
idealStateUrl = resourceUrl + "/idealState";
response = assertSuccessPostOperation(idealStateUrl, rebalanceCmd(2, null, _tag1), false);
Assert.assertTrue(response.contains(IdealStateProperty.INSTANCE_GROUP_TAG.toString()));
Assert.assertTrue(response.contains(_tag1));
for (int i = 0; i < 6; i++) {
String instance = "localhost_123" + i;
if (i < 3) {
Assert.assertTrue(response.contains(instance));
} else {
Assert.assertFalse(response.contains(instance));
}
}
// prefix + tag combined
addResource(clusterName, "db_44", 44);
resourceUrl = getResourceUrl(clusterName, "db_44");
idealStateUrl = resourceUrl + "/idealState";
response = assertSuccessPostOperation(idealStateUrl, rebalanceCmd(2, "alias", _tag1), false);
Assert.assertTrue(response.contains(IdealStateProperty.INSTANCE_GROUP_TAG.toString()));
Assert.assertTrue(response.contains(_tag1));
record = JsonToObject(ZNRecord.class, response);
Assert.assertTrue((((String) (record.getMapFields().keySet().toArray()[0])))
.startsWith("alias_"));
for (int i = 0; i < 6; i++) {
String instance = "localhost_123" + i;
if (i < 3) {
Assert.assertTrue(response.contains(instance));
} else {
Assert.assertFalse(response.contains(instance));
}
}
}
/**
 * Adds {@code n} instances named {@code instanceNamePrefix + i} (i in [0, n)) to a cluster
 * via the admin REST API and, when {@code tag} is non-empty, applies that instance group
 * tag to each of them.
 *
 * @param clusterName        cluster to add the instances to
 * @param instanceNamePrefix prefix for the generated instance names; any ':' in the
 *                           resulting name is echoed back as '_' by the admin service
 * @param n                  number of instances to add
 * @param tag                instance group tag to apply to every instance; null or empty to skip
 * @throws IOException if a REST call fails
 */
private void addInstancesToCluster(String clusterName, String instanceNamePrefix, int n,
    String tag) throws IOException {
  final String clusterUrl = getClusterUrl(clusterName);
  String instancesUrl = clusterUrl + "/instances";
  // add instances to cluster, one POST per instance so each response can be checked
  Map<String, String> parameters = new HashMap<String, String>();
  parameters.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addInstance);
  for (int i = 0; i < n; i++) {
    parameters.put(JsonParameters.INSTANCE_NAME, instanceNamePrefix + i);
    String response = assertSuccessPostOperation(instancesUrl, parameters, false);
    Assert.assertTrue(response.contains((instanceNamePrefix + i).replace(':', '_')));
  }
  // add tag to each instance that was just created
  if (tag != null && !tag.isEmpty()) {
    parameters.clear();
    parameters.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addInstanceTag);
    parameters.put(ClusterSetup.instanceGroupTag, tag);
    for (int i = 0; i < n; i++) {
      String instanceUrl = instancesUrl + "/" + (instanceNamePrefix + i).replace(':', '_');
      String response = assertSuccessPostOperation(instanceUrl, parameters, false);
      // Fix: assert on the tag that was actually applied, not the unrelated _tag1 field
      // (the original check only passed by coincidence when callers used _tag1).
      Assert.assertTrue(response.contains(tag));
    }
  }
}
/**
 * Builds the form-parameter map for a ClusterSetup {@code addInstanceTag} command.
 *
 * @param tag instance group tag to add
 * @return parameters understood by the admin REST endpoint
 */
private Map<String, String> addInstanceTagCmd(String tag) {
  Map<String, String> cmd = new HashMap<String, String>();
  cmd.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addInstanceTag);
  cmd.put(ClusterSetup.instanceGroupTag, tag);
  return cmd;
}
/**
 * Builds the form-parameter map for a ClusterSetup {@code removeInstanceTag} command.
 *
 * @param tag instance group tag to remove
 * @return parameters understood by the admin REST endpoint
 */
private Map<String, String> removeInstanceTagCmd(String tag) {
  Map<String, String> cmd = new HashMap<String, String>();
  cmd.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.removeInstanceTag);
  cmd.put(ClusterSetup.instanceGroupTag, tag);
  return cmd;
}
// Covers instance add/delete rules (delete requires disable first; deleting a
// non-existent instance fails harmlessly), duplicate adds, and tag add/remove.
@Test
public void testAddInstance() throws Exception {
final String clusterName = "clusterTestAddInstance";
// add normal cluster
addCluster(clusterName);
String clusterUrl = getClusterUrl(clusterName);
// Add instances to cluster
String instancesUrl = clusterUrl + "/instances";
addInstancesToCluster(clusterName, "localhost:123", 3, null);
// batch add via semicolon-separated list
String instances = "localhost:1233;localhost:1234;localhost:1235;localhost:1236";
String response = assertSuccessPostOperation(instancesUrl, addInstanceCmd(instances), false);
for (int i = 3; i <= 6; i++) {
Assert.assertTrue(response.contains("localhost_123" + i));
}
// delete one node without disable — must fail, instance remains listed
String instanceUrl = instancesUrl + "/localhost_1236";
deleteUrl(instanceUrl, true);
response = getUrl(instancesUrl);
Assert.assertTrue(response.contains("localhost_1236"));
// delete non-exist node — fails, and the name stays absent
instanceUrl = instancesUrl + "/localhost_12367";
deleteUrl(instanceUrl, true);
response = getUrl(instancesUrl);
Assert.assertFalse(response.contains("localhost_12367"));
// disable node, after which delete succeeds
instanceUrl = instancesUrl + "/localhost_1236";
response = assertSuccessPostOperation(instanceUrl, enableInstanceCmd(false), false);
Assert.assertTrue(response.contains("false"));
deleteUrl(instanceUrl, false);
// add controller cluster
final String controllerClusterName = "controllerClusterTestAddInstance";
addCluster(controllerClusterName);
// add node to controller cluster
String controllers = "controller:9000;controller:9001";
String controllerUrl = getClusterUrl(controllerClusterName) + "/instances";
response = assertSuccessPostOperation(controllerUrl, addInstanceCmd(controllers), false);
Assert.assertTrue(response.contains("controller_9000"));
Assert.assertTrue(response.contains("controller_9001"));
// add a duplicated host — expected to fail
response = assertSuccessPostOperation(instancesUrl, addInstanceCmd("localhost:1234"), true);
// add/remove tags
for (int i = 0; i < 4; i++) {
instanceUrl = instancesUrl + "/localhost_123" + i;
response = assertSuccessPostOperation(instanceUrl, addInstanceTagCmd(_tag1), false);
Assert.assertTrue(response.contains(_tag1));
}
// removing the tag must make it disappear from the instance's config
instanceUrl = instancesUrl + "/localhost_1233";
response = assertSuccessPostOperation(instanceUrl, removeInstanceTagCmd(_tag1), false);
Assert.assertFalse(response.contains(_tag1));
}
// Verifies that GET /clusters/{c}/resourceGroups reports instance-group-tag info
// only for tagged resources.
@Test
public void testGetResources() throws IOException {
final String clusterName = "TestTagAwareness_testGetResources";
final String TAG = "tag";
final String URL_BASE =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/resourceGroups";
_gSetupTool.addCluster(clusterName, true);
HelixAdmin admin = _gSetupTool.getClusterManagementTool();
// Add a tagged resource
IdealState taggedResource = new IdealState("taggedResource");
taggedResource.setInstanceGroupTag(TAG);
taggedResource.setStateModelDefRef("OnlineOffline");
admin.addResource(clusterName, taggedResource.getId(), taggedResource);
// Add an untagged resource
IdealState untaggedResource = new IdealState("untaggedResource");
untaggedResource.setStateModelDefRef("OnlineOffline");
admin.addResource(clusterName, untaggedResource.getId(), untaggedResource);
// Now make a REST call for all resources
Reference resourceRef = new Reference(URL_BASE);
Request request = new Request(Method.GET, resourceRef);
Response response = _gClient.handle(request);
ZNRecord responseRecord =
ClusterRepresentationUtil.JsonToObject(ZNRecord.class, response.getEntityAsText());
// Ensure that the tagged resource has information and the untagged one doesn't
Assert.assertNotNull(responseRecord.getMapField("ResourceTags"));
Assert
.assertEquals(TAG, responseRecord.getMapField("ResourceTags").get(taggedResource.getId()));
Assert.assertFalse(responseRecord.getMapField("ResourceTags").containsKey(
untaggedResource.getId()));
}
// Verifies that GET /clusters/{c}/instances reports tag->instances ownership
// correctly for instances with zero, one, or two tags.
// NOTE(review): reuses the cluster name "TestTagAwareness_testGetResources" from
// the previous test — presumably intentional reuse of the cluster; confirm.
@Test
public void testGetInstances() throws IOException {
final String clusterName = "TestTagAwareness_testGetResources";
final String[] TAGS = {
"tag1", "tag2"
};
final String URL_BASE =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/instances";
_gSetupTool.addCluster(clusterName, true);
HelixAdmin admin = _gSetupTool.getClusterManagementTool();
// Add 4 participants, each with differint tag characteristics
InstanceConfig instance1 = new InstanceConfig("localhost_1");
instance1.addTag(TAGS[0]);
admin.addInstance(clusterName, instance1);
InstanceConfig instance2 = new InstanceConfig("localhost_2");
instance2.addTag(TAGS[1]);
admin.addInstance(clusterName, instance2);
InstanceConfig instance3 = new InstanceConfig("localhost_3");
instance3.addTag(TAGS[0]);
instance3.addTag(TAGS[1]);
admin.addInstance(clusterName, instance3);
InstanceConfig instance4 = new InstanceConfig("localhost_4");
admin.addInstance(clusterName, instance4);
// Now make a REST call for all resources
Reference resourceRef = new Reference(URL_BASE);
Request request = new Request(Method.GET, resourceRef);
Response response = _gClient.handle(request);
ListInstancesWrapper responseWrapper =
ClusterRepresentationUtil.JsonToObject(ListInstancesWrapper.class,
response.getEntityAsText());
Map<String, List<String>> tagInfo = responseWrapper.tagInfo;
// Ensure tag ownership is reported correctly
Assert.assertTrue(tagInfo.containsKey(TAGS[0]));
Assert.assertTrue(tagInfo.containsKey(TAGS[1]));
Assert.assertTrue(tagInfo.get(TAGS[0]).contains("localhost_1"));
Assert.assertFalse(tagInfo.get(TAGS[0]).contains("localhost_2"));
Assert.assertTrue(tagInfo.get(TAGS[0]).contains("localhost_3"));
Assert.assertFalse(tagInfo.get(TAGS[0]).contains("localhost_4"));
Assert.assertFalse(tagInfo.get(TAGS[1]).contains("localhost_1"));
Assert.assertTrue(tagInfo.get(TAGS[1]).contains("localhost_2"));
Assert.assertTrue(tagInfo.get(TAGS[1]).contains("localhost_3"));
Assert.assertFalse(tagInfo.get(TAGS[1]).contains("localhost_4"));
}
}
| 9,174 |
0 |
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix
|
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix/webapp/AdminTestBase.java
|
package org.apache.helix.webapp;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.logging.Level;
import org.apache.helix.TestHelper;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.util.ZKClientPool;
import org.apache.helix.webapp.AdminTestHelper.AdminThread;
import org.apache.helix.zookeeper.zkclient.ZkServer;
import org.restlet.Client;
import org.restlet.data.Protocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.AssertJUnit;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
/**
 * Suite-scoped base class for admin webapp tests. Before the suite it boots an
 * embedded ZooKeeper server, a shared {@link ZkClient}/{@link ClusterSetup},
 * the Helix admin REST service (on a background thread), and a Restlet HTTP
 * client; after the suite it tears everything down again. Subclasses use the
 * protected statics directly.
 */
public class AdminTestBase {
  private static Logger LOG = LoggerFactory.getLogger(AdminTestBase.class);

  /** Address of the embedded ZooKeeper server started for this suite. */
  public static final String ZK_ADDR = "localhost:2187";
  /** Port the Helix admin REST service listens on. */
  protected final static int ADMIN_PORT = 2202;

  protected static ZkServer _zkServer;       // embedded ZK server
  protected static ZkClient _gZkClient;      // shared ZK client for assertions/setup
  protected static ClusterSetup _gSetupTool; // Helix cluster setup tool bound to _gZkClient
  protected static Client _gClient;          // Restlet HTTP client used by the tests
  static AdminThread _adminThread;           // background thread hosting the REST service

  @BeforeSuite
  public void beforeSuite() throws Exception {
    // TODO: use logging.properties file to config java.util.logging.Logger levels
    java.util.logging.Logger topJavaLogger = java.util.logging.Logger.getLogger("");
    topJavaLogger.setLevel(Level.WARNING);

    // start zk (must happen before any client connects)
    _zkServer = TestHelper.startZkServer(ZK_ADDR);
    AssertJUnit.assertTrue(_zkServer != null);
    ZKClientPool.reset();

    _gZkClient =
        new ZkClient(ZK_ADDR, ZkClient.DEFAULT_SESSION_TIMEOUT, ZkClient.DEFAULT_CONNECTION_TIMEOUT,
            new ZNRecordSerializer());
    _gSetupTool = new ClusterSetup(_gZkClient);

    // start admin REST service on its own daemon thread
    _adminThread = new AdminThread(ZK_ADDR, ADMIN_PORT);
    _adminThread.start();

    // create a client for talking to the REST service
    _gClient = new Client(Protocol.HTTP);

    // wait for the web service to start
    Thread.sleep(100);
  }

  @AfterSuite
  public void afterSuite() {
    // System.out.println("START AdminTestBase.afterSuite() at " + new
    // Date(System.currentTimeMillis()));

    // stop admin service (releases the AdminThread latch)
    _adminThread.stop();

    // stop zk after all clients are closed
    ZKClientPool.reset();
    _gZkClient.close();
    TestHelper.stopZkServer(_zkServer);

    // System.out.println("END AdminTestBase.afterSuite() at " + new
    // Date(System.currentTimeMillis()));
  }
}
| 9,175 |
0 |
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix
|
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix/webapp/TestResetResource.java
|
package org.apache.helix.webapp;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.helix.TestHelper;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.mock.participant.ErrTransition;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.tools.ClusterStateVerifier;
import org.apache.helix.webapp.resources.JsonParameters;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Integration test for the REST "reset resource" management command: a cluster
 * is driven into ERROR state on two partitions via injected transition
 * failures, then the resource is reset through the admin webapp and the
 * cluster is verified to converge back to a healthy state.
 */
public class TestResetResource extends AdminTestBase {
  @Test
  public void testResetNode() throws Exception {
    String className = TestHelper.getTestClassName();
    String methodName = TestHelper.getTestMethodName();
    String clusterName = className + "_" + methodName;
    final int n = 5;

    System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));

    // Bootstrap a 5-node MasterSlave cluster with one 10-partition resource.
    TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
        "localhost", // participant name prefix
        "TestDB", // resource name prefix
        1, // resources
        10, // partitions per resource
        n, // number of nodes
        3, // replicas
        "MasterSlave", true); // do rebalance

    // start controller
    ClusterControllerManager controller = new ClusterControllerManager(ZK_ADDR, clusterName, "controller_0");
    controller.syncStart();

    // Transitions that the first participant is told to fail, forcing the
    // named partitions into ERROR state.
    Map<String, Set<String>> errPartitions = new HashMap<String, Set<String>>() {
      {
        put("SLAVE-MASTER", TestHelper.setOf("TestDB0_4"));
        put("OFFLINE-SLAVE", TestHelper.setOf("TestDB0_8"));
      }
    };

    // start mock participants; only participants[0] injects errors
    MockParticipantManager[] participants = new MockParticipantManager[n];
    for (int i = 0; i < n; i++) {
      String instanceName = "localhost_" + (12918 + i);
      if (i == 0) {
        participants[i] =
            new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
        participants[i].setTransition(new ErrTransition(errPartitions));
      } else {
        participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
      }
      participants[i].syncStart();
    }

    // verify cluster reaches best-possible state *including* the expected errors
    Map<String, Map<String, String>> errStateMap = new HashMap<String, Map<String, String>>();
    errStateMap.put("TestDB0", new HashMap<String, String>());
    errStateMap.get("TestDB0").put("TestDB0_4", "localhost_12918");
    errStateMap.get("TestDB0").put("TestDB0_8", "localhost_12918");
    boolean result =
        ClusterStateVerifier
            .verifyByZkCallback((new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR,
                clusterName, errStateMap)));
    Assert.assertTrue(result, "Cluster verification fails");

    // reset resource "TestDB0" via the REST API; clear the error injection
    // first so the retried transitions can now succeed
    participants[0].setTransition(null);
    String resourceName = "TestDB0";
    String resourceUrl =
        "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/resourceGroups/"
            + resourceName;
    Map<String, String> paramMap = new HashMap<String, String>();
    paramMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.resetResource);
    TestHelixAdminScenariosRest.assertSuccessPostOperation(resourceUrl, paramMap, false);

    // after the reset the cluster must converge with no error partitions
    result =
        ClusterStateVerifier
            .verifyByZkCallback((new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR,
                clusterName)));
    Assert.assertTrue(result, "Cluster verification fails");

    // clean up
    controller.syncStop();
    for (int i = 0; i < 5; i++) {
      participants[i].syncStop();
    }

    System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
  }
}
| 9,176 |
0 |
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix
|
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix/webapp/TestResetPartitionState.java
|
package org.apache.helix.webapp;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.helix.NotificationContext;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.mock.participant.ErrTransition;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.tools.ClusterStateVerifier;
import org.apache.helix.webapp.resources.JsonParameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Integration test for the REST "reset partition" management command: two
 * partitions are driven into ERROR state via injected transition failures,
 * then reset through the admin webapp. The test also counts ERROR->OFFLINE
 * transitions to confirm the reset was actually executed, and checks that
 * resetting a non-existent / non-error partition is rejected.
 */
public class TestResetPartitionState extends AdminTestBase {
  private final static Logger LOG = LoggerFactory.getLogger(TestResetPartitionState.class);

  // URL helpers for the admin REST endpoints under test
  String getClusterUrl(String cluster) {
    return "http://localhost:" + ADMIN_PORT + "/clusters" + "/" + cluster;
  }

  String getInstanceUrl(String cluster, String instance) {
    return "http://localhost:" + ADMIN_PORT + "/clusters/" + cluster + "/instances/" + instance;
  }

  String getResourceUrl(String cluster, String resourceGroup) {
    return "http://localhost:" + ADMIN_PORT + "/clusters/" + cluster + "/resourceGroups/"
        + resourceGroup;
  }

  // Number of ERROR->OFFLINE transitions observed; incremented by the
  // participant's transition hook below, read by the test.
  AtomicInteger _errToOfflineInvoked = new AtomicInteger(0);

  /**
   * An {@link ErrTransition} that additionally counts ERROR->OFFLINE
   * transitions so the test can verify that reset() was invoked.
   */
  class ErrTransitionWithResetCnt extends ErrTransition {
    public ErrTransitionWithResetCnt(Map<String, Set<String>> errPartitions) {
      super(errPartitions);
    }

    @Override
    public void doTransition(Message message, NotificationContext context) {
      super.doTransition(message, context);
      String fromState = message.getFromState();
      String toState = message.getToState();
      if (fromState.equals("ERROR") && toState.equals("OFFLINE")) {
        // System.err.println("doReset() invoked");
        _errToOfflineInvoked.incrementAndGet();
      }
    }
  }

  @Test()
  public void testResetPartitionState() throws Exception {
    String className = TestHelper.getTestClassName();
    String methodName = TestHelper.getTestMethodName();
    String clusterName = className + "_" + methodName;
    final int n = 5;

    System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));

    // Bootstrap a 5-node MasterSlave cluster with one 10-partition resource.
    TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
        "localhost", // participant name prefix
        "TestDB", // resource name prefix
        1, // resources
        10, // partitions per resource
        n, // number of nodes
        3, // replicas
        "MasterSlave", true); // do rebalance

    // start controller
    ClusterControllerManager controller = new ClusterControllerManager(ZK_ADDR, clusterName, "controller_0");
    controller.syncStart();

    // Transitions the first participant is told to fail, forcing the named
    // partitions into ERROR state.
    Map<String, Set<String>> errPartitions = new HashMap<String, Set<String>>();
    errPartitions.put("SLAVE-MASTER", TestHelper.setOf("TestDB0_4"));
    errPartitions.put("OFFLINE-SLAVE", TestHelper.setOf("TestDB0_8"));

    // start mock participants; only participants[0] injects errors
    MockParticipantManager[] participants = new MockParticipantManager[n];
    for (int i = 0; i < n; i++) {
      String instanceName = "localhost_" + (12918 + i);
      if (i == 0) {
        participants[i] =
            new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
        participants[i].setTransition(new ErrTransition(errPartitions));
      } else {
        participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
      }
      participants[i].syncStart();
    }

    // verify the cluster converges *including* the expected error partitions
    Map<String, Map<String, String>> errStateMap = new HashMap<String, Map<String, String>>();
    errStateMap.put("TestDB0", new HashMap<String, String>());
    errStateMap.get("TestDB0").put("TestDB0_4", "localhost_12918");
    errStateMap.get("TestDB0").put("TestDB0_8", "localhost_12918");
    boolean result =
        ClusterStateVerifier
            .verifyByZkCallback((new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR,
                clusterName, errStateMap)));
    Assert.assertTrue(result, "Cluster verification fails");

    // reset a non-exist partition, should throw exception
    String hostName = "localhost_12918";
    String instanceUrl = getInstanceUrl(clusterName, hostName);
    Map<String, String> paramMap = new HashMap<String, String>();
    paramMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.resetPartition);
    paramMap.put(JsonParameters.PARTITION, "TestDB0_nonExist");
    paramMap.put(JsonParameters.RESOURCE, "TestDB0");
    LOG.info("IGNORABLE exception: test reset non-exist partition");
    TestHelixAdminScenariosRest.assertSuccessPostOperation(instanceUrl, paramMap, true);

    // reset 2 error partitions: clear the injected errors (empty map) but keep
    // counting ERROR->OFFLINE transitions via ErrTransitionWithResetCnt
    errPartitions.clear();
    participants[0].setTransition(new ErrTransitionWithResetCnt(errPartitions));
    clearStatusUpdate(clusterName, "localhost_12918", "TestDB0", "TestDB0_4");
    _errToOfflineInvoked.set(0);
    paramMap.put(JsonParameters.PARTITION, "TestDB0_4 TestDB0_8");
    TestHelixAdminScenariosRest.assertSuccessPostOperation(instanceUrl, paramMap, false);

    // poll until the cluster converges; resetting an already-healthy partition
    // along the way must fail (hence lastArg=true)
    for (int i = 0; i < 10; i++) {
      Thread.sleep(400); // wait reset to be done
      LOG.info("IGNORABLE exception: test reset non-error partition");
      TestHelixAdminScenariosRest.assertSuccessPostOperation(instanceUrl, paramMap, true);

      result =
          ClusterStateVerifier
              .verifyByZkCallback(new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR,
                  clusterName));
      if (result == true) {
        break;
      }
    }
    Assert.assertTrue(result);
    Assert.assertEquals(_errToOfflineInvoked.get(), 2, "reset() should be invoked 2 times");

    // clean up
    controller.syncStop();
    for (int i = 0; i < 5; i++) {
      participants[i].syncStop();
    }
    System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
  }

  /**
   * Removes stale state-transition status updates for the given partition so
   * the verifier does not fail on errors recorded before the reset.
   */
  private void clearStatusUpdate(String clusterName, String instance, String resource,
      String partition) {
    // clear status update for error partition so verify() will not fail on
    // old errors
    ZKHelixDataAccessor accessor =
        new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
    Builder keyBuilder = accessor.keyBuilder();
    LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instance));
    accessor.removeProperty(keyBuilder.stateTransitionStatus(instance, liveInstance.getEphemeralOwner(),
        resource, partition));
  }
  // TODO: throw exception in reset()
}
| 9,177 |
0 |
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix
|
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix/webapp/AdminTestHelper.java
|
package org.apache.helix.webapp;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.concurrent.CountDownLatch;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.restlet.Client;
import org.restlet.Request;
import org.restlet.Response;
import org.restlet.data.MediaType;
import org.restlet.data.Method;
import org.restlet.data.Reference;
import org.restlet.data.Status;
import org.restlet.representation.Representation;
import org.testng.Assert;
public class AdminTestHelper {
public static class AdminThread {
Thread _adminThread;
CountDownLatch _stopCountDown = new CountDownLatch(1);
String _zkAddr;
int _port;
public AdminThread(String zkAddr, int port) {
_zkAddr = zkAddr;
_port = port;
}
public void start() {
Thread adminThread = new Thread(new Runnable() {
@Override
public void run() {
HelixAdminWebApp app = null;
try {
app = new HelixAdminWebApp(_zkAddr, _port);
app.start();
// Thread.currentThread().join();
_stopCountDown.await();
} catch (Exception e) {
e.printStackTrace();
} finally {
if (app != null) {
// System.err.println("Stopping HelixAdminWebApp");
app.stop();
}
}
}
});
adminThread.setDaemon(true);
adminThread.start();
}
public void stop() {
_stopCountDown.countDown();
}
}
public static ZNRecord get(Client client, String url) throws IOException {
Reference resourceRef = new Reference(url);
Request request = new Request(Method.GET, resourceRef);
Response response = client.handle(request);
Assert.assertEquals(response.getStatus(), Status.SUCCESS_OK);
Representation result = response.getEntity();
StringWriter sw = new StringWriter();
result.write(sw);
String responseStr = sw.toString();
Assert.assertEquals(responseStr.toLowerCase().indexOf("error"), -1);
Assert.assertEquals(responseStr.toLowerCase().indexOf("exception"), -1);
ObjectMapper mapper = new ObjectMapper();
return mapper.readValue(new StringReader(responseStr), ZNRecord.class);
}
public static void delete(Client client, String url) throws IOException {
Reference resourceRef = new Reference(url);
Request request = new Request(Method.DELETE, resourceRef);
Response response = client.handle(request);
Assert.assertEquals(response.getStatus(), Status.SUCCESS_NO_CONTENT);
}
public static ZNRecord post(Client client, String url, String body)
throws IOException {
Reference resourceRef = new Reference(url);
Request request = new Request(Method.POST, resourceRef);
request.setEntity(body, MediaType.APPLICATION_ALL);
Response response = client.handle(request);
Assert.assertEquals(response.getStatus(), Status.SUCCESS_OK);
Representation result = response.getEntity();
StringWriter sw = new StringWriter();
if (result != null) {
result.write(sw);
}
String responseStr = sw.toString();
Assert.assertEquals(responseStr.toLowerCase().indexOf("error"), -1);
Assert.assertEquals(responseStr.toLowerCase().indexOf("exception"), -1);
ObjectMapper mapper = new ObjectMapper();
return mapper.readValue(new StringReader(responseStr), ZNRecord.class);
}
}
| 9,178 |
0 |
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix
|
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix/webapp/TestClusterManagementWebapp.java
|
package org.apache.helix.webapp;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.model.InstanceConfig.InstanceConfigProperty;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.webapp.resources.ClusterRepresentationUtil;
import org.apache.helix.webapp.resources.InstancesResource.ListInstancesWrapper;
import org.apache.helix.webapp.resources.JsonParameters;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.restlet.Client;
import org.restlet.Request;
import org.restlet.Response;
import org.restlet.data.MediaType;
import org.restlet.data.Method;
import org.restlet.data.Reference;
import org.restlet.representation.Representation;
import org.testng.Assert;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
public class TestClusterManagementWebapp extends AdminTestBase {
  /**
   * Single TestNG entry point that runs the verification steps in order.
   * The steps share the instance fields below and depend on each other's
   * side effects (cluster must exist before resources, resources before
   * rebalance, etc.), so the order must not change.
   */
  @Test
  public void testInvocation() throws Exception {
    verifyAddCluster();
    verifyAddStateModel();
    verifyAddHostedEntity();
    verifyAddInstance();
    verifyRebalance();
    verifyEnableInstance();
    verifyAlterIdealState();
    verifyConfigAccessor();
    verifyEnableCluster();
    System.out.println("Test passed!!");
  }
  /*
   * Shared fixture for the verification steps above; each step builds on
   * state created by the previous one using these names.
   */
  String clusterName = "cluster-12345";          // cluster created by verifyAddCluster
  String resourceGroupName = "new-entity-12345"; // resource created by verifyAddHostedEntity
  String instance1 = "test-1";                   // first instance added by verifyAddInstance
  String statemodel = "state_model";
  int instancePort = 9999;                       // port suffix used for all test instances
  int partitions = 10;                           // partitions of the test resource
  int replicas = 3;                              // replicas requested by verifyRebalance
  /**
   * Reads the existing MasterSlave state model definition over REST, clones it
   * under the id "Test", posts it back as a new state model definition, and
   * checks the response lists the new definition.
   */
  void verifyAddStateModel() throws JsonGenerationException, JsonMappingException, IOException {
    String httpUrlBase =
        "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName
            + "/StateModelDefs/MasterSlave";
    Reference resourceRef = new Reference(httpUrlBase);
    Request request = new Request(Method.GET, resourceRef);
    Response response = _gClient.handle(request);
    Representation result = response.getEntity();
    StringWriter sw = new StringWriter();
    result.write(sw);

    ObjectMapper mapper = new ObjectMapper();
    ZNRecord zn = mapper.readValue(new StringReader(sw.toString()), ZNRecord.class);

    Map<String, String> paraMap = new HashMap<String, String>();
    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addStateModelDef);

    // clone the fetched definition under a new id
    ZNRecord r = new ZNRecord("Test");
    r.merge(zn);

    httpUrlBase = "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/StateModelDefs";
    resourceRef = new Reference(httpUrlBase);
    request = new Request(Method.POST, resourceRef);
    request.setEntity(
        JsonParameters.JSON_PARAMETERS + "=" + ClusterRepresentationUtil.ObjectToJson(paraMap)
            + "&" + JsonParameters.NEW_STATE_MODEL_DEF + "="
            + ClusterRepresentationUtil.ZNRecordToJson(r), MediaType.APPLICATION_ALL);
    response = _gClient.handle(request);
    result = response.getEntity();
    sw = new StringWriter();
    result.write(sw);
    // System.out.println(sw.toString());
    AssertJUnit.assertTrue(sw.toString().contains("Test"));
  }
  /**
   * Creates {@link #clusterName} via the REST addCluster command and verifies
   * the returned cluster listing contains it.
   */
  void verifyAddCluster() throws IOException, InterruptedException {
    String httpUrlBase = "http://localhost:" + ADMIN_PORT + "/clusters";
    Map<String, String> paraMap = new HashMap<String, String>();

    paraMap.put(JsonParameters.CLUSTER_NAME, clusterName);
    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addCluster);

    Reference resourceRef = new Reference(httpUrlBase);

    Request request = new Request(Method.POST, resourceRef);
    request.setEntity(
        JsonParameters.JSON_PARAMETERS + "=" + ClusterRepresentationUtil.ObjectToJson(paraMap),
        MediaType.APPLICATION_ALL);
    Response response = _gClient.handle(request);

    Representation result = response.getEntity();
    StringWriter sw = new StringWriter();
    result.write(sw);

    // System.out.println(sw.toString());

    ObjectMapper mapper = new ObjectMapper();
    ZNRecord zn = mapper.readValue(new StringReader(sw.toString()), ZNRecord.class);
    AssertJUnit.assertTrue(zn.getListField("clusters").contains(clusterName));
  }
  /**
   * Adds resource {@link #resourceGroupName} ({@link #partitions} partitions,
   * MasterSlave) via REST, verifies it appears in the resource-group listing,
   * then fetches the resource endpoint as a smoke check.
   */
  void verifyAddHostedEntity() throws JsonGenerationException, JsonMappingException, IOException {
    String httpUrlBase =
        "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/resourceGroups";
    Map<String, String> paraMap = new HashMap<String, String>();

    paraMap.put(JsonParameters.RESOURCE_GROUP_NAME, resourceGroupName);
    paraMap.put(JsonParameters.PARTITIONS, "" + partitions);
    paraMap.put(JsonParameters.STATE_MODEL_DEF_REF, "MasterSlave");
    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addResource);

    Reference resourceRef = new Reference(httpUrlBase);

    Request request = new Request(Method.POST, resourceRef);
    request.setEntity(
        JsonParameters.JSON_PARAMETERS + "=" + ClusterRepresentationUtil.ObjectToJson(paraMap),
        MediaType.APPLICATION_ALL);
    Response response = _gClient.handle(request);

    Representation result = response.getEntity();
    StringWriter sw = new StringWriter();
    result.write(sw);

    // System.out.println(sw.toString());

    ObjectMapper mapper = new ObjectMapper();
    ZNRecord zn = mapper.readValue(new StringReader(sw.toString()), ZNRecord.class);
    AssertJUnit.assertTrue(zn.getListField("ResourceGroups").contains(resourceGroupName));

    // fetch the newly created resource endpoint as a smoke check
    httpUrlBase =
        "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/resourceGroups/"
            + resourceGroupName;
    resourceRef = new Reference(httpUrlBase);

    request = new Request(Method.GET, resourceRef);

    response = _gClient.handle(request);

    result = response.getEntity();
    sw = new StringWriter();
    result.write(sw);

    // System.out.println(sw.toString());
  }
void verifyAddInstance() throws JsonGenerationException, JsonMappingException, IOException {
String httpUrlBase =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/instances";
Map<String, String> paraMap = new HashMap<String, String>();
// Add 1 instance
paraMap.put(JsonParameters.INSTANCE_NAME, instance1 + ":" + instancePort);
paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addInstance);
Reference resourceRef = new Reference(httpUrlBase);
Request request = new Request(Method.POST, resourceRef);
request.setEntity(
JsonParameters.JSON_PARAMETERS + "=" + ClusterRepresentationUtil.ObjectToJson(paraMap),
MediaType.APPLICATION_ALL);
Response response = _gClient.handle(request);
Representation result = response.getEntity();
StringWriter sw = new StringWriter();
result.write(sw);
// System.out.println(sw.toString());
ObjectMapper mapper = new ObjectMapper();
TypeReference<ListInstancesWrapper> typeRef = new TypeReference<ListInstancesWrapper>() {
};
ListInstancesWrapper wrapper = mapper.readValue(new StringReader(sw.toString()), typeRef);
List<ZNRecord> znList = wrapper.instanceInfo;
AssertJUnit.assertTrue(znList.get(0).getId().equals(instance1 + "_" + instancePort));
// the case to add more than 1 instances
paraMap.clear();
paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addInstance);
String[] instances = {
"test2", "test3", "test4", "test5"
};
String instanceNames = "";
boolean first = true;
for (String instance : instances) {
if (first == true) {
first = false;
} else {
instanceNames += ";";
}
instanceNames += (instance + ":" + instancePort);
}
paraMap.put(JsonParameters.INSTANCE_NAMES, instanceNames);
request = new Request(Method.POST, resourceRef);
request.setEntity(
JsonParameters.JSON_PARAMETERS + "=" + ClusterRepresentationUtil.ObjectToJson(paraMap),
MediaType.APPLICATION_ALL);
response = _gClient.handle(request);
result = response.getEntity();
sw = new StringWriter();
result.write(sw);
// System.out.println(sw.toString());
mapper = new ObjectMapper();
wrapper = mapper.readValue(new StringReader(sw.toString()), typeRef);
znList = wrapper.instanceInfo;
for (String instance : instances) {
boolean found = false;
for (ZNRecord r : znList) {
String instanceId = instance + "_" + instancePort;
if (r.getId().equals(instanceId)) {
found = true;
break;
}
}
AssertJUnit.assertTrue(found);
}
}
void verifyRebalance() throws JsonGenerationException, JsonMappingException, IOException {
String httpUrlBase =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/resourceGroups/"
+ resourceGroupName + "/idealState";
Map<String, String> paraMap = new HashMap<String, String>();
// Add 1 instance
paraMap.put(JsonParameters.REPLICAS, "" + replicas);
paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.rebalance);
Reference resourceRef = new Reference(httpUrlBase);
Request request = new Request(Method.POST, resourceRef);
request.setEntity(
JsonParameters.JSON_PARAMETERS + "=" + ClusterRepresentationUtil.ObjectToJson(paraMap),
MediaType.APPLICATION_ALL);
Response response = _gClient.handle(request);
Representation result = response.getEntity();
StringWriter sw = new StringWriter();
result.write(sw);
// System.out.println(sw.toString());
ObjectMapper mapper = new ObjectMapper();
ZNRecord r = mapper.readValue(new StringReader(sw.toString()), ZNRecord.class);
for (int i = 0; i < partitions; i++) {
String partitionName = resourceGroupName + "_" + i;
assert (r.getMapField(partitionName).size() == replicas);
}
httpUrlBase = "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName;
resourceRef = new Reference(httpUrlBase);
request = new Request(Method.GET, resourceRef);
response = _gClient.handle(request);
result = response.getEntity();
sw = new StringWriter();
result.write(sw);
}
  /**
   * Disables instance {@code instance1_instancePort} via the REST
   * enableInstance command and checks HELIX_ENABLED becomes "false", then
   * re-enables it and checks the flag flips back to "true".
   */
  void verifyEnableInstance() throws JsonGenerationException, JsonMappingException, IOException {
    String httpUrlBase =
        "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/instances/" + instance1
            + "_" + instancePort;
    Map<String, String> paraMap = new HashMap<String, String>();
    // disable the instance
    paraMap.put(JsonParameters.ENABLED, "" + false);
    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.enableInstance);

    Reference resourceRef = new Reference(httpUrlBase);

    Request request = new Request(Method.POST, resourceRef);
    request.setEntity(
        JsonParameters.JSON_PARAMETERS + "=" + ClusterRepresentationUtil.ObjectToJson(paraMap),
        MediaType.APPLICATION_ALL);
    Response response = _gClient.handle(request);

    Representation result = response.getEntity();
    StringWriter sw = new StringWriter();
    result.write(sw);

    // System.out.println(sw.toString());

    ObjectMapper mapper = new ObjectMapper();
    ZNRecord r = mapper.readValue(new StringReader(sw.toString()), ZNRecord.class);
    AssertJUnit.assertTrue(r.getSimpleField(InstanceConfigProperty.HELIX_ENABLED.toString())
        .equals("" + false));

    // Then enable it
    paraMap.put(JsonParameters.ENABLED, "" + true);
    request = new Request(Method.POST, resourceRef);
    request.setEntity(
        JsonParameters.JSON_PARAMETERS + "=" + ClusterRepresentationUtil.ObjectToJson(paraMap),
        MediaType.APPLICATION_ALL);
    response = _gClient.handle(request);

    result = response.getEntity();
    sw = new StringWriter();
    result.write(sw);

    // System.out.println(sw.toString());

    mapper = new ObjectMapper();
    r = mapper.readValue(new StringReader(sw.toString()), ZNRecord.class);
    AssertJUnit.assertTrue(r.getSimpleField(InstanceConfigProperty.HELIX_ENABLED.toString())
        .equals("" + true));
  }
  /**
   * Fetches the resource's ideal state over REST, removes one partition's map
   * field, posts the modified record back via addIdealState, and verifies the
   * returned ideal state no longer contains the removed partition while every
   * remaining partition was already present in the edited record.
   */
  void verifyAlterIdealState() throws IOException {
    String httpUrlBase =
        "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/resourceGroups/"
            + resourceGroupName + "/idealState";

    Reference resourceRef = new Reference(httpUrlBase);
    Request request = new Request(Method.GET, resourceRef);

    Response response = _gClient.handle(request);
    Representation result = response.getEntity();
    StringWriter sw = new StringWriter();
    result.write(sw);

    // System.out.println(sw.toString());

    ObjectMapper mapper = new ObjectMapper();
    ZNRecord r = mapper.readValue(new StringReader(sw.toString()), ZNRecord.class);

    // drop one partition from the fetched ideal state
    String partitionName = "new-entity-12345_3";
    r.getMapFields().remove(partitionName);

    Map<String, String> paraMap = new HashMap<String, String>();
    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addIdealState);

    resourceRef = new Reference(httpUrlBase);
    request = new Request(Method.POST, resourceRef);
    request.setEntity(
        JsonParameters.JSON_PARAMETERS + "=" + ClusterRepresentationUtil.ObjectToJson(paraMap)
            + "&" + JsonParameters.NEW_IDEAL_STATE + "="
            + ClusterRepresentationUtil.ZNRecordToJson(r), MediaType.APPLICATION_ALL);
    response = _gClient.handle(request);
    result = response.getEntity();
    sw = new StringWriter();
    result.write(sw);

    // System.out.println(sw.toString());

    mapper = new ObjectMapper();
    ZNRecord r2 = mapper.readValue(new StringReader(sw.toString()), ZNRecord.class);

    // removed partition must be gone; everything returned must have been posted
    AssertJUnit.assertTrue(!r2.getMapFields().containsKey(partitionName));
    for (String key : r2.getMapFields().keySet()) {
      AssertJUnit.assertTrue(r.getMapFields().containsKey(key));
    }
  }
/**
 * Exercises the config REST endpoints end-to-end: writes configs via POST and
 * reads them back via GET at every supported scope (cluster, participant,
 * resource, partition), then lists the available scopes and per-scope keys.
 */
void verifyConfigAccessor() throws Exception {
ObjectMapper mapper = new ObjectMapper();
// set/get cluster scope configs
String url =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/configs/cluster/"
+ clusterName;
postConfig(_gClient, url, mapper, ClusterSetup.setConfig, "key1=value1,key2=value2");
ZNRecord record = get(_gClient, url, mapper);
// Cluster scope: exactly the two keys we just wrote.
Assert.assertEquals(record.getSimpleFields().size(), 2);
Assert.assertEquals(record.getSimpleField("key1"), "value1");
Assert.assertEquals(record.getSimpleField("key2"), "value2");
// set/get participant scope configs
String participantName = "test2_9999";
url =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/configs/participant/"
+ participantName;
postConfig(_gClient, url, mapper, ClusterSetup.setConfig, "key3=value3,key4=value4");
record = get(_gClient, url, mapper);
// Participant scope may already carry other fields (e.g. host/port), so only
// check that our keys are present.
Assert.assertTrue(record.getSimpleFields().size() >= 2, "Should at least contains 2 keys");
Assert.assertEquals(record.getSimpleField("key3"), "value3");
Assert.assertEquals(record.getSimpleField("key4"), "value4");
// set/get resource scope configs
url =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName
+ "/configs/resource/testResource";
postConfig(_gClient, url, mapper, ClusterSetup.setConfig, "key5=value5,key6=value6");
record = get(_gClient, url, mapper);
Assert.assertEquals(record.getSimpleFields().size(), 2);
Assert.assertEquals(record.getSimpleField("key5"), "value5");
Assert.assertEquals(record.getSimpleField("key6"), "value6");
// set/get partition scope configs
url =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName
+ "/configs/partition/testResource/testPartition";
postConfig(_gClient, url, mapper, ClusterSetup.setConfig, "key7=value7,key8=value8");
record = get(_gClient, url, mapper);
Assert.assertEquals(record.getSimpleFields().size(), 2);
Assert.assertEquals(record.getSimpleField("key7"), "value7");
Assert.assertEquals(record.getSimpleField("key8"), "value8");
// list keys: the top-level /configs endpoint returns the supported scopes.
url = "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/configs";
record = get(_gClient, url, mapper);
Assert.assertEquals(record.getListFields().size(), 1);
Assert.assertTrue(record.getListFields().containsKey("scopes"));
Assert.assertTrue(contains(record.getListField("scopes"), "CLUSTER", "PARTICIPANT", "RESOURCE",
"PARTITION"));
// url = "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/configs/cluster";
// record = get(client, url, mapper);
// Assert.assertEquals(record.getListFields().size(), 1);
// Assert.assertTrue(record.getListFields().containsKey("CLUSTER"));
// Assert.assertTrue(contains(record.getListField("CLUSTER"), clusterName), "record: " +
// record);
// Per-scope listings must include the entities configured above.
url = "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/configs/participant";
record = get(_gClient, url, mapper);
Assert.assertTrue(record.getListFields().containsKey("PARTICIPANT"));
Assert.assertTrue(contains(record.getListField("PARTICIPANT"), participantName));
url = "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/configs/resource";
record = get(_gClient, url, mapper);
Assert.assertEquals(record.getListFields().size(), 1);
Assert.assertTrue(record.getListFields().containsKey("RESOURCE"));
Assert.assertTrue(contains(record.getListField("RESOURCE"), "testResource"));
url =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName
+ "/configs/partition/testResource";
record = get(_gClient, url, mapper);
Assert.assertEquals(record.getListFields().size(), 1);
Assert.assertTrue(record.getListFields().containsKey("PARTITION"));
Assert.assertTrue(contains(record.getListField("PARTITION"), "testPartition"));
}
/**
 * Issues an HTTP GET against {@code url}, asserts the response body contains
 * no "error"/"exception" marker (case-insensitive), and deserializes the body
 * into a {@link ZNRecord}.
 */
private ZNRecord get(Client client, String url, ObjectMapper mapper) throws Exception {
  Response response = client.handle(new Request(Method.GET, new Reference(url)));
  StringWriter bodyWriter = new StringWriter();
  response.getEntity().write(bodyWriter);
  String body = bodyWriter.toString();
  String lowered = body.toLowerCase();
  Assert.assertTrue(lowered.indexOf("error") == -1);
  Assert.assertTrue(lowered.indexOf("exception") == -1);
  return mapper.readValue(new StringReader(body), ZNRecord.class);
}
/**
 * POSTs a config management command ({@code command} with the given
 * {@code configs} payload) to {@code url} and asserts the response body
 * reports no "error"/"exception" marker (case-insensitive).
 * NOTE(review): {@code mapper} is unused but retained for caller compatibility.
 */
private void postConfig(Client client, String url, ObjectMapper mapper, String command,
    String configs) throws Exception {
  Map<String, String> commandParams = new HashMap<String, String>();
  commandParams.put(JsonParameters.MANAGEMENT_COMMAND, command);
  commandParams.put(JsonParameters.CONFIGS, configs);
  String payload =
      JsonParameters.JSON_PARAMETERS + "=" + ClusterRepresentationUtil.ObjectToJson(commandParams);
  Request post = new Request(Method.POST, new Reference(url));
  post.setEntity(payload, MediaType.APPLICATION_ALL);
  Response response = client.handle(post);
  StringWriter bodyWriter = new StringWriter();
  response.getEntity().write(bodyWriter);
  String lowered = bodyWriter.toString().toLowerCase();
  Assert.assertTrue(lowered.indexOf("error") == -1);
  Assert.assertTrue(lowered.indexOf("exception") == -1);
}
/**
 * Disables and then re-enables the whole cluster through the Controller REST
 * endpoint, asserting that the pause znode appears after disable and is
 * removed after enable.
 */
void verifyEnableCluster() throws Exception {
System.out.println("START: verifyEnableCluster()");
String httpUrlBase =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/Controller";
Map<String, String> paramMap = new HashMap<String, String>();
// Post enableCluster with enabled=false: this should pause the cluster.
paramMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.enableCluster);
paramMap.put(JsonParameters.ENABLED, "" + false);
Reference resourceRef = new Reference(httpUrlBase);
Request request = new Request(Method.POST, resourceRef);
request.setEntity(
JsonParameters.JSON_PARAMETERS + "=" + ClusterRepresentationUtil.ObjectToJson(paramMap),
MediaType.APPLICATION_ALL);
Response response = _gClient.handle(request);
Representation result = response.getEntity();
StringWriter sw = new StringWriter();
result.write(sw);
System.out.println(sw.toString());
// verify pause znode exists
String pausePath = PropertyPathBuilder.pause(clusterName);
System.out.println("pausePath: " + pausePath);
boolean exists = _gZkClient.exists(pausePath);
Assert.assertTrue(exists, pausePath + " should exist");
// Then enable it: same command with enabled=true should remove the pause znode.
paramMap.put(JsonParameters.ENABLED, "" + true);
request = new Request(Method.POST, resourceRef);
request.setEntity(
JsonParameters.JSON_PARAMETERS + "=" + ClusterRepresentationUtil.ObjectToJson(paramMap),
MediaType.APPLICATION_ALL);
response = _gClient.handle(request);
result = response.getEntity();
sw = new StringWriter();
result.write(sw);
System.out.println(sw.toString());
// verify pause znode doesn't exist
exists = _gZkClient.exists(pausePath);
Assert.assertFalse(exists, pausePath + " should be removed");
System.out.println("END: verifyEnableCluster()");
}
/**
 * Returns {@code true} iff every one of {@code items} is present in
 * {@code list}.
 *
 * @param list the list to search
 * @param items the values that must all be present
 * @return whether {@code list} contains all of {@code items}
 */
private boolean contains(List<String> list, String... items) {
  // List.containsAll performs exactly the original per-item contains() loop.
  return list.containsAll(java.util.Arrays.asList(items));
}
}
| 9,179 |
0 |
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix
|
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix/webapp/TestResetInstance.java
|
package org.apache.helix.webapp;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.helix.TestHelper;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.mock.participant.ErrTransition;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.tools.ClusterStateVerifier;
import org.apache.helix.webapp.resources.JsonParameters;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestResetInstance extends AdminTestBase {
@Test
public void testResetInstance() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
final int n = 5;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
10, // partitions per resource
n, // number of nodes
3, // replicas
"MasterSlave", true); // do rebalance
// start controller
ClusterControllerManager controller = new ClusterControllerManager(ZK_ADDR, clusterName, "controller_0");
controller.syncStart();
Map<String, Set<String>> errPartitions = new HashMap<String, Set<String>>() {
{
put("SLAVE-MASTER", TestHelper.setOf("TestDB0_4"));
put("OFFLINE-SLAVE", TestHelper.setOf("TestDB0_8"));
}
};
// start mock participants
MockParticipantManager[] participants = new MockParticipantManager[n];
for (int i = 0; i < n; i++) {
String instanceName = "localhost_" + (12918 + i);
if (i == 0) {
participants[i] =
new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participants[i].setTransition(new ErrTransition(errPartitions));
} else {
participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
}
participants[i].syncStart();
}
// verify cluster
Map<String, Map<String, String>> errStateMap = new HashMap<String, Map<String, String>>();
errStateMap.put("TestDB0", new HashMap<String, String>());
errStateMap.get("TestDB0").put("TestDB0_4", "localhost_12918");
errStateMap.get("TestDB0").put("TestDB0_8", "localhost_12918");
boolean result =
ClusterStateVerifier
.verifyByZkCallback((new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR,
clusterName, errStateMap)));
Assert.assertTrue(result, "Cluster verification fails");
// reset node "localhost_12918"
participants[0].setTransition(null);
String hostName = "localhost_12918";
String instanceUrl =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/instances/" + hostName;
Map<String, String> paramMap = new HashMap<String, String>();
paramMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.resetInstance);
TestHelixAdminScenariosRest.assertSuccessPostOperation(instanceUrl, paramMap, false);
result =
ClusterStateVerifier
.verifyByZkCallback((new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR,
clusterName)));
Assert.assertTrue(result, "Cluster verification fails");
// clean up
controller.syncStop();
for (int i = 0; i < 5; i++) {
participants[i].syncStop();
}
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
}
| 9,180 |
0 |
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix
|
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix/webapp/TestDisableResource.java
|
package org.apache.helix.webapp;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import org.apache.helix.BaseDataAccessor;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.PropertyKey;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.IdealState;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.webapp.resources.JsonParameters;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestDisableResource extends AdminTestBase {
@Test
public void test() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
final int n = 5;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
10, // partitions per resource
n, // number of nodes
3, // replicas
"MasterSlave", true); // do rebalance
String instanceUrl =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/resourceGroups/"
+ "TestDB0";
// Disable TestDB0
Map<String, String> paramMap = new HashMap<String, String>();
paramMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.enableResource);
paramMap.put(JsonParameters.ENABLED, Boolean.toString(false));
TestHelixAdminScenariosRest.assertSuccessPostOperation(instanceUrl, paramMap, false);
BaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient);
HelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, baseAccessor);
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
IdealState idealState = accessor.getProperty(keyBuilder.idealStates("TestDB0"));
Assert.assertFalse(idealState.isEnabled());
// Re-enable TestDB0
paramMap.put(JsonParameters.ENABLED, Boolean.toString(true));
TestHelixAdminScenariosRest.assertSuccessPostOperation(instanceUrl, paramMap, false);
idealState = accessor.getProperty(keyBuilder.idealStates("TestDB0"));
Assert.assertTrue(idealState.isEnabled());
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
}
| 9,181 |
0 |
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix/webapp
|
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix/webapp/resources/TestJsonParameters.java
|
package org.apache.helix.webapp.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Map;
import org.apache.helix.tools.ClusterSetup;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestJsonParameters {
@Test
public void test() throws Exception {
String jsonPayload =
"{\"command\":\"resetPartition\",\"resource\": \"DB-1\",\"partition\":\"DB-1_22 DB-1_23\"}";
Map<String, String> map = ClusterRepresentationUtil.JsonToMap(jsonPayload);
Assert.assertNotNull(map.get(JsonParameters.MANAGEMENT_COMMAND));
Assert.assertEquals(ClusterSetup.resetPartition, map.get(JsonParameters.MANAGEMENT_COMMAND));
Assert.assertNotNull(map.get(JsonParameters.RESOURCE));
Assert.assertEquals("DB-1", map.get(JsonParameters.RESOURCE));
Assert.assertNotNull(map.get(JsonParameters.PARTITION));
Assert.assertEquals("DB-1_22 DB-1_23", map.get(JsonParameters.PARTITION));
}
}
| 9,182 |
0 |
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix/webapp
|
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix/webapp/resources/TestResourceUtil.java
|
package org.apache.helix.webapp.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.model.IdealState;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestResourceUtil {
@Test
public void testReadSimpleFieldWithoutDer() throws Exception {
IdealState idealState = new IdealState("MyDB");
idealState.setInstanceGroupTag("MyTag");
String recordStr = ClusterRepresentationUtil.ZNRecordToJson(idealState.getRecord());
String value =
ResourceUtil.extractSimpleFieldFromZNRecord(recordStr,
IdealState.IdealStateProperty.INSTANCE_GROUP_TAG.toString());
Assert.assertEquals(value, "MyTag");
}
}
| 9,183 |
0 |
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix/webapp
|
Create_ds/helix/helix-admin-webapp/src/test/java/org/apache/helix/webapp/resources/TestJobQueuesResource.java
|
package org.apache.helix.webapp.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import com.google.common.collect.Lists;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.integration.task.MockTask;
import org.apache.helix.integration.task.WorkflowGenerator;
import org.apache.helix.participant.StateMachineEngine;
import org.apache.helix.task.Task;
import org.apache.helix.task.TaskCallbackContext;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.TaskFactory;
import org.apache.helix.task.TaskStateModelFactory;
import org.apache.helix.task.beans.JobBean;
import org.apache.helix.task.beans.WorkflowBean;
import org.apache.helix.tools.ClusterStateVerifier;
import org.apache.helix.webapp.AdminTestBase;
import org.apache.helix.webapp.AdminTestHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.yaml.snakeyaml.Yaml;
public class TestJobQueuesResource extends AdminTestBase {
private static final Logger LOG = LoggerFactory.getLogger(TestJobQueuesResource.class);
@Test
public void test() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
final int n = 5;
final int p = 20;
final int r = 3;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
_gSetupTool.addCluster(clusterName, true);
for (int i = 0; i < n; i++) {
String instanceName = "localhost_" + (12918 + i);
_gSetupTool.addInstanceToCluster(clusterName, instanceName);
}
// Set up target db
_gSetupTool.addResourceToCluster(clusterName, WorkflowGenerator.DEFAULT_TGT_DB, p,
"MasterSlave");
_gSetupTool.rebalanceStorageCluster(clusterName, WorkflowGenerator.DEFAULT_TGT_DB, r);
Map<String, TaskFactory> taskFactoryReg = new HashMap<String, TaskFactory>();
taskFactoryReg.put("DummyTask", new TaskFactory() {
@Override
public Task createNewTask(TaskCallbackContext context) {
return new MockTask(context);
}
});
// Start dummy participants
MockParticipantManager[] participants = new MockParticipantManager[n];
for (int i = 0; i < n; i++) {
String instanceName = "localhost_" + (12918 + i);
participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
// Register a Task state model factory.
StateMachineEngine stateMachine = participants[i].getStateMachineEngine();
stateMachine.registerStateModelFactory("Task", new TaskStateModelFactory(participants[i],
taskFactoryReg));
participants[i].syncStart();
}
// start controller
String controllerName = "controller";
ClusterControllerManager controller =
new ClusterControllerManager(ZK_ADDR, clusterName, controllerName);
controller.syncStart();
boolean result =
ClusterStateVerifier
.verifyByZkCallback(new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR,
clusterName));
Assert.assertTrue(result);
// Start a queue
String queueName = "myQueue1";
LOG.info("Starting job-queue: " + queueName);
String jobQueueYamlConfig = "name: " + queueName;
String resourceUrl =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/jobQueues";
ZNRecord postRet = AdminTestHelper.post(_gClient, resourceUrl, jobQueueYamlConfig);
LOG.info("Started job-queue: " + queueName + ", ret: " + postRet);
LOG.info("Getting all job-queues");
ZNRecord getRet = AdminTestHelper.get(_gClient, resourceUrl);
LOG.info("Got job-queues: " + getRet);
// Enqueue job
resourceUrl =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/jobQueues/" + queueName;
WorkflowBean wfBean = new WorkflowBean();
wfBean.name = queueName;
JobBean jBean1 = new JobBean();
jBean1.name = "myJob1";
jBean1.command = "DummyTask";
jBean1.targetResource = WorkflowGenerator.DEFAULT_TGT_DB;
jBean1.targetPartitionStates = Lists.newArrayList("MASTER");
JobBean jBean2 = new JobBean();
jBean2.name = "myJob2";
jBean2.command = "DummyTask";
jBean2.targetResource = WorkflowGenerator.DEFAULT_TGT_DB;
jBean2.targetPartitionStates = Lists.newArrayList("SLAVE");
wfBean.jobs = Lists.newArrayList(jBean1, jBean2);
String jobYamlConfig = new Yaml().dump(wfBean);
LOG.info("Enqueuing jobs: " + jobQueueYamlConfig);
Map<String, String> paraMap = new HashMap<String, String>();
paraMap.put(JsonParameters.MANAGEMENT_COMMAND, TaskDriver.DriverCommand.start.toString());
String postBody =
String.format("%s=%s&%s=%s", JsonParameters.JSON_PARAMETERS,
ClusterRepresentationUtil.ObjectToJson(paraMap), ResourceUtil.YamlParamKey.NEW_JOB.toString(),
jobYamlConfig);
postRet = AdminTestHelper.post(_gClient, resourceUrl, postBody);
LOG.info("Enqueued job, ret: " + postRet);
// Get job
resourceUrl =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/jobQueues/" + queueName
+ "/" + jBean1.name;
getRet = AdminTestHelper.get(_gClient, resourceUrl);
LOG.info("Got job: " + getRet);
// Stop job queue
resourceUrl =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/jobQueues/" + queueName;
paraMap.put(JsonParameters.MANAGEMENT_COMMAND, TaskDriver.DriverCommand.stop.toString());
postBody = String.format("%s=%s", JsonParameters.JSON_PARAMETERS, ClusterRepresentationUtil.ObjectToJson(paraMap));
postRet = AdminTestHelper.post(_gClient, resourceUrl, postBody);
LOG.info("Stopped job-queue, ret: " + postRet);
// Delete a job
resourceUrl =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/jobQueues/" + queueName
+ "/" + jBean2.name;
AdminTestHelper.delete(_gClient, resourceUrl);
LOG.info("Delete a job: ");
// Resume job queue
resourceUrl =
"http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/jobQueues/" + queueName;
paraMap.put(JsonParameters.MANAGEMENT_COMMAND, TaskDriver.DriverCommand.resume.toString());
postBody = String.format("%s=%s", JsonParameters.JSON_PARAMETERS, ClusterRepresentationUtil.ObjectToJson(paraMap));
postRet = AdminTestHelper.post(_gClient, resourceUrl, postBody);
LOG.info("Resumed job-queue, ret: " + postRet);
// Flush job queue
paraMap.put(JsonParameters.MANAGEMENT_COMMAND, "persistDataChanges");
postBody =
JsonParameters.JSON_PARAMETERS + "=" + ClusterRepresentationUtil.ObjectToJson(paraMap);
postRet = AdminTestHelper.post(_gClient, resourceUrl, postBody);
LOG.info("Flushed job-queue, ret: " + postRet);
// clean up
controller.syncStop();
for (int i = 0; i < n; i++) {
if (participants[i] != null && participants[i].isConnected()) {
participants[i].syncStop();
}
}
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
}
| 9,184 |
0 |
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix
|
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp/HelixAdminWebApp.java
|
package org.apache.helix.webapp;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.zookeeper.datamodel.serializer.ByteArraySerializer;
import org.apache.helix.zookeeper.datamodel.serializer.ZNRecordSerializer;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.apache.helix.webapp.resources.ResourceUtil;
import org.restlet.Component;
import org.restlet.Context;
import org.restlet.data.Protocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class HelixAdminWebApp {
public final Logger LOG = LoggerFactory.getLogger(HelixAdminWebApp.class);
private RestAdminApplication _adminApp = null;
private Component _component = null;
private final int _helixAdminPort;
private final String _zkServerAddress;
private ZkClient _zkClient = null;
private ZkClient _rawZkClient = null;
public HelixAdminWebApp(String zkServerAddress, int adminPort) {
_zkServerAddress = zkServerAddress;
_helixAdminPort = adminPort;
}
public synchronized void start() throws Exception {
LOG.info("helixAdminWebApp starting");
if (_component == null) {
_zkClient =
new ZkClient(_zkServerAddress, ZkClient.DEFAULT_SESSION_TIMEOUT,
ZkClient.DEFAULT_CONNECTION_TIMEOUT, new ZNRecordSerializer());
_rawZkClient =
new ZkClient(_zkServerAddress, ZkClient.DEFAULT_SESSION_TIMEOUT,
ZkClient.DEFAULT_CONNECTION_TIMEOUT, new ByteArraySerializer());
_component = new Component();
_component.getServers().add(Protocol.HTTP, _helixAdminPort);
Context applicationContext = _component.getContext().createChildContext();
applicationContext.getAttributes()
.put(RestAdminApplication.ZKSERVERADDRESS, _zkServerAddress);
applicationContext.getAttributes().put(RestAdminApplication.PORT, "" + _helixAdminPort);
applicationContext.getAttributes().put(RestAdminApplication.ZKCLIENT, _zkClient);
applicationContext.getAttributes().put(ResourceUtil.ContextKey.RAW_ZKCLIENT.toString(),
_rawZkClient);
_adminApp = new RestAdminApplication(applicationContext);
// Attach the application to the component and start it
_component.getDefaultHost().attach(_adminApp);
_component.start();
}
LOG.info("helixAdminWebApp started on port: " + _helixAdminPort);
}
public synchronized void stop() {
LOG.info("Stopping helixAdminWebApp");
try {
_component.stop();
LOG.info("Stopped helixAdminWebApp");
} catch (Exception e) {
LOG.error("Exception in stopping helixAdminWebApp", e);
} finally {
if (_zkClient != null) {
_zkClient.close();
}
if (_rawZkClient != null) {
_rawZkClient.close();
}
}
}
}
| 9,185 |
0 |
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix
|
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp/RestAdminApplication.java
|
package org.apache.helix.webapp;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.logging.Level;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.helix.webapp.resources.ClusterResource;
import org.apache.helix.webapp.resources.ClustersResource;
import org.apache.helix.webapp.resources.ConfigResource;
import org.apache.helix.webapp.resources.ConstraintResource;
import org.apache.helix.webapp.resources.ControllerResource;
import org.apache.helix.webapp.resources.ControllerStatusUpdateResource;
import org.apache.helix.webapp.resources.CurrentStateResource;
import org.apache.helix.webapp.resources.CurrentStatesResource;
import org.apache.helix.webapp.resources.ErrorResource;
import org.apache.helix.webapp.resources.ErrorsResource;
import org.apache.helix.webapp.resources.ExternalViewResource;
import org.apache.helix.webapp.resources.IdealStateResource;
import org.apache.helix.webapp.resources.InstanceResource;
import org.apache.helix.webapp.resources.InstancesResource;
import org.apache.helix.webapp.resources.JobQueueResource;
import org.apache.helix.webapp.resources.JobQueuesResource;
import org.apache.helix.webapp.resources.JobResource;
import org.apache.helix.webapp.resources.ResourceGroupResource;
import org.apache.helix.webapp.resources.ResourceGroupsResource;
import org.apache.helix.webapp.resources.SchedulerTasksResource;
import org.apache.helix.webapp.resources.StateModelResource;
import org.apache.helix.webapp.resources.StateModelsResource;
import org.apache.helix.webapp.resources.StatusUpdateResource;
import org.apache.helix.webapp.resources.StatusUpdatesResource;
import org.apache.helix.webapp.resources.WorkflowsResource;
import org.apache.helix.webapp.resources.ZkChildResource;
import org.apache.helix.webapp.resources.ZkPathResource;
import org.restlet.Application;
import org.restlet.Context;
import org.restlet.Request;
import org.restlet.Response;
import org.restlet.Restlet;
import org.restlet.data.MediaType;
import org.restlet.representation.StringRepresentation;
import org.restlet.routing.Router;
import org.restlet.routing.Template;
/**
 * Restlet {@link Application} exposing the Helix admin REST API (V1).
 * <p>
 * {@link #createInboundRoot()} wires every admin endpoint URI to its server-side
 * resource class; {@link #main(String[])} parses command-line options and starts
 * the embedded web app until the process is interrupted.
 */
public class RestAdminApplication extends Application {
  /** Command-line option: print usage and exit. */
  public static final String HELP = "help";
  /** Command-line option: ZooKeeper server address (required). */
  public static final String ZKSERVERADDRESS = "zkSvr";
  /** Command-line option: web service port. */
  public static final String PORT = "port";
  /** Context attribute key under which the shared ZkClient is stored. */
  public static final String ZKCLIENT = "zkClient";
  /** Port used when no --port option is supplied. */
  public static final int DEFAULT_PORT = 8100;

  static {
    // Silence Restlet's internal logging; Helix components use their own loggers.
    org.restlet.engine.Engine.setLogLevel(Level.SEVERE);
  }

  public RestAdminApplication() {
    super();
  }

  public RestAdminApplication(Context context) {
    super(context);
  }

  /**
   * Builds the URI router that maps each admin REST endpoint to its resource class.
   * Exact-match mode is the default; only the raw ZK browsing endpoints use
   * prefix matching so arbitrary ZK paths can follow them.
   */
  @Override
  public Restlet createInboundRoot() {
    Router router = new Router(getContext());
    router.setDefaultMatchingMode(Template.MODE_EQUALS);
    router.attach("/clusters", ClustersResource.class);
    router.attach("/clusters/{clusterName}", ClusterResource.class);
    router.attach("/clusters/{clusterName}/resourceGroups", ResourceGroupsResource.class);
    router.attach("/clusters/{clusterName}/resourceGroups/{resourceName}",
        ResourceGroupResource.class);
    router.attach("/clusters/{clusterName}/workflows", WorkflowsResource.class);
    router.attach("/clusters/{clusterName}/jobQueues", JobQueuesResource.class);
    router.attach("/clusters/{clusterName}/jobQueues/{jobQueue}", JobQueueResource.class);
    router.attach("/clusters/{clusterName}/jobQueues/{jobQueue}/{job}", JobResource.class);
    router.attach("/clusters/{clusterName}/instances", InstancesResource.class);
    router.attach("/clusters/{clusterName}/instances/{instanceName}", InstanceResource.class);
    router.attach("/clusters/{clusterName}/instances/{instanceName}/currentState/{resourceName}",
        CurrentStateResource.class);
    router.attach("/clusters/{clusterName}/instances/{instanceName}/statusUpdate/{resourceName}",
        StatusUpdateResource.class);
    router.attach("/clusters/{clusterName}/instances/{instanceName}/errors/{resourceName}",
        ErrorResource.class);
    router.attach("/clusters/{clusterName}/instances/{instanceName}/currentState",
        CurrentStatesResource.class);
    router.attach("/clusters/{clusterName}/instances/{instanceName}/statusUpdate",
        StatusUpdatesResource.class);
    router.attach("/clusters/{clusterName}/instances/{instanceName}/errors", ErrorsResource.class);
    router.attach("/clusters/{clusterName}/resourceGroups/{resourceName}/idealState",
        IdealStateResource.class);
    router.attach("/clusters/{clusterName}/resourceGroups/{resourceName}/externalView",
        ExternalViewResource.class);
    router.attach("/clusters/{clusterName}/StateModelDefs/{modelName}", StateModelResource.class);
    router.attach("/clusters/{clusterName}/StateModelDefs", StateModelsResource.class);
    router.attach("/clusters/{clusterName}/SchedulerTasks", SchedulerTasksResource.class);
    router.attach("/clusters/{clusterName}/Controller", ControllerResource.class);
    router.attach("/clusters/{clusterName}/Controller/statusUpdates/{MessageType}/{MessageId}",
        ControllerStatusUpdateResource.class);
    router.attach("/clusters/{clusterName}/configs", ConfigResource.class);
    router.attach("/clusters/{clusterName}/configs/{scope}", ConfigResource.class);
    router.attach("/clusters/{clusterName}/configs/{scope}/{scopeKey1}", ConfigResource.class);
    router.attach("/clusters/{clusterName}/configs/{scope}/{scopeKey1}/{scopeKey2}",
        ConfigResource.class);
    router.attach("/clusters/{clusterName}/constraints/{constraintType}", ConstraintResource.class);
    router.attach("/clusters/{clusterName}/constraints/{constraintType}/{constraintId}",
        ConstraintResource.class);
    // ZK browsing endpoints take an arbitrary path suffix, so they must
    // prefix-match rather than exact-match.
    router.attach("/zkPath", ZkPathResource.class).setMatchingMode(Template.MODE_STARTS_WITH);
    router.attach("/zkChild", ZkChildResource.class).setMatchingMode(Template.MODE_STARTS_WITH);
    // Landing page served at the application root.
    Restlet mainpage = new Restlet() {
      @Override
      public void handle(Request request, Response response) {
        StringBuilder stringBuilder = new StringBuilder();
        stringBuilder.append("<html>");
        stringBuilder.append("<head><title>Restlet Cluster Management page</title></head>");
        stringBuilder.append("<body bgcolor=white>");
        stringBuilder.append("<table border=\"0\">");
        stringBuilder.append("<tr>");
        stringBuilder.append("<td>");
        stringBuilder.append("<h1>Rest cluster management interface V1</h1>");
        stringBuilder.append("</td>");
        stringBuilder.append("</tr>");
        stringBuilder.append("</table>");
        stringBuilder.append("</body>");
        stringBuilder.append("</html>");
        response.setEntity(new StringRepresentation(stringBuilder.toString(), MediaType.TEXT_HTML));
      }
    };
    router.attach("", mainpage);
    return router;
  }

  /** Prints command-line usage for this application. */
  public static void printUsage(Options cliOptions) {
    HelpFormatter helpFormatter = new HelpFormatter();
    helpFormatter.printHelp("java " + RestAdminApplication.class.getName(), cliOptions);
  }

  /** Builds the supported command-line options (help, zkSvr, port). */
  @SuppressWarnings("static-access")
  private static Options constructCommandLineOptions() {
    Option helpOption =
        OptionBuilder.withLongOpt(HELP).withDescription("Prints command-line options info")
            .create();
    helpOption.setArgs(0);
    helpOption.setRequired(false);
    helpOption.setArgName("print help message");
    Option zkServerOption =
        OptionBuilder.withLongOpt(ZKSERVERADDRESS).withDescription("Provide zookeeper address")
            .create();
    zkServerOption.setArgs(1);
    zkServerOption.setRequired(true);
    zkServerOption.setArgName("ZookeeperServerAddress(Required)");
    Option portOption =
        OptionBuilder.withLongOpt(PORT).withDescription("Provide web service port").create();
    portOption.setArgs(1);
    portOption.setRequired(false);
    portOption.setArgName("web service port, default: " + DEFAULT_PORT);
    Options options = new Options();
    options.addOption(helpOption);
    options.addOption(zkServerOption);
    options.addOption(portOption);
    return options;
  }

  /**
   * Parses command-line arguments and runs the admin web app until the main
   * thread is interrupted. Exits the process on unparsable options or an
   * invalid port value.
   * @param cliArgs raw command-line arguments
   * @throws Exception if the web app fails to start or stop
   */
  public static void processCommandLineArgs(String[] cliArgs) throws Exception {
    CommandLineParser cliParser = new GnuParser();
    Options cliOptions = constructCommandLineOptions();
    CommandLine cmd = null;
    try {
      cmd = cliParser.parse(cliOptions, cliArgs);
    } catch (ParseException pe) {
      System.err.println("RestAdminApplication: failed to parse command-line options: "
          + pe.toString());
      printUsage(cliOptions);
      System.exit(1);
    }
    int port = DEFAULT_PORT;
    if (cmd.hasOption(HELP)) {
      printUsage(cliOptions);
      return;
    } else if (cmd.hasOption(PORT)) {
      try {
        port = Integer.parseInt(cmd.getOptionValue(PORT));
      } catch (NumberFormatException nfe) {
        // Previously an unguarded parseInt: a non-numeric --port value crashed
        // with a raw NumberFormatException instead of a usage message.
        System.err.println("RestAdminApplication: invalid port value: "
            + cmd.getOptionValue(PORT));
        printUsage(cliOptions);
        System.exit(1);
      }
    }
    HelixAdminWebApp app = new HelixAdminWebApp(cmd.getOptionValue(ZKSERVERADDRESS), port);
    app.start();
    try {
      // Block forever; the app is stopped when this thread is interrupted.
      Thread.currentThread().join();
    } finally {
      app.stop();
    }
  }

  /**
   * @param args
   * @throws Exception
   */
  public static void main(String[] args) throws Exception {
    processCommandLineArgs(args);
  }
}
| 9,186 |
0 |
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp
|
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/IdealStateResource.java
|
package org.apache.helix.webapp.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Map;
import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.databind.JsonMappingException;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyKey;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.model.IdealState;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.restlet.data.MediaType;
import org.restlet.data.Status;
import org.restlet.representation.Representation;
import org.restlet.representation.StringRepresentation;
import org.restlet.representation.Variant;
import org.restlet.resource.ServerResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Class for server-side resource at
* <code>"/clusters/{clusterName}/resourceGroups/{resourceName}/idealState"
* <p>
* <li>GET get ideal state
* <li>POST set ideal state
*/
public class IdealStateResource extends ServerResource {
  private final static Logger LOG = LoggerFactory.getLogger(IdealStateResource.class);

  public IdealStateResource() {
    // Advertise plain-text and JSON; negotiation is disabled so this resource
    // controls the response media type itself.
    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
    setNegotiated(false);
  }

  /**
   * Get ideal state
   * <p>
   * Usage:
   * <code>curl http://{host:port}/clusters/{clusterName}/resourceGroups/{resourceName}/idealState
   */
  @Override
  public Representation get() {
    StringRepresentation presentation = null;
    try {
      String clusterName =
          ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
      String resourceName =
          ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.RESOURCE_NAME);
      presentation = getIdealStateRepresentation(clusterName, resourceName);
    } catch (Exception e) {
      // Surface the failure to the client as a JSON error payload.
      String error = ClusterRepresentationUtil.getErrorAsJsonStringFromException(e);
      presentation = new StringRepresentation(error, MediaType.APPLICATION_JSON);
      LOG.error("Exception in get idealState", e);
    }
    return presentation;
  }

  /**
   * Reads the ideal-state ZNode of {@code resourceName} in {@code clusterName}
   * directly from ZooKeeper and returns it as a JSON representation.
   */
  StringRepresentation getIdealStateRepresentation(String clusterName, String resourceName)
      throws JsonGenerationException, JsonMappingException, IOException {
    Builder keyBuilder = new PropertyKey.Builder(clusterName);
    ZkClient zkclient =
        ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.RAW_ZKCLIENT);
    String idealStateStr =
        ResourceUtil.readZkAsBytes(zkclient, keyBuilder.idealStates(resourceName));
    StringRepresentation representation =
        new StringRepresentation(idealStateStr, MediaType.APPLICATION_JSON);
    return representation;
  }

  /**
   * Set ideal state
   * <p>
   * Usage:
   * <p>
   * <li>Add ideal state:
   * <code>curl -d @'{newIdealState.json}' -H 'Content-Type: application/json'
   * http://{host:port}/clusters/{cluster}/resourceGroups/{resource}/idealState
   * <pre>
   * newIdealState:
   * jsonParameters={"command":"addIdealState"}&newIdealState={
   *   "id" : "{MyDB}",
   *   "simpleFields" : {
   *     "IDEAL_STATE_MODE" : "AUTO",
   *     "NUM_PARTITIONS" : "{8}",
   *     "REBALANCE_MODE" : "SEMI_AUTO",
   *     "REPLICAS" : "0",
   *     "STATE_MODEL_DEF_REF" : "MasterSlave",
   *     "STATE_MODEL_FACTORY_NAME" : "DEFAULT"
   *   },
   *   "listFields" : {
   *   },
   *   "mapFields" : {
   *     "{MyDB_0}" : {
   *       "{localhost_1001}" : "MASTER",
   *       "{localhost_1002}" : "SLAVE"
   *     }
   *   }
   * }
   * </pre>
   * <li>Rebalance cluster:
   * <code>curl -d 'jsonParameters={"command":"rebalance","replicas":"{3}"}'
   * -H "Content-Type: application/json" http://{host:port}/clusters/{cluster}/resourceGroups/{resource}/idealState
   * <li>Expand resource: <code>n/a
   * <li>Add resource property:
   * <code>curl -d 'jsonParameters={"command":"addResourceProperty","{REBALANCE_TIMER_PERIOD}":"{500}"}'
   * -H "Content-Type: application/json" http://{host:port}/clusters/{cluster}/resourceGroups/{resource}/idealState
   */
  @Override
  public Representation post(Representation entity) {
    try {
      String clusterName =
          ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
      String resourceName =
          ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.RESOURCE_NAME);
      ZkClient zkClient =
          ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
      ClusterSetup setupTool = new ClusterSetup(zkClient);
      JsonParameters jsonParameters = new JsonParameters(entity);
      String command = jsonParameters.getCommand();
      if (command == null) {
        // Consistent with ControllerResource/ClusterResource: a missing command
        // previously caused a bare NullPointerException on the dereference below.
        throw new HelixException("Could NOT find 'command' in parameterMap: "
            + jsonParameters._parameterMap);
      } else if (command.equalsIgnoreCase(ClusterSetup.addIdealState)) {
        ZNRecord newIdealState = jsonParameters.getExtraParameter(JsonParameters.NEW_IDEAL_STATE);
        HelixDataAccessor accessor =
            ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName);
        accessor.setProperty(accessor.keyBuilder().idealStates(resourceName), new IdealState(
            newIdealState));
      } else if (command.equalsIgnoreCase(ClusterSetup.rebalance)) {
        int replicas = Integer.parseInt(jsonParameters.getParameter(JsonParameters.REPLICAS));
        String keyPrefix = jsonParameters.getParameter(JsonParameters.RESOURCE_KEY_PREFIX);
        String groupTag = jsonParameters.getParameter(ClusterSetup.instanceGroupTag);
        setupTool.rebalanceCluster(clusterName, resourceName, replicas, keyPrefix, groupTag);
      } else if (command.equalsIgnoreCase(ClusterSetup.expandResource)) {
        setupTool.expandResource(clusterName, resourceName);
      } else if (command.equalsIgnoreCase(ClusterSetup.addResourceProperty)) {
        Map<String, String> parameterMap = jsonParameters.cloneParameterMap();
        parameterMap.remove(JsonParameters.MANAGEMENT_COMMAND);
        for (String key : parameterMap.keySet()) {
          setupTool.addResourceProperty(clusterName, resourceName, key, parameterMap.get(key));
        }
      } else {
        throw new HelixException("Unsupported command: " + command + ". Should be one of ["
            + ClusterSetup.addIdealState + ", " + ClusterSetup.rebalance + ", "
            + ClusterSetup.expandResource + ", " + ClusterSetup.addResourceProperty + "]");
      }
      // Echo the (possibly updated) ideal state back to the caller.
      getResponse().setEntity(getIdealStateRepresentation(clusterName, resourceName));
      getResponse().setStatus(Status.SUCCESS_OK);
    } catch (Exception e) {
      // NOTE(review): errors are reported in the body with HTTP 200, matching
      // the sibling resources' existing convention.
      getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
      getResponse().setStatus(Status.SUCCESS_OK);
      LOG.error("Error in posting " + entity, e);
    }
    return null;
  }
}
| 9,187 |
0 |
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp
|
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/ControllerResource.java
|
package org.apache.helix.webapp.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Map;
import java.util.TreeMap;
import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.databind.JsonMappingException;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyKey;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.PropertyType;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.util.StatusUpdateUtil.Level;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.restlet.data.MediaType;
import org.restlet.data.Status;
import org.restlet.representation.Representation;
import org.restlet.representation.StringRepresentation;
import org.restlet.representation.Variant;
import org.restlet.resource.ServerResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Class for server-side resource at <code>"/clusters/{clusterName}/Controller"
* <p>
* <li>GET list Helix controller info
* <li>POST enable/disable Helix controller
*/
public class ControllerResource extends ServerResource {
  private final static Logger LOG = LoggerFactory.getLogger(ControllerResource.class);

  public ControllerResource() {
    // Advertise plain-text and JSON; negotiation is disabled so this resource
    // controls the response media type itself.
    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
    setNegotiated(false);
  }

  /**
   * Builds a JSON representation of the current controller leader (or a
   * placeholder record noting that no leader exists), plus the cluster's
   * pause flag.
   */
  StringRepresentation getControllerRepresentation(String clusterName)
      throws JsonGenerationException, JsonMappingException, IOException {
    Builder keyBuilder = new PropertyKey.Builder(clusterName);
    ZkClient zkClient =
        ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
    ZKHelixDataAccessor accessor =
        new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
    ZNRecord record = null;
    LiveInstance leader = accessor.getProperty(keyBuilder.controllerLeader());
    if (leader != null) {
      record = leader.getRecord();
    } else {
      // No leader: synthesize a record carrying a timestamped info entry.
      record = new ZNRecord("");
      DateFormat formatter = new SimpleDateFormat("yyyyMMdd-HHmmss.SSSSSS");
      String time = formatter.format(new Date());
      Map<String, String> contentMap = new TreeMap<String, String>();
      contentMap.put("AdditionalInfo", "No leader exists");
      record.setMapField(Level.HELIX_INFO + "-" + time, contentMap);
    }
    // Presence of the pause ZNode means the cluster is paused.
    boolean paused = (accessor.getProperty(keyBuilder.pause()) == null ? false : true);
    record.setSimpleField(PropertyType.PAUSE.toString(), "" + paused);
    String retVal = ClusterRepresentationUtil.ZNRecordToJson(record);
    StringRepresentation representation =
        new StringRepresentation(retVal, MediaType.APPLICATION_JSON);
    return representation;
  }

  /**
   * List Helix controller info
   * <p>
   * Usage: <code>curl http://{host:port}/clusters/{cluster}/Controller
   */
  @Override
  public Representation get() {
    StringRepresentation presentation = null;
    try {
      String clusterName =
          ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
      presentation = getControllerRepresentation(clusterName);
    } catch (Exception e) {
      LOG.error("Exception get controller info", e);
      String error = ClusterRepresentationUtil.getErrorAsJsonStringFromException(e);
      presentation = new StringRepresentation(error, MediaType.APPLICATION_JSON);
    }
    return presentation;
  }

  /**
   * Enable/disable Helix controller
   * <p>
   * Usage:
   * <code>curl -d 'jsonParameters={"command":"enableCluster","enabled":"{true/false}"}'
   * -H "Content-Type: application/json" http://{host:port}/clusters/{cluster}/Controller
   */
  @Override
  public Representation post(Representation entity) {
    try {
      String clusterName =
          ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
      ZkClient zkClient =
          ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
      ClusterSetup setupTool = new ClusterSetup(zkClient);
      JsonParameters jsonParameters = new JsonParameters(entity);
      String command = jsonParameters.getCommand();
      if (command == null) {
        throw new HelixException("Could NOT find 'command' in parameterMap: "
            + jsonParameters._parameterMap);
      } else if (command.equalsIgnoreCase(ClusterSetup.enableCluster)) {
        boolean enabled = Boolean.parseBoolean(jsonParameters.getParameter(JsonParameters.ENABLED));
        setupTool.getClusterManagementTool().enableCluster(clusterName, enabled);
      } else {
        throw new HelixException("Unsupported command: " + command + ". Should be one of ["
            + ClusterSetup.enableCluster + "]");
      }
      getResponse().setEntity(getControllerRepresentation(clusterName));
      getResponse().setStatus(Status.SUCCESS_OK);
    } catch (Exception e) {
      getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
      getResponse().setStatus(Status.SUCCESS_OK);
      // Previously this failure was silently swallowed; log it like the
      // sibling resources do.
      LOG.error("Error in posting " + entity, e);
    }
    return null;
  }
}
| 9,188 |
0 |
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp
|
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/ClusterResource.java
|
package org.apache.helix.webapp.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.List;
import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.databind.JsonMappingException;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.restlet.data.MediaType;
import org.restlet.data.Status;
import org.restlet.representation.Representation;
import org.restlet.representation.StringRepresentation;
import org.restlet.representation.Variant;
import org.restlet.resource.ServerResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Class for server-side resource at <code> "/clusters/{clusterName}"
* <p>
* <li>GET list cluster information
* <li>POST activate/deactivate a cluster in distributed controller mode
* <li>DELETE remove a cluster
*/
public class ClusterResource extends ServerResource {
  private final static Logger LOG = LoggerFactory.getLogger(ClusterResource.class);

  public ClusterResource() {
    // Advertise plain-text and JSON; negotiation is disabled so this resource
    // controls the response media type itself.
    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
    setNegotiated(false);
  }

  /**
   * List cluster information
   * <p>
   * Usage: <code> curl http://{host:port}/clusters/{clusterName}
   */
  @Override
  public Representation get() {
    StringRepresentation presentation = null;
    try {
      String clusterName =
          ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
      presentation = getClusterRepresentation(clusterName);
    } catch (Exception e) {
      String error = ClusterRepresentationUtil.getErrorAsJsonStringFromException(e);
      presentation = new StringRepresentation(error, MediaType.APPLICATION_JSON);
      LOG.error("Exception in get cluster", e);
    }
    return presentation;
  }

  /**
   * Builds a JSON summary of the cluster: participants, resources, state model
   * definitions, and the current controller leader (empty string if none).
   */
  StringRepresentation getClusterRepresentation(String clusterName) throws JsonGenerationException,
      JsonMappingException, IOException {
    ZkClient zkClient =
        ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
    ClusterSetup setupTool = new ClusterSetup(zkClient);
    List<String> instances =
        setupTool.getClusterManagementTool().getInstancesInCluster(clusterName);
    ZNRecord clusterSummaryRecord = new ZNRecord("Cluster Summary");
    clusterSummaryRecord.setListField("participants", instances);
    List<String> resources =
        setupTool.getClusterManagementTool().getResourcesInCluster(clusterName);
    clusterSummaryRecord.setListField("resources", resources);
    List<String> models = setupTool.getClusterManagementTool().getStateModelDefs(clusterName);
    clusterSummaryRecord.setListField("stateModelDefs", models);
    HelixDataAccessor accessor =
        ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName);
    Builder keyBuilder = accessor.keyBuilder();
    LiveInstance leader = accessor.getProperty(keyBuilder.controllerLeader());
    if (leader != null) {
      clusterSummaryRecord.setSimpleField("LEADER", leader.getInstanceName());
    } else {
      clusterSummaryRecord.setSimpleField("LEADER", "");
    }
    StringRepresentation representation =
        new StringRepresentation(ClusterRepresentationUtil.ZNRecordToJson(clusterSummaryRecord),
            MediaType.APPLICATION_JSON);
    return representation;
  }

  /**
   * Activate/deactivate a cluster in distributed controller mode
   * <p>
   * Usage: <code> curl -d 'jsonParameters=
   * {"command":"activateCluster","grandCluster":"{controllerCluster}","enabled":"{true/false}"}' -H
   * "Content-Type: application/json" http://{host:port}/clusters/{clusterName}}
   */
  @Override
  public Representation post(Representation entity) {
    try {
      String clusterName =
          ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
      ZkClient zkClient =
          ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
      ClusterSetup setupTool = new ClusterSetup(zkClient);
      JsonParameters jsonParameters = new JsonParameters(entity);
      String command = jsonParameters.getCommand();
      if (command == null) {
        throw new HelixException("Could NOT find 'command' in parameterMap: "
            + jsonParameters._parameterMap);
      } else if (command.equalsIgnoreCase(ClusterSetup.activateCluster)
          || JsonParameters.CLUSTERSETUP_COMMAND_ALIASES.get(ClusterSetup.activateCluster)
              .contains(command)) {
        jsonParameters.verifyCommand(ClusterSetup.activateCluster);
        // Activation defaults to enabled unless the request says otherwise.
        boolean enabled = true;
        if (jsonParameters.getParameter(JsonParameters.ENABLED) != null) {
          enabled = Boolean.parseBoolean(jsonParameters.getParameter(JsonParameters.ENABLED));
        }
        String grandCluster = jsonParameters.getParameter(JsonParameters.GRAND_CLUSTER);
        setupTool.activateCluster(clusterName, grandCluster, enabled);
      } else if (command.equalsIgnoreCase(ClusterSetup.expandCluster)) {
        setupTool.expandCluster(clusterName);
      } else {
        throw new HelixException("Unsupported command: " + command + ". Should be one of ["
            + ClusterSetup.activateCluster + ", " + ClusterSetup.expandCluster + "]");
      }
      getResponse().setEntity(getClusterRepresentation(clusterName));
      getResponse().setStatus(Status.SUCCESS_OK);
    } catch (Exception e) {
      getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
      getResponse().setStatus(Status.SUCCESS_OK);
      // Previously swallowed silently; log like the sibling resources do.
      LOG.error("Error in posting " + entity, e);
    }
    return getResponseEntity();
  }

  /**
   * Remove a cluster
   * <p>
   * Usage: <code> curl -X DELETE http://{host:port}/clusters/{clusterName}
   */
  @Override
  public Representation delete() {
    try {
      String clusterName =
          ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
      ZkClient zkClient =
          ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
      ClusterSetup setupTool = new ClusterSetup(zkClient);
      setupTool.deleteCluster(clusterName);
      getResponse().setStatus(Status.SUCCESS_OK);
    } catch (Exception e) {
      getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
      getResponse().setStatus(Status.SUCCESS_OK);
      // Previously swallowed silently; log like the sibling resources do.
      LOG.error("Error in deleting cluster", e);
    }
    return null;
  }
}
| 9,189 |
0 |
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp
|
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/SchedulerTasksResource.java
|
package org.apache.helix.webapp.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.net.InetAddress;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.databind.JsonMappingException;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.InstanceType;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.manager.zk.DefaultSchedulerMessageHandlerFactory;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;
import org.apache.helix.model.Message.MessageType;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.webapp.RestAdminApplication;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.restlet.data.Form;
import org.restlet.data.MediaType;
import org.restlet.data.Status;
import org.restlet.representation.Representation;
import org.restlet.representation.StringRepresentation;
import org.restlet.representation.Variant;
import org.restlet.resource.ServerResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This resource can be used to send scheduler tasks to the controller.
*/
public class SchedulerTasksResource extends ServerResource {
  private final static Logger LOG = LoggerFactory.getLogger(SchedulerTasksResource.class);
  // Form/JSON parameter names accepted by POST. Kept non-final for
  // backward compatibility with any external code that assigns them.
  public static String CRITERIA = "Criteria";
  public static String MESSAGETEMPLATE = "MessageTemplate";
  public static String TASKQUEUENAME = "TaskQueueName";

  public SchedulerTasksResource() {
    // Advertise plain-text and JSON; negotiation is disabled so this resource
    // controls the response media type itself.
    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
    setNegotiated(false);
  }

  @Override
  public Representation get() {
    StringRepresentation presentation = null;
    try {
      presentation = getSchedulerTasksRepresentation();
    } catch (Exception e) {
      String error = ClusterRepresentationUtil.getErrorAsJsonStringFromException(e);
      presentation = new StringRepresentation(error, MediaType.APPLICATION_JSON);
      LOG.error("", e);
    }
    return presentation;
  }

  /**
   * GET is currently a placeholder that returns an empty representation:
   * no scheduler-task state is exposed yet.
   * <p>
   * Earlier revisions fetched the cluster's instances and a live-instance
   * session id here, but the results were never used — and since this resource
   * is routed at {@code /clusters/{clusterName}/SchedulerTasks} the
   * {@code instanceName} request attribute was always null, making that lookup
   * a guaranteed NullPointerException. The dead code has been removed.
   */
  StringRepresentation getSchedulerTasksRepresentation()
      throws JsonGenerationException, JsonMappingException, IOException {
    StringRepresentation representation = new StringRepresentation("");
    return representation;
  }

  /**
   * Sends a SCHEDULER_MSG to the cluster's controller leader.
   * <p>
   * The form must carry {@code MessageTemplate} and {@code Criteria} JSON
   * parameters; {@code TaskQueueName} is optional. On success the response
   * body is a JSON map with the message id and a status-update REST URL the
   * caller can poll.
   */
  @Override
  public Representation post(Representation entity) {
    try {
      String clusterName = (String) getRequest().getAttributes().get("clusterName");
      Form form = new Form(entity);
      ZkClient zkClient =
          (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
      String msgTemplateString =
          ClusterRepresentationUtil.getFormJsonParameterString(form, MESSAGETEMPLATE);
      if (msgTemplateString == null) {
        throw new HelixException("SchedulerTasksResource need to have MessageTemplate specified.");
      }
      Map<String, String> messageTemplate =
          ClusterRepresentationUtil.getFormJsonParameters(form, MESSAGETEMPLATE);
      String criteriaString = ClusterRepresentationUtil.getFormJsonParameterString(form, CRITERIA);
      if (criteriaString == null) {
        throw new HelixException("SchedulerTasksResource need to have Criteria specified.");
      }
      HelixDataAccessor accessor =
          ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName);
      LiveInstance leader = accessor.getProperty(accessor.keyBuilder().controllerLeader());
      if (leader == null) {
        throw new HelixException("There is no leader for the cluster " + clusterName);
      }
      Message schedulerMessage =
          new Message(MessageType.SCHEDULER_MSG, UUID.randomUUID().toString());
      schedulerMessage.getRecord().getSimpleFields().put(CRITERIA, criteriaString);
      schedulerMessage.getRecord().getMapFields().put(MESSAGETEMPLATE, messageTemplate);
      // Target the leader's current session so the message is ignored if the
      // leader changes before delivery.
      schedulerMessage.setTgtSessionId(leader.getEphemeralOwner());
      schedulerMessage.setTgtName("CONTROLLER");
      schedulerMessage.setSrcInstanceType(InstanceType.CONTROLLER);
      String taskQueueName =
          ClusterRepresentationUtil.getFormJsonParameterString(form, TASKQUEUENAME);
      if (taskQueueName != null && taskQueueName.length() > 0) {
        schedulerMessage.getRecord().setSimpleField(
            DefaultSchedulerMessageHandlerFactory.SCHEDULER_TASK_QUEUE, taskQueueName);
      }
      accessor.setProperty(accessor.keyBuilder().controllerMessage(schedulerMessage.getMsgId()),
          schedulerMessage);
      Map<String, String> resultMap = new HashMap<String, String>();
      resultMap.put("StatusUpdatePath", PropertyPathBuilder.controllerStatusUpdate(
          clusterName, MessageType.SCHEDULER_MSG.name(), schedulerMessage.getMsgId()));
      resultMap.put("MessageType", Message.MessageType.SCHEDULER_MSG.name());
      resultMap.put("MsgId", schedulerMessage.getMsgId());
      // Assemble the rest URL for task status update
      String ipAddress = InetAddress.getLocalHost().getCanonicalHostName();
      String url =
          "http://" + ipAddress + ":" + getContext().getAttributes().get(RestAdminApplication.PORT)
              + "/clusters/" + clusterName + "/Controller/statusUpdates/SCHEDULER_MSG/"
              + schedulerMessage.getMsgId();
      resultMap.put("statusUpdateUrl", url);
      getResponse().setEntity(ClusterRepresentationUtil.ObjectToJson(resultMap),
          MediaType.APPLICATION_JSON);
      getResponse().setStatus(Status.SUCCESS_OK);
    } catch (Exception e) {
      getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
      getResponse().setStatus(Status.SUCCESS_OK);
      LOG.error("", e);
    }
    return null;
  }
}
| 9,190 |
0 |
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp
|
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/StateModelResource.java
|
package org.apache.helix.webapp.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.databind.JsonMappingException;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyKey;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.webapp.RestAdminApplication;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.restlet.data.MediaType;
import org.restlet.data.Status;
import org.restlet.representation.Representation;
import org.restlet.representation.StringRepresentation;
import org.restlet.representation.Variant;
import org.restlet.resource.ServerResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * REST resource exposing a single Helix state model definition.
 * <p>
 * <li>GET returns the state model definition as JSON
 * <li>POST supports the {@code addStateModelDef} command
 */
public class StateModelResource extends ServerResource {
  private final static Logger LOG = LoggerFactory.getLogger(StateModelResource.class);

  public StateModelResource() {
    // Serve both plain-text and JSON variants; negotiation is disabled so the
    // handler methods construct the representation themselves.
    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
    setNegotiated(false);
  }

  /**
   * Returns the requested state model definition as JSON, or a JSON error
   * payload if the lookup fails.
   */
  @Override
  public Representation get() {
    StringRepresentation result;
    try {
      String cluster = (String) getRequest().getAttributes().get("clusterName");
      String model = (String) getRequest().getAttributes().get("modelName");
      result = getStateModelRepresentation(cluster, model);
    } catch (Exception e) {
      LOG.error("", e);
      result = new StringRepresentation(
          ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
    }
    return result;
  }

  // Reads the cluster's state model definition znode and wraps it as JSON.
  StringRepresentation getStateModelRepresentation(String clusterName, String modelName)
      throws JsonGenerationException, JsonMappingException, IOException {
    ZkClient zkClient = (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
    Builder keyBuilder = new PropertyKey.Builder(clusterName);
    String json =
        ClusterRepresentationUtil.getClusterPropertyAsString(zkClient, clusterName,
            keyBuilder.stateModelDef(modelName), MediaType.APPLICATION_JSON);
    return new StringRepresentation(json, MediaType.APPLICATION_JSON);
  }

  /**
   * Handles the {@code addStateModelDef} command by storing the supplied state
   * model definition in the cluster. Failures are reported as a JSON error
   * payload (the HTTP status remains 200, matching the other resources here).
   */
  @Override
  public Representation post(Representation entity) {
    try {
      String cluster = (String) getRequest().getAttributes().get("clusterName");
      String model = (String) getRequest().getAttributes().get("modelName");
      ZkClient zkClient =
          (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
      JsonParameters parameters = new JsonParameters(entity);
      String command = parameters.getCommand();
      // Guard clause: only addStateModelDef is accepted by this endpoint.
      if (!command.equalsIgnoreCase(ClusterSetup.addStateModelDef)) {
        throw new HelixException("Unsupported command: " + command + ". Should be one of ["
            + ClusterSetup.addStateModelDef + "]");
      }
      ZNRecord newStateModel = parameters.getExtraParameter(JsonParameters.NEW_STATE_MODEL_DEF);
      HelixDataAccessor accessor =
          ClusterRepresentationUtil.getClusterDataAccessor(zkClient, cluster);
      accessor.setProperty(accessor.keyBuilder().stateModelDef(newStateModel.getId()),
          new StateModelDefinition(newStateModel));
      getResponse().setEntity(getStateModelRepresentation(cluster, model));
      getResponse().setStatus(Status.SUCCESS_OK);
    } catch (Exception e) {
      getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
      getResponse().setStatus(Status.SUCCESS_OK);
      LOG.error("Error in posting " + entity, e);
    }
    return null;
  }
}
| 9,191 |
0 |
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp
|
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/ControllerStatusUpdateResource.java
|
package org.apache.helix.webapp.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.databind.JsonMappingException;
import org.apache.helix.PropertyKey;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.webapp.RestAdminApplication;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.restlet.data.MediaType;
import org.restlet.representation.Representation;
import org.restlet.representation.StringRepresentation;
import org.restlet.representation.Variant;
import org.restlet.resource.ServerResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * REST resource exposing a single controller status-update record.
 * <p>
 * <li>GET returns the controller task status for a message type/id as JSON
 */
public class ControllerStatusUpdateResource extends ServerResource {
  private final static Logger LOG = LoggerFactory.getLogger(ControllerStatusUpdateResource.class);

  public ControllerStatusUpdateResource() {
    // Serve plain-text and JSON variants; negotiation is disabled so get()
    // constructs the representation itself.
    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
    setNegotiated(false);
  }

  /** Returns the status update as JSON, or a JSON error payload on failure. */
  @Override
  public Representation get() {
    StringRepresentation result;
    try {
      String zkServer =
          (String) getContext().getAttributes().get(RestAdminApplication.ZKSERVERADDRESS);
      String cluster = (String) getRequest().getAttributes().get("clusterName");
      String msgType = (String) getRequest().getAttributes().get("MessageType");
      String msgId = (String) getRequest().getAttributes().get("MessageId");
      // TODO: need pass sessionId to this represent()
      String sessionId = (String) getRequest().getAttributes().get("SessionId");
      result =
          getControllerStatusUpdateRepresentation(zkServer, cluster, sessionId, msgType, msgId);
    } catch (Exception e) {
      LOG.error("", e);
      result = new StringRepresentation(
          ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
    }
    return result;
  }

  // Reads the controller task-status znode for the given message and wraps it as JSON.
  // NOTE(review): zkServerAddress and sessionId are currently unused by this helper
  // (the zkClient comes from the application context) — confirm before removing them.
  StringRepresentation getControllerStatusUpdateRepresentation(String zkServerAddress,
      String clusterName, String sessionId, String messageType, String messageId)
      throws JsonGenerationException, JsonMappingException, IOException {
    ZkClient zkClient = (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
    Builder keyBuilder = new PropertyKey.Builder(clusterName);
    String json =
        ClusterRepresentationUtil.getPropertyAsString(zkClient, clusterName,
            keyBuilder.controllerTaskStatus(messageType, messageId), MediaType.APPLICATION_JSON);
    return new StringRepresentation(json, MediaType.APPLICATION_JSON);
  }
}
| 9,192 |
0 |
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp
|
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/ConfigResource.java
|
package org.apache.helix.webapp.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixException;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.helix.tools.ClusterSetup;
import org.restlet.data.MediaType;
import org.restlet.data.Status;
import org.restlet.representation.Representation;
import org.restlet.representation.StringRepresentation;
import org.restlet.representation.Variant;
import org.restlet.resource.ServerResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Class for server-side resource at <code> "/clusters/{clusterName}/configs"
* <p>
* <li>GET get scoped configs
* <li>POST set/remove scoped configs
*/
public class ConfigResource extends ServerResource {
  private final static Logger LOG = LoggerFactory.getLogger(ConfigResource.class);
  public ConfigResource() {
    // Serve both plain-text and JSON variants; negotiation is disabled so the
    // handler methods construct the representation themselves.
    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
    setNegotiated(false);
  }
  // Convenience accessor for a URI-template attribute of the current request;
  // returns null when the attribute is absent from the request path.
  String getValue(String key) {
    return (String) getRequest().getAttributes().get(key);
  }
  // Lists the supported config scopes (CLUSTER/RESOURCE/PARTICIPANT/PARTITION)
  // as a JSON-serialized ZNRecord.
  static StringRepresentation getConfigScopes() throws Exception {
    StringRepresentation representation = null;
    ZNRecord record = new ZNRecord("Config");
    List<String> scopeList =
        Arrays.asList(ConfigScopeProperty.CLUSTER.toString(),
            ConfigScopeProperty.RESOURCE.toString(), ConfigScopeProperty.PARTICIPANT.toString(),
            ConfigScopeProperty.PARTITION.toString());
    record.setListField("scopes", scopeList);
    representation =
        new StringRepresentation(ClusterRepresentationUtil.ZNRecordToJson(record),
            MediaType.APPLICATION_JSON);
    return representation;
  }
  // Lists the config keys available under the scope built from the varargs
  // (e.g. clusterName[,resourceName[,partitionName]]) as a JSON-serialized ZNRecord.
  StringRepresentation getConfigKeys(ConfigScopeProperty scopeProperty, String... keys)
      throws Exception {
    StringRepresentation representation = null;
    ZkClient zkClient =
        ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
    ClusterSetup setupTool = new ClusterSetup(zkClient);
    HelixAdmin admin = setupTool.getClusterManagementTool();
    ZNRecord record = new ZNRecord(scopeProperty + " Config");
    HelixConfigScope scope = new HelixConfigScopeBuilder(scopeProperty, keys).build();
    List<String> configKeys = admin.getConfigKeys(scope);
    record.setListField(scopeProperty.toString(), configKeys);
    representation =
        new StringRepresentation(ClusterRepresentationUtil.ZNRecordToJson(record),
            MediaType.APPLICATION_JSON);
    return representation;
  }
  // Returns all key/value config pairs under the given scope as a
  // JSON-serialized ZNRecord (keys are resolved first, then their values fetched).
  StringRepresentation getConfigs(ConfigScopeProperty scopeProperty, String... keys)
      throws Exception {
    StringRepresentation representation = null;
    ZkClient zkClient =
        ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
    ClusterSetup setupTool = new ClusterSetup(zkClient);
    HelixAdmin admin = setupTool.getClusterManagementTool();
    ZNRecord record = new ZNRecord(scopeProperty + " Config");
    HelixConfigScope scope = new HelixConfigScopeBuilder(scopeProperty, keys).build();
    List<String> configKeys = admin.getConfigKeys(scope);
    Map<String, String> configs = admin.getConfig(scope, configKeys);
    record.setSimpleFields(configs);
    representation =
        new StringRepresentation(ClusterRepresentationUtil.ZNRecordToJson(record),
            MediaType.APPLICATION_JSON);
    return representation;
  }
  /**
   * Get scoped configs
   * <p>
   * Usage:
   * <p>
   * <li>Get cluster-level configs:
   * <code>curl http://{host:port}/clusters/{clusterName}/configs/cluster
   * <li>Get instance-level configs:
   * <code>curl http://{host:port}/clusters/{clusterName}/configs/participant/{instanceName}
   * <li>Get resource-level configs:
   * <code>curl http://{host:port}/clusters/{clusterName}/configs/resource/{resourceName}
   */
  @Override
  public Representation get() {
    StringRepresentation representation = null;
    String clusterName = getValue("clusterName");
    String scopeStr = getValue("scope");
    try {
      if (scopeStr == null) {
        // path is "/clusters/{clusterName}/configs"
        return getConfigScopes();
      }
      scopeStr = scopeStr.toUpperCase();
      // An unknown scope name makes valueOf throw IllegalArgumentException,
      // which is reported to the client as a JSON error below.
      ConfigScopeProperty scopeProperty = ConfigScopeProperty.valueOf(scopeStr);
      switch (scopeProperty) {
      case CLUSTER:
      case PARTICIPANT:
      case RESOURCE:
        String scopeKey1 = getValue("scopeKey1");
        if (scopeKey1 == null) {
          // path is "/clusters/{clusterName}/configs/cluster|participant|resource"
          representation = getConfigKeys(scopeProperty, clusterName);
        } else {
          // path is "/clusters/{clusterName}/configs/cluster|participant|resource/
          // {clusterName}|{participantName}|{resourceName}"
          representation = getConfigs(scopeProperty, clusterName, scopeKey1);
        }
        break;
      case PARTITION:
        // Deliberately reuses scopeKey1 declared in the case group above:
        // switch-case locals share the enclosing switch scope in Java.
        scopeKey1 = getValue("scopeKey1");
        String scopeKey2 = getValue("scopeKey2");
        if (scopeKey1 == null) {
          // path is "/clusters/{clusterName}/configs/partition"
          throw new HelixException("Missing resourceName");
        } else if (scopeKey2 == null) {
          // path is "/clusters/{clusterName}/configs/partition/resourceName"
          representation = getConfigKeys(scopeProperty, clusterName, scopeKey1);
        } else {
          // path is
          // "/clusters/{clusterName}/configs/partition/resourceName/partitionName"
          representation = getConfigs(scopeProperty, clusterName, scopeKey1, scopeKey2);
        }
        break;
      default:
        break;
      }
    } catch (Exception e) {
      String error = ClusterRepresentationUtil.getErrorAsJsonStringFromException(e);
      representation = new StringRepresentation(error, MediaType.APPLICATION_JSON);
      LOG.error("", e);
    }
    return representation;
  }
  /**
   * set or remove configs depends on "command" field of jsonParameters in POST body
   * @param entity POST body carrying jsonParameters with "command" and "configs"
   * @param type the config scope type being modified
   * @param scopeArgs comma-separated scope arguments, e.g. "cluster,resource,partition"
   * @throws Exception on an unsupported command or a failure in the underlying admin call
   */
  void setConfigs(Representation entity, ConfigScopeProperty type, String scopeArgs)
      throws Exception {
    JsonParameters jsonParameters = new JsonParameters(entity);
    String command = jsonParameters.getCommand();
    ZkClient zkClient =
        ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
    ClusterSetup setupTool = new ClusterSetup(zkClient);
    if (command.equalsIgnoreCase(ClusterSetup.setConfig)) {
      jsonParameters.verifyCommand(ClusterSetup.setConfig);
      String propertiesStr = jsonParameters.getParameter(JsonParameters.CONFIGS);
      setupTool.setConfig(type, scopeArgs, propertiesStr);
    } else if (command.equalsIgnoreCase(ClusterSetup.removeConfig)) {
      jsonParameters.verifyCommand(ClusterSetup.removeConfig);
      String propertiesStr = jsonParameters.getParameter(JsonParameters.CONFIGS);
      setupTool.removeConfig(type, scopeArgs, propertiesStr);
    } else {
      throw new HelixException("Unsupported command: " + command + ". Should be one of ["
          + ClusterSetup.setConfig + ", " + ClusterSetup.removeConfig + "]");
    }
    // Respond with the post-modification view of the configs.
    getResponse().setEntity(get());
    getResponse().setStatus(Status.SUCCESS_OK);
  }
  /**
   * Set/remove scoped configs
   * <p>
   * Usage:
   * <p>
   * <li>Set cluster level configs:
   * <code>curl -d 'jsonParameters={"command":"setConfig","configs":"{key1=value1,key2=value2}"}'
   * -H "Content-Type: application/json" http://{host:port}/clusters/{clusterName}/configs/cluster
   * <li>Remove cluster level configs:
   * <code>curl -d 'jsonParameters={"command":"removeConfig","configs":"{key1,key2}"}'
   * -H "Content-Type: application/json" http://{host:port}/clusters/{clusterName}/configs/cluster
   * <li>Set instance level configs:
   * <code>curl -d 'jsonParameters={"command":"setConfig","configs":"{key1=value1,key2=value2}"}'
   * -H "Content-Type: application/json" http://{host:port}/clusters/{clusterName}/configs/participant/{instanceName}
   * <li>Remove instance level configs:
   * <code>curl -d 'jsonParameters={"command":"removeConfig","configs":"{key1,key2}"}'
   * -H "Content-Type: application/json" http://{host:port}/clusters/{clusterName}/configs/participant/{instanceName}
   * <li>Set resource level configs:
   * <code>curl -d 'jsonParameters={"command":"setConfig","configs":"{key1=value1,key2=value2}"}'
   * -H "Content-Type: application/json" http://{host:port}/clusters/{clusterName}/configs/resource/{resourceName}
   * <li>Remove resource level configs:
   * <code>curl -d 'jsonParameters={"command":"removeConfig","configs":"{key1,key2}"}'
   * -H "Content-Type: application/json" http://{host:port}/clusters/{clusterName}/configs/resource/{resourceName}
   */
  @Override
  public Representation post(Representation entity) {
    String clusterName = getValue("clusterName");
    // NOTE(review): unlike get(), this dereferences getValue("scope") outside the
    // try block — a request without a scope segment would NPE here instead of
    // producing the JSON error payload. Confirm whether routing guarantees a scope.
    String scopeStr = getValue("scope").toUpperCase();
    try {
      ConfigScopeProperty scopeProperty = ConfigScopeProperty.valueOf(scopeStr);
      switch (scopeProperty) {
      case CLUSTER:
        String scopeArgs = clusterName;
        setConfigs(entity, scopeProperty, scopeArgs);
        break;
      case PARTICIPANT:
      case RESOURCE:
        String scopeKey1 = getValue("scopeKey1");
        if (scopeKey1 == null) {
          throw new HelixException("Missing resourceName|participantName");
        } else {
          // Reuses scopeArgs declared in the CLUSTER case (switch-level scope).
          scopeArgs = clusterName + "," + scopeKey1;
          setConfigs(entity, scopeProperty, scopeArgs);
        }
        break;
      case PARTITION:
        scopeKey1 = getValue("scopeKey1");
        String scopeKey2 = getValue("scopeKey2");
        if (scopeKey1 == null || scopeKey2 == null) {
          throw new HelixException("Missing resourceName|partitionName");
        } else {
          scopeArgs = clusterName + "," + scopeKey1 + "," + scopeKey2;
          setConfigs(entity, scopeProperty, scopeArgs);
        }
        break;
      default:
        break;
      }
    } catch (Exception e) {
      LOG.error("Error in posting " + entity, e);
      getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
      getResponse().setStatus(Status.SUCCESS_OK);
    }
    return null;
  }
}
| 9,193 |
0 |
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp
|
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/InstanceResource.java
|
package org.apache.helix.webapp.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.databind.JsonMappingException;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyKey;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.restlet.data.MediaType;
import org.restlet.data.Status;
import org.restlet.representation.Representation;
import org.restlet.representation.StringRepresentation;
import org.restlet.representation.Variant;
import org.restlet.resource.ServerResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * REST resource for a single participant instance of a cluster.
 * <p>
 * <li>GET returns the instance config as JSON
 * <li>POST dispatches an admin command on the instance (enable/disable, partition
 *     operations, instance tags)
 * <li>DELETE drops the instance from the cluster
 */
public class InstanceResource extends ServerResource {
  private final static Logger LOG = LoggerFactory.getLogger(InstanceResource.class);
  public InstanceResource() {
    // Serve plain-text and JSON variants; negotiation is disabled so the
    // handler methods construct the representation themselves.
    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
    setNegotiated(false);
  }
  /** Returns the instance config as JSON, or a JSON error payload on failure. */
  @Override
  public Representation get() {
    StringRepresentation presentation = null;
    try {
      presentation = getInstanceRepresentation();
    } catch (Exception e) {
      String error = ClusterRepresentationUtil.getErrorAsJsonStringFromException(e);
      presentation = new StringRepresentation(error, MediaType.APPLICATION_JSON);
      LOG.error("Exception in get instance", e);
    }
    return presentation;
  }
  // Reads the instance config znode via the raw zkclient and wraps it as JSON.
  StringRepresentation getInstanceRepresentation()
      throws JsonGenerationException, JsonMappingException, IOException {
    String clusterName =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
    String instanceName =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.INSTANCE_NAME);
    Builder keyBuilder = new PropertyKey.Builder(clusterName);
    ZkClient zkclient =
        ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.RAW_ZKCLIENT);
    String instanceCfgStr =
        ResourceUtil.readZkAsBytes(zkclient, keyBuilder.instanceConfig(instanceName));
    StringRepresentation representation =
        new StringRepresentation(instanceCfgStr, MediaType.APPLICATION_JSON);
    return representation;
  }
  /**
   * Dispatches the admin command named in the posted jsonParameters. Supported
   * commands: enableInstance, enablePartition, resetPartition, resetInstance,
   * addInstanceTag, removeInstanceTag. Errors are reported as a JSON payload
   * (the HTTP status stays 200, matching the other resources in this package).
   */
  @Override
  public Representation post(Representation entity) {
    try {
      String clusterName =
          ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
      String instanceName =
          ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.INSTANCE_NAME);
      ZkClient zkclient =
          ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
      ClusterSetup setupTool = new ClusterSetup(zkclient);
      JsonParameters jsonParameters = new JsonParameters(entity);
      String command = jsonParameters.getCommand();
      if (command.equalsIgnoreCase(ClusterSetup.enableInstance)) {
        jsonParameters.verifyCommand(ClusterSetup.enableInstance);
        boolean enabled = Boolean.parseBoolean(jsonParameters.getParameter(JsonParameters.ENABLED));
        setupTool.getClusterManagementTool().enableInstance(clusterName, instanceName, enabled);
      } else if (command.equalsIgnoreCase(ClusterSetup.enablePartition)) {
        jsonParameters.verifyCommand(ClusterSetup.enablePartition);
        boolean enabled = Boolean.parseBoolean(jsonParameters.getParameter(JsonParameters.ENABLED));
        // NOTE(review): partitions are ';'-separated here but whitespace-separated in
        // resetPartition below — confirm whether this asymmetry is intentional.
        String[] partitions = jsonParameters.getParameter(JsonParameters.PARTITION).split(";");
        String resource = jsonParameters.getParameter(JsonParameters.RESOURCE);
        setupTool.getClusterManagementTool().enablePartition(enabled, clusterName, instanceName,
            resource, Arrays.asList(partitions));
      } else if (command.equalsIgnoreCase(ClusterSetup.resetPartition)) {
        jsonParameters.verifyCommand(ClusterSetup.resetPartition);
        String resource = jsonParameters.getParameter(JsonParameters.RESOURCE);
        String[] partitionNames =
            jsonParameters.getParameter(JsonParameters.PARTITION).split("\\s+");
        setupTool.getClusterManagementTool().resetPartition(clusterName, instanceName, resource,
            Arrays.asList(partitionNames));
      } else if (command.equalsIgnoreCase(ClusterSetup.resetInstance)) {
        jsonParameters.verifyCommand(ClusterSetup.resetInstance);
        setupTool.getClusterManagementTool()
            .resetInstance(clusterName, Arrays.asList(instanceName));
      } else if (command.equalsIgnoreCase(ClusterSetup.addInstanceTag)) {
        jsonParameters.verifyCommand(ClusterSetup.addInstanceTag);
        String tag = jsonParameters.getParameter(ClusterSetup.instanceGroupTag);
        setupTool.getClusterManagementTool().addInstanceTag(clusterName, instanceName, tag);
      } else if (command.equalsIgnoreCase(ClusterSetup.removeInstanceTag)) {
        jsonParameters.verifyCommand(ClusterSetup.removeInstanceTag);
        String tag = jsonParameters.getParameter(ClusterSetup.instanceGroupTag);
        setupTool.getClusterManagementTool().removeInstanceTag(clusterName, instanceName, tag);
      } else {
        // Fix: list every command this handler actually dispatches. The previous
        // message omitted resetPartition, addInstanceTag and removeInstanceTag.
        throw new HelixException("Unsupported command: " + command + ". Should be one of ["
            + ClusterSetup.enableInstance + ", " + ClusterSetup.enablePartition + ", "
            + ClusterSetup.resetPartition + ", " + ClusterSetup.resetInstance + ", "
            + ClusterSetup.addInstanceTag + ", " + ClusterSetup.removeInstanceTag + "]");
      }
      getResponse().setEntity(getInstanceRepresentation());
      getResponse().setStatus(Status.SUCCESS_OK);
    } catch (Exception e) {
      getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
      getResponse().setStatus(Status.SUCCESS_OK);
      LOG.error("Exception in post instance", e);
    }
    return null;
  }
  /** Drops the instance from the cluster; errors are reported as a JSON payload. */
  @Override
  public Representation delete() {
    try {
      String clusterName =
          ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
      String instanceName =
          ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.INSTANCE_NAME);
      ZkClient zkclient =
          ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
      ClusterSetup setupTool = new ClusterSetup(zkclient);
      setupTool.dropInstanceFromCluster(clusterName, instanceName);
      getResponse().setStatus(Status.SUCCESS_OK);
    } catch (Exception e) {
      getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
      getResponse().setStatus(Status.SUCCESS_OK);
      LOG.error("Error in delete instance", e);
    }
    return null;
  }
}
| 9,194 |
0 |
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp
|
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/JobQueueResource.java
|
package org.apache.helix.webapp.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Map;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyKey;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.task.JobConfig;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.TaskUtil;
import org.apache.helix.task.Workflow;
import org.apache.helix.task.WorkflowContext;
import org.restlet.data.Form;
import org.restlet.data.MediaType;
import org.restlet.data.Status;
import org.restlet.representation.Representation;
import org.restlet.representation.StringRepresentation;
import org.restlet.representation.Variant;
import org.restlet.resource.ServerResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Class for server-side resource at <code>"/clusters/{clusterName}/jobQueues/{jobQueue}"
* <p>
* <li>GET list job queue info
* <li>POST start a new job in a job queue, or stop/resume/persistDataChanges/delete a job queue
*/
public class JobQueueResource extends ServerResource {
private final static Logger LOG = LoggerFactory.getLogger(JobQueueResource.class);
public JobQueueResource() {
getVariants().add(new Variant(MediaType.TEXT_PLAIN));
getVariants().add(new Variant(MediaType.APPLICATION_JSON));
setNegotiated(false);
}
/**
* List job queue info
* <p>
* Usage: <code>curl http://{host:port}/clusters/{clusterName}/jobQueues/{jobQueue}
*/
@Override
public Representation get() {
StringRepresentation presentation;
try {
String clusterName =
ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
String jobQueueName =
ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.JOB_QUEUE);
presentation = getHostedEntitiesRepresentation(clusterName, jobQueueName);
} catch (Exception e) {
String error = ClusterRepresentationUtil.getErrorAsJsonStringFromException(e);
presentation = new StringRepresentation(error, MediaType.APPLICATION_JSON);
LOG.error("Fail to get job queue", e);
}
return presentation;
}
StringRepresentation getHostedEntitiesRepresentation(String clusterName, String jobQueueName)
throws Exception {
ZkClient zkClient =
ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
HelixDataAccessor accessor =
ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName);
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
TaskDriver taskDriver = new TaskDriver(zkClient, clusterName);
// Get job queue config
// TODO: fix this to use workflowConfig.
ResourceConfig jobQueueConfig = accessor.getProperty(keyBuilder.resourceConfig(jobQueueName));
// Get job queue context
WorkflowContext ctx = taskDriver.getWorkflowContext(jobQueueName);
// Create the result
ZNRecord hostedEntitiesRecord = new ZNRecord(jobQueueName);
if (jobQueueConfig != null) {
hostedEntitiesRecord.merge(jobQueueConfig.getRecord());
}
if (ctx != null) {
hostedEntitiesRecord.merge(ctx.getRecord());
}
StringRepresentation representation =
new StringRepresentation(ClusterRepresentationUtil.ZNRecordToJson(hostedEntitiesRecord),
MediaType.APPLICATION_JSON);
return representation;
}
/**
* Start a new job in a job queue, or stop/resume/persistDataChanges/delete a job queue
* <p>
* Usage:
* <p>
* <li>Start a new job in a job queue:
* <code>curl -d @'./{input.txt}' -H 'Content-Type: application/json'
* http://{host:port}/clusters/{clusterName}/jobQueues/{jobQueue}
* <p>
* input.txt: <code>jsonParameters={"command":"start"}&newJob={newJobConfig.yaml}
* <p>
* For newJobConfig.yaml, see {@link Workflow#parse(String)}
* <li>Stop/resume/persistDataChanges/delete a job queue:
* <code>curl -d 'jsonParameters={"command":"{stop/resume/persistDataChanges/delete}"}'
* -H "Content-Type: application/json" http://{host:port}/clusters/{clusterName}/jobQueues/{jobQueue}
*/
@Override
public Representation post(Representation entity) {
String clusterName =
ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
String jobQueueName =
ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.JOB_QUEUE);
ZkClient zkClient =
ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
try {
TaskDriver driver = new TaskDriver(zkClient, clusterName);
Form form = new Form(entity);
JsonParameters jsonParameters = new JsonParameters(form);
TaskDriver.DriverCommand cmd = TaskDriver.DriverCommand.valueOf(jsonParameters.getCommand());
switch (cmd) {
case start: {
// Get the job queue and submit it
String yamlPayload =
ResourceUtil.getYamlParameters(form, ResourceUtil.YamlParamKey.NEW_JOB);
if (yamlPayload == null) {
throw new HelixException("Yaml job config is required!");
}
Workflow workflow = Workflow.parse(yamlPayload);
for (String jobName : workflow.getJobConfigs().keySet()) {
Map<String, String> jobCfgMap = workflow.getJobConfigs().get(jobName);
JobConfig.Builder jobCfgBuilder = JobConfig.Builder.fromMap(jobCfgMap);
if (workflow.getTaskConfigs() != null && workflow.getTaskConfigs().containsKey(jobName)) {
jobCfgBuilder.addTaskConfigs(workflow.getTaskConfigs().get(jobName));
}
driver.enqueueJob(jobQueueName, TaskUtil.getDenamespacedJobName(jobQueueName, jobName),
jobCfgBuilder);
}
break;
}
case stop: {
driver.stop(jobQueueName);
break;
}
case resume: {
driver.resume(jobQueueName);
break;
}
case flush: {
driver.flushQueue(jobQueueName);
break;
}
case delete: {
driver.delete(jobQueueName);
break;
}
case clean: {
driver.cleanupQueue(jobQueueName);
break;
}
default:
throw new HelixException("Unsupported job queue command: " + cmd);
}
getResponse().setEntity(getHostedEntitiesRepresentation(clusterName, jobQueueName));
getResponse().setStatus(Status.SUCCESS_OK);
} catch (Exception e) {
getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
MediaType.APPLICATION_JSON);
getResponse().setStatus(Status.SUCCESS_OK);
LOG.error("Error in posting job queue: " + entity, e);
}
return null;
}
}
| 9,195 |
0 |
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp
|
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/JobResource.java
|
package org.apache.helix.webapp.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixProperty;
import org.apache.helix.PropertyKey;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.apache.helix.task.JobContext;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.TaskUtil;
import org.restlet.data.MediaType;
import org.restlet.data.Status;
import org.restlet.representation.Representation;
import org.restlet.representation.StringRepresentation;
import org.restlet.representation.Variant;
import org.restlet.resource.ServerResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Server-side resource at <code>"/clusters/{clusterName}/jobQueues/{jobQueue}/{job}"
 * <p>
 * <li>GET list job info
 * <li>DELETE remove the job from its queue
 */
public class JobResource extends ServerResource {
  private final static Logger LOG = LoggerFactory.getLogger(JobResource.class);

  public JobResource() {
    // Serve both plain text and JSON; content negotiation is handled manually.
    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
    setNegotiated(false);
  }

  /**
   * List job info
   * <p>
   * Usage: <code>curl http://{host:port}/clusters/{clusterName}/jobQueues/{jobQueue}/{job}
   */
  @Override
  public Representation get() {
    String clusterName =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
    String jobQueueName =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.JOB_QUEUE);
    String jobName =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.JOB);

    StringRepresentation result;
    try {
      result = getHostedEntitiesRepresentation(clusterName, jobQueueName, jobName);
    } catch (Exception e) {
      LOG.error("Fail to get job: " + jobName, e);
      result = new StringRepresentation(
          ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
    }
    return result;
  }

  @Override
  public Representation delete() {
    String clusterName =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
    String jobQueueName =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.JOB_QUEUE);
    String jobName =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.JOB);
    ZkClient zkClient =
        ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
    TaskDriver driver = new TaskDriver(zkClient, clusterName);

    // Null body + 204 on success; JSON error body on failure.
    StringRepresentation errorBody = null;
    try {
      driver.deleteJob(jobQueueName, jobName);
      getResponse().setStatus(Status.SUCCESS_NO_CONTENT);
    } catch (Exception e) {
      LOG.error("Fail to delete job: " + jobName, e);
      errorBody = new StringRepresentation(
          ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
    }
    return errorBody;
  }

  /**
   * Build a JSON representation that merges the job's resource config (if present)
   * with its runtime {@link JobContext} (if present). Jobs are addressed by a
   * namespaced name derived from the queue and job names.
   */
  StringRepresentation getHostedEntitiesRepresentation(String clusterName, String jobQueueName,
      String jobName) throws Exception {
    ZkClient zkClient =
        ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
    HelixDataAccessor accessor =
        ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName);
    PropertyKey.Builder keyBuilder = accessor.keyBuilder();

    String namespacedJobName = TaskUtil.getNamespacedJobName(jobQueueName, jobName);
    HelixProperty jobConfig = accessor.getProperty(keyBuilder.resourceConfig(namespacedJobName));

    TaskDriver taskDriver = new TaskDriver(zkClient, clusterName);
    JobContext ctx = taskDriver.getJobContext(namespacedJobName);

    ZNRecord merged = new ZNRecord(namespacedJobName);
    if (jobConfig != null) {
      merged.merge(jobConfig.getRecord());
    }
    if (ctx != null) {
      merged.merge(ctx.getRecord());
    }
    return new StringRepresentation(ClusterRepresentationUtil.ZNRecordToJson(merged),
        MediaType.APPLICATION_JSON);
  }
}
| 9,196 |
0 |
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp
|
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/StatusUpdatesResource.java
|
package org.apache.helix.webapp.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.databind.JsonMappingException;
import org.apache.helix.PropertyType;
import org.apache.helix.webapp.RestAdminApplication;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.restlet.data.MediaType;
import org.restlet.representation.Representation;
import org.restlet.representation.StringRepresentation;
import org.restlet.representation.Variant;
import org.restlet.resource.ServerResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class StatusUpdatesResource extends ServerResource {
  private final static Logger LOG = LoggerFactory.getLogger(StatusUpdatesResource.class);

  public StatusUpdatesResource() {
    // Serve both plain text and JSON; content negotiation is handled manually.
    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
    setNegotiated(false);
  }

  @Override
  public Representation get() {
    try {
      String clusterName = (String) getRequest().getAttributes().get("clusterName");
      String instanceName = (String) getRequest().getAttributes().get("instanceName");
      return getInstanceErrorsRepresentation(clusterName, instanceName);
    } catch (Exception e) {
      LOG.error("", e);
      return new StringRepresentation(
          ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
    }
  }

  /**
   * List the instance's property names for its current session as JSON.
   * NOTE(review): despite the method name, this queries {@code CURRENTSTATES},
   * not error records — confirm whether {@code ERRORS} was intended.
   */
  StringRepresentation getInstanceErrorsRepresentation(String clusterName, String instanceName)
      throws JsonGenerationException, JsonMappingException, IOException {
    ZkClient zkClient = (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
    String sessionId =
        ClusterRepresentationUtil.getInstanceSessionId(zkClient, clusterName, instanceName);
    String body =
        ClusterRepresentationUtil
            .getInstancePropertyNameListAsString(zkClient, clusterName, instanceName,
                PropertyType.CURRENTSTATES, sessionId, MediaType.APPLICATION_JSON);
    return new StringRepresentation(body, MediaType.APPLICATION_JSON);
  }
}
| 9,197 |
0 |
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp
|
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/ConstraintResource.java
|
package org.apache.helix.webapp.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Map;
import org.apache.helix.HelixAdmin;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.apache.helix.model.ClusterConstraints.ConstraintType;
import org.apache.helix.tools.ClusterSetup;
import org.restlet.data.MediaType;
import org.restlet.data.Status;
import org.restlet.representation.Representation;
import org.restlet.representation.StringRepresentation;
import org.restlet.representation.Variant;
import org.restlet.resource.ServerResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Class for server-side resource at <code>"/clusters/{clusterName}/constraints/{constraintType}"
 * <p>
 * <li>GET list all constraints
 * <li>POST set constraints
 * <li>DELETE remove constraints
 */
public class ConstraintResource extends ServerResource {
  private final static Logger LOG = LoggerFactory.getLogger(ConstraintResource.class);

  public ConstraintResource() {
    // Serve both plain text and JSON; content negotiation is handled manually.
    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
    setNegotiated(false);
  }

  /**
   * List all constraints
   * <p>
   * Usage: <code>curl http://{host:port}/clusters/{clusterName}/constraints/MESSAGE_CONSTRAINT
   */
  @Override
  public Representation get() {
    StringRepresentation representation = null;
    String clusterName =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
    String constraintTypeStr =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CONSTRAINT_TYPE);
    String constraintId =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CONSTRAINT_ID);
    try {
      ConstraintType constraintType = ConstraintType.valueOf(constraintTypeStr);
      ZkClient zkClient =
          ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
      HelixAdmin admin = new ZKHelixAdmin(zkClient);
      ZNRecord record = admin.getConstraints(clusterName, constraintType).getRecord();
      if (constraintId == null) {
        // No id given: return every constraint of this type.
        representation =
            new StringRepresentation(ClusterRepresentationUtil.ZNRecordToJson(record),
                MediaType.APPLICATION_JSON);
      } else {
        // Return only the requested constraint, wrapped in a record with the same id.
        Map<String, String> constraint = record.getMapField(constraintId);
        if (constraint == null) {
          representation =
              new StringRepresentation("No constraint of type: " + constraintType
                  + " associated with id: " + constraintId, MediaType.APPLICATION_JSON);
        } else {
          ZNRecord subRecord = new ZNRecord(record.getId());
          subRecord.setMapField(constraintId, constraint);
          representation =
              new StringRepresentation(ClusterRepresentationUtil.ZNRecordToJson(subRecord),
                  MediaType.APPLICATION_JSON);
        }
      }
    } catch (IllegalArgumentException e) {
      // Fix: this exception was previously swallowed silently; log it so bad
      // constraint-type requests leave a trace while keeping the same response body.
      LOG.warn("Unrecognized constraint type: " + constraintTypeStr, e);
      representation =
          new StringRepresentation("constraint-type: " + constraintTypeStr + " not recognized.",
              MediaType.APPLICATION_JSON);
    } catch (Exception e) {
      String error = ClusterRepresentationUtil.getErrorAsJsonStringFromException(e);
      representation = new StringRepresentation(error, MediaType.APPLICATION_JSON);
      LOG.error("Exception get constraints", e);
    }
    return representation;
  }

  /**
   * Set constraints
   * <p>
   * Usage:
   * <code>curl -d 'jsonParameters={"constraintAttributes":"RESOURCE={resource},CONSTRAINT_VALUE={1}"}'
   * -H "Content-Type: application/json" http://{host:port}/clusters/{cluster}/constraints/MESSAGE_CONSTRAINT/{constraintId}
   */
  @Override
  public Representation post(Representation entity) {
    String clusterName =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
    String constraintTypeStr =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CONSTRAINT_TYPE);
    String constraintId =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CONSTRAINT_ID);
    try {
      ZkClient zkClient =
          ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
      ClusterSetup setupTool = new ClusterSetup(zkClient);
      JsonParameters jsonParameters = new JsonParameters(entity);
      String constraintAttrStr = jsonParameters.getParameter(JsonParameters.CONSTRAINT_ATTRIBUTES);
      setupTool.setConstraint(clusterName, constraintTypeStr, constraintId, constraintAttrStr);
    } catch (Exception e) {
      LOG.error("Error in posting " + entity, e);
      // NOTE(review): errors are reported as a JSON body with HTTP 200, matching the
      // convention used elsewhere in this webapp.
      getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
      getResponse().setStatus(Status.SUCCESS_OK);
    }
    return null;
  }

  /**
   * Remove constraints
   * <p>
   * Usage:
   * <code>curl -X DELETE http://{host:port}/clusters/{cluster}/constraints/MESSAGE_CONSTRAINT/{constraintId}
   */
  @Override
  public Representation delete() {
    String clusterName =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
    String constraintTypeStr =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CONSTRAINT_TYPE);
    String constraintId =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CONSTRAINT_ID);
    try {
      ZkClient zkClient =
          ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
      ClusterSetup setupTool = new ClusterSetup(zkClient);
      setupTool.removeConstraint(clusterName, constraintTypeStr, constraintId);
    } catch (Exception e) {
      LOG.error("Error in delete constraint", e);
      getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
      getResponse().setStatus(Status.SUCCESS_OK);
    }
    return null;
  }
}
| 9,198 |
0 |
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp
|
Create_ds/helix/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/StateModelsResource.java
|
package org.apache.helix.webapp.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.List;
import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.databind.JsonMappingException;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.webapp.RestAdminApplication;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.restlet.data.MediaType;
import org.restlet.data.Status;
import org.restlet.representation.Representation;
import org.restlet.representation.StringRepresentation;
import org.restlet.representation.Variant;
import org.restlet.resource.ServerResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Server-side resource for listing and adding state model definitions of a cluster.
 * <p>
 * <li>GET list registered state model definitions
 * <li>POST add a new state model definition (command: addStateModelDef)
 */
public class StateModelsResource extends ServerResource {
  private final static Logger LOG = LoggerFactory.getLogger(StateModelsResource.class);

  public StateModelsResource() {
    // Serve both plain text and JSON; content negotiation is handled manually.
    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
    setNegotiated(false);
  }

  @Override
  public Representation get() {
    StringRepresentation presentation = null;
    try {
      presentation = getStateModelsRepresentation();
    } catch (Exception e) {
      String error = ClusterRepresentationUtil.getErrorAsJsonStringFromException(e);
      presentation = new StringRepresentation(error, MediaType.APPLICATION_JSON);
      LOG.error("", e);
    }
    return presentation;
  }

  /**
   * Build a JSON list of the cluster's state model definition names, stored under
   * the "models" list field of a record named "modelDefinitions".
   */
  StringRepresentation getStateModelsRepresentation() throws JsonGenerationException,
      JsonMappingException, IOException {
    String clusterName = (String) getRequest().getAttributes().get("clusterName");
    ZkClient zkClient = (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
    ClusterSetup setupTool = new ClusterSetup(zkClient);
    List<String> models = setupTool.getClusterManagementTool().getStateModelDefs(clusterName);

    ZNRecord modelDefinitions = new ZNRecord("modelDefinitions");
    modelDefinitions.setListField("models", models);
    StringRepresentation representation =
        new StringRepresentation(ClusterRepresentationUtil.ZNRecordToJson(modelDefinitions),
            MediaType.APPLICATION_JSON);
    return representation;
  }

  @Override
  public Representation post(Representation entity) {
    try {
      String clusterName = (String) getRequest().getAttributes().get("clusterName");
      // Fix: removed a stray empty statement (";") that followed this assignment.
      ZkClient zkClient =
          (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
      JsonParameters jsonParameters = new JsonParameters(entity);
      String command = jsonParameters.getCommand();
      if (command.equalsIgnoreCase(ClusterSetup.addStateModelDef)) {
        ZNRecord newStateModel =
            jsonParameters.getExtraParameter(JsonParameters.NEW_STATE_MODEL_DEF);
        HelixDataAccessor accessor =
            ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName);
        accessor.setProperty(accessor.keyBuilder().stateModelDef(newStateModel.getId()),
            new StateModelDefinition(newStateModel));
        // Echo the updated list of state models back to the caller.
        getResponse().setEntity(getStateModelsRepresentation());
      } else {
        throw new HelixException("Unsupported command: " + command + ". Should be one of ["
            + ClusterSetup.addStateModelDef + "]");
      }
      getResponse().setStatus(Status.SUCCESS_OK);
    } catch (Exception e) {
      // NOTE(review): errors are reported as a JSON body with HTTP 200, matching the
      // convention used elsewhere in this webapp.
      getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
          MediaType.APPLICATION_JSON);
      getResponse().setStatus(Status.SUCCESS_OK);
      LOG.error("Error in posting " + entity, e);
    }
    return null;
  }
}
| 9,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.