Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/config/NoneFeatureConfig.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.config;

import com.amazonaws.services.neptune.propertygraph.Label;

public class NoneFeatureConfig {

    private final Label label;
    private final String property;

    public NoneFeatureConfig(Label label, String property) {
        this.label = label;
        this.property = property;
    }

    public Label label() { return label; }

    public String property() { return property; }

    @Override
    public String toString() {
        return "NoneFeatureConfig{" + "label=" + label + ", property='" + property + '\'' + '}';
    }
}

Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/config/ElementConfig.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.config;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.config.Word2VecConfig;
import com.amazonaws.services.neptune.propertygraph.Label;

import java.util.Collection;
import java.util.Collections;
import java.util.stream.Collectors;

public class ElementConfig {

    public static final ElementConfig EMPTY_CONFIG = new ElementConfig(
            Collections.emptyList(), Collections.emptyList(), Collections.emptyList(),
            Collections.emptyList(), Collections.emptyList(), Collections.emptyList(),
            Collections.emptyList(), Collections.emptyList(), Collections.emptyList());

    private final Collection<LabelConfigV2> classLabels;
    private final Collection<NoneFeatureConfig> noneFeatures;
    private final Collection<TfIdfConfigV2> tfIdfFeatures;
    private final Collection<DatetimeConfigV2> datetimeFeatures;
    private final Collection<Word2VecConfig> word2VecFeatures;
    private final Collection<FastTextConfig> fastTextFeatures;
    private final Collection<SbertConfig> sbertFeatures;
    private final Collection<NumericalBucketFeatureConfigV2> numericalBucketFeatures;
    private final Collection<FeatureOverrideConfigV2> featureOverrides;

    public ElementConfig(Collection<LabelConfigV2> classLabels,
                         Collection<NoneFeatureConfig> noneFeatures,
                         Collection<TfIdfConfigV2> tfIdfFeatures,
                         Collection<DatetimeConfigV2> datetimeFeatures,
                         Collection<Word2VecConfig> word2VecFeatures,
                         Collection<FastTextConfig> fastTextFeatures,
                         Collection<SbertConfig> sbertFeatures,
                         Collection<NumericalBucketFeatureConfigV2> numericalBucketFeatures,
                         Collection<FeatureOverrideConfigV2> featureOverrides) {
        this.classLabels = classLabels;
        this.noneFeatures = noneFeatures;
        this.tfIdfFeatures = tfIdfFeatures;
        this.datetimeFeatures = datetimeFeatures;
        this.word2VecFeatures = word2VecFeatures;
        this.fastTextFeatures = fastTextFeatures;
        this.sbertFeatures = sbertFeatures;
        this.numericalBucketFeatures = numericalBucketFeatures;
        this.featureOverrides = featureOverrides;
    }

    public boolean allowAutoInferFeature(Label label, String property) {
        if (hasClassificationSpecificationForProperty(label, property)) { return false; }
        if (hasNoneFeatureSpecification(label, property)) { return false; }
        if (hasTfIdfSpecification(label, property)) { return false; }
        if (hasDatetimeSpecification(label, property)) { return false; }
        if (hasWord2VecSpecification(label, property)) { return false; }
        if (hasFastTextSpecification(label, property)) { return false; }
        if (hasSbertSpecification(label, property)) { return false; }
        if (hasNumericalBucketSpecification(label, property)) { return false; }
        if (hasFeatureOverrideForProperty(label, property)) { return false; }
        return true;
    }

    public boolean hasClassificationSpecificationsFor(Label label) {
        return !getClassificationSpecifications(label).isEmpty();
    }

    public Collection<LabelConfigV2> getClassificationSpecifications(Label label) {
        return classLabels.stream().filter(c -> c.label().equals(label)).collect(Collectors.toList());
    }

    public boolean hasClassificationSpecificationForProperty(Label label, String property) {
        return getClassificationSpecifications(label).stream().anyMatch(s -> s.property().equals(property));
    }

    public Collection<LabelConfigV2> getAllClassificationSpecifications() {
        return classLabels;
    }

    public boolean hasNoneFeatureSpecification(Label label, String property) {
        return getNoneFeatureSpecification(label, property) != null;
    }

    public NoneFeatureConfig getNoneFeatureSpecification(Label label, String property) {
        return noneFeatures.stream()
                .filter(config -> config.label().equals(label) && config.property().equals(property))
                .findFirst().orElse(null);
    }

    public boolean hasTfIdfSpecification(Label label, String property) {
        return getTfIdfSpecification(label, property) != null;
    }

    public TfIdfConfigV2 getTfIdfSpecification(Label label, String property) {
        return tfIdfFeatures.stream()
                .filter(config -> config.label().equals(label) && config.property().equals(property))
                .findFirst().orElse(null);
    }

    public boolean hasDatetimeSpecification(Label label, String property) {
        return getDatetimeSpecification(label, property) != null;
    }

    public DatetimeConfigV2 getDatetimeSpecification(Label label, String property) {
        return datetimeFeatures.stream()
                .filter(config -> config.label().equals(label) && config.property().equals(property))
                .findFirst().orElse(null);
    }

    public boolean hasWord2VecSpecification(Label label, String property) {
        return getWord2VecSpecification(label, property) != null;
    }

    public Word2VecConfig getWord2VecSpecification(Label label, String property) {
        return word2VecFeatures.stream()
                .filter(config -> config.label().equals(label) && config.property().equals(property))
                .findFirst().orElse(null);
    }

    public boolean hasFastTextSpecification(Label label, String property) {
        return getFastTextSpecification(label, property) != null;
    }

    public FastTextConfig getFastTextSpecification(Label label, String property) {
        return fastTextFeatures.stream()
                .filter(config -> config.label().equals(label) && config.property().equals(property))
                .findFirst().orElse(null);
    }

    public boolean hasSbertSpecification(Label label, String property) {
        return getSbertSpecification(label, property) != null;
    }

    public SbertConfig getSbertSpecification(Label label, String property) {
        return sbertFeatures.stream()
                .filter(config -> config.label().equals(label) && config.property().equals(property))
                .findFirst().orElse(null);
    }

    public boolean hasNumericalBucketSpecification(Label label, String property) {
        return getNumericalBucketSpecification(label, property) != null;
    }

    public NumericalBucketFeatureConfigV2 getNumericalBucketSpecification(Label label, String property) {
        return numericalBucketFeatures.stream()
                .filter(config -> config.label().equals(label) && config.property().equals(property))
                .findFirst().orElse(null);
    }

    public boolean hasFeatureOverrideForProperty(Label label, String property) {
        return featureOverrides.stream()
                .anyMatch(override -> override.label().equals(label) && override.properties().contains(property));
    }

    public Collection<FeatureOverrideConfigV2> getFeatureOverrides(Label label) {
        return featureOverrides.stream()
                .filter(c -> c.label().equals(label))
                .collect(Collectors.toList());
    }

    public FeatureOverrideConfigV2 getFeatureOverride(Label label, String property) {
        return featureOverrides.stream()
                .filter(config -> config.label().equals(label) && config.properties().contains(property))
                .findFirst().orElse(null);
    }

    @Override
    public String toString() {
        return "ElementConfig{" +
                "classLabels=" + classLabels +
                ", tfIdfFeatures=" + tfIdfFeatures +
                ", datetimeFeatures=" + datetimeFeatures +
                ", word2VecFeatures=" + word2VecFeatures +
                ", numericalBucketFeatures=" + numericalBucketFeatures +
                ", featureOverrides=" + featureOverrides +
                '}';
    }
}

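To make the short-circuit logic of allowAutoInferFeature concrete, here is a minimal illustrative sketch (not part of the repository): any explicit specification for a label/property pair suppresses auto-inference for that pair. It assumes Label exposes a single-String constructor, which this section does not show.

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.config;

import com.amazonaws.services.neptune.propertygraph.Label;
import java.util.Collections;

public class ElementConfigSketch {
    public static void main(String[] args) {
        Label movie = new Label("Movie"); // assumed constructor, not shown in this section
        ElementConfig config = new ElementConfig(
                Collections.emptyList(),
                Collections.singletonList(new NoneFeatureConfig(movie, "internalId")),
                Collections.emptyList(), Collections.emptyList(), Collections.emptyList(),
                Collections.emptyList(), Collections.emptyList(), Collections.emptyList(),
                Collections.emptyList());
        // An explicit 'none' feature covers internalId, so auto-inference is suppressed:
        System.out.println(config.allowAutoInferFeature(movie, "internalId")); // false
        // No specification mentions genre, so it stays eligible for auto-inference:
        System.out.println(config.allowAutoInferFeature(movie, "genre"));      // true
    }
}
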
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/config/SupportedFastTextLanguages.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.config;

public enum SupportedFastTextLanguages {
    en, zh, hi, es, fr
}

Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/config/NodeTaskTypeV2.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.config;

public enum NodeTaskTypeV2 {
    classification, regression
}

Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/config/SbertConfig.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.config;

import com.amazonaws.services.neptune.propertygraph.Label;

public class SbertConfig {

    private final Label label;
    private final String property;
    private final String name;

    public SbertConfig(Label label, String property, String name) {
        this.label = label;
        this.property = property;
        this.name = name;
    }

    public Label label() { return label; }

    public String property() { return property; }

    public String name() { return name; }

    @Override
    public String toString() {
        return "SbertConfig{" + "label=" + label + ", property='" + property + '\'' + ", name='" + name + '\'' + '}';
    }
}

Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/config/EdgeTaskTypeV2.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.config;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.amazonaws.services.neptune.propertygraph.Label;
import org.apache.commons.lang.StringUtils;

public enum EdgeTaskTypeV2 {
    classification,
    regression,
    link_prediction {
        @Override
        public void validate(String property, Label label) {
            // Do nothing
        }
    };

    public void validate(String property, Label label) {
        ParsingContext context = new ParsingContext(String.format("edge %s specification", name())).withLabel(label);
        if (StringUtils.isEmpty(property)) {
            throw new IllegalArgumentException(String.format("Missing or empty 'property' field for %s.", context));
        }
    }
}

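The enum's behavior is easiest to see side by side: classification and regression require a target property, while the link_prediction constant overrides validate as a no-op. A minimal sketch, again assuming Label has a single-String constructor (not shown here):

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.config;

import com.amazonaws.services.neptune.propertygraph.Label;

public class EdgeTaskTypeV2Sketch {
    public static void main(String[] args) {
        Label rated = new Label("rated"); // assumed constructor
        EdgeTaskTypeV2.link_prediction.validate(null, rated); // no-op: link prediction needs no target property
        try {
            EdgeTaskTypeV2.classification.validate("", rated); // empty property is rejected
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // "Missing or empty 'property' field for ..."
        }
    }
}
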
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/config/TaskTypeV2.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.config;

public enum TaskTypeV2 {
    link_prediction,
    node_classification,
    node_regression,
    edge_classification,
    edge_regression
}

Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/config/FastTextConfig.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.config;

import com.amazonaws.services.neptune.propertygraph.Label;

import java.util.Optional;

public class FastTextConfig {

    private final Label label;
    private final String property;
    private final String language;
    private final Integer maxLength;

    public FastTextConfig(Label label, String property, String language, Integer maxLength) {
        this.label = label;
        this.property = property;
        this.language = language;
        this.maxLength = maxLength;
    }

    public Label label() { return label; }

    public String property() { return property; }

    public String language() { return language; }

    public Optional<Integer> maxLength() { return Optional.ofNullable(maxLength); }

    @Override
    public String toString() {
        return "FastTextConfig{" + "label=" + label + ", property='" + property + '\'' + ", language=" + language + ", maxLength=" + maxLength + '}';
    }
}

Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/config/FeatureEncodingFlag.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.config;

public enum FeatureEncodingFlag {
    none, auto
}

Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/parsing/ParseFastTextLanguage.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ErrorMessageHelper;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.SupportedFastTextLanguages;
import com.fasterxml.jackson.databind.JsonNode;

import java.util.Arrays;

public class ParseFastTextLanguage {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseFastTextLanguage(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public String parseLanguage() {
        if (json.has("language")) {
            if (json.get("language").isTextual()) {
                return json.get("language").textValue();
            }
        }
        throw ErrorMessageHelper.errorParsingField(
                "language",
                context,
                "one of the following values: " + ErrorMessageHelper.quoteList(Arrays.asList(SupportedFastTextLanguages.values())));
    }
}

Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/parsing/ParseMaxFeaturesV2.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ErrorMessageHelper;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.fasterxml.jackson.databind.JsonNode;

public class ParseMaxFeaturesV2 {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseMaxFeaturesV2(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public int parseMaxFeatures() {
        if (json.has("max_features") && json.path("max_features").isInt()) {
            return json.path("max_features").asInt();
        } else {
            throw ErrorMessageHelper.errorParsingField("max_features", context, "an integer value");
        }
    }
}

Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/parsing/ParseSbertTypeName.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ErrorMessageHelper;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.FeatureTypeV2;
import com.fasterxml.jackson.databind.JsonNode;

import java.util.Arrays;

public class ParseSbertTypeName {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseSbertTypeName(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public String parseTypeName() {
        if (json.has("type") && json.get("type").isTextual()) {
            return json.get("type").textValue();
        }
        throw ErrorMessageHelper.errorParsingField(
                "type",
                context,
                "one of the following values: " + ErrorMessageHelper.quoteList(Arrays.asList(
                        FeatureTypeV2.text_sbert.name(),
                        FeatureTypeV2.text_sbert128.name(),
                        FeatureTypeV2.text_sbert512.name())));
    }
}

Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/parsing/ParseLabelsV2.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.NeptuneMLSourceDataModel;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.config.Separator;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.*;
import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.EdgeTaskTypeV2;
import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.LabelConfigV2;
import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.RdfTaskTypeV2;
import com.amazonaws.services.neptune.propertygraph.Label;
import com.fasterxml.jackson.databind.JsonNode;

import java.util.ArrayList;
import java.util.Collection;

public class ParseLabelsV2 {

    private final Collection<JsonNode> config;
    private final Collection<Double> defaultSplitRates;
    private final NeptuneMLSourceDataModel dataModel;

    public ParseLabelsV2(Collection<JsonNode> config,
                         Collection<Double> defaultSplitRates,
                         NeptuneMLSourceDataModel dataModel) {
        this.config = config;
        this.defaultSplitRates = defaultSplitRates;
        this.dataModel = dataModel;
    }

    public Collection<LabelConfigV2> parseNodeClassLabels() {
        Collection<LabelConfigV2> nodeClassLabels = new ArrayList<>();
        for (JsonNode json : config) {
            if (dataModel.isRdfLinkPrediction(json)) {
                ParsingContext context = new ParsingContext("edge");
                String subject = new ParseSubject(json, context).parseSubject();
                String predicate = dataModel.parseProperty(json, context, null);
                String object = new ParseObject(json, context).parseObject();
                Collection<Double> splitRates = new ParseSplitRate(json, defaultSplitRates, context).parseSplitRates();
                nodeClassLabels.add(new LabelConfigV2(null, RdfTaskTypeV2.link_prediction.name(), predicate, subject, object, splitRates, null));
            } else if (isNodeClass(json)) {
                ParsingContext context = new ParsingContext(String.format("node %s", dataModel.nodeTypeName().toLowerCase()));
                Label nodeType = new ParseNodeType(json, context).parseNodeType();
                String property = dataModel.parseProperty(json, context, nodeType);
                ParsingContext propertyContext = context.withLabel(nodeType).withProperty(property);
                String taskType = dataModel.parseTaskType(json, propertyContext, nodeType, property);
                Separator separator = new ParseSeparator(json).parseSeparator();
                Collection<Double> splitRates = new ParseSplitRate(json, defaultSplitRates, propertyContext).parseSplitRates();
                nodeClassLabels.add(new LabelConfigV2(nodeType, taskType, property, null, null, splitRates, separator));
            }
        }
        return nodeClassLabels;
    }

    private boolean isNodeClass(JsonNode json) { return json.has("node"); }

    private boolean isEdgeClass(JsonNode json) { return json.has("edge"); }

    public void validate() {
        for (JsonNode json : config) {
            if (!dataModel.isRdfLinkPrediction(json)) {
                if (!isNodeClass(json) && !isEdgeClass(json)) {
                    throw new IllegalArgumentException(String.format("Illegal target element. Expected %s field.", dataModel.labelFields()));
                }
            }
        }
    }

    public Collection<LabelConfigV2> parseEdgeClassLabels() {
        Collection<LabelConfigV2> edgeClassLabels = new ArrayList<>();
        for (JsonNode json : config) {
            if (isEdgeClass(json)) {
                ParsingContext context = new ParsingContext("edge label");
                Label edgeType = new ParseEdgeType(json, context).parseEdgeType();
                String property = new ParseProperty(json, context.withLabel(edgeType)).parseNullableSingleProperty();
                ParsingContext propertyContext = context.withLabel(edgeType).withProperty(property);
                EdgeTaskTypeV2 taskType = new ParseEdgeTaskTypeV2(json, propertyContext).parseTaskType();
                taskType.validate(property, edgeType);
                Separator separator = new ParseSeparator(json).parseSeparator();
                Collection<Double> splitRates = new ParseSplitRate(json, defaultSplitRates, propertyContext).parseSplitRates();
                edgeClassLabels.add(new LabelConfigV2(edgeType, taskType.name(), property, null, null, splitRates, separator));
            }
        }
        return edgeClassLabels;
    }
}

Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/parsing/ParseTaskTypeV2.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ErrorMessageHelper;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.TaskTypeV2;
import com.fasterxml.jackson.databind.JsonNode;

import java.util.ArrayList;
import java.util.Collection;

public class ParseTaskTypeV2 {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseTaskTypeV2(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public TaskTypeV2 parseTaskType() {
        if (json.has("task_type")) {
            String taskType = json.get("task_type").textValue();
            try {
                return TaskTypeV2.valueOf(taskType);
            } catch (IllegalArgumentException e) {
                throw ErrorMessageHelper.invalidFieldValue("task_type", taskType, context, taskTypeNames());
            }
        }
        throw ErrorMessageHelper.errorParsingField("task_type", context,
                "one of the following values: " + ErrorMessageHelper.quoteList(taskTypeNames()));
    }

    private Collection<String> taskTypeNames() {
        Collection<String> results = new ArrayList<>();
        for (TaskTypeV2 taskTypeV2 : TaskTypeV2.values()) {
            results.add(taskTypeV2.name());
        }
        return results;
    }
}

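A brief usage sketch (illustrative only): fed a Jackson tree, the parser returns the matching enum constant, and an unrecognized value triggers invalidFieldValue listing all five TaskTypeV2 names. The ParsingContext description string is arbitrary.

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.TaskTypeV2;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ParseTaskTypeV2Sketch {
    public static void main(String[] args) throws Exception {
        JsonNode json = new ObjectMapper().readTree("{\"task_type\": \"node_classification\"}");
        TaskTypeV2 taskType = new ParseTaskTypeV2(json, new ParsingContext("training job config")).parseTaskType();
        System.out.println(taskType); // node_classification
    }
}
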
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/parsing/ParseBucketCountV2.java
package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ErrorMessageHelper;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.fasterxml.jackson.databind.JsonNode;

public class ParseBucketCountV2 {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseBucketCountV2(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public int parseBucketCount() {
        if (json.has("bucket_cnt") && json.path("bucket_cnt").isInt()) {
            return json.path("bucket_cnt").asInt();
        } else if (json.has("num_buckets") && json.path("num_buckets").isInt()) {
            return json.path("num_buckets").asInt();
        } else {
            throw ErrorMessageHelper.errorParsingField("bucket_cnt", context, "an integer");
        }
    }
}

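This parser accepts two spellings of the same setting, checking bucket_cnt before num_buckets; only the primary name appears in the error message. An illustrative sketch:

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ParseBucketCountV2Sketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        ParsingContext context = new ParsingContext("bucket_numerical feature");
        // Both field names are accepted; "bucket_cnt" wins if both are present.
        System.out.println(new ParseBucketCountV2(mapper.readTree("{\"bucket_cnt\": 10}"), context).parseBucketCount());  // 10
        System.out.println(new ParseBucketCountV2(mapper.readTree("{\"num_buckets\": 5}"), context).parseBucketCount());  // 5
    }
}
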
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/parsing/ParseFeatureTypeV2.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ErrorMessageHelper;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.FeatureTypeV2;
import com.fasterxml.jackson.databind.JsonNode;

import java.util.Arrays;

public class ParseFeatureTypeV2 {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseFeatureTypeV2(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public FeatureTypeV2 parseFeatureType() {
        if (json.has("type") && json.get("type").isTextual()) {
            String type = json.get("type").textValue();
            if (type.equals("numerical") || type.equals("category") || type.equals("auto") || type.equals("none")) {
                return FeatureTypeV2.valueOf(type);
            } else {
                throw ErrorMessageHelper.invalidFieldValue("type", type, context,
                        Arrays.asList("numerical", "category", "auto", "none"));
            }
        }
        throw ErrorMessageHelper.errorParsingField("type", context,
                "one of the following values: 'numerical', 'category', 'auto', 'none'");
    }
}

Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/parsing/ParseMinDfV2.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ErrorMessageHelper;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.fasterxml.jackson.databind.JsonNode;

public class ParseMinDfV2 {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseMinDfV2(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public int parseMinDf() {
        if (json.has("min_df") && json.path("min_df").isInt()) {
            return json.path("min_df").asInt();
        } else {
            throw ErrorMessageHelper.errorParsingField("min_df", context, "an integer value");
        }
    }
}

Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/parsing/ParseDatetimePartsV2.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ErrorMessageHelper;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.DatetimePartV2;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;

public class ParseDatetimePartsV2 {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseDatetimePartsV2(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public Collection<DatetimePartV2> parseDatetimeParts() {
        if (json.has("datetime_parts")) {
            if (json.get("datetime_parts").isArray()) {
                ArrayNode datetimeParts = (ArrayNode) json.get("datetime_parts");
                Collection<DatetimePartV2> results = new ArrayList<>();
                for (JsonNode datetimePart : datetimeParts) {
                    String value = datetimePart.textValue();
                    try {
                        results.add(DatetimePartV2.valueOf(value));
                    } catch (IllegalArgumentException e) {
                        throw ErrorMessageHelper.invalidFieldValue("datetime_parts", value, context, datetimePartNames());
                    }
                }
                return results.isEmpty() ? Arrays.asList(DatetimePartV2.values()) : results;
            } else {
                throw ErrorMessageHelper.errorParsingField("datetime_parts", context, "an array value");
            }
        } else {
            return Arrays.asList(DatetimePartV2.values());
        }
    }

    private Collection<String> datetimePartNames() {
        Collection<String> results = new ArrayList<>();
        for (DatetimePartV2 value : DatetimePartV2.values()) {
            results.add(value.name());
        }
        return results;
    }
}

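Note the fallback behavior: both an absent datetime_parts field and an empty array yield every DatetimePartV2 value. A small illustrative sketch:

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.DatetimePartV2;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Collection;

public class ParseDatetimePartsV2Sketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        ParsingContext context = new ParsingContext("datetime feature");
        // Absent field: falls back to every DatetimePartV2 value.
        Collection<DatetimePartV2> all = new ParseDatetimePartsV2(mapper.readTree("{}"), context).parseDatetimeParts();
        // Empty array: also every value, because of the results.isEmpty() check.
        Collection<DatetimePartV2> alsoAll = new ParseDatetimePartsV2(mapper.readTree("{\"datetime_parts\": []}"), context).parseDatetimeParts();
        System.out.println(all.equals(alsoAll) ? "same fallback" : "different");
    }
}
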
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/parsing/ParseRdfTaskType.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ErrorMessageHelper;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.RdfTaskTypeV2;
import com.fasterxml.jackson.databind.JsonNode;

import java.util.Arrays;

public class ParseRdfTaskType {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseRdfTaskType(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public RdfTaskTypeV2 parseTaskType() {
        if (json.has("type") && json.get("type").isTextual()) {
            String type = json.get("type").textValue();
            try {
                return RdfTaskTypeV2.valueOf(type);
            } catch (IllegalArgumentException e) {
                throw ErrorMessageHelper.invalidFieldValue(
                        "type", type, context,
                        ErrorMessageHelper.enumNames(Arrays.asList(RdfTaskTypeV2.values())));
            }
        }
        throw ErrorMessageHelper.errorParsingField(
                "type", context,
                "one of the following values: " + ErrorMessageHelper.quoteList(Arrays.asList(RdfTaskTypeV2.values())));
    }
}

Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/parsing/ParseFeaturesV2.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.config.*;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.*;
import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.*;
import com.amazonaws.services.neptune.propertygraph.Label;
import com.fasterxml.jackson.databind.JsonNode;

import java.util.ArrayList;
import java.util.Collection;

public class ParseFeaturesV2 {

    private final Collection<JsonNode> features;

    public ParseFeaturesV2(Collection<JsonNode> features) {
        this.features = features;
    }

    public interface ElementFeatureFilter {
        boolean isCorrectType(JsonNode json);
    }

    public interface LabelSupplier {
        Label getLabel(JsonNode json, ParsingContext context);
    }

    public static ElementFeatureFilter NodeFeatureFilter = json -> json.has("node") && json.has("type");
    public static ElementFeatureFilter EdgeFeatureFilter = json -> json.has("edge") && json.has("type");

    public static LabelSupplier NodeLabelSupplier = (json, context) -> new ParseNodeType(json, context).parseNodeType();
    public static LabelSupplier EdgeLabelSupplier = (json, context) -> new ParseEdgeType(json, context).parseEdgeType();

    public void validate() {
        for (JsonNode feature : features) {
            if (!isNoneFeature(feature) &&
                    !isTfIdfFeature(feature) &&
                    !isDatetimeFeature(feature) &&
                    !isAutoFeature(feature) &&
                    !isWord2VecFeature(feature) &&
                    !isFastTextFeature(feature) &&
                    !isSbertTextFeature(feature) &&
                    !isNumericalBucketFeature(feature) &&
                    !isNodeFeatureOverride(feature) &&
                    !isEdgeFeatureOverride(feature)) {
                if (feature.has("type")) {
                    String featureType = feature.get("type").textValue();
                    throw new IllegalArgumentException(
                            String.format("Illegal feature type: '%s'. Supported values are: %s.",
                                    featureType,
                                    ErrorMessageHelper.quoteList(FeatureTypeV2.publicFormattedNames())));
                }
            }
        }
    }

    public Collection<NoneFeatureConfig> parseNoneFeatures(ElementFeatureFilter filter, LabelSupplier supplier) {
        Collection<NoneFeatureConfig> noneFeatures = new ArrayList<>();
        for (JsonNode json : features) {
            if (filter.isCorrectType(json) && isNoneFeature(json)) {
                ParsingContext context = new ParsingContext(FeatureTypeV2.none.name() + " feature");
                Label label = supplier.getLabel(json, context);
                String property = new ParseProperty(json, context.withLabel(label)).parseSingleProperty();
                NoneFeatureConfig config = new NoneFeatureConfig(label, property);
                noneFeatures.add(config);
            }
        }
        return noneFeatures;
    }

    public Collection<TfIdfConfigV2> parseTfIdfFeatures(ElementFeatureFilter filter, LabelSupplier supplier) {
        Collection<TfIdfConfigV2> tfIdfFeatures = new ArrayList<>();
        for (JsonNode json : features) {
            if (filter.isCorrectType(json) && isTfIdfFeature(json)) {
                ParsingContext context = new ParsingContext(FeatureTypeV2.text_tfidf.name() + " feature");
                Label label = supplier.getLabel(json, context);
                String property = new ParseProperty(json, context.withLabel(label)).parseSingleProperty();
                ParsingContext propertyContext = context.withLabel(label).withProperty(property);
                Range ngramRange = new ParseRange(json, "ngram_range", propertyContext).parseRange();
                int minDf = new ParseMinDfV2(json, propertyContext).parseMinDf();
                int maxFeatures = new ParseMaxFeaturesV2(json, propertyContext).parseMaxFeatures();
                TfIdfConfigV2 config = new TfIdfConfigV2(label, property, ngramRange, minDf, maxFeatures);
                tfIdfFeatures.add(config);
            }
        }
        return tfIdfFeatures;
    }

    public Collection<DatetimeConfigV2> parseDatetimeFeatures(ElementFeatureFilter filter, LabelSupplier supplier) {
        Collection<DatetimeConfigV2> datetimeFeatures = new ArrayList<>();
        for (JsonNode json : features) {
            if (filter.isCorrectType(json) && isDatetimeFeature(json)) {
                ParsingContext context = new ParsingContext(FeatureTypeV2.datetime.name() + " feature");
                Label label = supplier.getLabel(json, context);
                String property = new ParseProperty(json, context.withLabel(label)).parseSingleProperty();
                Collection<DatetimePartV2> datetimeParts = new ParseDatetimePartsV2(json, context.withLabel(label).withProperty(property)).parseDatetimeParts();
                DatetimeConfigV2 config = new DatetimeConfigV2(label, property, datetimeParts);
                datetimeFeatures.add(config);
            }
        }
        return datetimeFeatures;
    }

    public Collection<Word2VecConfig> parseWord2VecFeatures(ElementFeatureFilter filter, LabelSupplier supplier) {
        Collection<Word2VecConfig> word2VecFeatures = new ArrayList<>();
        for (JsonNode json : features) {
            if (filter.isCorrectType(json) && isWord2VecFeature(json)) {
                ParsingContext context = new ParsingContext(FeatureTypeV2.text_word2vec.name() + " feature");
                Label label = supplier.getLabel(json, context);
                String property = new ParseProperty(json, context.withLabel(label)).parseSingleProperty();
                Collection<String> language = new ParseWord2VecLanguage(json).parseLanguage();
                Word2VecConfig config = new Word2VecConfig(label, property, language);
                word2VecFeatures.add(config);
            }
        }
        return word2VecFeatures;
    }

    public Collection<SbertConfig> parseSbertFeatures(ElementFeatureFilter filter, LabelSupplier supplier) {
        Collection<SbertConfig> sbertConfigs = new ArrayList<>();
        for (JsonNode json : features) {
            if (filter.isCorrectType(json) && isSbertTextFeature(json)) {
                ParsingContext context = new ParsingContext(FeatureTypeV2.text_sbert.name() + " feature");
                Label label = supplier.getLabel(json, context);
                String property = new ParseProperty(json, context.withLabel(label)).parseSingleProperty();
                String name = new ParseSbertTypeName(json, context).parseTypeName();
                SbertConfig config = new SbertConfig(label, property, name);
                sbertConfigs.add(config);
            }
        }
        return sbertConfigs;
    }

    public Collection<FastTextConfig> parseFastTextFeatures(ElementFeatureFilter filter, LabelSupplier supplier) {
        Collection<FastTextConfig> fastTextFeatures = new ArrayList<>();
        for (JsonNode json : features) {
            if (filter.isCorrectType(json) && isFastTextFeature(json)) {
                ParsingContext context = new ParsingContext(FeatureTypeV2.text_fasttext.name() + " feature");
                Label label = supplier.getLabel(json, context);
                String property = new ParseProperty(json, context.withLabel(label)).parseSingleProperty();
                String language = new ParseFastTextLanguage(json, context).parseLanguage();
                Integer maxLength = new ParseMaxLength(json, context).parseMaxLength();
                FastTextConfig config = new FastTextConfig(label, property, language, maxLength);
                fastTextFeatures.add(config);
            }
        }
        return fastTextFeatures;
    }

    public Collection<NumericalBucketFeatureConfigV2> parseNumericalBucketFeatures(ElementFeatureFilter filter, LabelSupplier supplier) {
        Collection<NumericalBucketFeatureConfigV2> numericalBucketFeatures = new ArrayList<>();
        for (JsonNode json : features) {
            if (filter.isCorrectType(json) && isNumericalBucketFeature(json)) {
                ParsingContext context = new ParsingContext(FeatureTypeV2.bucket_numerical.name() + " feature");
                Label label = supplier.getLabel(json, context);
                FeatureTypeV2.bucket_numerical.validateOverride(json, context);
                String property = new ParseProperty(json, context.withLabel(label)).parseSingleProperty();
                ParsingContext propertyContext = context.withLabel(label).withProperty(property);
                Range range = new ParseRange(json, "range", propertyContext).parseRange();
                int bucketCount = new ParseBucketCountV2(json, propertyContext).parseBucketCount();
                int slideWindowSize = new ParseSlideWindowSize(json, propertyContext).parseSlideWindowSize();
                ImputerTypeV2 imputerType = new ParseImputerTypeV2(json, propertyContext).parseImputerType();
                NumericalBucketFeatureConfigV2 config = new NumericalBucketFeatureConfigV2(label, property, range, bucketCount, slideWindowSize, imputerType);
                numericalBucketFeatures.add(config);
            }
        }
        return numericalBucketFeatures;
    }

    public Collection<FeatureOverrideConfigV2> parseNodeFeatureOverrides() {
        Collection<FeatureOverrideConfigV2> featureOverrides = new ArrayList<>();
        for (JsonNode json : features) {
            if (isNodeFeatureOverride(json)) {
                ParsingContext context = new ParsingContext("node feature");
                Label nodeType = new ParseNodeType(json, context).parseNodeType();
                Collection<String> properties = new ParseProperty(json, context.withLabel(nodeType)).parseMultipleProperties();
                ParsingContext propertiesContext = context.withLabel(nodeType).withProperties(properties);
                FeatureTypeV2 type = new ParseFeatureTypeV2(json, propertiesContext).parseFeatureType();
                type.validateOverride(json, context);
                Norm norm = new ParseNorm(json, propertiesContext).parseNorm();
                Separator separator = new ParseSeparator(json).parseSeparator();
                ImputerTypeV2 imputerType = new ParseImputerTypeV2(json, propertiesContext).parseImputerType();
                FeatureOverrideConfigV2 config = new FeatureOverrideConfigV2(nodeType, properties, type, norm, separator, imputerType);
                featureOverrides.add(config);
            }
        }
        return featureOverrides;
    }

    public Collection<FeatureOverrideConfigV2> parseEdgeFeatureOverrides() {
        Collection<FeatureOverrideConfigV2> featureOverrides = new ArrayList<>();
        for (JsonNode node : features) {
            if (isEdgeFeatureOverride(node)) {
                ParsingContext context = new ParsingContext("edge feature");
                Label edgeType = new ParseEdgeType(node, context).parseEdgeType();
                Collection<String> properties = new ParseProperty(node, context.withLabel(edgeType)).parseMultipleProperties();
                ParsingContext propertiesContext = context.withLabel(edgeType).withProperties(properties);
                FeatureTypeV2 type = new ParseFeatureTypeV2(node, propertiesContext).parseFeatureType();
                type.validateOverride(node, context);
                Norm norm = new ParseNorm(node, propertiesContext).parseNorm();
                Separator separator = new ParseSeparator(node).parseSeparator();
                ImputerTypeV2 imputerType = new ParseImputerTypeV2(node, context).parseImputerType();
                FeatureOverrideConfigV2 config = new FeatureOverrideConfigV2(edgeType, properties, type, norm, separator, imputerType);
                featureOverrides.add(config);
            }
        }
        return featureOverrides;
    }

    private boolean isNoneFeature(JsonNode node) { return isNoneFeatureType(node.get("type").textValue()); }

    private boolean isTfIdfFeature(JsonNode node) { return isTfIdfType(node.get("type").textValue()); }

    private boolean isDatetimeFeature(JsonNode node) { return isDatetimeType(node.get("type").textValue()); }

    private boolean isAutoFeature(JsonNode node) { return isAutoType(node.get("type").textValue()); }

    private boolean isWord2VecFeature(JsonNode node) { return isWord2VecType(node.get("type").textValue()); }

    private boolean isFastTextFeature(JsonNode node) { return isFastTextType(node.get("type").textValue()); }

    private boolean isSbertTextFeature(JsonNode node) { return isSbertTextType(node.get("type").textValue()); }

    private boolean isNumericalBucketFeature(JsonNode node) { return isBucketNumericalType(node.get("type").textValue()); }

    private boolean isNodeFeatureOverride(JsonNode node) {
        if (isNodeFeature(node)) {
            String type = node.get("type").textValue();
            return (isNumericalType(type) || isCategoricalType(type) || isAutoType(type) || isNoneType(type));
        }
        return false;
    }

    private boolean isEdgeFeatureOverride(JsonNode node) {
        if (isEdgeFeature(node)) {
            String type = node.get("type").textValue();
            return (isNumericalType(type) || isCategoricalType(type));
        }
        return false;
    }

    private boolean isNodeFeature(JsonNode node) { return node.has("node") && node.has("type"); }

    private boolean isEdgeFeature(JsonNode node) { return node.has("edge") && node.has("type"); }

    private boolean isNoneFeatureType(String type) { return isOfType(FeatureTypeV2.none, type); }

    private boolean isTfIdfType(String type) { return isOfType(FeatureTypeV2.text_tfidf, type); }

    private boolean isDatetimeType(String type) { return isOfType(FeatureTypeV2.datetime, type); }

    private boolean isAutoType(String type) { return isOfType(FeatureTypeV2.auto, type); }

    private boolean isWord2VecType(String type) { return isOfType(FeatureTypeV2.text_word2vec, type); }

    private boolean isFastTextType(String type) { return isOfType(FeatureTypeV2.text_fasttext, type); }

    private boolean isSbertTextType(String type) {
        return isOfType(FeatureTypeV2.text_sbert, type) ||
                isOfType(FeatureTypeV2.text_sbert128, type) ||
                isOfType(FeatureTypeV2.text_sbert512, type);
    }

    private boolean isBucketNumericalType(String type) { return isOfType(FeatureTypeV2.bucket_numerical, type); }

    private boolean isCategoricalType(String type) { return isOfType(FeatureTypeV2.category, type); }

    private boolean isNumericalType(String type) { return isOfType(FeatureTypeV2.numerical, type); }

    private boolean isNoneType(String type) { return isOfType(FeatureTypeV2.none, type); }

    private boolean isOfType(FeatureTypeV2 featureTypeV2, String s) {
        for (String validName : featureTypeV2.validNames()) {
            if (validName.equals(s)) {
                return true;
            }
        }
        return false;
    }
}

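A hedged wiring sketch: validate() rejects unknown "type" values up front, then each parseXxxFeatures call pairs a filter (node or edge) with a label supplier. The exact JSON shapes expected by ParseNodeType and ParseRange are not shown in this section, so the "node" string and "ngram_range" array below are assumptions.

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.TfIdfConfigV2;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Collection;
import java.util.Collections;

public class ParseFeaturesV2Sketch {
    public static void main(String[] args) throws Exception {
        // Assumed field shapes; only "type", "property", "min_df", "max_features" are confirmed by this section.
        JsonNode feature = new ObjectMapper().readTree(
                "{\"node\": \"Movie\", \"type\": \"text_tfidf\", \"property\": \"plot\"," +
                " \"ngram_range\": [1, 2], \"min_df\": 5, \"max_features\": 100}");
        ParseFeaturesV2 parser = new ParseFeaturesV2(Collections.singletonList(feature));
        parser.validate(); // throws on an unrecognized "type"
        Collection<TfIdfConfigV2> tfIdf = parser.parseTfIdfFeatures(
                ParseFeaturesV2.NodeFeatureFilter, ParseFeaturesV2.NodeLabelSupplier);
        System.out.println(tfIdf);
    }
}
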
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/parsing/ParseMaxLength.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ErrorMessageHelper;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.fasterxml.jackson.databind.JsonNode;

public class ParseMaxLength {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseMaxLength(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public Integer parseMaxLength() {
        if (json.has("max_length")) {
            if (json.path("max_length").isInt()) {
                return json.path("max_length").asInt();
            } else {
                throw ErrorMessageHelper.errorParsingField("max_length", context, "an integer value");
            }
        } else {
            return null;
        }
    }
}

Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/parsing/ParseImputerTypeV2.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ErrorMessageHelper;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.ImputerTypeV2;
import com.fasterxml.jackson.databind.JsonNode;

public class ParseImputerTypeV2 {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseImputerTypeV2(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public ImputerTypeV2 parseImputerType() {
        if (json.has("imputer")) {
            String imputerType = json.get("imputer").textValue();
            try {
                return ImputerTypeV2.fromString(imputerType);
            } catch (IllegalArgumentException e) {
                throw ErrorMessageHelper.invalidFieldValue("imputer", imputerType, context, ImputerTypeV2.publicFormattedNames());
            }
        }
        return ImputerTypeV2.none;
    }
}

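Unlike most of the parsers here, a missing imputer field is not an error; it silently defaults to ImputerTypeV2.none. A minimal sketch:

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.ImputerTypeV2;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ParseImputerTypeV2Sketch {
    public static void main(String[] args) throws Exception {
        ImputerTypeV2 imputer = new ParseImputerTypeV2(
                new ObjectMapper().readTree("{}"),
                new ParsingContext("node feature")).parseImputerType();
        System.out.println(imputer); // none: the default when "imputer" is absent
    }
}
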
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/parsing/ParseEdgeTaskTypeV2.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ErrorMessageHelper;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.EdgeTaskTypeV2;
import com.fasterxml.jackson.databind.JsonNode;

import java.util.Arrays;

public class ParseEdgeTaskTypeV2 {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseEdgeTaskTypeV2(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public EdgeTaskTypeV2 parseTaskType() {
        if (json.has("type") && json.get("type").isTextual()) {
            String type = json.get("type").textValue();
            try {
                return EdgeTaskTypeV2.valueOf(type);
            } catch (IllegalArgumentException e) {
                throw ErrorMessageHelper.invalidFieldValue(
                        "type", type, context,
                        ErrorMessageHelper.enumNames(Arrays.asList(EdgeTaskTypeV2.values())));
            }
        }
        throw ErrorMessageHelper.errorParsingField(
                "type", context,
                "one of the following values: " + ErrorMessageHelper.quoteList(Arrays.asList(EdgeTaskTypeV2.values())));
    }
}

Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/v2/parsing/ParseNodeTaskTypeV2.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package com.amazonaws.services.neptune.profiles.neptune_ml.v2.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ErrorMessageHelper;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.amazonaws.services.neptune.profiles.neptune_ml.v2.config.NodeTaskTypeV2;
import com.fasterxml.jackson.databind.JsonNode;

import java.util.Arrays;

public class ParseNodeTaskTypeV2 {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseNodeTaskTypeV2(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public NodeTaskTypeV2 parseTaskType() {
        if (json.has("type") && json.get("type").isTextual()) {
            String type = json.get("type").textValue();
            try {
                return NodeTaskTypeV2.valueOf(type);
            } catch (IllegalArgumentException e) {
                throw ErrorMessageHelper.invalidFieldValue(
                        "type", type, context,
                        ErrorMessageHelper.enumNames(Arrays.asList(NodeTaskTypeV2.values())));
            }
        }
        throw ErrorMessageHelper.errorParsingField(
                "type", context,
                "one of the following values: " + ErrorMessageHelper.quoteList(Arrays.asList(NodeTaskTypeV2.values())));
    }
}

1,123
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/PropertyName.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common;

import com.amazonaws.services.neptune.propertygraph.io.PrinterOptions;
import com.amazonaws.services.neptune.propertygraph.schema.PropertySchema;

public interface PropertyName {
    String escaped(PropertySchema propertySchema, PrinterOptions printerOptions);

    String unescaped(PropertySchema propertySchema);
}
1,124
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/config/Separator.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.config;

import com.fasterxml.jackson.core.JsonGenerator;
import org.apache.commons.lang.StringUtils;

import java.io.IOException;

public class Separator {

    private static final String DEFAULT_SEPARATOR = ";";

    private final String separator;

    public Separator() {
        this(null);
    }

    public Separator(String separator) {
        this.separator = separator;
    }

    public void writeTo(JsonGenerator generator, boolean isMultiValue) throws IOException {
        if (StringUtils.isNotEmpty(separator)) {
            generator.writeStringField("separator", separator);
        } else if (isMultiValue) {
            generator.writeStringField("separator", DEFAULT_SEPARATOR);
        }
    }
}
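A minimal sketch of how Separator behaves: an explicit separator is written verbatim, and multi-value properties fall back to the default ";". The SeparatorExample class name is hypothetical; only the Separator class above and Jackson are assumed.

import com.amazonaws.services.neptune.profiles.neptune_ml.common.config.Separator;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;

import java.io.StringWriter;

public class SeparatorExample {
    public static void main(String[] args) throws Exception {
        StringWriter writer = new StringWriter();
        JsonGenerator generator = new JsonFactory().createGenerator(writer);
        generator.writeStartObject();
        // No explicit separator was configured, but the property is
        // multi-value, so the default ";" separator is emitted.
        new Separator().writeTo(generator, true);
        generator.writeEndObject();
        generator.close();
        System.out.println(writer); // {"separator":";"}
    }
}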
1,125
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/config/Norm.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.config;

import com.fasterxml.jackson.core.JsonGenerator;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;

public enum Norm {
    none {
        @Override
        public String formattedName() {
            return "none";
        }
    },
    min_max {
        @Override
        public String formattedName() {
            return "min-max";
        }
    },
    standard {
        @Override
        public String formattedName() {
            return "standard";
        }
    };

    public abstract String formattedName();

    public void addTo(JsonGenerator generator) throws IOException {
        generator.writeStringField("norm", formattedName());
    }

    @Override
    public String toString() {
        return formattedName();
    }

    public static boolean isValid(String s) {
        for (Norm value : Norm.values()) {
            if (value.formattedName().equals(s)) {
                return true;
            }
        }
        return false;
    }

    public static Norm fromString(String s) {
        for (Norm value : Norm.values()) {
            if (value.formattedName().equals(s)) {
                return value;
            }
        }
        throw new IllegalArgumentException("Invalid norm value: " + s);
    }

    public static Collection<String> formattedNames() {
        Collection<String> results = new ArrayList<>();
        for (Norm feature : Norm.values()) {
            results.add(feature.formattedName());
        }
        return results;
    }
}
1,126
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/config/Word2VecConfig.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.config;

import com.amazonaws.services.neptune.propertygraph.Label;

import java.util.Collection;

public class Word2VecConfig {

    private final Label label;
    private final String property;
    private final Collection<String> languages;

    public Word2VecConfig(Label label, String property, Collection<String> languages) {
        this.label = label;
        this.property = property;
        this.languages = languages;
    }

    public Label label() {
        return label;
    }

    public String property() {
        return property;
    }

    public Collection<String> languages() {
        return languages;
    }

    @Override
    public String toString() {
        return "Word2VecConfig{" +
                "label=" + label +
                ", property='" + property + '\'' +
                ", languages=" + languages +
                '}';
    }
}
1,127
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/config/Range.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.config;

import com.amazonaws.services.neptune.propertygraph.schema.DataType;

public class Range {

    private final Object low;
    private final Object high;

    public Range(Object low, Object high) {
        DataType lowDataType = DataType.dataTypeFor(low.getClass());
        DataType highDataType = DataType.dataTypeFor(high.getClass());

        if (!lowDataType.isNumeric() || !highDataType.isNumeric()) {
            throw new IllegalArgumentException("Low and high values must be numeric");
        }

        DataType dataType = DataType.getBroadestType(lowDataType, highDataType);

        Object highValue = dataType.convert(high);
        Object lowValue = dataType.convert(low);

        this.high = dataType.compare(highValue, lowValue) >= 0 ? highValue : lowValue;
        this.low = dataType.compare(highValue, lowValue) >= 0 ? lowValue : highValue;
    }

    public Object low() {
        return low;
    }

    public Object high() {
        return high;
    }

    @Override
    public String toString() {
        return "Range{" +
                "low=" + low +
                ", high=" + high +
                '}';
    }
}
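A minimal sketch of the Range constructor's normalization: bounds supplied in the wrong order are swapped, and mixed numeric types are coerced to one type (assuming DataType.getBroadestType promotes to the wider of the two). The RangeExample class name is hypothetical.

import com.amazonaws.services.neptune.profiles.neptune_ml.common.config.Range;

public class RangeExample {
    public static void main(String[] args) {
        // Bounds are supplied high-first; the constructor reorders them,
        // and the int 2 is converted to match the broader double type.
        Range range = new Range(10.5, 2);
        System.out.println(range.low());  // expected: 2.0
        System.out.println(range.high()); // expected: 10.5
    }
}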
1,128
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/config/SupportedWord2VecLanguages.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.config;

public enum SupportedWord2VecLanguages {
    en_core_web_lg
}
1,129
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/parsing/ParseProperty.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.NeptuneMLSourceDataModel;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;

public class ParseProperty {

    private final JsonNode json;
    private final ParsingContext context;
    private final NeptuneMLSourceDataModel dataModel;

    public ParseProperty(JsonNode json, ParsingContext context, NeptuneMLSourceDataModel dataModel) {
        this.json = json;
        this.context = context;
        this.dataModel = dataModel;
    }

    public ParseProperty(JsonNode json, ParsingContext context) {
        this(json, context, NeptuneMLSourceDataModel.PropertyGraph);
    }

    public String parseSingleProperty() {
        String fieldName = dataModel.nodeAttributeNameSingular().toLowerCase();
        if (json.has(fieldName) && json.get(fieldName).isTextual()) {
            return json.get(fieldName).textValue();
        } else {
            throw ErrorMessageHelper.errorParsingField(fieldName, context,
                    String.format("a '%s' field with a string value", fieldName));
        }
    }

    public String parseNullableSingleProperty() {
        String fieldName = dataModel.nodeAttributeNameSingular().toLowerCase();
        if (json.has(fieldName) && json.get(fieldName).isTextual()) {
            return json.get(fieldName).textValue();
        } else {
            return "";
        }
    }

    public Collection<String> parseMultipleProperties() {
        String fieldNameSingular = dataModel.nodeAttributeNameSingular().toLowerCase();
        String fieldNamePlural = dataModel.nodeAttributeNamePlural().toLowerCase();
        if (json.has(fieldNameSingular) && json.get(fieldNameSingular).isTextual()) {
            return Collections.singletonList(json.get(fieldNameSingular).textValue());
        }
        if (json.has(fieldNamePlural) && json.get(fieldNamePlural).isArray()) {
            ArrayNode properties = (ArrayNode) json.get(fieldNamePlural);
            Collection<String> results = new ArrayList<>();
            for (JsonNode property : properties) {
                results.add(property.textValue());
            }
            return results;
        } else {
            throw new IllegalArgumentException(String.format(
                    "Expected a '%s' field with a string value, or a '%s' field with an array value for %s.",
                    fieldNameSingular, fieldNamePlural, context));
        }
    }
}
1,130
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/parsing/ParseEdgeType.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing;

import com.amazonaws.services.neptune.propertygraph.Label;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;

import java.util.*;

public class ParseEdgeType {

    private final JsonNode json;
    private final ParsingContext parsingContext;

    public ParseEdgeType(JsonNode json, ParsingContext parsingContext) {
        this.json = json;
        this.parsingContext = parsingContext;
    }

    public Label parseEdgeType() {
        if (json.has("edge") && json.path("edge").isArray()) {
            ArrayNode array = (ArrayNode) json.get("edge");
            if (array.size() != 3) {
                throw error();
            }
            List<String> fromLabels = getLabels(array.get(0));
            String edgeLabel = array.get(1).textValue();
            List<String> toLabels = getLabels(array.get(2));
            if (fromLabels.size() == 1 && toLabels.size() == 1) {
                return new Label(edgeLabel, fromLabels.get(0), toLabels.get(0));
            } else {
                return new Label(edgeLabel, fromLabels, toLabels);
            }
        } else {
            throw error();
        }
    }

    private List<String> getLabels(JsonNode jsonNode) {
        if (jsonNode.isTextual()) {
            return Collections.singletonList(jsonNode.textValue());
        } else if (jsonNode.isArray()) {
            List<String> values = new ArrayList<>();
            for (JsonNode element : jsonNode) {
                values.add(element.textValue());
            }
            return values;
        } else {
            return Collections.emptyList();
        }
    }

    private IllegalArgumentException error() {
        return ErrorMessageHelper.errorParsingField("edge", parsingContext, "an array with 3 values");
    }
}
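A minimal sketch of the "edge" field format ParseEdgeType accepts: a 3-element array of [from label(s), edge label, to label(s)], where either end may be a single string or an array of strings. The ParseEdgeTypeExample class name and the sample JSON are hypothetical; only the classes above and Jackson are assumed.

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParseEdgeType;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.amazonaws.services.neptune.propertygraph.Label;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ParseEdgeTypeExample {
    public static void main(String[] args) throws Exception {
        // The "to" end uses an array, so the multi-label Label constructor is used.
        String json = "{\"edge\": [\"Person\", \"knows\", [\"Person\", \"Admin\"]]}";
        ParseEdgeType parser = new ParseEdgeType(
                new ObjectMapper().readTree(json),
                new ParsingContext("edge classification target"));
        Label label = parser.parseEdgeType();
        System.out.println(label);
    }
}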
1,131
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/parsing/ParseSplitRate.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Optional;

public class ParseSplitRate {

    private final JsonNode json;
    private final Collection<Double> defaultSplitRates;
    private final ParsingContext context;

    public ParseSplitRate(JsonNode json, Collection<Double> defaultSplitRates, ParsingContext context) {
        this.json = json;
        this.defaultSplitRates = defaultSplitRates;
        this.context = context;
    }

    public Collection<Double> parseSplitRates() {
        if (json.has("split_rate")) {
            if (json.get("split_rate").isArray()) {
                ArrayNode splitRatesArray = (ArrayNode) json.get("split_rate");
                if (splitRatesArray.size() == 3) {
                    Collection<Double> splitRates = new ArrayList<>();
                    for (JsonNode jsonNode : splitRatesArray) {
                        if (jsonNode.isDouble()) {
                            splitRates.add(jsonNode.asDouble());
                        } else {
                            throw error();
                        }
                    }
                    Optional<Double> sum = splitRates.stream().reduce(Double::sum);
                    // Compare against 1.0 with a small tolerance rather than exact
                    // equality: summing doubles such as 0.7 + 0.1 + 0.2 accumulates
                    // rounding error and rarely equals 1.0 exactly.
                    if (Math.abs(sum.orElse(0.0) - 1.0) > 0.00001) {
                        throw error();
                    }
                    return splitRates;
                } else {
                    throw error();
                }
            } else {
                throw error();
            }
        } else {
            return defaultSplitRates;
        }
    }

    private IllegalArgumentException error() {
        return ErrorMessageHelper.errorParsingField("split_rate", context,
                "an array with 3 double values that add up to 1.0");
    }
}
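A minimal sketch of the accepted "split_rate" shape: exactly three doubles summing to 1.0 (within the tolerance applied above); when the field is absent, the supplied defaults are returned. The ParseSplitRateExample class name and sample values are hypothetical.

import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParseSplitRate;
import com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing.ParsingContext;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.Arrays;

public class ParseSplitRateExample {
    public static void main(String[] args) throws Exception {
        String json = "{\"split_rate\": [0.7, 0.1, 0.2]}";
        ParseSplitRate parser = new ParseSplitRate(
                new ObjectMapper().readTree(json),
                Arrays.asList(0.9, 0.1, 0.0), // defaults, used only when the field is absent
                new ParsingContext("node classification target"));
        System.out.println(parser.parseSplitRates()); // [0.7, 0.1, 0.2]
    }
}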
1,132
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/parsing/ParseSubject.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing;

import com.fasterxml.jackson.databind.JsonNode;

public class ParseSubject {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseSubject(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public String parseSubject() {
        if (json.has("subject")) {
            JsonNode node = json.get("subject");
            if (node.isTextual()) {
                return node.textValue();
            } else {
                throw error();
            }
        } else {
            return null;
        }
    }

    private IllegalArgumentException error() {
        return ErrorMessageHelper.errorParsingField("subject", context, "a text value");
    }
}
1,133
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/parsing/ParseSeparator.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.config.Separator;
import com.fasterxml.jackson.databind.JsonNode;

public class ParseSeparator {

    private final JsonNode json;

    public ParseSeparator(JsonNode json) {
        this.json = json;
    }

    public Separator parseSeparator() {
        if (json.has("separator")) {
            return new Separator(json.get("separator").textValue());
        }
        return new Separator();
    }
}
1,134
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/parsing/ParseObject.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing;

import com.fasterxml.jackson.databind.JsonNode;

public class ParseObject {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseObject(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public String parseObject() {
        if (json.has("object")) {
            JsonNode node = json.get("object");
            if (node.isTextual()) {
                return node.textValue();
            } else {
                throw error();
            }
        } else {
            return null;
        }
    }

    private IllegalArgumentException error() {
        return ErrorMessageHelper.errorParsingField("object", context, "a text value");
    }
}
1,135
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/parsing/ParseNorm.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.config.Norm;
import com.fasterxml.jackson.databind.JsonNode;

public class ParseNorm {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseNorm(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public Norm parseNorm() {
        if (json.has("norm")) {
            String norm = json.get("norm").textValue();
            if (Norm.isValid(norm)) {
                return Norm.fromString(norm);
            } else {
                throw ErrorMessageHelper.invalidFieldValue("norm", norm, context, Norm.formattedNames());
            }
        }
        return Norm.min_max;
    }
}
1,136
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/parsing/ParseWord2VecLanguage.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.config.SupportedWord2VecLanguages;
import com.fasterxml.jackson.databind.JsonNode;

import java.util.ArrayList;
import java.util.Collection;

public class ParseWord2VecLanguage {

    private final JsonNode json;

    public ParseWord2VecLanguage(JsonNode json) {
        this.json = json;
    }

    public Collection<String> parseLanguage() {
        Collection<String> results = new ArrayList<>();
        if (json.has("language")) {
            if (json.get("language").isArray()) {
                JsonNode arrayNode = json.get("language");
                for (JsonNode jsonNode : arrayNode) {
                    results.add(jsonNode.textValue());
                }
            } else if (json.get("language").isTextual()) {
                results.add(json.get("language").textValue());
            }
        }
        if (results.isEmpty()) {
            results.add(SupportedWord2VecLanguages.en_core_web_lg.name());
        }
        return results;
    }
}
1,137
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/parsing/ParseSlideWindowSize.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing;

import com.fasterxml.jackson.databind.JsonNode;

public class ParseSlideWindowSize {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseSlideWindowSize(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public int parseSlideWindowSize() {
        if (json.has("slide_window_size")) {
            if (json.path("slide_window_size").isInt()) {
                return json.path("slide_window_size").asInt();
            } else {
                throw ErrorMessageHelper.errorParsingField("slide_window_size", context, "an integer");
            }
        } else {
            return 0;
        }
    }
}
1,138
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/parsing/ErrorMessageHelper.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing;

import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;

public class ErrorMessageHelper {

    public static IllegalArgumentException invalidFieldValue(String fieldName, String value, ParsingContext context, Collection<String> validValues) {
        return new IllegalArgumentException(String.format("Invalid '%s' value for %s: '%s'. Valid values are: %s.",
                fieldName, context, value, ErrorMessageHelper.quoteList(validValues)));
    }

    public static IllegalArgumentException errorParsingField(String fieldName, ParsingContext context, String expected) {
        return new IllegalArgumentException(String.format("Error parsing '%s' field for %s. Expected %s.",
                fieldName, context, expected));
    }

    public static String quoteList(Collection<String> values) {
        return values.stream().map(s -> String.format("'%s'", s)).collect(Collectors.joining(", "));
    }

    public static String quoteList(List<Enum<?>> enums) {
        return quoteList(enumNames(enums));
    }

    public static Collection<String> enumNames(List<Enum<?>> enums) {
        return enums.stream().map(Enum::name).collect(Collectors.toList());
    }
}
1,139
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/parsing/ParseNodeType.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing;

import com.amazonaws.services.neptune.propertygraph.Label;
import com.fasterxml.jackson.databind.JsonNode;

import java.util.ArrayList;
import java.util.Collection;

public class ParseNodeType {

    private final JsonNode json;
    private final ParsingContext context;

    public ParseNodeType(JsonNode json, ParsingContext context) {
        this.json = json;
        this.context = context;
    }

    public Label parseNodeType() {
        if (json.has("node")) {
            JsonNode node = json.get("node");
            if (node.isTextual()) {
                return new Label(node.textValue());
            } else if (node.isArray()) {
                Collection<String> values = new ArrayList<>();
                for (JsonNode element : node) {
                    values.add(element.textValue());
                }
                return new Label(values);
            } else {
                throw error();
            }
        } else {
            throw error();
        }
    }

    private IllegalArgumentException error() {
        return ErrorMessageHelper.errorParsingField("node", context, "a text value or array of text values");
    }
}
1,140
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/parsing/ParsingContext.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.NeptuneMLSourceDataModel;
import com.amazonaws.services.neptune.propertygraph.Label;
import org.apache.commons.lang.StringUtils;

import java.util.Collection;
import java.util.Collections;

public class ParsingContext {

    private final String description;
    private final Label label;
    private final Collection<String> properties;
    private final NeptuneMLSourceDataModel dataModel;

    private ParsingContext(String description, Label label, Collection<String> properties, NeptuneMLSourceDataModel dataModel) {
        this.description = description;
        this.label = label;
        this.properties = properties;
        this.dataModel = dataModel;
    }

    public ParsingContext(String description, NeptuneMLSourceDataModel dataModel) {
        this(description, null, Collections.emptyList(), dataModel);
    }

    public ParsingContext(String description) {
        this(description, null, Collections.emptyList(), NeptuneMLSourceDataModel.PropertyGraph);
    }

    public ParsingContext withLabel(Label label) {
        return new ParsingContext(description, label, properties, dataModel);
    }

    public ParsingContext withProperties(Collection<String> properties) {
        return new ParsingContext(description, label, properties, dataModel);
    }

    public ParsingContext withProperty(String property) {
        if (StringUtils.isNotEmpty(property)) {
            return new ParsingContext(description, label, Collections.singleton(property), dataModel);
        } else {
            return this;
        }
    }

    @Override
    public String toString() {
        if (label != null && properties.size() == 1) {
            return String.format("%s (%s: %s, %s: %s)",
                    description,
                    dataModel.nodeTypeName(),
                    label.allLabelsAsArrayString(),
                    dataModel.nodeAttributeNameSingular(),
                    properties.iterator().next());
        } else if (label != null && !properties.isEmpty()) {
            return String.format("%s (%s: %s, %s: [%s])",
                    description,
                    dataModel.nodeTypeName(),
                    label.allLabelsAsArrayString(),
                    dataModel.nodeAttributeNamePlural(),
                    String.join(", ", properties));
        } else if (label != null) {
            return String.format("%s (%s: %s)",
                    description,
                    dataModel.nodeTypeName(),
                    label.allLabelsAsArrayString());
        } else {
            return description;
        }
    }
}
1,141
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/profiles/neptune_ml/common/parsing/ParseRange.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.profiles.neptune_ml.common.parsing;

import com.amazonaws.services.neptune.profiles.neptune_ml.common.config.Range;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;

public class ParseRange {

    private final JsonNode json;
    private final String fieldName;
    private final ParsingContext context;

    public ParseRange(JsonNode json, String fieldName, ParsingContext context) {
        this.json = json;
        this.fieldName = fieldName;
        this.context = context;
    }

    public Range parseRange() {
        if (json.has(fieldName) && json.path(fieldName).isArray()) {
            ArrayNode rangeNode = (ArrayNode) json.path(fieldName);
            if (rangeNode.size() != 2) {
                throw error();
            }
            if (!rangeNode.get(0).isNumber() || !rangeNode.get(1).isNumber()) {
                throw error();
            }
            return new Range(rangeNode.get(0).numberValue(), rangeNode.get(1).numberValue());
        } else {
            throw error();
        }
    }

    private IllegalArgumentException error() {
        return ErrorMessageHelper.errorParsingField(fieldName, context, "an array with 2 numeric values");
    }
}
1,142
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/EndpointValidator.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.export;

import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.Collection;

public class EndpointValidator {

    private static final org.slf4j.Logger logger = LoggerFactory.getLogger(EndpointValidator.class);

    public static Collection<String> validate(Collection<String> endpoints) {
        Collection<String> validatedEndpoints = new ArrayList<>();
        for (String endpoint : endpoints) {
            validatedEndpoints.add(validate(endpoint));
        }
        return validatedEndpoints;
    }

    public static String validate(String endpoint) {
        if (endpoint.startsWith("ws://") ||
                endpoint.startsWith("wss://") ||
                endpoint.startsWith("http://") ||
                endpoint.startsWith("https://")) {
            logger.warn("Endpoint cannot contain protocol. Removing protocol: {}", endpoint);
            endpoint = endpoint.substring(endpoint.indexOf("//") + 2);
        }
        if (endpoint.contains(":")) {
            logger.warn("Endpoint cannot contain port. Removing port: {}", endpoint);
            endpoint = endpoint.substring(0, endpoint.indexOf(":"));
        }
        return endpoint;
    }
}
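A minimal sketch of EndpointValidator's normalization: a protocol prefix and port suffix are both stripped, leaving the bare host name. The EndpointValidatorExample class name and the sample endpoint are hypothetical.

import com.amazonaws.services.neptune.export.EndpointValidator;

public class EndpointValidatorExample {
    public static void main(String[] args) {
        // Both the wss:// prefix and the :8182 port are removed (with warnings logged).
        String endpoint = EndpointValidator.validate(
                "wss://my-cluster.cluster-abc.us-east-1.neptune.amazonaws.com:8182");
        System.out.println(endpoint);
        // -> my-cluster.cluster-abc.us-east-1.neptune.amazonaws.com
    }
}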
1,143
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/FeatureToggles.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.export;

import java.util.Collection;

public class FeatureToggles {

    private final Collection<FeatureToggle> features;

    public FeatureToggles(Collection<FeatureToggle> features) {
        this.features = features;
    }

    public boolean containsFeature(FeatureToggle feature) {
        return features.contains(feature);
    }
}
1,144
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/Logger.java
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.export;

public interface Logger {
    void log(String s);
}
1,145
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/NeptuneExportEventHandler.java
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.export;

import com.amazonaws.services.neptune.cluster.Cluster;
import com.amazonaws.services.neptune.io.Directories;
import com.amazonaws.services.neptune.propertygraph.ExportStats;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;

public interface NeptuneExportEventHandler {

    NeptuneExportEventHandler NULL_EVENT_HANDLER = new NeptuneExportEventHandler() {
        @Override
        public void onError() {
            // Do nothing
        }

        @Override
        public void onExportComplete(Directories directories, ExportStats stats, Cluster cluster) throws Exception {
            // Do nothing
        }

        @Override
        public void onExportComplete(Directories directories, ExportStats stats, Cluster cluster, GraphSchema graphSchema) throws Exception {
            // Do nothing
        }
    };

    void onError();

    void onExportComplete(Directories directories, ExportStats stats, Cluster cluster) throws Exception;

    void onExportComplete(Directories directories, ExportStats stats, Cluster cluster, GraphSchema graphSchema) throws Exception;
}
1,146
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/NeptuneExportService.java
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.export;

import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.neptune.profiles.incremental_export.IncrementalExportEventHandler;
import com.amazonaws.services.neptune.profiles.neptune_ml.NeptuneMachineLearningExportEventHandlerV1;
import com.amazonaws.services.neptune.profiles.neptune_ml.NeptuneMachineLearningExportEventHandlerV2;
import com.amazonaws.services.neptune.util.EnvironmentVariableUtils;
import com.amazonaws.services.neptune.util.S3ObjectInfo;
import com.amazonaws.services.neptune.util.TransferManagerWrapper;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.Tag;
import com.amazonaws.services.s3.transfer.Download;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.StringUtils;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.IOException;
import java.util.*;

public class NeptuneExportService {

    private static final org.slf4j.Logger logger = LoggerFactory.getLogger(NeptuneExportService.class);

    public static final List<Tag> NEPTUNE_EXPORT_TAGS = Collections.singletonList(new Tag("application", "neptune-export"));
    public static final String NEPTUNE_ML_PROFILE_NAME = "neptune_ml";
    public static final String INCREMENTAL_EXPORT_PROFILE_NAME = "incremental_export";
    public static final int MAX_FILE_DESCRIPTOR_COUNT = 9000;

    private final String cmd;
    private final String localOutputPath;
    private final boolean cleanOutputPath;
    private final String outputS3Path;
    private final boolean createExportSubdirectory;
    private final boolean overwriteExisting;
    private final boolean uploadToS3OnError;
    private final String configFileS3Path;
    private final String queriesFileS3Path;
    private final String completionFileS3Path;
    private final ObjectNode completionFilePayload;
    private final ObjectNode additionalParams;
    private final int maxConcurrency;
    private final String s3Region;
    private final int maxFileDescriptorCount;
    private final String sseKmsKeyId;
    private final AWSCredentialsProvider s3CredentialsProvider;

    public NeptuneExportService(String cmd,
                                String localOutputPath,
                                boolean cleanOutputPath,
                                String outputS3Path,
                                boolean createExportSubdirectory,
                                boolean overwriteExisting,
                                boolean uploadToS3OnError,
                                String configFileS3Path,
                                String queriesFileS3Path,
                                String completionFileS3Path,
                                ObjectNode completionFilePayload,
                                ObjectNode additionalParams,
                                int maxConcurrency,
                                String s3Region,
                                int maxFileDescriptorCount,
                                String sseKmsKeyId,
                                AWSCredentialsProvider s3CredentialsProvider) {
        this.cmd = cmd;
        this.localOutputPath = localOutputPath;
        this.cleanOutputPath = cleanOutputPath;
        this.outputS3Path = outputS3Path;
        this.createExportSubdirectory = createExportSubdirectory;
        this.overwriteExisting = overwriteExisting;
        this.uploadToS3OnError = uploadToS3OnError;
        this.configFileS3Path = configFileS3Path;
        this.queriesFileS3Path = queriesFileS3Path;
        this.completionFileS3Path = completionFileS3Path;
        this.completionFilePayload = completionFilePayload;
        this.additionalParams = additionalParams;
        this.maxConcurrency = maxConcurrency;
        this.s3Region = s3Region;
        this.maxFileDescriptorCount = maxFileDescriptorCount;
        this.sseKmsKeyId = sseKmsKeyId;
        this.s3CredentialsProvider = s3CredentialsProvider;
    }

    public S3ObjectInfo execute() throws IOException {

        Args args;
        try {
            args = new Args(cmd);

            if (StringUtils.isNotEmpty(configFileS3Path)) {
                args.removeOptions("-c", "--config-file");
            }
            if (StringUtils.isNotEmpty(queriesFileS3Path)) {
                args.removeOptions("--queries");
            }

            if (args.contains("create-pg-config") ||
                    args.contains("export-pg") ||
                    args.contains("export-pg-from-config") ||
                    args.contains("export-pg-from-queries") ||
                    args.contains("export-rdf")) {

                args.removeOptions("-d", "--dir");
                args.addOption("-d", new File(localOutputPath, "output").getAbsolutePath());

                if (maxConcurrency > 0 && !args.contains("--clone-cluster-max-concurrency")) {
                    args.addOption("--clone-cluster-max-concurrency", String.valueOf(maxConcurrency));
                }

                if (!args.contains("--clone-cluster-correlation-id")) {
                    String correlationId = EnvironmentVariableUtils.getOptionalEnv("AWS_BATCH_JOB_ID", null);
                    if (StringUtils.isNotEmpty(correlationId)) {
                        args.addOption("--clone-cluster-correlation-id", correlationId);
                    }
                }
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

        try (TransferManagerWrapper transferManager = new TransferManagerWrapper(s3Region, s3CredentialsProvider)) {
            if (cleanOutputPath) {
                clearTempFiles();
            }
            if (StringUtils.isNotEmpty(configFileS3Path)) {
                updateArgs(args, "--config-file", downloadFile(transferManager.get(), configFileS3Path));
            }
            if (StringUtils.isNotEmpty(queriesFileS3Path)) {
                updateArgs(args, "--queries", downloadFile(transferManager.get(), queriesFileS3Path));
            }
        }

        if (additionalParams.has(NEPTUNE_ML_PROFILE_NAME) && (!args.contains("--profile", NEPTUNE_ML_PROFILE_NAME))) {
            args.addOption("--profile", NEPTUNE_ML_PROFILE_NAME);
        }

        Collection<String> profiles = args.getOptionValues("--profile");

        if (!createExportSubdirectory && !overwriteExisting) {
            checkS3OutputIsEmpty();
        }

        EventHandlerCollection eventHandlerCollection = new EventHandlerCollection();
        Collection<CompletionFileWriter> completionFileWriters = new ArrayList<>();

        ExportToS3NeptuneExportEventHandler.S3UploadParams s3UploadParams =
                new ExportToS3NeptuneExportEventHandler.S3UploadParams()
                        .setCreateExportSubdirectory(createExportSubdirectory)
                        .setOverwriteExisting(overwriteExisting);

        ExportToS3NeptuneExportEventHandler exportToS3EventHandler = new ExportToS3NeptuneExportEventHandler(
                localOutputPath,
                outputS3Path,
                s3Region,
                completionFileS3Path,
                completionFilePayload,
                uploadToS3OnError,
                s3UploadParams,
                profiles,
                completionFileWriters,
                sseKmsKeyId,
                s3CredentialsProvider);

        eventHandlerCollection.addHandler(exportToS3EventHandler);

        if (profiles.contains(NEPTUNE_ML_PROFILE_NAME)) {
            JsonNode neptuneMlNode = additionalParams.path(NEPTUNE_ML_PROFILE_NAME);
            boolean useV2 = args.contains("--feature-toggle", FeatureToggle.NeptuneML_V2.name()) ||
                    (neptuneMlNode.has("version") && neptuneMlNode.get("version").textValue().startsWith("v2."));
            boolean useV1 = (neptuneMlNode.has("version") && neptuneMlNode.get("version").textValue().startsWith("v1."));
            if (useV1) {
                NeptuneMachineLearningExportEventHandlerV1 neptuneMlEventHandler =
                        new NeptuneMachineLearningExportEventHandlerV1(
                                outputS3Path,
                                s3Region,
                                createExportSubdirectory,
                                additionalParams,
                                args,
                                profiles,
                                sseKmsKeyId,
                                s3CredentialsProvider);
                eventHandlerCollection.addHandler(neptuneMlEventHandler);
            } else {
                NeptuneMachineLearningExportEventHandlerV2 neptuneMlEventHandler =
                        new NeptuneMachineLearningExportEventHandlerV2(
                                outputS3Path,
                                s3Region,
                                createExportSubdirectory,
                                additionalParams,
                                args,
                                profiles,
                                sseKmsKeyId,
                                s3CredentialsProvider);
                eventHandlerCollection.addHandler(neptuneMlEventHandler);
            }
        }

        if (profiles.contains(INCREMENTAL_EXPORT_PROFILE_NAME)) {
            IncrementalExportEventHandler incrementalExportEventHandler = new IncrementalExportEventHandler(additionalParams);
            completionFileWriters.add(incrementalExportEventHandler);
            eventHandlerCollection.addHandler(incrementalExportEventHandler);
        }

        /**
         * We are removing a buffer of 1000 for maxFileDescriptorCount used at {@link com.amazonaws.services.neptune.propertygraph.io.LabelWriters#put}
         * since the value received from neptune-export service is set as the `nofile` ulimit in the AWS Batch
         * container properties and there might be other processes on the container having open files.
         * This ensures we close the leastRecentlyAccessed files before exceeding the hard limit for `nofile` ulimit.
         */
        final int maxFileDescriptorCountAfterRemovingBuffer = Math.max(maxFileDescriptorCount - 1000, MAX_FILE_DESCRIPTOR_COUNT);

        eventHandlerCollection.onBeforeExport(args, s3UploadParams);

        logger.info("Args after service init: {}", String.join(" ", args.values()));

        new NeptuneExportRunner(args.values(), eventHandlerCollection, false, maxFileDescriptorCountAfterRemovingBuffer).run();

        return exportToS3EventHandler.result();
    }

    private void checkS3OutputIsEmpty() {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        S3ObjectInfo s3ObjectInfo = new S3ObjectInfo(outputS3Path);
        ObjectListing listing = s3.listObjects(
                new ListObjectsRequest(
                        s3ObjectInfo.bucket(),
                        s3ObjectInfo.key(),
                        null,
                        null,
                        1));
        if (!listing.getObjectSummaries().isEmpty()) {
            throw new IllegalStateException(String.format(
                    "S3 destination contains existing objects: %s. Set 'overwriteExisting' parameter to 'true' to allow overwriting existing objects.",
                    outputS3Path));
        }
    }

    private void clearTempFiles() throws IOException {
        File directory = new File(localOutputPath);
        if (directory.exists() && directory.isDirectory()) {
            FileUtils.deleteDirectory(directory);
        }
    }

    private void updateArgs(Args args, String option, Object value) {
        if (value != null) {
            args.addOption(option, value.toString());
        }
    }

    private File downloadFile(TransferManager transferManager, String s3Path) {
        if (StringUtils.isEmpty(s3Path)) {
            return null;
        }

        S3ObjectInfo configFileS3ObjectInfo = new S3ObjectInfo(s3Path);
        File file = configFileS3ObjectInfo.createDownloadFile(localOutputPath);

        logger.info("Bucket: " + configFileS3ObjectInfo.bucket());
        logger.info("Key : " + configFileS3ObjectInfo.key());
        logger.info("File : " + file);

        Download download = transferManager.download(
                configFileS3ObjectInfo.bucket(),
                configFileS3ObjectInfo.key(),
                file);
        try {
            download.waitForCompletion();
        } catch (InterruptedException e) {
            logger.warn(e.getMessage());
            Thread.currentThread().interrupt();
        }

        return file.getAbsoluteFile();
    }
}
1,147
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/EventHandlerCollection.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.export;

import com.amazonaws.services.neptune.cluster.Cluster;
import com.amazonaws.services.neptune.io.Directories;
import com.amazonaws.services.neptune.propertygraph.ExportStats;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;

public class EventHandlerCollection implements NeptuneExportServiceEventHandler {

    private final List<NeptuneExportEventHandler> exportHandlers = new ArrayList<>();
    private final List<NeptuneExportServiceEventHandler> serviceHandlers = new ArrayList<>();

    private static final org.slf4j.Logger logger = LoggerFactory.getLogger(EventHandlerCollection.class);

    public <T extends NeptuneExportEventHandler> void addHandler(T handler) {
        exportHandlers.add(handler);
        if (NeptuneExportServiceEventHandler.class.isAssignableFrom(handler.getClass())) {
            serviceHandlers.add((NeptuneExportServiceEventHandler) handler);
        }
    }

    @Override
    public void onError() {
        for (NeptuneExportEventHandler handler : exportHandlers) {
            try {
                handler.onError();
            } catch (Exception e) {
                logger.warn("Error while handling export error with {}", handler.getClass().getSimpleName(), e);
            }
        }
    }

    @Override
    public void onExportComplete(Directories directories, ExportStats stats, Cluster cluster) throws Exception {
        boolean error = false;
        for (int i = exportHandlers.size(); i-- > 0; ) {
            NeptuneExportEventHandler handler = exportHandlers.get(i);
            try {
                handler.onExportComplete(directories, stats, cluster);
            } catch (Exception e) {
                error = true;
                logger.error("Error while executing {}", handler.getClass().getSimpleName(), e);
            }
        }
        if (error) {
            throw new RuntimeException("One or more errors occurred while executing onExportComplete event handlers. See the logs for details.");
        }
    }

    @Override
    public void onExportComplete(Directories directories, ExportStats stats, Cluster cluster, GraphSchema graphSchema) throws Exception {
        boolean error = false;
        for (int i = exportHandlers.size(); i-- > 0; ) {
            NeptuneExportEventHandler handler = exportHandlers.get(i);
            try {
                handler.onExportComplete(directories, stats, cluster, graphSchema);
            } catch (Exception e) {
                error = true;
                logger.error("Error while executing {}", handler.getClass().getSimpleName(), e);
            }
        }
        if (error) {
            throw new RuntimeException("One or more errors occurred while executing onExportComplete event handlers. See the logs for details.");
        }
    }

    @Override
    public void onBeforeExport(Args args, ExportToS3NeptuneExportEventHandler.S3UploadParams s3UploadParams) {
        boolean error = false;
        for (NeptuneExportServiceEventHandler handler : serviceHandlers) {
            try {
                handler.onBeforeExport(args, s3UploadParams);
            } catch (Exception e) {
                error = true;
                logger.error("Error while executing {}", handler.getClass().getSimpleName(), e);
            }
        }
        if (error) {
            throw new RuntimeException("One or more errors occurred while executing onBeforeExport event handlers. See the logs for details.");
        }
    }
}
1,148
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/ParamConverter.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.export;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;

import java.util.Iterator;

public class ParamConverter {

    private static final String REGEX = "([a-z])([A-Z]+)";
    private static final String REPLACEMENT = "$1-$2";

    public static String toCliArg(String v) {
        return v.replaceAll(REGEX, REPLACEMENT).toLowerCase();
    }

    public static String singularize(String v) {
        if (v.endsWith("ies")) {
            return String.format("%sy", v.substring(0, v.length() - 3));
        } else if (v.endsWith("s")) {
            return v.substring(0, v.length() - 1);
        } else {
            return v;
        }
    }

    public static Args fromJson(String cmd, JsonNode json) {
        Args args = new Args(cmd);

        ObjectNode params = (ObjectNode) json;
        Iterator<String> paramNamesIterator = params.fieldNames();

        while (paramNamesIterator.hasNext()) {
            String paramName = paramNamesIterator.next();
            String argName = toCliArg(paramName);
            JsonNode paramNode = params.get(paramName);

            if (paramNode.isArray()) {
                argName = singularize(argName);
                ArrayNode arrayNode = (ArrayNode) paramNode;
                for (JsonNode jsonNode : arrayNode) {
                    addArg(argName, jsonNode, args);
                }
            } else {
                addArg(argName, paramNode, args);
            }
        }

        return args;
    }

    private static void addArg(String argName, JsonNode argValue, Args args) {
        String prefix = argName.startsWith("-") ? "" : "--";
        argName = String.format("%s%s", prefix, argName);

        if (argValue.isBoolean()) {
            if (argValue.asBoolean()) {
                args.addFlag(argName);
            }
        } else if (argValue.isObject()) {
            String value = String.format("'%s'", argValue.toPrettyString());
            args.addOption(argName, value);
        } else if (argValue.isTextual()) {
            String value = String.format("'%s'", argValue.textValue());
            args.addOption(argName, value);
        } else {
            String value = argValue.toString();
            args.addOption(argName, value);
        }
    }
}
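A minimal sketch of the name conversions ParamConverter applies when turning JSON parameters into CLI arguments: camelCase names become kebab-case, and plural array parameters are singularized so each element becomes its own flag. The ParamConverterExample class name is hypothetical.

import com.amazonaws.services.neptune.export.ParamConverter;

public class ParamConverterExample {
    public static void main(String[] args) {
        // camelCase JSON parameter names become kebab-case CLI argument names.
        System.out.println(ParamConverter.toCliArg("cloneClusterMaxConcurrency"));
        // -> clone-cluster-max-concurrency

        // Plural array parameter names are singularized, one option per element.
        System.out.println(ParamConverter.singularize("queries"));   // -> query
        System.out.println(ParamConverter.singularize("endpoints")); // -> endpoint
    }
}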
1,149
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/JobSize.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.export;

public enum JobSize {
    small {
        @Override
        public int maxConcurrency() {
            return 8;
        }
    },
    medium {
        @Override
        public int maxConcurrency() {
            return 32;
        }
    },
    large {
        @Override
        public int maxConcurrency() {
            return 64;
        }
    },
    xlarge {
        @Override
        public int maxConcurrency() {
            return 96;
        }
    };

    public static JobSize parse(String value) {
        try {
            return JobSize.valueOf(value.toLowerCase());
        } catch (IllegalArgumentException e) {
            return small;
        }
    }

    public abstract int maxConcurrency();
}
1,150
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/NeptuneExportRunner.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.export; import com.amazonaws.services.neptune.NeptuneExportCli; import com.amazonaws.services.neptune.NeptuneExportCommand; import com.amazonaws.services.neptune.NeptuneExportEventHandlerHost; import com.amazonaws.services.neptune.util.GitProperties; import org.apache.commons.lang.StringUtils; import static com.amazonaws.services.neptune.export.NeptuneExportService.MAX_FILE_DESCRIPTOR_COUNT; public class NeptuneExportRunner { private final String[] args; private final NeptuneExportEventHandler eventHandler; private final boolean isCliInvocation; private final int maxFileDescriptorCount; public NeptuneExportRunner(String[] args) { this(args, NeptuneExportEventHandler.NULL_EVENT_HANDLER, true, MAX_FILE_DESCRIPTOR_COUNT); } public NeptuneExportRunner(String[] args, NeptuneExportEventHandler eventHandler, boolean isCliInvocation, int maxFileDescriptorCount) { this.args = args; this.eventHandler = eventHandler; this.isCliInvocation = isCliInvocation; this.maxFileDescriptorCount = maxFileDescriptorCount; } public void run() { System.err.println(String.format("neptune-export.jar: %s", GitProperties.fromResource())); Args argsCollection = new Args(this.args); if (argsCollection.contains("--log-level")){ String logLevel = argsCollection.getFirstOptionValue("--log-level"); if (StringUtils.isNotEmpty(logLevel)){ System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", logLevel); } } com.github.rvesse.airline.Cli<Runnable> cli = new com.github.rvesse.airline.Cli<>(NeptuneExportCli.class); try { Runnable cmd = cli.parse(this.args); if (NeptuneExportEventHandlerHost.class.isAssignableFrom(cmd.getClass())) { NeptuneExportEventHandlerHost eventHandlerHost = (NeptuneExportEventHandlerHost) cmd; eventHandlerHost.setEventHandler(eventHandler); } if (NeptuneExportCommand.class.isAssignableFrom(cmd.getClass())) { NeptuneExportCommand exportCommand = (NeptuneExportCommand) cmd; exportCommand.setIsCliInvocation(isCliInvocation); exportCommand.setMaxFileDescriptorCount(maxFileDescriptorCount); } cmd.run(); } catch (Exception e) { System.err.println(e.getMessage()); System.err.println(); System.exit(-1); } } }
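Because the runner simply hands its argument vector to the airline CLI parser, it can be embedded programmatically. A hedged sketch: only "export-pg" (the default command name in this package) and "--log-level" (handled by the runner itself) are taken from this codebase; any further options are defined by the individual commands and omitted here.

String[] cliArgs = new String[]{"export-pg", "--log-level", "debug" /* plus command-specific options */};
new NeptuneExportRunner(cliArgs).run();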
1,151
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/NeptuneExportServiceEventHandler.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.export; public interface NeptuneExportServiceEventHandler extends NeptuneExportEventHandler { void onBeforeExport(Args args, ExportToS3NeptuneExportEventHandler.S3UploadParams s3UploadParams); }
1,152
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/CompletionFileWriter.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.export; import com.fasterxml.jackson.databind.node.ObjectNode; public interface CompletionFileWriter { void updateCompletionFile(ObjectNode completionFilePayload); }
1,153
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/ExportToS3NeptuneExportEventHandler.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.export; import com.amazonaws.AmazonClientException; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.event.ProgressEvent; import com.amazonaws.event.ProgressListener; import com.amazonaws.services.neptune.cluster.Cluster; import com.amazonaws.services.neptune.io.Directories; import com.amazonaws.services.neptune.propertygraph.ExportStats; import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema; import com.amazonaws.services.neptune.util.CheckedActivity; import com.amazonaws.services.neptune.util.S3ObjectInfo; import com.amazonaws.services.neptune.util.Timer; import com.amazonaws.services.neptune.util.TransferManagerWrapper; import com.amazonaws.services.s3.Headers; import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.ObjectTagging; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.SSEAlgorithm; import com.amazonaws.services.s3.model.Tag; import com.amazonaws.services.s3.transfer.*; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectWriter; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.commons.io.FileUtils; import org.apache.commons.io.FilenameUtils; import org.apache.commons.lang.StringUtils; import org.slf4j.LoggerFactory; import java.io.*; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.UUID; import java.util.concurrent.atomic.AtomicReference; import java.util.regex.Matcher; import java.util.regex.Pattern; import static com.amazonaws.services.neptune.export.NeptuneExportService.NEPTUNE_EXPORT_TAGS; import static java.nio.charset.StandardCharsets.UTF_8; public class ExportToS3NeptuneExportEventHandler implements NeptuneExportEventHandler { public static class S3UploadParams { private boolean createExportSubdirectory = true; private boolean overwriteExisting = false; public boolean createExportSubdirectory() { return createExportSubdirectory; } public S3UploadParams setCreateExportSubdirectory(boolean createExportSubdirectory) { this.createExportSubdirectory = createExportSubdirectory; return this; } public boolean overwriteExisting() { return overwriteExisting; } public S3UploadParams setOverwriteExisting(boolean overwriteExisting) { this.overwriteExisting = overwriteExisting; return this; } @Override public String toString() { return "{" + "createExportSubdirectory=" + createExportSubdirectory + ", overwriteExisting=" + overwriteExisting + '}'; } } public static ObjectTagging createObjectTags(Collection<String> profiles) { List<Tag> tags = new ArrayList<>(NEPTUNE_EXPORT_TAGS); if (!profiles.isEmpty()) { String profilesTagValue = String.join(":", profiles); tags.add(new Tag("neptune-export:profiles", profilesTagValue)); } 
return new ObjectTagging(tags); } private static final org.slf4j.Logger logger = LoggerFactory.getLogger(ExportToS3NeptuneExportEventHandler.class); private final String localOutputPath; private final String outputS3Path; private final String s3Region; private final String completionFileS3Path; private final ObjectNode completionFilePayload; private final boolean uploadToS3OnError; private final S3UploadParams s3UploadParams; private final Collection<String> profiles; private final Collection<CompletionFileWriter> completionFileWriters; private final AtomicReference<S3ObjectInfo> result = new AtomicReference<>(); private static final Pattern STATUS_CODE_5XX_PATTERN = Pattern.compile("Status Code: (5\\d+)"); private final String sseKmsKeyId; private final AWSCredentialsProvider s3CredentialsProvider; public ExportToS3NeptuneExportEventHandler(String localOutputPath, String outputS3Path, String s3Region, String completionFileS3Path, ObjectNode completionFilePayload, boolean uploadToS3OnError, S3UploadParams s3UploadParams, Collection<String> profiles, Collection<CompletionFileWriter> completionFileWriters, String sseKmsKeyId, AWSCredentialsProvider s3CredentialsProvider) { this.localOutputPath = localOutputPath; this.outputS3Path = outputS3Path; this.s3Region = s3Region; this.completionFileS3Path = completionFileS3Path; this.completionFilePayload = completionFilePayload; this.uploadToS3OnError = uploadToS3OnError; this.s3UploadParams = s3UploadParams; this.profiles = profiles; this.completionFileWriters = completionFileWriters; this.sseKmsKeyId = sseKmsKeyId; this.s3CredentialsProvider = s3CredentialsProvider; } @Override public void onExportComplete(Directories directories, ExportStats stats, Cluster cluster) throws Exception { onExportComplete(directories, stats, cluster, new GraphSchema()); } @Override public void onExportComplete(Directories directories, ExportStats stats, Cluster cluster, GraphSchema graphSchema) throws Exception { try { long size = Files.walk(directories.rootDirectory()).mapToLong(p -> p.toFile().length()).sum(); logger.info("Total size of exported files: {}", FileUtils.byteCountToDisplaySize(size)); } catch (Exception e) { // Ignore } if (StringUtils.isEmpty(outputS3Path)) { return; } logger.info("S3 upload params: {}", s3UploadParams); try (TransferManagerWrapper transferManager = new TransferManagerWrapper(s3Region, s3CredentialsProvider)) { File outputDirectory = directories.rootDirectory().toFile(); S3ObjectInfo outputS3ObjectInfo = calculateOutputS3Path(outputDirectory); Timer.timedActivity("uploading files to S3", (CheckedActivity.Runnable) () -> { deleteS3Directories(directories, outputS3ObjectInfo); uploadExportFilesToS3(transferManager.get(), outputDirectory, outputS3ObjectInfo); uploadCompletionFileToS3(transferManager.get(), outputDirectory, outputS3ObjectInfo, stats, graphSchema); }); result.set(outputS3ObjectInfo); } } public S3ObjectInfo result() { return result.get(); } @Override public void onError() { if (!uploadToS3OnError) { return; } logger.warn("Uploading results of failed export to S3"); if (StringUtils.isEmpty(outputS3Path)) { logger.warn("S3 output path is empty"); return; } try { Path outputPath = Paths.get(localOutputPath); long size = Files.walk(outputPath).mapToLong(p -> p.toFile().length()).sum(); logger.warn("Total size of failed export files: {}", FileUtils.byteCountToDisplaySize(size)); try (TransferManagerWrapper transferManager = new TransferManagerWrapper(s3Region, s3CredentialsProvider)) { String s3Suffix = 
UUID.randomUUID().toString().replace("-", ""); File outputDirectory = outputPath.toFile(); S3ObjectInfo outputS3ObjectInfo = calculateOutputS3Path(outputDirectory) .replaceOrAppendKey("/tmp", "/failed") .withNewKeySuffix(s3Suffix); Timer.timedActivity("uploading failed export files to S3", (CheckedActivity.Runnable) () -> { uploadExportFilesToS3(transferManager.get(), outputDirectory, outputS3ObjectInfo); uploadGcLogToS3(transferManager.get(), outputDirectory, outputS3ObjectInfo); }); logger.warn("Failed export S3 location: {}", outputS3ObjectInfo.toString()); } } catch (Exception e) { logger.error("Failed to upload failed export files to S3", e); } } private void uploadGcLogToS3(TransferManager transferManager, File directory, S3ObjectInfo outputS3ObjectInfo) throws IOException { File gcLog = new File(directory, "./../gc.log"); if (!gcLog.exists()) { logger.warn("Ignoring request to upload GC log to S3 because GC log does not exist"); return; } S3ObjectInfo gcLogS3ObjectInfo = outputS3ObjectInfo.withNewKeySuffix("gc.log"); try (InputStream inputStream = new FileInputStream(gcLog)) { PutObjectRequest putObjectRequest = new PutObjectRequest(gcLogS3ObjectInfo.bucket(), gcLogS3ObjectInfo.key(), inputStream, S3ObjectInfo.createObjectMetadata(gcLog.length(), sseKmsKeyId)).withTagging(createObjectTags(profiles)); Upload upload = transferManager.upload(putObjectRequest); upload.waitForUploadResult(); } catch (InterruptedException e) { logger.warn(e.getMessage()); Thread.currentThread().interrupt(); } } private S3ObjectInfo calculateOutputS3Path(File outputDirectory) { S3ObjectInfo outputBaseS3ObjectInfo = new S3ObjectInfo(outputS3Path); if (s3UploadParams.createExportSubdirectory()) { return outputBaseS3ObjectInfo.withNewKeySuffix(outputDirectory.getName()); } else { return outputBaseS3ObjectInfo; } } private void uploadCompletionFileToS3(TransferManager transferManager, File directory, S3ObjectInfo outputS3ObjectInfo, ExportStats stats, GraphSchema graphSchema) throws IOException { if (StringUtils.isEmpty(completionFileS3Path)) { return; } if (directory == null || !directory.exists()) { logger.warn("Ignoring request to upload completion file to S3 because directory from which to upload files does not exist"); return; } String completionFilename = s3UploadParams.createExportSubdirectory() ? 
directory.getName() : String.valueOf(System.currentTimeMillis()); File completionFile = new File(localOutputPath, completionFilename + ".json"); ObjectNode neptuneExportNode = JsonNodeFactory.instance.objectNode(); completionFilePayload.set("neptuneExport", neptuneExportNode); neptuneExportNode.put("outputS3Path", outputS3ObjectInfo.toString()); stats.addTo(neptuneExportNode, graphSchema); for (CompletionFileWriter completionFileWriter : completionFileWriters) { completionFileWriter.updateCompletionFile(completionFilePayload); } try (Writer writer = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(completionFile), UTF_8))) { ObjectWriter objectWriter = new ObjectMapper().writer().withDefaultPrettyPrinter(); writer.write(objectWriter.writeValueAsString(completionFilePayload)); } S3ObjectInfo completionFileS3ObjectInfo = new S3ObjectInfo(completionFileS3Path).replaceOrAppendKey( "_COMPLETION_ID_", FilenameUtils.getBaseName(completionFile.getName()), completionFile.getName()); logger.info("Uploading completion file to {}", completionFileS3ObjectInfo.key()); try (InputStream inputStream = new FileInputStream(completionFile)) { PutObjectRequest putObjectRequest = new PutObjectRequest(completionFileS3ObjectInfo.bucket(), completionFileS3ObjectInfo.key(), inputStream, S3ObjectInfo.createObjectMetadata(completionFile.length(), sseKmsKeyId)) .withTagging(createObjectTags(profiles)); Upload upload = transferManager.upload(putObjectRequest); upload.waitForUploadResult(); } catch (InterruptedException e) { logger.warn(e.getMessage()); Thread.currentThread().interrupt(); } } private void uploadExportFilesToS3(TransferManager transferManager, File directory, S3ObjectInfo outputS3ObjectInfo) { if (directory == null || !directory.exists()) { logger.warn("Ignoring request to upload files to S3 because upload directory from which to upload files does not exist"); return; } boolean allowRetry = true; int retryCount = 0; while (allowRetry){ try { ObjectMetadataProvider metadataProvider = (file, objectMetadata) -> { S3ObjectInfo.createObjectMetadata(file.length(), sseKmsKeyId, objectMetadata); }; ObjectTaggingProvider taggingProvider = uploadContext -> createObjectTags(profiles); logger.info("Uploading export files to {}", outputS3ObjectInfo.toString()); MultipleFileUpload upload = transferManager.uploadDirectory( outputS3ObjectInfo.bucket(), outputS3ObjectInfo.key(), directory, true, metadataProvider, taggingProvider); AmazonClientException amazonClientException = upload.waitForException(); if (amazonClientException != null){ String errorMessage = amazonClientException.getMessage(); Matcher exMsgStatusCodeMatcher = STATUS_CODE_5XX_PATTERN.matcher(errorMessage); logger.error("Upload to S3 failed: {}", errorMessage); // only retry if exception is retryable, the status code is 5xx, and we have retry counts left if (amazonClientException.isRetryable() && exMsgStatusCodeMatcher.find() && retryCount <= 2) { retryCount++; logger.info("Retrying upload to S3 [RetryCount: {}]", retryCount); } else { allowRetry = false; logger.warn("Cancelling upload to S3 [RetryCount: {}]", retryCount); throw new RuntimeException(String.format("Upload to S3 failed [Directory: %s, S3 location: %s, Reason: %s, RetryCount: %s]", directory, outputS3ObjectInfo, errorMessage, retryCount)); } } else { allowRetry = false; } } catch (InterruptedException e) { logger.warn(e.getMessage()); Thread.currentThread().interrupt(); } } } private void deleteS3Directories(Directories directories, S3ObjectInfo outputS3ObjectInfo) { if 
(!s3UploadParams.overwriteExisting()) { return; } List<S3ObjectInfo> leafS3Directories = new ArrayList<>(); Path rootDirectory = directories.rootDirectory(); for (Path subdirectory : directories.subdirectories()) { String newKey = rootDirectory.relativize(subdirectory).toString(); leafS3Directories.add(outputS3ObjectInfo.withNewKeySuffix(newKey)); } /* NOTE (hedged completion): as dumped, this method collected the leaf directory prefixes but never deleted anything, making it a no-op; the loop below is an assumed implementation that removes every object under each collected prefix using an S3 client built from this handler's region and credentials. */ com.amazonaws.services.s3.AmazonS3ClientBuilder clientBuilder = com.amazonaws.services.s3.AmazonS3ClientBuilder.standard(); if (StringUtils.isNotEmpty(s3Region)) { clientBuilder.withRegion(s3Region); } if (s3CredentialsProvider != null) { clientBuilder.withCredentials(s3CredentialsProvider); } com.amazonaws.services.s3.AmazonS3 s3 = clientBuilder.build(); for (S3ObjectInfo leafS3Directory : leafS3Directories) { com.amazonaws.services.s3.model.ObjectListing listing = s3.listObjects(leafS3Directory.bucket(), leafS3Directory.key()); while (true) { for (com.amazonaws.services.s3.model.S3ObjectSummary summary : listing.getObjectSummaries()) { s3.deleteObject(leafS3Directory.bucket(), summary.getKey()); } if (!listing.isTruncated()) { break; } listing = s3.listNextBatchOfObjects(listing); } } } }
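A small illustration of the static tagging helper above (the profile names are hypothetical):

import java.util.Arrays;
import com.amazonaws.services.s3.model.ObjectTagging;

// Produces the standard NEPTUNE_EXPORT_TAGS plus one "neptune-export:profiles" tag
// whose value is the colon-joined profile list, i.e. "neptune_ml:incremental".
ObjectTagging tagging = ExportToS3NeptuneExportEventHandler.createObjectTags(
        Arrays.asList("neptune_ml", "incremental"));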
1,154
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/Args.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.export; import org.codehaus.plexus.util.cli.CommandLineUtils; import java.util.*; public class Args { private final List<String> args = new ArrayList<>(); public Args(String[] args) { this.args.addAll(Arrays.asList(args)); } public Args(String cmd) { String[] values; try { values = CommandLineUtils.translateCommandline(cmd); } catch (Exception e) { throw new RuntimeException(e); } args.addAll(Arrays.asList(values)); } public void removeOptions(String... options) { for (String option : options) { int index = args.indexOf(option); while (index >= 0) { args.remove(index + 1); args.remove(index); index = args.indexOf(option); } } } public void removeFlags(String... flags) { for (String flag : flags) { int index = args.indexOf(flag); while (index >= 0) { args.remove(index); index = args.indexOf(flag); } } } public void addOption(String option, String value) { args.add(option); args.add(value); } public boolean contains(String name) { for (String arg : args) { if (arg.equals(name)) { return true; } } return false; } public boolean contains(String name, String value) { Iterator<String> iterator = args.iterator(); while (iterator.hasNext()) { String arg = iterator.next(); if (arg.equals(name)) { if (iterator.hasNext() && iterator.next().equals(value)) { return true; } } } return false; } public String[] values() { return args.toArray(new String[]{}); } @Override public String toString() { return String.join(" ", args); } public void addFlag(String flag) { args.add(flag); } public void replace(String original, String replacement) { args.replaceAll(s -> { if (s.equals(original)) { return replacement; } else { return s; } }); } public boolean containsAny(String... values) { for (String value : values) { if (args.contains(value)) { return true; } } return false; } public String getFirstOptionValue(String name) { Iterator<String> iterator = args.iterator(); while (iterator.hasNext()) { String arg = iterator.next(); if (arg.equals(name)) { if (iterator.hasNext()) { return iterator.next(); } } } return null; } public Collection<String> getOptionValues(String name) { Collection<String> values = new ArrayList<>(); Iterator<String> iterator = args.iterator(); while (iterator.hasNext()) { String arg = iterator.next(); if (arg.equals(name)) { if (iterator.hasNext()) { values.add(iterator.next()); } } } return values; } }
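The list-backed model above makes in-place surgery on a command line straightforward; a short sketch (the option names are illustrative, not verified flags):

Args cli = new Args("export-pg --endpoint host-a --use-iam-auth --concurrency 4");
cli.removeOptions("--concurrency");            // removes the option and its value
cli.removeFlags("--use-iam-auth");             // removes the bare flag
cli.addOption("--concurrency", "8");
cli.replace("host-a", "host-b");
cli.getFirstOptionValue("--endpoint");         // "host-b"
cli.toString();                                // "export-pg --endpoint host-b --concurrency 8"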
1,155
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/FeatureToggle.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.export; public enum FeatureToggle { FilterByPropertyKeys, ExportByIndividualLabels, NeptuneML_V2, Edge_Features, Inject_Fault, Simulate_Cloned_Cluster, Keep_Rewritten_Files, Infer_RDF_Prefixes, }
1,156
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/NeptuneExportLambda.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.export; import java.io.BufferedWriter; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Writer; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.neptune.util.AWSCredentialsUtil; import org.apache.commons.io.IOUtils; import org.apache.commons.lang.StringUtils; import com.amazonaws.services.lambda.runtime.Context; import com.amazonaws.services.lambda.runtime.RequestStreamHandler; import com.amazonaws.services.neptune.util.EnvironmentVariableUtils; import com.amazonaws.services.neptune.util.S3ObjectInfo; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import static com.amazonaws.services.neptune.RunNeptuneExportSvc.DEFAULT_MAX_FILE_DESCRIPTOR_COUNT; import static java.nio.charset.StandardCharsets.UTF_8; public class NeptuneExportLambda implements RequestStreamHandler { public static final String TEMP_PATH = "/tmp/neptune"; private final String localOutputPath; private final boolean cleanOutputPath; private final int maxFileDescriptorCount; public NeptuneExportLambda() { this(TEMP_PATH, true, DEFAULT_MAX_FILE_DESCRIPTOR_COUNT); } public NeptuneExportLambda(String localOutputPath, boolean cleanOutputPath, int maxFileDescriptorCount) { this.localOutputPath = localOutputPath; this.cleanOutputPath = cleanOutputPath; this.maxFileDescriptorCount = maxFileDescriptorCount; } @Override public void handleRequest(InputStream inputStream, OutputStream outputStream, Context context) throws IOException { Logger logger = s -> context.getLogger().log(s); ObjectMapper objectMapper = new ObjectMapper(); JsonNode json = objectMapper.readTree(IOUtils.toString(inputStream, UTF_8.name())); String cmd = json.has("command") ? json.path("command").textValue() : EnvironmentVariableUtils.getOptionalEnv("COMMAND", "export-pg"); ObjectNode params = json.has("params") ? (ObjectNode) json.get("params") : objectMapper.readTree("{}").deepCopy(); String outputS3Path = json.has("outputS3Path") ? json.path("outputS3Path").textValue() : EnvironmentVariableUtils.getOptionalEnv("OUTPUT_S3_PATH", ""); String sseKmsKeyId = json.has("sseKmsKeyId") ? json.path("sseKmsKeyId").textValue() : EnvironmentVariableUtils.getOptionalEnv("SSE_KMS_KEY_ID", ""); // asText() (rather than toString()) ensures both boolean and quoted string values parse correctly boolean createExportSubdirectory = Boolean.parseBoolean( json.has("createExportSubdirectory") ? json.path("createExportSubdirectory").asText() : EnvironmentVariableUtils.getOptionalEnv("CREATE_EXPORT_SUBDIRECTORY", "true")); boolean overwriteExisting = Boolean.parseBoolean( json.has("overwriteExisting") ? json.path("overwriteExisting").asText() : EnvironmentVariableUtils.getOptionalEnv("OVERWRITE_EXISTING", "false")); boolean uploadToS3OnError = Boolean.parseBoolean( json.has("uploadToS3OnError") ?
json.path("uploadToS3OnError").asText() : EnvironmentVariableUtils.getOptionalEnv("UPLOAD_TO_S3_ON_ERROR", "true")); String configFileS3Path = json.has("configFileS3Path") ? json.path("configFileS3Path").textValue() : EnvironmentVariableUtils.getOptionalEnv("CONFIG_FILE_S3_PATH", ""); String queriesFileS3Path = json.has("queriesFileS3Path") ? json.path("queriesFileS3Path").textValue() : EnvironmentVariableUtils.getOptionalEnv("QUERIES_FILE_S3_PATH", ""); String completionFileS3Path = json.has("completionFileS3Path") ? json.path("completionFileS3Path").textValue() : EnvironmentVariableUtils.getOptionalEnv("COMPLETION_FILE_S3_PATH", ""); String s3Region = json.has("s3Region") ? json.path("s3Region").textValue() : EnvironmentVariableUtils.getOptionalEnv("S3_REGION", EnvironmentVariableUtils.getOptionalEnv("AWS_REGION", "")); ObjectNode completionFilePayload = json.has("completionFilePayload") ? json.path("completionFilePayload").deepCopy() : objectMapper.readTree( EnvironmentVariableUtils.getOptionalEnv( "COMPLETION_FILE_PAYLOAD", "{}")). deepCopy(); ObjectNode additionalParams = json.has("additionalParams") ? json.path("additionalParams").deepCopy() : objectMapper.readTree("{}").deepCopy(); int maxConcurrency = json.has("jobSize") ? JobSize.parse(json.path("jobSize").textValue()).maxConcurrency() : -1; // We are masking 3/4 of the KMS Key ID as it is potentially sensitive information. String maskedKeyId = StringUtils.isBlank(sseKmsKeyId) ? sseKmsKeyId : sseKmsKeyId.substring(0, sseKmsKeyId.length()/4) + sseKmsKeyId.substring(sseKmsKeyId.length()/4).replaceAll("\\w","*"); AWSCredentialsProvider s3CredentialsProvider = getS3CredentialsProvider(json, params, s3Region); logger.log("cmd : " + cmd); logger.log("params : " + params.toPrettyString()); logger.log("outputS3Path : " + outputS3Path); logger.log("createExportSubdirectory : " + createExportSubdirectory); logger.log("overwriteExisting : " + overwriteExisting); logger.log("uploadToS3OnError : " + uploadToS3OnError); logger.log("configFileS3Path : " + configFileS3Path); logger.log("queriesFileS3Path : " + queriesFileS3Path); logger.log("completionFileS3Path : " + completionFileS3Path); logger.log("s3Region : " + s3Region); logger.log("sseKmsKeyId : " + maskedKeyId); logger.log("completionFilePayload : " + completionFilePayload.toPrettyString()); logger.log("additionalParams : " + additionalParams.toPrettyString()); logger.log("maxFileDescriptorCount : " + maxFileDescriptorCount); if (!cmd.contains(" ") && !params.isEmpty()) { cmd = ParamConverter.fromJson(cmd, params).toString(); } logger.log("revised cmd : " + cmd); NeptuneExportService neptuneExportService = new NeptuneExportService( cmd, localOutputPath, cleanOutputPath, outputS3Path, createExportSubdirectory, overwriteExisting, uploadToS3OnError, configFileS3Path, queriesFileS3Path, completionFileS3Path, completionFilePayload, additionalParams, maxConcurrency, s3Region, maxFileDescriptorCount, sseKmsKeyId, s3CredentialsProvider); S3ObjectInfo outputS3ObjectInfo = neptuneExportService.execute(); if (StringUtils.isEmpty(outputS3Path)) { return; } if (outputS3ObjectInfo != null) { try (Writer writer = new BufferedWriter(new OutputStreamWriter(outputStream, UTF_8))) { writer.write(outputS3ObjectInfo.toString()); } } else { System.exit(-1); } } private AWSCredentialsProvider getS3CredentialsProvider(JsonNode json, ObjectNode params, String region) { String s3RoleArn = json.has("s3RoleArn") ?
json.path("s3RoleArn").textValue() : EnvironmentVariableUtils.getOptionalEnv("S3_ROLE_ARN", ""); String s3RoleSessionName = json.has("s3RoleSessionName") ? json.path("s3RoleSessionName").textValue() : EnvironmentVariableUtils.getOptionalEnv("S3_ROLE_SESSION_NAME", "Neptune-Export"); String s3RoleExternalId = json.has("s3RoleExternalId") ? json.path("s3RoleExternalId").textValue() : EnvironmentVariableUtils.getOptionalEnv("S3_ROLE_EXTERNAL_ID", ""); String credentialsProfile = params.has("credentials-profile") ? params.path("credentials-profile").textValue() : EnvironmentVariableUtils.getOptionalEnv("CREDENTIALS_PROFILE", ""); String credentialsConfigFilePath = params.has("credentials-config-file") ? params.path("credentials-config-file").textValue() : EnvironmentVariableUtils.getOptionalEnv("CREDENTIALS_CONFIG_FILE", ""); AWSCredentialsProvider sourceCredentialsProvider = AWSCredentialsUtil.getProfileCredentialsProvider(credentialsProfile, credentialsConfigFilePath); if (StringUtils.isEmpty(s3RoleArn)) { return sourceCredentialsProvider; } return AWSCredentialsUtil.getSTSAssumeRoleCredentialsProvider(s3RoleArn, s3RoleSessionName, s3RoleExternalId, sourceCredentialsProvider, region); } }
1,157
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/TupleQueryHandler.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf; import org.apache.commons.lang.StringUtils; import org.eclipse.rdf4j.model.*; import org.eclipse.rdf4j.query.BindingSet; import org.eclipse.rdf4j.query.QueryResultHandlerException; import org.eclipse.rdf4j.query.TupleQueryResultHandler; import org.eclipse.rdf4j.query.TupleQueryResultHandlerException; import org.eclipse.rdf4j.repository.RepositoryConnection; import org.eclipse.rdf4j.rio.RDFWriter; import java.util.List; class TupleQueryHandler implements TupleQueryResultHandler { private final RDFWriter writer; private final ValueFactory factory; public TupleQueryHandler(RDFWriter writer, ValueFactory factory) { this.writer = writer; this.factory = factory; } @Override public void handleBoolean(boolean value) throws QueryResultHandlerException { } @Override public void handleLinks(List<String> linkUrls) throws QueryResultHandlerException { } @Override public void startQueryResult(List<String> bindingNames) throws TupleQueryResultHandlerException { writer.startRDF(); } @Override public void endQueryResult() throws TupleQueryResultHandlerException { writer.endRDF(); } @Override public void handleSolution(BindingSet bindingSet) throws TupleQueryResultHandlerException { Value s = bindingSet.getValue("s"); Value p = bindingSet.getValue("p"); Value o = bindingSet.getValue("o"); Value g = bindingSet.getValue("g"); if (s == null || p == null || o == null || g == null){ throw new IllegalArgumentException("SPARQL query must return results with s, p, o and g values. For example: SELECT * FROM NAMED <http://aws.amazon.com/neptune/vocab/v01/DefaultNamedGraph> WHERE { GRAPH ?g {?s a <http://kelvinlawrence.net/air-routes/class/Airport>. ?s ?p ?o}} LIMIT 10"); } Resource subject = s.isIRI() ? factory.createIRI(s.stringValue()) : factory.createBNode(s.stringValue()); IRI predicate = factory.createIRI(p.stringValue()); IRI graph = getNonDefaultNamedGraph(g, factory); Statement statement = factory.createStatement(subject, predicate, o, graph); writer.handleStatement(statement); } private IRI getNonDefaultNamedGraph(Value g, ValueFactory factory) { String s = g.stringValue(); if (StringUtils.isEmpty(s) || s.equalsIgnoreCase("http://aws.amazon.com/neptune/vocab/v01/DefaultNamedGraph")) { return null; } return factory.createIRI(s); } }
1,158
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/ExportRdfEdgesJob.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf; import com.amazonaws.services.neptune.rdf.io.RdfTargetConfig; import com.amazonaws.services.neptune.util.CheckedActivity; import com.amazonaws.services.neptune.util.Timer; public class ExportRdfEdgesJob implements ExportRdfJob { private final NeptuneSparqlClient client; private final RdfTargetConfig targetConfig; public ExportRdfEdgesJob(NeptuneSparqlClient client, RdfTargetConfig targetConfig) { this.client = client; this.targetConfig = targetConfig; } @Override public void execute() throws Exception { Timer.timedActivity("exporting RDF edges as " + targetConfig.format().description(), (CheckedActivity.Runnable) () -> { System.err.println("Creating edge statement files"); client.executeGraphQuery("CONSTRUCT {\n" + " ?s ?p ?o \n" + "}\n" + "WHERE {\n" + " ?s ?p ?o . \n" + " FILTER(!isLiteral(?o))\n" + "}", targetConfig); }); } }
1,159
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/NeptuneSparqlClient.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.neptune.auth.NeptuneSigV4SignerException; import com.amazonaws.services.neptune.cluster.ConnectionConfig; import com.amazonaws.services.neptune.export.FeatureToggles; import com.amazonaws.services.neptune.io.OutputWriter; import com.amazonaws.services.neptune.rdf.io.NeptuneExportSparqlRepository; import com.amazonaws.services.neptune.rdf.io.RdfTargetConfig; import com.amazonaws.services.neptune.util.EnvironmentVariableUtils; import org.apache.http.client.HttpClient; import org.eclipse.rdf4j.http.client.HttpClientSessionManager; import org.eclipse.rdf4j.http.client.RDF4JProtocolSession; import org.eclipse.rdf4j.http.client.SPARQLProtocolSession; import org.eclipse.rdf4j.model.ValueFactory; import org.eclipse.rdf4j.query.resultio.TupleQueryResultFormat; import org.eclipse.rdf4j.repository.RepositoryConnection; import org.eclipse.rdf4j.repository.base.AbstractRepository; import org.eclipse.rdf4j.repository.sparql.SPARQLRepository; import org.eclipse.rdf4j.rio.ParserConfig; import org.eclipse.rdf4j.rio.RDFFormat; import org.eclipse.rdf4j.rio.RDFWriter; import org.eclipse.rdf4j.rio.helpers.BasicParserSettings; import org.joda.time.DateTime; import java.io.IOException; import java.util.List; import java.util.Random; import java.util.stream.Collectors; public class NeptuneSparqlClient implements AutoCloseable { private static final ParserConfig PARSER_CONFIG = new ParserConfig().addNonFatalError(BasicParserSettings.VERIFY_URI_SYNTAX); public static NeptuneSparqlClient create(ConnectionConfig config, FeatureToggles featureToggles) { String serviceRegion = config.useIamAuth() ? EnvironmentVariableUtils.getMandatoryEnv("SERVICE_REGION") : null; AWSCredentialsProvider credentialsProvider = config.useIamAuth() ? 
config.getCredentialsProvider() : null; return new NeptuneSparqlClient( config.endpoints().stream() .map(e -> { try { return updateParser(new NeptuneExportSparqlRepository( sparqlEndpoint(e, config.port()), credentialsProvider, serviceRegion, config)); } catch (NeptuneSigV4SignerException e1) { throw new RuntimeException(e1); } } ) .peek(AbstractRepository::init) .collect(Collectors.toList()), featureToggles); } private static SPARQLRepository updateParser(SPARQLRepository repository) { HttpClientSessionManager sessionManager = repository.getHttpClientSessionManager(); repository.setHttpClientSessionManager(new HttpClientSessionManager() { @Override public HttpClient getHttpClient() { return sessionManager.getHttpClient(); } @Override public SPARQLProtocolSession createSPARQLProtocolSession(String s, String s1) { SPARQLProtocolSession session = sessionManager.createSPARQLProtocolSession(s, s1); session.setParserConfig(PARSER_CONFIG); session.setPreferredTupleQueryResultFormat(TupleQueryResultFormat.JSON); return session; } @Override public RDF4JProtocolSession createRDF4JProtocolSession(String s) { return sessionManager.createRDF4JProtocolSession(s); } @Override public void shutDown() { sessionManager.shutDown(); } }); return repository; } private static String sparqlEndpoint(String endpoint, int port) { return String.format("https://%s:%s", endpoint, port); } private final List<SPARQLRepository> repositories; private final Random random = new Random(DateTime.now().getMillis()); private final FeatureToggles featureToggles; private NeptuneSparqlClient(List<SPARQLRepository> repositories, FeatureToggles featureToggles) { this.repositories = repositories; this.featureToggles = featureToggles; } public void executeTupleQuery(String sparql, RdfTargetConfig targetConfig) throws IOException { SPARQLRepository repository = chooseRepository(); ValueFactory factory = repository.getValueFactory(); try (RepositoryConnection connection = repository.getConnection(); OutputWriter outputWriter = targetConfig.createOutputWriter()) { RDFWriter writer = targetConfig.createRDFWriter(outputWriter, featureToggles); connection.prepareTupleQuery(sparql).evaluate(new TupleQueryHandler(writer, factory)); } catch (Exception e) { if (repository instanceof NeptuneExportSparqlRepository) { throw new RuntimeException(((NeptuneExportSparqlRepository) repository).getErrorMessageFromTrailers(), e); } else { throw new RuntimeException(e); } } } public void executeGraphQuery(String sparql, RdfTargetConfig targetConfig) throws IOException { SPARQLRepository repository = chooseRepository(); try (RepositoryConnection connection = repository.getConnection(); OutputWriter outputWriter = targetConfig.createOutputWriter()) { RDFWriter writer = targetConfig.createRDFWriter(outputWriter, featureToggles); connection.prepareGraphQuery(sparql).evaluate(new GraphQueryHandler(writer)); } catch (Exception e) { if (repository instanceof NeptuneExportSparqlRepository) { throw new RuntimeException(((NeptuneExportSparqlRepository) repository).getErrorMessageFromTrailers(), e); } else { throw new RuntimeException(e); } } } private SPARQLRepository chooseRepository() { return repositories.get(random.nextInt(repositories.size())); } @Override public void close() { repositories.forEach(AbstractRepository::shutDown); } }
1,160
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/GraphQueryHandler.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf; import org.eclipse.rdf4j.model.Statement; import org.eclipse.rdf4j.repository.RepositoryConnection; import org.eclipse.rdf4j.rio.RDFHandler; import org.eclipse.rdf4j.rio.RDFHandlerException; import org.eclipse.rdf4j.rio.RDFWriter; class GraphQueryHandler implements RDFHandler { private final RDFWriter writer; public GraphQueryHandler(RDFWriter writer) { this.writer = writer; } @Override public void startRDF() throws RDFHandlerException { writer.startRDF(); } @Override public void endRDF() throws RDFHandlerException { writer.endRDF(); } @Override public void handleNamespace(String s, String s1) throws RDFHandlerException { } @Override public void handleStatement(Statement statement) throws RDFHandlerException { writer.handleStatement(statement); } @Override public void handleComment(String s) throws RDFHandlerException { } }
1,161
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/ExportRdfJob.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf; public interface ExportRdfJob { void execute() throws Exception; }
1,162
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/ExportRdfFromQuery.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf; import com.amazonaws.services.neptune.rdf.io.RdfTargetConfig; import com.amazonaws.services.neptune.util.CheckedActivity; import com.amazonaws.services.neptune.util.Timer; public class ExportRdfFromQuery implements ExportRdfJob { private final NeptuneSparqlClient client; private final RdfTargetConfig targetConfig; private final String query; public ExportRdfFromQuery(NeptuneSparqlClient client, RdfTargetConfig targetConfig, String query) { this.client = client; this.targetConfig = targetConfig; this.query = query; } @Override public void execute() throws Exception { Timer.timedActivity("exporting RDF from query as " + targetConfig.format().description(), (CheckedActivity.Runnable) () -> { System.err.println("Creating statement files from query"); if (query.contains("CONSTRUCT ") || query.contains("DESCRIBE ")) { client.executeGraphQuery(query, targetConfig); } else { client.executeTupleQuery(query, targetConfig); } }); } }
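The routing above is a plain substring test: queries containing "CONSTRUCT " or "DESCRIBE " return statements and are executed as graph queries, while everything else is treated as a tuple query whose bindings must include s, p, o and g (see TupleQueryHandler). A sketch of the two paths, with illustrative queries and an assumed client and target config:

// Graph path: statements come back directly.
new ExportRdfFromQuery(client, targetConfig,
        "CONSTRUCT { ?s ?p ?o } WHERE { ?s ?p ?o } LIMIT 10").execute();

// Tuple path: bindings are reassembled into statements by TupleQueryHandler.
new ExportRdfFromQuery(client, targetConfig,
        "SELECT * WHERE { GRAPH ?g { ?s ?p ?o } } LIMIT 10").execute();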
1,163
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/ExportRdfGraphJob.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf; import com.amazonaws.services.neptune.rdf.io.RdfTargetConfig; import com.amazonaws.services.neptune.util.CheckedActivity; import com.amazonaws.services.neptune.util.Timer; public class ExportRdfGraphJob implements ExportRdfJob { private final NeptuneSparqlClient client; private final RdfTargetConfig targetConfig; public ExportRdfGraphJob(NeptuneSparqlClient client, RdfTargetConfig targetConfig) { this.client = client; this.targetConfig = targetConfig; } @Override public void execute() throws Exception { Timer.timedActivity("exporting RDF as " + targetConfig.format().description(), (CheckedActivity.Runnable) () -> { System.err.println("Creating statement files"); client.executeTupleQuery("SELECT * WHERE { GRAPH ?g { ?s ?p ?o } }", targetConfig); }); } }
1,164
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/Prefixes.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf; import com.amazonaws.services.neptune.export.FeatureToggle; import com.amazonaws.services.neptune.export.FeatureToggles; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.io.LineIterator; import org.eclipse.rdf4j.rio.RDFWriter; import java.io.BufferedWriter; import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.nio.file.Path; import java.util.HashMap; import java.util.Map; public class Prefixes { private final Map<String, String> prefixes = new HashMap<>(); private final int offset; private final boolean inferPrefixes; public Prefixes(FeatureToggles featureToggles) { prefixes.put("http://www.w3.org/1999/02/22-rdf-syntax-ns#", "rdf"); //prefixes.put("http://www.w3.org/2000/01/rdf-schema#", "rdfs"); prefixes.put("http://www.w3.org/2001/XMLSchema#", "xsd"); offset = prefixes.size(); inferPrefixes = featureToggles.containsFeature(FeatureToggle.Infer_RDF_Prefixes); } public void parse(String s, RDFWriter writer) { if(inferPrefixes) { int i = s.indexOf("#"); if (i > 0 && i < (s.length() - 1)) { String uri = s.substring(0, i + 1); if (!prefixes.containsKey(uri)) { String prefix = "s" + (prefixes.size() - offset); prefixes.put(uri, prefix); writer.handleNamespace(prefix, uri); } } } } public void addTo(Path filePath) throws IOException { File source = filePath.toFile(); LineIterator lineIterator = FileUtils.lineIterator(source); File tempFile = File.createTempFile(source.getName(), ".tmp"); BufferedWriter writer = new BufferedWriter(new FileWriter(tempFile)); try { writer.write(allHeaders()); while (lineIterator.hasNext()) { writer.write(lineIterator.next()); writer.write(System.lineSeparator()); } } finally { IOUtils.closeQuietly(writer); LineIterator.closeQuietly(lineIterator); } FileUtils.deleteQuietly(source); FileUtils.moveFile(tempFile, source); } private String allHeaders() { StringBuilder builder = new StringBuilder(); for (Map.Entry<String, String> entry : prefixes.entrySet()) { builder.append("@prefix "); builder.append(entry.getValue()); builder.append(": <"); builder.append(entry.getKey()); builder.append("> ."); builder.append(System.lineSeparator()); } builder.append(System.lineSeparator()); return builder.toString(); } }
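To make the inference above concrete: with the Infer_RDF_Prefixes toggle enabled, each newly seen hash-terminated namespace is assigned a generated prefix (s0, s1, ...) numbered past the built-in rdf and xsd entries, and the writer is notified immediately. A hedged sketch (the namespace is invented; featureToggles and turtleWriter are assumed to exist):

Prefixes prefixes = new Prefixes(featureToggles); // assumes Infer_RDF_Prefixes is enabled
prefixes.parse("http://example.com/ontology#Airport", turtleWriter);
// The first unseen namespace gets prefix "s0", so allHeaders() would now also emit:
// @prefix s0: <http://example.com/ontology#> .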
1,165
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/RdfExportScope.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf; public enum RdfExportScope { graph, edges, query }
1,166
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/io/NeptuneStreamsSimpleJsonNQuadsWriter.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf.io; import com.amazonaws.services.neptune.io.OutputWriter; import com.amazonaws.services.neptune.io.Status; import com.amazonaws.services.neptune.io.StatusOutputFormat; import com.amazonaws.services.neptune.util.NotImplementedException; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; import org.eclipse.rdf4j.common.text.ASCIIUtil; import org.eclipse.rdf4j.common.text.StringUtil; import org.eclipse.rdf4j.model.*; import org.eclipse.rdf4j.rio.*; import org.eclipse.rdf4j.rio.helpers.NTriplesUtil; import org.eclipse.rdf4j.rio.nquads.NQuadsWriter; import java.io.IOException; import java.io.StringWriter; import java.util.Collection; public class NeptuneStreamsSimpleJsonNQuadsWriter implements RDFWriter { private static final String REGEX_LAST_NEWLINE = String.format("%s$", System.lineSeparator()); private final JsonGenerator generator; private final Status status = new Status(StatusOutputFormat.Description, "records"); private final OutputWriter outputWriter; public NeptuneStreamsSimpleJsonNQuadsWriter(OutputWriter outputWriter) { this.outputWriter = outputWriter; try { this.generator = new JsonFactory().createGenerator(outputWriter.writer()); } catch (IOException e) { throw new RuntimeException(e); } } @Override public RDFFormat getRDFFormat() { return RDFFormat.NQUADS; } @Override public RDFWriter setWriterConfig(WriterConfig writerConfig) { throw new NotImplementedException(); } @Override public WriterConfig getWriterConfig() { throw new NotImplementedException(); } @Override public Collection<RioSetting<?>> getSupportedSettings() { throw new NotImplementedException(); } @Override public <T> RDFWriter set(RioSetting<T> rioSetting, T t) { throw new NotImplementedException(); } @Override public void startRDF() throws RDFHandlerException { // Do nothing } @Override public void endRDF() throws RDFHandlerException { // Do nothing } @Override public void handleNamespace(String s, String s1) throws RDFHandlerException { // Do nothing } @Override public void handleStatement(Statement statement) throws RDFHandlerException { try { outputWriter.startCommit(); generator.writeStartObject(); generator.writeStringField("id", ""); generator.writeStringField("from", ""); generator.writeStringField("to", ""); generator.writeStringField("type", ""); generator.writeStringField("key", ""); generator.writeStringField("value", ""); generator.writeStringField("dataType", ""); // generator.writeStringField("s", getValue(statement.getSubject())); // generator.writeStringField("p", getValue(statement.getPredicate())); // generator.writeStringField("o", getValue(statement.getObject())); // // if (statement.getContext() != null) { // generator.writeStringField("g", getValue(statement.getContext())); // } else { // generator.writeStringField("g", ""); // } generator.writeStringField("s", ""); generator.writeStringField("p", ""); generator.writeStringField("o", ""); 
generator.writeStringField("g", ""); generator.writeFieldName("stmt"); StringWriter stringWriter = new StringWriter(); NQuadsWriter nQuadsWriter = new NQuadsWriter(stringWriter); nQuadsWriter.startRDF(); nQuadsWriter.handleStatement(statement); nQuadsWriter.endRDF(); generator.writeString(stringWriter.toString().replaceAll(REGEX_LAST_NEWLINE, "")); generator.writeStringField("op", "ADD"); generator.writeEndObject(); generator.writeRaw(outputWriter.lineSeparator()); generator.flush(); outputWriter.endCommit(); status.update(); } catch (IOException e) { throw new RDFHandlerException(e); } } @Override public void handleComment(String s) throws RDFHandlerException { // Do nothing } private String getValue(Value value) throws IOException { if (value instanceof IRI) { return getIRI((IRI) value); } else if (value instanceof BNode) { return getBNode((BNode) value); } else { if (!(value instanceof Literal)) { throw new IllegalArgumentException("Unknown value type: " + value.getClass()); } return getLiteral((Literal) value); } } private String getIRI(IRI iri) throws IOException { StringWriter appendable = new StringWriter(); StringUtil.simpleEscapeIRI(iri.toString(), appendable, true); return appendable.toString(); } private String getBNode(BNode bNode) throws IOException { StringWriter appendable = new StringWriter(); String nextId = bNode.getID(); appendable.append("_:"); if (nextId.isEmpty()) { appendable.append("genid"); appendable.append(Integer.toHexString(bNode.hashCode())); } else { if (!ASCIIUtil.isLetter(nextId.charAt(0))) { appendable.append("genid"); appendable.append(Integer.toHexString(nextId.charAt(0))); } for (int i = 0; i < nextId.length(); ++i) { if (ASCIIUtil.isLetterOrNumber(nextId.charAt(i))) { appendable.append(nextId.charAt(i)); } else { appendable.append(Integer.toHexString(nextId.charAt(i))); } } } return appendable.toString(); } private String getLiteral(Literal lit) throws IOException { StringWriter appendable = new StringWriter(); NTriplesUtil.append(lit, appendable, true, true); return appendable.toString(); } }
1,167
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/io/EnhancedTurtleWriter.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf.io; import com.amazonaws.services.neptune.io.Status; import com.amazonaws.services.neptune.io.OutputWriter; import com.amazonaws.services.neptune.io.StatusOutputFormat; import com.amazonaws.services.neptune.rdf.Prefixes; import org.eclipse.rdf4j.model.Resource; import org.eclipse.rdf4j.model.Statement; import org.eclipse.rdf4j.rio.RDFHandlerException; import org.eclipse.rdf4j.rio.turtle.TurtleWriter; import java.io.IOException; public class EnhancedTurtleWriter extends TurtleWriter { private final OutputWriter writer; private final Prefixes prefixes; private final Status status = new Status(StatusOutputFormat.Description, "statements"); public EnhancedTurtleWriter(OutputWriter writer, Prefixes prefixes) { super(writer.writer()); this.writer = writer; this.prefixes = prefixes; } @Override public void handleStatement(Statement statement) throws RDFHandlerException { prefixes.parse(statement.getSubject().stringValue(), this); prefixes.parse(statement.getPredicate().toString(), this); prefixes.parse(statement.getObject().stringValue(), this); Resource context = statement.getContext(); if (context != null){ prefixes.parse(context.stringValue(), this); } writer.startCommit(); super.handleStatement(statement); writer.endCommit(); status.update(); } @Override protected void writeNamespace(String prefix, String name) throws IOException { writer.startCommit(); super.writeNamespace(prefix, name); writer.endCommit(); } }
1,168
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/io/NeptuneExportSparqlRepository.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf.io; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.neptune.auth.NeptuneApacheHttpSigV4Signer; import com.amazonaws.neptune.auth.NeptuneSigV4Signer; import com.amazonaws.neptune.auth.NeptuneSigV4SignerException; import com.amazonaws.services.neptune.cluster.ConnectionConfig; import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpException; import org.apache.http.HttpRequestInterceptor; import org.apache.http.HttpResponseInterceptor; import org.apache.http.client.HttpClient; import org.apache.http.client.methods.HttpUriRequest; import org.apache.http.conn.EofSensorInputStream; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.impl.io.ChunkedInputStream; import org.apache.http.protocol.HttpContext; import org.eclipse.rdf4j.http.client.util.HttpClientBuilders; import org.eclipse.rdf4j.repository.sparql.SPARQLRepository; import java.io.InputStream; import java.io.UnsupportedEncodingException; import java.lang.reflect.Method; import java.net.URLDecoder; import java.util.HashMap; import java.util.Map; public class NeptuneExportSparqlRepository extends SPARQLRepository { private final String regionName; private final AWSCredentialsProvider awsCredentialsProvider; private final ConnectionConfig config; private NeptuneSigV4Signer<HttpUriRequest> v4Signer; private HttpContext lastContext; public NeptuneExportSparqlRepository(String endpointUrl, AWSCredentialsProvider awsCredentialsProvider, String regionName, ConnectionConfig config) throws NeptuneSigV4SignerException { super(getSparqlEndpoint(endpointUrl)); if (config == null) { throw new IllegalArgumentException("ConnectionConfig is required to be non-null"); } this.config = config; this.awsCredentialsProvider = awsCredentialsProvider; this.regionName = regionName; this.initAuthenticatingHttpClient(); Map<String, String> additionalHeaders = new HashMap<>(); additionalHeaders.put("te", "trailers"); //Asks Neptune to send trailing headers which may contain error messages this.setAdditionalHttpHeaders(additionalHeaders); } protected void initAuthenticatingHttpClient() throws NeptuneSigV4SignerException { HttpClientBuilder httpClientBuilder = config.useSsl() ? 
HttpClientBuilders.getSSLTrustAllHttpClientBuilder() : HttpClientBuilder.create(); httpClientBuilder.addInterceptorLast((HttpResponseInterceptor) (response, context) -> { lastContext = context; HttpEntity entity = response.getEntity(); if (entity != null) { context.setAttribute("raw-response-inputstream", entity.getContent()); } }); if (config.useIamAuth()) { v4Signer = new NeptuneApacheHttpSigV4Signer(regionName, awsCredentialsProvider); HttpClient v4SigningClient = httpClientBuilder.addInterceptorLast((HttpRequestInterceptor) (req, ctx) -> { if (req instanceof HttpUriRequest) { HttpUriRequest httpUriReq = (HttpUriRequest) req; try { v4Signer.signRequest(httpUriReq); } catch (NeptuneSigV4SignerException e) { throw new HttpException("Problem signing the request: ", e); } } else { throw new HttpException("Not an HttpUriRequest"); } }).build(); setHttpClient(v4SigningClient); } else { setHttpClient(httpClientBuilder.build()); } } private static String getSparqlEndpoint(String endpointUrl) { return endpointUrl + "/sparql"; } /** * Attempts to extract error messages from the trailing headers of the most recent response received by this repository. * If no trailers are found, an empty String is returned. */ public String getErrorMessageFromTrailers() { if (this.lastContext == null) { return ""; } InputStream responseInStream = (InputStream) this.lastContext.getAttribute("raw-response-inputstream"); ChunkedInputStream chunkedInStream; if (responseInStream instanceof ChunkedInputStream) { chunkedInStream = (ChunkedInputStream) responseInStream; } else if (responseInStream instanceof EofSensorInputStream) { // HTTPClient 4.5.13 provides no methods for accessing trailers from a wrapped stream, requiring the use of // reflection to break encapsulation. This bug is being tracked in https://issues.apache.org/jira/browse/HTTPCLIENT-2263. try { Method getWrappedStream = EofSensorInputStream.class.getDeclaredMethod("getWrappedStream"); getWrappedStream.setAccessible(true); chunkedInStream = (ChunkedInputStream) getWrappedStream.invoke(responseInStream); getWrappedStream.setAccessible(false); } catch (Exception e) { return ""; } } else { return ""; } Header[] trailers = chunkedInStream.getFooters(); StringBuilder messageBuilder = new StringBuilder(); for (Header trailer : trailers) { try { messageBuilder.append(URLDecoder.decode(trailer.toString(), "UTF-8")); } catch (UnsupportedEncodingException e) { messageBuilder.append(trailer); } messageBuilder.append('\n'); } return messageBuilder.toString(); } protected void setLastContext(HttpContext context) { this.lastContext = context; } }
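A condensed sketch of the SigV4 request-signing pattern used above, isolated from the repository class. The region string and credentials provider are placeholders; the signer and interceptor calls mirror the ones in the source.

import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.neptune.auth.NeptuneApacheHttpSigV4Signer;
import com.amazonaws.neptune.auth.NeptuneSigV4SignerException;
import org.apache.http.HttpException;
import org.apache.http.HttpRequestInterceptor;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.impl.client.HttpClientBuilder;

public class SigV4ClientSketch {
    public static HttpClient signingClient(String region) throws NeptuneSigV4SignerException {
        NeptuneApacheHttpSigV4Signer signer =
                new NeptuneApacheHttpSigV4Signer(region, new DefaultAWSCredentialsProviderChain());
        return HttpClientBuilder.create()
                .addInterceptorLast((HttpRequestInterceptor) (request, context) -> {
                    if (request instanceof HttpUriRequest) {
                        try {
                            signer.signRequest((HttpUriRequest) request); // adds SigV4 headers
                        } catch (NeptuneSigV4SignerException e) {
                            throw new HttpException("Problem signing the request", e);
                        }
                    }
                })
                .build();
    }
}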
1,169
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/io/EnhancedNTriplesWriter.java
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf.io; import com.amazonaws.services.neptune.io.OutputWriter; import com.amazonaws.services.neptune.io.Status; import com.amazonaws.services.neptune.io.StatusOutputFormat; import com.amazonaws.services.neptune.rdf.Prefixes; import org.eclipse.rdf4j.model.Resource; import org.eclipse.rdf4j.model.Statement; import org.eclipse.rdf4j.rio.RDFHandlerException; import org.eclipse.rdf4j.rio.ntriples.NTriplesWriter; public class EnhancedNTriplesWriter extends NTriplesWriter { private final OutputWriter writer; private final Prefixes prefixes; private final Status status = new Status(StatusOutputFormat.Description,"statements"); public EnhancedNTriplesWriter(OutputWriter writer, Prefixes prefixes) { super(writer.writer()); this.writer = writer; this.prefixes = prefixes; } @Override public void handleStatement(Statement statement) throws RDFHandlerException { prefixes.parse(statement.getSubject().stringValue(), this); prefixes.parse(statement.getPredicate().toString(), this); prefixes.parse(statement.getObject().stringValue(), this); Resource context = statement.getContext(); if (context != null) { prefixes.parse(context.stringValue(), this); } writer.startCommit(); super.handleStatement(statement); writer.endCommit(); status.update(); } @Override public void handleNamespace(String prefix, String name) { writer.startCommit(); super.handleNamespace(prefix, name); writer.endCommit(); } }
1,170
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/io/NeptuneStreamsJsonNQuadsWriter.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf.io; import com.amazonaws.services.neptune.io.OutputWriter; import com.amazonaws.services.neptune.io.Status; import com.amazonaws.services.neptune.io.StatusOutputFormat; import com.amazonaws.services.neptune.util.NotImplementedException; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; import org.eclipse.rdf4j.model.Statement; import org.eclipse.rdf4j.rio.*; import org.eclipse.rdf4j.rio.nquads.NQuadsWriter; import java.io.IOException; import java.io.StringWriter; import java.util.Collection; public class NeptuneStreamsJsonNQuadsWriter implements RDFWriter { private static final String REGEX_LAST_NEWLINE = String.format("%s$", System.lineSeparator()); private final JsonGenerator generator; private final Status status = new Status(StatusOutputFormat.Description,"statements"); private final OutputWriter outputWriter; public NeptuneStreamsJsonNQuadsWriter(OutputWriter outputWriter) { this.outputWriter = outputWriter; try { this.generator = new JsonFactory().createGenerator(outputWriter.writer()); } catch (IOException e) { throw new RuntimeException(e); } } @Override public RDFFormat getRDFFormat() { return RDFFormat.NQUADS; } @Override public RDFWriter setWriterConfig(WriterConfig writerConfig) { throw new NotImplementedException(); } @Override public WriterConfig getWriterConfig() { throw new NotImplementedException(); } @Override public Collection<RioSetting<?>> getSupportedSettings() { throw new NotImplementedException(); } @Override public <T> RDFWriter set(RioSetting<T> rioSetting, T t) { throw new NotImplementedException(); } @Override public void startRDF() throws RDFHandlerException { // Do nothing } @Override public void endRDF() throws RDFHandlerException { // Do nothing } @Override public void handleNamespace(String s, String s1) throws RDFHandlerException { // Do nothing } @Override public void handleStatement(Statement statement) throws RDFHandlerException { try { outputWriter.startCommit(); generator.writeStartObject(); generator.writeObjectFieldStart("eventId"); generator.writeNumberField("commitNum", -1); generator.writeNumberField("opNum", 0); generator.writeEndObject(); generator.writeObjectFieldStart("data"); generator.writeFieldName("stmt"); StringWriter stringWriter = new StringWriter(); NQuadsWriter nQuadsWriter = new NQuadsWriter(stringWriter); nQuadsWriter.startRDF(); nQuadsWriter.handleStatement(statement); nQuadsWriter.endRDF(); generator.writeString(stringWriter.toString().replaceAll(REGEX_LAST_NEWLINE, "")); generator.writeEndObject(); generator.writeStringField("op", "ADD"); generator.writeEndObject(); generator.flush(); outputWriter.endCommit(); status.update(); } catch (IOException e) { throw new RDFHandlerException(e); } } @Override public void handleComment(String s) throws RDFHandlerException { // Do nothing } }
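For reference, each handleStatement call above serializes one statement to a single JSON object with the following shape, shown here for a hypothetical triple (the trailing newline that NQuadsWriter emits is stripped from the "stmt" value):

{"eventId":{"commitNum":-1,"opNum":0},"data":{"stmt":"<http://example.org/s> <http://example.org/p> <http://example.org/o> ."},"op":"ADD"}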
1,171
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/io/RdfTargetConfig.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf.io; import com.amazonaws.services.neptune.export.FeatureToggles; import com.amazonaws.services.neptune.io.Directories; import com.amazonaws.services.neptune.io.KinesisConfig; import com.amazonaws.services.neptune.io.OutputWriter; import com.amazonaws.services.neptune.io.Target; import com.amazonaws.services.neptune.rdf.Prefixes; import org.eclipse.rdf4j.rio.RDFWriter; import java.io.IOException; public class RdfTargetConfig { private final Directories directories; private final Target output; private final KinesisConfig kinesisConfig; private final RdfExportFormat format; public RdfTargetConfig(Directories directories, KinesisConfig kinesisConfig, Target output, RdfExportFormat format) { this.directories = directories; this.output = output; this.kinesisConfig = kinesisConfig; this.format = format; } public OutputWriter createOutputWriter() throws IOException { return output.createOutputWriter( () -> directories.createStatementsFilePath("statements", format), kinesisConfig); } public RDFWriter createRDFWriter(OutputWriter outputWriter, FeatureToggles featureToggles) { return format.createWriter(outputWriter, new Prefixes(featureToggles)); } public RdfExportFormat format() { return format; } }
1,172
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/io/RdfExportFormat.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf.io; import com.amazonaws.services.neptune.io.FileExtension; import com.amazonaws.services.neptune.io.OutputWriter; import com.amazonaws.services.neptune.rdf.Prefixes; import org.eclipse.rdf4j.rio.RDFWriter; public enum RdfExportFormat implements FileExtension { turtle { @Override RDFWriter createWriter(OutputWriter writer, Prefixes prefixes) { return new EnhancedTurtleWriter(writer, prefixes); } @Override public String extension() { return "ttl"; } @Override public String description() { return "Turtle"; } }, nquads { @Override RDFWriter createWriter(OutputWriter writer, Prefixes prefixes) { return new EnhancedNQuadsWriter(writer, prefixes); } @Override public String extension() { return "nq"; } @Override public String description() { return "NQUADS"; } }, ntriples { @Override RDFWriter createWriter(OutputWriter writer, Prefixes prefixes) { return new EnhancedNTriplesWriter(writer, prefixes); } @Override public String extension() { return "nt"; } @Override public String description() { return "NTRIPLES"; } }, neptuneStreamsJson { @Override RDFWriter createWriter(OutputWriter writer, Prefixes prefixes) { return new NeptuneStreamsJsonNQuadsWriter(writer); } @Override public String extension() { return "json"; } @Override public String description() { return "JSON (Neptune Streams format)"; } }, neptuneStreamsSimpleJson { @Override RDFWriter createWriter(OutputWriter writer, Prefixes prefixes) { return new NeptuneStreamsSimpleJsonNQuadsWriter(writer); } @Override public String extension() { return "json"; } @Override public String description() { return "JSON (Neptune Streams simple format)"; } }; abstract RDFWriter createWriter(OutputWriter writer, Prefixes prefixes); public abstract String description(); }
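A small usage sketch (hypothetical caller): the enum doubles as a file-extension provider and an RDFWriter factory, so export code can stay format-agnostic. Note that createWriter is package-private, so only same-package callers can use the factory method directly.

public class FormatSketch {
    public static void main(String[] args) {
        for (RdfExportFormat f : RdfExportFormat.values()) {
            System.out.println(f.name() + " -> ." + f.extension() + " (" + f.description() + ")");
        }
        // e.g. nquads -> .nq (NQUADS)
    }
}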
1,173
0
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/io/EnhancedNQuadsWriter.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.rdf.io; import com.amazonaws.services.neptune.io.OutputWriter; import com.amazonaws.services.neptune.io.Status; import com.amazonaws.services.neptune.io.StatusOutputFormat; import com.amazonaws.services.neptune.rdf.Prefixes; import org.eclipse.rdf4j.model.Resource; import org.eclipse.rdf4j.model.Statement; import org.eclipse.rdf4j.rio.RDFHandlerException; import org.eclipse.rdf4j.rio.nquads.NQuadsWriter; public class EnhancedNQuadsWriter extends NQuadsWriter { private final OutputWriter writer; private final Prefixes prefixes; private final Status status = new Status(StatusOutputFormat.Description,"statements"); public EnhancedNQuadsWriter(OutputWriter writer, Prefixes prefixes) { super(writer.writer()); this.writer = writer; this.prefixes = prefixes; } @Override public void handleStatement(Statement statement) throws RDFHandlerException { prefixes.parse(statement.getSubject().stringValue(), this); prefixes.parse(statement.getPredicate().toString(), this); prefixes.parse(statement.getObject().stringValue(), this); Resource context = statement.getContext(); if (context != null){ prefixes.parse(context.stringValue(), this); } writer.startCommit(); super.handleStatement(statement); writer.endCommit(); status.update(); } @Override public void handleNamespace(String prefix, String name) { writer.startCommit(); super.handleNamespace(prefix, name); writer.endCommit(); } }
1,174
0
Create_ds/mantis-connectors/mantis-connector-publish/src/test/java/io/mantisrx/connector/publish
Create_ds/mantis-connectors/mantis-connector-publish/src/test/java/io/mantisrx/connector/publish/core/QueryRegistryTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.publish.core; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import io.mantisrx.publish.proto.MantisServerSubscription; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; class QueryRegistryTest { @Test void registerQueryTest() { try { QueryRegistry queryRegistry = new QueryRegistry.Builder().build(); fail(); } catch (IllegalArgumentException ignored) { } QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build(); String targetApp = QueryRegistry.ANY; try { queryRegistry.registerQuery(targetApp, null, "true"); fail(); } catch (Exception ignored) { } try { queryRegistry.registerQuery(targetApp, "subId", null); fail(); } catch (Exception ignored) { } queryRegistry.registerQuery("myApp", "subId", "true"); queryRegistry.registerQuery("myApp2", "subId", "false"); List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp("myApp"); assertEquals(1, currentSubs.size()); List<MantisServerSubscription> currentSubs2 = queryRegistry.getCurrentSubscriptionsForApp("myApp2"); assertEquals(1, currentSubs2.size()); Map<String, List<MantisServerSubscription>> allSubscriptions = queryRegistry.getAllSubscriptions(); assertEquals(2, allSubscriptions.size()); assertTrue(allSubscriptions.containsKey("myApp")); assertTrue(allSubscriptions.containsKey("myApp2")); } @Test void registerQueryForAnyLookupSpecificAppTest() { try { QueryRegistry queryRegistry = new QueryRegistry.Builder().build(); fail(); } catch (IllegalArgumentException ignored) { } QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build(); String targetApp = QueryRegistry.ANY; queryRegistry.registerQuery(targetApp, "subId", "true"); List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp("myApp"); assertEquals(1, currentSubs.size()); } @Test void registerQueryForAppLookupAnyTest() { try { QueryRegistry queryRegistry = new QueryRegistry.Builder().build(); fail(); } catch (IllegalArgumentException ignored) { } QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build(); String targetApp = QueryRegistry.ANY; queryRegistry.registerQuery("myApp", "subId", "true"); List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp(targetApp); assertEquals(0, currentSubs.size()); } @Test @Disabled("time-based, non-deterministic") void deregisterQueryTest() throws InterruptedException { try { QueryRegistry queryRegistry = new QueryRegistry.Builder().build(); fail(); } catch (IllegalArgumentException ignored) { } QueryRegistry queryRegistry = new 
QueryRegistry.Builder().withClientIdPrefix("myPrefix").build(); String targetApp = "myapp"; try { queryRegistry.registerQuery(targetApp, null, "true"); fail(); } catch (Exception ignored) { } try { queryRegistry.registerQuery(targetApp, "subId", null); fail(); } catch (Exception ignored) { } queryRegistry.registerQuery(targetApp, "subId", "true"); List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp(targetApp); assertEquals(1, currentSubs.size()); queryRegistry.deregisterQuery(targetApp, "subId", "true"); Thread.sleep(500); currentSubs = queryRegistry.getCurrentSubscriptionsForApp(QueryRegistry.ANY); assertEquals(0, currentSubs.size()); } @Test @Disabled("time-based, non-deterministic") void registerIdenticalQueryGetsDedupedTest() { QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build(); String targetApp = "myApp"; int concurrency = 5; CountDownLatch latch = new CountDownLatch(1); CountDownLatch endLatch = new CountDownLatch(concurrency); Runnable task = () -> { try { latch.await(); queryRegistry.registerQuery(targetApp, "subId", "true"); endLatch.countDown(); } catch (InterruptedException e) { e.printStackTrace(); } }; ExecutorService executorService = Executors.newFixedThreadPool(concurrency); for (int i = 0; i < concurrency; i++) { executorService.submit(task); } latch.countDown(); try { endLatch.await(); List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp(targetApp); assertEquals(1, currentSubs.size()); assertEquals("myPrefix_subId", currentSubs.get(0).getSubscriptionId()); } catch (InterruptedException e) { e.printStackTrace(); fail(); } } @Test @Disabled("time-based, non-deterministic") void registerIdenticalQueryRemovalTest() throws InterruptedException { QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build(); String targetApp = "myApp"; int concurrency = 5; CountDownLatch latch = new CountDownLatch(1); CountDownLatch endLatch = new CountDownLatch(concurrency); CountDownLatch removeQueryEndLatch = new CountDownLatch(concurrency - 1); Runnable addQueryTask = () -> { try { latch.await(); queryRegistry.registerQuery(targetApp, "subId", "true"); endLatch.countDown(); } catch (InterruptedException e) { e.printStackTrace(); } }; Runnable removeQueryTask = () -> { try { latch.await(); queryRegistry.deregisterQuery(targetApp, "subId", "true"); removeQueryEndLatch.countDown(); } catch (InterruptedException ignored) { } }; ExecutorService executorService = Executors.newFixedThreadPool(concurrency * 2); for (int i = 0; i < concurrency; i++) { executorService.submit(addQueryTask); } for (int i = 0; i < concurrency - 1; i++) { executorService.submit(removeQueryTask); } latch.countDown(); removeQueryEndLatch.await(); List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp(targetApp); assertEquals(1, currentSubs.size()); assertEquals("myPrefix_subId", currentSubs.get(0).getSubscriptionId()); } @Test @Disabled("time-based, non-deterministic") void registerQueryMultipleAppsRemovalTest() throws InterruptedException { QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build(); String targetApp = "myApp"; String targetApp2 = "myApp2"; int concurrency = 5; CountDownLatch latch = new CountDownLatch(1); CountDownLatch endLatch = new CountDownLatch(concurrency); CountDownLatch removeQueryEndLatch = new CountDownLatch(concurrency - 1); Runnable addQueryTask = () -> { try { 
latch.await(); queryRegistry.registerQuery(targetApp, "subId", "true"); endLatch.countDown(); } catch (InterruptedException ignored) { } }; Runnable addQueryTask2 = () -> { try { latch.await(); queryRegistry.registerQuery(targetApp2, "subId", "true"); endLatch.countDown(); } catch (InterruptedException ignored) { } }; Runnable removeQueryTask = () -> { try { latch.await(); queryRegistry.deregisterQuery(targetApp, "subId", "true"); removeQueryEndLatch.countDown(); } catch (InterruptedException ignored) { } }; Runnable removeQueryTask2 = () -> { try { latch.await(); queryRegistry.deregisterQuery(targetApp2, "subId", "true"); removeQueryEndLatch.countDown(); } catch (InterruptedException ignored) { } }; ExecutorService executorService = Executors.newFixedThreadPool(concurrency * 2); for (int i = 0; i < concurrency; i++) { executorService.submit(addQueryTask); executorService.submit(addQueryTask2); } for (int i = 0; i < concurrency - 1; i++) { executorService.submit(removeQueryTask); executorService.submit(removeQueryTask2); } latch.countDown(); removeQueryEndLatch.await(); List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp(targetApp); assertEquals(1, currentSubs.size()); List<MantisServerSubscription> currentSubs2 = queryRegistry.getCurrentSubscriptionsForApp(targetApp2); assertEquals(1, currentSubs2.size()); assertEquals("myPrefix_subId", currentSubs.get(0).getSubscriptionId()); } }
1,175
0
Create_ds/mantis-connectors/mantis-connector-publish/src/test/java/io/mantisrx/connector/publish
Create_ds/mantis-connectors/mantis-connector-publish/src/test/java/io/mantisrx/connector/publish/core/EventFilterTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.publish.core; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.mantisrx.common.utils.MantisSourceJobConstants; import org.junit.jupiter.api.Test; import rx.functions.Func1; public class EventFilterTest { private ObjectMapper mapper = new ObjectMapper(); @Test public void missingClientIdFails() { try { new EventFilter(null); fail(); } catch (Exception ignored) { } } @Test public void basicFilterTest() throws JsonProcessingException { String clientId = "myClientId"; EventFilter filter = new EventFilter(clientId); Map<String, List<String>> params = new HashMap<>(); List<String> subIdParam = new ArrayList<>(); subIdParam.add("mySubId"); params.put(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME, subIdParam); Func1<String, Boolean> materializedFilter = filter.call(params); List<String> matchedClients = new ArrayList<>(); matchedClients.add(clientId + "_" + "mySubId"); matchedClients.add(clientId + "_" + "BlahSubId"); Map<String, Object> payLoad = new HashMap<>(); payLoad.put("ts", System.currentTimeMillis()); payLoad.put("matchedClients", matchedClients); payLoad.put("type", "EVENT"); String payloadStr = mapper.writeValueAsString(payLoad); assertTrue(materializedFilter.call(payloadStr)); // Populate the second list (the original added to matchedClients by mistake, leaving this empty). // "otherSubId" is used rather than "mySubId2" because the filter does a substring match, and // "myClientId_mySubId2" would (surprisingly) contain the term "myClientId_mySubId". List<String> matchedClients2 = new ArrayList<>(); matchedClients2.add(clientId + "_" + "otherSubId"); matchedClients2.add(clientId + "_" + "BlahSubId"); payLoad = new HashMap<>(); payLoad.put("ts", System.currentTimeMillis()); payLoad.put("matchedClients", matchedClients2); payLoad.put("type", "EVENT"); payloadStr = mapper.writeValueAsString(payLoad); assertFalse(materializedFilter.call(payloadStr)); } @Test public void basicEmptyEventFilterTest() throws JsonProcessingException { String clientId = "myClientId"; EventFilter filter = new EventFilter(clientId); Map<String, List<String>> params = new HashMap<>(); List<String> subIdParam = new ArrayList<>(); subIdParam.add("mySubId"); params.put(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME, subIdParam); Func1<String, Boolean> materializedFilter = filter.call(params); List<String> matchedClients = new ArrayList<>(); matchedClients.add(clientId + "_" + "mySubId"); matchedClients.add(clientId + "_" + "BlahSubId"); Map<String, Object> payLoad = new HashMap<>(); String payloadStr = mapper.writeValueAsString(payLoad); assertFalse(materializedFilter.call(payloadStr)); try { assertFalse(materializedFilter.call(null)); } catch (Exception e) { fail(); } } @Test public void missingSubIdParamAlwaysPasses() throws JsonProcessingException { String clientId = "myClientId"; EventFilter filter = new EventFilter(clientId); Map<String,
List<String>> params = new HashMap<>(); Func1<String, Boolean> materializedFilter = filter.call(params); List<String> matchedClients = new ArrayList<>(); matchedClients.add(clientId + "_" + "mySubId"); matchedClients.add(clientId + "_" + "BlahSubId"); Map<String, Object> payLoad = new HashMap<>(); payLoad.put("ts", System.currentTimeMillis()); payLoad.put("matchedClients", matchedClients); payLoad.put("type", "EVENT"); String payloadStr = mapper.writeValueAsString(payLoad); assertTrue(materializedFilter.call(payloadStr)); } }
1,176
0
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/core/QueryRegistry.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.publish.core; import static io.mantisrx.connector.publish.core.ObjectUtils.checkNotNull; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import io.mantisrx.publish.proto.MantisServerSubscription; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class QueryRegistry { public static final String ANY = "ANY"; private static final Logger LOGGER = LoggerFactory.getLogger(QueryRegistry.class); private final Map<String, String> emptyMap = new HashMap<>(0); private final ConcurrentMap<String, QueryMap> appToSubscriptionMap = new ConcurrentHashMap<>(); private final String clientIdPrefix; private QueryRegistry(String clientIdPrefix) { this.clientIdPrefix = clientIdPrefix; } public void registerQuery(String targetApp, String subId, String query) { registerQuery(targetApp, subId, query, this.emptyMap, false); } public void registerQuery(String targetApp, String subId, String query, Map<String, String> additionalParams, boolean validateQueryAsGroovy) { checkNotNull("subscriptionId", subId); checkNotNull("query", query); checkNotNull("targetAppName", targetApp); Map<String, String> addParams = (additionalParams == null) ? emptyMap : additionalParams; appToSubscriptionMap.putIfAbsent(targetApp, new QueryMap(clientIdPrefix)); appToSubscriptionMap.get(targetApp).registerQuery(subId, query, addParams, validateQueryAsGroovy); } public boolean deregisterQuery(String targetApp, String subId, String query) { appToSubscriptionMap.computeIfPresent(targetApp, (k, v) -> { v.deregisterQuery(subId, query); return v; }); return true; } public List<MantisServerSubscription> getCurrentSubscriptionsForApp(String app) { List<MantisServerSubscription> subsForApp = (appToSubscriptionMap.containsKey(app)) ? appToSubscriptionMap.get(app).getCurrentSubscriptions() : new ArrayList<>(); if (!app.equals(ANY) && appToSubscriptionMap.containsKey(ANY)) { subsForApp.addAll(appToSubscriptionMap.get(ANY).getCurrentSubscriptions()); } return subsForApp; } /** * Returns a list of {@link MantisServerSubscription}s. * * @param queryParams request query parameters; an optional "app" entry selects the target application.
*/ public List<MantisServerSubscription> getCurrentSubscriptions(Map<String, List<String>> queryParams) { String app = ANY; if (queryParams.containsKey("app")) { app = queryParams.get("app").get(0); } return getCurrentSubscriptionsForApp(app); } public Map<String, List<MantisServerSubscription>> getAllSubscriptions() { Map<String, List<MantisServerSubscription>> allSubMap = new HashMap<>(); appToSubscriptionMap.forEach((s, q) -> { allSubMap.put(s, q.getCurrentSubscriptions()); }); return allSubMap; } private String addMantisPrefix(String subId) { return clientIdPrefix + "_" + subId; } public static class Builder { private String prefix = null; public Builder() { } public Builder withClientIdPrefix(String prefix) { checkNotNull("prefix", prefix); this.prefix = prefix; return this; } public QueryRegistry build() { checkNotNull("prefix", this.prefix); return new QueryRegistry(prefix); } } }
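A minimal usage sketch (hypothetical application names and query strings): app-specific lookups also include subscriptions registered under the ANY wildcard.

import java.util.List;
import io.mantisrx.connector.publish.core.QueryRegistry;
import io.mantisrx.publish.proto.MantisServerSubscription;

public class QueryRegistrySketch {
    public static void main(String[] args) {
        QueryRegistry registry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build();
        registry.registerQuery("orders-service", "sub-1", "status=='FAILED'"); // hypothetical query
        registry.registerQuery(QueryRegistry.ANY, "sub-2", "true");

        // "orders-service" sees its own subscription plus the ANY wildcard entry.
        List<MantisServerSubscription> subs = registry.getCurrentSubscriptionsForApp("orders-service");
        System.out.println(subs.size()); // 2

        registry.deregisterQuery("orders-service", "sub-1", "status=='FAILED'");
    }
}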
1,177
0
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/core/QueryMap.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.publish.core; import static io.mantisrx.connector.publish.core.ObjectUtils.checkNotNull; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import io.mantisrx.publish.proto.MantisServerSubscription; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class QueryMap { private static final Logger LOGGER = LoggerFactory.getLogger(QueryMap.class); private final Map<String, String> emptyMap = new HashMap<>(0); private final ConcurrentHashMap<String, MantisServerSubscriptionWrapper> subscriptionMap = new ConcurrentHashMap<>(); private final ConcurrentMap<String, ConcurrentMap<String, MantisServerSubscriptionWrapper>> appToSubscriptionMap = new ConcurrentHashMap<>(); private final String clientIdPrefix; QueryMap(String clientIdPrefix) { this.clientIdPrefix = clientIdPrefix; } void registerQuery(String subId, String query, Map<String, String> additionalParams) { registerQuery(subId, query, additionalParams, false); } void registerQuery(String subId, String query, Map<String, String> additionalParams, boolean validateQueryAsGroovy) { checkNotNull("subscriptionId", subId); checkNotNull("query", query); Map<String, String> addParams = (additionalParams == null) ? emptyMap : additionalParams; subscriptionMap.computeIfAbsent(subId, (s) -> new MantisServerSubscriptionWrapper(addMantisPrefix(subId), query, addParams)).incrementAndGetRefCount(); } boolean deregisterQuery(String subId, String query) { MantisServerSubscriptionWrapper subscription = subscriptionMap.computeIfPresent(subId, (k, v) -> { v.decrementRefCount(); return v; }); if (subscription != null) { if (subscription.getRefCount() <= 0) { LOGGER.info("Subscription ref count is 0 for subscriptionId " + subId + " removing subscription"); subscriptionMap.remove(subId); } else { LOGGER.info("Subscription ref count decremented for subscriptionId " + subId); } } else { LOGGER.info("Subscription " + subId + " not found"); } return true; } public List<MantisServerSubscription> getCurrentSubscriptions() { return subscriptionMap.values().stream().map(MantisServerSubscriptionWrapper::getSubscription).collect(Collectors.toList()); } private String addMantisPrefix(String subId) { return clientIdPrefix + "_" + subId; } public static class Builder { String prefix = null; Builder() { } Builder withClientIdPrefix(String prefix) { checkNotNull("prefix", prefix); this.prefix = prefix; return this; } QueryMap build() { checkNotNull("prefix", this.prefix); return new QueryMap(prefix); } } public static class MantisServerSubscriptionWrapper { private final MantisServerSubscription subscription; // Reference count used to dedupe repeated registrations of the same subscription from a client.
AtomicInteger refCount = new AtomicInteger(); MantisServerSubscriptionWrapper(String subId, String query, Map<String, String> additionalParams) { this.subscription = new MantisServerSubscription(subId, query, additionalParams); } MantisServerSubscription getSubscription() { return this.subscription; } int incrementAndGetRefCount() { return refCount.incrementAndGet(); } void decrementRefCount() { refCount.decrementAndGet(); } int getRefCount() { return refCount.get(); } @Override public String toString() { return "MantisServerSubscriptionWrapper{" + " subscription=" + subscription + ", refCount=" + refCount + '}'; } } }
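A behavior sketch of the reference counting (same package, since the constructor and register/deregister methods are package-private): registering the same subscriptionId repeatedly dedupes to a single subscription, which is only removed after a matching number of deregisters.

public class QueryMapSketch {
    public static void main(String[] args) {
        QueryMap queryMap = new QueryMap("myPrefix");
        queryMap.registerQuery("subId", "true", null, false);
        queryMap.registerQuery("subId", "true", null, false); // refCount -> 2
        queryMap.deregisterQuery("subId", "true");            // refCount -> 1, still present
        System.out.println(queryMap.getCurrentSubscriptions().size()); // 1
        queryMap.deregisterQuery("subId", "true");            // refCount -> 0, removed
        System.out.println(queryMap.getCurrentSubscriptions().size()); // 0
    }
}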
1,178
0
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/core/ObjectUtils.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.publish.core; import java.util.Arrays; import java.util.List; public class ObjectUtils { public static void checkNotNull(String paramName, String param) { if (param == null || param.isEmpty()) { throw new IllegalArgumentException(paramName + " cannot be null or empty"); } } public static void checkArgCondition(String paramName, boolean condition) { if (!condition) { throw new IllegalArgumentException(paramName + " is invalid"); } } public static List<String> convertCommaSeparatedStringToList(String str) { return Arrays.asList(str.trim().split(",")); } }
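Note that convertCommaSeparatedStringToList trims only the whole string, not the individual items, so spaces around commas are preserved. A short sketch:

public class ObjectUtilsSketch {
    public static void main(String[] args) {
        System.out.println(ObjectUtils.convertCommaSeparatedStringToList(" a, b,c "));
        // Prints [a,  b, c]: the whole string is trimmed, but items keep inner spaces.
    }
}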
1,179
0
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/core/EventFilter.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.publish.core; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; import com.mantisrx.common.utils.MantisSourceJobConstants; import org.apache.log4j.Logger; import rx.functions.Func1; public class EventFilter implements Func1<Map<String, List<String>>, Func1<String, Boolean>> { private static final Logger LOGGER = Logger.getLogger(EventFilter.class); private final String clientId; public EventFilter(String clientId) { ObjectUtils.checkNotNull("clientId", clientId); this.clientId = clientId; } @Override public Func1<String, Boolean> call(Map<String, List<String>> parameters) { Func1<String, Boolean> filter = t1 -> true; if (parameters != null) { if (parameters.containsKey(MantisSourceJobConstants.FILTER_PARAM_NAME)) { String filterBy = parameters.get(MantisSourceJobConstants.FILTER_PARAM_NAME).get(0); List<String> terms = convertCommaSeparatedEventsToList(filterBy); LOGGER.info("terms: " + terms); // Create filter function based on parameter value. filter = new SourceEventFilter(terms); } else if (parameters.containsKey(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME)) { String subId = parameters.get(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME).get(0); List<String> terms = new ArrayList<>(); terms.add(clientId + "_" + subId); filter = new SourceEventFilter(terms); } } return filter; } private List<String> convertCommaSeparatedEventsToList(String filterBy) { List<String> terms = new ArrayList<>(); if (filterBy != null && !filterBy.isEmpty()) { terms = Arrays.asList(filterBy.split("\\s*,\\s*")); } return terms; } private static class SourceEventFilter implements Func1<String, Boolean> { private final List<String> terms; SourceEventFilter(List<String> terms) { this.terms = terms; LOGGER.info("Initialized with terms: " + terms); } @Override public Boolean call(String data) { boolean match = true; if (data != null && !data.isEmpty()) { for (String term : terms) { if (!data.contains(term)) { match = false; break; } } } else { match = false; } return match; } } }
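A usage sketch: materialize the filter for a subscription and apply it to (hypothetical) event payloads. An event passes when it contains this client's prefixed subscription id; note the check is a raw substring match on the serialized event, as the tests above also rely on.

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.mantisrx.common.utils.MantisSourceJobConstants;
import io.mantisrx.connector.publish.core.EventFilter;
import rx.functions.Func1;

public class EventFilterSketch {
    public static void main(String[] args) {
        EventFilter filter = new EventFilter("myClientId");
        Map<String, List<String>> params = new HashMap<>();
        params.put(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME,
                Collections.singletonList("mySubId"));
        Func1<String, Boolean> materialized = filter.call(params);

        System.out.println(materialized.call(
                "{\"type\":\"EVENT\",\"matchedClients\":[\"myClientId_mySubId\"]}")); // true
        System.out.println(materialized.call("{\"matchedClients\":[]}"));             // false
    }
}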
1,180
0
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/NettyExceptionHandler.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.publish.source.http; import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION; import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CompletableFuture; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.handler.codec.http.DefaultHttpResponse; import io.netty.handler.codec.http.HttpHeaderValues; import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.codec.http.LastHttpContent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class NettyExceptionHandler extends SimpleChannelInboundHandler<HttpRequest> { private final Map<String, String> responseHeaders = new HashMap<>(); private static final Logger LOGGER = LoggerFactory.getLogger(NettyExceptionHandler.class); // MetricGroupId metricGroupId; // Counter invalidRequestCount; public NettyExceptionHandler() { // metricGroupId = new MetricGroupId(METRIC_GROUP + "_incoming"); // // Metrics m = new Metrics.Builder().id(metricGroupId).addCounter("InvalidRequestCount").build(); // // m = MetricsRegistry.getInstance().registerAndGet(m); // // invalidRequestCount = m.getCounter("InvalidRequestCount"); } @Override protected void channelRead0(ChannelHandlerContext ctx, HttpRequest message) { // We can't deal with this message; no one in the pipeline handled it. Log it. LOGGER.warn("Unknown message received: {}", message); // invalidRequestCount.increment(); sendResponse( ctx, false, message + " Bad request received.", HttpResponseStatus.BAD_REQUEST, responseHeaders); } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { // invalidRequestCount.increment(); LOGGER.warn("Unhandled exception", cause); sendResponse( ctx, false, "Internal server error: " + cause.getMessage(), HttpResponseStatus.INTERNAL_SERVER_ERROR, responseHeaders); } /** * Sends the given response and status code to the given channel. * * @param channelHandlerContext identifying the open channel * @param keepAlive If the connection should be kept alive.
* @param message which should be sent * @param statusCode of the message to send * @param headers additional header values */ public static CompletableFuture<Void> sendResponse( ChannelHandlerContext channelHandlerContext, boolean keepAlive, String message, HttpResponseStatus statusCode, Map<String, String> headers) { HttpResponse response = new DefaultHttpResponse(HTTP_1_1, statusCode); response.headers().set(CONTENT_TYPE, "application/json"); for (Map.Entry<String, String> headerEntry : headers.entrySet()) { response.headers().set(headerEntry.getKey(), headerEntry.getValue()); } if (keepAlive) { response.headers().set(CONNECTION, HttpHeaderValues.KEEP_ALIVE); } byte[] buf = message.getBytes(StandardCharsets.UTF_8); ByteBuf b = Unpooled.copiedBuffer(buf); HttpUtil.setContentLength(response, buf.length); // write the initial line and the header. channelHandlerContext.write(response); channelHandlerContext.write(b); ChannelFuture lastContentFuture = channelHandlerContext.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT); // close the connection, if no keep-alive is needed if (!keepAlive) { lastContentFuture.addListener(ChannelFutureListener.CLOSE); } return toCompletableFuture(lastContentFuture); } private static CompletableFuture<Void> toCompletableFuture(final ChannelFuture channelFuture) { final CompletableFuture<Void> completableFuture = new CompletableFuture<>(); channelFuture.addListener(future -> { if (future.isSuccess()) { completableFuture.complete(null); } else { completableFuture.completeExceptionally(future.cause()); } }); return completableFuture; } }
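Since sendResponse is public and static, other handlers can reuse it. A sketch of a hypothetical acknowledging handler (names illustrative, same package or with NettyExceptionHandler imported):

import java.util.Collections;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpUtil;

public class AckHandlerSketch extends SimpleChannelInboundHandler<HttpRequest> {
    @Override
    protected void channelRead0(ChannelHandlerContext ctx, HttpRequest request) {
        NettyExceptionHandler.sendResponse(
                ctx,
                HttpUtil.isKeepAlive(request), // honor the client's keep-alive preference
                "{\"status\":\"ok\"}",
                HttpResponseStatus.OK,
                Collections.emptyMap());
    }
}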
1,181
0
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/SourceHttpServer.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.publish.source.http; import io.mantisrx.connector.publish.core.QueryRegistry; import rx.subjects.Subject; public interface SourceHttpServer { String METRIC_GROUP = "PushServer"; enum State { NOTINITED, INITED, RUNNING, SHUTDOWN } void init(QueryRegistry registry, Subject<String, String> eventSubject, int port) throws InterruptedException; void startServer(); void shutdownServer(); }
1,182
0
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/NettySourceHttpServer.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.publish.source.http; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import io.mantisrx.connector.publish.core.QueryRegistry; import io.mantisrx.runtime.Context; import io.netty.bootstrap.ServerBootstrap; import io.netty.channel.Channel; import io.netty.channel.ChannelOption; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.nio.NioServerSocketChannel; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.subjects.Subject; public class NettySourceHttpServer implements SourceHttpServer { private static final Logger LOGGER = LoggerFactory.getLogger(NettySourceHttpServer.class); private final NioEventLoopGroup workerGroup; private final NioEventLoopGroup bossGroup; private Runnable nettyServerRunnable; private volatile boolean isInitialized = false; private volatile boolean isStarted = false; public NettySourceHttpServer(Context context, int threadCount) { this.bossGroup = new NioEventLoopGroup(threadCount); this.workerGroup = new NioEventLoopGroup(); } @Override public void init(QueryRegistry queryRegistry, Subject<String, String> eventSubject, int port) { if (!isInitialized) { nettyServerRunnable = () -> { try { ServerBootstrap b = new ServerBootstrap(); b.option(ChannelOption.SO_BACKLOG, 1024); b.group(bossGroup, workerGroup) .channel(NioServerSocketChannel.class) .childHandler(new HttpServerInitializer(queryRegistry, eventSubject)); Channel ch = b.bind(port).sync().channel(); ch.closeFuture().sync(); } catch (Exception e) { LOGGER.error("HTTP source server terminated", e); } finally { bossGroup.shutdownGracefully(); workerGroup.shutdownGracefully(); } }; isInitialized = true; } } @Override public void startServer() { if (isInitialized && !isStarted) { ExecutorService executor = Executors.newSingleThreadExecutor(); executor.submit(nettyServerRunnable); Runtime.getRuntime().addShutdownHook(new Thread(this::shutdownServer)); isStarted = true; } else { throw new IllegalStateException("Server not initialized or already started"); } } @Override public void shutdownServer() { if (isInitialized && isStarted) { bossGroup.shutdownGracefully(); workerGroup.shutdownGracefully(); } } }
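A lifecycle sketch (hypothetical wiring; in a real job the Context comes from the Mantis runtime, and port 9090 is illustrative): init is effectively idempotent, startServer runs the bootstrap on a single-threaded executor, and a second startServer throws IllegalStateException.

import io.mantisrx.connector.publish.core.QueryRegistry;
import io.mantisrx.runtime.Context;
import rx.subjects.PublishSubject;
import rx.subjects.SerializedSubject;
import rx.subjects.Subject;

public class ServerLifecycleSketch {
    public static void start(Context context) throws InterruptedException {
        QueryRegistry registry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build();
        Subject<String, String> events = new SerializedSubject<>(PublishSubject.<String>create());
        SourceHttpServer server = new NettySourceHttpServer(context, 4);
        server.init(registry, events, 9090); // repeat calls are no-ops
        server.startServer();                // a second call throws IllegalStateException
        // ... later, or via the registered shutdown hook:
        server.shutdownServer();
    }
}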
1,183
0
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/HttpServerInitializer.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.publish.source.http; import io.mantisrx.connector.publish.core.QueryRegistry; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelPipeline; import io.netty.channel.socket.SocketChannel; import io.netty.handler.codec.http.HttpContentDecompressor; import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpServerCodec; import rx.subjects.Subject; public class HttpServerInitializer extends ChannelInitializer<SocketChannel> { private final QueryRegistry registry; private final Subject<String, String> eventSubject; private static final int DEFAULT_MAX_INITIAL_LENGTH = 4096; private static final int DEFAULT_MAX_HEADER_SIZE = 16384; private static final int DEFAULT_MAX_CHUNK_SIZE = 32768; private static final int DEFAULT_MAX_CONTENT_LENGTH = 1048576; public HttpServerInitializer(QueryRegistry registry, Subject<String, String> eventSubject) { this.registry = registry; this.eventSubject = eventSubject; } @Override protected void initChannel(SocketChannel ch) { ChannelPipeline p = ch.pipeline(); p.addLast("http", new HttpServerCodec(DEFAULT_MAX_INITIAL_LENGTH, DEFAULT_MAX_HEADER_SIZE, DEFAULT_MAX_CHUNK_SIZE)); p.addLast("inflater", new HttpContentDecompressor()); p.addLast("aggregator", new HttpObjectAggregator(DEFAULT_MAX_CONTENT_LENGTH)); p.addLast(new HttpSourceServerHandler(registry, eventSubject)); p.addLast(new NettyExceptionHandler()); } }
1,184
0
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/SourceSink.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.publish.source.http; import java.util.List; import java.util.Map; import io.mantisrx.connector.publish.core.EventFilter; import io.mantisrx.runtime.Context; import io.mantisrx.runtime.PortRequest; import io.mantisrx.runtime.sink.ServerSentEventsSink; import io.mantisrx.runtime.sink.Sink; import io.mantisrx.runtime.sink.predicate.Predicate; import rx.Observable; import rx.functions.Func2; public class SourceSink implements Sink<String> { private final String clientId; private Func2<Map<String, List<String>>, Context, Void> preProcessor = new NoOpProcessor(); private Func2<Map<String, List<String>>, Context, Void> postProcessor = new NoOpProcessor(); static class NoOpProcessor implements Func2<Map<String, List<String>>, Context, Void> { @Override public Void call(Map<String, List<String>> t1, Context t2) { return null; } } public SourceSink(Func2<Map<String, List<String>>, Context, Void> preProcessor, Func2<Map<String, List<String>>, Context, Void> postProcessor, String mantisClientId) { this.postProcessor = postProcessor; this.preProcessor = preProcessor; this.clientId = mantisClientId; } @Override public void call(Context context, PortRequest portRequest, Observable<String> observable) { observable = observable.filter(t1 -> !t1.isEmpty()); ServerSentEventsSink<String> sink = new ServerSentEventsSink.Builder<String>() .withEncoder(data -> data) .withPredicate(new Predicate<>("description", new EventFilter(clientId))) .withRequestPreprocessor(preProcessor) .withRequestPostprocessor(postProcessor) .build(); observable.subscribe(); sink.call(context, portRequest, observable); } }
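A construction sketch (same package, since NoOpProcessor is package-private): wire the sink with no-op pre/post processors and the client id whose prefix the EventFilter will match against.

import java.util.List;
import java.util.Map;
import io.mantisrx.runtime.Context;
import rx.functions.Func2;

public class SourceSinkSketch {
    public static SourceSink build() {
        Func2<Map<String, List<String>>, Context, Void> noOp = new SourceSink.NoOpProcessor();
        // The clientId must match the prefix used when registering subscriptions.
        return new SourceSink(noOp, noOp, "myClientId");
    }
}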
1,185
0
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/PushHttpSource.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.publish.source.http; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.concurrent.atomic.AtomicReference; import com.mantisrx.common.utils.MantisSourceJobConstants; import io.mantisrx.connector.publish.core.QueryRegistry; import io.mantisrx.runtime.Context; import io.mantisrx.runtime.WorkerMap; import io.mantisrx.runtime.parameter.ParameterDefinition; import io.mantisrx.runtime.parameter.type.IntParameter; import io.mantisrx.runtime.parameter.type.StringParameter; import io.mantisrx.runtime.parameter.validator.Validators; import io.mantisrx.runtime.source.Index; import io.mantisrx.runtime.source.Source; import io.reactivx.mantis.operators.DropOperator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.schedulers.Schedulers; import rx.subjects.PublishSubject; import rx.subjects.SerializedSubject; import rx.subjects.Subject; public class PushHttpSource implements Source<String> { private static final Logger LOGGER = LoggerFactory.getLogger(PushHttpSource.class); private final Subject<String, String> eventSubject = new SerializedSubject<>(PublishSubject.create()); private final QueryRegistry queryRegistry; private final int serverPort; private AtomicReference<WorkerMap> workerMapAtomicReference = new AtomicReference<>(new WorkerMap(new HashMap<>())); private static final String NETTY_THREAD_COUNT_PARAM_NAME = "nettyThreadCount"; public PushHttpSource(QueryRegistry registry, int serverPort) { this.queryRegistry = registry; this.serverPort = serverPort; } @Override public Observable<Observable<String>> call(Context context, Index index) { return Observable.just(eventSubject .lift(new DropOperator<>("incoming_" + PushHttpSource.class.getCanonicalName() + "_batch")) .onErrorResumeNext((e) -> Observable.empty())); } @Override public void init(Context context, Index index) { LOGGER.info("Initializing PushHttpSource"); int threadCount = (Integer) context.getParameters().get(NETTY_THREAD_COUNT_PARAM_NAME, 4); LOGGER.info("PushHttpSource server starting at Port " + serverPort); SourceHttpServer server = new NettySourceHttpServer(context, threadCount); try { server.init(queryRegistry, eventSubject, serverPort); } catch (InterruptedException e) { throw new RuntimeException(e); } server.startServer(); context.getWorkerMapObservable().subscribeOn(Schedulers.io()).subscribe((workerMap) -> { LOGGER.info("Got WorkerUpdate " + workerMap); workerMapAtomicReference.set(workerMap); }); LOGGER.info("PushHttpSource server started"); } @Override public List<ParameterDefinition<?>> getParameters() { List<ParameterDefinition<?>> parameters = new ArrayList<>(); parameters.add(new IntParameter() .name(NETTY_THREAD_COUNT_PARAM_NAME) .validator(Validators.range(1, 8)) .defaultValue(4) .build()); parameters.add(new StringParameter() .name(MantisSourceJobConstants.ZONE_LIST_PARAMETER_NAME) .description("list of Zones")
.validator(Validators.alwaysPass()) .defaultValue("") .build()); parameters.add(new StringParameter() .name(MantisSourceJobConstants.TARGET_APP_PARAMETER_NAME) .description("target app") .validator(Validators.alwaysPass()) .defaultValue("") .build()); parameters.add(new StringParameter() .name(MantisSourceJobConstants.TARGET_ASG_CSV_PARAM) .description("target ASGs CSV regex") .validator(Validators.alwaysPass()) .defaultValue("") .build()); return parameters; } }
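For tests, the netty thread-count parameter read in init() can be overridden by constructing Parameters directly, mirroring the StageOverrideParameters helper that appears later in this document; the value passed in is illustrative.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import io.mantisrx.runtime.parameter.Parameters;

public final class PushHttpSourceParamsSketch {
    public static Parameters withThreadCount(int threadCount) {
        Map<String, Object> state = new HashMap<>();
        Set<String> required = new HashSet<>();
        state.put("nettyThreadCount", threadCount); // read in init() with a default of 4
        return new Parameters(state, required, required);
    }
}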
1,186
0
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source
Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/HttpSourceServerHandler.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.publish.source.http; import static io.netty.handler.codec.http.HttpResponseStatus.OK; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; import java.util.List; import com.fasterxml.jackson.databind.ObjectMapper; import io.mantisrx.common.metrics.Counter; import io.mantisrx.common.metrics.Metrics; import io.mantisrx.common.metrics.MetricsRegistry; import io.mantisrx.common.metrics.spectator.MetricGroupId; import io.mantisrx.connector.publish.core.QueryRegistry; import io.mantisrx.publish.proto.MantisServerSubscription; import io.mantisrx.publish.proto.MantisServerSubscriptionEnvelope; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.handler.codec.http.DefaultFullHttpResponse; import io.netty.handler.codec.http.FullHttpMessage; import io.netty.handler.codec.http.FullHttpResponse; import io.netty.handler.codec.http.HttpMethod; import io.netty.handler.codec.http.HttpObject; import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpUtil; import io.netty.util.AsciiString; import io.netty.util.CharsetUtil; import mantis.io.reactivex.netty.protocol.http.server.UriInfoHolder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.subjects.Subject; public class HttpSourceServerHandler extends SimpleChannelInboundHandler<HttpObject> { private static final Logger LOGGER = LoggerFactory.getLogger(HttpSourceServerHandler.class); private static final byte[] CONTENT = {'O', 'K'}; private static final AsciiString CONTENT_TYPE = AsciiString.cached("Content-Type"); private static final AsciiString CONTENT_LENGTH = AsciiString.cached("Content-Length"); private static final AsciiString CONNECTION = AsciiString.cached("Connection"); private static final AsciiString KEEP_ALIVE = AsciiString.cached("keep-alive"); ObjectMapper mapper = new ObjectMapper(); private final Counter getRequestCount; private final Counter unknownRequestCount; private final Counter postRequestCount; MetricGroupId metricGroupId; private final QueryRegistry registry; private final Subject<String, String> eventSubject; public HttpSourceServerHandler(QueryRegistry queryRegistry, Subject<String, String> eventSubject) { registry = queryRegistry; this.eventSubject = eventSubject; metricGroupId = new MetricGroupId(SourceHttpServer.METRIC_GROUP + "_incoming"); Metrics m = new Metrics.Builder() .id(metricGroupId) .addCounter("GetRequestCount") .addCounter("PostRequestCount") .addCounter("UnknownRequestCount") .build(); m = MetricsRegistry.getInstance().registerAndGet(m); getRequestCount = m.getCounter("GetRequestCount"); unknownRequestCount = m.getCounter("UnknownRequestCount"); postRequestCount = m.getCounter("PostRequestCount"); } @Override public void channelReadComplete(ChannelHandlerContext ctx) { ctx.flush(); } 
@Override protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) { if (msg instanceof HttpRequest) { HttpRequest req = (HttpRequest) msg; boolean keepAlive = HttpUtil.isKeepAlive(req); if (req.method().equals(HttpMethod.GET)) { getRequestCount.increment(); UriInfoHolder uriInfoHolder = new UriInfoHolder(req.uri()); List<MantisServerSubscription> currentSubscriptions = registry.getCurrentSubscriptions(uriInfoHolder.getQueryParameters()); try { byte[] serializedSubs = mapper.writeValueAsBytes(new MantisServerSubscriptionEnvelope(currentSubscriptions)); FullHttpResponse response = new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(serializedSubs)); response.headers().set(CONTENT_TYPE, "application/json"); response.headers().setInt(CONTENT_LENGTH, response.content().readableBytes()); if (!keepAlive) { ctx.write(response).addListener(ChannelFutureListener.CLOSE); } else { response.headers().set(CONNECTION, KEEP_ALIVE); ctx.write(response); } } catch (Exception e) { LOGGER.error("problem serializing subscriptions response", e); } } else { if (req.method().equals(HttpMethod.POST)) { postRequestCount.increment(); FullHttpMessage aggregator = (FullHttpMessage) msg; ByteBuf content = aggregator.content(); String data = content.toString(CharsetUtil.UTF_8); if (LOGGER.isDebugEnabled()) { LOGGER.debug("got data " + data); } eventSubject.onNext(data); FullHttpResponse response = new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(CONTENT)); response.headers().set(CONTENT_TYPE, "text/plain"); response.headers().setInt(CONTENT_LENGTH, response.content().readableBytes()); if (!keepAlive) { ctx.write(response).addListener(ChannelFutureListener.CLOSE); } else { response.headers().set(CONNECTION, KEEP_ALIVE); ctx.write(response); } } else { unknownRequestCount.increment(); } } } } }
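From a client's perspective, this handler accepts POSTed event payloads (pushed onto the event Subject, answered with 200 "OK") and answers GETs with the current subscriptions as JSON. A minimal JDK-only client sketch; the host, port, and payload shape are assumptions.

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public final class PublishClientSketch {
    public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:7171/"); // host and port are assumptions
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        try (OutputStream out = conn.getOutputStream()) {
            // Payload format is up to the job; JSON is typical.
            out.write("{\"event\":\"hello\"}".getBytes(StandardCharsets.UTF_8));
        }
        System.out.println(conn.getResponseCode()); // 200 with body "OK" on success
    }
}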
1,187
0
Create_ds/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg
Create_ds/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink/StageOverrideParameters.java
/* * Copyright 2020 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.iceberg.sink; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import io.mantisrx.connector.iceberg.sink.config.SinkProperties; import io.mantisrx.connector.iceberg.sink.writer.config.WriterProperties; import io.mantisrx.runtime.parameter.Parameters; public class StageOverrideParameters { private StageOverrideParameters() { } public static Parameters newParameters() { Map<String, Object> state = new HashMap<>(); Set<String> required = new HashSet<>(); required.add(SinkProperties.SINK_CATALOG); state.put(SinkProperties.SINK_CATALOG, "catalog"); required.add(SinkProperties.SINK_DATABASE); state.put(SinkProperties.SINK_DATABASE, "database"); required.add(SinkProperties.SINK_TABLE); state.put(SinkProperties.SINK_TABLE, "table"); required.add(WriterProperties.WRITER_FLUSH_FREQUENCY_MSEC); state.put(WriterProperties.WRITER_FLUSH_FREQUENCY_MSEC, "5000"); return new Parameters(state, required, required); } }
1,188
0
Create_ds/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink
Create_ds/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink/codecs/IcebergCodecsTest.java
/* * Copyright 2020 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.iceberg.sink.codecs; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.mock; import java.util.Collections; import io.mantisrx.common.codec.Codec; import org.apache.iceberg.DataFile; import org.apache.iceberg.DataFiles; import org.apache.iceberg.Metrics; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.data.GenericRecord; import org.apache.iceberg.data.Record; import org.apache.iceberg.types.Types; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; class IcebergCodecsTest { private static final Schema SCHEMA = new Schema(Types.NestedField.required(1, "id", Types.IntegerType.get())); private Codec<Record> recordCodec; private Codec<DataFile> dataFileCodec; @BeforeEach void setUp() { this.recordCodec = IcebergCodecs.record(SCHEMA); this.dataFileCodec = IcebergCodecs.dataFile(); } @Test void shouldEncodeAndDecodeRecord() { Record expected = GenericRecord.create(SCHEMA); expected.setField("id", 1); byte[] encoded = recordCodec.encode(expected); Record actual = recordCodec.decode(encoded); assertEquals(expected, actual); } @Test void shouldEncodeAndDecodeDataFile() { PartitionSpec spec = PartitionSpec.unpartitioned(); DataFile expected = DataFiles.builder(spec) .withPath("/path/filename.parquet") .withFileSizeInBytes(1) .withPartition(null) .withMetrics(mock(Metrics.class)) .withSplitOffsets(Collections.singletonList(1L)) .build(); byte[] encoded = dataFileCodec.encode(expected); DataFile actual = dataFileCodec.decode(encoded); assertEquals(expected.path(), actual.path()); assertEquals(expected.fileSizeInBytes(), actual.fileSizeInBytes()); assertEquals(expected.partition(), actual.partition()); assertEquals(expected.splitOffsets(), actual.splitOffsets()); } }
1,189
0
Create_ds/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink
Create_ds/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink/writer/IcebergWriterStageTest.java
/* * Copyright 2020 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.iceberg.sink.writer; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; import java.util.concurrent.TimeUnit; import io.mantisrx.connector.iceberg.sink.StageOverrideParameters; import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig; import io.mantisrx.connector.iceberg.sink.writer.metrics.WriterMetrics; import io.mantisrx.connector.iceberg.sink.writer.partitioner.Partitioner; import io.mantisrx.connector.iceberg.sink.writer.partitioner.PartitionerFactory; import io.mantisrx.runtime.Context; import io.mantisrx.runtime.lifecycle.ServiceLocator; import io.mantisrx.runtime.parameter.Parameters; import org.apache.hadoop.conf.Configuration; import org.apache.iceberg.DataFile; import org.apache.iceberg.DataFiles; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.StructLike; import org.apache.iceberg.Table; import org.apache.iceberg.catalog.Catalog; import org.apache.iceberg.data.GenericRecord; import org.apache.iceberg.data.Record; import org.apache.iceberg.types.Types; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import rx.Observable; import rx.observers.TestSubscriber; import rx.schedulers.TestScheduler; class IcebergWriterStageTest { private TestScheduler scheduler; private TestSubscriber<DataFile> subscriber; private IcebergWriterStage.Transformer transformer; private Catalog catalog; private Table table; private Context context; private IcebergWriter writer; private Partitioner partitioner; private Observable<DataFile> flow; private static final Schema SCHEMA = new Schema(Types.NestedField.required(1, "id", Types.IntegerType.get())); private Record record; @BeforeEach void setUp() { record = GenericRecord.create(SCHEMA); record.setField("id", 1); this.scheduler = new TestScheduler(); this.subscriber = new TestSubscriber<>(); // Writer Parameters parameters = StageOverrideParameters.newParameters(); WriterConfig config = new WriterConfig(parameters, mock(Configuration.class)); WriterMetrics metrics = new WriterMetrics(); this.writer = spy(FakeIcebergWriter.class); this.partitioner = mock(Partitioner.class); when(this.writer.length()).thenReturn(Long.MAX_VALUE); this.transformer = new IcebergWriterStage.Transformer( config, metrics, this.writer, this.partitioner, this.scheduler, this.scheduler); // Catalog ServiceLocator serviceLocator = mock(ServiceLocator.class); when(serviceLocator.service(Configuration.class)).thenReturn(mock(Configuration.class)); this.catalog = 
mock(Catalog.class); this.table = mock(Table.class); when(this.table.spec()).thenReturn(PartitionSpec.unpartitioned()); when(this.catalog.loadTable(any())).thenReturn(this.table); when(serviceLocator.service(Catalog.class)).thenReturn(this.catalog); when(serviceLocator.service(PartitionerFactory.class)).thenReturn(mock(PartitionerFactory.class)); // Mantis Context this.context = mock(Context.class); when(this.context.getParameters()).thenReturn(parameters); when(this.context.getServiceLocator()).thenReturn(serviceLocator); // Flow Observable<Record> source = Observable.interval(1, TimeUnit.MILLISECONDS, this.scheduler) .map(i -> record); this.flow = source.compose(this.transformer); } @Test void shouldCloseOnNewPartition() throws IOException { PartitionSpec spec = PartitionSpec.builderFor(SCHEMA).identity("id").build(); when(table.spec()).thenReturn(spec); when(catalog.loadTable(any())).thenReturn(table); Record recordWithNewPartition = GenericRecord.create(SCHEMA); recordWithNewPartition.setField("id", 2); // Identity partitioning. when(partitioner.partition(record)).thenReturn(record); when(partitioner.partition(recordWithNewPartition)).thenReturn(recordWithNewPartition); Observable<Record> source = Observable.just(record, record, recordWithNewPartition, record) .concatMap(r -> Observable.just(r).delay(1, TimeUnit.MILLISECONDS, scheduler)); flow = source.compose(transformer); flow.subscribeOn(scheduler).subscribe(subscriber); // Same partition; no other thresholds (size, time) met. scheduler.advanceTimeBy(2, TimeUnit.MILLISECONDS); subscriber.assertNoValues(); // New partition detected scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS); subscriber.assertValueCount(1); // New partition detected scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS); subscriber.assertValueCount(2); verify(writer, times(4)).write(any()); // Two closes for [record, record] and [recordWithNewPartition]; a file is still open from the latest write. verify(writer, times(2)).close(); verify(writer, times(3)).open(any()); } @Test void shouldCloseOnSizeThreshold() throws IOException { flow.subscribeOn(scheduler).subscribe(subscriber); // Greater than size threshold, but not yet checked at row-group-size config. scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS); subscriber.assertNoValues(); scheduler.advanceTimeBy(99, TimeUnit.MILLISECONDS); subscriber.assertValueCount(1); scheduler.advanceTimeBy(100, TimeUnit.MILLISECONDS); subscriber.assertValueCount(2); subscriber.assertNoTerminalEvent(); verify(writer, times(200)).write(any()); verify(writer, times(2)).close(); } @Test void shouldNotCloseWhenUnderSizeThreshold() throws IOException { when(writer.length()).thenReturn(1L); flow.subscribeOn(scheduler).subscribe(subscriber); // Size is checked at row-group-size config, but under size-threshold, so no-op. scheduler.advanceTimeBy(100, TimeUnit.MILLISECONDS); subscriber.assertNoValues(); subscriber.assertNoTerminalEvent(); verify(writer, times(100)).write(any()); verify(writer, times(0)).close(); } @Test void shouldCloseWhenLowVolumeOnTimeThreshold() throws IOException { when(writer.length()).thenReturn(1L); flow.subscribeOn(scheduler).subscribe(subscriber); scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS); subscriber.assertNoValues(); // Size is checked at row-group-size config, but under size threshold, so no-op. scheduler.advanceTimeBy(999, TimeUnit.MILLISECONDS); subscriber.assertNoValues(); // Hits time threshold; proceed to close. 
scheduler.advanceTimeBy(4000, TimeUnit.MILLISECONDS); subscriber.assertValueCount(1); subscriber.assertNoTerminalEvent(); verify(writer, times(5000)).write(any()); verify(writer, times(1)).close(); } @Test void shouldCloseWhenHighVolumeOnTimeThreshold() throws IOException { Observable<Record> source = Observable.interval(500, TimeUnit.MILLISECONDS, scheduler) .map(i -> record); flow = source.compose(transformer); flow.subscribeOn(scheduler).subscribe(subscriber); // Over the size threshold, but not yet checked at row-group-size config. scheduler.advanceTimeBy(500, TimeUnit.MILLISECONDS); subscriber.assertNoValues(); // Hits time threshold and there's data to write; proceed to close. scheduler.advanceTimeBy(4500, TimeUnit.MILLISECONDS); subscriber.assertValueCount(1); subscriber.assertNoTerminalEvent(); verify(writer, times(10)).write(any()); verify(writer, times(1)).close(); } @Test void shouldNoOpWhenNoDataOnTimeThreshold() throws IOException { // Low volume stream. Observable<Record> source = Observable.interval(10_000, TimeUnit.MILLISECONDS, scheduler) .map(i -> record); flow = source.compose(transformer); flow.subscribeOn(scheduler).subscribe(subscriber); scheduler.advanceTimeBy(5000, TimeUnit.MILLISECONDS); subscriber.assertNoValues(); subscriber.assertNoErrors(); subscriber.assertNoTerminalEvent(); verify(writer, times(0)).open(any()); verify(writer, times(0)).write(any()); verify(writer, times(1)).isClosed(); verify(writer, times(0)).close(); } @Test void shouldNoOpCloseWhenFailedToOpen() throws IOException { doThrow(new IOException()).when(writer).open(any()); flow.subscribeOn(scheduler).subscribe(subscriber); scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS); subscriber.assertError(RuntimeException.class); subscriber.assertTerminalEvent(); verify(writer).open(any()); verify(writer, times(1)).isClosed(); verify(writer, times(1)).close(); } @Test void shouldContinueOnWriteFailure() { doThrow(new RuntimeException()).when(writer).write(any()); flow.subscribeOn(scheduler).subscribe(subscriber); scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS); subscriber.assertNoTerminalEvent(); scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS); subscriber.assertNoTerminalEvent(); verify(writer, times(2)).write(any()); } @Test @Disabled("Will never terminate: Source terminates, but timer will continue to tick") void shouldCloseOnTerminate() throws IOException { Observable<Record> source = Observable.just(record); Observable<DataFile> flow = source.compose(transformer); flow.subscribeOn(scheduler).subscribe(subscriber); scheduler.triggerActions(); subscriber.assertNoErrors(); verify(writer).open(); verify(writer).write(any()); verify(writer, times(2)).isClosed(); verify(writer, times(1)).close(); } @Test void shouldInitializeWithExistingTable() { IcebergWriterStage stage = new IcebergWriterStage(); assertDoesNotThrow(() -> stage.init(context)); } @Test void shouldFailToInitializeWithMissingTable() { when(catalog.loadTable(any())).thenThrow(new RuntimeException()); IcebergWriterStage stage = new IcebergWriterStage(); assertThrows(RuntimeException.class, () -> stage.init(context)); } private static class FakeIcebergWriter implements IcebergWriter { private static final DataFile DATA_FILE = new DataFiles.Builder() .withPath("/datafile.parquet") .withFileSizeInBytes(1L) .withRecordCount(1L) .build(); private final Object object; private Object fileAppender; private StructLike partitionKey; public FakeIcebergWriter() { this.object = new Object(); this.fileAppender = null; } @Override public void open() 
throws IOException { open(null); } @Override public void open(StructLike newPartitionKey) throws IOException { fileAppender = object; partitionKey = newPartitionKey; } @Override public void write(Record record) { } @Override public DataFile close() throws IOException { fileAppender = null; return DATA_FILE; } @Override public boolean isClosed() { return fileAppender == null; } @Override public long length() { return 0; } @Override public StructLike getPartitionKey() { return partitionKey; } } }
1,190
0
Create_ds/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink
Create_ds/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink/committer/IcebergCommitterStageTest.java
/* * Copyright 2020 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.iceberg.sink.committer; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; import io.mantisrx.connector.iceberg.sink.StageOverrideParameters; import io.mantisrx.connector.iceberg.sink.committer.config.CommitterConfig; import io.mantisrx.connector.iceberg.sink.committer.metrics.CommitterMetrics; import io.mantisrx.runtime.Context; import io.mantisrx.runtime.lifecycle.ServiceLocator; import io.mantisrx.runtime.parameter.Parameters; import org.apache.hadoop.conf.Configuration; import org.apache.iceberg.DataFile; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Table; import org.apache.iceberg.catalog.Catalog; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import rx.Observable; import rx.observers.TestSubscriber; import rx.schedulers.TestScheduler; class IcebergCommitterStageTest { private TestScheduler scheduler; private TestSubscriber<Map<String, Object>> subscriber; private Catalog catalog; private Context context; private IcebergCommitter committer; private IcebergCommitterStage.Transformer transformer; @BeforeEach void setUp() { this.scheduler = new TestScheduler(); this.subscriber = new TestSubscriber<>(); Parameters parameters = StageOverrideParameters.newParameters(); CommitterConfig config = new CommitterConfig(parameters); CommitterMetrics metrics = new CommitterMetrics(); this.committer = mock(IcebergCommitter.class); transformer = new IcebergCommitterStage.Transformer(config, metrics, committer, scheduler); ServiceLocator serviceLocator = mock(ServiceLocator.class); when(serviceLocator.service(Configuration.class)).thenReturn(mock(Configuration.class)); this.catalog = mock(Catalog.class); Table table = mock(Table.class); when(table.spec()).thenReturn(PartitionSpec.unpartitioned()); when(this.catalog.loadTable(any())).thenReturn(table); when(serviceLocator.service(Catalog.class)).thenReturn(this.catalog); this.context = mock(Context.class); when(this.context.getParameters()).thenReturn(parameters); when(this.context.getServiceLocator()).thenReturn(serviceLocator); } @Test void shouldCommitPeriodically() { Map<String, Object> summary = new HashMap<>(); summary.put("test", "test"); when(committer.commit(any())).thenReturn(summary); Observable<DataFile> source = Observable.interval(1, TimeUnit.MINUTES, scheduler) .map(i -> mock(DataFile.class)); Observable<Map<String, Object>> flow = source.compose(transformer); flow.subscribeOn(scheduler).subscribe(subscriber); scheduler.advanceTimeBy(1, TimeUnit.MINUTES); 
subscriber.assertNoValues(); subscriber.assertNotCompleted(); scheduler.advanceTimeBy(4, TimeUnit.MINUTES); subscriber.assertValueCount(1); scheduler.advanceTimeBy(5, TimeUnit.MINUTES); subscriber.assertValueCount(2); scheduler.advanceTimeBy(1, TimeUnit.MINUTES); subscriber.assertValueCount(2); subscriber.assertNoErrors(); verify(committer, times(2)).commit(any()); } @Test void shouldContinueOnCommitFailure() { doThrow(new RuntimeException()).when(committer).commit(any()); Observable<DataFile> source = Observable.interval(1, TimeUnit.MINUTES, scheduler) .map(i -> mock(DataFile.class)); Observable<Map<String, Object>> flow = source.compose(transformer); flow.subscribeOn(scheduler).subscribe(subscriber); scheduler.advanceTimeBy(5, TimeUnit.MINUTES); subscriber.assertNoErrors(); subscriber.assertNotCompleted(); subscriber.assertValueCount(0); verify(committer).commit(any()); } @Test void shouldInitializeWithExistingTable() { IcebergCommitterStage stage = new IcebergCommitterStage(); assertDoesNotThrow(() -> stage.init(context)); } @Test void shouldFailToInitializeWithMissingTable() { when(catalog.loadTable(any())).thenThrow(new RuntimeException()); IcebergCommitterStage stage = new IcebergCommitterStage(); assertThrows(RuntimeException.class, () -> stage.init(context)); } }
1,191
0
Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink
Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/codecs/IcebergCodecs.java
/* * Copyright 2020 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.iceberg.sink.codecs; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import io.mantisrx.common.codec.Codec; import org.apache.iceberg.DataFile; import org.apache.iceberg.Schema; import org.apache.iceberg.data.Record; import org.apache.iceberg.data.avro.IcebergDecoder; import org.apache.iceberg.data.avro.IcebergEncoder; import org.apache.iceberg.exceptions.RuntimeIOException; /** * Encoders and decoders for working with Iceberg objects * such as {@link Record}s and {@link DataFile}s. */ public class IcebergCodecs { /** * @return a codec for encoding/decoding Iceberg Records. */ public static Codec<Record> record(Schema schema) { return new RecordCodec<>(schema); } /** * @return a codec for encoding/decoding DataFiles. */ public static Codec<DataFile> dataFile() { return new DataFileCodec(); } private static class RecordCodec<T> implements Codec<T> { private final IcebergEncoder<T> encoder; private final IcebergDecoder<T> decoder; private RecordCodec(Schema schema) { this.encoder = new IcebergEncoder<>(schema); this.decoder = new IcebergDecoder<>(schema); } @Override public T decode(byte[] bytes) { try { return decoder.decode(bytes); } catch (IOException e) { throw new RuntimeIOException("problem decoding Iceberg record", e); } } @Override public byte[] encode(T value) { try { return encoder.encode(value).array(); } catch (IOException e) { throw new RuntimeIOException("problem encoding Iceberg record", e); } } } private static class DataFileCodec implements Codec<DataFile> { @Override public DataFile decode(byte[] bytes) { try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) { return (DataFile) in.readObject(); } catch (IOException | ClassNotFoundException e) { throw new RuntimeException("Failed to convert bytes to DataFile", e); } } @Override public byte[] encode(DataFile value) { ByteArrayOutputStream bytes = new ByteArrayOutputStream(); try (ObjectOutputStream out = new ObjectOutputStream(bytes)) { out.writeObject(value); } catch (IOException e) { throw new RuntimeException("Failed to write bytes for DataFile: " + value, e); } return bytes.toByteArray(); } } }
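A round-trip sketch using the same single-column schema style as the IcebergCodecsTest earlier in this document:

import io.mantisrx.common.codec.Codec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.data.GenericRecord;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.types.Types;

public final class CodecRoundTripSketch {
    public static void main(String[] args) {
        Schema schema = new Schema(Types.NestedField.required(1, "id", Types.IntegerType.get()));
        Codec<Record> codec = IcebergCodecs.record(schema);

        Record record = GenericRecord.create(schema);
        record.setField("id", 42);

        byte[] encoded = codec.encode(record);   // Avro-encoded bytes, suitable for inter-stage transport
        Record decoded = codec.decode(encoded);
        System.out.println(decoded.getField("id")); // prints 42
    }
}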
1,192
0
Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink
Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/config/SinkConfig.java
/* * Copyright 2020 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.iceberg.sink.config; import static io.mantisrx.connector.iceberg.sink.config.SinkProperties.SINK_CATALOG; import static io.mantisrx.connector.iceberg.sink.config.SinkProperties.SINK_DATABASE; import static io.mantisrx.connector.iceberg.sink.config.SinkProperties.SINK_TABLE; import io.mantisrx.connector.iceberg.sink.committer.config.CommitterConfig; import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig; import io.mantisrx.runtime.parameter.Parameters; /** * Convenient base config used by {@link WriterConfig} and {@link CommitterConfig}. */ public class SinkConfig { private final String catalog; private final String database; private final String table; /** * Creates an instance from {@link Parameters} derived from the current Mantis Stage's {@code Context}. */ public SinkConfig(Parameters parameters) { this.catalog = (String) parameters.get(SINK_CATALOG); this.database = (String) parameters.get(SINK_DATABASE); this.table = (String) parameters.get(SINK_TABLE); } /** * Returns a String for Iceberg Catalog name. */ public String getCatalog() { return catalog; } /** * Returns a String for the database name in a catalog. */ public String getDatabase() { return database; } /** * Returns a String for the table within a database. */ public String getTable() { return table; } }
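A small sketch showing how this config resolves against the StageOverrideParameters test fixture that appears earlier in this document:

import io.mantisrx.connector.iceberg.sink.StageOverrideParameters;
import io.mantisrx.runtime.parameter.Parameters;

public final class SinkConfigSketch {
    public static void main(String[] args) {
        Parameters parameters = StageOverrideParameters.newParameters();
        SinkConfig config = new SinkConfig(parameters);
        // Resolves to the fixture values: "catalog", "database", "table".
        System.out.println(config.getCatalog() + "." + config.getDatabase() + "." + config.getTable());
    }
}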
1,193
0
Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink
Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/config/SinkProperties.java
/* * Copyright 2020 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.iceberg.sink.config; /** * Property key names and default values for the base Iceberg Sink config. */ public class SinkProperties { private SinkProperties() { } /** * Name of Iceberg Catalog. */ public static final String SINK_CATALOG = "sinkCatalog"; public static final String SINK_CATALOG_DESCRIPTION = "Name of Iceberg Catalog"; /** * Name of database within Iceberg Catalog. */ public static final String SINK_DATABASE = "sinkDatabase"; public static final String SINK_DATABASE_DESCRIPTION = "Name of database within Iceberg Catalog"; /** * Name of table within database. */ public static final String SINK_TABLE = "sinkTable"; public static final String SINK_TABLE_DESCRIPTION = "Name of table within database"; }
1,194
0
Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink
Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/IcebergWriterStage.java
/* * Copyright 2020 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.iceberg.sink.writer; import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.concurrent.TimeUnit; import io.mantisrx.connector.iceberg.sink.codecs.IcebergCodecs; import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig; import io.mantisrx.connector.iceberg.sink.writer.config.WriterProperties; import io.mantisrx.connector.iceberg.sink.writer.metrics.WriterMetrics; import io.mantisrx.connector.iceberg.sink.writer.partitioner.Partitioner; import io.mantisrx.connector.iceberg.sink.writer.partitioner.PartitionerFactory; import io.mantisrx.runtime.Context; import io.mantisrx.runtime.ScalarToScalar; import io.mantisrx.runtime.WorkerInfo; import io.mantisrx.runtime.computation.ScalarComputation; import io.mantisrx.runtime.parameter.ParameterDefinition; import io.mantisrx.runtime.parameter.type.IntParameter; import io.mantisrx.runtime.parameter.type.StringParameter; import io.mantisrx.runtime.parameter.validator.Validators; import org.apache.hadoop.conf.Configuration; import org.apache.iceberg.DataFile; import org.apache.iceberg.DataFiles; import org.apache.iceberg.Schema; import org.apache.iceberg.StructLike; import org.apache.iceberg.Table; import org.apache.iceberg.catalog.Catalog; import org.apache.iceberg.catalog.TableIdentifier; import org.apache.iceberg.data.GenericRecord; import org.apache.iceberg.data.Record; import org.apache.iceberg.types.Comparators; import org.apache.iceberg.types.Types; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.Scheduler; import rx.exceptions.Exceptions; import rx.schedulers.Schedulers; /** * Processing stage which writes records to Iceberg through a backing file store. */ public class IcebergWriterStage implements ScalarComputation<Record, DataFile> { private static final Logger logger = LoggerFactory.getLogger(IcebergWriterStage.class); private Transformer transformer; /** * Returns a config for this stage which has encoding/decoding semantics and parameter definitions. */ public static ScalarToScalar.Config<Record, DataFile> config() { return new ScalarToScalar.Config<Record, DataFile>() .description("") .codec(IcebergCodecs.dataFile()) .withParameters(parameters()); } /** * Returns a list of parameter definitions for this stage. 
*/ public static List<ParameterDefinition<?>> parameters() { return Arrays.asList( new IntParameter().name(WriterProperties.WRITER_ROW_GROUP_SIZE) .description(WriterProperties.WRITER_ROW_GROUP_SIZE_DESCRIPTION) .validator(Validators.alwaysPass()) .defaultValue(WriterProperties.WRITER_ROW_GROUP_SIZE_DEFAULT) .build(), new StringParameter().name(WriterProperties.WRITER_FLUSH_FREQUENCY_BYTES) .description(WriterProperties.WRITER_FLUSH_FREQUENCY_BYTES_DESCRIPTION) .validator(Validators.alwaysPass()) .defaultValue(WriterProperties.WRITER_FLUSH_FREQUENCY_BYTES_DEFAULT) .build(), new StringParameter().name(WriterProperties.WRITER_FLUSH_FREQUENCY_MSEC) .description(WriterProperties.WRITER_FLUSH_FREQUENCY_MSEC_DESCRIPTION) .validator(Validators.alwaysPass()) .defaultValue(WriterProperties.WRITER_FLUSH_FREQUENCY_MSEC_DEFAULT) .build(), new StringParameter().name(WriterProperties.WRITER_FILE_FORMAT) .description(WriterProperties.WRITER_FILE_FORMAT_DESCRIPTION) .validator(Validators.alwaysPass()) .defaultValue(WriterProperties.WRITER_FILE_FORMAT_DEFAULT) .build() ); } /** * Use this to instantiate a new transformer from a given {@link Context}. */ public static Transformer newTransformer(Context context) { Configuration hadoopConfig = context.getServiceLocator().service(Configuration.class); WriterConfig config = new WriterConfig(context.getParameters(), hadoopConfig); Catalog catalog = context.getServiceLocator().service(Catalog.class); TableIdentifier id = TableIdentifier.of(config.getCatalog(), config.getDatabase(), config.getTable()); Table table = catalog.loadTable(id); WorkerInfo workerInfo = context.getWorkerInfo(); IcebergWriter writer = new DefaultIcebergWriter(config, workerInfo, table); WriterMetrics metrics = new WriterMetrics(); PartitionerFactory partitionerFactory = context.getServiceLocator().service(PartitionerFactory.class); Partitioner partitioner = partitionerFactory.getPartitioner(table); return new Transformer(config, metrics, writer, partitioner, Schedulers.computation(), Schedulers.io()); } public IcebergWriterStage() { } /** * Uses the provided Mantis Context to inject configuration and opens an underlying file appender. * <p> * This method depends on a Hadoop Configuration and Iceberg Catalog, both injected * from the Context's service locator. * <p> * Note that this method expects an Iceberg Table to have been previously created out-of-band, * otherwise initialization will fail. Users should prefer to create tables * out-of-band so they can be versioned alongside their schemas. */ @Override public void init(Context context) { transformer = newTransformer(context); } @Override public Observable<DataFile> call(Context context, Observable<Record> recordObservable) { return recordObservable.compose(transformer); } /** * Reactive Transformer for writing records to Iceberg. * <p> * Users may use this class independently of this Stage, for example, if they want to * {@link Observable#compose(Observable.Transformer)} this transformer with a flow into * an existing Stage. One benefit of this co-location is to avoid extra network * cost from worker-to-worker communication, trading off debuggability. 
*/ public static class Transformer implements Observable.Transformer<Record, DataFile> { private static final DataFile ERROR_DATA_FILE = new DataFiles.Builder() .withPath("/error.parquet") .withFileSizeInBytes(0L) .withRecordCount(0L) .build(); private static final Schema TIMER_SCHEMA = new Schema( Types.NestedField.required(1, "ts_utc_msec", Types.LongType.get())); private static final Record TIMER_RECORD = GenericRecord.create(TIMER_SCHEMA); private final WriterConfig config; private final WriterMetrics metrics; private final Partitioner partitioner; private final IcebergWriter writer; private final Scheduler timerScheduler; private final Scheduler transformerScheduler; public Transformer( WriterConfig config, WriterMetrics metrics, IcebergWriter writer, Partitioner partitioner, Scheduler timerScheduler, Scheduler transformerScheduler) { this.config = config; this.metrics = metrics; this.writer = writer; this.partitioner = partitioner; this.timerScheduler = timerScheduler; this.transformerScheduler = transformerScheduler; } /** * Opens an IcebergWriter FileAppender, writes records to a file. The appender flushes if any of * the following criteria is met, in order of precedence: * <p> * 1. new partition * 2. size threshold * 3. time threshold * <p> * New Partition: * <p> * If the Iceberg Table is partitioned, the appender will check _every_ record to detect a new partition. * If there's a new partition, the appender will align the record to the new partition. It does this by * closing the current file, opening a new file, and writing the record to that new file. * <p> * It's _important_ that upstream producers align events to partitions as best as possible. For example, * - Given an Iceberg Table partitioned by {@code hour} * - 10 producers writing Iceberg Records * <p> * Each of the 10 producers _should_ try to produce events aligned by the hour. * If writes are not well-aligned, results will still be correct, but performance will be negatively impacted * due to frequent opening/closing of files. * <p> * Writes may be _unordered_ as long as they're aligned by the table's partitioning. * <p> * Size Threshold: * <p> * The appender will periodically check the current file size, at an interval of * {@link WriterConfig#getWriterRowGroupSize()} records, and will flush if the size has reached * {@link WriterConfig#getWriterFlushFrequencyBytes()}. * <p> * Time Threshold: * <p> * The appender will periodically attempt to flush as configured by * {@link WriterConfig#getWriterFlushFrequencyMsec()}. When this threshold fires, the appender flushes * only if it has an open file, which avoids flushing unnecessarily when there are no events; otherwise a * flush happens even if the file contains only a few events. This effectively limits the * upper-bound for allowed lateness. * <p> * Pair this writer with a progressive multipart file uploader backend for better latencies. */ @Override public Observable<DataFile> call(Observable<Record> source) { Observable<Record> timer = Observable.interval( config.getWriterFlushFrequencyMsec(), TimeUnit.MILLISECONDS, timerScheduler) .map(i -> TIMER_RECORD); return source.mergeWith(timer) .observeOn(transformerScheduler) .scan(new Trigger(config.getWriterRowGroupSize()), (trigger, record) -> { if (record.struct().fields().equals(TIMER_SCHEMA.columns())) { trigger.timeout(); } else { StructLike partition = partitioner.partition(record); // Only open (if closed) on new events from `source`; exclude timer records.
if (writer.isClosed()) { try { logger.info("opening file for partition {}", partition); writer.open(partition); trigger.setPartition(partition); metrics.increment(WriterMetrics.OPEN_SUCCESS_COUNT); } catch (IOException e) { metrics.increment(WriterMetrics.OPEN_FAILURE_COUNT); throw Exceptions.propagate(e); } } // Make sure records are aligned with the partition. if (trigger.isNewPartition(partition)) { trigger.setPartition(partition); try { DataFile dataFile = writer.close(); trigger.stage(dataFile); trigger.reset(); } catch (IOException | RuntimeException e) { metrics.increment(WriterMetrics.BATCH_FAILURE_COUNT); logger.error("error writing DataFile", e); } try { logger.info("opening file for new partition {}", partition); writer.open(partition); metrics.increment(WriterMetrics.OPEN_SUCCESS_COUNT); } catch (IOException e) { metrics.increment(WriterMetrics.OPEN_FAILURE_COUNT); throw Exceptions.propagate(e); } } try { writer.write(record); trigger.increment(); metrics.increment(WriterMetrics.WRITE_SUCCESS_COUNT); } catch (RuntimeException e) { metrics.increment(WriterMetrics.WRITE_FAILURE_COUNT); logger.debug("error writing record {}", record); } } return trigger; }) .filter(this::shouldFlush) // Writer can be closed if there are no events, yet timer is still ticking. .filter(trigger -> !writer.isClosed()) .map(trigger -> { // Triggered by new partition. if (trigger.hasStagedDataFile()) { DataFile dataFile = trigger.getStagedDataFile().copy(); trigger.clearStagedDataFile(); return dataFile; } // Triggered by size or time. try { DataFile dataFile = writer.close(); trigger.reset(); return dataFile; } catch (IOException | RuntimeException e) { metrics.increment(WriterMetrics.BATCH_FAILURE_COUNT); logger.error("error writing DataFile", e); return ERROR_DATA_FILE; } }) .filter(dataFile -> !isErrorDataFile(dataFile)) .doOnNext(dataFile -> { metrics.increment(WriterMetrics.BATCH_SUCCESS_COUNT); logger.info("writing DataFile: {}", dataFile); metrics.setGauge(WriterMetrics.BATCH_SIZE, dataFile.recordCount()); metrics.setGauge(WriterMetrics.BATCH_SIZE_BYTES, dataFile.fileSizeInBytes()); }) .doOnTerminate(() -> { try { logger.info("closing writer on rx terminate signal"); writer.close(); } catch (IOException e) { throw Exceptions.propagate(e); } }); } private boolean isErrorDataFile(DataFile dataFile) { return Comparators.charSequences().compare(ERROR_DATA_FILE.path(), dataFile.path()) == 0 && ERROR_DATA_FILE.fileSizeInBytes() == dataFile.fileSizeInBytes() && ERROR_DATA_FILE.recordCount() == dataFile.recordCount(); } /** * Trigger a flush on a repartition event, size threshold, or time threshold. */ private boolean shouldFlush(Trigger trigger) { // For size threshold, check the trigger to short-circuit if the count is over the threshold first // because implementations of `writer.length()` may be expensive if called in a tight loop. 
return trigger.hasStagedDataFile() || (trigger.isOverCountThreshold() && writer.length() >= config.getWriterFlushFrequencyBytes()) || trigger.isTimedOut(); } private static class Trigger { private final int countThreshold; private int counter; private boolean timedOut; private StructLike partition; private DataFile stagedDataFile; Trigger(int countThreshold) { this.countThreshold = countThreshold; } void increment() { counter++; } void timeout() { timedOut = true; } void setPartition(StructLike newPartition) { partition = newPartition; } void reset() { counter = 0; timedOut = false; } void stage(DataFile dataFile) { this.stagedDataFile = dataFile; } boolean isOverCountThreshold() { return counter >= countThreshold; } boolean isTimedOut() { return timedOut; } boolean isNewPartition(StructLike newPartition) { return partition != null && !partition.equals(newPartition); } boolean hasStagedDataFile() { return stagedDataFile != null; } DataFile getStagedDataFile() { return stagedDataFile; } void clearStagedDataFile() { stagedDataFile = null; } @Override public String toString() { return "Trigger{" + " countThreshold=" + countThreshold + ", counter=" + counter + ", timedOut=" + timedOut + ", partition=" + partition + ", stagedDataFile=" + stagedDataFile + '}'; } } } }
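As IcebergWriterStageTest earlier in this document shows, the Transformer can be composed directly with any Observable of Records. A wiring sketch, assuming the caller builds the config, writer, and partitioner the same way newTransformer(Context) does:

import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig;
import io.mantisrx.connector.iceberg.sink.writer.metrics.WriterMetrics;
import io.mantisrx.connector.iceberg.sink.writer.partitioner.Partitioner;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.data.Record;
import rx.Observable;
import rx.schedulers.Schedulers;

public final class TransformerWiringSketch {
    // Inputs are assumed to be constructed elsewhere, e.g. via the same wiring as newTransformer(Context).
    public static Observable<DataFile> attach(Observable<Record> records,
                                              WriterConfig config,
                                              IcebergWriter writer,
                                              Partitioner partitioner) {
        IcebergWriterStage.Transformer transformer = new IcebergWriterStage.Transformer(
                config, new WriterMetrics(), writer, partitioner,
                Schedulers.computation(), Schedulers.io());
        return records.compose(transformer); // emits one DataFile per flush (partition change, size, or time)
    }
}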
1,195
0
Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink
Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/IcebergWriter.java
/* * Copyright 2020 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.iceberg.sink.writer; import java.io.IOException; import org.apache.iceberg.DataFile; import org.apache.iceberg.StructLike; import org.apache.iceberg.data.Record; import org.apache.iceberg.exceptions.RuntimeIOException; public interface IcebergWriter { void open() throws IOException; void open(StructLike newPartitionKey) throws IOException; void write(Record record); DataFile close() throws IOException, RuntimeIOException; boolean isClosed(); long length(); StructLike getPartitionKey(); }
1,196
0
Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink
Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/DefaultIcebergWriter.java
/* * Copyright 2020 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.connector.iceberg.sink.writer; import java.io.IOException; import java.util.UUID; import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig; import io.mantisrx.runtime.WorkerInfo; import org.apache.hadoop.fs.Path; import org.apache.iceberg.DataFile; import org.apache.iceberg.DataFiles; import org.apache.iceberg.FileFormat; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.StructLike; import org.apache.iceberg.Table; import org.apache.iceberg.data.Record; import org.apache.iceberg.data.parquet.GenericParquetWriter; import org.apache.iceberg.exceptions.RuntimeIOException; import org.apache.iceberg.hadoop.HadoopOutputFile; import org.apache.iceberg.io.FileAppender; import org.apache.iceberg.io.OutputFile; import org.apache.iceberg.parquet.Parquet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Class for writing {@link Record}s to Iceberg via a HDFS-compatible backend. * For example, this class may be used with an S3 compatible filesystem library * which progressively uploads (multipart) to S3 on each write operation for * optimizing latencies. * <p> * Users have the flexibility to choose the semantics of opening, writing, and closing * this Writer, for example, closing the underlying appender after some number * of Bytes written and opening a new appender. */ public class DefaultIcebergWriter implements IcebergWriter { private static final Logger logger = LoggerFactory.getLogger(DefaultIcebergWriter.class); private final WriterConfig config; private final WorkerInfo workerInfo; private final Table table; private final PartitionSpec spec; private final FileFormat format; private FileAppender<Record> appender; private OutputFile file; private StructLike partitionKey; public DefaultIcebergWriter(WriterConfig config, WorkerInfo workerInfo, Table table) { this.config = config; this.workerInfo = workerInfo; this.table = table; this.spec = table.spec(); this.format = FileFormat.valueOf(config.getWriterFileFormat()); } /** * Opens a {@link FileAppender} for a specific {@link FileFormat}. * <p> * A filename is automatically generated for this appender. * <p> * Supports Parquet. Avro, Orc, and others unsupported. */ @Override public void open() throws IOException { open(null); } /** * Opens a {@link FileAppender} using a {@link StructLike} partition key * for a specific {@link FileFormat}. * <p> * A filename is automatically generated for this appender. * <p> * Supports Parquet. Avro, Orc, and others unsupported. 
*/ @Override public void open(StructLike newPartitionKey) throws IOException { partitionKey = newPartitionKey; Path path = new Path(table.location(), generateFilename()); logger.info("opening new {} file appender {}", format, path); file = HadoopOutputFile.fromPath(path, config.getHadoopConfig()); switch (format) { case PARQUET: appender = Parquet.write(file) .schema(table.schema()) .createWriterFunc(GenericParquetWriter::buildWriter) .setAll(table.properties()) .overwrite() .build(); break; case AVRO: default: throw new UnsupportedOperationException("Cannot write using an unsupported file format " + format); } } @Override public void write(Record record) { appender.add(record); } /** * Closes the currently opened file appender and builds a DataFile. * <p> * Users are expected to {@link IcebergWriter#open()} a new file appender for this writer * if they want to continue writing. Users can check for status of the file appender * using {@link IcebergWriter#isClosed()}. * * @return a DataFile representing metadata about the records written. */ @Override public DataFile close() throws IOException, RuntimeIOException { if (appender == null) { return null; } // Calls to FileAppender#close can fail if the backing file system fails to close. // For example, this can happen for an S3-backed file system where it might fail // to GET the status of the file. The file would have already been closed. // Callers should open a new appender. try { appender.close(); return DataFiles.builder(spec) .withPath(file.location()) .withInputFile(file.toInputFile()) .withFileSizeInBytes(appender.length()) .withPartition(spec.fields().size() == 0 ? null : partitionKey) .withMetrics(appender.metrics()) .withSplitOffsets(appender.splitOffsets()) .build(); } finally { appender = null; file = null; } } public boolean isClosed() { return appender == null; } /** * Returns the current file size (in Bytes) written using this writer's appender. * <p> * Users should be careful calling this method in a tight loop because it can * be expensive depending on the file format, for example in Parquet. * * @return current file size (in Bytes). */ public long length() { return appender == null ? 0 : appender.length(); } /** * Returns the partition key for which this record is partitioned in an Iceberg table. * * @return StructLike for partitioned tables; null for unpartitioned tables */ public StructLike getPartitionKey() { return partitionKey; } /** * Generate a Parquet filename with attributes which make it more friendly to determine * the source of the file. For example, if the caller exits unexpectedly and leaves * files in the system, it's possible to identify them through a recursive listing. */ private String generateFilename() { return generateDataPath( generatePartitionPath( format.addExtension(String.format("%s_%s_%s_%s_%s", workerInfo.getJobId(), workerInfo.getStageNumber(), workerInfo.getWorkerIndex(), workerInfo.getWorkerNumber(), UUID.randomUUID().toString())))); } private String generateDataPath(String partitionPath) { return String.format("data/%s", partitionPath); } private String generatePartitionPath(String filename) { if (spec.fields().isEmpty()) { return filename; } return String.format("/%s/%s", spec.partitionToPath(partitionKey), filename); } }
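Independent of the Rx plumbing, the writer's lifecycle is open, write zero or more records, then close. A sketch for an unpartitioned table; the writer instance is assumed to be built by the surrounding stage from its config, WorkerInfo, and table.

import java.io.IOException;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.data.Record;

public final class WriterLifecycleSketch {
    public static DataFile writeBatch(IcebergWriter writer, Iterable<Record> records) throws IOException {
        writer.open();                 // unpartitioned table: no partition key needed
        for (Record record : records) {
            writer.write(record);      // buffered by the underlying Parquet appender
        }
        // Returns DataFile metadata to hand to the committer stage; after close(),
        // isClosed() is true and open() must be called again before further writes.
        return writer.close();
    }
}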
1,197
0
Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer
Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/metrics/WriterMetrics.java
/*
 * Copyright 2020 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.connector.iceberg.sink.writer.metrics;

import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;

public class WriterMetrics {

    public static final String OPEN_SUCCESS_COUNT = "openSuccessCount";
    private final Counter openSuccessCount;

    public static final String OPEN_FAILURE_COUNT = "openFailureCount";
    private final Counter openFailureCount;

    public static final String WRITE_SUCCESS_COUNT = "writeSuccessCount";
    private final Counter writeSuccessCount;

    public static final String WRITE_FAILURE_COUNT = "writeFailureCount";
    private final Counter writeFailureCount;

    public static final String BATCH_SUCCESS_COUNT = "batchSuccessCount";
    private final Counter batchSuccessCount;

    public static final String BATCH_FAILURE_COUNT = "batchFailureCount";
    private final Counter batchFailureCount;

    public static final String BATCH_SIZE = "batchSize";
    private final Gauge batchSize;

    public static final String BATCH_SIZE_BYTES = "batchSizeBytes";
    private final Gauge batchSizeBytes;

    public WriterMetrics() {
        Metrics metrics = new Metrics.Builder()
                .name(WriterMetrics.class.getCanonicalName())
                .addCounter(OPEN_SUCCESS_COUNT)
                .addCounter(OPEN_FAILURE_COUNT)
                .addCounter(WRITE_SUCCESS_COUNT)
                .addCounter(WRITE_FAILURE_COUNT)
                .addCounter(BATCH_SUCCESS_COUNT)
                .addCounter(BATCH_FAILURE_COUNT)
                .addGauge(BATCH_SIZE)
                .addGauge(BATCH_SIZE_BYTES)
                .build();

        metrics = MetricsRegistry.getInstance().registerAndGet(metrics);

        openSuccessCount = metrics.getCounter(OPEN_SUCCESS_COUNT);
        openFailureCount = metrics.getCounter(OPEN_FAILURE_COUNT);
        writeSuccessCount = metrics.getCounter(WRITE_SUCCESS_COUNT);
        writeFailureCount = metrics.getCounter(WRITE_FAILURE_COUNT);
        batchSuccessCount = metrics.getCounter(BATCH_SUCCESS_COUNT);
        batchFailureCount = metrics.getCounter(BATCH_FAILURE_COUNT);
        batchSize = metrics.getGauge(BATCH_SIZE);
        batchSizeBytes = metrics.getGauge(BATCH_SIZE_BYTES);
    }

    public void setGauge(final String metric, final long value) {
        switch (metric) {
            case BATCH_SIZE:
                batchSize.set(value);
                break;
            case BATCH_SIZE_BYTES:
                batchSizeBytes.set(value);
                break;
            default:
                break;
        }
    }

    public void increment(final String metric) {
        switch (metric) {
            case OPEN_SUCCESS_COUNT:
                openSuccessCount.increment();
                break;
            case OPEN_FAILURE_COUNT:
                openFailureCount.increment();
                break;
            case WRITE_SUCCESS_COUNT:
                writeSuccessCount.increment();
                break;
            case WRITE_FAILURE_COUNT:
                writeFailureCount.increment();
                break;
            case BATCH_SUCCESS_COUNT:
                batchSuccessCount.increment();
                break;
            case BATCH_FAILURE_COUNT:
                batchFailureCount.increment();
                break;
            default:
                break;
        }
    }

    public void increment(final String metric, final long value) {
        switch (metric) {
            case OPEN_SUCCESS_COUNT:
                openSuccessCount.increment(value);
                break;
            case OPEN_FAILURE_COUNT:
                openFailureCount.increment(value);
                break;
            case WRITE_SUCCESS_COUNT:
                writeSuccessCount.increment(value);
                break;
            case WRITE_FAILURE_COUNT:
                writeFailureCount.increment(value);
                break;
            case BATCH_SUCCESS_COUNT:
                batchSuccessCount.increment(value);
                break;
            case BATCH_FAILURE_COUNT:
                batchFailureCount.increment(value);
                break;
            default:
                break;
        }
    }
}
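A brief sketch of how these string-keyed counters and gauges might be driven from a write loop. The WriterMetrics API used is exactly the one defined above; the example class, the onWrite hook, and its arguments are hypothetical.

package io.mantisrx.connector.iceberg.sink.writer.metrics.examples;

import io.mantisrx.connector.iceberg.sink.writer.metrics.WriterMetrics;

// Hypothetical example class showing the intended call pattern.
public class MetricsUsageExample {

    private final WriterMetrics metrics = new WriterMetrics();

    // Records the outcome of one write plus the dimensions of the current batch.
    public void onWrite(boolean succeeded, long batchRows, long batchBytes) {
        // The string constants route to the matching Counter inside WriterMetrics.
        if (succeeded) {
            metrics.increment(WriterMetrics.WRITE_SUCCESS_COUNT);
        } else {
            metrics.increment(WriterMetrics.WRITE_FAILURE_COUNT);
        }

        // Gauges report the most recent batch dimensions rather than accumulating.
        metrics.setGauge(WriterMetrics.BATCH_SIZE, batchRows);
        metrics.setGauge(WriterMetrics.BATCH_SIZE_BYTES, batchBytes);
    }
}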
1,198
0
Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer
Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/config/WriterProperties.java
/*
 * Copyright 2020 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.connector.iceberg.sink.writer.config;

import org.apache.iceberg.FileFormat;

/**
 * Property key names and default values for an Iceberg Writer.
 */
public class WriterProperties {

    private WriterProperties() {
    }

    /**
     * Maximum number of rows that should exist in a file.
     */
    public static final String WRITER_ROW_GROUP_SIZE = "writerRowGroupSize";
    public static final int WRITER_ROW_GROUP_SIZE_DEFAULT = 100;
    public static final String WRITER_ROW_GROUP_SIZE_DESCRIPTION =
            String.format("Number of rows to chunk before checking for file size (default: %s)",
                    WRITER_ROW_GROUP_SIZE_DEFAULT);

    /**
     * Flush frequency by size (in bytes).
     */
    public static final String WRITER_FLUSH_FREQUENCY_BYTES = "writerFlushFrequencyBytes";
    // TODO: Change to long.
    public static final String WRITER_FLUSH_FREQUENCY_BYTES_DEFAULT = "134217728"; // 128 MiB
    public static final String WRITER_FLUSH_FREQUENCY_BYTES_DESCRIPTION =
            String.format("Flush frequency by size in Bytes (default: %s)",
                    WRITER_FLUSH_FREQUENCY_BYTES_DEFAULT);

    /**
     * Flush frequency by time (in milliseconds).
     */
    public static final String WRITER_FLUSH_FREQUENCY_MSEC = "writerFlushFrequencyMsec";
    // TODO: Change to long.
    public static final String WRITER_FLUSH_FREQUENCY_MSEC_DEFAULT = "60000"; // 1 min
    public static final String WRITER_FLUSH_FREQUENCY_MSEC_DESCRIPTION =
            String.format("Flush frequency by time in milliseconds (default: %s)",
                    WRITER_FLUSH_FREQUENCY_MSEC_DEFAULT);

    /**
     * File format for writing data files to the backing Iceberg store.
     */
    public static final String WRITER_FILE_FORMAT = "writerFileFormat";
    public static final String WRITER_FILE_FORMAT_DEFAULT = FileFormat.PARQUET.name();
    public static final String WRITER_FILE_FORMAT_DESCRIPTION =
            String.format("File format for writing data files to backing Iceberg store (default: %s)",
                    WRITER_FILE_FORMAT_DEFAULT);
}
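Since the flush-frequency defaults above are declared as strings (see the TODO comments), consumers must parse them at the call site. The sketch below shows one way to resolve these keys; it uses java.util.Properties purely to stay self-contained, whereas a real Mantis job would read these values from its job parameters. The example class and method names are hypothetical.

package io.mantisrx.connector.iceberg.sink.writer.config.examples;

import java.util.Properties;

import io.mantisrx.connector.iceberg.sink.writer.config.WriterProperties;

// Hypothetical example class: resolves writer settings against the declared defaults.
public class WriterPropertiesExample {

    // Falls back to the string default, then parses; defaults are strings today
    // (the TODO above notes they may become long).
    public static long resolveFlushBytes(Properties props) {
        String value = props.getProperty(
                WriterProperties.WRITER_FLUSH_FREQUENCY_BYTES,
                WriterProperties.WRITER_FLUSH_FREQUENCY_BYTES_DEFAULT);
        return Long.parseLong(value);
    }

    // The row-group default is already an int, so no parsing fallback is needed.
    public static int resolveRowGroupSize(Properties props) {
        String value = props.getProperty(WriterProperties.WRITER_ROW_GROUP_SIZE);
        return value == null
                ? WriterProperties.WRITER_ROW_GROUP_SIZE_DEFAULT
                : Integer.parseInt(value);
    }
}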
1,199