index
int64 0
0
| repo_id
stringlengths 26
205
| file_path
stringlengths 51
246
| content
stringlengths 8
433k
| __index_level_0__
int64 0
10k
|
---|---|---|---|---|
0 |
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/export/EndpointValidatorTest.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.export;
import org.junit.Test;
import java.util.Arrays;
import java.util.Collection;
import static org.junit.Assert.assertEquals;
public class EndpointValidatorTest {

    /** The bare endpoint name every test input should validate to. */
    private static final String BARE_ENDPOINT = "my-endpoint";

    // validate() strips any leading protocol prefix (ws, wss, http, https).
    @Test
    public void shouldRemoveProtocol() {
        assertAllValidateToBareEndpoint(Arrays.asList(
                "my-endpoint",
                "ws://my-endpoint",
                "wss://my-endpoint",
                "http://my-endpoint",
                "https://my-endpoint"));
    }

    // validate() strips a trailing :port suffix.
    @Test
    public void shouldRemovePort() {
        assertAllValidateToBareEndpoint(Arrays.asList("my-endpoint", "my-endpoint:8182"));
    }

    // validate() strips protocol and port together.
    @Test
    public void shouldRemoveProtocolAndPort() {
        assertAllValidateToBareEndpoint(Arrays.asList("my-endpoint", "https://my-endpoint:8182"));
    }

    // Asserts that every supplied candidate validates to the bare endpoint name.
    private void assertAllValidateToBareEndpoint(Collection<String> candidates) {
        for (String candidate : candidates) {
            assertEquals(BARE_ENDPOINT, EndpointValidator.validate(candidate));
        }
    }
}
| 900 |
0 |
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/export/ArgsTest.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.export;
import org.junit.Test;
import java.util.Collection;
import java.util.Iterator;
import static org.junit.Assert.*;
public class ArgsTest {

    // removeOptions() strips each named option together with its value;
    // option names that are not present are ignored.
    @Test
    public void shouldRemoveOptions() throws Exception {
        Args commandLine = new Args("-x \"extra\" -e endpoint -c config -q query -f file");
        commandLine.removeOptions("-x", "-c", "-f", "-notpresent");
        assertArrayEquals(new String[]{"-e", "endpoint", "-q", "query"}, commandLine.values());
    }

    // Removing an option deletes every occurrence, not just the first.
    @Test
    public void shouldRemoveMultipleOccurrencesOfOption() throws Exception {
        Args commandLine = new Args("-e endpoint -l label1 -l label2 -l label3");
        commandLine.removeOptions("-l");
        assertArrayEquals(new String[]{"-e", "endpoint"}, commandLine.values());
    }

    // removeFlags() strips value-less flags and leaves option/value pairs intact.
    @Test
    public void shouldRemoveFlags() throws Exception {
        Args commandLine = new Args("-e endpoint -flag1 -c config -flag2 -q query");
        commandLine.removeFlags("-flag1", "-flag2");
        assertArrayEquals(new String[]{"-e", "endpoint", "-c", "config", "-q", "query"}, commandLine.values());
    }

    // addOption() appends the option/value pair, preserving quoting in values.
    @Test
    public void shouldAddOption() throws Exception {
        Args commandLine = new Args("-e endpoint -c config");
        commandLine.addOption("-l", "label1");
        commandLine.addOption("-q", "result=\"g.V('id').toList()\"");
        assertArrayEquals(new String[]{"-e", "endpoint", "-c", "config", "-l", "label1", "-q", "result=\"g.V('id').toList()\""}, commandLine.values());
    }

    // toString() round-trips the original argument string.
    @Test
    public void shouldFormatAsString() throws Exception {
        Args commandLine = new Args("-e endpoint -c config");
        assertEquals("-e endpoint -c config", commandLine.toString());
    }

    @Test
    public void shouldIndicateWhetherArgsContainArg() {
        Args commandLine = new Args("-e endpoint -c config");
        assertTrue(commandLine.contains("-c"));
        assertFalse(commandLine.contains("-x"));
    }

    // contains(name, value) matches only when the named option has that value.
    @Test
    public void shouldIndicateWhetherArgsContainArgWithValue() {
        Args commandLine = new Args("-e endpoint --profile xyz --profile neptune_ml -c config -b");
        assertTrue(commandLine.contains("--profile", "neptune_ml"));
        assertFalse(commandLine.contains("-b", "xyz"));
    }

    // Quoted option values are matched as if they were unquoted.
    @Test
    public void shouldIndicateWhetherArgsContainArgWithQuotedValue() {
        Args commandLine = new Args("-e endpoint --profile xyz --profile \"neptune_ml\" -c config -b");
        assertTrue(commandLine.contains("--profile", "neptune_ml"));
        assertFalse(commandLine.contains("-b", "xyz"));
    }

    @Test
    public void shouldReplaceArg() {
        Args commandLine = new Args("export-pg -e endpoint --profile xyz");
        commandLine.replace("export-pg", "export-pg-from-config");
        assertEquals("export-pg-from-config -e endpoint --profile xyz", commandLine.toString());
    }

    @Test
    public void shouldIndicateWhetherAnyOfTheSuppliedArgsIsPresent() {
        Args commandLine = new Args("export-pg -e endpoint --profile xyz");
        assertTrue(commandLine.containsAny("x", "y", "-e", "z"));
        assertFalse(commandLine.containsAny("x", "y", "z"));
    }

    // When an option repeats, getFirstOptionValue() returns the first value.
    @Test
    public void shouldGetFirstOptionValue() {
        Args commandLine = new Args("export-pg -e endpoint --profile xyz --profile abc -e endpoint --use-ssl --profile 123");
        assertEquals("xyz", commandLine.getFirstOptionValue("--profile"));
    }

    // getOptionValues() returns every occurrence, in order of appearance.
    @Test
    public void shouldGetAllOptionValues() {
        Args commandLine = new Args("export-pg -e endpoint --profile xyz --profile abc -e endpoint --use-ssl --profile 123");
        Collection<String> profiles = commandLine.getOptionValues("--profile");
        assertEquals(3, profiles.size());
        Iterator<String> profileIterator = profiles.iterator();
        assertEquals("xyz", profileIterator.next());
        assertEquals("abc", profileIterator.next());
        assertEquals("123", profileIterator.next());
    }
}
| 901 |
0 |
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/export/NeptuneExportLambdaTest.java
|
package com.amazonaws.services.neptune.export;
import com.amazonaws.SDKGlobalConfiguration;
import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.LambdaLogger;
import com.amazonaws.util.StringInputStream;
import com.fasterxml.jackson.core.JsonParseException;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.contrib.java.lang.system.Assertion;
import org.junit.contrib.java.lang.system.ExpectedSystemExit;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintStream;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class NeptuneExportLambdaTest {

    private Context context;
    private LambdaLogger logger;
    private ByteArrayOutputStream outputStreamCaptor;
    // Original System.err, captured in setup() so teardown() can restore it.
    // Fix: previously the redirect was never undone and leaked into any other
    // tests running in the same JVM.
    private PrintStream originalErr;

    @Rule
    public final ExpectedSystemExit exit = ExpectedSystemExit.none();

    @Before
    public void setup() {
        context = mock(Context.class);
        logger = mock(LambdaLogger.class);
        when(context.getLogger()).thenReturn(logger);
        outputStreamCaptor = new ByteArrayOutputStream();
        // Redirect stderr so ErrorChecker can inspect what the lambda printed.
        originalErr = System.err;
        System.setErr(new PrintStream(outputStreamCaptor));
    }

    @After
    public void teardown() {
        // Undo the stderr redirect performed in setup().
        System.setErr(originalErr);
    }

    // Unknown parameters in the request JSON cause exit(-1) with a diagnostic.
    @Test
    public void shouldRejectIllegalArguments() throws IOException {
        exit.expectSystemExitWithStatus(-1);
        exit.checkAssertionAfterwards(new ErrorChecker("Found unexpected parameters:"));
        System.setProperty(SDKGlobalConfiguration.AWS_REGION_SYSTEM_PROPERTY, "us-west-2");
        NeptuneExportLambda lambda = new NeptuneExportLambda();
        String input = "{" +
                "\"params\": {\"endpoint\" : \"fakeEndpoint\"," +
                "\"illegalArgument\": \"test\"}}";
        lambda.handleRequest(new StringInputStream(input), mock(OutputStream.class), context);
    }

    // Requests that omit every endpoint/cluster identifier cause exit(-1).
    @Test
    public void shouldRejectMissingRequiredArguments() throws IOException {
        exit.expectSystemExitWithStatus(-1);
        exit.checkAssertionAfterwards(new ErrorChecker("One/more of the following options must be specified: -e, --endpoint, --cluster-id, --cluster, --clusterid"));
        System.setProperty(SDKGlobalConfiguration.AWS_REGION_SYSTEM_PROPERTY, "us-west-2");
        NeptuneExportLambda lambda = new NeptuneExportLambda();
        String input = "{\"command\": \"export-pg\", \"params\": {}}";
        lambda.handleRequest(new StringInputStream(input), mock(OutputStream.class), context);
    }

    // Malformed JSON propagates as a JsonParseException rather than exiting.
    @Test
    public void shouldRejectMalformedJSON() throws IOException {
        NeptuneExportLambda lambda = new NeptuneExportLambda();
        String input = "{[}";
        assertThrows(JsonParseException.class,
                () -> lambda.handleRequest(new StringInputStream(input), mock(OutputStream.class), context));
    }

    /**
     * Asserts, after an expected System.exit, that the captured stderr output
     * contains the expected message fragment.
     */
    private class ErrorChecker implements Assertion {

        private final String expectedMessage;

        ErrorChecker(String expectedMessage) {
            this.expectedMessage = expectedMessage;
        }

        @Override
        public void checkAssertion() throws Exception {
            String capturedErrors = new String(outputStreamCaptor.toByteArray());
            assertTrue(capturedErrors.contains(expectedMessage));
        }
    }
}
| 902 |
0 |
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/rdf
|
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/rdf/io/NeptuneExportSparqlRepositoryTest.java
|
package com.amazonaws.services.neptune.rdf.io;
import com.amazonaws.neptune.auth.NeptuneSigV4SignerException;
import com.amazonaws.services.neptune.cluster.ConnectionConfig;
import org.apache.http.Header;
import org.apache.http.conn.EofSensorInputStream;
import org.apache.http.impl.io.ChunkedInputStream;
import org.apache.http.message.BasicHeader;
import org.apache.http.protocol.HttpContext;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class NeptuneExportSparqlRepositoryTest {

    // Fix: test method names now follow the lowerCamelCase convention used by
    // the other test classes in this project (were UpperCamelCase "ShouldGet...").
    // JUnit discovers tests via the @Test annotation, so the rename is safe.

    /**
     * A freshly constructed repository has no recorded HTTP context, so the
     * trailer-derived error message is empty.
     */
    @Test
    public void shouldGetEmptyErrorMessageFromNewRepository() throws NeptuneSigV4SignerException {
        ConnectionConfig mockedConfig = mock(ConnectionConfig.class);
        NeptuneExportSparqlRepository repo = new NeptuneExportSparqlRepository("test", null, null, mockedConfig);
        assertEquals("", repo.getErrorMessageFromTrailers());
    }

    /**
     * Trailer headers attached to a chunked response stream are formatted
     * into the error message.
     */
    @Test
    public void shouldGetTrailerErrorMessagesFromChunkedStream() throws NeptuneSigV4SignerException {
        ConnectionConfig mockedConfig = mock(ConnectionConfig.class);
        NeptuneExportSparqlRepository repo = new NeptuneExportSparqlRepository("test", null, null, mockedConfig);
        ChunkedInputStream mockedStream = mock(ChunkedInputStream.class);
        when(mockedStream.getFooters()).thenReturn(new Header[]{new BasicHeader("name", "value")});
        HttpContext mockedContext = mock(HttpContext.class);
        when(mockedContext.getAttribute("raw-response-inputstream")).thenReturn(mockedStream);
        repo.setLastContext(mockedContext);
        assertEquals("name: value\n", repo.getErrorMessageFromTrailers());
    }

    /**
     * Trailers are also extracted when the chunked stream is wrapped in an
     * EofSensorInputStream.
     */
    @Test
    public void shouldGetTrailerErrorMessagesFromEofSensorInputStream() throws NeptuneSigV4SignerException {
        ConnectionConfig mockedConfig = mock(ConnectionConfig.class);
        NeptuneExportSparqlRepository repo = new NeptuneExportSparqlRepository("test", null, null, mockedConfig);
        ChunkedInputStream mockedStream = mock(ChunkedInputStream.class);
        when(mockedStream.getFooters()).thenReturn(new Header[]{new BasicHeader("name", "value")});
        EofSensorInputStream eofSensorInputStream = new EofSensorInputStream(mockedStream, null);
        HttpContext mockedContext = mock(HttpContext.class);
        when(mockedContext.getAttribute("raw-response-inputstream")).thenReturn(eofSensorInputStream);
        repo.setLastContext(mockedContext);
        assertEquals("name: value\n", repo.getErrorMessageFromTrailers());
    }
}
| 903 |
0 |
Create_ds/neptune-export/src/main/java/org/apache/tinkerpop/gremlin
|
Create_ds/neptune-export/src/main/java/org/apache/tinkerpop/gremlin/driver/LBAwareSigV4WebSocketChannelizer.java
|
/*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import com.amazon.neptune.gremlin.driver.sigv4.AwsSigV4ClientHandshaker;
import com.amazon.neptune.gremlin.driver.sigv4.ChainedSigV4PropertiesProvider;
import com.amazonaws.services.neptune.auth.HandshakeRequestConfig;
import com.amazonaws.services.neptune.auth.LBAwareAwsSigV4ClientHandshaker;
import io.netty.channel.Channel;
import io.netty.channel.ChannelPipeline;
import io.netty.handler.codec.http.EmptyHttpHeaders;
import io.netty.handler.codec.http.HttpClientCodec;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.websocketx.CloseWebSocketFrame;
import io.netty.handler.codec.http.websocketx.WebSocketClientHandshaker;
import io.netty.handler.codec.http.websocketx.WebSocketVersion;
import org.apache.tinkerpop.gremlin.driver.exception.ConnectionException;
import org.apache.tinkerpop.gremlin.driver.handler.WebSocketClientHandler;
import org.apache.tinkerpop.gremlin.driver.handler.WebSocketGremlinRequestEncoder;
import org.apache.tinkerpop.gremlin.driver.handler.WebSocketGremlinResponseDecoder;
import java.util.concurrent.TimeUnit;
public class LBAwareSigV4WebSocketChannelizer extends Channelizer.AbstractChannelizer {
/**
* Constant to denote the websocket protocol.
*/
private static final String WEB_SOCKET = "ws";
/**
* Constant to denote the websocket secure protocol.
*/
private static final String WEB_SOCKET_SECURE = "wss";
/**
* Name of the HttpCodec handler.
*/
private static final String HTTP_CODEC = "http-codec";
/**
* Name of the HttpAggregator handler.
*/
private static final String AGGREGATOR = "aggregator";
/**
* Name of the WebSocket handler.
*/
protected static final String WEB_SOCKET_HANDLER = "ws-handler";
/**
* Name of the GremlinEncoder handler.
*/
private static final String GREMLIN_ENCODER = "gremlin-encoder";
/**
* Name of the GremlinDecoder handler.
*/
private static final String GRELIN_DECODER = "gremlin-decoder";
/**
* Handshake timeout.
*/
private static final int HANDSHAKE_TIMEOUT_MILLIS = 15000;
/**
* The handler to process websocket messages from the server.
*/
private WebSocketClientHandler handler;
/**
* Encoder to encode websocket requests.
*/
private WebSocketGremlinRequestEncoder webSocketGremlinRequestEncoder;
/**
* Decoder to decode websocket requests.
*/
private WebSocketGremlinResponseDecoder webSocketGremlinResponseDecoder;
/**
* Initializes the channelizer.
* @param connection the {@link Connection} object.
*/
@Override
public void init(final Connection connection) {
super.init(connection);
webSocketGremlinRequestEncoder = new WebSocketGremlinRequestEncoder(true, cluster.getSerializer());
webSocketGremlinResponseDecoder = new WebSocketGremlinResponseDecoder(cluster.getSerializer());
}
/**
* Sends a {@code CloseWebSocketFrame} to the server for the specified channel.
*/
@Override
public void close(final Channel channel) {
if (channel.isOpen()) {
channel.writeAndFlush(new CloseWebSocketFrame());
}
}
@Override
public boolean supportsSsl() {
final String scheme = connection.getUri().getScheme();
return "wss".equalsIgnoreCase(scheme);
}
@Override
public void configure(final ChannelPipeline pipeline) {
final String scheme = connection.getUri().getScheme();
if (!WEB_SOCKET.equalsIgnoreCase(scheme) && !WEB_SOCKET_SECURE.equalsIgnoreCase(scheme)) {
throw new IllegalStateException(String.format("Unsupported scheme (only %s: or %s: supported): %s",
WEB_SOCKET, WEB_SOCKET_SECURE, scheme));
}
if (!supportsSsl() && WEB_SOCKET_SECURE.equalsIgnoreCase(scheme)) {
throw new IllegalStateException(String.format("To use %s scheme ensure that enableSsl is set to true in "
+ "configuration",
WEB_SOCKET_SECURE));
}
final int maxContentLength = cluster.connectionPoolSettings().maxContentLength;
handler = createHandler();
pipeline.addLast(HTTP_CODEC, new HttpClientCodec());
pipeline.addLast(AGGREGATOR, new HttpObjectAggregator(maxContentLength));
pipeline.addLast(WEB_SOCKET_HANDLER, handler);
pipeline.addLast(GREMLIN_ENCODER, webSocketGremlinRequestEncoder);
pipeline.addLast(GRELIN_DECODER, webSocketGremlinResponseDecoder);
}
@Override
public void connected() {
try {
// block for a few seconds - if the handshake takes longer 15 seconds than there's gotta be issues with that
// server. more than likely, SSL is enabled on the server, but the client forgot to enable it or
// perhaps the server is not configured for websockets.
handler.handshakeFuture().get(HANDSHAKE_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
} catch (Exception ex) {
throw new RuntimeException(new ConnectionException(connection.getUri(),
"Could not complete websocket handshake - ensure that client protocol matches server", ex));
}
}
/**
* Creates an instance of {@link WebSocketClientHandler} with {@link AwsSigV4ClientHandshaker} as the handshaker
* for SigV4 auth.
* @return the instance of clientHandler.
*/
private WebSocketClientHandler createHandler() {
HandshakeRequestConfig handshakeRequestConfig =
HandshakeRequestConfig.parse(cluster.authProperties().get(AuthProperties.Property.JAAS_ENTRY));
WebSocketClientHandshaker handshaker = new LBAwareAwsSigV4ClientHandshaker(
connection.getUri(),
WebSocketVersion.V13,
null,
false,
EmptyHttpHeaders.INSTANCE,
cluster.getMaxContentLength(),
new ChainedSigV4PropertiesProvider(),
handshakeRequestConfig);
return new WebSocketClientHandler(handshaker, 10000, supportsSsl());
}
}
| 904 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/CreatePropertyGraphExportConfig.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune;
import com.amazonaws.services.neptune.cli.*;
import com.amazonaws.services.neptune.cluster.Cluster;
import com.amazonaws.services.neptune.io.Directories;
import com.amazonaws.services.neptune.io.DirectoryStructure;
import com.amazonaws.services.neptune.io.Target;
import com.amazonaws.services.neptune.propertygraph.ExportStats;
import com.amazonaws.services.neptune.propertygraph.NeptuneGremlinClient;
import com.amazonaws.services.neptune.propertygraph.io.ExportPropertyGraphJob;
import com.amazonaws.services.neptune.propertygraph.io.JsonResource;
import com.amazonaws.services.neptune.propertygraph.io.PropertyGraphTargetConfig;
import com.amazonaws.services.neptune.propertygraph.schema.CreateGraphSchemaCommand;
import com.amazonaws.services.neptune.propertygraph.schema.ExportSpecification;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;
import com.amazonaws.services.neptune.util.CheckedActivity;
import com.amazonaws.services.neptune.util.Timer;
import com.github.rvesse.airline.annotations.Command;
import com.github.rvesse.airline.annotations.help.Examples;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import javax.inject.Inject;
import java.util.Collection;
@Examples(examples = {
"bin/neptune-export.sh create-pg-config -e neptunedbcluster-xxxxxxxxxxxx.cluster-yyyyyyyyyyyy.us-east-1.neptune.amazonaws.com -d /home/ec2-user/output",
"bin/neptune-export.sh create-pg-config -e neptunedbcluster-xxxxxxxxxxxx.cluster-yyyyyyyyyyyy.us-east-1.neptune.amazonaws.com -d /home/ec2-user/output --sample --sample-size 100",
"bin/neptune-export.sh create-pg-config -e neptunedbcluster-xxxxxxxxxxxx.cluster-yyyyyyyyyyyy.us-east-1.neptune.amazonaws.com -d /home/ec2-user/output -nl User -el FOLLOWS"
}, descriptions = {
"Create schema config file for all node and edge labels and save it to /home/ec2-user/output",
"Create schema config file for all node and edge labels, sampling 100 nodes and edges for each label",
"Create config file containing schema for User nodes and FOLLOWS edges"
})
@Command(name = "create-pg-config", description = "Create a property graph schema config file.")
public class CreatePropertyGraphExportConfig extends NeptuneExportCommand implements Runnable {

    // Strategy for optionally cloning the cluster before running the scan.
    @Inject
    private CloneClusterModule cloneStrategy = new CloneClusterModule();

    // Connection details (endpoint, auth, etc.) shared with other commands.
    @Inject
    private CommonConnectionModule connection = new CommonConnectionModule(awsCli);

    // Export target; defaults to devnull because this command only needs the
    // schema/stats artifacts, not the exported data itself.
    @Inject
    private PropertyGraphTargetModule target = new PropertyGraphTargetModule(Target.devnull);

    // Node/edge label scope to include in the schema.
    @Inject
    private PropertyGraphScopeModule scope = new PropertyGraphScopeModule();

    @Inject
    private PropertyGraphConcurrencyModule concurrency = new PropertyGraphConcurrencyModule();

    @Inject
    private PropertyGraphSerializationModule serialization = new PropertyGraphSerializationModule();

    // Controls whether the schema is inferred by a full scan or by sampling.
    @Inject
    private PropertyGraphSchemaInferencingModule sampling = new PropertyGraphSchemaInferencingModule();

    @Inject
    private GremlinFiltersModule gremlinFilters = new GremlinFiltersModule();

    /**
     * Creates a property graph schema config file (and a stats file), either
     * by running a complete export job whose data is discarded (full-scan
     * mode) or by sampling (default mode). Exceptions are routed to
     * handleException.
     */
    @Override
    public void run() {
        try {
            Timer.timedActivity("creating property graph config", (CheckedActivity.Runnable) () -> {
                try (Cluster cluster = cloneStrategy.cloneCluster(
                        connection.clusterMetadata(),
                        connection.config(),
                        concurrency.config(sampling.isFullScan()),
                        featureToggles())) {
                    if (sampling.isFullScan()) {
                        // Full-scan mode: run a complete export job against
                        // the (devnull by default) target; the schema is built
                        // as a side effect of the export.
                        Directories directories = target.createDirectories(DirectoryStructure.Config);
                        JsonResource<GraphSchema, Boolean> configFileResource = directories.configFileResource();
                        JsonResource<ExportStats, GraphSchema> statsFileResource = directories.statsFileResource();
                        GraphSchema graphSchema = new GraphSchema();
                        ExportStats stats = new ExportStats();
                        PropertyGraphTargetConfig targetConfig = target.config(directories, new PrinterOptionsModule().config());
                        Collection<ExportSpecification> exportSpecifications = scope.exportSpecifications(
                                graphSchema,
                                gremlinFilters.filters(),
                                stats,
                                featureToggles());
                        try (NeptuneGremlinClient client = NeptuneGremlinClient.create(cluster, serialization.config());
                             GraphTraversalSource g = client.newTraversalSource()) {
                            ExportPropertyGraphJob exportJob = new ExportPropertyGraphJob(
                                    exportSpecifications,
                                    graphSchema,
                                    g,
                                    new PropertyGraphRangeModule().config(),
                                    gremlinFilters.filters(),
                                    cluster.concurrencyConfig(),
                                    targetConfig, featureToggles(),
                                    getMaxFileDescriptorCount()
                            );
                            // The job returns the schema it inferred during export.
                            graphSchema = exportJob.execute();
                            configFileResource.save(graphSchema, false);
                            statsFileResource.save(stats, graphSchema);
                        }
                        directories.writeRootDirectoryPathAsMessage(target.description(), target);
                        configFileResource.writeResourcePathAsMessage(target);
                        System.err.println();
                        System.err.println(stats.formatStats(graphSchema));
                        directories.writeRootDirectoryPathAsReturnValue(target);
                        onExportComplete(directories, stats, cluster, graphSchema);
                    } else {
                        // Sampling mode: infer the schema from a sample of
                        // nodes/edges instead of a full export.
                        ExportStats stats = new ExportStats();
                        Directories directories = target.createDirectories(DirectoryStructure.Config);
                        JsonResource<GraphSchema, Boolean> configFileResource = directories.configFileResource();
                        JsonResource<ExportStats, GraphSchema> statsFileResource = directories.statsFileResource();
                        Collection<ExportSpecification> exportSpecifications = scope.exportSpecifications(
                                stats,
                                gremlinFilters.filters(),
                                featureToggles());
                        try (NeptuneGremlinClient client = NeptuneGremlinClient.create(cluster, serialization.config());
                             GraphTraversalSource g = client.newTraversalSource()) {
                            CreateGraphSchemaCommand createGraphSchemaCommand = sampling.createSchemaCommand(exportSpecifications, g);
                            GraphSchema graphSchema = createGraphSchemaCommand.execute();
                            configFileResource.save(graphSchema, false);
                            statsFileResource.save(stats, graphSchema);
                            configFileResource.writeResourcePathAsMessage(target);
                        }
                        directories.writeConfigFilePathAsReturnValue(target);
                        onExportComplete(directories, stats, cluster);
                    }
                }
            });
        } catch (Exception e) {
            handleException(e);
        }
    }
}
| 905 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/NeptuneExportBaseCommand.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.AllowedValues;
import com.github.rvesse.airline.annotations.restrictions.Once;
public abstract class NeptuneExportBaseCommand {

    // Log level for the command, restricted to the listed values and settable
    // at most once. The field is not read anywhere in this class — presumably
    // it is consumed via the CLI annotation framework or reflection; confirm
    // against the command runner before relying on it.
    @Option(name = {"--log-level"}, description = "Log level (optional, default 'error').", title = "log level")
    @Once
    @AllowedValues(allowedValues = {"trace", "debug", "info", "warn", "error"})
    private String logLevel = "error";
}
| 906 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/GetClusterInfo.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune;
import com.amazonaws.services.neptune.cli.AwsCliModule;
import com.amazonaws.services.neptune.cluster.NeptuneClusterMetadata;
import com.github.rvesse.airline.annotations.Command;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
import org.apache.commons.lang.StringUtils;
import javax.inject.Inject;
@Command(name = "nei", description = "neptune-export cluster info", hidden = true)
public class GetClusterInfo implements Runnable {

    @Inject
    private AwsCliModule awsCli = new AwsCliModule();

    @Option(name = {"-e", "--endpoint"}, description = "Neptune endpoint.", title = "endpoint")
    @Once
    private String endpoint;

    @Option(name = {"--cluster-id"}, description = "Neptune cluster ID.", title = "clusterId")
    @Once
    private String clusterId;

    /**
     * Resolves the target cluster — from an explicit cluster ID if supplied,
     * otherwise from the endpoint — and prints its details to stderr.
     */
    @Override
    public void run() {
        try {
            if (StringUtils.isEmpty(endpoint) && StringUtils.isEmpty(clusterId)) {
                throw new IllegalArgumentException("You must supply an endpoint or cluster ID");
            }
            // Prefer an explicit cluster ID; otherwise derive one from the endpoint.
            String resolvedClusterId = StringUtils.isNotEmpty(clusterId)
                    ? clusterId
                    : NeptuneClusterMetadata.clusterIdFromEndpoint(endpoint);
            printClusterDetails(NeptuneClusterMetadata.createFromClusterId(resolvedClusterId, awsCli));
        } catch (Exception e) {
            System.err.println("An error occurred while creating Neptune cluster info:");
            e.printStackTrace();
        }
    }

    /**
     * Writes a leading blank line followed by the cluster details to stderr.
     */
    public static void printClusterDetails(NeptuneClusterMetadata metadata) {
        System.err.println();
        metadata.printDetails();
    }
}
| 907 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/RunNeptuneExportSvc.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune;
import com.amazonaws.services.lambda.runtime.ClientContext;
import com.amazonaws.services.lambda.runtime.CognitoIdentity;
import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.LambdaLogger;
import com.amazonaws.services.neptune.export.NeptuneExportLambda;
import com.amazonaws.services.neptune.util.NotImplementedException;
import com.github.rvesse.airline.annotations.Command;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
@Command(name = "nesvc", description = "neptune-export service", hidden = true)
public class RunNeptuneExportSvc extends NeptuneExportBaseCommand implements Runnable {
    /**
     * Same as the default value given in the CFN template at https://docs.aws.amazon.com/neptune/latest/userguide/export-service.html
     */
    public static final int DEFAULT_MAX_FILE_DESCRIPTOR_COUNT = 10000;

    @Option(name = {"--json"}, description = "JSON")
    @Once
    private String json;

    @Option(name = {"--root-path"}, description = "Root directory path", hidden = true)
    @Once
    private String rootPath = new File("exports").getAbsolutePath();

    @Option(name = {"--clean"}, description = "Clean output path before beginning an export.", hidden = true)
    @Once
    private boolean cleanRootPath = false;

    @Option(name = {"--max-file-descriptor-count"}, description = "Maximum number of simultaneously open files.", hidden = true)
    @Once
    private int maxFileDescriptorCount = DEFAULT_MAX_FILE_DESCRIPTOR_COUNT;

    /**
     * Runs the export service in-process: feeds the --json payload to
     * NeptuneExportLambda with a stub Lambda Context. Exits 0 on success,
     * -1 on any error.
     */
    @Override
    public void run() {
        // Fix: --json is not marked required by the CLI parser, so guard
        // against a missing value instead of failing with a raw NPE below.
        if (json == null) {
            System.err.println("An error occurred while exporting from Neptune: missing --json parameter");
            System.exit(-1);
        }
        InputStream input = new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8));
        try {
            // Stub Context: only getLogger() is expected to be used here; all
            // other methods fail fast with NotImplementedException.
            new NeptuneExportLambda(rootPath, cleanRootPath, maxFileDescriptorCount).handleRequest(input, System.out, new Context() {
                @Override
                public String getAwsRequestId() {
                    throw new NotImplementedException();
                }

                @Override
                public String getLogGroupName() {
                    throw new NotImplementedException();
                }

                @Override
                public String getLogStreamName() {
                    throw new NotImplementedException();
                }

                @Override
                public String getFunctionName() {
                    throw new NotImplementedException();
                }

                @Override
                public String getFunctionVersion() {
                    throw new NotImplementedException();
                }

                @Override
                public String getInvokedFunctionArn() {
                    throw new NotImplementedException();
                }

                @Override
                public CognitoIdentity getIdentity() {
                    throw new NotImplementedException();
                }

                @Override
                public ClientContext getClientContext() {
                    throw new NotImplementedException();
                }

                @Override
                public int getRemainingTimeInMillis() {
                    throw new NotImplementedException();
                }

                @Override
                public int getMemoryLimitInMB() {
                    throw new NotImplementedException();
                }

                @Override
                public LambdaLogger getLogger() {
                    // Route lambda log output to stdout.
                    return new LambdaLogger() {
                        @Override
                        public void log(String s) {
                            System.out.println(s);
                        }

                        @Override
                        public void log(byte[] bytes) {
                            throw new NotImplementedException();
                        }
                    };
                }
            });
        } catch (Exception e) {
            e.printStackTrace();
            System.err.println("An error occurred while exporting from Neptune: " + e.getMessage());
            System.exit(-1);
        }
        System.exit(0);
    }
}
| 908 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/NeptuneExportCli.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune;
import com.amazonaws.services.neptune.export.NeptuneExportRunner;
import com.github.rvesse.airline.annotations.Alias;
import com.github.rvesse.airline.annotations.Cli;
import com.github.rvesse.airline.annotations.Parser;
import com.github.rvesse.airline.help.Help;
@Cli(name = "neptune-export.sh",
        description = "Export Neptune to CSV or JSON",
        defaultCommand = Help.class,
        commands = {
                ExportPropertyGraph.class,
                CreatePropertyGraphExportConfig.class,
                ExportPropertyGraphFromConfig.class,
                ExportPropertyGraphFromGremlinQueries.class,
                ExportRdfGraph.class,
                RunNeptuneExportSvc.class,
                GetClusterInfo.class,
                AddClone.class,
                RemoveClone.class,
                Help.class},
        parserConfiguration = @Parser(aliases = {
                @Alias(name = "create-config",
                        arguments = {"create-pg-config"}),
                @Alias(name = "export",
                        arguments = {"export-pg"}),
                @Alias(name = "export-from-config",
                        arguments = {"export-pg-from-config"}),
                @Alias(name = "export-from-queries",
                        arguments = {"export-pg-from-queries"})
        }))
public class NeptuneExportCli {

    /**
     * CLI entry point. All argument parsing and command dispatch is delegated
     * to {@link NeptuneExportRunner}, which shares its implementation with the
     * Lambda-based invocation path.
     */
    public static void main(String[] args) {
        NeptuneExportRunner runner = new NeptuneExportRunner(args);
        runner.run();
    }
}
| 909 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/NeptuneExportEventHandlerHost.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune;
import com.amazonaws.services.neptune.export.NeptuneExportEventHandler;
/**
 * Implemented by commands that can have a {@link NeptuneExportEventHandler}
 * attached to them, so that callers can be notified of export lifecycle
 * events (completion and errors).
 */
public interface NeptuneExportEventHandlerHost {
    /**
     * Registers the handler that will receive export event callbacks.
     *
     * @param eventHandler handler to notify of export completion and errors
     */
    void setEventHandler(NeptuneExportEventHandler eventHandler);
}
| 910 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/ExportPropertyGraphFromGremlinQueries.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune;
import com.amazonaws.services.neptune.cli.*;
import com.amazonaws.services.neptune.cluster.Cluster;
import com.amazonaws.services.neptune.io.Directories;
import com.amazonaws.services.neptune.io.DirectoryStructure;
import com.amazonaws.services.neptune.propertygraph.ExportStats;
import com.amazonaws.services.neptune.propertygraph.GremlinFilters;
import com.amazonaws.services.neptune.propertygraph.NamedQueries;
import com.amazonaws.services.neptune.propertygraph.NamedQueriesCollection;
import com.amazonaws.services.neptune.propertygraph.NeptuneGremlinClient;
import com.amazonaws.services.neptune.propertygraph.airline.NameQueriesTypeConverter;
import com.amazonaws.services.neptune.propertygraph.io.*;
import com.amazonaws.services.neptune.propertygraph.schema.ExportSpecification;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;
import com.amazonaws.services.neptune.util.CheckedActivity;
import com.amazonaws.services.neptune.util.Timer;
import com.github.rvesse.airline.annotations.Command;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.help.Examples;
import com.github.rvesse.airline.annotations.restrictions.Once;
import javax.inject.Inject;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
@Examples(examples = {
"bin/neptune-export.sh export-pg-from-queries -e neptunedbcluster-xxxxxxxxxxxx.cluster-yyyyyyyyyyyy.us-east-1.neptune.amazonaws.com -d /home/ec2-user/output -q person=\"g.V().hasLabel('Person').has('birthday', lt('1985-01-01')).project('id', 'first_name', 'last_name', 'birthday').by(id).by('firstName').by('lastName').by('birthday');g.V().hasLabel('Person').has('birthday', gte('1985-01-01')).project('id', 'first_name', 'last_name', 'birthday').by(id).by('firstName').by('lastName').by('birthday')\" -q post=\"g.V().hasLabel('Post').has('imageFile').range(0, 250000).project('id', 'image_file', 'creation_date', 'creator_id').by(id).by('imageFile').by('creationDate').by(in('CREATED').id());g.V().hasLabel('Post').has('imageFile').range(250000, 500000).project('id', 'image_file', 'creation_date', 'creator_id').by(id).by('imageFile').by('creationDate').by(in('CREATED').id());g.V().hasLabel('Post').has('imageFile').range(500000, 750000).project('id', 'image_file', 'creation_date', 'creator_id').by(id).by('imageFile').by('creationDate').by(in('CREATED').id());g.V().hasLabel('Post').has('imageFile').range(750000, -1).project('id', 'image_file', 'creation_date', 'creator_id').by(id).by('imageFile').by('creationDate').by(in('CREATED').id())\" --concurrency 6",
"bin/neptune-export.sh export-pg-from-queries -e neptunedbcluster-xxxxxxxxxxxx.cluster-yyyyyyyyyyyy.us-east-1.neptune.amazonaws.com -d /home/ec2-user/output -q person=\"g.V().hasLabel('Person').has('birthday', lt('1985-01-01')).project('id', 'first_name', 'last_name', 'birthday').by(id).by('firstName').by('lastName').by('birthday');g.V().hasLabel('Person').has('birthday', gte('1985-01-01')).project('id', 'first_name', 'last_name', 'birthday').by(id).by('firstName').by('lastName').by('birthday')\" -q post=\"g.V().hasLabel('Post').has('imageFile').range(0, 250000).project('id', 'image_file', 'creation_date', 'creator_id').by(id).by('imageFile').by('creationDate').by(in('CREATED').id());g.V().hasLabel('Post').has('imageFile').range(250000, 500000).project('id', 'image_file', 'creation_date', 'creator_id').by(id).by('imageFile').by('creationDate').by(in('CREATED').id());g.V().hasLabel('Post').has('imageFile').range(500000, 750000).project('id', 'image_file', 'creation_date', 'creator_id').by(id).by('imageFile').by('creationDate').by(in('CREATED').id());g.V().hasLabel('Post').has('imageFile').range(750000, -1).project('id', 'image_file', 'creation_date', 'creator_id').by(id).by('imageFile').by('creationDate').by(in('CREATED').id())\" --concurrency 6 --format json"},
descriptions = {
"Parallel export of Person data in 2 shards, sharding on the 'birthday' property, and Post data in 4 shards, sharding on range, using 6 threads",
"Parallel export of Person data and Post data as JSON"
})
@Command(name = "export-pg-from-queries", description = "Export property graph to CSV or JSON from Gremlin queries.")
public class ExportPropertyGraphFromGremlinQueries extends NeptuneExportCommand implements Runnable {
@Inject
private CloneClusterModule cloneStrategy = new CloneClusterModule();
@Inject
private CommonConnectionModule connection = new CommonConnectionModule(awsCli);
@Inject
private PropertyGraphTargetModule target = new PropertyGraphTargetModule();
@Inject
private PropertyGraphConcurrencyModule concurrency = new PropertyGraphConcurrencyModule();
@Inject
private PropertyGraphSerializationModule serialization = new PropertyGraphSerializationModule();
@Inject
private PropertyGraphScopeModule scope = new PropertyGraphScopeModule();
@Option(name = {"-q", "--queries", "--query", "--gremlin"}, description = "Gremlin queries (format: name=\"semi-colon-separated list of queries\" OR \"semi-colon-separated list of queries\").",
arity = 1, typeConverterProvider = NameQueriesTypeConverter.class)
private List<NamedQueries> queries = new ArrayList<>();
@Option(name = {"-f", "--queries-file"}, description = "Path to JSON queries file (file path, or 'https' or 's3' URI).")
@Once
private URI queriesFile;
@Option(name = {"--two-pass-analysis"}, description = "Perform two-pass analysis of query results (optional, default 'false').")
@Once
private boolean twoPassAnalysis = false;
@Option(name = {"--include-type-definitions"}, description = "Include type definitions from column headers (optional, default 'false').")
@Once
private boolean includeTypeDefinitions = false;
@Option(name = {"--timeout-millis"}, description = "Query timeout in milliseconds (optional).")
@Once
private Long timeoutMillis = null;
@Option(name = {"--structured-output"}, description = "Enables schema generation. When combined with \"--format csv\", CSV will be structured" +
"according to schema.")
@Once
private boolean structuredOutput = false;
@Override
public void run() {
try {
Timer.timedActivity("exporting property graph from queries", (CheckedActivity.Runnable) () -> {
try (Cluster cluster = cloneStrategy.cloneCluster(
connection.clusterMetadata(),
connection.config(),
concurrency.config(),
featureToggles())) {
Directories directories = initDirectories();
JsonResource<NamedQueriesCollection, Object> queriesResource = queriesFile != null ?
new JsonResource<>("Queries file", queriesFile, NamedQueriesCollection.class) :
directories.queriesResource();
CsvPrinterOptions csvPrinterOptions = CsvPrinterOptions.builder().setIncludeTypeDefinitions(includeTypeDefinitions).build();
JsonPrinterOptions jsonPrinterOptions = JsonPrinterOptions.builder().setStrictCardinality(true).build();
PropertyGraphTargetConfig targetConfig = target.config(directories, new PrinterOptions(csvPrinterOptions, jsonPrinterOptions));
NamedQueriesCollection namedQueries = getNamedQueriesCollection(queries, queriesFile, queriesResource);
GraphSchema graphSchema = new GraphSchema();
ExportStats exportStats = new ExportStats();
Collection<ExportSpecification> exportSpecifications = scope.exportSpecifications(
graphSchema,
GremlinFilters.EMPTY,
exportStats,
featureToggles());
if (!structuredOutput) {
directories.createResultsSubdirectories(namedQueries.names());
}
try (NeptuneGremlinClient client = NeptuneGremlinClient.create(cluster, serialization.config());
NeptuneGremlinClient.QueryClient queryClient = client.queryClient()) {
QueryJob queryJob = new QueryJob(
namedQueries.flatten(),
queryClient,
cluster.concurrencyConfig(),
targetConfig,
twoPassAnalysis,
timeoutMillis,
exportSpecifications,
featureToggles(),
structuredOutput);
queryJob.execute();
}
directories.writeResultsDirectoryPathAsMessage(target.description(), target);
queriesResource.writeResourcePathAsMessage(target);
directories.writeRootDirectoryPathAsReturnValue(target);
onExportComplete(directories, exportStats, cluster);
}
});
} catch (Exception e) {
handleException(e);
}
}
private Directories initDirectories() throws IOException {
if (structuredOutput) {
return target.createDirectories();
}
return target.createDirectories(DirectoryStructure.GremlinQueries);
}
private NamedQueriesCollection getNamedQueriesCollection(List<NamedQueries> queries,
URI queriesFile,
JsonResource<NamedQueriesCollection, Object> queriesResource) throws IOException {
if (queriesFile == null) {
NamedQueriesCollection namedQueries = new NamedQueriesCollection(queries);
queriesResource.save(namedQueries, null);
return namedQueries;
} else {
return queriesResource.get();
}
}
}
| 911 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/ExportPropertyGraphFromConfig.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune;
import com.amazonaws.services.neptune.cli.*;
import com.amazonaws.services.neptune.cluster.Cluster;
import com.amazonaws.services.neptune.io.Directories;
import com.amazonaws.services.neptune.propertygraph.ExportStats;
import com.amazonaws.services.neptune.propertygraph.NeptuneGremlinClient;
import com.amazonaws.services.neptune.propertygraph.io.ExportPropertyGraphJob;
import com.amazonaws.services.neptune.propertygraph.io.JsonResource;
import com.amazonaws.services.neptune.propertygraph.io.PropertyGraphTargetConfig;
import com.amazonaws.services.neptune.propertygraph.schema.ExportSpecification;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;
import com.amazonaws.services.neptune.util.CheckedActivity;
import com.amazonaws.services.neptune.util.Timer;
import com.github.rvesse.airline.annotations.Command;
import com.github.rvesse.airline.annotations.help.Examples;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import javax.inject.Inject;
import java.util.Collection;
@Examples(examples = {
        "bin/neptune-export.sh export-pg-from-config -e neptunedbcluster-xxxxxxxxxxxx.cluster-yyyyyyyyyyyy.us-east-1.neptune.amazonaws.com -c /home/ec2-user/config.json -d /home/ec2-user/output",
        "bin/neptune-export.sh export-pg-from-config -e neptunedbcluster-xxxxxxxxxxxx.cluster-yyyyyyyyyyyy.us-east-1.neptune.amazonaws.com -c /home/ec2-user/config.json -d /home/ec2-user/output --format json"
}, descriptions = {
        "Export data using the schema config in /home/ec2-user/config.json",
        "Export data as JSON using the schema config in /home/ec2-user/config.json"
})
@Command(name = "export-pg-from-config", description = "Export property graph from Neptune to CSV or JSON using an existing schema config file.")
public class ExportPropertyGraphFromConfig extends NeptuneExportCommand implements Runnable {
    @Inject
    private CloneClusterModule cloneStrategy = new CloneClusterModule();
    @Inject
    private CommonConnectionModule connection = new CommonConnectionModule(awsCli);
    @Inject
    private PropertyGraphScopeModule scope = new PropertyGraphScopeModule();
    @Inject
    private PropertyGraphTargetModule target = new PropertyGraphTargetModule();
    @Inject
    private PropertyGraphConcurrencyModule concurrency = new PropertyGraphConcurrencyModule();
    @Inject
    private PropertyGraphSerializationModule serialization = new PropertyGraphSerializationModule();
    @Inject
    private PropertyGraphRangeModule range = new PropertyGraphRangeModule();
    // 'true' here requires that a schema config file is supplied (no inference pass).
    @Inject
    private GraphSchemaProviderModule graphSchemaProvider = new GraphSchemaProviderModule(true);
    @Inject
    private PrinterOptionsModule printerOptions = new PrinterOptionsModule();
    @Inject
    private GremlinFiltersModule gremlinFilters = new GremlinFiltersModule();

    /**
     * Exports the property graph using a pre-existing schema config file, writing the
     * (possibly revised) schema and export stats back next to the results. The whole
     * activity is timed; failures are routed through {@code handleException}.
     */
    @Override
    public void run() {
        try {
            Timer.timedActivity("exporting property graph from config", (CheckedActivity.Runnable) () -> {
                // cloneCluster is a no-op passthrough unless cloning was requested.
                try (Cluster cluster = cloneStrategy.cloneCluster(
                        connection.clusterMetadata(),
                        connection.config(),
                        concurrency.config(),
                        featureToggles())) {
                    Directories directories = target.createDirectories();
                    JsonResource<GraphSchema, Boolean> configFileResource = directories.configFileResource();
                    JsonResource<ExportStats, GraphSchema> statsFileResource = directories.statsFileResource();
                    GraphSchema graphSchema = graphSchemaProvider.graphSchema();
                    ExportStats stats = new ExportStats();
                    PropertyGraphTargetConfig targetConfig = target.config(directories, printerOptions.config());
                    Collection<ExportSpecification> exportSpecifications = scope.exportSpecifications(
                            graphSchema,
                            gremlinFilters.filters(),
                            stats,
                            featureToggles());
                    try (NeptuneGremlinClient client = NeptuneGremlinClient.create(cluster, serialization.config());
                         GraphTraversalSource g = client.newTraversalSource()) {
                        ExportPropertyGraphJob exportJob = new ExportPropertyGraphJob(
                                exportSpecifications,
                                graphSchema,
                                g,
                                range.config(),
                                gremlinFilters.filters(),
                                cluster.concurrencyConfig(),
                                targetConfig, featureToggles(),
                                getMaxFileDescriptorCount()
                        );
                        // The job may revise the schema while exporting; save the revised
                        // version rather than the input config.
                        graphSchema = exportJob.execute();
                        configFileResource.save(graphSchema, false);
                        statsFileResource.save(stats, graphSchema);
                    }
                    directories.writeRootDirectoryPathAsMessage(target.description(), target);
                    configFileResource.writeResourcePathAsMessage(target);
                    // Human-readable summary goes to stderr so stdout stays machine-parseable.
                    System.err.println();
                    System.err.println(stats.formatStats(graphSchema));
                    directories.writeRootDirectoryPathAsReturnValue(target);
                    onExportComplete(directories, stats, cluster, graphSchema);
                }
            });
        } catch (Exception e) {
            handleException(e);
        }
    }
}
| 912 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/AddClone.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune;
import com.amazonaws.services.neptune.cli.AwsCliModule;
import com.amazonaws.services.neptune.cluster.AddCloneTask;
import com.amazonaws.services.neptune.cluster.NeptuneClusterMetadata;
import com.github.rvesse.airline.annotations.Command;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.AllowedValues;
import com.github.rvesse.airline.annotations.restrictions.Once;
import com.github.rvesse.airline.annotations.restrictions.Required;
import com.github.rvesse.airline.annotations.restrictions.ranges.IntegerRange;
import javax.inject.Inject;
import java.util.UUID;
@Command(name = "add-clone", description = "Clone an Amazon Neptune database cluster.")
public class AddClone implements Runnable {
    @Inject
    private AwsCliModule awsCli = new AwsCliModule();
    @Option(name = {"--source-cluster-id"}, description = "Cluster ID of the source Amazon Neptune database cluster.")
    @Required
    @Once
    private String sourceClusterId;
    // Default clone ID gets a short random suffix so repeated runs don't collide.
    @Option(name = {"--clone-cluster-id"}, description = "Cluster ID of the cloned Amazon Neptune database cluster.")
    @Once
    private String targetClusterId = String.format("neptune-export-cluster-%s", UUID.randomUUID().toString().substring(0, 5));
    @Option(name = {"--clone-cluster-instance-type"}, description = "Instance type for cloned cluster (by default neptune-export will use the same instance type as the source cluster).")
    @Once
    @AllowedValues(allowedValues = {
            "db.r4.large",
            "db.r4.xlarge",
            "db.r4.2xlarge",
            "db.r4.4xlarge",
            "db.r4.8xlarge",
            "db.r5.large",
            "db.r5.xlarge",
            "db.r5.2xlarge",
            "db.r5.4xlarge",
            "db.r5.8xlarge",
            "db.r5.12xlarge",
            "db.r5.16xlarge",
            "db.r5.24xlarge",
            "db.r5d.large",
            "db.r5d.xlarge",
            "db.r5d.2xlarge",
            "db.r5d.4xlarge",
            "db.r5d.8xlarge",
            "db.r5d.12xlarge",
            "db.r5d.16xlarge",
            "db.r5d.24xlarge",
            "db.r6g.large",
            "db.r6g.xlarge",
            "db.r6g.2xlarge",
            "db.r6g.4xlarge",
            "db.r6g.8xlarge",
            "db.r6g.12xlarge",
            "db.r6g.16xlarge",
            "db.x2g.large",
            "db.x2g.xlarge",
            "db.x2g.2xlarge",
            "db.x2g.4xlarge",
            "db.x2g.8xlarge",
            "db.x2g.12xlarge",
            "db.x2g.16xlarge",
            "db.t3.medium",
            "db.t4g.medium",
            "r4.large",
            "r4.xlarge",
            "r4.2xlarge",
            "r4.4xlarge",
            "r4.8xlarge",
            "r5.large",
            "r5.xlarge",
            "r5.2xlarge",
            "r5.4xlarge",
            "r5.8xlarge",
            "r5.12xlarge",
            "r5.16xlarge",
            "r5.24xlarge",
            "r5d.large",
            "r5d.xlarge",
            "r5d.2xlarge",
            "r5d.4xlarge",
            "r5d.8xlarge",
            "r5d.12xlarge",
            "r5d.16xlarge",
            "r5d.24xlarge",
            "r6g.large",
            "r6g.xlarge",
            "r6g.2xlarge",
            "r6g.4xlarge",
            "r6g.8xlarge",
            "r6g.12xlarge",
            "r6g.16xlarge",
            "x2g.large",
            "x2g.xlarge",
            "x2g.2xlarge",
            "x2g.4xlarge",
            "x2g.8xlarge",
            "x2g.12xlarge",
            "x2g.16xlarge",
            "t3.medium",
            "t4g.medium"})
    private String cloneClusterInstanceType;
    @Option(name = {"--clone-cluster-replica-count"}, description = "Number of read replicas to add to the cloned cluster (default, 0).")
    @Once
    @IntegerRange(min = 0, minInclusive = true, max = 15, maxInclusive = true)
    private int replicaCount = 0;
    @Option(name = {"--clone-cluster-engine-version"}, description = "Cloned cluster Neptune engine version (default, latest).", hidden = true)
    @Once
    private String engineVersion;
    @Option(name = {"--clone-cluster-correlation-id"}, description = "Correlation ID to be added to a correlation-id tag on the cloned cluster.")
    @Once
    private String cloneCorrelationId;

    /**
     * Clones the source cluster and prints the new cluster's details, followed by
     * the clone's cluster ID on stdout (the ID is the machine-readable result).
     */
    @Override
    public void run() {
        try {
            AddCloneTask addCloneTask = new AddCloneTask(sourceClusterId, targetClusterId, cloneClusterInstanceType, replicaCount, engineVersion, awsCli, cloneCorrelationId);
            NeptuneClusterMetadata clusterMetadata = addCloneTask.execute();
            GetClusterInfo.printClusterDetails(clusterMetadata);
            System.out.println(clusterMetadata.clusterId());
        } catch (Exception e) {
            // NOTE(review): unlike RemoveClone, failures here do not call System.exit(-1),
            // so the process still exits 0 on error — confirm whether that is intentional.
            System.err.println("An error occurred while cloning an Amazon Neptune database cluster:");
            e.printStackTrace();
        }
    }
}
| 913 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/NeptuneExportCommand.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune;
import com.amazonaws.services.neptune.cli.AwsCliModule;
import com.amazonaws.services.neptune.cli.FeatureToggleModule;
import com.amazonaws.services.neptune.cli.ProfilesModule;
import com.amazonaws.services.neptune.cluster.Cluster;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.export.NeptuneExportEventHandler;
import com.amazonaws.services.neptune.io.Directories;
import com.amazonaws.services.neptune.propertygraph.ExportStats;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;
import org.apache.tinkerpop.gremlin.process.remote.RemoteConnectionException;
import javax.inject.Inject;
/**
 * Base class for export commands: wires in common CLI modules (AWS CLI access,
 * feature toggles, profiles), forwards export lifecycle events to a pluggable
 * {@link NeptuneExportEventHandler}, and centralizes error handling.
 */
public abstract class NeptuneExportCommand extends NeptuneExportBaseCommand implements NeptuneExportEventHandler, NeptuneExportEventHandlerHost {
    @Inject
    protected AwsCliModule awsCli = new AwsCliModule();
    @Inject
    private FeatureToggleModule featureToggleModule = new FeatureToggleModule();
    @Inject
    private ProfilesModule profilesModule = new ProfilesModule();
    // True when invoked from the command line; controls whether errors terminate the JVM.
    private boolean isCliInvocation = false;
    private int maxFileDescriptorCount;
    // Defaults to a no-op handler so event dispatch never needs a null check.
    private NeptuneExportEventHandler eventHandler = NeptuneExportEventHandler.NULL_EVENT_HANDLER;
    @Override
    public void setEventHandler(NeptuneExportEventHandler eventHandler) {
        this.eventHandler = eventHandler;
    }
    public void setIsCliInvocation(boolean isCliInvocation) {
        this.isCliInvocation = isCliInvocation;
    }
    /** Forwards export completion (with a schema) to the registered event handler. */
    public void onExportComplete(Directories directories, ExportStats stats, Cluster cluster, GraphSchema graphSchema) throws Exception {
        eventHandler.onExportComplete(directories, stats, cluster, graphSchema);
    }
    /** Forwards export completion (without a schema) to the registered event handler. */
    public void onExportComplete(Directories directories, ExportStats stats, Cluster cluster) throws Exception {
        eventHandler.onExportComplete(directories, stats, cluster);
    }
    public void onError() {
        eventHandler.onError();
    }
    /**
     * Prints diagnostics for a failed export. Connection failures get targeted
     * troubleshooting hints; all other errors also notify the event handler.
     * Exits the JVM with -1 when running as a CLI invocation.
     */
    void handleException(Throwable e) {
        if (e.getCause() != null && RemoteConnectionException.class.isAssignableFrom(e.getCause().getClass())) {
            // NOTE(review): this branch does not invoke onError(), so the event handler
            // is not notified of connection failures — confirm whether that is intentional.
            e.printStackTrace();
            System.err.println("An error occurred while connecting to Neptune. " +
                    "Ensure you have not disabled SSL if the database requires SSL in transit. " +
                    "Ensure you have specified the --use-iam-auth flag (and set the SERVICE_REGION environment variable if running in your own environment) if the database uses IAM database authentication. " +
                    "Ensure the database's VPC security group(s) allow access from the export tool.");
        } else {
            e.printStackTrace();
            onError();
            System.err.println("An error occurred while exporting from Neptune: " + e.getMessage());
        }
        if (isCliInvocation) {
            System.exit(-1);
        }
    }
    FeatureToggles featureToggles() {
        return featureToggleModule.featureToggles();
    }
    public int getMaxFileDescriptorCount() {
        return maxFileDescriptorCount;
    }
    public void setMaxFileDescriptorCount(int maxFileDescriptorCount) {
        this.maxFileDescriptorCount = maxFileDescriptorCount;
    }
}
| 914 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/ExportRdfGraph.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune;
import com.amazonaws.services.neptune.cli.*;
import com.amazonaws.services.neptune.cluster.Cluster;
import com.amazonaws.services.neptune.cluster.ConcurrencyConfig;
import com.amazonaws.services.neptune.cluster.EventId;
import com.amazonaws.services.neptune.cluster.GetLastEventIdStrategy;
import com.amazonaws.services.neptune.io.Directories;
import com.amazonaws.services.neptune.propertygraph.ExportStats;
import com.amazonaws.services.neptune.propertygraph.io.JsonResource;
import com.amazonaws.services.neptune.rdf.NeptuneSparqlClient;
import com.amazonaws.services.neptune.rdf.ExportRdfJob;
import com.amazonaws.services.neptune.util.CheckedActivity;
import com.amazonaws.services.neptune.util.Timer;
import com.github.rvesse.airline.annotations.Command;
import com.github.rvesse.airline.annotations.help.Examples;
import javax.inject.Inject;
@Examples(examples = {
        "bin/neptune-export.sh export-rdf -e neptunedbcluster-xxxxxxxxxxxx.cluster-yyyyyyyyyyyy.us-east-1.neptune.amazonaws.com -d /home/ec2-user/output "},
        descriptions = {
                "Export all data to the /home/ec2-user/output directory"
        })
@Command(name = "export-rdf", description = "Export RDF graph from Neptune.")
public class ExportRdfGraph extends NeptuneExportCommand implements Runnable {
    @Inject
    private CloneClusterModule cloneStrategy = new CloneClusterModule();
    @Inject
    private CommonConnectionModule connection = new CommonConnectionModule(awsCli);
    @Inject
    private RdfTargetModule target = new RdfTargetModule();
    @Inject
    private RdfExportScopeModule exportScope = new RdfExportScopeModule();
    @Inject
    private NeptuneStreamsModule streams = new NeptuneStreamsModule();

    /**
     * Exports the RDF graph over SPARQL. Optionally records the last Neptune
     * Streams event ID before the export so downstream consumers can resume
     * streaming from that point. Failures are routed through handleException.
     */
    @Override
    public void run() {
        try {
            Timer.timedActivity(String.format("exporting rdf %s", exportScope.scope()), (CheckedActivity.Runnable) () -> {
                // RDF export runs single-threaded, hence the fixed ConcurrencyConfig(1).
                try (Cluster cluster = cloneStrategy.cloneCluster(
                        connection.clusterMetadata(),
                        connection.config(),
                        new ConcurrencyConfig(1),
                        featureToggles())) {
                    Directories directories = target.createDirectories();
                    JsonResource<EventId, Object> eventIdFileResource = directories.lastEventIdFileResource();
                    GetLastEventIdStrategy getLastEventIdStrategy = streams.lastEventIdStrategy(cluster, eventIdFileResource);
                    // Capture the stream position before exporting so no changes are missed.
                    getLastEventIdStrategy.saveLastEventId("sparql");
                    try (NeptuneSparqlClient client = NeptuneSparqlClient.create(cluster.connectionConfig(), featureToggles())) {
                        ExportRdfJob job = exportScope.createJob(client, target.config(directories));
                        job.execute();
                    }
                    directories.writeRootDirectoryPathAsReturnValue(target);
                    getLastEventIdStrategy.writeLastEventIdResourcePathAsMessage(target);
                    onExportComplete(directories, new ExportStats(), cluster);
                }
            });
        } catch (Exception e) {
            handleException(e);
        }
    }
}
| 915 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/RemoveClone.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune;
import com.amazonaws.services.neptune.cli.AwsCliModule;
import com.amazonaws.services.neptune.cluster.GetClusterIdFromCorrelationId;
import com.amazonaws.services.neptune.cluster.NeptuneClusterMetadata;
import com.amazonaws.services.neptune.cluster.RemoveCloneTask;
import com.github.rvesse.airline.annotations.Command;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
import com.github.rvesse.airline.annotations.restrictions.RequireOnlyOne;
import org.apache.commons.lang.StringUtils;
import javax.inject.Inject;
@Command(name = "remove-clone", description = "Remove a cloned Amazon Neptune database cluster.")
public class RemoveClone implements Runnable {
    @Inject
    private AwsCliModule awsCli = new AwsCliModule();
    @Option(name = {"--clone-cluster-id"}, description = "Cluster ID of the cloned Amazon Neptune database cluster.")
    @RequireOnlyOne(tag = "cloneClusterIdOrCorrelationId")
    @Once
    private String cloneClusterId;
    @Option(name = {"--clone-cluster-correlation-id"}, description = "Value of the correlation-id tag on an Amazon Neptune cloned cluster that you want to remove.")
    @RequireOnlyOne(tag = "cloneClusterIdOrCorrelationId")
    @Once
    private String correlationId;

    /**
     * Deletes a cloned cluster. When only a correlation ID was supplied, the clone's
     * cluster ID is looked up first; if no clone matches, the process exits with 0.
     * A failure during removal exits with -1.
     */
    @Override
    public void run() {
        boolean mustResolveFromCorrelationId =
                StringUtils.isEmpty(cloneClusterId) && StringUtils.isNotEmpty(correlationId);
        if (mustResolveFromCorrelationId) {
            cloneClusterId = new GetClusterIdFromCorrelationId(correlationId, awsCli).execute();
            if (StringUtils.isEmpty(cloneClusterId)) {
                System.err.println(String.format("Unable to get a cloned Amazon Neptune database cluster ID for correlation ID %s", correlationId));
                System.exit(0);
            }
        }
        try {
            NeptuneClusterMetadata clusterMetadata = NeptuneClusterMetadata.createFromClusterId(cloneClusterId, awsCli);
            new RemoveCloneTask(clusterMetadata).execute();
        } catch (Exception e) {
            System.err.println("An error occurred while removing a cloned Amazon Neptune database cluster:");
            e.printStackTrace();
            System.exit(-1);
        }
    }
}
| 916 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/ExportPropertyGraph.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune;
import com.amazonaws.services.neptune.cli.*;
import com.amazonaws.services.neptune.cluster.Cluster;
import com.amazonaws.services.neptune.cluster.EventId;
import com.amazonaws.services.neptune.cluster.GetLastEventIdStrategy;
import com.amazonaws.services.neptune.io.Directories;
import com.amazonaws.services.neptune.propertygraph.ExportStats;
import com.amazonaws.services.neptune.propertygraph.NeptuneGremlinClient;
import com.amazonaws.services.neptune.propertygraph.io.ExportPropertyGraphJob;
import com.amazonaws.services.neptune.propertygraph.io.JsonResource;
import com.amazonaws.services.neptune.propertygraph.io.PropertyGraphTargetConfig;
import com.amazonaws.services.neptune.propertygraph.schema.ExportSpecification;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;
import com.amazonaws.services.neptune.util.CheckedActivity;
import com.amazonaws.services.neptune.util.Timer;
import com.github.rvesse.airline.annotations.Command;
import com.github.rvesse.airline.annotations.help.Examples;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import javax.inject.Inject;
import java.util.Collection;
/**
 * CLI command that exports a property graph from an Amazon Neptune cluster to
 * CSV or JSON. Optionally runs against a freshly cloned cluster (per the clone
 * strategy), captures the last Neptune Streams event ID before exporting, and
 * writes the graph schema and export statistics alongside the exported data.
 */
@Examples(examples = {
        "bin/neptune-export.sh export-pg -e neptunedbcluster-xxxxxxxxxxxx.cluster-yyyyyyyyyyyy.us-east-1.neptune.amazonaws.com -d /home/ec2-user/output",
        "bin/neptune-export.sh export-pg -e neptunedbcluster-xxxxxxxxxxxx.cluster-yyyyyyyyyyyy.us-east-1.neptune.amazonaws.com -d /home/ec2-user/output --format json",
        "bin/neptune-export.sh export-pg -e neptunedbcluster-xxxxxxxxxxxx.cluster-yyyyyyyyyyyy.us-east-1.neptune.amazonaws.com -d /home/ec2-user/output -s nodes",
        "bin/neptune-export.sh export-pg -e neptunedbcluster-xxxxxxxxxxxx.cluster-yyyyyyyyyyyy.us-east-1.neptune.amazonaws.com -d /home/ec2-user/output -nl User -el FOLLOWS",
        "bin/neptune-export.sh export-pg -e neptunedbcluster-xxxxxxxxxxxx.cluster-yyyyyyyyyyyy.us-east-1.neptune.amazonaws.com -d /home/ec2-user/output -cn 2",
        "bin/neptune-export.sh export-pg -e neptunedbcluster-xxxxxxxxxxxx.cluster-yyyyyyyyyyyy.us-east-1.neptune.amazonaws.com -d /home/ec2-user/output -cn 2 -r 1000"
}, descriptions = {
        "Export all data to the /home/ec2-user/output directory",
        "Export all data to the /home/ec2-user/output directory as JSON",
        "Export only nodes to the /home/ec2-user/output directory",
        "Export only User nodes and FOLLOWS relationships",
        "Parallel export using 2 threads",
        "Parallel export using 2 threads, with each thread processing batches of 1000 nodes or edges"
})
@Command(name = "export-pg", description = "Export property graph from Neptune to CSV or JSON.")
public class ExportPropertyGraph extends NeptuneExportCommand implements Runnable {

    // Each injected module contributes a group of related CLI options.
    @Inject
    private CloneClusterModule cloneStrategy = new CloneClusterModule();

    @Inject
    private CommonConnectionModule connection = new CommonConnectionModule(awsCli);

    @Inject
    private PropertyGraphScopeModule scope = new PropertyGraphScopeModule();

    @Inject
    private PropertyGraphTargetModule target = new PropertyGraphTargetModule();

    @Inject
    private PropertyGraphConcurrencyModule concurrency = new PropertyGraphConcurrencyModule();

    @Inject
    private PropertyGraphSerializationModule serialization = new PropertyGraphSerializationModule();

    @Inject
    private PropertyGraphRangeModule range = new PropertyGraphRangeModule();

    @Inject
    private GraphSchemaProviderModule graphSchemaProvider = new GraphSchemaProviderModule(false);

    @Inject
    private PrinterOptionsModule printerOptions = new PrinterOptionsModule();

    @Inject
    private GremlinFiltersModule gremlinFilters = new GremlinFiltersModule();

    @Inject
    private NeptuneStreamsModule streams = new NeptuneStreamsModule();

    @Override
    public void run() {
        try {
            Timer.timedActivity("exporting property graph", (CheckedActivity.Runnable) () -> {
                // Depending on the clone strategy this is either a real clone
                // (deleted on close) or a pass-through to the original cluster.
                try (Cluster cluster = cloneStrategy.cloneCluster(
                        connection.clusterMetadata(),
                        connection.config(),
                        concurrency.config(),
                        featureToggles())) {
                    Directories directories = target.createDirectories();
                    JsonResource<GraphSchema, Boolean> configFileResource = directories.configFileResource();
                    JsonResource<EventId, Object> eventIdFileResource = directories.lastEventIdFileResource();
                    JsonResource<ExportStats, GraphSchema> statsFileResource = directories.statsFileResource();
                    // Record the last Neptune Streams event ID before the export
                    // begins — presumably so a consumer can resume the stream from
                    // this point after loading the export (TODO confirm).
                    GetLastEventIdStrategy getLastEventIdStrategy = streams.lastEventIdStrategy(cluster, eventIdFileResource);
                    getLastEventIdStrategy.saveLastEventId("gremlin");
                    GraphSchema graphSchema = graphSchemaProvider.graphSchema();
                    ExportStats stats = new ExportStats();
                    PropertyGraphTargetConfig targetConfig = target.config(directories, printerOptions.config());
                    Collection<ExportSpecification> exportSpecifications = scope.exportSpecifications(
                            graphSchema,
                            gremlinFilters.filters(),
                            stats,
                            featureToggles());
                    // Run the export; the job may refine the schema, so capture its return value.
                    try (NeptuneGremlinClient client = NeptuneGremlinClient.create(cluster, serialization.config());
                         GraphTraversalSource g = client.newTraversalSource()) {
                        ExportPropertyGraphJob exportJob = new ExportPropertyGraphJob(
                                exportSpecifications,
                                graphSchema,
                                g,
                                range.config(),
                                gremlinFilters.filters(),
                                cluster.concurrencyConfig(),
                                targetConfig,
                                featureToggles(),
                                getMaxFileDescriptorCount()
                        );
                        graphSchema = Timer.timedActivity(
                                "export",
                                (CheckedActivity.Callable<GraphSchema>) exportJob::execute);
                        configFileResource.save(graphSchema, false);
                        statsFileResource.save(stats, graphSchema);
                    }
                    // Report output locations and stats, then fire the completion hook.
                    directories.writeRootDirectoryPathAsMessage(target.description(), target);
                    configFileResource.writeResourcePathAsMessage(target);
                    getLastEventIdStrategy.writeLastEventIdResourcePathAsMessage(target);
                    System.err.println();
                    System.err.println(stats.formatStats(graphSchema));
                    directories.writeRootDirectoryPathAsReturnValue(target);
                    onExportComplete(directories, stats, cluster, graphSchema);
                }
            });
        } catch (Exception e) {
            handleException(e);
        }
    }
}
| 917 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/SimulatedCloneCluster.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
/**
 * {@link CloneClusterStrategy} that does not actually clone anything: it logs
 * that cloning is being simulated and returns a {@link Cluster} wrapper around
 * the original connection, concurrency settings and cluster metadata.
 */
public class SimulatedCloneCluster implements CloneClusterStrategy {

    private final NeptuneClusterMetadata clusterMetadata;

    public SimulatedCloneCluster(NeptuneClusterMetadata clusterMetadata) {
        this.clusterMetadata = clusterMetadata;
    }

    @Override
    public Cluster cloneCluster(ConnectionConfig connectionConfig, ConcurrencyConfig concurrencyConfig) throws Exception {
        System.err.println("Simulating creating cloned cluster (original cluster will be used)...");
        return new PassthroughCluster(connectionConfig, concurrencyConfig, clusterMetadata);
    }

    // Cluster that simply exposes the original configuration; closing it only logs.
    private static final class PassthroughCluster implements Cluster {

        private final ConnectionConfig connectionConfig;
        private final ConcurrencyConfig concurrencyConfig;
        private final NeptuneClusterMetadata clusterMetadata;

        private PassthroughCluster(ConnectionConfig connectionConfig,
                                   ConcurrencyConfig concurrencyConfig,
                                   NeptuneClusterMetadata clusterMetadata) {
            this.connectionConfig = connectionConfig;
            this.concurrencyConfig = concurrencyConfig;
            this.clusterMetadata = clusterMetadata;
        }

        @Override
        public ConnectionConfig connectionConfig() {
            return connectionConfig;
        }

        @Override
        public ConcurrencyConfig concurrencyConfig() {
            return concurrencyConfig;
        }

        @Override
        public NeptuneClusterMetadata clusterMetadata() {
            return clusterMetadata;
        }

        @Override
        public void close() throws Exception {
            System.err.println("Simulating deleting cloned cluster (original cluster will not be deleted)...");
        }
    }
}
| 918 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/SimpleResponseHandler.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.http.HttpResponseHandler;
import java.io.InputStream;
import java.util.Scanner;
/**
 * Converts a raw AWS SDK HTTP response into this package's simple
 * {@link HttpResponse}, throwing an {@link AmazonServiceException} for
 * non-2xx status codes.
 */
class SimpleResponseHandler implements HttpResponseHandler<HttpResponse> {

    @Override
    public HttpResponse handle(com.amazonaws.http.HttpResponse response) {
        int status = response.getStatusCode();
        String content = readContent(response.getContent());
        if (status < 200 || status >= 300) {
            AmazonServiceException ase = new AmazonServiceException(content);
            ase.setStatusCode(status);
            throw ase;
        }
        // Guard against a missing content-type header (the original code threw
        // NPE/IndexOutOfBoundsException here); callers receive null instead.
        java.util.List<String> contentTypeValues = response.getHeaderValues("content-type");
        String contentType = (contentTypeValues == null || contentTypeValues.isEmpty()) ?
                null :
                contentTypeValues.get(0);
        return new HttpResponse(status, content, contentType);
    }

    // Reads the entire body as one string (the "\A" delimiter consumes the whole
    // stream). Returns null when there is no body, "" when the body is empty.
    // The Scanner is closed (which also closes the stream) — safe because
    // needsConnectionLeftOpen() is false.
    private static String readContent(InputStream stream) {
        if (stream == null) {
            return null;
        }
        try (Scanner s = new Scanner(stream).useDelimiter("\\A")) {
            return s.hasNext() ? s.next() : "";
        }
    }

    @Override
    public boolean needsConnectionLeftOpen() {
        return false;
    }
}
| 919 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/ProxyConfig.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
/**
 * Immutable value object holding proxy connection details.
 */
public class ProxyConfig {

    private final String endpoint;
    private final int port;
    private final boolean removeHostHeader;

    /**
     * @param endpoint         proxy endpoint (host name or address)
     * @param port             proxy port
     * @param removeHostHeader value of the remove-host-header option; consumed by connection code elsewhere
     */
    public ProxyConfig(String endpoint, int port, boolean removeHostHeader) {
        this.endpoint = endpoint;
        this.port = port;
        this.removeHostHeader = removeHostHeader;
    }

    public String endpoint() {
        return endpoint;
    }

    public int port() {
        return port;
    }

    public boolean removeHostHeader() {
        return removeHostHeader;
    }

    @Override
    public String toString() {
        return String.format("ProxyConfig{endpoint='%s', port=%s, removeHostHeader=%s}",
                endpoint, port, removeHostHeader);
    }
}
| 920 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/CloneCluster.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.amazonaws.services.neptune.AmazonNeptune;
import java.util.UUID;
import java.util.function.Supplier;
/**
 * {@link CloneClusterStrategy} that creates a real clone of the source Neptune
 * cluster (via {@link AddCloneTask}) and deletes it again when the returned
 * {@link Cluster} is closed.
 */
public class CloneCluster implements CloneClusterStrategy {

    private final NeptuneClusterMetadata originalClusterMetadata;
    private final String cloneClusterInstanceType;
    private final int replicaCount;
    private final int maxConcurrency;
    private final String engineVersion;
    private final String cloneCorrelationId;

    /**
     * @param originalClusterMetadata  metadata of the cluster to clone
     * @param cloneClusterInstanceType instance type to use for the clone's instances (passed through to {@link AddCloneTask})
     * @param replicaCount             number of read replicas to create in the clone
     * @param maxConcurrency           upper bound on export concurrency; values <= 0 mean "no bound"
     * @param engineVersion            engine version for the clone (passed through to {@link AddCloneTask})
     * @param cloneCorrelationId       correlation-id tag value applied to the clone
     */
    public CloneCluster(NeptuneClusterMetadata originalClusterMetadata,
                        String cloneClusterInstanceType,
                        int replicaCount,
                        int maxConcurrency,
                        String engineVersion,
                        String cloneCorrelationId) {
        this.originalClusterMetadata = originalClusterMetadata;
        this.cloneClusterInstanceType = cloneClusterInstanceType;
        this.replicaCount = replicaCount;
        this.maxConcurrency = maxConcurrency;
        this.engineVersion = engineVersion;
        this.cloneCorrelationId = cloneCorrelationId;
    }

    @Override
    public Cluster cloneCluster(ConnectionConfig connectionConfig, ConcurrencyConfig concurrencyConfig) throws Exception {
        // Cloning requires a direct connection to the cluster, not a load balancer.
        if (!connectionConfig.isDirectConnection()) {
            throw new IllegalStateException("neptune-export does not support cloning a Neptune cluster accessed via a load balancer");
        }
        String clusterId = originalClusterMetadata.clusterId();
        // Random 5-char suffix keeps repeated exports from colliding on the clone's cluster ID.
        String targetClusterId = String.format("neptune-export-cluster-%s", UUID.randomUUID().toString().substring(0, 5));
        AddCloneTask addCloneTask = new AddCloneTask(
                clusterId,
                targetClusterId,
                cloneClusterInstanceType,
                replicaCount,
                engineVersion,
                originalClusterMetadata.clientSupplier(),
                cloneCorrelationId);
        NeptuneClusterMetadata targetClusterMetadata = addCloneTask.execute();
        InstanceType instanceType = InstanceType.parse(
                targetClusterMetadata.instanceMetadataFor(targetClusterMetadata.primary()).instanceType());
        // Concurrency scales with the number of instances: primary plus replicas,
        // capped by maxConcurrency when one was supplied.
        int targetConcurrency = instanceType.concurrency() * (1 + replicaCount);
        int newConcurrency = maxConcurrency > 0 ?
                Math.min(maxConcurrency, targetConcurrency) :
                targetConcurrency;
        System.err.println();
        System.err.println(String.format("Endpoints : %s", String.join(", ", targetClusterMetadata.endpoints())));
        System.err.println(String.format("Max concurrency : %s", maxConcurrency));
        System.err.println(String.format("Concurrency : %s", newConcurrency));
        // The returned cluster points at the clone's endpoints but reuses the
        // original port, proxy config and credentials provider.
        return new ClonedCluster(
                new ConnectionConfig(
                        targetClusterId,
                        targetClusterMetadata.endpoints(),
                        connectionConfig.port(),
                        targetClusterMetadata.isIAMDatabaseAuthenticationEnabled(),
                        true,
                        connectionConfig.proxyConfig(),
                        connectionConfig.getCredentialsProvider()
                ),
                new ConcurrencyConfig(newConcurrency),
                targetClusterMetadata
        );
    }

    // Cluster implementation whose close() deletes the clone it wraps.
    private static class ClonedCluster implements Cluster {

        private final ConnectionConfig connectionConfig;
        private final ConcurrencyConfig concurrencyConfig;
        private final NeptuneClusterMetadata clusterMetadata;

        private ClonedCluster(ConnectionConfig connectionConfig,
                              ConcurrencyConfig concurrencyConfig,
                              NeptuneClusterMetadata clusterMetadata) {
            this.connectionConfig = connectionConfig;
            this.concurrencyConfig = concurrencyConfig;
            this.clusterMetadata = clusterMetadata;
        }

        @Override
        public ConnectionConfig connectionConfig() {
            return connectionConfig;
        }

        @Override
        public ConcurrencyConfig concurrencyConfig() {
            return concurrencyConfig;
        }

        @Override
        public NeptuneClusterMetadata clusterMetadata() {
            return clusterMetadata;
        }

        @Override
        public void close() throws Exception {
            // Tear the clone down once the export is finished with it.
            RemoveCloneTask removeCloneTask = new RemoveCloneTask(clusterMetadata);
            removeCloneTask.execute();
        }
    }
}
| 921 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/DoNotGetLastEventIdTask.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.amazonaws.services.neptune.io.CommandWriter;
import java.io.IOException;
/**
 * {@link GetLastEventIdStrategy} that intentionally does nothing — used when
 * the last Neptune Streams event ID should not be captured.
 */
public class DoNotGetLastEventIdTask implements GetLastEventIdStrategy {

    @Override
    public void saveLastEventId(String streamEndpointType) throws IOException {
        // Intentionally a no-op: no event ID is captured.
    }

    @Override
    public void writeLastEventIdResourcePathAsMessage(CommandWriter writer) {
        // Intentionally a no-op: there is no resource path to report.
    }
}
| 922 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/GetLastEventId.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.regions.DefaultAwsRegionProviderChain;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.function.ToIntFunction;
import java.util.stream.Collectors;
/**
 * Retrieves the last Neptune Streams event ID for a cluster by querying the
 * streams endpoint at a commit number beyond the head of the stream and parsing
 * the resulting StreamRecordsNotFoundException message.
 */
public class GetLastEventId {

    private static final org.slf4j.Logger logger = LoggerFactory.getLogger(GetLastEventId.class);

    /**
     * Returns the largest commitNum value accepted by the given engine version,
     * as a string. Engine versions before 1.0.4.2 only accept a 32-bit value;
     * later versions accept a 64-bit value.
     *
     * @param engineVersion dot-separated engine version, e.g. "1.0.4.2"
     */
    public static String MaxCommitNumValueForEngine(String engineVersion) {
        // Robust parse: missing or non-numeric components are treated as 0
        // instead of throwing (the original threw IndexOutOfBoundsException /
        // NumberFormatException for short or suffixed version strings).
        String[] parts = engineVersion.split("\\.");
        int minor = versionPart(parts, 1);
        int patch = versionPart(parts, 2);
        int build = versionPart(parts, 3);
        if (minor == 0 && (patch < 4 || (patch == 4 && build < 2))) {
            return String.valueOf(Integer.MAX_VALUE);
        }
        return String.valueOf(Long.MAX_VALUE);
    }

    // Parses one dot-separated component of the version string, defaulting to 0
    // when the component is missing or not a plain integer.
    private static int versionPart(String[] parts, int index) {
        if (index >= parts.length) {
            return 0;
        }
        try {
            return Integer.parseInt(parts[index].trim());
        } catch (NumberFormatException e) {
            return 0;
        }
    }

    private final NeptuneClusterMetadata clusterMetadata;
    private final ConnectionConfig connectionConfig;
    private final String streamEndpointType;

    public GetLastEventId(NeptuneClusterMetadata clusterMetadata, ConnectionConfig connectionConfig, String streamEndpointType) {
        this.clusterMetadata = clusterMetadata;
        this.connectionConfig = connectionConfig;
        this.streamEndpointType = streamEndpointType;
    }

    /**
     * Returns the last event ID from the cluster's streams endpoint, or null if
     * streams are disabled, the endpoint is unreachable, or the ID cannot be
     * determined. Never throws: all failures are logged and mapped to null.
     */
    public EventId execute() {
        if (!clusterMetadata.isStreamEnabled()) {
            return null;
        }
        String endpoint = connectionConfig.endpoints().iterator().next();
        String streamsEndpoint = String.format("https://%s:%s/%s/stream", endpoint, connectionConfig.port(), streamEndpointType);
        logger.info("Streams endpoint: {}", streamsEndpoint);
        try {
            String region = new DefaultAwsRegionProviderChain().getRegion();
            // Third argument presumably relaxes behavior for local testing — TODO confirm.
            NeptuneHttpsClient neptuneHttpsClient = new NeptuneHttpsClient(streamsEndpoint, region, endpoint.equals("localhost"));
            // Query past the head of the stream so the service reports the last
            // event ID via StreamRecordsNotFoundException (handled below).
            Map<String, String> params = new HashMap<>();
            params.put("commitNum", MaxCommitNumValueForEngine(clusterMetadata.engineVersion()));
            params.put("limit", "1");
            HttpResponse httpResponse = neptuneHttpsClient.get(params);
            logger.info(httpResponse.getContent());
            // A successful response is only logged; the last event ID is recovered
            // exclusively from the exception path.
            return null;
        } catch (AmazonServiceException e) {
            if (e.getErrorCode().equals("StreamRecordsNotFoundException")) {
                EventId lastEventId = StreamRecordsNotFoundExceptionParser.parseLastEventId(e.getErrorMessage());
                logger.info("LastEventId: {}", lastEventId);
                return lastEventId;
            }
            logger.error("Error while accessing Neptune Streams endpoint", e);
            return null;
        } catch (Exception e) {
            logger.error("Error while accessing Neptune Streams endpoint", e);
            return null;
        }
    }
}
| 923 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/GetLastEventIdStrategy.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.amazonaws.services.neptune.io.CommandWriter;
import java.io.IOException;
/**
 * Strategy for capturing and reporting the most recent Neptune Streams event ID
 * at export time.
 */
public interface GetLastEventIdStrategy {

    /**
     * Captures and persists the last event ID for the given stream endpoint
     * type (e.g. "gremlin"), if this strategy supports doing so.
     *
     * @throws IOException if the event ID cannot be persisted
     */
    void saveLastEventId(String streamEndpointType) throws IOException;

    /**
     * Writes the location of the persisted last-event-ID resource via the
     * supplied writer, if this strategy persisted one.
     */
    void writeLastEventIdResourcePathAsMessage(CommandWriter writer);
}
| 924 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/SimpleErrorResponseHandler.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.http.HttpResponse;
import com.amazonaws.http.HttpResponseHandler;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.InputStream;
import java.util.Scanner;
/**
 * Builds an {@link AmazonServiceException} from a Neptune error response,
 * extracting requestId, code and detailedMessage from the JSON body when one
 * is present; otherwise falls back to the HTTP status text.
 */
public class SimpleErrorResponseHandler implements HttpResponseHandler<AmazonServiceException> {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    @Override
    public AmazonServiceException handle(HttpResponse response) throws Exception {
        String content = readContent(response.getContent());
        AmazonServiceException.ErrorType errorType = errorTypeFor(response.getStatusCode());
        // Defaults used when the body is absent or omits a field.
        String errorCode = "UnknownError";
        String message = response.getStatusText();
        String requestId = "";
        if (content != null) {
            JsonNode json = MAPPER.readTree(content);
            if (json.has("requestId")) {
                requestId = json.path("requestId").textValue();
            }
            if (json.has("code")) {
                errorCode = json.path("code").textValue();
            }
            if (json.has("detailedMessage")) {
                message = json.path("detailedMessage").textValue();
            }
        }
        AmazonServiceException exception = new AmazonServiceException(message);
        exception.setStatusCode(response.getStatusCode());
        exception.setRequestId(requestId);
        exception.setErrorType(errorType);
        exception.setErrorCode(errorCode);
        return exception;
    }

    // Reads the entire body as one string (the "\A" delimiter consumes the whole
    // stream). Returns null when there is no body, "" when the body is empty.
    // The Scanner is closed, fixing the resource leak in the original code.
    private static String readContent(InputStream stream) {
        if (stream == null) {
            return null;
        }
        try (Scanner s = new Scanner(stream).useDelimiter("\\A")) {
            return s.hasNext() ? s.next() : "";
        }
    }

    // Maps the HTTP status code onto the SDK's error-type classification.
    private static AmazonServiceException.ErrorType errorTypeFor(int statusCode) {
        if (statusCode >= 500) {
            return AmazonServiceException.ErrorType.Service;
        }
        if (statusCode >= 400) {
            return AmazonServiceException.ErrorType.Client;
        }
        return AmazonServiceException.ErrorType.Unknown;
    }

    @Override
    public boolean needsConnectionLeftOpen() {
        return false;
    }
}
| 925 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/RemoveCloneTask.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.amazonaws.services.neptune.AmazonNeptune;
import com.amazonaws.services.neptune.model.*;
import com.amazonaws.services.neptune.util.Activity;
import com.amazonaws.services.neptune.util.Timer;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
/**
 * Deletes a cloned Neptune cluster: all instances in parallel, then the cluster
 * itself, then its DB cluster / DB parameter groups. Refuses to delete clusters
 * that are not tagged as created by neptune-export.
 */
public class RemoveCloneTask {

    private final NeptuneClusterMetadata clusterMetadata;

    public RemoveCloneTask(NeptuneClusterMetadata clusterMetadata) {
        this.clusterMetadata = clusterMetadata;
    }

    public void execute() {
        AmazonNeptune neptune = clusterMetadata.clientSupplier().get();
        try {
            Timer.timedActivity("deleting cloned cluster", false,
                    (Activity.Runnable) () -> deleteCluster(neptune));
        } finally {
            if (neptune != null) {
                neptune.shutdown();
            }
        }
    }

    private void deleteCluster(AmazonNeptune neptuneClient) {
        System.err.println();
        System.err.println("Deleting cloned cluster " + clusterMetadata.clusterId() + "...");
        // Safety check: refuse to delete anything neptune-export did not create.
        if (!clusterMetadata.isTaggedWithNeptuneExport()) {
            throw new IllegalStateException("Cluster must have an 'application' tag with the value '" +
                    NeptuneClusterMetadata.NEPTUNE_EXPORT_APPLICATION_TAG + "' before it can be deleted");
        }
        // Delete the primary and all replicas concurrently.
        ExecutorService taskExecutor = Executors.newFixedThreadPool(1 + clusterMetadata.replicas().size());
        taskExecutor.execute(() -> deleteInstance(neptuneClient, clusterMetadata.primary()));
        for (String replicaId : clusterMetadata.replicas()) {
            taskExecutor.execute(() -> deleteInstance(neptuneClient, replicaId));
        }
        taskExecutor.shutdown();
        try {
            // NOTE(review): the timeout result is ignored — after 30 minutes the
            // cluster deletion proceeds even if instance deletion has not
            // finished (matches the original behavior; confirm intended).
            taskExecutor.awaitTermination(30, TimeUnit.MINUTES);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
        System.err.println("Deleting cluster...");
        neptuneClient.deleteDBCluster(new DeleteDBClusterRequest()
                .withDBClusterIdentifier(clusterMetadata.clusterId())
                .withSkipFinalSnapshot(true));
        try {
            waitWhile(() -> clusterExists(neptuneClient));
        } catch (DBClusterNotFoundException e) {
            // Cluster is already gone: nothing left to wait for.
        }
        System.err.println("Deleting parameter groups...");
        neptuneClient.deleteDBClusterParameterGroup(new DeleteDBClusterParameterGroupRequest()
                .withDBClusterParameterGroupName(clusterMetadata.dbClusterParameterGroupName()));
        neptuneClient.deleteDBParameterGroup(new DeleteDBParameterGroupRequest()
                .withDBParameterGroupName(
                        clusterMetadata.instanceMetadataFor(clusterMetadata.primary()).dbParameterGroupName()));
    }

    private void deleteInstance(AmazonNeptune neptune, String instanceId) {
        System.err.println("Deleting instance " + instanceId + "...");
        neptune.deleteDBInstance(new DeleteDBInstanceRequest()
                .withDBInstanceIdentifier(instanceId)
                .withSkipFinalSnapshot(true));
        try {
            waitWhile(() -> instanceExists(neptune, instanceId));
        } catch (DBInstanceNotFoundException e) {
            // Instance is already gone: nothing left to wait for.
        }
    }

    // True while describeDBClusters still returns the cluster being deleted.
    private boolean clusterExists(AmazonNeptune neptuneClient) {
        return neptuneClient.describeDBClusters(
                new DescribeDBClustersRequest().withDBClusterIdentifier(clusterMetadata.clusterId()))
                .getDBClusters()
                .size() > 0;
    }

    // True while describeDBInstances still returns the instance being deleted.
    private static boolean instanceExists(AmazonNeptune neptune, String instanceId) {
        return neptune.describeDBInstances(
                new DescribeDBInstancesRequest().withDBInstanceIdentifier(instanceId))
                .getDBInstances()
                .size() > 0;
    }

    // Polls the supplied condition every 10 seconds until it becomes false.
    // Exceptions thrown by the condition (e.g. *NotFoundException) propagate.
    private static void waitWhile(Supplier<Boolean> condition) {
        while (condition.get()) {
            try {
                Thread.sleep(10000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }
}
| 926 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/AddCloneTask.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.amazonaws.services.neptune.AmazonNeptune;
import com.amazonaws.services.neptune.model.*;
import com.amazonaws.services.neptune.util.Activity;
import com.amazonaws.services.neptune.util.EnvironmentVariableUtils;
import com.amazonaws.services.neptune.util.Timer;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
public class AddCloneTask {
private final String sourceClusterId;
private final String targetClusterId;
// Instance type for the clone's instances; cloneCluster() falls back to the
// source primary's instance type when this is empty.
private final String cloneClusterInstanceType;
private final int replicaCount;
private final String engineVersion;
private final Supplier<AmazonNeptune> amazonNeptuneClientSupplier;
// Correlation-id value for the clone — presumably applied as a tag so the
// clone can later be located and removed (see remove-clone command); confirm
// in createCluster.
private final String cloneCorrelationId;

/**
 * @param sourceClusterId             cluster to clone
 * @param targetClusterId             cluster ID to give the clone
 * @param cloneClusterInstanceType    instance type for the clone (empty = reuse the source primary's type)
 * @param replicaCount                number of read replicas to create in the clone
 * @param engineVersion               engine version for the clone
 * @param amazonNeptuneClientSupplier supplies AmazonNeptune clients (caller remains responsible for shutdown of clients it obtains elsewhere)
 * @param cloneCorrelationId          correlation-id value associated with the clone
 */
public AddCloneTask(String sourceClusterId,
                    String targetClusterId,
                    String cloneClusterInstanceType,
                    int replicaCount,
                    String engineVersion,
                    Supplier<AmazonNeptune> amazonNeptuneClientSupplier,
                    String cloneCorrelationId) {
    this.sourceClusterId = sourceClusterId;
    this.targetClusterId = targetClusterId;
    this.cloneClusterInstanceType = cloneClusterInstanceType;
    this.replicaCount = replicaCount;
    this.engineVersion = engineVersion;
    this.amazonNeptuneClientSupplier = amazonNeptuneClientSupplier;
    this.cloneCorrelationId = cloneCorrelationId;
}
/**
 * Clones the source cluster, timing the whole operation, and returns the
 * metadata of the newly created clone.
 */
public NeptuneClusterMetadata execute() {
    return Timer.timedActivity(
            "cloning cluster",
            (Activity.Callable<NeptuneClusterMetadata>) this::cloneCluster);
}
private NeptuneClusterMetadata cloneCluster() {
System.err.println("Cloning cluster " + sourceClusterId + "...");
System.err.println();
NeptuneClusterMetadata sourceClusterMetadata =
NeptuneClusterMetadata.createFromClusterId(sourceClusterId, amazonNeptuneClientSupplier);
InstanceType instanceType = StringUtils.isEmpty(cloneClusterInstanceType) ?
InstanceType.parse(sourceClusterMetadata.instanceMetadataFor(sourceClusterMetadata.primary()).instanceType()) :
InstanceType.parse(cloneClusterInstanceType);
System.err.println(String.format("Source clusterId : %s", sourceClusterId));
System.err.println(String.format("Target clusterId : %s", targetClusterId));
System.err.println(String.format("Target instance type : %s", instanceType));
AmazonNeptune neptune = amazonNeptuneClientSupplier.get();
DBClusterParameterGroup dbClusterParameterGroup = Timer.timedActivity(
"creating DB cluster parameter group",
(Activity.Callable<DBClusterParameterGroup>) () ->
createDbClusterParameterGroup(sourceClusterMetadata, neptune));
DBParameterGroup dbParameterGroup = Timer.timedActivity(
"creating parameter groups",
(Activity.Callable<DBParameterGroup>) () -> createDbParameterGroup(sourceClusterMetadata, neptune));
DBCluster targetDbCluster = Timer.timedActivity(
"creating target cluster",
(Activity.Callable<DBCluster>) () ->
createCluster(sourceClusterMetadata, neptune, dbClusterParameterGroup));
Timer.timedActivity("creating primary", (Activity.Runnable) () ->
createInstance("primary",
neptune,
sourceClusterMetadata,
instanceType,
dbParameterGroup,
targetDbCluster));
if (replicaCount > 0) {
Timer.timedActivity("creating replicas", (Activity.Runnable) () ->
createReplicas(sourceClusterMetadata, instanceType, neptune, dbParameterGroup, targetDbCluster));
}
neptune.shutdown();
return NeptuneClusterMetadata.createFromClusterId(targetClusterId, amazonNeptuneClientSupplier);
}
private void createReplicas(NeptuneClusterMetadata sourceClusterMetadata,
InstanceType instanceType,
AmazonNeptune neptune,
DBParameterGroup dbParameterGroup,
DBCluster targetDbCluster) {
ExecutorService taskExecutor = Executors.newFixedThreadPool(replicaCount);
for (int i = 0; i < replicaCount; i++) {
taskExecutor.execute(() -> createInstance("replica",
neptune,
sourceClusterMetadata,
instanceType,
dbParameterGroup,
targetDbCluster));
}
taskExecutor.shutdown();
try {
taskExecutor.awaitTermination(30, TimeUnit.MINUTES);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
private DBCluster createCluster(NeptuneClusterMetadata sourceClusterMetadata,
AmazonNeptune neptune,
DBClusterParameterGroup
dbClusterParameterGroup) {
System.err.println("Creating target cluster...");
RestoreDBClusterToPointInTimeRequest cloneClusterRequest = new RestoreDBClusterToPointInTimeRequest()
.withSourceDBClusterIdentifier(sourceClusterId)
.withDBClusterIdentifier(targetClusterId)
.withRestoreType("copy-on-write")
.withUseLatestRestorableTime(true)
.withPort(sourceClusterMetadata.port())
.withDBClusterParameterGroupName(dbClusterParameterGroup.getDBClusterParameterGroupName())
.withEnableIAMDatabaseAuthentication(sourceClusterMetadata.isIAMDatabaseAuthenticationEnabled())
.withDBSubnetGroupName(sourceClusterMetadata.dbSubnetGroupName())
.withVpcSecurityGroupIds(sourceClusterMetadata.vpcSecurityGroupIds())
.withTags(getTags(sourceClusterMetadata.clusterId()));
DBCluster targetDbCluster = neptune.restoreDBClusterToPointInTime(cloneClusterRequest);
String clusterStatus = targetDbCluster.getStatus();
while (clusterStatus.equals("creating")) {
try {
Thread.sleep(10000);
} catch (InterruptedException e) {
e.printStackTrace();
}
clusterStatus = neptune.describeDBClusters(
new DescribeDBClustersRequest()
.withDBClusterIdentifier(targetDbCluster.getDBClusterIdentifier()))
.getDBClusters()
.get(0)
.getStatus();
}
return targetDbCluster;
}
private Collection<Tag> getTags(String sourceClusterId) {
Collection<Tag> tags = new ArrayList<>();
tags.add(new Tag()
.withKey("source")
.withValue(sourceClusterId));
tags.add(new Tag()
.withKey("application")
.withValue(NeptuneClusterMetadata.NEPTUNE_EXPORT_APPLICATION_TAG));
if (StringUtils.isNotEmpty(cloneCorrelationId)) {
tags.add(new Tag()
.withKey(NeptuneClusterMetadata.NEPTUNE_EXPORT_CORRELATION_ID_KEY)
.withValue(cloneCorrelationId));
}
return tags;
}
private DBParameterGroup createDbParameterGroup(NeptuneClusterMetadata sourceClusterMetadata,
AmazonNeptune neptune) {
DBParameterGroup dbParameterGroup;
dbParameterGroup = neptune.createDBParameterGroup(
new CreateDBParameterGroupRequest()
.withDBParameterGroupName(String.format("%s-db-params", targetClusterId))
.withDescription(String.format("%s DB Parameter Group", targetClusterId))
.withDBParameterGroupFamily(sourceClusterMetadata.dbParameterGroupFamily())
.withTags(getTags(sourceClusterMetadata.clusterId())));
neptune.modifyDBParameterGroup(new ModifyDBParameterGroupRequest()
.withDBParameterGroupName(dbParameterGroup.getDBParameterGroupName())
.withParameters(
new Parameter()
.withParameterName("neptune_query_timeout")
.withParameterValue("2147483647")
.withApplyMethod(ApplyMethod.PendingReboot)));
List<Parameter> dbParameters = neptune.describeDBParameters(
new DescribeDBParametersRequest()
.withDBParameterGroupName(dbParameterGroup.getDBParameterGroupName()))
.getParameters();
while (dbParameters.stream().noneMatch(parameter ->
parameter.getParameterName().equals("neptune_query_timeout") &&
parameter.getParameterValue().equals("2147483647"))) {
try {
Thread.sleep(10000);
} catch (InterruptedException e) {
e.printStackTrace();
}
dbParameters = neptune.describeDBClusterParameters(
new DescribeDBClusterParametersRequest()
.withDBClusterParameterGroupName(dbParameterGroup.getDBParameterGroupName()))
.getParameters();
}
System.err.println(String.format("DB parameter group : %s", dbParameterGroup.getDBParameterGroupName()));
System.err.println();
return dbParameterGroup;
}
private DBClusterParameterGroup createDbClusterParameterGroup(NeptuneClusterMetadata sourceClusterMetadata,
AmazonNeptune neptune) {
DBClusterParameterGroup dbClusterParameterGroup;
dbClusterParameterGroup = neptune.createDBClusterParameterGroup(
new CreateDBClusterParameterGroupRequest()
.withDBClusterParameterGroupName(String.format("%s-db-cluster-params", targetClusterId))
.withDescription(String.format("%s DB Cluster Parameter Group", targetClusterId))
.withDBParameterGroupFamily(sourceClusterMetadata.dbParameterGroupFamily())
.withTags(getTags(sourceClusterMetadata.clusterId())));
String neptuneStreamsParameterValue = sourceClusterMetadata.isStreamEnabled() ? "1" : "0";
try {
neptune.modifyDBClusterParameterGroup(new ModifyDBClusterParameterGroupRequest()
.withDBClusterParameterGroupName(dbClusterParameterGroup.getDBClusterParameterGroupName())
.withParameters(
new Parameter()
.withParameterName("neptune_enforce_ssl")
.withParameterValue("1")
.withApplyMethod(ApplyMethod.PendingReboot),
new Parameter()
.withParameterName("neptune_query_timeout")
.withParameterValue("2147483647")
.withApplyMethod(ApplyMethod.PendingReboot),
new Parameter()
.withParameterName("neptune_streams")
.withParameterValue(neptuneStreamsParameterValue)
.withApplyMethod(ApplyMethod.PendingReboot)));
} catch (AmazonNeptuneException e) {
neptune.modifyDBClusterParameterGroup(new ModifyDBClusterParameterGroupRequest()
.withDBClusterParameterGroupName(dbClusterParameterGroup.getDBClusterParameterGroupName())
.withParameters(
new Parameter()
.withParameterName("neptune_query_timeout")
.withParameterValue("2147483647")
.withApplyMethod(ApplyMethod.PendingReboot),
new Parameter()
.withParameterName("neptune_streams")
.withParameterValue(neptuneStreamsParameterValue)
.withApplyMethod(ApplyMethod.PendingReboot)));
}
List<Parameter> dbClusterParameters = neptune.describeDBClusterParameters(
new DescribeDBClusterParametersRequest()
.withDBClusterParameterGroupName(dbClusterParameterGroup.getDBClusterParameterGroupName()))
.getParameters();
while (dbClusterParameters.stream().noneMatch(parameter ->
parameter.getParameterName().equals("neptune_query_timeout") &&
parameter.getParameterValue().equals("2147483647"))) {
try {
Thread.sleep(10000);
} catch (InterruptedException e) {
e.printStackTrace();
}
dbClusterParameters = neptune.describeDBClusterParameters(
new DescribeDBClusterParametersRequest()
.withDBClusterParameterGroupName(dbClusterParameterGroup.getDBClusterParameterGroupName()))
.getParameters();
}
System.err.println(String.format("DB cluster parameter group : %s", dbClusterParameterGroup.getDBClusterParameterGroupName()));
return dbClusterParameterGroup;
}
private void createInstance(String name,
AmazonNeptune neptune,
NeptuneClusterMetadata sourceClusterMetadata,
InstanceType instanceType,
DBParameterGroup dbParameterGroup,
DBCluster targetDbCluster) {
System.err.println("Creating target " + name + " instance...");
CreateDBInstanceRequest request = new CreateDBInstanceRequest()
.withDBInstanceClass(instanceType.value())
.withDBInstanceIdentifier(String.format("neptune-export-%s-%s", name, UUID.randomUUID().toString().substring(0, 5)))
.withDBClusterIdentifier(targetDbCluster.getDBClusterIdentifier())
.withDBParameterGroupName(dbParameterGroup.getDBParameterGroupName())
.withEngine("neptune")
.withTags(getTags(sourceClusterMetadata.clusterId()));
if (StringUtils.isNotEmpty(engineVersion)) {
request = request.withEngineVersion(engineVersion);
}
DBInstance targetDbInstance = neptune.createDBInstance(request);
String instanceStatus = targetDbInstance.getDBInstanceStatus();
while (instanceStatus.equals("creating")) {
try {
Thread.sleep(10000);
} catch (InterruptedException e) {
e.printStackTrace();
}
instanceStatus = neptune.describeDBInstances(new DescribeDBInstancesRequest()
.withDBInstanceIdentifier(targetDbInstance.getDBInstanceIdentifier()))
.getDBInstances()
.get(0)
.getDBInstanceStatus();
}
}
}
| 927 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/CloneClusterStrategy.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
/**
 * Strategy for obtaining the {@link Cluster} an export should run against:
 * implementations either clone the source cluster or hand back the source cluster as-is.
 */
public interface CloneClusterStrategy {
    /**
     * Returns the cluster (original or clone) to use for the export.
     *
     * @param connectionConfig  connection details for the source cluster
     * @param concurrencyConfig desired export concurrency settings
     * @return the cluster to export from
     * @throws Exception if creating or locating the cluster fails
     */
    Cluster cloneCluster(ConnectionConfig connectionConfig,
                         ConcurrencyConfig concurrencyConfig) throws Exception;
}
| 928 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/Cluster.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.amazonaws.services.neptune.AmazonNeptune;
import java.util.function.Supplier;
/**
 * A Neptune cluster that an export runs against, together with the connection and
 * concurrency settings to use. Closing the cluster releases any resources created
 * for it (e.g. a temporary clone); for a non-cloned cluster, close is a no-op.
 */
public interface Cluster extends AutoCloseable {
    // Connection details (endpoints, auth, etc.) for this cluster.
    ConnectionConfig connectionConfig();
    // Export concurrency settings to use against this cluster.
    ConcurrencyConfig concurrencyConfig();
    // Metadata describing the cluster (instances, parameter groups, tags).
    NeptuneClusterMetadata clusterMetadata();
}
| 929 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/GetLastEventIdTask.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.amazonaws.services.neptune.io.CommandWriter;
import com.amazonaws.services.neptune.propertygraph.io.JsonResource;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicReference;
/**
 * Fetches the latest Neptune Streams event ID for a cluster, persists it to a JSON
 * resource, and can later report that resource's path via a {@link CommandWriter}.
 */
public class GetLastEventIdTask implements GetLastEventIdStrategy {

    private final Cluster cluster;
    private final JsonResource<EventId, Object> lastEventIdResource;
    // Holds the most recently retrieved event ID; null until saveLastEventId succeeds.
    private final AtomicReference<EventId> lastEventId = new AtomicReference<>();

    public GetLastEventIdTask(Cluster cluster, JsonResource<EventId, Object> lastEventIdResource) {
        this.cluster = cluster;
        this.lastEventIdResource = lastEventIdResource;
    }

    /**
     * Queries the given stream endpoint for the last event ID and, if one is returned,
     * records it and saves it to the JSON resource.
     */
    @Override
    public void saveLastEventId(String streamEndpointType) throws IOException {
        GetLastEventId query = new GetLastEventId(
                cluster.clusterMetadata(),
                cluster.connectionConfig(),
                streamEndpointType);
        EventId retrievedId = query.execute();
        if (retrievedId == null) {
            return;
        }
        lastEventId.set(retrievedId);
        lastEventIdResource.save(retrievedId, null);
    }

    /**
     * Writes the resource path as a message, but only if an event ID was previously saved.
     */
    @Override
    public void writeLastEventIdResourcePathAsMessage(CommandWriter writer) {
        boolean hasSavedEventId = lastEventId.get() != null;
        if (hasSavedEventId) {
            lastEventIdResource.writeResourcePathAsMessage(writer);
        }
    }
}
| 930 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/GetClusterIdFromCorrelationId.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.amazonaws.services.neptune.AmazonNeptune;
import com.amazonaws.services.neptune.model.*;
import com.amazonaws.services.neptune.util.Activity;
import com.amazonaws.services.neptune.util.Timer;
import org.apache.commons.lang.StringUtils;
import java.util.List;
import java.util.function.Supplier;
/**
 * Looks up a Neptune cluster ID by scanning every cluster's tags for the
 * neptune-export correlation-id tag with a matching value. Returns null when no
 * cluster carries the correlation ID.
 */
public class GetClusterIdFromCorrelationId {

    private final String correlationId;
    private final Supplier<AmazonNeptune> amazonNeptuneClientSupplier;

    public GetClusterIdFromCorrelationId(String correlationId, Supplier<AmazonNeptune> amazonNeptuneClientSupplier) {
        this.correlationId = correlationId;
        this.amazonNeptuneClientSupplier = amazonNeptuneClientSupplier;
    }

    /**
     * Performs the lookup, timing the activity and always shutting down the client.
     *
     * @return the matching cluster ID, or null if none was found
     */
    public String execute() {
        AmazonNeptune neptune = amazonNeptuneClientSupplier.get();
        try {
            return Timer.timedActivity("getting cluster ID from correlation ID", false,
                    (Activity.Callable<String>) () -> getClusterId(neptune));
        } finally {
            if (neptune != null) {
                neptune.shutdown();
            }
        }
    }

    private String getClusterId(AmazonNeptune neptune) {
        // Fix: paginate with the result marker — the previous code only examined the
        // first page of describeDBClusters, so clusters beyond it were never found.
        String paginationToken = null;
        do {
            DescribeDBClustersResult describeDBClustersResult = neptune.describeDBClusters(
                    new DescribeDBClustersRequest().withMarker(paginationToken));
            paginationToken = describeDBClustersResult.getMarker();
            for (DBCluster dbCluster : describeDBClustersResult.getDBClusters()) {
                String clusterCorrelationId = getCorrelationId(dbCluster.getDBClusterArn(), neptune);
                if (StringUtils.isNotEmpty(clusterCorrelationId) && clusterCorrelationId.equals(correlationId)) {
                    String clusterId = dbCluster.getDBClusterIdentifier();
                    System.err.println(String.format("Found cluster ID %s for correlation ID %s", clusterId, correlationId));
                    return clusterId;
                }
                // Brief pause between per-cluster tag lookups — presumably to avoid
                // API throttling (TODO confirm).
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag so callers can observe the interruption.
                    Thread.currentThread().interrupt();
                }
            }
        } while (paginationToken != null);
        System.err.println(String.format("Unable to find cluster ID for correlation ID %s", correlationId));
        return null;
    }

    // Returns the value of the neptune-export correlation-id tag on the given cluster
    // ARN, or null if the tag is absent.
    private String getCorrelationId(String dbClusterArn, AmazonNeptune neptune) {
        List<Tag> tagList = neptune.listTagsForResource(
                new ListTagsForResourceRequest()
                        .withResourceName(dbClusterArn)).getTagList();
        for (Tag tag : tagList) {
            if (tag.getKey().equalsIgnoreCase(NeptuneClusterMetadata.NEPTUNE_EXPORT_CORRELATION_ID_KEY)) {
                return tag.getValue();
            }
        }
        return null;
    }
}
| 931 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/DoNotCloneCluster.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
/**
 * CloneClusterStrategy that performs no cloning: the export runs directly against
 * the source cluster, so closing the returned Cluster releases nothing.
 */
public class DoNotCloneCluster implements CloneClusterStrategy {

    private final NeptuneClusterMetadata clusterMetadata;

    public DoNotCloneCluster(NeptuneClusterMetadata clusterMetadata) {
        this.clusterMetadata = clusterMetadata;
    }

    /**
     * Wraps the supplied configs and the source cluster's metadata in a Cluster
     * whose close() is a no-op.
     */
    @Override
    public Cluster cloneCluster(ConnectionConfig connectionConfig, ConcurrencyConfig concurrencyConfig) throws Exception {
        NeptuneClusterMetadata metadata = clusterMetadata;
        return new Cluster() {

            @Override
            public ConnectionConfig connectionConfig() {
                return connectionConfig;
            }

            @Override
            public ConcurrencyConfig concurrencyConfig() {
                return concurrencyConfig;
            }

            @Override
            public NeptuneClusterMetadata clusterMetadata() {
                return metadata;
            }

            @Override
            public void close() {
                // No clone was created, so there is nothing to tear down.
            }
        };
    }
}
| 932 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/NeptuneHttpsClient.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.amazonaws.*;
import com.amazonaws.auth.AWS4Signer;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.http.AmazonHttpClient;
import com.amazonaws.http.ExecutionContext;
import com.amazonaws.http.HttpMethodName;
import java.net.URI;
import java.util.Map;
/**
 * Minimal HTTPS client for calling a Neptune database endpoint with SigV4-signed
 * GET requests (signing service name "neptune-db").
 */
public class NeptuneHttpsClient {

    // Standard AWS credentials chain (env vars, profile, instance role, ...).
    private final AWSCredentialsProvider awsCredentialsProvider = DefaultAWSCredentialsProviderChain.getInstance();
    private final AWS4Signer signer;
    private final String uri;
    private final boolean disableCertCheck;

    /**
     * @param uri              full URI of the Neptune endpoint to call
     * @param region           AWS region used for SigV4 signing
     * @param disableCertCheck if true, disables TLS certificate checking (JVM-wide; see note in get())
     */
    public NeptuneHttpsClient(String uri, String region, boolean disableCertCheck) {
        this.uri = uri;
        this.disableCertCheck = disableCertCheck;
        signer = new AWS4Signer();
        signer.setRegionName(region);
        signer.setServiceName("neptune-db");
    }

    /**
     * Issues a signed GET request with the supplied query-string parameters and
     * returns the raw HTTP response.
     */
    public HttpResponse get(Map<String, String> queryStringParams) {
        Request<Void> request = new DefaultRequest<>(signer.getServiceName());
        request.setEndpoint(URI.create(uri));
        request.setHttpMethod(HttpMethodName.GET);
        for (Map.Entry<String, String> entry : queryStringParams.entrySet()) {
            request.addParameter(entry.getKey(), entry.getValue());
        }
        // Sign only after all parameters are set so the signature covers the query string.
        signer.sign(request, awsCredentialsProvider.getCredentials());
        if (disableCertCheck){
            // NOTE(review): this sets a JVM-wide system property affecting every AWS SDK
            // client in the process, and it is never unset — confirm this is intentional.
            System.setProperty(SDKGlobalConfiguration.DISABLE_CERT_CHECKING_SYSTEM_PROPERTY, "true");
        }
        // NOTE(review): a new AmazonHttpClient is created per call and never shut down;
        // consider reusing one client or shutting it down after the response is consumed.
        Response<HttpResponse> response = new AmazonHttpClient(new ClientConfiguration())
                .requestExecutionBuilder()
                .executionContext(new ExecutionContext(false))
                .request(request)
                .errorResponseHandler(new SimpleErrorResponseHandler())
                .execute(new SimpleResponseHandler());
        return response.getAwsResponse();
    }
}
| 933 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/NeptuneClusterMetadata.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.amazonaws.services.neptune.AmazonNeptune;
import com.amazonaws.services.neptune.export.EndpointValidator;
import com.amazonaws.services.neptune.model.*;
import org.apache.commons.lang.StringUtils;
import java.util.*;
import java.util.function.Supplier;
import java.util.stream.Collectors;
public class NeptuneClusterMetadata {
public static final String NEPTUNE_EXPORT_APPLICATION_TAG = "neptune-export";
public static final String NEPTUNE_EXPORT_CORRELATION_ID_KEY = "correlation-id";
/**
 * Derives a cluster ID from an endpoint address by taking everything before the
 * first dot.
 *
 * @throws IllegalArgumentException if the endpoint contains no dot
 */
public static String clusterIdFromEndpoint(String endpoint) {
    int firstDot = endpoint.indexOf('.');
    if (firstDot >= 0) {
        return endpoint.substring(0, firstDot);
    }
    throw new IllegalArgumentException(String.format("Unable to identify cluster ID from endpoint '%s'. Use the clusterId export parameter instead.", endpoint));
}
/**
 * Resolves one of the supplied endpoints to its owning cluster and returns that
 * cluster's metadata. Cluster endpoints (writer and reader) are checked first,
 * then individual instance endpoints, paginating through all results.
 *
 * @throws IllegalStateException if no cluster or instance matches any endpoint
 */
public static NeptuneClusterMetadata createFromEndpoints(Collection<String> endpoints, Supplier<AmazonNeptune> amazonNeptuneClientSupplier) {
    AmazonNeptune neptune = amazonNeptuneClientSupplier.get();
    // Fix: shut the client down on every exit path (the previous code leaked it),
    // consistent with createFromClusterId.
    try {
        String paginationToken = null;
        do {
            DescribeDBClustersResult describeDBClustersResult = neptune
                    .describeDBClusters(new DescribeDBClustersRequest()
                            .withMarker(paginationToken)
                            .withFilters(new Filter().withName("engine").withValues("neptune")));
            paginationToken = describeDBClustersResult.getMarker();
            for (DBCluster dbCluster : describeDBClustersResult.getDBClusters()) {
                for (String endpoint : endpoints) {
                    String endpointValue = getEndpointValue(endpoint);
                    if (endpointValue.equals(getEndpointValue(dbCluster.getEndpoint()))){
                        return createFromClusterId(dbCluster.getDBClusterIdentifier(), amazonNeptuneClientSupplier);
                    } else if (endpointValue.equals(getEndpointValue(dbCluster.getReaderEndpoint()))){
                        return createFromClusterId(dbCluster.getDBClusterIdentifier(), amazonNeptuneClientSupplier);
                    }
                }
            }
        } while (paginationToken != null);
        // No cluster-level endpoint matched: fall back to instance endpoints.
        paginationToken = null;
        do {
            DescribeDBInstancesResult describeDBInstancesResult = neptune.describeDBInstances(
                    new DescribeDBInstancesRequest()
                            .withMarker(paginationToken)
                            .withFilters(new Filter().withName("engine").withValues("neptune")));
            paginationToken = describeDBInstancesResult.getMarker();
            for (DBInstance dbInstance : describeDBInstancesResult.getDBInstances()) {
                for (String endpoint : endpoints) {
                    String endpointValue = getEndpointValue(endpoint);
                    if (endpointValue.equals(getEndpointValue(dbInstance.getEndpoint().getAddress()))){
                        return createFromClusterId(dbInstance.getDBClusterIdentifier(), amazonNeptuneClientSupplier);
                    }
                }
            }
        } while (paginationToken != null);
        throw new IllegalStateException(String.format("Unable to identify cluster ID from endpoints: %s", endpoints));
    } finally {
        neptune.shutdown();
    }
}
// Normalizes an endpoint for comparison: strips protocol/port via EndpointValidator
// and lower-cases the result.
private static String getEndpointValue(String endpoint) {
    return EndpointValidator.validate(endpoint).toLowerCase();
}
/**
 * Builds a NeptuneClusterMetadata snapshot for the given cluster ID by querying the
 * Neptune management API for cluster details, tags, parameter groups and member
 * instances. The temporary client is shut down before returning.
 *
 * @throws IllegalArgumentException if no cluster with the given ID exists
 */
public static NeptuneClusterMetadata createFromClusterId(String clusterId, Supplier<AmazonNeptune> amazonNeptuneClientSupplier) {
    AmazonNeptune neptune = amazonNeptuneClientSupplier.get();
    DescribeDBClustersResult describeDBClustersResult = neptune
            .describeDBClusters(new DescribeDBClustersRequest().withDBClusterIdentifier(clusterId));
    if (describeDBClustersResult.getDBClusters().isEmpty()) {
        throw new IllegalArgumentException(String.format("Unable to find cluster %s", clusterId));
    }
    DBCluster dbCluster = describeDBClustersResult.getDBClusters().get(0);
    // Collect the cluster's tags into a simple key->value map.
    List<Tag> tags = neptune.listTagsForResource(
            new ListTagsForResourceRequest()
                    .withResourceName(dbCluster.getDBClusterArn())).getTagList();
    Map<String, String> clusterTags = new HashMap<>();
    tags.forEach(t -> clusterTags.put(t.getKey(), t.getValue()));
    boolean isIAMDatabaseAuthenticationEnabled = dbCluster.isIAMDatabaseAuthenticationEnabled();
    Integer port = dbCluster.getPort();
    String dbClusterParameterGroup = dbCluster.getDBClusterParameterGroup();
    String engineVersion = dbCluster.getEngineVersion();
    // Determine the parameter group family, falling back to a guess when the caller
    // lacks permission to describe cluster parameter groups.
    String dbParameterGroupFamily;
    try {
        DescribeDBClusterParameterGroupsResult describeDBClusterParameterGroupsResult = neptune.describeDBClusterParameterGroups(
                new DescribeDBClusterParameterGroupsRequest()
                        .withDBClusterParameterGroupName(dbClusterParameterGroup));
        Optional<DBClusterParameterGroup> parameterGroup = describeDBClusterParameterGroupsResult
                .getDBClusterParameterGroups().stream().findFirst();
        dbParameterGroupFamily = parameterGroup.isPresent() ?
                parameterGroup.get().getDBParameterGroupFamily() :
                "neptune1";
    } catch (AmazonNeptuneException e) {
        // Older deployments of Neptune Export service may not have requisite permissions to
        // describe cluster parameter group, so we'll try and guess the group family.
        if (StringUtils.isNotEmpty(engineVersion) && engineVersion.contains(".")) {
            int v = Integer.parseInt(engineVersion.split("\\.")[1]);
            dbParameterGroupFamily = v > 1 ? "neptune1.2" : "neptune1";
        } else {
            dbParameterGroupFamily = "neptune1";
        }
    }
    // Streams are considered enabled only if neptune_streams is present and set to "1".
    DescribeDBClusterParametersResult describeDBClusterParametersResult = neptune.describeDBClusterParameters(
            new DescribeDBClusterParametersRequest()
                    .withDBClusterParameterGroupName(dbClusterParameterGroup));
    Optional<Parameter> neptuneStreamsParameter = describeDBClusterParametersResult.getParameters().stream()
            .filter(parameter -> parameter.getParameterName().equals("neptune_streams"))
            .findFirst();
    boolean isStreamEnabled = neptuneStreamsParameter.isPresent() &&
            neptuneStreamsParameter.get().getParameterValue().equals("1");
    String dbSubnetGroup = dbCluster.getDBSubnetGroup();
    List<VpcSecurityGroupMembership> vpcSecurityGroups = dbCluster.getVpcSecurityGroups();
    List<String> vpcSecurityGroupIds = vpcSecurityGroups.stream()
            .map(VpcSecurityGroupMembership::getVpcSecurityGroupId)
            .collect(Collectors.toList());
    // Identify the writer (primary) and the remaining members (replicas).
    List<DBClusterMember> dbClusterMembers = dbCluster.getDBClusterMembers();
    Optional<DBClusterMember> clusterWriter = dbClusterMembers.stream()
            .filter(DBClusterMember::isClusterWriter)
            .findFirst();
    String primary = clusterWriter.map(DBClusterMember::getDBInstanceIdentifier).orElse("");
    List<String> replicas = dbClusterMembers.stream()
            .filter(dbClusterMember -> !dbClusterMember.isClusterWriter())
            .map(DBClusterMember::getDBInstanceIdentifier)
            .collect(Collectors.toList());
    // Capture per-instance details (class, parameter group, endpoint) keyed by instance ID.
    DescribeDBInstancesRequest describeDBInstancesRequest = new DescribeDBInstancesRequest()
            .withFilters(Collections.singletonList(
                    new Filter()
                            .withName("db-cluster-id")
                            .withValues(dbCluster.getDBClusterIdentifier())));
    DescribeDBInstancesResult describeDBInstancesResult = neptune
            .describeDBInstances(describeDBInstancesRequest);
    Map<String, NeptuneInstanceMetadata> instanceTypes = new HashMap<>();
    describeDBInstancesResult.getDBInstances()
            .forEach(c -> instanceTypes.put(
                    c.getDBInstanceIdentifier(),
                    new NeptuneInstanceMetadata(
                            c.getDBInstanceClass(),
                            c.getDBParameterGroups().get(0).getDBParameterGroupName(),
                            c.getEndpoint())
            ));
    neptune.shutdown();
    return new NeptuneClusterMetadata(clusterId,
            port,
            engineVersion,
            dbClusterParameterGroup,
            dbParameterGroupFamily,
            isIAMDatabaseAuthenticationEnabled,
            isStreamEnabled,
            dbSubnetGroup,
            vpcSecurityGroupIds,
            primary,
            replicas,
            instanceTypes,
            clusterTags,
            amazonNeptuneClientSupplier);
}
// Immutable snapshot of the cluster's state at the time the factory method ran.
private final String clusterId;
private final int port;
private final String engineVersion;
private final String dbClusterParameterGroupName;
private final String dbParameterGroupFamily;
private final Boolean isIAMDatabaseAuthenticationEnabled;
private final Boolean isStreamEnabled;
private final String dbSubnetGroupName;
private final Collection<String> vpcSecurityGroupIds;
// Writer instance ID ("" when the cluster reports no writer).
private final String primary;
private final Collection<String> replicas;
// Per-instance details keyed by instance ID.
private final Map<String, NeptuneInstanceMetadata> instanceMetadata;
private final Map<String, String> clusterTags;
// Retained so consumers can create further management-API clients.
private final Supplier<AmazonNeptune> amazonNeptuneClientSupplier;

// Private: instances are created via the createFrom* factory methods.
private NeptuneClusterMetadata(String clusterId,
                               int port,
                               String engineVersion,
                               String dbClusterParameterGroupName,
                               String dbParameterGroupFamily,
                               Boolean isIAMDatabaseAuthenticationEnabled,
                               Boolean isStreamEnabled,
                               String dbSubnetGroupName,
                               List<String> vpcSecurityGroupIds,
                               String primary,
                               Collection<String> replicas,
                               Map<String, NeptuneInstanceMetadata> instanceMetadata,
                               Map<String, String> clusterTags,
                               Supplier<AmazonNeptune> amazonNeptuneClientSupplier) {
    this.clusterId = clusterId;
    this.port = port;
    this.engineVersion = engineVersion;
    this.dbClusterParameterGroupName = dbClusterParameterGroupName;
    this.dbParameterGroupFamily = dbParameterGroupFamily;
    this.isIAMDatabaseAuthenticationEnabled = isIAMDatabaseAuthenticationEnabled;
    this.isStreamEnabled = isStreamEnabled;
    this.dbSubnetGroupName = dbSubnetGroupName;
    this.vpcSecurityGroupIds = vpcSecurityGroupIds;
    this.primary = primary;
    this.replicas = replicas;
    this.instanceMetadata = instanceMetadata;
    this.clusterTags = clusterTags;
    this.amazonNeptuneClientSupplier = amazonNeptuneClientSupplier;
}
// --- Simple accessors over the snapshot captured at construction time ---

public String clusterId() {
    return clusterId;
}

public int port() {
    return port;
}

public String engineVersion() {
    return engineVersion;
}

public String dbClusterParameterGroupName() {
    return dbClusterParameterGroupName;
}

public String dbParameterGroupFamily() {
    return dbParameterGroupFamily;
}

public Boolean isIAMDatabaseAuthenticationEnabled() {
    return isIAMDatabaseAuthenticationEnabled;
}

public Boolean isStreamEnabled() {
    return isStreamEnabled;
}

public String dbSubnetGroupName() {
    return dbSubnetGroupName;
}

public Collection<String> vpcSecurityGroupIds() {
    return vpcSecurityGroupIds;
}

// Writer instance ID ("" when the cluster reported no writer).
public String primary() {
    return primary;
}

public Collection<String> replicas() {
    return replicas;
}

// Per-instance details for the given instance ID; null if the ID is unknown.
public NeptuneInstanceMetadata instanceMetadataFor(String key) {
    return instanceMetadata.get(key);
}

// Endpoint addresses of all member instances.
public List<String> endpoints() {
    return instanceMetadata.values().stream().map(i -> i.endpoint().getAddress()).collect(Collectors.toList());
}

// True if the cluster carries the application=neptune-export tag (i.e. it was
// created by neptune-export, e.g. as a clone).
public boolean isTaggedWithNeptuneExport() {
    return clusterTags.containsKey("application") &&
            clusterTags.get("application").equalsIgnoreCase(NEPTUNE_EXPORT_APPLICATION_TAG);
}

public Supplier<AmazonNeptune> clientSupplier() {
    return amazonNeptuneClientSupplier;
}
    /**
     * Prints a human-readable summary of the cluster and each of its instances.
     * Output goes to stderr, so it does not interfere with any data written to
     * stdout.
     */
    public void printDetails(){
        System.err.println("Cluster ID : " + clusterId());
        System.err.println("Port : " + port());
        System.err.println("Engine : " + engineVersion());
        System.err.println("IAM DB Auth : " + isIAMDatabaseAuthenticationEnabled());
        System.err.println("Streams enabled : " + isStreamEnabled());
        System.err.println("Parameter group family : " + dbParameterGroupFamily());
        System.err.println("Cluster parameter group : " + dbClusterParameterGroupName());
        System.err.println("Subnet group : " + dbSubnetGroupName());
        System.err.println("Security group IDs : " + String.join(", ", vpcSecurityGroupIds()));
        System.err.println("Instance endpoints : " + String.join(", ", endpoints()));
        // Note: this local shadows the 'primary' field; primary() is the primary
        // instance's ID, which keys into the instance metadata map.
        NeptuneClusterMetadata.NeptuneInstanceMetadata primary = instanceMetadataFor(primary());
        System.err.println();
        System.err.println("Primary");
        System.err.println(" Instance ID : " + primary());
        System.err.println(" Instance type : " + primary.instanceType());
        System.err.println(" Endpoint : " + primary.endpoint().getAddress());
        System.err.println(" Database parameter group : " + primary.dbParameterGroupName());
        if (!replicas().isEmpty()) {
            for (String replicaId : replicas()) {
                NeptuneClusterMetadata.NeptuneInstanceMetadata replica = instanceMetadataFor(replicaId);
                System.err.println();
                System.err.println("Replica");
                System.err.println(" Instance ID : " + replicaId);
                System.err.println(" Instance type : " + replica.instanceType());
                System.err.println(" Endpoint : " + replica.endpoint().getAddress());
                System.err.println(" Database parameter group : " + replica.dbParameterGroupName());
            }
        }
    }
    /**
     * Immutable description of a single Neptune instance: its instance type,
     * its DB parameter group name, and its endpoint.
     */
    public static class NeptuneInstanceMetadata {
        private final String instanceType;
        private final String dbParameterGroupName;
        private final Endpoint endpoint;

        public NeptuneInstanceMetadata(String instanceType, String dbParameterGroupName, Endpoint endpoint) {
            this.instanceType = instanceType;
            this.dbParameterGroupName = dbParameterGroupName;
            this.endpoint = endpoint;
        }

        /** Instance type name. */
        public String instanceType() {
            return instanceType;
        }

        /** Name of the DB parameter group applied to this instance. */
        public String dbParameterGroupName() {
            return dbParameterGroupName;
        }

        /** Endpoint (address and port) of this instance. */
        public Endpoint endpoint() {
            return endpoint;
        }
    }
}
| 934 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/StreamRecordsNotFoundExceptionParser.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Extracts the last commit/op numbers embedded in a
 * StreamRecordsNotFoundException error message.
 */
public class StreamRecordsNotFoundExceptionParser {

    // Matches runs of digits in the error message. Compiled once: the original
    // recompiled the pattern on every call, which is needlessly expensive.
    private static final Pattern DIGITS = Pattern.compile("\\d+");

    /**
     * Pair of commitNum/opNum values.
     * NOTE(review): this class duplicates {@code EventId} and is not used by
     * {@code parseLastEventId}; retained for backward compatibility.
     */
    public static class LastEventId {

        private final long commitNum;
        private final long opNum;

        public LastEventId(long commitNum, long opNum) {
            this.commitNum = commitNum;
            this.opNum = opNum;
        }

        public long commitNum() {
            return commitNum;
        }

        public long opNum() {
            return opNum;
        }

        @Override
        public String toString() {
            return "{ " +
                    "\"commitNum\": " + commitNum +
                    ", \"opNum\": " + opNum +
                    " }";
        }
    }

    /**
     * Parses the last event ID out of an error message. The first run of digits
     * is taken to be the commitNum and the second the opNum; either defaults to
     * -1 when absent.
     *
     * @param errorMessage error message from a StreamRecordsNotFoundException
     * @return the extracted event ID (commitNum/opNum, -1 for missing values)
     */
    public static EventId parseLastEventId(String errorMessage) {
        long commitNum = -1;
        long opNum = -1;
        Matcher m = DIGITS.matcher(errorMessage);
        if (m.find()) {
            commitNum = Long.parseLong(m.group());
        }
        if (m.find()) {
            opNum = Long.parseLong(m.group());
        }
        return new EventId(commitNum, opNum);
    }
}
| 935 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/ConnectionConfig.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.neptune.auth.HandshakeRequestConfig;
import java.util.Collection;
import java.util.Collections;
/**
 * Connection settings for a Neptune cluster, optionally routed through a proxy.
 * Immutable once constructed.
 */
public class ConnectionConfig {

    private final String clusterId;
    private final Collection<String> neptuneEndpoints;
    private final int neptunePort;
    private final boolean useIamAuth;
    // Was non-final in the original although nothing ever mutated it; made
    // final so the class is fully immutable.
    private final boolean useSsl;
    private final ProxyConfig proxyConfig;
    private final AWSCredentialsProvider credentialsProvider;

    /**
     * Creates a connection config that resolves AWS credentials through the
     * default provider chain.
     */
    public ConnectionConfig(String clusterId,
                            Collection<String> neptuneEndpoints,
                            int neptunePort,
                            boolean useIamAuth, boolean useSsl, ProxyConfig proxyConfig) {
        this(clusterId, neptuneEndpoints, neptunePort, useIamAuth, useSsl, proxyConfig, new DefaultAWSCredentialsProviderChain());
    }

    /**
     * @param clusterId           identifier of the Neptune cluster
     * @param neptuneEndpoints    Neptune endpoints to connect to
     * @param neptunePort         port on which Neptune listens
     * @param useIamAuth          whether to sign requests with IAM auth
     * @param useSsl              whether to connect over SSL
     * @param proxyConfig         proxy settings, or {@code null} for a direct connection
     * @param credentialsProvider source of AWS credentials
     */
    public ConnectionConfig(String clusterId,
                            Collection<String> neptuneEndpoints,
                            int neptunePort,
                            boolean useIamAuth, boolean useSsl, ProxyConfig proxyConfig,
                            AWSCredentialsProvider credentialsProvider) {
        this.clusterId = clusterId;
        this.neptuneEndpoints = neptuneEndpoints;
        this.neptunePort = neptunePort;
        this.useIamAuth = useIamAuth;
        this.useSsl = useSsl;
        this.proxyConfig = proxyConfig;
        this.credentialsProvider = credentialsProvider;
    }

    /**
     * Endpoints to connect to: the Neptune endpoints for a direct connection,
     * otherwise the single proxy endpoint.
     */
    public Collection<String> endpoints() {
        if (isDirectConnection()) {
            return neptuneEndpoints;
        } else {
            return Collections.singletonList(proxyConfig.endpoint());
        }
    }

    /** Port to connect to: the Neptune port directly, otherwise the proxy port. */
    public int port() {
        if (isDirectConnection()) {
            return neptunePort;
        } else {
            return proxyConfig.port();
        }
    }

    public boolean useIamAuth() {
        return useIamAuth;
    }

    public boolean useSsl() {
        return useSsl;
    }

    /**
     * Handshake configuration for signed connections. For a proxied connection
     * the real Neptune endpoints and the remove-host-header setting are carried
     * through so the handshake can be addressed to Neptune itself.
     */
    public HandshakeRequestConfig handshakeRequestConfig() {
        if (isDirectConnection()) {
            return new HandshakeRequestConfig(Collections.emptyList(), neptunePort, false);
        } else {
            return new HandshakeRequestConfig(neptuneEndpoints, neptunePort, proxyConfig.removeHostHeader());
        }
    }

    /** True when no proxy is configured. */
    public boolean isDirectConnection() {
        return proxyConfig == null;
    }

    public ProxyConfig proxyConfig() {
        return proxyConfig;
    }

    public AWSCredentialsProvider getCredentialsProvider() {
        return credentialsProvider;
    }
}
| 936 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/ConcurrencyConfig.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.amazonaws.services.neptune.propertygraph.RangeConfig;
import org.apache.tinkerpop.gremlin.driver.Cluster;
import static java.lang.Math.max;
/**
 * Holds the export concurrency level and applies it to the Gremlin driver's
 * connection pool settings.
 */
public class ConcurrencyConfig {

    private final int concurrency;

    /**
     * @param concurrency number of concurrent workers; must be at least 1
     * @throws IllegalArgumentException if {@code concurrency} is less than 1
     */
    public ConcurrencyConfig(int concurrency) {
        if (concurrency < 1) {
            throw new IllegalArgumentException("Concurrency must be >= 1");
        }
        this.concurrency = concurrency;
    }

    public int concurrency() {
        return concurrency;
    }

    /** True when running multiple workers without a bounded range (range size of -1). */
    public boolean isUnboundedParallelExecution(RangeConfig rangeConfig) {
        if (concurrency <= 1) {
            return false;
        }
        return rangeConfig.rangeSize() == -1;
    }

    /**
     * Sizes the connection pools to spread the configured concurrency across
     * the available endpoints. A single-worker configuration leaves the builder
     * untouched.
     */
    public Cluster.Builder applyTo(Cluster.Builder clusterBuilder, int numberOfEndpoints) {
        if (concurrency == 1) {
            return clusterBuilder;
        }
        int perEndpoint = (concurrency / numberOfEndpoints) + 1;
        return clusterBuilder
                .minConnectionPoolSize(max(perEndpoint, 2))
                .maxConnectionPoolSize(max(perEndpoint, 8));
    }
}
| 937 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/EventId.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.amazonaws.services.neptune.propertygraph.io.Jsonizable;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
 * Identifies a position in a Neptune stream as a (commitNum, opNum) pair.
 * Immutable; serializable to JSON via {@link #toJson(Object)}.
 */
public class EventId implements Jsonizable<Object> {

    private final long commitNum;
    private final long opNum;

    public EventId(long commitNum, long opNum) {
        this.commitNum = commitNum;
        this.opNum = opNum;
    }

    public long commitNum() {
        return commitNum;
    }

    public long opNum() {
        return opNum;
    }

    /** Renders the pair as a small JSON-like string. */
    @Override
    public String toString() {
        return String.format("{ \"commitNum\": %d, \"opNum\": %d }", commitNum, opNum);
    }

    /** Serializes the pair to a JSON object with commitNum and opNum fields. */
    @Override
    public JsonNode toJson(Object o) {
        ObjectNode node = JsonNodeFactory.instance.objectNode();
        node.put("commitNum", commitNum);
        node.put("opNum", opNum);
        return node;
    }
}
| 938 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/HttpResponse.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.node.ArrayNode;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
 * An HTTP response with helpers for reading a JSON body as a tree, a single
 * object, or a collection. All JSON accessors require the content type to be
 * {@code application/json}.
 */
public class HttpResponse {

    private static final String JSON_CONTENT_TYPE = "application/json";
    private static final ObjectMapper MAPPER = new ObjectMapper();

    private final int status;
    private final String content;
    private final String contentType;

    /**
     * @param status      HTTP status code
     * @param content     response body
     * @param contentType value of the Content-Type header
     */
    public HttpResponse(int status, String content, String contentType) {
        this.status = status;
        this.content = content;
        this.contentType = contentType;
    }

    public int getStatus() {
        return status;
    }

    public String getContent() {
        return content;
    }

    /**
     * Parses the body as a JSON tree.
     *
     * @throws IllegalStateException if the content type is not application/json
     * @throws IOException           if the body cannot be parsed
     */
    public JsonNode getContentAsJson() throws IOException {
        requireJsonContent();
        return MAPPER.readTree(content);
    }

    /**
     * Deserializes the body into a single instance of {@code type}.
     * ObjectReader.readValue is generic, so the suppressed unchecked cast in
     * the original is no longer needed.
     *
     * @throws IllegalStateException if the content type is not application/json
     */
    public <T> T getContentAsObject(Class<T> type) throws IOException {
        requireJsonContent();
        return MAPPER.readerFor(type).readValue(content);
    }

    /**
     * Deserializes a JSON array body into a collection of {@code type}.
     *
     * @throws IllegalStateException if the content type is not application/json
     * @throws ClassCastException    if the body is valid JSON but not an array
     */
    public <T> Collection<T> getContentAsCollection(Class<T> type) throws IOException {
        requireJsonContent();
        ObjectReader reader = MAPPER.readerFor(type);
        List<T> results = new ArrayList<>();
        ArrayNode array = (ArrayNode) MAPPER.readTree(content);
        for (JsonNode node : array) {
            results.add(reader.readValue(node));
        }
        return results;
    }

    // Shared guard for all JSON accessors; previously this check was duplicated
    // in each method.
    private void requireJsonContent() {
        if (!contentType.equals(JSON_CONTENT_TYPE)) {
            throw new IllegalStateException("Content is not JSON: " + contentType);
        }
    }
}
| 939 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cluster/InstanceType.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cluster;
/**
 * Neptune instance types and the export concurrency each one supports.
 * Constant names mirror the instance class with dots replaced by underscores
 * (e.g. {@code db_r5_xlarge} for {@code db.r5.xlarge}).
 */
public enum InstanceType {

    // The original defined concurrency() via an anonymous subclass per constant
    // (40+ classes); a constructor argument expresses the same data far more
    // compactly and with less generated bytecode.
    db_r4_large(4),
    db_r4_xlarge(8),
    db_r4_2xlarge(16),
    db_r4_4xlarge(32),
    db_r4_8xlarge(64),
    db_r5_large(4),
    db_r5_xlarge(8),
    db_r5_2xlarge(16),
    db_r5_4xlarge(32),
    db_r5_8xlarge(64),
    db_r5_12xlarge(96),
    db_r5_16xlarge(128),
    db_r5_24xlarge(192),
    db_r5d_large(4),
    db_r5d_xlarge(8),
    db_r5d_2xlarge(16),
    db_r5d_4xlarge(32),
    db_r5d_8xlarge(64),
    db_r5d_12xlarge(96),
    db_r5d_16xlarge(128),
    db_r5d_24xlarge(192),
    db_r6g_large(4),
    db_r6g_xlarge(8),
    db_r6g_2xlarge(16),
    db_r6g_4xlarge(32),
    db_r6g_8xlarge(64),
    db_r6g_12xlarge(96),
    db_r6g_16xlarge(128),
    db_x2g_large(4),
    db_x2g_xlarge(8),
    db_x2g_2xlarge(16),
    db_x2g_4xlarge(32),
    db_x2g_8xlarge(64),
    db_x2g_12xlarge(96),
    db_x2g_16xlarge(128),
    db_m5_large(4),
    db_m5_xlarge(8),
    db_m5_2xlarge(16),
    db_m5_3xlarge(32),
    db_m5_8xlarge(64),
    db_m5_12xlarge(96),
    db_m5_16xlarge(128),
    db_m5_24xlarge(192),
    db_t3_medium(4);

    private final int concurrency;

    InstanceType(int concurrency) {
        this.concurrency = concurrency;
    }

    /**
     * Parses an instance type such as {@code r5.xlarge} or {@code db.r5.xlarge}.
     * Unrecognized values fall back to {@code db_r5_2xlarge}, matching the
     * original behaviour.
     */
    public static InstanceType parse(String value) {
        String typeName = value.startsWith("db.") ?
                value :
                String.format("db.%s", value);
        typeName = typeName.toLowerCase().replace(".", "_");
        try {
            return InstanceType.valueOf(typeName);
        } catch (IllegalArgumentException e) {
            // Unknown instance class: assume a mid-sized default.
            return db_r5_2xlarge;
        }
    }

    /** Export concurrency supported by this instance type. */
    int concurrency() {
        return concurrency;
    }

    /** The instance class in dotted form, e.g. {@code db.r5.xlarge}. */
    public String value() {
        return name().replace("_", ".");
    }

    @Override
    public String toString() {
        return value();
    }
}
| 940 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/util/GitProperties.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.util;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
/**
 * Build metadata (commit ID, build version, commit/build times) loaded from a
 * {@code git.properties} classpath resource. Immutable.
 */
public class GitProperties {

    private final String commitId;
    private final String buildVersion;
    private final String commitTime;
    private final String buildTime;

    public GitProperties(String commitId, String buildVersion, String commitTime, String buildTime) {
        this.commitId = commitId;
        this.buildVersion = buildVersion;
        this.commitTime = commitTime;
        this.buildTime = buildTime;
    }

    public String commitId() {
        return commitId;
    }

    /**
     * Loads metadata from the {@code git.properties} classpath resource. A
     * missing resource, an I/O failure, or absent keys all fall back to
     * {@code "unknown"} values rather than failing.
     */
    public static GitProperties fromResource() {
        Properties properties = new Properties();
        // try-with-resources closes the stream even if load() throws; the
        // original leaked the stream in that case. A null resource is fine:
        // the null is skipped on close.
        try (InputStream stream = ClassLoader.getSystemResourceAsStream("git.properties")) {
            if (stream != null) {
                properties.load(stream);
            }
        } catch (IOException e) {
            // Best effort: fall through to the "unknown" defaults below.
        }
        return new GitProperties(
                properties.getProperty("git.commit.id", "unknown"),
                properties.getProperty("git.build.version", "unknown"),
                properties.getProperty("git.commit.time", "unknown"),
                properties.getProperty("git.build.time", "unknown"));
    }

    @Override
    public String toString() {
        return "[" +
                "buildVersion='" + buildVersion + '\'' +
                ", buildTime='" + buildTime + '\'' +
                ", commitId='" + commitId + '\'' +
                ", commitTime='" + commitTime + '\'' +
                ']';
    }
}
| 941 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/util/Activity.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.util;
/**
 * Marker for a unit of work that throws no checked exceptions; compare
 * {@code CheckedActivity} for the checked variant.
 */
public interface Activity {

    /** A side-effecting activity that produces no result. */
    @FunctionalInterface
    interface Runnable extends Activity {
        void run();
    }

    /** An activity that produces a result of type {@code T}. */
    @FunctionalInterface
    interface Callable<T> extends Activity {
        T call();
    }
}
| 942 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/util/S3ObjectInfo.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.util;
import com.amazonaws.services.s3.Headers;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.SSEAlgorithm;
import org.apache.commons.lang.StringUtils;
import java.io.File;
import java.net.URI;
/**
 * Parses an S3 URI of the form {@code s3://bucket/key} and offers helpers for
 * deriving related keys, local download files, and S3 object metadata.
 */
public class S3ObjectInfo {

    // Bucket name: the authority component of the URI.
    private final String bucket;
    // Object key with the leading '/' stripped; empty when the URI has no path.
    private final String key;
    // Last path segment of the key; used to name a local download file.
    private final String fileName;

    public S3ObjectInfo(String s3Uri) {
        URI uri = URI.create(s3Uri);
        bucket = uri.getAuthority();
        String path = uri.getPath();
        key = StringUtils.isNotEmpty(path) ? path.substring(1) : "";
        fileName = new File(uri.getPath()).getName();
    }

    public String bucket() {
        return bucket;
    }

    public String key() {
        return key;
    }

    /**
     * Applies content length and server-side encryption settings to the given
     * metadata: SSE-KMS with the supplied key when one is provided, otherwise
     * SSE-S3 (AES-256).
     */
    public static ObjectMetadata createObjectMetadata(long contentLength, String sseKmsKeyId, ObjectMetadata objectMetadata){
        objectMetadata.setContentLength(contentLength);
        if (!StringUtils.isBlank(sseKmsKeyId)) {
            objectMetadata.setSSEAlgorithm(SSEAlgorithm.KMS.getAlgorithm());
            objectMetadata.setHeader(
                    Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID,
                    sseKmsKeyId
            );
        } else {
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        return objectMetadata;
    }

    /** Convenience overload that starts from a fresh ObjectMetadata. */
    public static ObjectMetadata createObjectMetadata(long contentLength, String sseKmsKeyId) {
        return createObjectMetadata(contentLength, sseKmsKeyId, new ObjectMetadata());
    }

    /** A local file under {@code parent} named after the object's last path segment. */
    public File createDownloadFile(String parent) {
        return new File(parent, fileName);
    }

    /**
     * A new S3ObjectInfo with {@code suffix} appended to the key as an extra
     * path segment. NOTE(review): path joining uses java.io.File, so separators
     * follow the local platform — assumed to run where the separator is '/';
     * confirm for Windows use.
     */
    public S3ObjectInfo withNewKeySuffix(String suffix) {
        File file = StringUtils.isNotEmpty(key) ? new File(key, suffix) : new File(suffix);
        return new S3ObjectInfo( String.format("s3://%s/%s", bucket, file.getPath()));
    }

    /**
     * If the key contains {@code placeholder}, substitutes {@code ifPresent}
     * for it; otherwise appends {@code ifAbsent} as a new path segment.
     */
    public S3ObjectInfo replaceOrAppendKey(String placeholder, String ifPresent, String ifAbsent) {
        File file = key.contains(placeholder) ?
                new File(key.replace(placeholder, ifPresent)) :
                new File(key, ifAbsent);
        return new S3ObjectInfo( String.format("s3://%s/%s", bucket, file.getPath()));
    }

    /** Overload using the same value for both the replacement and the appended segment. */
    public S3ObjectInfo replaceOrAppendKey(String placeholder, String ifPresent) {
        return replaceOrAppendKey(placeholder, ifPresent, ifPresent);
    }

    @Override
    public String toString() {
        return String.format("s3://%s/%s", bucket, key);
    }
}
| 943 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/util/NotImplementedException.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.util;
/**
 * Thrown to signal that a code path has not been implemented. Unchecked, so
 * callers are not forced to declare or handle it.
 */
public class NotImplementedException extends RuntimeException {
}
| 944 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/util/Timer.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.util;
/**
 * Runs activities while reporting their elapsed time (in whole seconds) on
 * stderr, for both successful completion and failure.
 */
public class Timer {

    /** Runs {@code activity}, padding the timing report with blank lines. */
    public static void timedActivity(String description, Activity.Runnable activity) {
        timedActivity(description, true, activity);
    }

    /** Runs {@code activity} and reports how long it took. */
    public static void timedActivity(String description, boolean padWithNewlines, Activity.Runnable activity) {
        long startedAt = System.currentTimeMillis();
        try {
            activity.run();
            printSuccess(description, padWithNewlines, startedAt);
        } catch (Exception e) {
            printFailure(description, padWithNewlines, startedAt);
            throw e;
        }
    }

    /** Runs a checked {@code activity}, padding the timing report with blank lines. */
    public static void timedActivity(String description, CheckedActivity.Runnable activity) throws Exception {
        timedActivity(description, true, activity);
    }

    /** Runs a checked {@code activity} and reports how long it took. */
    public static void timedActivity(String description, boolean padWithNewlines, CheckedActivity.Runnable activity) throws Exception {
        long startedAt = System.currentTimeMillis();
        try {
            activity.run();
            printSuccess(description, padWithNewlines, startedAt);
        } catch (Exception e) {
            printFailure(description, padWithNewlines, startedAt);
            throw e;
        }
    }

    /** Runs {@code activity} and returns its result, padding the timing report. */
    public static <T> T timedActivity(String description, Activity.Callable<T> activity) {
        return timedActivity(description, true, activity);
    }

    /** Runs {@code activity}, reports how long it took, and returns its result. */
    public static <T> T timedActivity(String description, boolean padWithNewlines, Activity.Callable<T> activity) {
        long startedAt = System.currentTimeMillis();
        try {
            T result = activity.call();
            printSuccess(description, padWithNewlines, startedAt);
            return result;
        } catch (Exception e) {
            printFailure(description, padWithNewlines, startedAt);
            throw e;
        }
    }

    /** Runs a checked {@code activity} and returns its result, padding the timing report. */
    public static <T> T timedActivity(String description, CheckedActivity.Callable<T> activity) throws Exception {
        return timedActivity(description, true, activity);
    }

    /** Runs a checked {@code activity}, reports how long it took, and returns its result. */
    public static <T> T timedActivity(String description, boolean padWithNewlines, CheckedActivity.Callable<T> activity) throws Exception {
        long startedAt = System.currentTimeMillis();
        try {
            T result = activity.call();
            printSuccess(description, padWithNewlines, startedAt);
            return result;
        } catch (Exception e) {
            printFailure(description, padWithNewlines, startedAt);
            throw e;
        }
    }

    // Success and failure reports share the same layout; only the message differs.

    private static void printSuccess(String description, boolean padWithNewlines, long start) {
        report(String.format("Completed %s in %s seconds", description, elapsedSeconds(start)), padWithNewlines);
    }

    private static void printFailure(String description, boolean padWithNewlines, long start) {
        report(String.format("An error occurred while %s. Elapsed time: %s seconds", description, elapsedSeconds(start)), padWithNewlines);
    }

    private static long elapsedSeconds(long start) {
        return (System.currentTimeMillis() - start) / 1000;
    }

    private static void report(String message, boolean padWithNewlines) {
        if (padWithNewlines) {
            System.err.println();
        }
        System.err.println(message);
        if (padWithNewlines) {
            System.err.println();
        }
    }
}
| 945 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/util/CheckedActivity.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.util;
/**
 * Marker for a unit of work that may throw checked exceptions; compare
 * {@code Activity} for the unchecked variant.
 */
public interface CheckedActivity {

    /** A side-effecting activity that produces no result and may throw. */
    @FunctionalInterface
    interface Runnable extends CheckedActivity {
        void run() throws Exception;
    }

    /** An activity that produces a result of type {@code T} and may throw. */
    @FunctionalInterface
    interface Callable<T> extends CheckedActivity {
        T call() throws Exception;
    }
}
| 946 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/util/AWSCredentialsUtil.java
|
package com.amazonaws.services.neptune.util;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider;
import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.regions.DefaultAwsRegionProviderChain;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Helpers for building AWS credentials providers from named profiles or from
 * STS assume-role configurations.
 */
public class AWSCredentialsUtil {

    private static final Logger logger = LoggerFactory.getLogger(AWSCredentialsUtil.class);

    /**
     * Returns a credentials provider for the given profile. With neither a
     * profile name nor a credentials-file path, falls back to the default
     * provider chain.
     *
     * @param profileName name of the profile, or empty for the default chain
     * @param profilePath path to the credentials file, or empty for the default location
     */
    public static AWSCredentialsProvider getProfileCredentialsProvider(String profileName, String profilePath) {
        if (StringUtils.isEmpty(profileName) && StringUtils.isEmpty(profilePath)) {
            return new DefaultAWSCredentialsProviderChain();
        }
        if (StringUtils.isEmpty(profilePath)) {
            logger.debug("Using ProfileCredentialsProvider with profile: {}", profileName);
            return new ProfileCredentialsProvider(profileName);
        }
        // Fix: the original String.format call was missing the placeholder for
        // profilePath, so the credentials file path was never logged.
        logger.debug("Using ProfileCredentialsProvider with profile: {} and credentials file: {}", profileName, profilePath);
        return new ProfileCredentialsProvider(profilePath, profileName);
    }

    /** Assume-role provider using the default chain as the source of STS credentials. */
    public static AWSCredentialsProvider getSTSAssumeRoleCredentialsProvider(String roleARN, String sessionName, String externalId) {
        return getSTSAssumeRoleCredentialsProvider(roleARN, sessionName, externalId, new DefaultAWSCredentialsProviderChain());
    }

    /** Assume-role provider using the default region provider chain for the STS client. */
    public static AWSCredentialsProvider getSTSAssumeRoleCredentialsProvider(String roleARN,
                                                                             String sessionName,
                                                                             String externalId,
                                                                             AWSCredentialsProvider sourceCredentialsProvider) {
        return getSTSAssumeRoleCredentialsProvider(roleARN, sessionName, externalId, sourceCredentialsProvider,
                new DefaultAwsRegionProviderChain().getRegion());
    }

    /**
     * Returns a provider that assumes the given role via STS.
     *
     * @param roleARN                   ARN of the role to assume
     * @param sessionName               session name recorded with STS
     * @param externalId                external ID required by the role's trust policy, or {@code null}
     * @param sourceCredentialsProvider credentials used to call STS
     * @param region                    region for the STS client
     */
    public static AWSCredentialsProvider getSTSAssumeRoleCredentialsProvider(String roleARN,
                                                                             String sessionName,
                                                                             String externalId,
                                                                             AWSCredentialsProvider sourceCredentialsProvider,
                                                                             String region) {
        STSAssumeRoleSessionCredentialsProvider.Builder providerBuilder = new STSAssumeRoleSessionCredentialsProvider.Builder(roleARN, sessionName)
                .withStsClient(
                        AWSSecurityTokenServiceClient.builder().withCredentials(sourceCredentialsProvider).withRegion(region).build());
        if (externalId != null) {
            providerBuilder = providerBuilder.withExternalId(externalId);
        }
        logger.debug("Assuming Role: {} with session name: {}", roleARN, sessionName);
        return providerBuilder.build();
    }
}
| 947 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/util/EnvironmentVariableUtils.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.util;
/**
 * Helpers for reading environment variables, treating unset and empty values
 * identically.
 */
public class EnvironmentVariableUtils {

    /**
     * Returns the value of the named environment variable.
     * Reads the environment once per call (the original read it twice).
     *
     * @throws IllegalStateException if the variable is unset or empty
     */
    public static String getMandatoryEnv(String name) {
        String value = System.getenv(name);
        if (isNullOrEmpty(value)) {
            throw new IllegalStateException(String.format("Missing environment variable: %s", name));
        }
        return value;
    }

    /** Returns the variable's value, or {@code defaultValue} when unset or empty. */
    public static String getOptionalEnv(String name, String defaultValue) {
        String value = System.getenv(name);
        return isNullOrEmpty(value) ? defaultValue : value;
    }

    private static boolean isNullOrEmpty(String value) {
        return value == null || value.isEmpty();
    }
}
| 948 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/util/TransferManagerWrapper.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.util;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import org.apache.commons.lang.StringUtils;
/**
 * Wraps an S3 {@link TransferManager} so that it can be acquired and released with
 * try-with-resources semantics.
 */
public class TransferManagerWrapper implements AutoCloseable {

    private final TransferManager transferManager;

    /**
     * Creates a wrapper using the default AWS credentials chain.
     */
    public TransferManagerWrapper(String s3Region) {
        this(s3Region, null);
    }

    /**
     * Creates a wrapper for the given region and (optional) credentials provider.
     * Either argument may be empty/null, in which case the SDK defaults apply.
     */
    public TransferManagerWrapper(String s3Region, AWSCredentialsProvider credentialsProvider) {
        AmazonS3ClientBuilder s3Builder = AmazonS3ClientBuilder.standard();
        if (credentialsProvider != null) {
            s3Builder = s3Builder.withCredentials(credentialsProvider);
        }
        if (StringUtils.isNotEmpty(s3Region)) {
            s3Builder = s3Builder.withRegion(s3Region);
        }
        this.transferManager = TransferManagerBuilder.standard()
                .withS3Client(s3Builder.build())
                .build();
    }

    /**
     * Returns the managed {@link TransferManager}.
     */
    public TransferManager get() {
        return transferManager;
    }

    @Override
    public void close() {
        // Abort in-flight transfers and release the underlying client's resources.
        transferManager.shutdownNow();
    }
}
| 949 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/util/SemicolonUtils.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.util;
import org.apache.commons.lang.StringUtils;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.regex.Pattern;
/**
 * Splits and unescapes strings that use the semicolon as a separator, where a
 * literal semicolon inside a value is escaped as {@code \;}.
 */
public class SemicolonUtils {

    // Matches a separator semicolon: one that is NOT preceded by a backslash.
    private static final String SEMICOLON_SEPARATOR = "(?<!\\\\);";
    private static final Pattern regexPattern = Pattern.compile(SEMICOLON_SEPARATOR);

    private SemicolonUtils() {
        // Utility class: no instances.
    }

    /**
     * Splits a string on unescaped semicolons.
     *
     * @param s string to split (may be null or empty)
     * @return the separated values (escaped semicolons within a value are kept
     *         as-is); an empty collection for null or empty input
     */
    public static Collection<String> split(String s) {
        if (s == null || s.isEmpty()) {
            return Collections.emptyList();
        }
        return Arrays.asList(regexPattern.split(s, 0));
    }

    /**
     * Replaces escaped semicolons ({@code \;}) with literal semicolons.
     *
     * @param s string to unescape
     * @return the unescaped string (unchanged if it contains no semicolons)
     */
    public static String unescape(String s) {
        if (s.contains(";")) {
            return s.replace("\\;", ";");
        }
        return s;
    }
}
| 950 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/auth/HandshakeRequestConfig.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.auth;
import org.joda.time.DateTime;
import java.util.*;
import java.util.stream.Collectors;
/**
 * Configuration used when signing WebSocket handshake requests: the Neptune
 * endpoints and port to use for the Host header, and whether the Host header
 * should be removed again after signing.
 * <p>
 * Serialized form (see {@link #parse(String)} and {@link #value()}):
 * {@code removeHostHeaderAfterSigning,port,endpoint1[,endpoint2,...]}.
 */
public class HandshakeRequestConfig {

    /**
     * Parses a config from its comma-separated serialized form.
     *
     * @param s string in the format produced by {@link #value()}
     * @return the parsed config
     */
    public static HandshakeRequestConfig parse(String s) {
        String[] values = s.split(",");
        boolean removeHostHeaderAfterSigning = Boolean.parseBoolean(values[0]);
        int port = Integer.parseInt(values[1]);
        // All remaining values are endpoints.
        Collection<String> endpoints = new ArrayList<>(Arrays.asList(values).subList(2, values.length));
        return new HandshakeRequestConfig(endpoints, port, removeHostHeaderAfterSigning);
    }

    private final List<String> endpoints;
    private final int port;
    private final boolean removeHostHeaderAfterSigning;
    // Random endpoint selection spreads handshakes across instances. The default
    // Random() constructor is already time-seeded, so no explicit clock seed is needed.
    private final Random random = new Random();

    public HandshakeRequestConfig(Collection<String> endpoints, int port, boolean removeHostHeaderAfterSigning) {
        this.endpoints = new ArrayList<>(endpoints);
        this.port = port;
        this.removeHostHeaderAfterSigning = removeHostHeaderAfterSigning;
    }

    /**
     * Chooses one of the configured endpoints at random and formats it as a
     * {@code host:port} Host header value.
     */
    public String chooseHostHeader() {
        return String.format("%s:%s", endpoints.get(random.nextInt(endpoints.size())), port);
    }

    public boolean removeHostHeaderAfterSigning() {
        return removeHostHeaderAfterSigning;
    }

    /**
     * Serializes this config in the format accepted by {@link #parse(String)}.
     */
    public String value() {
        return String.format("%s,%s,%s", removeHostHeaderAfterSigning, port, String.join(",", endpoints));
    }

    @Override
    public String toString() {
        return value();
    }
}
| 951 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/auth/LBAwareAwsSigV4ClientHandshaker.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.auth;
import com.amazon.neptune.gremlin.driver.sigv4.ChainedSigV4PropertiesProvider;
import com.amazon.neptune.gremlin.driver.sigv4.SigV4Properties;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.neptune.auth.NeptuneNettyHttpSigV4Signer;
import com.amazonaws.neptune.auth.NeptuneSigV4SignerException;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.websocketx.WebSocketClientHandshaker13;
import io.netty.handler.codec.http.websocketx.WebSocketVersion;
import java.net.URI;
/**
 * A {@link WebSocketClientHandshaker13} that SigV4-signs the WebSocket handshake
 * request using a real Neptune endpoint in the Host header, for connections that
 * reach Neptune through a load balancer (whose address would otherwise be signed
 * and cause signature validation to fail).
 */
public class LBAwareAwsSigV4ClientHandshaker extends WebSocketClientHandshaker13 {
    private final ChainedSigV4PropertiesProvider sigV4PropertiesProvider;
    private final HandshakeRequestConfig handshakeRequestConfig;
    // Resolved once at construction time from the properties provider.
    private final SigV4Properties sigV4Properties;
    public LBAwareAwsSigV4ClientHandshaker(URI webSocketURL, WebSocketVersion version, String subprotocol, boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, ChainedSigV4PropertiesProvider sigV4PropertiesProvider, HandshakeRequestConfig handshakeRequestConfig) {
        super(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength);
        this.sigV4PropertiesProvider = sigV4PropertiesProvider;
        this.handshakeRequestConfig = handshakeRequestConfig;
        this.sigV4Properties = this.loadProperties();
    }
    protected FullHttpRequest newHandshakeRequest() {
        FullHttpRequest request = super.newHandshakeRequest();
        // SigV4 signs the Host header, so replace the default one (which points at
        // the load balancer) with a real Neptune endpoint before signing.
        request.headers().remove("Host");
        request.headers().add("Host", handshakeRequestConfig.chooseHostHeader());
        try {
            NeptuneNettyHttpSigV4Signer sigV4Signer = new NeptuneNettyHttpSigV4Signer(this.sigV4Properties.getServiceRegion(), new DefaultAWSCredentialsProviderChain());
            sigV4Signer.signRequest(request);
            if (handshakeRequestConfig.removeHostHeaderAfterSigning()) {
                // NOTE(review): presumably so an intermediary can supply its own Host
                // header for routing while the signature still covers the Neptune
                // endpoint — confirm against the proxy setup this config targets.
                request.headers().remove("Host");
            }
            return request;
        } catch (NeptuneSigV4SignerException var4) {
            throw new RuntimeException("Exception occurred while signing the request", var4);
        }
    }
    private SigV4Properties loadProperties() {
        return this.sigV4PropertiesProvider.getSigV4Properties();
    }
}
| 952 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/FileToStreamOutputWriter.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import org.apache.commons.io.input.Tailer;
import org.apache.commons.io.input.TailerListenerAdapter;
import java.io.Writer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * An {@link OutputWriter} decorator that tails the file written by the inner
 * writer and publishes each completed line to a Kinesis stream. On close, it
 * waits for the tailer to forward every committed line, flushes the stream, and
 * deletes the temporary file.
 */
public class FileToStreamOutputWriter implements OutputWriter {
    private final OutputWriter innerOutputWriter;
    private final Path filePath;
    private final Stream stream;
    // Tails filePath on a background thread (started by Tailer.create) and feeds
    // each new line to the listener.
    private final Tailer tailer;
    private final ExportListener listener;
    FileToStreamOutputWriter(OutputWriter innerOutputWriter, Path filePath, KinesisConfig kinesisConfig) {
        this.innerOutputWriter = innerOutputWriter;
        this.filePath = filePath;
        this.stream = kinesisConfig.stream();
        this.listener = new ExportListener(stream);
        this.tailer = Tailer.create(filePath.toFile(), listener);
    }
    @Override
    public boolean isNewTarget() {
        return false;
    }
    @Override
    public String outputId() {
        return String.format("%s [for stream %s]", filePath.toString(), stream.name());
    }
    @Override
    public void startCommit() {
        innerOutputWriter.startCommit();
    }
    @Override
    public void endCommit() {
        innerOutputWriter.endCommit();
        // Each commit is expected to produce one line in the file; the count lets
        // close() know when the tailer has caught up.
        listener.incrementTotalLineCount();
    }
    @Override
    public void print(String s) {
        innerOutputWriter.print(s);
    }
    @Override
    public Writer writer() {
        return innerOutputWriter.writer();
    }
    @Override
    public void startOp() {
        innerOutputWriter.startOp();
    }
    @Override
    public void endOp() {
        innerOutputWriter.endOp();
    }
    @Override
    public String lineSeparator() {
        return innerOutputWriter.lineSeparator();
    }
    @Override
    public void close() throws Exception {
        innerOutputWriter.close();
        // Poll until the tailer thread has forwarded every committed line.
        // NOTE(review): linesProcessed is read here but written by the tailer thread
        // without synchronization — appears to rely on the 1s polling interval for
        // visibility; confirm this cannot spin indefinitely.
        while (!listener.isFinished()) {
            Thread.sleep(1000);
        }
        tailer.stop();
        stream.flushRecords();
        Files.deleteIfExists(filePath);
    }
    /**
     * Tailer callback that publishes each tailed line to the stream and tracks
     * how many lines have been processed versus committed.
     */
    private static class ExportListener extends TailerListenerAdapter {
        private final Stream stream;
        // Lines committed by the writer thread (expected total).
        private final AtomicInteger totalLineCount = new AtomicInteger(0);
        // Lines forwarded by the tailer thread so far.
        private int linesProcessed = 0;
        private ExportListener(Stream stream) {
            this.stream = stream;
        }
        public void handle(String line) {
            stream.publish(line);
            linesProcessed++;
        }
        public void incrementTotalLineCount() {
            totalLineCount.incrementAndGet();
        }
        // True once the tailer has forwarded as many lines as were committed.
        public boolean isFinished() {
            return linesProcessed == totalLineCount.get();
        }
    }
}
| 953 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/FileExtension.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
/**
 * Supplies the file extension used for output files of a particular format.
 */
public interface FileExtension {
    // The extension for this format — TODO confirm whether implementations
    // include the leading dot.
    String extension();
}
| 954 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/Status.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Thread-safe progress tracker that periodically reports how many items have been
 * processed, and carries a cooperative cancellation flag.
 */
public class Status {

    private static final Logger logger = LoggerFactory.getLogger(Status.class);

    private final AtomicLong counter = new AtomicLong();
    private final AtomicBoolean allowContinue = new AtomicBoolean(true);
    private final StatusOutputFormat outputFormat;
    private final String description;
    private final Supplier<String> additionalDetailsSupplier;

    public Status(StatusOutputFormat outputFormat) {
        this(outputFormat, "");
    }

    public Status(StatusOutputFormat outputFormat, String description) {
        this(outputFormat, description, () -> "");
    }

    public Status(StatusOutputFormat outputFormat, String description, Supplier<String> additionalDetailsSupplier) {
        this.outputFormat = outputFormat;
        this.description = description;
        this.additionalDetailsSupplier = additionalDetailsSupplier;
    }

    /**
     * Records one processed item and, at format-specific intervals, emits a
     * progress indicator: a dot to stderr every 10,000 items, or an info log line
     * every 100,000 items.
     */
    public void update() {
        long count = counter.incrementAndGet();
        switch (outputFormat) {
            case Dot:
                if (count % 10000 == 0) {
                    System.err.print(".");
                }
                break;
            case Description:
                if (count % 100000 == 0) {
                    logger.info("{} ({}){}", count, description, additionalDetailsSupplier.get());
                }
                break;
            default:
                // Other formats emit no periodic output.
                break;
        }
    }

    /** Whether processing should continue (false once {@link #halt()} is called). */
    public boolean allowContinue() {
        return allowContinue.get();
    }

    /** Signals all consumers of this status object to stop processing. */
    public void halt() {
        allowContinue.set(false);
    }
}
| 955 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/RecordSplitter.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.JsonNodeType;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.commons.lang.StringUtils;
import org.eclipse.rdf4j.model.IRI;
import org.eclipse.rdf4j.model.Resource;
import org.eclipse.rdf4j.model.Statement;
import org.eclipse.rdf4j.model.Value;
import org.eclipse.rdf4j.model.impl.SimpleValueFactory;
import org.eclipse.rdf4j.rio.RDFHandler;
import org.eclipse.rdf4j.rio.RDFHandlerException;
import org.eclipse.rdf4j.rio.RDFParser;
import org.eclipse.rdf4j.rio.nquads.NQuadsParserFactory;
import org.eclipse.rdf4j.rio.nquads.NQuadsWriter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
/**
 * Splits records that would exceed a maximum serialized size (e.g. the Kinesis
 * 1 MB record limit) into several smaller records, when the configured
 * {@link LargeStreamRecordHandlingStrategy} permits it.
 * <p>
 * Input records are JSON arrays; each element is re-emitted wrapped in its own
 * single-element JSON array ("[...]"). Oversized Neptune stream events have their
 * string payload (property-graph value or RDF statement literal) shredded across
 * several events with consecutive opNums.
 * <p>
 * NOTE(review): instances are not safe for concurrent use — the shared RDF parse
 * handler ({@code handler}) holds mutable per-call state.
 */
public class RecordSplitter {
    private static final Logger logger = LoggerFactory.getLogger(RecordSplitter.class);
    // Convenience overload with a default 10-character word-boundary search margin.
    public static Collection<String> splitByLength(String s, int length){
        return splitByLength(s ,length, 10);
    }
    /**
     * Splits {@code s} into chunks of at most {@code length} characters, preferring
     * to break at a non-alphanumeric character found within
     * {@code wordBoundaryMargin} characters of the limit. Chunks are trimmed and
     * empty chunks are dropped, so edge whitespace is not preserved.
     */
    public static Collection<String> splitByLength(String s, int length, int wordBoundaryMargin) {
        int startIndex = 0;
        Collection<String> results = new ArrayList<>();
        while (startIndex < s.length()) {
            boolean foundWordBoundary = false;
            int endIndex = Math.min(startIndex + length, s.length());
            // Never search back past the margin, and always make at least one
            // character of progress per chunk.
            int minCandidateEndIndex = Math.max(startIndex +1, endIndex - wordBoundaryMargin);
            // Scan backwards from the limit looking for a non-alphanumeric break point.
            for (int actualEndIndex = endIndex; actualEndIndex >= minCandidateEndIndex; actualEndIndex--){
                if (!StringUtils.isAlphanumeric( s.substring(actualEndIndex - 1, actualEndIndex))){
                    String result = s.substring(startIndex, actualEndIndex);
                    String trimmedResult = result.trim();
                    if (StringUtils.isNotEmpty(trimmedResult)){
                        results.add(trimmedResult);
                    }
                    startIndex = actualEndIndex;
                    foundWordBoundary = true;
                    break;
                }
            }
            if (!foundWordBoundary){
                // No boundary within the margin: cut at the hard limit.
                String result = s.substring(startIndex, endIndex);
                String trimmedResult = result.trim();
                if (StringUtils.isNotEmpty(trimmedResult)){
                    results.add(trimmedResult);
                }
                startIndex = endIndex;
            }
        }
        return results;
    }
    // Maximum length allowed for a replacement string value: the record's fixed
    // overhead (recordLength - valueLength) must still fit, minus 2 extra
    // characters — TODO confirm what the 2-character allowance accounts for.
    private static int calculateStringMaxLength(int maxLength, int recordLength, int valueLength) {
        return maxLength - (recordLength - valueLength) - 2;
    }
    private final int maxSize;
    private final LargeStreamRecordHandlingStrategy largeStreamRecordHandlingStrategy;
    private final ObjectMapper mapper = new ObjectMapper();
    private final RDFParser parser = new NQuadsParserFactory().getParser();
    // Shared, mutable handler: reset() is called before each parse.
    private final StatementHandler handler = new StatementHandler();
    public RecordSplitter(int maxSize, LargeStreamRecordHandlingStrategy largeStreamRecordHandlingStrategy) {
        this.maxSize = maxSize;
        this.largeStreamRecordHandlingStrategy = largeStreamRecordHandlingStrategy;
        this.parser.setRDFHandler(handler);
    }
    /**
     * Splits a JSON-array record into one single-element JSON array per element.
     * Neptune stream events are renumbered (opNum) and shredded when oversized;
     * plain strings are length-split; numbers pass through. Input that is not
     * valid JSON is returned unchanged (and will likely be dropped downstream).
     */
    public Collection<String> split(String s) {
        Collection<String> results = new ArrayList<>();
        int opNum = 1;
        try {
            JsonNode json = mapper.readTree(s);
            for (JsonNode jsonNode : json) {
                if (isNeptuneStreamEvent(jsonNode)) {
                    Collection<String> events = splitNeptuneStreamEvent(jsonNode, opNum);
                    results.addAll(events);
                    // Advance past any opNums consumed by shredding.
                    opNum += events.size();
                } else {
                    JsonNodeType nodeType = jsonNode.getNodeType();
                    if (nodeType == JsonNodeType.NUMBER) {
                        results.addAll(splitNumber(jsonNode));
                    } else if (nodeType == JsonNodeType.STRING) {
                        results.addAll(splitString(jsonNode));
                    } else {
                        // This may end up being dropped
                        results.add(format(jsonNode.toString()));
                    }
                }
            }
        } catch (JsonProcessingException e) {
            // This will almost certainly be dropped
            results.add(s);
        }
        return results;
    }
    /**
     * Renumbers a Neptune stream event's opNum and, when the serialized event is
     * oversized and the strategy allows shredding, splits its string payload into
     * several events with consecutive opNums.
     */
    private Collection<String> splitNeptuneStreamEvent(JsonNode jsonNode, int opNum) {
        Collection<String> results = new ArrayList<>();
        // Renumber BEFORE measuring, so the length check sees the final opNum.
        ((ObjectNode) jsonNode.get("eventId")).replace("opNum", mapper.valueToTree(opNum));
        String jsonString = jsonNode.toString();
        int eventJsonLength = jsonString.length();
        if (eventJsonLength > maxSize && largeStreamRecordHandlingStrategy.allowShred()) {
            if (isProperytGraphEvent(jsonNode)) {
                // Property-graph event: shred the property value string, reusing the
                // same event envelope with a fresh opNum per fragment.
                String value = jsonNode.get("data").get("value").get("value").textValue();
                int maxStringLength = calculateStringMaxLength(maxSize, eventJsonLength, value.length());
                Collection<String> splitValues = splitByLength(value, maxStringLength);
                for (String splitValue : splitValues) {
                    ((ObjectNode) jsonNode.get("eventId")).replace("opNum", mapper.valueToTree(opNum));
                    ((ObjectNode) jsonNode.get("data").get("value")).replace("value", mapper.valueToTree(splitValue));
                    results.add(format(jsonNode.toString()));
                    opNum += 1;
                }
            } else {
                // RDF event: parse the N-Quads statement and shred its literal object.
                String statement = jsonNode.get("data").get("stmt").textValue();
                int statementLength = statement.length();
                int maxStatementLength = calculateStringMaxLength(maxSize, eventJsonLength, statementLength);
                handler.reset(statementLength, maxStatementLength);
                try {
                    parser.parse(new StringReader(statement));
                    for (String splitStatement : handler.statements()) {
                        ((ObjectNode) jsonNode.get("eventId")).replace("opNum", mapper.valueToTree(opNum));
                        ((ObjectNode) jsonNode.get("data")).replace("stmt", mapper.valueToTree(splitStatement));
                        results.add(format(jsonNode.toString()));
                        opNum += 1;
                    }
                } catch (IOException e) {
                    // Parse failure: fall back to emitting the oversized record
                    // unsplit (it may be dropped downstream).
                    results.add(format(jsonString));
                }
            }
        } else {
            results.add(format(jsonString));
        }
        return results;
    }
    // Property-graph events carry data.value; RDF events carry data.stmt.
    private boolean isProperytGraphEvent(JsonNode jsonNode) {
        return jsonNode.get("data").has("value");
    }
    // Splits an oversized string element into quoted single-element arrays.
    private Collection<String> splitString(JsonNode jsonNode) {
        Collection<String> results = new ArrayList<>();
        String jsonString = jsonNode.textValue();
        if (jsonString.length() > maxSize) {
            Collection<String> splitValues = splitByLength(jsonString, maxSize);
            for (String splitValue : splitValues) {
                results.add(format(splitValue, true));
            }
        } else {
            results.add(format(jsonString, true));
        }
        return results;
    }
    // Numbers are never oversized: emit as-is in a single-element array.
    private Collection<String> splitNumber(JsonNode jsonNode) {
        return Collections.singletonList(format(jsonNode.asText()));
    }
    // Neptune stream events are identified by the presence of an eventId object.
    private boolean isNeptuneStreamEvent(JsonNode jsonNode) {
        return jsonNode.has("eventId");
    }
    private String format(String s) {
        return format(s, false);
    }
    // Wraps the value in a single-element JSON array, optionally quoting it.
    private String format(String s, boolean addQuotes) {
        if (addQuotes) {
            return String.format("[\"%s\"]", s);
        } else {
            return String.format("[%s]", s);
        }
    }
    /**
     * RDF handler that, for each parsed statement with a literal object, splits
     * the literal and re-serializes one N-Quads statement per fragment.
     */
    private static class StatementHandler implements RDFHandler {
        private final Collection<String> results = new ArrayList<>();
        private int statementLength;
        private int maxStatementLength;
        @Override
        public void startRDF() throws RDFHandlerException {
        }
        @Override
        public void endRDF() throws RDFHandlerException {
        }
        @Override
        public void handleNamespace(String s, String s1) throws RDFHandlerException {
        }
        @Override
        public void handleStatement(Statement statement) throws RDFHandlerException {
            Value object = statement.getObject();
            if (object.isLiteral()) {
                String objectValue = object.stringValue();
                int maxObjectLength = calculateStringMaxLength(maxStatementLength, statementLength, objectValue.length());
                Collection<String> splitValues = splitByLength(objectValue, maxObjectLength);
                for (String splitValue : splitValues) {
                    StringWriter writer = new StringWriter();
                    // Re-serialize a copy of the statement whose object is the
                    // fragment; subject/predicate/context are unchanged.
                    new NQuadsWriter(writer).consumeStatement(new Statement() {
                        @Override
                        public Resource getSubject() {
                            return statement.getSubject();
                        }
                        @Override
                        public IRI getPredicate() {
                            return statement.getPredicate();
                        }
                        @Override
                        public Value getObject() {
                            return SimpleValueFactory.getInstance().createLiteral(splitValue);
                        }
                        @Override
                        public Resource getContext() {
                            return statement.getContext();
                        }
                    });
                    results.add(writer.toString());
                }
            } else {
                // NOTE(review): non-literal objects are serialized via toString(),
                // not the NQuadsWriter — confirm the formats are interchangeable here.
                results.add(String.format("%s\n", statement.toString()));
            }
        }
        @Override
        public void handleComment(String s) throws RDFHandlerException {
        }
        // Prepares the handler for a fresh parse of one statement.
        public void reset(int statementLength, int maxStatementLength) {
            this.statementLength = statementLength;
            this.maxStatementLength = maxStatementLength;
            results.clear();
        }
        public Collection<String> statements() {
            return results;
        }
    }
}
| 956 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/CommandWriter.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
/**
 * Sink for a command's user-facing output.
 */
public interface CommandWriter {
    // Writes a value that constitutes the command's result (e.g. an output location).
    void writeReturnValue(String value);
    // Writes an informational message.
    void writeMessage(String value);
}
| 957 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/DirectoryStructure.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
/**
 * The set of directories each kind of export needs on disk. Each constant creates
 * only the directories relevant to its export type; the other paths are ignored.
 */
public enum DirectoryStructure {
    Config,
    PropertyGraph,
    Rdf,
    GremlinQueries,
    SparqlQueries,
    SimpleStreamsOutput;

    /**
     * Creates (recursively, if necessary) the directories required by this
     * export type.
     *
     * @throws IOException if a directory cannot be created
     */
    public void createDirectories(Path directory,
                                  Path nodesDirectory,
                                  Path edgesDirectory,
                                  Path statementsDirectory,
                                  Path resultsDirectory,
                                  Path recordsDirectory) throws IOException {
        switch (this) {
            case Config:
                Files.createDirectories(directory);
                break;
            case PropertyGraph:
                Files.createDirectories(nodesDirectory);
                Files.createDirectories(edgesDirectory);
                break;
            case Rdf:
                Files.createDirectories(statementsDirectory);
                break;
            case GremlinQueries:
            case SparqlQueries:
                Files.createDirectories(resultsDirectory);
                break;
            case SimpleStreamsOutput:
                Files.createDirectories(recordsDirectory);
                break;
        }
    }
}
| 958 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/Stream.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import com.amazonaws.services.kinesis.producer.Attempt;
import com.amazonaws.services.kinesis.producer.KinesisProducer;
import com.amazonaws.services.kinesis.producer.UserRecordFailedException;
import com.amazonaws.services.kinesis.producer.UserRecordResult;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Publishes records to an Amazon Kinesis stream via the Kinesis Producer Library,
 * splitting or dropping records that exceed the Kinesis 1 MB record size limit
 * according to the configured {@link LargeStreamRecordHandlingStrategy}.
 */
public class Stream {

    private static final Logger logger = LoggerFactory.getLogger(Stream.class);

    // Kinesis limits a single record's data blob to 1 MB.
    private static final int MAX_SIZE_BYTES = 1000000;

    private final KinesisProducer kinesisProducer;
    private final String streamName;
    private final StreamThrottle streamThrottle;
    private final LargeStreamRecordHandlingStrategy largeStreamRecordHandlingStrategy;
    private final RecordSplitter splitter;
    // Monotonic partition-key source; one value per published record.
    private final AtomicLong counter = new AtomicLong();

    public Stream(KinesisProducer kinesisProducer,
                  String streamName,
                  LargeStreamRecordHandlingStrategy largeStreamRecordHandlingStrategy) {
        this.kinesisProducer = kinesisProducer;
        this.streamName = streamName;
        this.streamThrottle = new StreamThrottle(kinesisProducer);
        this.largeStreamRecordHandlingStrategy = largeStreamRecordHandlingStrategy;
        this.splitter = new RecordSplitter(MAX_SIZE_BYTES, largeStreamRecordHandlingStrategy);
    }

    /**
     * Publishes a record, splitting it first when it exceeds the Kinesis record
     * size limit and the strategy allows splitting. Records of 2 characters or
     * fewer (e.g. an empty JSON array "[]") are skipped.
     */
    public synchronized void publish(String s) {
        if (StringUtils.isNotEmpty(s) && s.length() > 2) {
            long partitionKeyValue = counter.incrementAndGet();
            // getBytes(Charset) cannot fail, unlike the getBytes(String) overload,
            // which declares UnsupportedEncodingException even for valid names.
            byte[] bytes = s.getBytes(StandardCharsets.UTF_8);
            if (bytes.length > MAX_SIZE_BYTES && largeStreamRecordHandlingStrategy.allowSplit()) {
                // All fragments share one partition key so they land on the same
                // shard, in order.
                Collection<String> splitRecords = splitter.split(s);
                for (String splitRecord : splitRecords) {
                    publish(partitionKeyValue, splitRecord.getBytes(StandardCharsets.UTF_8));
                }
            } else {
                publish(partitionKeyValue, bytes);
            }
        }
    }

    private void publish(long partitionKeyValue, byte[] bytes) {
        if (bytes.length > MAX_SIZE_BYTES) {
            // Decode explicitly as UTF-8 so the log output does not depend on the
            // platform default charset.
            logger.warn("Dropping record because it is larger than 1 MB: [{}] '{}...'", bytes.length, new String(Arrays.copyOfRange(bytes, 0, 256), StandardCharsets.UTF_8));
            return;
        }
        try {
            ByteBuffer data = ByteBuffer.wrap(bytes);
            streamThrottle.recalculateMaxBufferSize(partitionKeyValue, bytes.length);
            streamThrottle.throttle();
            ListenableFuture<UserRecordResult> future = kinesisProducer.addUserRecord(streamName, String.valueOf(partitionKeyValue), data);
            Futures.addCallback(future, CALLBACK, MoreExecutors.directExecutor());
        } catch (InterruptedException e) {
            logger.error(e.getMessage());
            // Preserve the interrupt so callers can observe the cancellation.
            Thread.currentThread().interrupt();
        }
    }

    public String name() {
        return streamName;
    }

    /** Blocks until all buffered records have been sent. */
    public void flushRecords() {
        kinesisProducer.flushSync();
    }

    // Logs delivery failures; the KPL invokes this on its own threads.
    private static final FutureCallback<UserRecordResult> CALLBACK = new FutureCallback<UserRecordResult>() {
        @Override
        public void onSuccess(UserRecordResult userRecordResult) {
            if (!userRecordResult.isSuccessful()) {
                logger.error("Unsuccessful attempt to write to stream: " + formatAttempts(userRecordResult.getAttempts()));
            }
        }

        @Override
        public void onFailure(Throwable throwable) {
            if (UserRecordFailedException.class.isAssignableFrom(throwable.getClass())) {
                UserRecordFailedException e = (UserRecordFailedException) throwable;
                logger.error("Error writing to stream: " + formatAttempts(e.getResult().getAttempts()));
            }
            logger.error("Error writing to stream.", throwable);
        }
    };

    // Formats each delivery attempt as [errorCode:errorMessage(delay,duration)].
    private static String formatAttempts(List<Attempt> attempts) {
        StringBuilder builder = new StringBuilder();
        for (Attempt attempt : attempts) {
            builder.append("[");
            builder.append(attempt.getErrorCode()).append(":").append(attempt.getErrorMessage());
            builder.append("(").append(attempt.getDelay()).append(",").append(attempt.getDuration()).append(")");
            builder.append("]");
        }
        return builder.toString();
    }
}
| 959 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/StdOutPrintOutputWriter.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import java.io.*;
/**
 * A {@link PrintOutputWriter} that writes to stdout. Committing and closing only
 * flush — the underlying System.out stream is never closed, so it remains usable
 * after this writer is discarded.
 */
public class StdOutPrintOutputWriter extends PrintOutputWriter {
    // Output identifier passed to the superclass in place of a file name.
    private static final String StdOut = "StdOut";
    public StdOutPrintOutputWriter() {
        this(System.out, true);
    }
    // The remaining constructors mirror the superclass's and are private: this
    // writer is only ever constructed for System.out via the no-arg constructor.
    private StdOutPrintOutputWriter(Writer out) {
        super(StdOut, out);
    }
    private StdOutPrintOutputWriter(Writer out, boolean autoFlush) {
        super(StdOut, out, autoFlush);
    }
    private StdOutPrintOutputWriter(OutputStream out) {
        super(StdOut, out);
    }
    private StdOutPrintOutputWriter(OutputStream out, boolean autoFlush) {
        super(StdOut, out, autoFlush);
    }
    private StdOutPrintOutputWriter(String fileName) throws FileNotFoundException {
        super(fileName);
    }
    private StdOutPrintOutputWriter(String fileName, String csn) throws FileNotFoundException, UnsupportedEncodingException {
        super(fileName, csn);
    }
    private StdOutPrintOutputWriter(File file) throws FileNotFoundException {
        super(file);
    }
    private StdOutPrintOutputWriter(File file, String csn) throws FileNotFoundException, UnsupportedEncodingException {
        super(file, csn);
    }
    @Override
    public void endCommit() {
        flush();
    }
    @Override
    public void close() {
        // Flush only — never close System.out.
        flush();
    }
}
| 960 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/OutputWriter.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import java.io.Writer;
/**
 * Abstraction over an export output target (file, stdout, Kinesis stream, or
 * a no-op sink) with commit/operation framing hooks.
 */
public interface OutputWriter extends AutoCloseable {
    /** Whether the underlying target was newly created (e.g. a file that did not previously exist). */
    boolean isNewTarget();
    /** Identifier for the target — e.g. an absolute file path, or "StdOut". */
    String outputId();
    /** Marks the beginning of a batch of operations. */
    void startCommit();
    /** Marks the end of a batch of operations (implementations typically flush here). */
    void endCommit();
    /** Prints a string to the target. */
    void print(String s);
    /** The underlying {@link Writer} to emit content through. */
    Writer writer();
    /** Marks the beginning of a single operation within a commit. */
    void startOp();
    /** Marks the end of a single operation within a commit. */
    void endOp();
    /** Line separator for this target — may be empty (e.g. for stream targets). */
    String lineSeparator();
    void close() throws Exception;
}
| 961 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/LargeStreamRecordHandlingStrategy.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
/**
 * Strategies for handling records that are too large for the stream.
 * Each strategy declares whether oversized records may be split, and whether
 * records that still do not fit after splitting may be shredded.
 */
public enum LargeStreamRecordHandlingStrategy {

    dropAll(false, false),
    splitAndDrop(true, false),
    splitAndShred(true, true);

    private final boolean allowSplit;
    private final boolean allowShred;

    LargeStreamRecordHandlingStrategy(boolean allowSplit, boolean allowShred) {
        this.allowSplit = allowSplit;
        this.allowShred = allowShred;
    }

    /** Whether oversized records may be split into smaller records. */
    public boolean allowSplit() {
        return allowSplit;
    }

    /** Whether records that still exceed the limit may be shredded. */
    public boolean allowShred() {
        return allowShred;
    }
}
| 962 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/StreamThrottle.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import com.amazonaws.services.kinesis.producer.KinesisProducer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Adapts the maximum number of records that may be queued in the Kinesis
 * producer, based on the average record size over a tumbling window, and
 * blocks callers while the producer's backlog exceeds that limit.
 */
public class StreamThrottle {

    private static final Logger logger = LoggerFactory.getLogger(StreamThrottle.class);

    private static final long MAX_QUEUE_HIGH_WATERMARK = 10000;
    private static final long QUEUE_SIZE_BYTES = 10000000;
    private static final int LENGTH_HIGH_WATERMARK = 900000;

    private final KinesisProducer kinesisProducer;

    // Bytes accumulated in the current tumbling window.
    private final AtomicLong windowSizeBytes = new AtomicLong();

    // Written by recalculateMaxBufferSize(), read by throttle() — hence volatile.
    private volatile long queueHighWatermark = 10000;
    private volatile int tumblingWindowSize = 10;

    public StreamThrottle(KinesisProducer kinesisProducer) {
        this.kinesisProducer = kinesisProducer;
    }

    /**
     * Recalculates the queued-records high watermark from the average record
     * size over the current window. Triggered at the end of each tumbling
     * window, or immediately when a very large record is seen.
     *
     * @param counter running record count (used to detect window boundaries)
     * @param length  size in bytes of the latest record
     */
    public void recalculateMaxBufferSize(long counter, long length) {
        long currentWindowSizeBytes = windowSizeBytes.addAndGet(length);
        if (length > LENGTH_HIGH_WATERMARK || counter % tumblingWindowSize == 0) {
            // Clamp the average to at least 1 byte: a window of tiny/empty records
            // previously made the integer average 0, causing an ArithmeticException
            // (divide by zero) on the next line.
            long averageRecordSizeBytes = Math.max(1L, currentWindowSizeBytes / tumblingWindowSize);
            queueHighWatermark = Math.min(QUEUE_SIZE_BYTES / averageRecordSizeBytes, MAX_QUEUE_HIGH_WATERMARK);
            logger.trace("Current window has {} records totalling {} bytes, meaning that maxNumberOfQueuedRecords cannot exceed {}", tumblingWindowSize, currentWindowSizeBytes, queueHighWatermark);
            windowSizeBytes.set(0);
        }
    }

    /**
     * Blocks (sleeping in 1 ms increments) while the producer's outstanding
     * record count exceeds the current high watermark.
     *
     * @throws InterruptedException if interrupted while waiting
     */
    public void throttle() throws InterruptedException {
        if (kinesisProducer.getOutstandingRecordsCount() > (queueHighWatermark)) {
            long start = System.currentTimeMillis();
            while (kinesisProducer.getOutstandingRecordsCount() > (queueHighWatermark)) {
                Thread.sleep(1);
            }
            long end = System.currentTimeMillis();
            logger.debug("Paused adding records to stream for {} millis while number of queued records exceeded maxNumberOfQueuedRecords of {}", end - start, queueHighWatermark);
        }
    }
}
| 963 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/Directories.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import com.amazonaws.services.neptune.cluster.EventId;
import com.amazonaws.services.neptune.propertygraph.ExportStats;
import com.amazonaws.services.neptune.propertygraph.Label;
import com.amazonaws.services.neptune.propertygraph.NamedQueriesCollection;
import com.amazonaws.services.neptune.propertygraph.io.JsonResource;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.lang.StringUtils;
import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Encapsulates the on-disk directory layout for a single export run: the root
 * export directory plus per-element subdirectories (nodes, edges, statements,
 * results, records) and the well-known metadata files within the root.
 */
public class Directories {

    private static final String CONFIG_FILE = "config.json";
    private static final String STATS_FILE = "stats.json";
    private static final String LAST_EVENT_ID_FILE = "lastEventId.json";
    private static final String QUERIES_FILE = "queries.json";

    /**
     * Returns a URL-encoded filename of the form "name-n", where n is the next
     * value of the supplied counter.
     */
    public static String fileName(String name, AtomicInteger index) throws UnsupportedEncodingException {
        String filename = String.format("%s-%s", name, index.incrementAndGet());
        return URLEncoder.encode(filename, StandardCharsets.UTF_8.toString());
    }

    /**
     * Returns the supplied filename, URL-encoded so it is filesystem-safe.
     */
    public static String fileName(String filename) throws UnsupportedEncodingException {
        return URLEncoder.encode(filename, StandardCharsets.UTF_8.toString());
    }

    /**
     * Creates the directory layout for an export run beneath the supplied root.
     * When partitionDirectories is non-empty, output goes beneath the root
     * itself (with per-partition subdirectories); otherwise a per-export
     * subdirectory named "tag-exportId" (or just "exportId") is used.
     *
     * @throws IllegalArgumentException if root is null
     */
    public static Directories createFor(DirectoryStructure directoryStructure,
                                        File root,
                                        String exportId,
                                        String tag,
                                        String partitionDirectories) throws IOException {
        if (root == null) {
            throw new IllegalArgumentException("You must supply a directory");
        }

        Path rootDirectory = root.toPath();

        Path directory;
        if (StringUtils.isNotEmpty(partitionDirectories)) {
            directory = rootDirectory;
        } else {
            String directoryName = tag.isEmpty() ?
                    exportId :
                    String.format("%s-%s", tag, exportId);
            directory = rootDirectory.resolve(directoryName);
        }

        Path nodesDirectory = createElementDirectory("nodes", directory, partitionDirectories);
        Path edgesDirectory = createElementDirectory("edges", directory, partitionDirectories);
        Path statementsDirectory = createElementDirectory("statements", directory, partitionDirectories);
        Path resultsDirectory = createElementDirectory("results", directory, partitionDirectories);
        Path recordsDirectory = createElementDirectory("records", directory, partitionDirectories);

        // The directory structure decides which candidate directories are
        // actually created on disk.
        directoryStructure.createDirectories(
                directory,
                nodesDirectory,
                edgesDirectory,
                statementsDirectory,
                resultsDirectory,
                recordsDirectory);

        // Directories that were not created are recorded as null so the
        // createXxxFilePath methods can fall back to the records directory.
        return new Directories(
                directory,
                pathOrNull(nodesDirectory),
                pathOrNull(edgesDirectory),
                pathOrNull(statementsDirectory),
                resultsDirectory,
                pathOrNull(recordsDirectory),
                tag);
    }

    // Returns the path only if it exists on disk; null otherwise.
    private static Path pathOrNull(Path path) {
        return path.toFile().exists() ? path : null;
    }

    private static Path createElementDirectory(String name, Path directory, String partitionDirectories) {
        Path elementDirectory = directory.resolve(name);
        if (StringUtils.isNotEmpty(partitionDirectories)) {
            // Append each non-empty, slash-separated partition as a subdirectory.
            for (String partition : partitionDirectories.split("/")) {
                if (StringUtils.isNotEmpty(partition)) {
                    elementDirectory = elementDirectory.resolve(partition);
                }
            }
        }
        return elementDirectory;
    }

    private final String tag;
    private final Path directory;
    private final Path nodesDirectory;
    private final Path edgesDirectory;
    private final Path statementsDirectory;
    private final Path resultsDirectory;
    private final Path recordsDirectory;
    private final File directoryFile;

    private Directories(Path directory,
                        Path nodesDirectory,
                        Path edgesDirectory,
                        Path statementsDirectory,
                        Path resultsDirectory,
                        Path recordsDirectory,
                        String tag) {
        this.directory = directory;
        this.nodesDirectory = nodesDirectory;
        this.edgesDirectory = edgesDirectory;
        this.statementsDirectory = statementsDirectory;
        this.resultsDirectory = resultsDirectory;
        this.recordsDirectory = recordsDirectory;
        this.tag = tag;
        this.directoryFile = directory.toFile();
    }

    public void writeRootDirectoryPathAsMessage(String fileType, CommandWriter writer) {
        writer.writeMessage(fileType + " files : " + directory.toAbsolutePath().toString());
    }

    public Path writeRootDirectoryPathAsReturnValue(CommandWriter writer) {
        Path path = directory.toAbsolutePath();
        writer.writeReturnValue(path.toString());
        return path;
    }

    public long freeSpaceInGigabytes() {
        return directoryFile.getFreeSpace() / 1000000000;
    }

    public Path rootDirectory() {
        return directory.toAbsolutePath();
    }

    /** All non-null element subdirectories, as absolute paths. */
    public Collection<Path> subdirectories() {
        List<Path> paths = new ArrayList<>();
        addIfNotNull(nodesDirectory, paths);
        addIfNotNull(edgesDirectory, paths);
        addIfNotNull(statementsDirectory, paths);
        addIfNotNull(resultsDirectory, paths);
        addIfNotNull(recordsDirectory, paths);
        return paths;
    }

    private void addIfNotNull(Path path, List<Path> paths) {
        if (path != null) {
            paths.add(path.toAbsolutePath());
        }
    }

    public Path writeConfigFilePathAsReturnValue(CommandWriter writer) {
        Path path = configFilePath().toAbsolutePath();
        writer.writeReturnValue(path.toString());
        return path;
    }

    public void writeResultsDirectoryPathAsMessage(String fileType, CommandWriter writer) {
        writer.writeMessage(fileType + " files : " + resultsDirectory.toAbsolutePath().toString());
    }

    public Path createNodesFilePath(String name, FileExtension extension, Label label, boolean perLabelDirectories) {
        if (nodesDirectory == null && recordsDirectory != null) {
            // Flat layout: node files live in the records directory, prefixed.
            return createFilePath(recordsDirectory, String.format("nodes-%s", name), extension);
        } else if (perLabelDirectories) {
            return createFilePath(ensureLabelDirectory(nodesDirectory, label, "nodes"), name, extension);
        } else {
            return createFilePath(nodesDirectory, name, extension);
        }
    }

    public Path createEdgesFilePath(String name, FileExtension extension, Label label, boolean perLabelDirectories) {
        if (edgesDirectory == null && recordsDirectory != null) {
            // Flat layout: edge files live in the records directory, prefixed.
            return createFilePath(recordsDirectory, String.format("edges-%s", name), extension);
        } else if (perLabelDirectories) {
            return createFilePath(ensureLabelDirectory(edgesDirectory, label, "edges"), name, extension);
        } else {
            return createFilePath(edgesDirectory, name, extension);
        }
    }

    // Lazily creates the per-label subdirectory, double-checked under a lock so
    // concurrent writers create it only once. Shared by node and edge paths,
    // which previously duplicated this logic.
    private Path ensureLabelDirectory(Path parent, Label label, String elementType) {
        File labelDirectory = new File(parent.toFile(), label.labelsAsString());
        if (!labelDirectory.exists()) {
            synchronized (this) {
                if (!labelDirectory.exists()) {
                    try {
                        Files.createDirectories(labelDirectory.toPath());
                    } catch (IOException e) {
                        // Preserve the underlying cause (previously dropped).
                        throw new RuntimeException(String.format("Unable to create %s directory for %s", elementType, label.labelsAsString()), e);
                    }
                }
            }
        }
        return labelDirectory.toPath();
    }

    public Path createStatementsFilePath(String name, FileExtension extension) {
        if (statementsDirectory == null && recordsDirectory != null) {
            return createFilePath(recordsDirectory, name, extension);
        } else {
            return createFilePath(statementsDirectory, name, extension);
        }
    }

    public Path createQueryResultsFilePath(String directoryName, String fileName, FileExtension extension) {
        Path directory = resultsDirectory.resolve(directoryName);
        return createFilePath(directory, fileName, extension);
    }

    public void createResultsSubdirectories(Collection<String> subdirectoryNames) throws IOException {
        for (String subdirectoryName : subdirectoryNames) {
            Files.createDirectories(resultsDirectory.resolve(subdirectoryName));
        }
    }

    public JsonResource<GraphSchema, Boolean> configFileResource() {
        return new JsonResource<>("Config file",
                configFilePath().toUri(),
                GraphSchema.class);
    }

    public JsonResource<ExportStats, GraphSchema> statsFileResource() {
        return new JsonResource<>("Stats file",
                statsFilePath().toUri(),
                ExportStats.class);
    }

    public JsonResource<EventId, Object> lastEventIdFileResource() {
        return new JsonResource<>("LastEventId file",
                lastEventIdFilePath().toUri(),
                EventId.class);
    }

    public JsonResource<NamedQueriesCollection, Object> queriesResource() {
        return new JsonResource<>("Queries file",
                queriesFilePath().toUri(),
                NamedQueriesCollection.class);
    }

    public Path createFilePath(Path directory, String name, FileExtension extension) {
        String filenameWithoutExtension = tag.isEmpty() ?
                name :
                String.format("%s-%s", tag, name);
        // Hash over-long names so the resulting filename stays within typical
        // filesystem name-length limits.
        String filename = filenameWithoutExtension.getBytes().length > 250 ?
                String.format("%s.%s", DigestUtils.sha1Hex(filenameWithoutExtension), extension.extension()) :
                String.format("%s.%s", filenameWithoutExtension, extension.extension());
        return directory.resolve(filename);
    }

    private Path configFilePath() {
        return directory.resolve(CONFIG_FILE).toAbsolutePath();
    }

    private Path statsFilePath() {
        return directory.resolve(STATS_FILE).toAbsolutePath();
    }

    private Path lastEventIdFilePath() {
        return directory.resolve(LAST_EVENT_ID_FILE).toAbsolutePath();
    }

    private Path queriesFilePath() {
        return directory.resolve(QUERIES_FILE).toAbsolutePath();
    }

    public Path debugFilePath(String name) {
        return directory.resolve(name + ".txt").toAbsolutePath();
    }
}
| 964 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/KinesisConfig.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import com.amazonaws.services.kinesis.producer.*;
import com.amazonaws.services.neptune.cli.AbstractTargetModule;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Holds the (optional) Amazon Kinesis Data Streams target configuration.
 * When region or stream name are absent, the config is constructed in an
 * unconfigured state and {@link #stream()} throws on access.
 */
public class KinesisConfig {
    private final Stream stream;
    private static final Logger logger = LoggerFactory.getLogger(KinesisConfig.class);
    /**
     * @deprecated Use {@link #KinesisConfig(AbstractTargetModule)} instead.
     * Adapts the legacy argument list to an anonymous AbstractTargetModule.
     */
    @Deprecated
    public KinesisConfig(String streamName, String region, LargeStreamRecordHandlingStrategy largeStreamRecordHandlingStrategy, boolean enableAggregation) {
        this(new AbstractTargetModule() {
            @Override
            protected DirectoryStructure directoryStructure() {
                return null;
            }
            @Override
            public String getStreamName() {
                return streamName;
            }
            @Override
            public String getRegion() {
                return region;
            }
            @Override
            public LargeStreamRecordHandlingStrategy getLargeStreamRecordHandlingStrategy() {
                return largeStreamRecordHandlingStrategy;
            }
            @Override
            public boolean isEnableAggregation() {
                return enableAggregation;
            }
        });
    }
    public KinesisConfig(AbstractTargetModule targetModule) {
        // Only build a producer when both region and stream name are present;
        // otherwise leave the stream null (unconfigured).
        if (StringUtils.isNotEmpty(targetModule.getRegion()) && StringUtils.isNotEmpty(targetModule.getStreamName())) {
            logger.trace("Constructing new KinesisConfig for stream name: {}, in region: {}, with LargeStreamRecordHandlingStrategy: {} and AggregationEnabled={}",
                    targetModule.getStreamName(), targetModule.getRegion(), targetModule.getLargeStreamRecordHandlingStrategy(), targetModule.isEnableAggregation());
            this.stream = new Stream(
                    new KinesisProducer(new KinesisProducerConfiguration()
                            .setAggregationEnabled(targetModule.isEnableAggregation())
                            .setRegion(targetModule.getRegion())
                            .setRateLimit(100)
                            .setConnectTimeout(12000)
                            .setRequestTimeout(12000)
                            // Retry queued records indefinitely rather than expiring them.
                            .setRecordTtl(Integer.MAX_VALUE)
                            .setCredentialsProvider(targetModule.getCredentialsProvider())
                    ),
                    targetModule.getStreamName(),
                    targetModule.getLargeStreamRecordHandlingStrategy());
        }
        else {
            this.stream = null;
        }
    }
    /**
     * @return the configured stream
     * @throws IllegalArgumentException if no region and stream name were supplied
     */
    public Stream stream() {
        if (stream == null) {
            throw new IllegalArgumentException("You must supply an AWS Region and Amazon Kinesis Data Stream name");
        }
        return stream;
    }
}
| 965 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/Target.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.file.Path;
import java.util.function.Supplier;
/**
 * Destinations to which export output can be written. Each target also acts
 * as a {@link CommandWriter}, choosing where messages and return values go so
 * they do not interleave with exported data.
 */
public enum Target implements CommandWriter {
    /** Writes output to files on the local filesystem. */
    files {
        @Override
        public OutputWriter createOutputWriter(Supplier<Path> pathSupplier, KinesisConfig kinesisConfig) throws IOException {
            File file = pathSupplier.get().toFile();
            // Capture existence before FileWriter creates the file.
            boolean isNewTarget = !(file.exists());
            return new PrintOutputWriter(file.getAbsolutePath(), isNewTarget, new BufferedWriter(new FileWriter(file)));
        }
        @Override
        public boolean isFileBased() {
            return true;
        }
        @Override
        public void writeReturnValue(String value) {
            System.out.println(value);
        }
    },
    /** Writes output to stdout; return values go to stderr to keep data clean. */
    stdout {
        @Override
        public OutputWriter createOutputWriter(Supplier<Path> pathSupplier, KinesisConfig kinesisConfig) throws IOException {
            return new StdOutPrintOutputWriter();
        }
        @Override
        public boolean isFileBased() {
            return false;
        }
        @Override
        public void writeReturnValue(String value) {
            System.err.println(value);
        }
    },
    /** Discards all exported output. */
    devnull {
        @Override
        public OutputWriter createOutputWriter(Supplier<Path> pathSupplier, KinesisConfig kinesisConfig) throws IOException {
            return new NoOpOutputWriter();
        }
        @Override
        public boolean isFileBased() {
            return false;
        }
        @Override
        public void writeReturnValue(String value) {
            System.err.println(value);
        }
    },
    /** Writes output to an Amazon Kinesis Data Stream, backed by a local file writer. */
    stream {
        @Override
        public OutputWriter createOutputWriter(Supplier<Path> pathSupplier, KinesisConfig kinesisConfig) throws IOException {
            Path filePath = pathSupplier.get();
            File file = filePath.toFile();
            return new FileToStreamOutputWriter(
                    new KinesisStreamPrintOutputWriter(file.getAbsolutePath(), new FileWriter(file)),
                    filePath,
                    kinesisConfig);
        }
        @Override
        public boolean isFileBased() {
            return false;
        }
        @Override
        public void writeReturnValue(String value) {
            System.out.println(value);
        }
    };
    /** Messages always go to stderr, regardless of target. */
    @Override
    public void writeMessage(String value) {
        System.err.println(value);
    }
    public abstract OutputWriter createOutputWriter(Supplier<Path> pathSupplier, KinesisConfig kinesisConfig) throws IOException;
    /** Whether this target ultimately persists output as local files. */
    public abstract boolean isFileBased();
    @Override
    public abstract void writeReturnValue(String value);
}
| 966 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/NoOpOutputWriter.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import java.io.*;
public class NoOpOutputWriter extends PrintOutputWriter {
private static final String NoOp = "NoOp";
public NoOpOutputWriter() {
this(new NoOpOutputStream(), false);
}
private NoOpOutputWriter(Writer out) {
super(NoOp, out);
}
private NoOpOutputWriter(Writer out, boolean autoFlush) {
super(NoOp, out, autoFlush);
}
private NoOpOutputWriter(OutputStream out) {
super(NoOp, out);
}
private NoOpOutputWriter(OutputStream out, boolean autoFlush) {
super(NoOp, out, autoFlush);
}
private NoOpOutputWriter(String fileName) throws FileNotFoundException {
super(fileName);
}
private NoOpOutputWriter(String fileName, String csn) throws FileNotFoundException, UnsupportedEncodingException {
super(fileName, csn);
}
private NoOpOutputWriter(File file) throws FileNotFoundException {
super(file);
}
private NoOpOutputWriter(File file, String csn) throws FileNotFoundException, UnsupportedEncodingException {
super(file, csn);
}
@Override
public void endCommit() {
flush();
}
@Override
public void close() {
flush();
}
private static class NoOpOutputStream extends OutputStream{
@Override
public void write(int b) throws IOException {
// Do nothing
}
@Override
public void write(byte[] b) throws IOException {
// Do nothing
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
// Do nothing
}
}
}
| 967 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/PrintOutputWriter.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import java.io.*;
/**
 * Base {@link OutputWriter} backed by a {@link PrintWriter}. Tracks an output
 * id (typically a file path) and whether the target was newly created.
 * Subclasses (stdout, no-op, Kinesis) override the commit/op hooks.
 */
public class PrintOutputWriter extends PrintWriter implements OutputWriter {
    private final String outputId;
    private final boolean isNewTarget;
    public PrintOutputWriter(String outputId, Writer out) {
        this(outputId, true, out);
    }
    public PrintOutputWriter(String outputId, boolean isNewTarget, Writer out) {
        super(out);
        this.outputId = outputId;
        this.isNewTarget = isNewTarget;
    }
    // The package-private constructors below mirror the PrintWriter overloads
    // and default isNewTarget to false.
    PrintOutputWriter(String outputId, Writer out, boolean autoFlush) {
        super(out, autoFlush);
        this.outputId = outputId;
        this.isNewTarget = false;
    }
    PrintOutputWriter(String outputId, OutputStream out) {
        super(out);
        this.outputId = outputId;
        this.isNewTarget = false;
    }
    PrintOutputWriter(String outputId, OutputStream out, boolean autoFlush) {
        super(out, autoFlush);
        this.outputId = outputId;
        this.isNewTarget = false;
    }
    PrintOutputWriter(String fileName) throws FileNotFoundException {
        super(fileName);
        this.outputId = fileName;
        this.isNewTarget = false;
    }
    PrintOutputWriter(String fileName, String csn) throws FileNotFoundException, UnsupportedEncodingException {
        super(fileName, csn);
        this.outputId = fileName;
        this.isNewTarget = false;
    }
    PrintOutputWriter(File file) throws FileNotFoundException {
        super(file);
        this.outputId = file.getAbsolutePath();
        this.isNewTarget = false;
    }
    PrintOutputWriter(File file, String csn) throws FileNotFoundException, UnsupportedEncodingException {
        super(file, csn);
        this.outputId = file.getAbsolutePath();
        this.isNewTarget = false;
    }
    @Override
    public boolean isNewTarget() {
        return isNewTarget;
    }
    @Override
    public String outputId() {
        return outputId;
    }
    @Override
    public void startCommit() {
        // Do nothing
    }
    @Override
    public void endCommit() {
        flush();
    }
    @Override
    public Writer writer() {
        return this;
    }
    @Override
    public void startOp() {
        // Do nothing
    }
    @Override
    public void endOp() {
        // Do nothing
    }
    @Override
    public String lineSeparator() {
        return System.lineSeparator();
    }
    @Override
    public void close() {
        // Flush explicitly before closing the underlying writer.
        super.flush();
        super.close();
    }
}
| 968 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/StatusOutputFormat.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
/**
 * Output format for reporting export progress/status: no output, a progress
 * dot, or a fuller description.
 * (NOTE(review): semantics inferred from constant names — confirm against usage.)
 */
public enum StatusOutputFormat {
    None,
    Dot,
    Description
}
| 969 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/KinesisStreamPrintOutputWriter.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import java.io.Writer;
/**
 * Writer that frames each commit as a JSON-style array: {@code startCommit()}
 * emits "[", {@code startOp()} emits a comma between ops, and
 * {@code endCommit()} emits "]" followed by a platform newline. The line
 * separator is empty so individual ops are not newline-delimited inside the
 * array.
 */
public class KinesisStreamPrintOutputWriter extends PrintOutputWriter {
    // Number of ops written in the current commit (used for comma placement).
    private int opCount;
    private static final String LINE_SEPARATOR = "";
    KinesisStreamPrintOutputWriter(String outputId, Writer out) {
        super(outputId, out);
    }
    @Override
    public void startCommit() {
        opCount = 0;
        write("[");
    }
    @Override
    public void endCommit() {
        write("]");
        write(System.lineSeparator());
    }
    @Override
    public Writer writer() {
        return this;
    }
    @Override
    public String lineSeparator(){
        return LINE_SEPARATOR;
    }
    @Override
    public void startOp() {
        // Comma-separate every op after the first within a commit.
        if (opCount > 0) {
            write(",");
        }
        opCount++;
    }
    @Override
    public void endOp(){
    }
}
| 970 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/PrinterOptionsModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.propertygraph.TokenPrefix;
import com.amazonaws.services.neptune.propertygraph.io.CsvPrinterOptions;
import com.amazonaws.services.neptune.propertygraph.io.JsonPrinterOptions;
import com.amazonaws.services.neptune.propertygraph.io.PrinterOptions;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
/**
 * CLI options controlling CSV and JSON output formatting (type definitions,
 * header escaping, cardinality strictness, separators, and token prefix).
 */
public class PrinterOptionsModule {
    @Option(name = {"--exclude-type-definitions"}, description = "Exclude type definitions from CSV column headers (optional, default 'false').")
    @Once
    private boolean excludeTypeDefinitions = false;
    @Option(name = {"--escape-csv-headers"}, description = "Escape characters in CSV column headers (optional, default 'false').")
    @Once
    private boolean escapeCsvHeaders = false;
    @Option(name = {"--strict-cardinality"}, description = "Format all set and list cardinality properties as arrays in JSON, including properties with a single value (optional, default 'false').")
    @Once
    private boolean strictCardinality = false;
    @Option(name = {"--escape-newline"}, description = "Escape newline characters in CSV files (optional, default 'false').")
    @Once
    private boolean escapeNewline = false;
    @Option(name = {"--multi-value-separator"}, description = "Separator for multi-value properties in CSV output (optional, default ';').")
    @Once
    private String multiValueSeparator = ";";
    @Option(name = {"--token-prefix"}, description = "Token prefix (optional, default '~').")
    @Once
    private String tokenPrefix = "~";
    /**
     * Builds the combined CSV + JSON printer options from the parsed CLI flags.
     * Note that the CSV option is the inverse of the CLI flag: the flag
     * excludes type definitions, the option includes them.
     */
    public PrinterOptions config(){
        CsvPrinterOptions csvPrinterOptions = CsvPrinterOptions.builder()
                .setMultiValueSeparator(multiValueSeparator)
                .setIncludeTypeDefinitions(!excludeTypeDefinitions)
                .setEscapeCsvHeaders(escapeCsvHeaders)
                .setEscapeNewline(escapeNewline)
                .setTokenPrefix(new TokenPrefix(tokenPrefix))
                .build();
        JsonPrinterOptions jsonPrinterOptions = JsonPrinterOptions.builder()
                .setStrictCardinality(strictCardinality)
                .setTokenPrefix(new TokenPrefix(tokenPrefix))
                .build();
        return new PrinterOptions(csvPrinterOptions, jsonPrinterOptions);
    }
}
| 971 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/PropertyGraphRangeModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.propertygraph.RangeConfig;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
/**
 * CLI module collecting range/paging options for a property-graph export:
 * per-request fetch size, skip/limit windows, and optional approximate
 * node/edge counts.
 */
public class PropertyGraphRangeModule {

    // -1 signals that no explicit range size was supplied on the command line.
    @Option(name = {"-r", "--range", "--range-size"}, description = "Number of items to fetch per request (optional).")
    @Once
    private long rangeSize = -1;

    // Cap on the total number of exported items; unlimited by default.
    @Option(name = {"--limit"}, description = "Maximum number of items to export (optional).")
    @Once
    private long limit = Long.MAX_VALUE;

    // Number of leading items to skip before exporting.
    @Option(name = {"--skip"}, description = "Number of items to skip (optional).")
    @Once
    private long skip = 0;

    // -1 signals "unknown" for the approximate counts below.
    @Option(name = {"--approx-node-count"}, description = "Approximate number of nodes in the graph.")
    @Once
    private long approxNodeCount = -1;

    @Option(name = {"--approx-edge-count"}, description = "Approximate number of edges in the graph.")
    @Once
    private long approxEdgeCount = -1;

    /**
     * Bundles the parsed command-line values into a {@link RangeConfig}.
     *
     * @return range configuration built from rangeSize, skip, limit and the approximate counts
     */
    public RangeConfig config(){
        final RangeConfig rangeConfig =
                new RangeConfig(rangeSize, skip, limit, approxNodeCount, approxEdgeCount);
        return rangeConfig;
    }
}
| 972 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/PropertyGraphSchemaInferencingModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.propertygraph.SchemaSamplingSpecification;
import com.amazonaws.services.neptune.propertygraph.schema.ExportSpecification;
import com.amazonaws.services.neptune.propertygraph.schema.CreateGraphSchemaCommand;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import java.util.Collection;
/**
 * CLI module controlling how the property-graph schema is inferred:
 * either a full scan of the graph, or a sample of a configurable size.
 */
public class PropertyGraphSchemaInferencingModule {

    // When true, schema is generated from a subset of nodes/edges instead of a full scan.
    @Option(name = {"--sample"}, description = "Select only a subset of nodes and edges when generating schema.")
    @Once
    private boolean sample = false;

    // Number of elements to sample when --sample is enabled.
    @Option(name = {"--sample-size"}, description = "Schema sample size (optional, default 1000).")
    @Once
    private long sampleSize = 1000;

    public PropertyGraphSchemaInferencingModule(){
    }

    /**
     * @return {@code true} when sampling is disabled, i.e. schema inference scans everything
     */
    public boolean isFullScan(){
        if (sample) {
            return false;
        }
        return true;
    }

    /**
     * Creates the schema-creation command configured with the sampling settings.
     *
     * @param exportSpecifications specifications describing what to export
     * @param g                    traversal source used to read the graph
     * @return a command that builds the graph schema
     */
    public CreateGraphSchemaCommand createSchemaCommand(Collection<ExportSpecification> exportSpecifications,
                                                        GraphTraversalSource g){
        final SchemaSamplingSpecification samplingSpecification =
                new SchemaSamplingSpecification(sample, sampleSize);
        return samplingSpecification.createSchemaCommand(exportSpecifications, g);
    }
}
| 973 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/PropertyGraphConcurrencyModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.cluster.ConcurrencyConfig;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
/**
 * CLI module exposing the export concurrency setting (number of parallel queries).
 */
public class PropertyGraphConcurrencyModule {

    @Option(name = {"-cn", "--concurrency"}, description = "Concurrency – the number of parallel queries used to run the export (optional, default 4).")
    @Once
    private int concurrency = 4;

    /**
     * @return concurrency configuration, with concurrent operations allowed
     */
    public ConcurrencyConfig config(){
        return config(true);
    }

    /**
     * @param allowConcurrentOperations when {@code false}, forces a concurrency of 1
     *                                  regardless of the command-line value
     * @return concurrency configuration honouring the flag
     */
    public ConcurrencyConfig config(boolean allowConcurrentOperations){
        if (allowConcurrentOperations) {
            return new ConcurrencyConfig(concurrency);
        }
        return new ConcurrencyConfig(1);
    }
}
| 974 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/FeatureToggleModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.export.FeatureToggle;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.AllowedEnumValues;
import java.util.Collection;
import java.util.HashSet;
/**
 * CLI module collecting hidden {@code --feature-toggle} flags used to enable
 * experimental features.
 */
public class FeatureToggleModule {

    // Set of enabled feature toggles; empty means no experimental features.
    @Option(name = {"--feature-toggle"}, description = "Name of a feature to enable.", hidden = true)
    @AllowedEnumValues(FeatureToggle.class)
    private Collection<FeatureToggle> featureToggles = new HashSet<>();

    /**
     * @return the enabled toggles wrapped in a {@link FeatureToggles} value object
     */
    public FeatureToggles featureToggles() {
        final FeatureToggles toggles = new FeatureToggles(featureToggles);
        return toggles;
    }
}
| 975 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/PropertyGraphSerializationModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.propertygraph.NeptuneGremlinClient;
import com.amazonaws.services.neptune.propertygraph.io.SerializationConfig;
import com.amazonaws.services.neptune.propertygraph.schema.TokensOnly;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.AllowedEnumValues;
import com.github.rvesse.airline.annotations.restrictions.AllowedValues;
import com.github.rvesse.airline.annotations.restrictions.Once;
import org.apache.tinkerpop.gremlin.driver.ser.Serializers;
/**
 * CLI module collecting Gremlin driver serialization settings: message
 * serializer, optional JanusGraph serializer, max content length and batch size.
 */
public class PropertyGraphSerializationModule {

    @Option(name = {"--serializer"}, description = "Message serializer – (optional, default 'GRAPHBINARY_V1D0').")
    @AllowedEnumValues(Serializers.class)
    @Once
    private String serializer = Serializers.GRAPHBINARY_V1D0.name();

    @Option(name = {"--janus"}, description = "Use JanusGraph serializer.")
    @Once
    private boolean useJanusSerializer = false;

    // Upper bound (in bytes) on response frame size accepted by the driver.
    @Option(name = {"--max-content-length"}, description = "Max content length (optional, default 50000000).")
    @Once
    private int maxContentLength = 50000000;

    @Option(name = {"-b", "--batch-size"}, description = "Batch size (optional, default 64). Reduce this number if your queries trigger CorruptedFrameExceptions.")
    @Once
    private int batchSize = NeptuneGremlinClient.DEFAULT_BATCH_SIZE;

    /**
     * @return serialization configuration assembled from the command-line values
     */
    public SerializationConfig config(){
        final SerializationConfig serializationConfig =
                new SerializationConfig(serializer, maxContentLength, batchSize, useJanusSerializer);
        return serializationConfig;
    }
}
| 976 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/CommonConnectionModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.AmazonNeptune;
import com.amazonaws.services.neptune.cluster.ConnectionConfig;
import com.amazonaws.services.neptune.cluster.NeptuneClusterMetadata;
import com.amazonaws.services.neptune.cluster.ProxyConfig;
import com.amazonaws.services.neptune.export.EndpointValidator;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.*;
import org.apache.commons.lang.StringUtils;
import javax.inject.Inject;
import java.util.Collection;
import java.util.HashSet;
import java.util.function.Supplier;
/**
 * CLI module collecting Neptune connection settings: endpoints or cluster ID,
 * port, IAM auth and SSL flags, and optional load-balancer/proxy configuration.
 */
public class CommonConnectionModule {
    @Inject
    private CredentialProfileModule credentialProfileModule = new CredentialProfileModule();

    @Option(name = {"-e", "--endpoint"}, description = "Neptune endpoint(s) – supply multiple instance endpoints if you want to load balance requests across a cluster.", title = "endpoint")
    @RequireSome(tag = "endpoint or clusterId")
    private Collection<String> endpoints = new HashSet<>();

    @Option(name = {"--cluster-id", "--cluster", "--clusterid"}, description = "ID of an Amazon Neptune cluster. If you specify a cluster ID, neptune-export will use all of the instance endpoints in the cluster in addition to any endpoints you have specified using the endpoint options.")
    @Once
    @RequireSome(tag = "endpoint or clusterId")
    private String clusterId;

    @Option(name = {"-p", "--port"}, description = "Neptune port (optional, default 8182).")
    @Port(acceptablePorts = {PortType.SYSTEM, PortType.USER})
    @Once
    private int port = 8182;

    @Option(name = {"--use-iam-auth"}, description = "Use IAM database authentication to authenticate to Neptune (remember to set the SERVICE_REGION environment variable).")
    @Once
    private boolean useIamAuth = false;

    // Deprecated: SSL is always on unless --disable-ssl is supplied; this flag is
    // retained only for backward compatibility of the CLI surface.
    @Option(name = {"--use-ssl"}, description = "Enables connectivity over SSL. This option is deprecated: neptune-export will always connect via SSL unless you use --disable-ssl to explicitly disable connectivity over SSL.")
    @Once
    private boolean useSsl = true;

    @Option(name = {"--disable-ssl"}, description = "Disables connectivity over SSL.")
    @Once
    private boolean disableSsl = false;

    @Option(name = {"--nlb-endpoint"}, description = "Network load balancer endpoint (optional: use only if connecting to an IAM DB enabled Neptune cluster through a network load balancer (NLB) – see https://github.com/aws-samples/aws-dbs-refarch-graph/tree/master/src/connecting-using-a-load-balancer#connecting-to-amazon-neptune-from-clients-outside-the-neptune-vpc-using-aws-network-load-balancer).")
    @Once
    @MutuallyExclusiveWith(tag = "proxy-endpoint")
    private String networkLoadBalancerEndpoint;

    @Option(name = {"--alb-endpoint"}, description = "Application load balancer endpoint (optional: use only if connecting to an IAM DB enabled Neptune cluster through an application load balancer (ALB) – see https://github.com/aws-samples/aws-dbs-refarch-graph/tree/master/src/connecting-using-a-load-balancer#connecting-to-amazon-neptune-from-clients-outside-the-neptune-vpc-using-aws-application-load-balancer).")
    @Once
    @MutuallyExclusiveWith(tag = "proxy-endpoint")
    private String applicationLoadBalancerEndpoint;

    @Option(name = {"--lb-port"}, description = "Load balancer port (optional, default 80).")
    @Port(acceptablePorts = {PortType.SYSTEM, PortType.USER})
    @Once
    private int loadBalancerPort = 80;

    @Option(name = {"--proxy-endpoint"}, description = "Proxy endpoint (optional: use only if connecting to an IAM DB enabled Neptune cluster through a proxy such as a bastion host).")
    @Once
    @MutuallyExclusiveWith(tag = "proxy-endpoint")
    private String proxyEndpoint;

    @Option(name = {"--proxy-port"}, description = "Proxy port (optional, default 8182).")
    @Port(acceptablePorts = {PortType.SYSTEM, PortType.USER})
    @Once
    private int proxyPort = 8182;

    // Fix: removed a spurious @Port(acceptablePorts = ...) restriction that had been
    // copy-pasted onto this boolean flag from the port options above.
    @Option(name = {"--proxy-remove-host-header"}, description = "Remove Host header after Sigv4 signing request to be forwarded via proxy.")
    @Once
    private boolean removeProxyHostHeader = false;

    // Supplies the AmazonNeptune management client used to resolve cluster metadata.
    private final Supplier<AmazonNeptune> amazonNeptuneClientSupplier;

    public CommonConnectionModule(Supplier<AmazonNeptune> amazonNeptuneClientSupplier) {
        this.amazonNeptuneClientSupplier = amazonNeptuneClientSupplier;
    }

    /**
     * Resolves cluster metadata, preferring the cluster ID when supplied and
     * otherwise deriving it from the configured endpoints.
     */
    public NeptuneClusterMetadata clusterMetadata(){
        if (StringUtils.isNotEmpty(clusterId)) {
            return NeptuneClusterMetadata.createFromClusterId(clusterId, amazonNeptuneClientSupplier);
        } else {
            return NeptuneClusterMetadata.createFromEndpoints(endpoints, amazonNeptuneClientSupplier);
        }
    }

    /**
     * Builds the connection configuration. When a cluster ID is supplied, all of the
     * cluster's instance endpoints are added to any explicitly supplied endpoints.
     *
     * @return connection configuration including any NLB/ALB/proxy settings
     * @throws IllegalStateException if neither a cluster ID nor any endpoint was supplied
     */
    public ConnectionConfig config() {
        if (StringUtils.isNotEmpty(clusterId)) {
            endpoints.addAll(clusterMetadata().endpoints());
        }

        if (endpoints.isEmpty()) {
            throw new IllegalStateException("You must supply a cluster ID or one or more endpoints");
        }

        // At most one of NLB, ALB or generic proxy may be configured (enforced by
        // the @MutuallyExclusiveWith restrictions above); pick whichever is present.
        ProxyConfig proxyConfig = null;
        if (StringUtils.isNotEmpty(networkLoadBalancerEndpoint)) {
            proxyConfig = new ProxyConfig(networkLoadBalancerEndpoint, loadBalancerPort, false);
        } else if (StringUtils.isNotEmpty(applicationLoadBalancerEndpoint)) {
            proxyConfig = new ProxyConfig(applicationLoadBalancerEndpoint, loadBalancerPort, true);
        } else if (StringUtils.isNotEmpty(proxyEndpoint)) {
            proxyConfig = new ProxyConfig(proxyEndpoint, proxyPort, removeProxyHostHeader);
        }

        return new ConnectionConfig(
                clusterId,
                EndpointValidator.validate(endpoints),
                port,
                useIamAuth,
                !disableSsl,
                proxyConfig,
                credentialProfileModule.getCredentialsProvider()
        );
    }
}
| 977 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/GraphSchemaProviderModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.propertygraph.io.JsonResource;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.MutuallyExclusiveWith;
import org.apache.commons.lang.StringUtils;
import java.io.IOException;
import java.net.URI;
/**
 * CLI module supplying a property-graph schema either from a config file
 * (file path, https or s3 URI) or from inline JSON. The two sources are
 * mutually exclusive.
 */
public class GraphSchemaProviderModule {

    @Option(name = {"-c", "--config-file", "--filter-config-file"}, description = "Path to JSON schema config file (file path, or 'https' or 's3' URI).")
    @MutuallyExclusiveWith(tag = "configFile or config")
    private URI configFile;

    @Option(name = {"--config", "--filter"}, description = "JSON schema for property graph.")
    @MutuallyExclusiveWith(tag = "configFile or config")
    private String configJson;

    // When true, the caller requires that one of the two config sources is supplied.
    private final boolean configIsMandatory;

    public GraphSchemaProviderModule(boolean configIsMandatory) {
        this.configIsMandatory = configIsMandatory;
    }

    /**
     * Resolves the graph schema. Precedence: config file URI, then inline JSON,
     * then an empty schema (unless configuration is mandatory, in which case an
     * {@link IllegalStateException} is thrown).
     *
     * @return the resolved {@link GraphSchema}
     * @throws IOException if the config file cannot be read or the JSON cannot be parsed
     */
    public GraphSchema graphSchema() throws IOException {
        if (configFile != null) {
            JsonResource<GraphSchema, Boolean> resource =
                    new JsonResource<>("Config file", configFile, GraphSchema.class);
            return resource.get();
        }
        if (StringUtils.isNotEmpty(configJson)) {
            return GraphSchema.fromJson(new ObjectMapper().readTree(configJson));
        }
        if (configIsMandatory) {
            throw new IllegalStateException("You must supply either a configuration file URI or inline configuration JSON");
        }
        return new GraphSchema();
    }
}
| 978 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/ProfilesModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.profiles.ProfilesConfig;
import com.github.rvesse.airline.annotations.Option;
import java.util.Collection;
import java.util.HashSet;
/**
 * CLI module collecting the names of export profiles to apply.
 */
public class ProfilesModule {

    // Zero or more profile names; empty means no profile is applied.
    @Option(name = {"--profile"}, description = "Name of an export profile.")
    private Collection<String> profiles = new HashSet<>();

    /**
     * @return the selected profiles wrapped in a {@link ProfilesConfig}
     */
    public ProfilesConfig config() {
        final ProfilesConfig profilesConfig = new ProfilesConfig(profiles);
        return profilesConfig;
    }
}
| 979 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/RdfTargetModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.io.*;
import com.amazonaws.services.neptune.rdf.io.RdfExportFormat;
import com.amazonaws.services.neptune.rdf.io.RdfTargetConfig;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.*;
/**
 * Target module for RDF exports: adds the RDF output format option on top of
 * the common target options inherited from {@link AbstractTargetModule}.
 */
public class RdfTargetModule extends AbstractTargetModule {

    @Option(name = {"--format"}, description = "Output format (optional, default 'turtle').")
    @Once
    @AllowedEnumValues(RdfExportFormat.class)
    private RdfExportFormat format = RdfExportFormat.turtle;

    /**
     * Builds the RDF target configuration for the given output directories.
     *
     * @param directories resolved output directory structure
     * @return RDF target configuration including Kinesis settings and the chosen format
     */
    public RdfTargetConfig config(Directories directories) {
        final KinesisConfig kinesisConfig = new KinesisConfig(this);
        return new RdfTargetConfig(directories, kinesisConfig, getOutput(), format);
    }

    /**
     * Chooses the on-disk layout: the streams-specific layout for the simple
     * streams JSON format, otherwise the standard RDF layout.
     */
    @Override
    protected DirectoryStructure directoryStructure(){
        return format == RdfExportFormat.neptuneStreamsSimpleJson
                ? DirectoryStructure.SimpleStreamsOutput
                : DirectoryStructure.Rdf;
    }
}
| 980 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/AwsCliModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.neptune.AmazonNeptune;
import com.amazonaws.services.neptune.AmazonNeptuneClientBuilder;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
import org.apache.commons.lang.StringUtils;
import javax.inject.Inject;
import java.util.function.Supplier;
/**
 * CLI module that supplies an {@link AmazonNeptune} management client, with
 * hidden options to override the endpoint URL and region, and optional
 * credentials from the injected credential-profile module.
 */
public class AwsCliModule implements Supplier<AmazonNeptune> {

    @Inject
    private CredentialProfileModule credentialProfileModule = new CredentialProfileModule();

    @Option(name = {"--aws-cli-endpoint-url"}, description = "AWS CLI endpoint URL.", hidden = true)
    @Once
    private String awsCliEndpointUrl;

    @Option(name = {"--aws-cli-region"}, description = "AWS CLI region.", hidden = true)
    @Once
    private String awsCliRegion;

    /**
     * Builds the client. An endpoint override is applied only when both the
     * endpoint URL and region are supplied; credentials and region from the
     * credential-profile module are applied when a credentials provider exists.
     */
    @Override
    public AmazonNeptune get() {
        AmazonNeptuneClientBuilder clientBuilder = AmazonNeptuneClientBuilder.standard();

        boolean hasEndpointOverride =
                StringUtils.isNotEmpty(awsCliEndpointUrl) && StringUtils.isNotEmpty(awsCliRegion);
        if (hasEndpointOverride) {
            AwsClientBuilder.EndpointConfiguration endpointConfiguration =
                    new AwsClientBuilder.EndpointConfiguration(awsCliEndpointUrl, awsCliRegion);
            clientBuilder = clientBuilder.withEndpointConfiguration(endpointConfiguration);
        }

        if (credentialProfileModule.getCredentialsProvider() != null) {
            clientBuilder = clientBuilder
                    .withCredentials(credentialProfileModule.getCredentialsProvider())
                    .withRegion(credentialProfileModule.getRegionProvider().getRegion());
        }

        return clientBuilder.build();
    }
}
| 981 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/NeptuneStreamsModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.cluster.*;
import com.amazonaws.services.neptune.propertygraph.io.JsonResource;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
/**
 * CLI module controlling whether the last Neptune stream event ID is fetched
 * and saved to a JSON file at the end of an export.
 */
public class NeptuneStreamsModule {

    @Option(name = {"--include-last-event-id"}, description = "Get the last event ID from the Amazon Neptune stream, if enabled, and save it to a JSON file (optional, default 'false').")
    @Once
    private boolean includeLastEventId = false;

    /**
     * Selects the strategy for obtaining the last event ID.
     *
     * @param cluster         cluster to query for the event ID
     * @param eventIdResource JSON resource to which the event ID is saved
     * @return a task that fetches the ID when enabled, otherwise a no-op strategy
     */
    public GetLastEventIdStrategy lastEventIdStrategy(Cluster cluster, JsonResource<EventId, Object> eventIdResource){
        return includeLastEventId
                ? new GetLastEventIdTask(cluster, eventIdResource)
                : new DoNotGetLastEventIdTask();
    }
}
| 982 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/PropertyGraphScopeModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.propertygraph.*;
import com.amazonaws.services.neptune.propertygraph.schema.ExportSpecification;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;
import com.amazonaws.services.neptune.propertygraph.schema.TokensOnly;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.AllowedEnumValues;
import com.github.rvesse.airline.annotations.restrictions.Once;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
 * CLI module describing what to export from a property graph: node/edge label
 * filters, the overall scope, tokens-only mode, and the edge-label strategy.
 */
public class PropertyGraphScopeModule {

    @Option(name = {"-nl", "--node-label"}, description = "Labels of nodes to be included in config (optional, default all labels).",
            arity = 1)
    private List<String> nodeLabels = new ArrayList<>();

    @Option(name = {"-el", "--edge-label"}, description = "Labels of edges to be included in config (optional, default all labels).",
            arity = 1)
    private List<String> edgeLabels = new ArrayList<>();

    @Option(name = {"-s", "--scope"}, description = "Scope (optional, default 'all').")
    @Once
    @AllowedEnumValues(Scope.class)
    private Scope scope = Scope.all;

    @Option(name = {"--tokens-only"}, description = "Export tokens (~id, ~label, ~from, ~to) only (optional, default 'off').")
    @Once
    @AllowedEnumValues(TokensOnly.class)
    private TokensOnly tokensOnly = TokensOnly.off;

    @Option(name = {"--edge-label-strategy"}, description = "Export edges by their edge labels, or by a combination of their start vertex label, edge label, and end vertex label (optional, default 'edgeLabelsOnly').")
    @Once
    @AllowedEnumValues(EdgeLabelStrategy.class)
    private EdgeLabelStrategy edgeLabelStrategy = EdgeLabelStrategy.edgeLabelsOnly;

    /**
     * Builds export specifications against an empty schema.
     */
    public Collection<ExportSpecification> exportSpecifications(ExportStats stats,
                                                                GremlinFilters gremlinFilters,
                                                                FeatureToggles featureToggles){
        return exportSpecifications(new GraphSchema(), gremlinFilters, stats, featureToggles);
    }

    /**
     * Builds export specifications for the supplied schema, applying the label
     * filters, tokens-only mode and edge-label strategy configured above.
     */
    public Collection<ExportSpecification> exportSpecifications(GraphSchema graphSchema,
                                                                GremlinFilters gremlinFilters,
                                                                ExportStats stats,
                                                                FeatureToggles featureToggles){
        final Label nodeLabelFilter = Label.forLabels(nodeLabels);
        final Label edgeLabelFilter = Label.forLabels(edgeLabels);
        return scope.exportSpecifications(
                graphSchema,
                nodeLabelFilter,
                edgeLabelFilter,
                gremlinFilters,
                tokensOnly,
                edgeLabelStrategy,
                stats,
                featureToggles);
    }
}
| 983 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/AbstractTargetModule.java
|
package com.amazonaws.services.neptune.cli;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.neptune.io.CommandWriter;
import com.amazonaws.services.neptune.io.Directories;
import com.amazonaws.services.neptune.io.DirectoryStructure;
import com.amazonaws.services.neptune.io.LargeStreamRecordHandlingStrategy;
import com.amazonaws.services.neptune.io.Target;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.AllowedEnumValues;
import com.github.rvesse.airline.annotations.restrictions.Once;
import com.github.rvesse.airline.annotations.restrictions.PathKind;
import com.github.rvesse.airline.annotations.restrictions.Required;
import org.apache.commons.lang.StringUtils;
import javax.inject.Inject;
import java.io.File;
import java.io.IOException;
import java.util.UUID;
import static com.amazonaws.services.neptune.util.AWSCredentialsUtil.getSTSAssumeRoleCredentialsProvider;
/**
 * Base CLI module for export targets. Collects the output directory, tag,
 * output target (files/stdout/stream), Kinesis stream settings, and export ID,
 * and creates the on-disk directory structure for an export run.
 */
public abstract class AbstractTargetModule implements CommandWriter {
    @Inject
    private CredentialProfileModule credentialProfileModule = new CredentialProfileModule();

    @Option(name = {"-d", "--dir"}, description = "Root directory for output.")
    @Required
    @com.github.rvesse.airline.annotations.restrictions.Path(mustExist = false, kind = PathKind.DIRECTORY)
    @Once
    private File directory;

    @Option(name = {"-t", "--tag"}, description = "Directory prefix (optional).")
    @Once
    private String tag = "";

    @Option(name = {"-o", "--output"}, description = "Output target (optional, default 'file').")
    @Once
    @AllowedEnumValues(Target.class)
    private Target output = Target.files;

    @Option(name = {"--stream-name"}, description = "Name of an Amazon Kinesis Data Stream.")
    @Once
    private String streamName;

    @Option(name = {"--region", "--stream-region"}, description = "AWS Region in which your Amazon Kinesis Data Stream is located.")
    @Once
    private String region;

    @Option(name = {"--stream-large-record-strategy"}, description = "Strategy for dealing with records to be sent to Amazon Kinesis that are larger than 1 MB.")
    @Once
    @AllowedEnumValues(LargeStreamRecordHandlingStrategy.class)
    private LargeStreamRecordHandlingStrategy largeStreamRecordHandlingStrategy = LargeStreamRecordHandlingStrategy.splitAndShred;

    // Fix: removed a stray ')' at the end of the original description text.
    @Option(name = {"--disable-stream-aggregation"}, description = "Disable aggregation of Kinesis Data Stream records.")
    @Once
    private boolean disableAggregation = false;

    @Option(name = {"--stream-role-arn"}, description = "Optional. Assume specified role for upload to Kinesis stream.")
    @Once
    private String streamRoleArn = null;

    @Option(name = {"--stream-role-session-name"}, description = "Optional. To be used with '--stream-role-arn'. Use specified session name when assuming stream role.")
    @Once
    private String streamRoleSessionName = "Neptune-Export";

    @Option(name = {"--stream-role-external-id"}, description = "Optional. To be used with '--stream-role-arn'. Use specified external id when assuming stream role.")
    @Once
    private String streamRoleExternalId = null;

    // Defaults to a fresh UUID (dashes stripped) so each run gets a unique directory.
    @Option(name = {"--export-id"}, description = "Export ID")
    @Once
    private String exportId = UUID.randomUUID().toString().replace("-", "");

    @Option(name = {"--partition-directories"}, description = "Partition directory path (e.g. 'year=2021/month=07/day=21').")
    @Once
    private String partitionDirectories = "";

    public AbstractTargetModule() {}

    public AbstractTargetModule(Target target) {
        this.output = target;
    }

    public File getDirectory() {
        return directory;
    }

    public String getTag() {
        return tag;
    }

    public Target getOutput() {
        return output;
    }

    public String getStreamName() {
        return streamName;
    }

    public String getRegion() {
        return region;
    }

    public LargeStreamRecordHandlingStrategy getLargeStreamRecordHandlingStrategy() {
        return largeStreamRecordHandlingStrategy;
    }

    public boolean isEnableAggregation() {
        return !disableAggregation;
    }

    /**
     * Creates the output directories using the subclass-supplied layout.
     * Fix: now delegates to the overload instead of duplicating the
     * Directories.createFor(...) call.
     */
    public Directories createDirectories() throws IOException {
        return createDirectories(directoryStructure());
    }

    /**
     * Creates the output directories using an explicitly supplied layout.
     */
    public Directories createDirectories(DirectoryStructure directoryStructure) throws IOException {
        return Directories.createFor(directoryStructure, directory, exportId, tag, partitionDirectories);
    }

    @Override
    public void writeReturnValue(String value){
        output.writeReturnValue(value);
    }

    @Override
    public void writeMessage(String value) {
        output.writeMessage(value);
    }

    // Subclasses choose the on-disk layout appropriate to their export format.
    protected abstract DirectoryStructure directoryStructure();

    /**
     * Returns credentials for the Kinesis upload: the profile credentials by
     * default, or an STS assume-role provider when --stream-role-arn is set.
     */
    public AWSCredentialsProvider getCredentialsProvider() {
        if (StringUtils.isEmpty(streamRoleArn)) {
            return credentialProfileModule.getCredentialsProvider();
        }
        return getSTSAssumeRoleCredentialsProvider(streamRoleArn, streamRoleSessionName, streamRoleExternalId, credentialProfileModule.getCredentialsProvider(), region);
    }
}
| 984 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/GremlinFiltersModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.propertygraph.GremlinFilters;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
/**
 * CLI options supplying Gremlin filter fragments that are applied to node and
 * edge export traversals. Per the option descriptions, the element-specific
 * filters override the shared {@code --gremlin-filter}.
 */
public class GremlinFiltersModule {

    @Option(name = {"--gremlin-node-filter"}, description = "Gremlin steps for filtering nodes (overrides --gremlin-filter).")
    @Once
    private String gremlinNodeFilter;

    @Option(name = {"--gremlin-edge-filter"}, description = "Gremlin steps for filtering edges (overrides --gremlin-filter).")
    @Once
    private String gremlinEdgeFilter;

    @Option(name = {"--gremlin-filter"}, description = "Gremlin steps for filtering nodes and edges.")
    @Once
    private String gremlinFilter;

    @Option(name = {"--filter-edges-early"}, description = "Configures edge exports to apply all filters to the " +
            "traversal before adding range() steps for concurrency. Results in faster exports for simple fast filters which remove most results.")
    @Once
    private boolean filterEdgesEarly = false;

    /** Bundles the raw filter strings and the early-filtering flag into a {@code GremlinFilters} value object. */
    public GremlinFilters filters(){
        return new GremlinFilters(gremlinFilter, gremlinNodeFilter, gremlinEdgeFilter, filterEdgesEarly);
    }
}
| 985 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/RdfExportScopeModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.rdf.*;
import com.amazonaws.services.neptune.rdf.io.*;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.AllowedEnumValues;
import com.github.rvesse.airline.annotations.restrictions.Once;
import org.apache.commons.lang.StringUtils;
/**
 * CLI options determining the scope of an RDF export and creating the
 * corresponding export job (whole graph, edges only, or a single SPARQL query).
 */
public class RdfExportScopeModule {

    @Option(name = {"--rdf-export-scope"}, description = "Export scope (optional, default 'graph').")
    @Once
    @AllowedEnumValues(RdfExportScope.class)
    private RdfExportScope scope = RdfExportScope.graph;

    @Option(name = {"--sparql"}, description = "SPARQL query.")
    @Once
    private String query;

    /**
     * Creates the export job matching the requested scope.
     *
     * @throws IllegalStateException if scope is 'query' but no SPARQL query was supplied,
     *                               or if the scope value is not recognized
     */
    public ExportRdfJob createJob(NeptuneSparqlClient client, RdfTargetConfig targetConfig) {
        switch (scope) {
            case graph:
                return new ExportRdfGraphJob(client, targetConfig);
            case edges:
                return new ExportRdfEdgesJob(client, targetConfig);
            case query:
                if (StringUtils.isEmpty(query)) {
                    throw new IllegalStateException("You must supply a SPARQL query if exporting from a query");
                }
                return new ExportRdfFromQuery(client, targetConfig, query);
            default:
                throw new IllegalStateException(String.format("Unknown export scope: %s", scope));
        }
    }

    /** Name of the selected export scope. */
    public String scope() {
        return scope.name();
    }
}
| 986 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/CloneClusterModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.cluster.*;
import com.amazonaws.services.neptune.export.FeatureToggle;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.AllowedValues;
import com.github.rvesse.airline.annotations.restrictions.Once;
import com.github.rvesse.airline.annotations.restrictions.ranges.IntegerRange;
/**
 * CLI options for optionally cloning an Amazon Neptune cluster before exporting,
 * and for selecting the cluster strategy (real clone, simulated clone, or the
 * source cluster as-is) that the export will run against.
 */
public class CloneClusterModule {

    @Option(name = {"--clone-cluster"}, description = "Clone an Amazon Neptune cluster.")
    @Once
    private boolean cloneCluster = false;

    // Accepted clone instance types; both "db."-prefixed and bare forms are allowed.
    @Option(name = {"--clone-cluster-instance-type"}, description = "Instance type for cloned cluster (by default neptune-export will use the same instance type as the source cluster).")
    @Once
    @AllowedValues(allowedValues = {
            "db.r4.large",
            "db.r4.xlarge",
            "db.r4.2xlarge",
            "db.r4.4xlarge",
            "db.r4.8xlarge",
            "db.r5.large",
            "db.r5.xlarge",
            "db.r5.2xlarge",
            "db.r5.4xlarge",
            "db.r5.8xlarge",
            "db.r5.12xlarge",
            "db.r5.16xlarge",
            "db.r5.24xlarge",
            "db.r5d.large",
            "db.r5d.xlarge",
            "db.r5d.2xlarge",
            "db.r5d.4xlarge",
            "db.r5d.8xlarge",
            "db.r5d.12xlarge",
            "db.r5d.16xlarge",
            "db.r5d.24xlarge",
            "db.r6g.large",
            "db.r6g.xlarge",
            "db.r6g.2xlarge",
            "db.r6g.4xlarge",
            "db.r6g.8xlarge",
            "db.r6g.12xlarge",
            "db.r6g.16xlarge",
            "db.x2g.large",
            "db.x2g.xlarge",
            "db.x2g.2xlarge",
            "db.x2g.4xlarge",
            "db.x2g.8xlarge",
            "db.x2g.12xlarge",
            "db.x2g.16xlarge",
            "db.t3.medium",
            "db.t4g.medium",
            "r4.large",
            "r4.xlarge",
            "r4.2xlarge",
            "r4.4xlarge",
            "r4.8xlarge",
            "r5.large",
            "r5.xlarge",
            "r5.2xlarge",
            "r5.4xlarge",
            "r5.8xlarge",
            "r5.12xlarge",
            "r5.16xlarge",
            "r5.24xlarge",
            "r5d.large",
            "r5d.xlarge",
            "r5d.2xlarge",
            "r5d.4xlarge",
            "r5d.8xlarge",
            "r5d.12xlarge",
            "r5d.16xlarge",
            "r5d.24xlarge",
            "r6g.large",
            "r6g.xlarge",
            "r6g.2xlarge",
            "r6g.4xlarge",
            "r6g.8xlarge",
            "r6g.12xlarge",
            "r6g.16xlarge",
            "x2g.large",
            "x2g.xlarge",
            "x2g.2xlarge",
            "x2g.4xlarge",
            "x2g.8xlarge",
            "x2g.12xlarge",
            "x2g.16xlarge",
            "t3.medium",
            "t4g.medium"})
    private String cloneClusterInstanceType;

    @Option(name = {"--clone-cluster-replica-count"}, description = "Number of read replicas to add to the cloned cluster (default, 0).")
    @Once
    @IntegerRange(min = 0, minInclusive = true, max = 15, maxInclusive = true)
    private int replicaCount = 0;

    // -1 means no concurrency limit (hidden option).
    @Option(name = {"--clone-cluster-max-concurrency"}, description = "Limits concurrency when exporting from cloned cluster (default, no limit).", hidden = true)
    @Once
    private int maxConcurrency = -1;

    @Option(name = {"--clone-cluster-engine-version"}, description = "Cloned cluster Neptune engine version (default, latest).", hidden = true)
    @Once
    private String engineVersion;

    @Option(name = {"--clone-cluster-correlation-id"}, description = "Correlation ID to be added to a correlation-id tag on the cloned cluster.")
    @Once
    private String cloneCorrelationId;

    public CloneClusterModule() {
    }

    /**
     * Returns the cluster the export should run against. Always prints the source
     * cluster's details first. If cloning was not requested, the source cluster is
     * wrapped as-is; if the Simulate_Cloned_Cluster feature toggle is present, a
     * simulated clone is used instead of creating real resources.
     *
     * @throws Exception if cloning fails
     */
    public Cluster cloneCluster(NeptuneClusterMetadata clusterMetadata,
                                ConnectionConfig connectionConfig,
                                ConcurrencyConfig concurrencyConfig,
                                FeatureToggles featureToggles) throws Exception {
        clusterMetadata.printDetails();
        if (cloneCluster) {
            if (featureToggles.containsFeature(FeatureToggle.Simulate_Cloned_Cluster)) {
                return new SimulatedCloneCluster(clusterMetadata).cloneCluster(connectionConfig, concurrencyConfig);
            } else {
                CloneCluster command = new CloneCluster(
                        clusterMetadata,
                        cloneClusterInstanceType,
                        replicaCount,
                        maxConcurrency,
                        engineVersion,
                        cloneCorrelationId);
                return command.cloneCluster(connectionConfig, concurrencyConfig);
            }
        } else {
            return new DoNotCloneCluster(clusterMetadata).cloneCluster(connectionConfig, concurrencyConfig);
        }
    }
}
| 987 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/CredentialProfileModule.java
|
package com.amazonaws.services.neptune.cli;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.regions.AwsProfileRegionProvider;
import com.amazonaws.regions.AwsRegionProvider;
import com.amazonaws.regions.AwsRegionProviderChain;
import com.amazonaws.regions.DefaultAwsRegionProviderChain;
import com.amazonaws.services.neptune.util.AWSCredentialsUtil;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
import org.apache.commons.lang.StringUtils;
/**
 * Hidden CLI options for sourcing AWS credentials (and, by extension, a region)
 * from a named profile in a credentials config file.
 */
public class CredentialProfileModule {

    @Option(name = {"--credentials-profile"}, description = "Use profile from credentials config file.", hidden = true)
    @Once
    private String credentialsProfile;

    @Option(name = {"--credentials-config-file"}, description = "Load credentials profile from specified config file.", hidden = true)
    @Once
    private String credentialsConfigFilePath;

    /** Builds a credentials provider from the configured profile and config file path. */
    public AWSCredentialsProvider getCredentialsProvider() {
        return AWSCredentialsUtil.getProfileCredentialsProvider(credentialsProfile, credentialsConfigFilePath);
    }

    /**
     * Region resolution: without a profile, the default provider chain is used;
     * with a profile, the profile's region is consulted first, falling back to
     * the default chain.
     */
    public AwsRegionProvider getRegionProvider() {
        return StringUtils.isEmpty(credentialsProfile)
                ? new DefaultAwsRegionProviderChain()
                : new AwsRegionProviderChain(new AwsProfileRegionProvider(credentialsProfile), new DefaultAwsRegionProviderChain());
    }
}
| 988 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/PropertyGraphTargetModule.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.io.*;
import com.amazonaws.services.neptune.propertygraph.io.PrinterOptions;
import com.amazonaws.services.neptune.propertygraph.io.PropertyGraphExportFormat;
import com.amazonaws.services.neptune.propertygraph.io.PropertyGraphTargetConfig;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.*;
/**
 * CLI options describing the property-graph export target: output format,
 * file merging, and per-label directory layout.
 */
public class PropertyGraphTargetModule extends AbstractTargetModule {

    @Option(name = {"--format"}, description = "Output format (optional, default 'csv').")
    @Once
    @AllowedEnumValues(PropertyGraphExportFormat.class)
    private PropertyGraphExportFormat format = PropertyGraphExportFormat.csv;

    @Option(name = {"--merge-files"}, description = "Merge files for each vertex or edge label (currently only supports CSV files for export-pg).")
    @Once
    private boolean mergeFiles = false;

    @Option(name = {"--per-label-directories"}, description = "Create a subdirectory for each distinct vertex or edge label.")
    @Once
    private boolean perLabelDirectories = false;

    public PropertyGraphTargetModule() {
    }

    public PropertyGraphTargetModule(Target target) {
        super(target);
    }

    /**
     * Builds the target config for a property-graph export.
     *
     * @throws IllegalArgumentException if merging is requested with a non-CSV format
     */
    public PropertyGraphTargetConfig config(Directories directories, PrinterOptions printerOptions){
        // File merging is only implemented for the two CSV variants.
        if (mergeFiles && (format != PropertyGraphExportFormat.csv && format != PropertyGraphExportFormat.csvNoHeaders)){
            throw new IllegalArgumentException("Merge files is only supported for CSV formats for export-pg");
        }
        KinesisConfig kinesisConfig = new KinesisConfig(this);
        return new PropertyGraphTargetConfig(directories, kinesisConfig, printerOptions, format, getOutput(), mergeFiles, perLabelDirectories, true);
    }

    /** Human-readable description of the selected format. */
    public String description(){
        return format.description();
    }

    /** Streams simple-JSON output uses the flattened streams layout; every other format the standard property-graph layout. */
    @Override
    protected DirectoryStructure directoryStructure(){
        if (format == PropertyGraphExportFormat.neptuneStreamsSimpleJson){
            return DirectoryStructure.SimpleStreamsOutput;
        } else {
            return DirectoryStructure.PropertyGraph;
        }
    }
}
| 989 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/SpecifiedLabels.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.export.FeatureToggle;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementSchemas;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementType;
import com.amazonaws.services.neptune.propertygraph.schema.LabelSchema;
import com.amazonaws.services.neptune.propertygraph.schema.PropertySchema;
import org.apache.tinkerpop.gremlin.process.traversal.Traversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.structure.Element;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.*;
/**
 * A {@link LabelsFilter} restricting an export to an explicit collection of
 * labels, using the supplied {@link LabelStrategy} to resolve labels for
 * individual results and to add any strategy-specific columns.
 */
public class SpecifiedLabels implements LabelsFilter {

    private final Collection<Label> labels;
    private final LabelStrategy labelStrategy;

    public SpecifiedLabels(Collection<Label> labels, LabelStrategy labelStrategy) {
        this.labels = labels;
        this.labelStrategy = labelStrategy;
    }

    /**
     * Adds label-filtering steps to the traversal.
     *
     * When filtering edges whose labels carry no from/to vertex constraints —
     * or when the ExportByIndividualLabels feature toggle is set — a single
     * hasLabel() step over the flattened label values suffices. Otherwise each
     * label gets its own filter traversal (possibly constraining adjacent
     * vertex labels too), and multiple labels are combined with or().
     *
     * @throws IllegalStateException if no labels were specified (simple branch only)
     */
    @Override
    public GraphTraversal<? extends Element, ?> apply(GraphTraversal<? extends Element, ?> traversal, FeatureToggles featureToggles, GraphElementType graphElementType) {
        boolean simpleEdgeLabels = graphElementType == GraphElementType.edges &&
                labels.stream().allMatch(l -> !l.hasFromLabels() && !l.hasToLabels());
        if (simpleEdgeLabels || featureToggles.containsFeature(FeatureToggle.ExportByIndividualLabels)) {
            // Flatten every label's values into one hasLabel(first, rest...) step.
            List<String> labelList = labels.stream()
                    .flatMap((Function<Label, Stream<String>>) label -> label.labels().stream())
                    .collect(Collectors.toList());
            String firstLabel = labelList.stream().findFirst().orElseThrow(() -> new IllegalStateException("No labels specified"));
            String[] remainingLabels = labelList.stream()
                    .skip(1)
                    .collect(Collectors.toList())
                    .toArray(new String[]{});
            return traversal.hasLabel(firstLabel, remainingLabels);
        } else {
            if (labels.size() > 1) {
                // One anonymous filter traversal per label, OR'd together.
                List<Traversal<?, ?>> traversals = new ArrayList<>();
                for (Label label : labels) {
                    traversals.add(createFilterForLabel(label, null));
                }
                traversal = traversal.or(traversals.toArray(new Traversal<?, ?>[]{}));
            } else {
                Label label = labels.iterator().next();
                traversal = createFilterForLabel(label, traversal);
            }
            return traversal;
        }
    }

    /**
     * Builds (or extends) a traversal filtering by all of the label's values.
     * With the edgeAndVertexLabels strategy, also constrains the labels of the
     * edge's start (outV) and end (inV) vertices via where() clauses.
     * Passing t == null starts a new anonymous traversal.
     */
    private GraphTraversal<? extends Element, ?> createFilterForLabel(Label label, GraphTraversal<? extends Element, ?> t) {
        for (String labelValue : label.labels()) {
            if (t == null) {
                t = hasLabel(labelValue);
            } else {
                t = t.hasLabel(labelValue);
            }
        }
        if (labelStrategy == EdgeLabelStrategy.edgeAndVertexLabels) {
            if (label.hasFromAndToLabels()) {
                // Both ends constrained: AND the outV and inV filters together.
                List<Traversal<?, ?>> traversals = new ArrayList<>();
                GraphTraversal<? extends Element, ?> startVertex = outV();
                startVertex = createFilterForLabel(label.fromLabels(), startVertex);
                traversals.add(startVertex);
                GraphTraversal<? extends Element, ?> endVertex = inV();
                endVertex = createFilterForLabel(label.toLabels(), endVertex);
                traversals.add(endVertex);
                t = t.where(and(traversals.toArray(new Traversal<?, ?>[]{})));
            } else if (label.hasFromLabels()) {
                GraphTraversal<? extends Element, ?> startVertex = outV();
                startVertex = createFilterForLabel(label.fromLabels(), startVertex);
                t = t.where(startVertex);
            } else if (label.hasToLabels()) {
                GraphTraversal<? extends Element, ?> endVertex = inV();
                endVertex = createFilterForLabel(label.toLabels(), endVertex);
                t = t.where(endVertex);
            }
        }
        return t;
    }

    /** Returns the configured labels without consulting the graph client. */
    @Override
    public Collection<Label> getLabelsUsing(GraphClient<?> graphClient) {
        return labels;
    }

    /** Union of property names (without data-type suffixes) across the schemas of all configured labels. */
    @Override
    public String[] getPropertiesForLabels(GraphElementSchemas graphElementSchemas) {
        Set<String> properties = new HashSet<>();
        for (Label label : labels) {
            LabelSchema labelSchema = graphElementSchemas.getSchemaFor(label);
            for (PropertySchema propertySchema : labelSchema.propertySchemas()) {
                properties.add(propertySchema.nameWithoutDataType());
            }
        }
        return properties.toArray(new String[]{});
    }

    @Override
    public Label getLabelFor(Map<String, Object> input) {
        return labelStrategy.getLabelFor(input);
    }

    @Override
    public Label getLabelFor(PGResult input) {
        return labelStrategy.getLabelFor(input);
    }

    @Override
    public String[] addAdditionalColumnNames(String... columns) {
        return labelStrategy.additionalColumns(columns);
    }

    @Override
    public <T> GraphTraversal<? extends Element, T> addAdditionalColumns(GraphTraversal<? extends Element, T> t) {
        return labelStrategy.addAdditionalColumns(t);
    }

    /** Narrows this filter to a single label, keeping the same label strategy. */
    @Override
    public LabelsFilter filterFor(Label label) {
        return new SpecifiedLabels(Collections.singletonList(label), labelStrategy);
    }

    /** Keeps each candidate in {@code others} that one of this filter's labels is assignable from. */
    @Override
    public LabelsFilter intersection(Collection<Label> others) {
        Collection<Label> results = new HashSet<>();
        for (Label label : labels) {
            for (Label other : others) {
                if (label.isAssignableFrom(other)){
                    results.add(other);
                }
            }
        }
        return new SpecifiedLabels(results, labelStrategy);
    }

    @Override
    public boolean isEmpty() {
        return labels.isEmpty();
    }

    /** Human-readable description, e.g. "vertices with label(s) 'a' or 'b'". */
    @Override
    public String description(String element) {
        if (isEmpty()){
            return String.format("%s with zero labels", element);
        }
        String labelList = labels.stream().map(l -> String.format("'%s'", l.fullyQualifiedLabel())).collect(Collectors.joining(" or "));
        return String.format("%s with label(s) %s", element, labelList);
    }

    /** Splits this filter into one single-label filter per configured label. */
    @Override
    public Collection<LabelsFilter> split() {
        return labels.stream()
                .map(l -> new SpecifiedLabels(Collections.singletonList(l), labelStrategy))
                .collect(Collectors.toList());
    }
}
| 990 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/Label.java
|
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.propertygraph.schema.DataType;
import com.amazonaws.services.neptune.util.SemicolonUtils;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.*;
import java.util.stream.Collectors;
/**
 * A vertex or edge label, optionally qualified (for edges) by the labels of the
 * from- and to-vertices. Label values are stored sorted so the fully-qualified
 * string form is deterministic; equality and hashing are based solely on that
 * fully-qualified form.
 */
public class Label {

    /**
     * Works around multi-label values arriving as a single "a::b" string: if the
     * list holds exactly one element containing "::", it is split on "::" and
     * sorted; otherwise the list is returned unchanged.
     */
    public static List<String> fixLabelsIssue(List<String> list) {
        if (list.size() == 1 && list.get(0).contains("::")) {
            List<String> newResults = Arrays.asList(list.get(0).split("::"));
            newResults.sort(String::compareTo);
            return newResults;
        }
        return list;
    }

    /**
     * Parses a label from JSON (inverse of {@link #toJson()}). An object node with
     * "~label" plus optional "~fromLabels"/"~toLabels" yields a vertex-qualified
     * edge label; an array node yields a multi-valued label; a text node a simple
     * label. From/to labels may be arrays or semicolon-delimited strings.
     */
    public static Label fromJson(JsonNode jsonNode) {
        if (jsonNode.isObject()) {
            String label = jsonNode.path("~label").textValue();
            Collection<String> fromLabels = new ArrayList<>();
            Collection<String> toLabels = new ArrayList<>();
            if (jsonNode.has("~fromLabels")) {
                JsonNode fromLabelsNode = jsonNode.path("~fromLabels");
                if (fromLabelsNode.isArray()) {
                    ArrayNode fromLabelsArrays = (ArrayNode) fromLabelsNode;
                    fromLabelsArrays.forEach(l -> fromLabels.add(l.textValue()));
                } else {
                    fromLabels.addAll(SemicolonUtils.split(fromLabelsNode.textValue()));
                }
            }
            if (jsonNode.has("~toLabels")) {
                JsonNode toLabelsNode = jsonNode.path("~toLabels");
                if (toLabelsNode.isArray()) {
                    ArrayNode toLabelsArray = (ArrayNode) toLabelsNode;
                    toLabelsArray.forEach(l -> toLabels.add(l.textValue()));
                } else {
                    toLabels.addAll(SemicolonUtils.split(toLabelsNode.textValue()));
                }
            }
            return new Label(Collections.singletonList(label), fromLabels, toLabels);
        } else {
            if (jsonNode.isArray()) {
                ArrayNode labelsNode = (ArrayNode) jsonNode;
                Collection<String> labels = new ArrayList<>();
                labelsNode.forEach(l -> labels.add(l.textValue()));
                return new Label(labels);
            } else {
                return new Label(jsonNode.textValue());
            }
        }
    }

    /** Wraps each string as a single-valued Label; duplicates collapse via the Set. */
    public static Collection<Label> forLabels(Collection<String> labels) {
        Set<Label> results = new HashSet<>();
        for (String label : labels) {
            results.add(new Label(Collections.singletonList(label)));
        }
        return results;
    }

    private final List<String> labels;
    private final List<String> fromLabels;
    private final List<String> toLabels;
    // Cached canonical form; sole basis for equals/hashCode.
    private final String fullyQualifiedLabel;

    /** Splits a semicolon-delimited label string into its values. */
    public Label(String label) {
        this(SemicolonUtils.split(label));
    }

    public Label(Collection<String> labels) {
        this(labels, Collections.emptyList(), Collections.emptyList());
    }

    /** From/to labels supplied as semicolon-delimited strings. */
    public Label(String label, String fromLabels, String toLabels) {
        this(label, SemicolonUtils.split(fromLabels), SemicolonUtils.split(toLabels));
    }

    public Label(String label, Collection<String> fromLabels, Collection<String> toLabels) {
        this(Collections.singletonList(label), fromLabels, toLabels);
    }

    private Label(Collection<String> labels, Collection<String> fromLabels, Collection<String> toLabels) {
        this.labels = labelList(labels);
        this.fromLabels = labelList(fromLabels);
        this.toLabels = labelList(toLabels);
        // "(from)-label-(to)" when vertex-qualified, otherwise just the label values.
        this.fullyQualifiedLabel = hasFromLabels() || hasToLabels() ?
                format(fromLabelsAsString(), labelsAsString(), toLabelsAsString()) :
                labelsAsString();
    }

    private String format(String fromLabels, String label, String toLabels) {
        return String.format("(%s)-%s-(%s)", fromLabels, label, toLabels);
    }

    /** Escapes ';' within each value so semicolon-joined strings stay parseable. */
    private List<String> escapeSemicolons(List<String> list) {
        return list.stream().map(v -> DataType.escapeSeparators(v, ";")).collect(Collectors.toList());
    }

    /** Defensive copy, "::" fix-up, then sort for a deterministic ordering. */
    private List<String> labelList(Collection<String> col) {
        List<String> results = new ArrayList<>(col);
        results = fixLabelsIssue(results);
        results.sort(String::compareTo);
        return results;
    }

    /** True if l's labels, fromLabels and toLabels each contain all of this label's corresponding values. */
    public boolean isAssignableFrom(Label l){
        boolean allLabelsFound = l.labels.containsAll(labels);
        boolean allFromLabelsFound = l.fromLabels.containsAll(fromLabels);
        boolean allToLabelsFound = l.toLabels.containsAll(toLabels);
        return allLabelsFound && allFromLabelsFound && allToLabelsFound;
    }

    public List<String> labels() {
        return labels;
    }

    public Label fromLabels() {
        return new Label(fromLabels);
    }

    public Label toLabels() {
        return new Label(toLabels);
    }

    /** "_" placeholder when empty; otherwise semicolon-joined (escaped) values. */
    public String fromLabelsAsString() {
        if (fromLabels.isEmpty()) {
            return "_";
        }
        return String.join(";", escapeSemicolons(fromLabels));
    }

    /** "_" placeholder when empty; otherwise semicolon-joined (escaped) values. */
    public String toLabelsAsString() {
        if (toLabels.isEmpty()) {
            return "_";
        }
        return String.join(";", escapeSemicolons(toLabels));
    }

    public String labelsAsString() {
        return String.join(";", escapeSemicolons(labels));
    }

    public String fullyQualifiedLabel() {
        return fullyQualifiedLabel;
    }

    public String allLabelsAsArrayString(){
        return hasFromLabels() || hasToLabels() ?
                String.format("[%s, %s, %s]", fromLabelsAsString(), labelsAsString(), toLabelsAsString()):
                labelsAsString();
    }

    public boolean hasFromAndToLabels() {
        return !fromLabels.isEmpty() && !toLabels.isEmpty();
    }

    public boolean hasFromLabels() {
        return !fromLabels.isEmpty();
    }

    public boolean hasToLabels() {
        return !toLabels.isEmpty();
    }

    /** Deep copy via a JSON round-trip. */
    public Label createCopy() {
        return Label.fromJson(toJson());
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        Label label = (Label) o;
        return fullyQualifiedLabel.equals(label.fullyQualifiedLabel);
    }

    @Override
    public int hashCode() {
        return Objects.hash(fullyQualifiedLabel);
    }

    /**
     * Serializes this label. Unqualified labels become a text node (single value)
     * or an array node (multiple values). Vertex-qualified labels become an object
     * node; note only the first label value is written as "~label" in that case.
     */
    public JsonNode toJson() {
        if (!hasFromAndToLabels()) {
            if (labels.size() > 1) {
                ArrayNode labelsArray = JsonNodeFactory.instance.arrayNode();
                for (String label : labels) {
                    labelsArray.add(label);
                }
                return labelsArray;
            } else {
                return JsonNodeFactory.instance.textNode(labels.get(0));
            }
        }
        ObjectNode labelNode = JsonNodeFactory.instance.objectNode();
        ArrayNode fromLabelsArray = JsonNodeFactory.instance.arrayNode();
        ArrayNode toLabelsArray = JsonNodeFactory.instance.arrayNode();
        labelNode.put("~label", labels.get(0));
        for (String fromLabel : fromLabels) {
            fromLabelsArray.add(fromLabel);
        }
        labelNode.set("~fromLabels", fromLabelsArray);
        for (String toLabel : toLabels) {
            toLabelsArray.add(toLabel);
        }
        labelNode.set("~toLabels", toLabelsArray);
        return labelNode;
    }
}
| 991 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/GremlinQueryDebugger.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import org.apache.tinkerpop.gremlin.process.traversal.translator.GroovyTranslator;
/** Debug helper that renders a traversal object as a Gremlin-Groovy script string. */
public class GremlinQueryDebugger {

    /** Translates {@code o} against traversal source "g" and returns the generated script. */
    public static String queryAsString(Object o) {
        GroovyTranslator.DefaultTypeTranslator translator = new GroovyTranslator.DefaultTypeTranslator(false);
        return translator.apply("g", o).getScript();
    }
}
| 992 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/LabelsFilter.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementSchemas;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementType;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.structure.Element;
import java.util.Collection;
import java.util.Map;
/**
 * Filters graph elements (vertices or edges) by label during an export, and
 * exposes label-related schema and column helpers used by the exporters.
 */
public interface LabelsFilter {
    /** Adds label-filtering steps to the supplied traversal. */
    GraphTraversal<? extends Element, ?> apply(GraphTraversal<? extends Element, ?> traversal, FeatureToggles featureToggles, GraphElementType graphElementType);
    /** Labels this filter selects; may consult the graph client to enumerate them. */
    Collection<Label> getLabelsUsing(GraphClient<?> graphClient);
    /** Property names for the selected labels, drawn from the supplied schemas. */
    String[] getPropertiesForLabels(GraphElementSchemas graphElementSchemas);
    /** Resolves the label for a raw result map. */
    Label getLabelFor(Map<String, Object> input);
    /** Resolves the label for a property-graph result. */
    Label getLabelFor(PGResult result);
    /** Returns the column names with any filter-specific additions. */
    String[] addAdditionalColumnNames(String... columns);
    /** Adds any filter-specific column projections to the traversal. */
    <T> GraphTraversal<? extends Element, T> addAdditionalColumns(GraphTraversal<? extends Element, T> t);
    /** Narrows this filter to a single label. */
    LabelsFilter filterFor(Label label);
    /** Intersects this filter with the supplied labels. */
    LabelsFilter intersection(Collection<Label> labels);
    boolean isEmpty();
    /** Human-readable description, e.g. for log output. */
    String description(String element);
    /** Splits this filter into per-label filters. */
    Collection<LabelsFilter> split();
}
| 993 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/NamedQuery.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
/**
 * An immutable, named Gremlin query. The constructor rejects query text that
 * contains Gremlin write steps, so an export can never mutate the source graph.
 */
public class NamedQuery {

    // Gremlin mutating steps that must never appear in an export query.
    private static final String[] WRITE_STEPS = {".addV(", ".addE(", ".drop(", ".property("};

    private final String name;
    private final String query;

    /**
     * @param name  identifier for the query
     * @param query Gremlin query text; must not contain write steps
     * @throws IllegalArgumentException if the query contains a write step
     */
    public NamedQuery(String name, String query) {
        for (String writeStep : WRITE_STEPS) {
            if (query.contains(writeStep)) {
                throw new IllegalArgumentException("Query must not contain any Gremlin write steps");
            }
        }
        this.name = name;
        this.query = query;
    }

    public String name() {
        return name;
    }

    public String query() {
        return query;
    }
}
| 994 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/GraphClient.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.propertygraph.io.GraphElementHandler;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementSchemas;
import java.util.Collection;
import java.util.Map;
/**
 * Abstraction over a property-graph connection used by the exporters, with
 * separate query paths for schema discovery and value export.
 *
 * @param <T> the per-element result type produced by value queries
 */
public interface GraphClient<T> {
    /** Human-readable description of what this client queries, e.g. for logging. */
    String description();
    /** Queries a range of elements for schema discovery, feeding each result map to the handler. */
    void queryForSchema(GraphElementHandler<Map<?, Object>> handler, Range range, LabelsFilter labelsFilter, GremlinFilters gremlinFilters);
    /** Queries a range of elements for export, feeding each result to the handler; uses the schemas to shape output. */
    void queryForValues(GraphElementHandler<T> handler, Range range, LabelsFilter labelsFilter, GremlinFilters gremlinFilters, GraphElementSchemas graphElementSchemas);
    /** Approximate element count for the given filters (used to size work ranges). */
    long approxCount(LabelsFilter labelsFilter, RangeConfig rangeConfig, GremlinFilters gremlinFilters);
    /** All labels present in the graph, as resolved by the supplied strategy. */
    Collection<Label> labels(LabelStrategy labelStrategy);
    /** Resolves the label for a single result via the filter. */
    Label getLabelFor(T input, LabelsFilter labelsFilter);
    /** Records per-label statistics for the given label. */
    void updateStats(Label label);
}
| 995 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/NamedQueriesCollection.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.propertygraph.io.Jsonizable;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;
/**
 * A collection of {@link NamedQueries} groups that can be flattened into
 * individual {@link NamedQuery} instances and round-tripped to/from JSON.
 */
public class NamedQueriesCollection implements Jsonizable<Object> {

    /**
     * Deserializes a collection from a JSON array, one {@link NamedQueries}
     * per array element.
     */
    public static NamedQueriesCollection fromJson(JsonNode json) {
        List<NamedQueries> groups = new ArrayList<>();
        json.forEach(node -> groups.add(NamedQueries.fromJson(node)));
        return new NamedQueriesCollection(groups);
    }

    private final Collection<NamedQueries> namedQueries;

    public NamedQueriesCollection(Collection<NamedQueries> namedQueries) {
        this.namedQueries = namedQueries;
    }

    /** Returns every individual query across all named groups. */
    public Collection<NamedQuery> flatten() {
        List<NamedQuery> allQueries = new ArrayList<>();
        for (NamedQueries group : namedQueries) {
            group.addTo(allQueries);
        }
        return allQueries;
    }

    /** Returns the names of the query groups, in iteration order. */
    public Collection<String> names() {
        List<String> groupNames = new ArrayList<>();
        for (NamedQueries group : namedQueries) {
            groupNames.add(group.name());
        }
        return groupNames;
    }

    /**
     * Serializes the collection to a JSON array of
     * {@code {"name": ..., "queries": [...]}} objects.
     */
    @Override
    public JsonNode toJson(Object o) {
        ArrayNode result = JsonNodeFactory.instance.arrayNode();
        for (NamedQueries group : namedQueries) {
            ObjectNode groupNode = JsonNodeFactory.instance.objectNode();
            groupNode.put("name", group.name());
            groupNode.set("queries", group.toJson());
            result.add(groupNode);
        }
        return result;
    }
}
| 996 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/GremlinFilters.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import org.apache.commons.lang.StringUtils;
import org.apache.tinkerpop.gremlin.jsr223.CachedGremlinScriptEngineManager;
import org.apache.tinkerpop.gremlin.jsr223.GremlinScriptEngine;
import org.apache.tinkerpop.gremlin.process.traversal.Bytecode;
import org.apache.tinkerpop.gremlin.process.traversal.Traversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.structure.Element;
import org.joda.time.format.DateTimeFormatter;
import org.joda.time.format.ISODateTimeFormat;
import javax.script.Bindings;
import javax.script.ScriptException;
import java.util.Arrays;
import java.util.List;
/**
 * Holds optional user-supplied Gremlin filter snippets (general, node-specific,
 * edge-specific) and appends their bytecode steps onto export traversals.
 * Mutating/side-effecting steps are rejected.
 */
public class GremlinFilters {

    public static final GremlinFilters EMPTY = new GremlinFilters(null, null, null, false);

    // Steps that would mutate the graph or introduce side effects; forbidden in filters.
    private static final List<String> INVALID_OPERATORS = Arrays.asList("addV", "addE", "write", "drop", "sideEffect", "property");

    private final String gremlinFilter;
    private final String gremlinNodeFilter;
    private final String gremlinEdgeFilter;
    private final boolean filterEdgesEarly;

    public GremlinFilters(String gremlinFilter, String gremlinNodeFilter, String gremlinEdgeFilter, boolean filterEdgesEarly) {
        this.gremlinFilter = gremlinFilter;
        this.gremlinNodeFilter = gremlinNodeFilter;
        this.gremlinEdgeFilter = gremlinEdgeFilter;
        this.filterEdgesEarly = filterEdgesEarly;
    }

    /** Applies the node-specific filter if present, else the general filter, else none. */
    public GraphTraversal<? extends Element, ?> applyToNodes(GraphTraversal<? extends Element, ?> t) {
        String filter = StringUtils.isNotEmpty(gremlinNodeFilter) ? gremlinNodeFilter : gremlinFilter;
        return StringUtils.isNotEmpty(filter) ? apply(t, filter) : t;
    }

    /** Applies the edge-specific filter if present, else the general filter, else none. */
    public GraphTraversal<? extends Element, ?> applyToEdges(GraphTraversal<? extends Element, ?> t) {
        String filter = StringUtils.isNotEmpty(gremlinEdgeFilter) ? gremlinEdgeFilter : gremlinFilter;
        return StringUtils.isNotEmpty(filter) ? apply(t, filter) : t;
    }

    /** Whether edge filters should be applied before range limits are imposed. */
    public boolean filterEdgesEarly() {
        return filterEdgesEarly;
    }

    /**
     * Evaluates {@code gremlin} as a gremlin-groovy script (with a
     * {@code datetime} helper bound), validates each resulting step, and
     * appends the steps to {@code t}'s bytecode. Returns {@code t}.
     *
     * @throws IllegalStateException if the script does not evaluate
     */
    private GraphTraversal<? extends Element, ?> apply(GraphTraversal<? extends Element, ?> t, String gremlin) {
        GremlinScriptEngine engine = new CachedGremlinScriptEngineManager().getEngineByName("gremlin-groovy");
        Bindings bindings = engine.createBindings();
        bindings.put("datetime", new DatetimeConverter());

        Traversal.Admin<?, ?> filterTraversal;
        try {
            filterTraversal = (Traversal.Admin) engine.eval(gremlin, bindings);
        } catch (ScriptException e) {
            throw new IllegalStateException(String.format("Invalid Gremlin filter: %s. %s", gremlin, e.getMessage()), e);
        }

        Bytecode targetBytecode = t.asAdmin().getBytecode();
        for (Bytecode.Instruction instruction : filterTraversal.getBytecode().getInstructions()) {
            String operator = instruction.getOperator();
            validateOperator(operator);
            targetBytecode.addStep(operator, instruction.getArguments());
        }
        return t;
    }

    private void validateOperator(String operator) {
        if (INVALID_OPERATORS.contains(operator)) {
            throw new IllegalArgumentException(String.format("Invalid operator: '%s'. Gremlin filter cannot contain side effect or mutating step.", operator));
        }
    }

    /** Parses ISO-8601 date strings (UTC) into {@link java.util.Date} for filter scripts. */
    private static class DatetimeConverter {
        private static final DateTimeFormatter dateTimeFormatter = ISODateTimeFormat.dateTimeParser().withZoneUTC();

        public Object call(String args) {
            return dateTimeFormatter.parseDateTime(args).toDate();
        }
    }
}
| 997 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/EdgesClient.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.export.FeatureToggle;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.propertygraph.io.GraphElementHandler;
import com.amazonaws.services.neptune.propertygraph.io.result.PGEdgeResult;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementSchemas;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementType;
import com.amazonaws.services.neptune.util.Activity;
import com.amazonaws.services.neptune.util.Timer;
import org.apache.tinkerpop.gremlin.process.traversal.P;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.structure.Edge;
import org.apache.tinkerpop.gremlin.structure.Element;
import org.apache.tinkerpop.gremlin.structure.T;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.*;
/**
 * {@link GraphClient} implementation for exporting edges. Builds Gremlin
 * traversals over {@code g.E()}, applying label filters, user-supplied
 * Gremlin filters, range limits and property-key filters, then streams each
 * edge (id, label, properties, from, to) to the supplied handler.
 */
public class EdgesClient implements GraphClient<PGResult> {

    private static final Logger logger = LoggerFactory.getLogger(EdgesClient.class);

    private final GraphTraversalSource g;
    // When true, only element tokens (id/label/from/to) are exported, not properties.
    private final boolean tokensOnly;
    private final ExportStats stats;
    private final FeatureToggles featureToggles;

    public EdgesClient(GraphTraversalSource g,
                       boolean tokensOnly,
                       ExportStats stats,
                       FeatureToggles featureToggles) {
        this.g = g;
        this.tokensOnly = tokensOnly;
        this.stats = stats;
        this.featureToggles = featureToggles;
    }

    @Override
    public String description() {
        return "edge";
    }

    /**
     * Schema-discovery pass: value-maps each in-range, label-matching edge and
     * forwards the map to {@code handler}.
     * NOTE(review): {@code gremlinFilters} is not applied here, unlike
     * {@code NodesClient.queryForSchema} — confirm whether this is intentional.
     */
    @Override
    public void queryForSchema(GraphElementHandler<Map<?, Object>> handler, Range range, LabelsFilter labelsFilter, GremlinFilters gremlinFilters) {
        // valueMap with a key that should match nothing ("~TOKENS-ONLY") presumably
        // yields only the tokens (id/label) when tokensOnly is set — TODO confirm.
        GraphTraversal<? extends Element, Map<Object, Object>> t1 = tokensOnly ?
                traversal(range, labelsFilter).valueMap(true, "~TOKENS-ONLY") :
                traversal(range, labelsFilter).valueMap(true);
        logger.info(GremlinQueryDebugger.queryAsString(t1));
        t1.forEachRemaining(m -> {
            try {
                handler.handle(m, false);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }

    /**
     * Export pass: streams one {@link PGEdgeResult} per matching edge to
     * {@code handler}. The range limit is applied either before or after the
     * Gremlin edge filters, depending on {@code gremlinFilters.filterEdgesEarly()}.
     */
    @Override
    public void queryForValues(GraphElementHandler<PGResult> handler,
                               Range range,
                               LabelsFilter labelsFilter,
                               GremlinFilters gremlinFilters,
                               GraphElementSchemas graphElementSchemas) {
        // In tokensOnly mode, "x" is an empty side-effect map later selected in
        // place of valueMap, so no properties are fetched.
        GraphTraversal<Edge, Edge> t1 = tokensOnly ?
                g.withSideEffect("x", new HashMap<String, Object>()).E() :
                g.E();
        GraphTraversal<? extends Element, ?> t2 = labelsFilter.apply(t1, featureToggles, GraphElementType.edges);
        if(!gremlinFilters.filterEdgesEarly()) {
            t2 = range.applyRange(t2);
        }
        GraphTraversal<? extends Element, ?> t3 = filterByPropertyKeys(t2, labelsFilter, graphElementSchemas);
        GraphTraversal<? extends Element, ?> t4 = gremlinFilters.applyToEdges(t3);
        if(gremlinFilters.filterEdgesEarly()) {
            t4 = range.applyRange(t4);
        }
        // Project each edge into a map of id, label, properties and endpoint ids.
        GraphTraversal<? extends Element, Map<String, Object>> t5 = t4.
                project("~id", labelsFilter.addAdditionalColumnNames("~label", "properties", "~from", "~to")).
                by(T.id).
                by(T.label).
                by(tokensOnly ?
                        select("x") :
                        valueMap(labelsFilter.getPropertiesForLabels(graphElementSchemas))
                ).
                by(outV().id()).
                by(inV().id());
        GraphTraversal<? extends Element, Map<String, Object>> traversal = labelsFilter.addAdditionalColumns(t5);
        logger.info(GremlinQueryDebugger.queryAsString(traversal));
        traversal.forEachRemaining(p -> {
            try {
                // Test hook: deliberately fail mid-export when the fault-injection
                // feature toggle is enabled.
                if (featureToggles.containsFeature(FeatureToggle.Inject_Fault)){
                    throw new IllegalStateException("Simulated fault in EdgesClient");
                }
                handler.handle(new PGEdgeResult(p), false);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }

    /**
     * Optionally (behind {@code FilterByPropertyKeys}) restricts the traversal
     * to edges having at least one property key in the known schemas.
     */
    private GraphTraversal<? extends Element, ?> filterByPropertyKeys(GraphTraversal<? extends Element, ?> traversal,
                                                                      LabelsFilter labelsFilter,
                                                                      GraphElementSchemas graphElementSchemas) {
        if (!featureToggles.containsFeature(FeatureToggle.FilterByPropertyKeys)) {
            return traversal;
        }
        return traversal.where(
                properties().key().is(P.within(labelsFilter.getPropertiesForLabels(graphElementSchemas))));
    }

    /**
     * Returns a user-supplied approximate edge count if configured; otherwise
     * counts matching edges with a (potentially slow) full traversal and
     * records the result in {@link ExportStats}.
     */
    @Override
    public long approxCount(LabelsFilter labelsFilter, RangeConfig rangeConfig, GremlinFilters gremlinFilters) {
        if (rangeConfig.approxEdgeCount() > 0) {
            return rangeConfig.approxEdgeCount();
        }
        String description = labelsFilter.description("edges");
        System.err.println(String.format("Counting %s...", description));
        return Timer.timedActivity(String.format("counting %s", description), (Activity.Callable<Long>) () -> {
            GraphTraversal<? extends Element, ?> traversal = traversal(Range.ALL, labelsFilter);
            // Gremlin filters only affect the count when they are applied before
            // ranging, i.e. when filterEdgesEarly is set.
            if(gremlinFilters.filterEdgesEarly()) {
                traversal = gremlinFilters.applyToEdges(traversal);
            }
            GraphTraversal<? extends Element, Long> t = traversal.count();
            logger.info(GremlinQueryDebugger.queryAsString(t));
            Long count = t.next();
            stats.setEdgeCount(count);
            return count;
        });
    }

    @Override
    public Collection<Label> labels(LabelStrategy labelStrategy) {
        return labelStrategy.getLabels(g);
    }

    @Override
    public Label getLabelFor(PGResult input, LabelsFilter labelsFilter) {
        return labelsFilter.getLabelFor(input);
    }

    @Override
    public void updateStats(Label label) {
        stats.incrementEdgeStats(label);
    }

    /** Base traversal: g.E() with label filter applied, then range-limited. */
    private GraphTraversal<? extends Element, ?> traversal(Range range, LabelsFilter labelsFilter) {
        return range.applyRange(labelsFilter.apply(g.E(), featureToggles, GraphElementType.edges));
    }
}
| 998 |
0 |
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune
|
Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/NodesClient.java
|
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.export.FeatureToggle;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.propertygraph.io.GraphElementHandler;
import com.amazonaws.services.neptune.propertygraph.io.result.ExportPGNodeResult;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementSchemas;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementType;
import com.amazonaws.services.neptune.util.Activity;
import com.amazonaws.services.neptune.util.Timer;
import org.apache.tinkerpop.gremlin.process.traversal.P;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.structure.Element;
import org.apache.tinkerpop.gremlin.structure.T;
import org.apache.tinkerpop.gremlin.structure.Vertex;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.*;
/**
 * {@link GraphClient} implementation for exporting nodes (vertices). Builds
 * Gremlin traversals over {@code g.V()}, applying label filters, user-supplied
 * Gremlin filters, range limits and property-key filters, then streams each
 * node (id, labels, properties) to the supplied handler.
 */
public class NodesClient implements GraphClient<PGResult> {

    private static final Logger logger = LoggerFactory.getLogger(NodesClient.class);

    private final GraphTraversalSource g;
    // When true, only element tokens (id/label) are exported, not properties.
    private final boolean tokensOnly;
    private final ExportStats stats;
    private final FeatureToggles featureToggles;

    public NodesClient(GraphTraversalSource g,
                       boolean tokensOnly,
                       ExportStats stats,
                       FeatureToggles featureToggles) {
        this.g = g;
        this.tokensOnly = tokensOnly;
        this.stats = stats;
        this.featureToggles = featureToggles;
    }

    @Override
    public String description() {
        return "node";
    }

    /**
     * Schema-discovery pass: value-maps each in-range, label- and filter-matching
     * node and forwards the map to {@code handler}.
     */
    @Override
    public void queryForSchema(GraphElementHandler<Map<?, Object>> handler,
                               Range range,
                               LabelsFilter labelsFilter,
                               GremlinFilters gremlinFilters) {
        // valueMap with a key that should match nothing ("~TOKENS-ONLY") presumably
        // yields only the tokens (id/label) when tokensOnly is set — TODO confirm.
        GraphTraversal<? extends Element, Map<Object, Object>> t = tokensOnly ?
                createTraversal(range, labelsFilter, gremlinFilters).valueMap(true, "~TOKENS-ONLY") :
                createTraversal(range, labelsFilter, gremlinFilters).valueMap(true);
        logger.info(GremlinQueryDebugger.queryAsString(t));
        t.forEachRemaining(m -> {
            try {
                handler.handle(m, false);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }

    /**
     * Export pass: streams one {@link ExportPGNodeResult} per matching node to
     * {@code handler}, restricted to properties in {@code graphElementSchemas}.
     */
    @Override
    public void queryForValues(GraphElementHandler<PGResult> handler,
                               Range range,
                               LabelsFilter labelsFilter,
                               GremlinFilters gremlinFilters,
                               GraphElementSchemas graphElementSchemas) {
        GraphTraversal<? extends Element, ?> t1 = createTraversal(range, labelsFilter, gremlinFilters);
        GraphTraversal<? extends Element, ?> t2 = filterByPropertyKeys(t1, labelsFilter, graphElementSchemas);
        // Project each node into a map of id, labels (folded — nodes can have
        // multiple labels) and properties.
        GraphTraversal<? extends Element, Map<String, Object>> t3 = t2.
                project("~id", labelsFilter.addAdditionalColumnNames("~label", "properties")).
                by(T.id).
                by(label().fold()).
                by(tokensOnly ?
                        select("x") :
                        valueMap(labelsFilter.getPropertiesForLabels(graphElementSchemas))
                );
        GraphTraversal<? extends Element, Map<String, Object>> traversal = labelsFilter.addAdditionalColumns(t3);
        logger.info(GremlinQueryDebugger.queryAsString(traversal));
        traversal.forEachRemaining(m -> {
            try {
                // Test hook: deliberately fail mid-export when the fault-injection
                // feature toggle is enabled.
                if (featureToggles.containsFeature(FeatureToggle.Inject_Fault)){
                    throw new IllegalStateException("Simulated fault in NodesClient");
                }
                handler.handle(new ExportPGNodeResult(m), false);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }

    /**
     * Optionally (behind {@code FilterByPropertyKeys}) restricts the traversal
     * to nodes having at least one property key in the known schemas.
     */
    private GraphTraversal<? extends Element, ?> filterByPropertyKeys(GraphTraversal<? extends Element, ?> traversal,
                                                                      LabelsFilter labelsFilter,
                                                                      GraphElementSchemas graphElementSchemas) {
        if (!featureToggles.containsFeature(FeatureToggle.FilterByPropertyKeys)) {
            return traversal;
        }
        return traversal.where(
                properties().key().is(P.within(labelsFilter.getPropertiesForLabels(graphElementSchemas))));
    }

    /**
     * Returns a user-supplied approximate node count if configured; otherwise
     * counts matching nodes with a (potentially slow) full traversal and
     * records the result in {@link ExportStats}.
     */
    @Override
    public long approxCount(LabelsFilter labelsFilter, RangeConfig rangeConfig, GremlinFilters gremlinFilters) {
        if (rangeConfig.approxNodeCount() > 0) {
            return rangeConfig.approxNodeCount();
        }
        String description = labelsFilter.description("nodes");
        System.err.println(String.format("Counting %s...", description));
        return Timer.timedActivity(String.format("counting %s", description), (Activity.Callable<Long>) () ->
        {
            GraphTraversal<? extends Element, Long> t = createTraversal(Range.ALL, labelsFilter, gremlinFilters).count();
            logger.info(GremlinQueryDebugger.queryAsString(t));
            Long count = t.next();
            stats.setNodeCount(count);
            return count;
        });
    }

    @Override
    public Collection<Label> labels(LabelStrategy labelStrategy) {
        return labelStrategy.getLabels(g);
    }

    @Override
    public Label getLabelFor(PGResult input, LabelsFilter labelsFilter) {
        return labelsFilter.getLabelFor(input);
    }

    @Override
    public void updateStats(Label label) {
        stats.incrementNodeStats(label);
    }

    /**
     * Base traversal: g.V() (with an empty "x" side-effect map in tokensOnly
     * mode, later selected instead of valueMap), with label and Gremlin node
     * filters applied, then range-limited.
     */
    private GraphTraversal<? extends Element, ?> createTraversal(Range range, LabelsFilter labelsFilter, GremlinFilters gremlinFilters) {
        GraphTraversal<Vertex, Vertex> t = tokensOnly ?
                g.withSideEffect("x", new HashMap<String, Object>()).V() :
                g.V();
        return range.applyRange(gremlinFilters.applyToNodes( labelsFilter.apply(t, featureToggles, GraphElementType.nodes)));
    }
}
| 999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.