patch
stringlengths 17
31.2k
| y
int64 1
1
| oldf
stringlengths 0
2.21M
| idx
int64 1
1
| id
int64 4.29k
68.4k
| msg
stringlengths 8
843
| proj
stringclasses 212
values | lang
stringclasses 9
values |
---|---|---|---|---|---|---|---|
@@ -16,8 +16,12 @@
package io.servicecomb.provider.springmvc.reference;
+import java.net.URI;
+
import org.junit.Assert;
import org.junit.Test;
+import org.springframework.web.client.RestClientException;
+import org.springframework.web.client.RestTemplate;
public class TestRestTemplateBuilder {
| 1 | /*
* Copyright 2017 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicecomb.provider.springmvc.reference;
import org.junit.Assert;
import org.junit.Test;
// Verifies that RestTemplateBuilder.create() returns the ServiceComb RestTemplateWrapper.
public class TestRestTemplateBuilder {
@Test
public void testRestTemplateBuilder() {
// Happy path only.
// TODO(review): also cover the failure path — what should happen when no
// acceptable rest template can be found? Add a test for that case.
Assert.assertEquals(RestTemplateWrapper.class, RestTemplateBuilder.create().getClass());
}
}
| 1 | 6,527 | this test only covers the happy path. what about no acceptable rest template found? | apache-servicecomb-java-chassis | java |
@@ -582,7 +582,7 @@ public class DistributorTest {
);
Session firefoxSession = distributor.newSession(createRequest(firefoxPayload)).getSession();
- LOG.info(String.format("Firefox Session %d assigned to %s", i, chromeSession.getUri()));
+ LOG.finer(String.format("Firefox Session %d assigned to %s", i, chromeSession.getUri()));
boolean inFirefoxNodes = firefoxNodes.stream().anyMatch(node -> node.getUri().equals(firefoxSession.getUri()));
boolean inChromeNodes = chromeNodes.stream().anyMatch(node -> node.getUri().equals(chromeSession.getUri())); | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.distributor;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import io.opentracing.Tracer;
import io.opentracing.noop.NoopTracerFactory;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.ImmutableCapabilities;
import org.openqa.selenium.MutableCapabilities;
import org.openqa.selenium.NoSuchSessionException;
import org.openqa.selenium.SessionNotCreatedException;
import org.openqa.selenium.events.EventBus;
import org.openqa.selenium.events.local.GuavaEventBus;
import org.openqa.selenium.grid.component.HealthCheck;
import org.openqa.selenium.grid.data.CreateSessionRequest;
import org.openqa.selenium.grid.data.DistributorStatus;
import org.openqa.selenium.grid.data.NodeStatus;
import org.openqa.selenium.grid.data.Session;
import org.openqa.selenium.grid.distributor.local.LocalDistributor;
import org.openqa.selenium.grid.distributor.remote.RemoteDistributor;
import org.openqa.selenium.grid.node.Node;
import org.openqa.selenium.grid.node.local.LocalNode;
import org.openqa.selenium.grid.sessionmap.SessionMap;
import org.openqa.selenium.grid.sessionmap.local.LocalSessionMap;
import org.openqa.selenium.grid.testing.PassthroughHttpClient;
import org.openqa.selenium.grid.testing.TestSessionFactory;
import org.openqa.selenium.grid.web.CombinedHandler;
import org.openqa.selenium.net.PortProber;
import org.openqa.selenium.remote.Dialect;
import org.openqa.selenium.remote.NewSessionPayload;
import org.openqa.selenium.remote.SessionId;
import org.openqa.selenium.remote.http.HttpClient;
import org.openqa.selenium.remote.http.HttpHandler;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import org.openqa.selenium.support.ui.FluentWait;
import org.openqa.selenium.support.ui.Wait;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.time.Duration;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
import static org.assertj.core.api.Assertions.fail;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.openqa.selenium.remote.http.Contents.utf8String;
import static org.openqa.selenium.remote.http.HttpMethod.POST;
public class DistributorTest {
// Shared fixtures, re-created by setUp() before each test.
private Tracer tracer;
private EventBus bus;
private HttpClient.Factory clientFactory;
// A local distributor with no nodes registered; individual tests add nodes as needed.
private Distributor local;
// The default "browserName: cheese" capabilities used by most tests.
private ImmutableCapabilities caps;
// NOTE(review): the logger name contains a space; conventionally this would be the class name.
private static final Logger LOG = Logger.getLogger("Distributor Test");
// Fresh tracing/event plumbing and an empty local distributor before every test.
@Before
public void setUp() {
  tracer = NoopTracerFactory.create();
  bus = new GuavaEventBus();
  clientFactory = HttpClient.Factory.createDefault();
  LocalSessionMap sessions = new LocalSessionMap(bus);
  // Consistency fix: reuse the factory assigned above instead of constructing a
  // second identical HttpClient.Factory.createDefault().
  local = new LocalDistributor(tracer, bus, clientFactory, sessions);
  caps = new ImmutableCapabilities("browserName", "cheese");
}
// A remote distributor backed by a local distributor with no registered nodes
// must reject new-session requests with SessionNotCreatedException.
@Test
public void creatingANewSessionWithoutANodeEndsInFailure() throws MalformedURLException {
  Distributor remote = new RemoteDistributor(
      tracer,
      new PassthroughHttpClient.Factory(local),
      new URL("http://does.not.exist/"));
  try (NewSessionPayload payload = NewSessionPayload.create(caps)) {
    assertThatExceptionOfType(SessionNotCreatedException.class)
        .isThrownBy(() -> remote.newSession(createRequest(payload)));
  }
}
// Happy path: with one registered node, a new-session request succeeds and the
// returned session reports the node's routable URI (not the URI the session advertises).
@Test
public void shouldBeAbleToAddANodeAndCreateASession() throws URISyntaxException {
URI nodeUri = new URI("http://example:5678");
URI routableUri = new URI("http://localhost:1234");
LocalSessionMap sessions = new LocalSessionMap(bus);
LocalNode node = LocalNode.builder(tracer, bus, clientFactory, routableUri)
.add(caps, new TestSessionFactory((id, c) -> new Session(id, nodeUri, c)))
.build();
Distributor distributor = new LocalDistributor(
tracer,
bus,
new PassthroughHttpClient.Factory(node),
sessions);
distributor.add(node);
MutableCapabilities sessionCaps = new MutableCapabilities(caps);
sessionCaps.setCapability("sausages", "gravy");
try (NewSessionPayload payload = NewSessionPayload.create(sessionCaps)) {
Session session = distributor.newSession(createRequest(payload)).getSession();
// The extra capability must round-trip unchanged.
assertThat(session.getCapabilities()).isEqualTo(sessionCaps);
assertThat(session.getUri()).isEqualTo(routableUri);
}
}
// A session created through the distributor must also be retrievable from the
// session map by its id.
@Test
public void creatingASessionAddsItToTheSessionMap() throws URISyntaxException {
URI nodeUri = new URI("http://example:5678");
URI routableUri = new URI("http://localhost:1234");
LocalSessionMap sessions = new LocalSessionMap(bus);
LocalNode node = LocalNode.builder(tracer, bus, clientFactory, routableUri)
.add(caps, new TestSessionFactory((id, c) -> new Session(id, nodeUri, c)))
.build();
Distributor distributor = new LocalDistributor(
tracer,
bus,
new PassthroughHttpClient.Factory(node),
sessions);
distributor.add(node);
MutableCapabilities sessionCaps = new MutableCapabilities(caps);
sessionCaps.setCapability("sausages", "gravy");
try (NewSessionPayload payload = NewSessionPayload.create(sessionCaps)) {
// Look the session up again via the map rather than trusting the returned object.
Session returned = distributor.newSession(createRequest(payload)).getSession();
Session session = sessions.get(returned.getId());
assertThat(session.getCapabilities()).isEqualTo(sessionCaps);
assertThat(session.getUri()).isEqualTo(routableUri);
}
}
// After a node is removed through the (remote) distributor, new-session requests
// must fail even though the node object itself still exists.
// NOTE(review): the local variable `local` shadows the `local` field set in setUp().
@Test
public void shouldBeAbleToRemoveANode() throws URISyntaxException, MalformedURLException {
URI nodeUri = new URI("http://example:5678");
URI routableUri = new URI("http://localhost:1234");
LocalSessionMap sessions = new LocalSessionMap(bus);
LocalNode node = LocalNode.builder(tracer, bus, clientFactory, routableUri)
.add(caps, new TestSessionFactory((id, c) -> new Session(id, nodeUri, c)))
.build();
Distributor local = new LocalDistributor(
tracer,
bus,
new PassthroughHttpClient.Factory(node),
sessions);
Distributor distributor = new RemoteDistributor(
tracer,
new PassthroughHttpClient.Factory(local),
new URL("http://does.not.exist"));
distributor.add(node);
distributor.remove(node.getId());
try (NewSessionPayload payload = NewSessionPayload.create(caps)) {
assertThatExceptionOfType(SessionNotCreatedException.class)
.isThrownBy(() -> distributor.newSession(createRequest(payload)));
}
}
// Adding the same node twice must be idempotent: the distributor's status should
// still report exactly one node.
@Test
public void registeringTheSameNodeMultipleTimesOnlyCountsTheFirstTime()
    throws URISyntaxException {
  URI advertisedUri = new URI("http://example:5678");
  URI reachableUri = new URI("http://localhost:1234");
  LocalNode duplicate = LocalNode.builder(tracer, bus, clientFactory, reachableUri)
      .add(caps, new TestSessionFactory((id, c) -> new Session(id, advertisedUri, c)))
      .build();
  local.add(duplicate);
  local.add(duplicate);
  DistributorStatus status = local.getStatus();
  assertThat(status.getNodes().size()).isEqualTo(1);
}
// With four nodes at different load levels, the distributor must route the next
// session to the node with the most free capacity, regardless of registration order.
@Test
public void theMostLightlyLoadedNodeIsSelectedFirst() {
// Create enough hosts so that we avoid the scheduler returning hosts in:
// * insertion order
// * reverse insertion order
// * sorted with most heavily used first
SessionMap sessions = new LocalSessionMap(bus);
Node lightest = createNode(caps, 10, 0);
Node medium = createNode(caps, 10, 4);
Node heavy = createNode(caps, 10, 6);
Node massive = createNode(caps, 10, 8);
CombinedHandler handler = new CombinedHandler();
handler.addHandler(lightest);
handler.addHandler(medium);
handler.addHandler(heavy);
handler.addHandler(massive);
// Deliberately registered from heaviest to lightest (see the note above).
Distributor distributor = new LocalDistributor(
tracer,
bus,
new PassthroughHttpClient.Factory(handler),
sessions)
.add(heavy)
.add(medium)
.add(lightest)
.add(massive);
try (NewSessionPayload payload = NewSessionPayload.create(caps)) {
Session session = distributor.newSession(createRequest(payload)).getSession();
assertThat(session.getUri()).isEqualTo(lightest.getStatus().getUri());
}
}
// When nodes are equally loaded, the node that started a session least recently
// should win the tie-break; otherwise the least-loaded node is chosen as usual.
@Test
public void shouldUseLastSessionCreatedTimeAsTieBreaker() {
SessionMap sessions = new LocalSessionMap(bus);
Node leastRecent = createNode(caps, 5, 0);
CombinedHandler handler = new CombinedHandler();
handler.addHandler(sessions);
handler.addHandler(leastRecent);
Distributor distributor = new LocalDistributor(
tracer,
bus,
new PassthroughHttpClient.Factory(handler),
sessions)
.add(leastRecent);
try (NewSessionPayload payload = NewSessionPayload.create(caps)) {
distributor.newSession(createRequest(payload));
// Will be "leastRecent" by default
}
Node middle = createNode(caps, 5, 0);
handler.addHandler(middle);
distributor.add(middle);
try (NewSessionPayload payload = NewSessionPayload.create(caps)) {
Session session = distributor.newSession(createRequest(payload)).getSession();
// Least lightly loaded is middle
assertThat(session.getUri()).isEqualTo(middle.getStatus().getUri());
}
Node mostRecent = createNode(caps, 5, 0);
handler.addHandler(mostRecent);
distributor.add(mostRecent);
try (NewSessionPayload payload = NewSessionPayload.create(caps)) {
Session session = distributor.newSession(createRequest(payload)).getSession();
// Least lightly loaded is most recent
assertThat(session.getUri()).isEqualTo(mostRecent.getStatus().getUri());
}
// All the nodes should be equally loaded.
Map<Capabilities, Integer> expected = mostRecent.getStatus().getStereotypes();
assertThat(leastRecent.getStatus().getStereotypes()).isEqualTo(expected);
assertThat(middle.getStatus().getStereotypes()).isEqualTo(expected);
// All nodes are now equally loaded. We should be going in time order now
try (NewSessionPayload payload = NewSessionPayload.create(caps)) {
Session session = distributor.newSession(createRequest(payload)).getSession();
assertThat(session.getUri()).isEqualTo(leastRecent.getStatus().getUri());
}
}
// Only nodes whose health check passes should be eligible for sessions: with only
// the always-down node registered, session creation fails; once the always-up node
// is added, it succeeds.
// NOTE(review): both nodes are built on the same `uri` — presumably intentional
// here since only one of them is ever healthy, but worth confirming.
@Test
public void shouldIncludeHostsThatAreUpInHostList() {
CombinedHandler handler = new CombinedHandler();
SessionMap sessions = new LocalSessionMap(bus);
handler.addHandler(sessions);
URI uri = createUri();
Node alwaysDown = LocalNode.builder(tracer, bus, clientFactory, uri)
.add(caps, new TestSessionFactory((id, c) -> new Session(id, uri, c)))
.advanced()
.healthCheck(() -> new HealthCheck.Result(false, "Boo!"))
.build();
handler.addHandler(alwaysDown);
Node alwaysUp = LocalNode.builder(tracer, bus, clientFactory, uri)
.add(caps, new TestSessionFactory((id, c) -> new Session(id, uri, c)))
.advanced()
.healthCheck(() -> new HealthCheck.Result(true, "Yay!"))
.build();
handler.addHandler(alwaysUp);
LocalDistributor distributor = new LocalDistributor(
tracer,
bus,
new PassthroughHttpClient.Factory(handler),
sessions);
handler.addHandler(distributor);
distributor.add(alwaysDown);
// Should be unable to create a session because the node is down.
try (NewSessionPayload payload = NewSessionPayload.create(caps)) {
assertThatExceptionOfType(SessionNotCreatedException.class)
.isThrownBy(() -> distributor.newSession(createRequest(payload)));
}
distributor.add(alwaysUp);
try (NewSessionPayload payload = NewSessionPayload.create(caps)) {
distributor.newSession(createRequest(payload));
}
}
// Once a node's only slot is occupied, a further session request must be rejected
// rather than queued.
@Test
public void shouldNotScheduleAJobIfAllSlotsAreBeingUsed() {
SessionMap sessions = new LocalSessionMap(bus);
CombinedHandler handler = new CombinedHandler();
Distributor distributor = new LocalDistributor(
tracer,
bus,
new PassthroughHttpClient.Factory(handler),
sessions);
handler.addHandler(distributor);
Node node = createNode(caps, 1, 0);
handler.addHandler(node);
distributor.add(node);
// Use up the one slot available
try (NewSessionPayload payload = NewSessionPayload.create(caps)) {
distributor.newSession(createRequest(payload));
}
// Now try and create a session.
try (NewSessionPayload payload = NewSessionPayload.create(caps)) {
assertThatExceptionOfType(SessionNotCreatedException.class)
.isThrownBy(() -> distributor.newSession(createRequest(payload)));
}
}
// Stopping a session must free its slot: the session disappears from the session
// map, the distributor regains capacity, and a new session can then be created.
@Test
public void shouldReleaseSlotOnceSessionEnds() {
SessionMap sessions = new LocalSessionMap(bus);
CombinedHandler handler = new CombinedHandler();
Distributor distributor = new LocalDistributor(
tracer,
bus,
new PassthroughHttpClient.Factory(handler),
sessions);
handler.addHandler(distributor);
Node node = createNode(caps, 1, 0);
handler.addHandler(node);
distributor.add(node);
// Use up the one slot available
Session session;
try (NewSessionPayload payload = NewSessionPayload.create(caps)) {
session = distributor.newSession(createRequest(payload)).getSession();
}
// Make sure the session map has the session
sessions.get(session.getId());
node.stop(session.getId());
// Now wait for the session map to say the session is gone.
Wait<Object> wait = new FluentWait<>(new Object()).withTimeout(Duration.ofSeconds(2));
wait.until(obj -> {
try {
sessions.get(session.getId());
return false;
} catch (NoSuchSessionException e) {
// Expected: the session has been removed from the map.
return true;
}
});
wait.until(obj -> distributor.getStatus().hasCapacity());
// And we should now be able to create another session.
try (NewSessionPayload payload = NewSessionPayload.create(caps)) {
distributor.newSession(createRequest(payload));
}
}
// A request for capabilities no registered node advertises must be rejected
// with SessionNotCreatedException.
@Test
public void shouldNotStartASessionIfTheCapabilitiesAreNotSupported() {
CombinedHandler handler = new CombinedHandler();
LocalSessionMap sessions = new LocalSessionMap(bus);
// Bug fix: this previously read handler.addHandler(handler), registering the
// combined handler with itself. Register the session map instead, matching
// the setup used by every other test in this class.
handler.addHandler(sessions);
Distributor distributor = new LocalDistributor(
tracer,
bus,
new PassthroughHttpClient.Factory(handler),
sessions);
handler.addHandler(distributor);
Node node = createNode(caps, 1, 0);
handler.addHandler(node);
distributor.add(node);
// A browserName nothing advertises.
ImmutableCapabilities unmatched = new ImmutableCapabilities("browserName", "transit of venus");
try (NewSessionPayload payload = NewSessionPayload.create(unmatched)) {
assertThatExceptionOfType(SessionNotCreatedException.class)
.isThrownBy(() -> distributor.newSession(createRequest(payload)));
}
}
// If the node's session factory throws, the attempt must fail but the slot must
// be released, so the distributor still reports capacity afterwards.
// NOTE(review): the lambda parameter `caps` shadows the `caps` field (benign here).
@Test
public void attemptingToStartASessionWhichFailsMarksAsTheSlotAsAvailable() {
CombinedHandler handler = new CombinedHandler();
SessionMap sessions = new LocalSessionMap(bus);
handler.addHandler(sessions);
URI uri = createUri();
Node node = LocalNode.builder(tracer, bus, clientFactory, uri)
.add(caps, new TestSessionFactory((id, caps) -> {
throw new SessionNotCreatedException("OMG");
}))
.build();
handler.addHandler(node);
Distributor distributor = new LocalDistributor(
tracer,
bus,
new PassthroughHttpClient.Factory(handler),
sessions);
handler.addHandler(distributor);
distributor.add(node);
try (NewSessionPayload payload = NewSessionPayload.create(caps)) {
assertThatExceptionOfType(SessionNotCreatedException.class)
.isThrownBy(() -> distributor.newSession(createRequest(payload)));
}
assertThat(distributor.getStatus().hasCapacity()).isTrue();
}
// A node that was failing its health check must rejoin the pool once the check
// starts passing and the distributor refreshes.
@Test
public void shouldReturnNodesThatWereDownToPoolOfNodesOnceTheyMarkTheirHealthCheckPasses() {
CombinedHandler handler = new CombinedHandler();
SessionMap sessions = new LocalSessionMap(bus);
handler.addHandler(sessions);
// Toggled below to flip the health check result.
AtomicBoolean isUp = new AtomicBoolean(false);
URI uri = createUri();
Node node = LocalNode.builder(tracer, bus, clientFactory, uri)
.add(caps, new TestSessionFactory((id, caps) -> new Session(id, uri, caps)))
.advanced()
.healthCheck(() -> new HealthCheck.Result(isUp.get(), "TL;DR"))
.build();
handler.addHandler(node);
LocalDistributor distributor = new LocalDistributor(
tracer,
bus,
new PassthroughHttpClient.Factory(handler),
sessions);
handler.addHandler(distributor);
distributor.add(node);
// Should be unable to create a session because the node is down.
try (NewSessionPayload payload = NewSessionPayload.create(caps)) {
assertThatExceptionOfType(SessionNotCreatedException.class)
.isThrownBy(() -> distributor.newSession(createRequest(payload)));
}
// Mark the node as being up
isUp.set(true);
// Kick the machinery to ensure that everything is fine.
distributor.refresh();
// Because the node is now up and running, we should now be able to create a session
try (NewSessionPayload payload = NewSessionPayload.create(caps)) {
distributor.newSession(createRequest(payload));
}
}
/**
 * Builds {@code count} nodes, each advertising every supplied stereotype, registers
 * them with the given distributor, and returns them as a set.
 */
private Set<Node> createNodeSet(Distributor distributor, int count, Capabilities...capabilities) {
  Set<Node> created = new HashSet<>();
  for (int n = 0; n < count; n++) {
    URI uri = createUri();
    LocalNode.Builder builder = LocalNode.builder(tracer, bus, clientFactory, uri);
    for (Capabilities stereotype : capabilities) {
      builder.add(stereotype, new TestSessionFactory((id, hostCaps) -> new HandledSession(uri, hostCaps)));
    }
    Node node = builder.build();
    distributor.add(node);
    created.add(node);
  }
  return created;
}
// Scarce session types (Edge) should be protected: common sessions (Chrome/Firefox)
// should be routed away from the nodes that are the only ones able to run Edge.
@Test
public void shouldPrioritizeHostsWithTheMostSlotsAvailableForASessionType() {
//SS: Consider the case where you have 1 Windows machine and 5 linux machines. All of these hosts
// can run Chrome and Firefox sessions, but only one can run Edge sessions. Ideally, the machine
// able to run Edge would be sorted last.
//Create the Distributor
CombinedHandler handler = new CombinedHandler();
SessionMap sessions = new LocalSessionMap(bus);
handler.addHandler(sessions);
LocalDistributor distributor = new LocalDistributor(
tracer,
bus,
new PassthroughHttpClient.Factory(handler),
sessions);
handler.addHandler(distributor);
//Create all three Capability types
Capabilities edgeCapabilities = new ImmutableCapabilities("browserName", "edge");
Capabilities firefoxCapabilities = new ImmutableCapabilities("browserName", "firefox");
Capabilities chromeCapabilities = new ImmutableCapabilities("browserName", "chrome");
//TODO This should probably be a map of browser -> all nodes that support <browser>
//Store our "expected results" sets for the various browser-specific nodes
Set<Node> edgeNodes = createNodeSet(distributor, 3, edgeCapabilities, chromeCapabilities, firefoxCapabilities);
//chromeNodes is all these new nodes PLUS all the Edge nodes from before
Set<Node> chromeNodes = createNodeSet(distributor,5, chromeCapabilities, firefoxCapabilities);
chromeNodes.addAll(edgeNodes);
//all nodes support firefox, so add them to the firefoxNodes set
Set<Node> firefoxNodes = createNodeSet(distributor,3, firefoxCapabilities);
firefoxNodes.addAll(edgeNodes);
firefoxNodes.addAll(chromeNodes);
//Assign 5 Chrome and 5 Firefox sessions to the distributor, make sure they don't go to the Edge node
for (int i=0; i<5; i++) {
try (NewSessionPayload chromePayload = NewSessionPayload.create(chromeCapabilities);
NewSessionPayload firefoxPayload = NewSessionPayload.create(firefoxCapabilities)) {
Session chromeSession = distributor.newSession(createRequest(chromePayload)).getSession();
assertThat( //Ensure the Uri of the Session matches one of the Chrome Nodes, not the Edge Node
chromeSession.getUri()).isIn(
chromeNodes
.stream().map(Node::getStatus).collect(Collectors.toList()) //List of getStatus() from the Set
.stream().map(NodeStatus::getUri).collect(Collectors.toList()) //List of getUri() from the Set
);
Session firefoxSession = distributor.newSession(createRequest(firefoxPayload)).getSession();
// Bug fix: this log line previously printed chromeSession.getUri() while claiming
// to describe the Firefox session; report the Firefox session's URI instead.
LOG.info(String.format("Firefox Session %d assigned to %s", i, firefoxSession.getUri()));
boolean inFirefoxNodes = firefoxNodes.stream().anyMatch(node -> node.getUri().equals(firefoxSession.getUri()));
boolean inChromeNodes = chromeNodes.stream().anyMatch(node -> node.getUri().equals(chromeSession.getUri()));
//This could be either, or, or both
assertTrue(inFirefoxNodes || inChromeNodes);
}
}
//The Chrome Nodes should be full at this point, but Firefox isn't... so send an Edge session and make sure it routes to an Edge node
try (NewSessionPayload edgePayload = NewSessionPayload.create(edgeCapabilities)) {
Session edgeSession = distributor.newSession(createRequest(edgePayload)).getSession();
assertTrue(edgeNodes.stream().anyMatch(node -> node.getUri().equals(edgeSession.getUri())));
}
}
/**
 * Creates a node with {@code count} identical slots for {@code stereotype}, then
 * starts {@code currentLoad} throwaway sessions on it so tests can simulate load.
 */
private Node createNode(Capabilities stereotype, int count, int currentLoad) {
  URI uri = createUri();
  LocalNode.Builder builder = LocalNode.builder(tracer, bus, clientFactory, uri);
  for (int slot = 0; slot < count; slot++) {
    builder.add(stereotype, new TestSessionFactory((id, c) -> new HandledSession(uri, c)));
  }
  LocalNode node = builder.build();
  // Occupy slots purely to create load; the resulting sessions are never used.
  for (int used = 0; used < currentLoad; used++) {
    node.newSession(new CreateSessionRequest(
        ImmutableSet.copyOf(Dialect.values()),
        stereotype,
        ImmutableMap.of()));
  }
  return node;
}
// Placeholder test, deliberately ignored until the behaviour is implemented.
@Test
@Ignore
public void shouldCorrectlySetSessionCountsWhenStartedAfterNodeWithSession() {
fail("write me!");
}
// A distributor whose only node fails its health check must report no capacity.
@Test
public void statusShouldIndicateThatDistributorIsNotAvailableIfNodesAreDown()
throws URISyntaxException {
Capabilities capabilities = new ImmutableCapabilities("cheese", "peas");
URI uri = new URI("http://example.com");
Node node = LocalNode.builder(tracer, bus, clientFactory, uri)
.add(capabilities, new TestSessionFactory((id, caps) -> new Session(id, uri, caps)))
.advanced()
.healthCheck(() -> new HealthCheck.Result(false, "TL;DR"))
.build();
local.add(node);
DistributorStatus status = local.getStatus();
assertFalse(status.hasCapacity());
}
/** Serialises the payload into a POST request aimed at the distributor's new-session endpoint. */
private HttpRequest createRequest(NewSessionPayload payload) {
  StringBuilder serialised = new StringBuilder();
  try {
    payload.writeTo(serialised);
  } catch (IOException e) {
    throw new UncheckedIOException(e);
  }
  HttpRequest request = new HttpRequest(POST, "/se/grid/distributor/session");
  request.setContent(utf8String(serialised.toString()));
  return request;
}
/** Builds a localhost URI on a free port; the URISyntaxException cannot realistically occur. */
private URI createUri() {
  String spec = "http://localhost:" + PortProber.findFreePort();
  try {
    return new URI(spec);
  } catch (URISyntaxException e) {
    throw new RuntimeException(e);
  }
}
// A Session that also implements HttpHandler so it can be registered with
// CombinedHandler; any HTTP traffic routed to it gets an empty response.
class HandledSession extends Session implements HttpHandler {
HandledSession(URI uri, Capabilities caps) {
// Random session id; tests only care about the URI and capabilities.
super(new SessionId(UUID.randomUUID()), uri, caps);
}
@Override
public HttpResponse execute(HttpRequest req) throws UncheckedIOException {
// no-op
return new HttpResponse();
}
}
}
| 1 | 17,126 | Since this is in a test, I imagine that the choice of `info` level was deliberate. | SeleniumHQ-selenium | java |
@@ -3,9 +3,10 @@ package langserver
import (
"context"
"github.com/stretchr/testify/assert"
- "strings"
"testing"
+ "core"
+ "strings"
"tools/build_langserver/lsp"
)
| 1 | package langserver
import (
"context"
"github.com/stretchr/testify/assert"
"strings"
"testing"
"tools/build_langserver/lsp"
)
// BUILD-file fixtures exercised by the completion tests below.
var completionURI = lsp.DocumentURI("file://tools/build_langserver/langserver/test_data/completion.build")
var completionPropURI = lsp.DocumentURI("file://tools/build_langserver/langserver/test_data/completion_props.build")
var completionLabelURI = lsp.DocumentURI("file://tools/build_langserver/langserver/test_data/completion_buildlabels.build")
// TestCompletionWithCONFIG exercises completion of CONFIG properties: with no
// prefix, a one-character prefix, a longer prefix, an assignment context, and
// positions where no completions should be offered.
func TestCompletionWithCONFIG(t *testing.T) {
ctx := context.Background()
// Test completion on CONFIG with no starting character
items, err := getCompletionItemsList(ctx, analyzer, true,
completionPropURI, lsp.Position{Line: 0, Character: 6})
assert.Equal(t, nil, err)
assert.Equal(t, len(analyzer.State.Config.TagsToFields()), len(items))
for _, i := range items {
assert.Equal(t, i.Kind, lsp.Property)
}
// Test completion on CONFIG with 1 starting character
items, err = getCompletionItemsList(ctx, analyzer, true,
completionPropURI, lsp.Position{Line: 1, Character: 7})
assert.Equal(t, nil, err)
assert.True(t, len(analyzer.State.Config.TagsToFields()) > len(items))
assert.True(t, itemInList(items, "JARCAT_TOOL"))
assert.False(t, itemInList(items, "PLZ_VERSION"))
// Test completion on CONFIG with a word
items, err = getCompletionItemsList(ctx, analyzer, true,
completionPropURI, lsp.Position{Line: 2, Character: 10})
assert.Equal(t, nil, err)
assert.True(t, len(analyzer.State.Config.TagsToFields()) > len(items))
assert.True(t, itemInList(items, "JAVAC_TOOL"))
for _, i := range items {
assert.True(t, strings.Contains(i.Label, "JAVA"))
}
// Test completion with assignment
items, err = getCompletionItemsList(ctx, analyzer, true,
completionPropURI, lsp.Position{Line: 3, Character: 17})
assert.Equal(t, nil, err)
assert.True(t, len(analyzer.State.Config.TagsToFields()) > len(items))
assert.True(t, itemInList(items, "JAVAC_TOOL"))
for _, i := range items {
assert.True(t, strings.Contains(i.Label, "JAVA"))
}
// Test completion on empty line
items, err = getCompletionItemsList(ctx, analyzer, true,
completionPropURI, lsp.Position{Line: 5, Character: 13})
assert.Equal(t, nil, err)
assert.Equal(t, 0, len(items))
// Test config should be empty
items, err = getCompletionItemsList(ctx, analyzer, true,
completionPropURI, lsp.Position{Line: 4, Character: 13})
assert.Equal(t, nil, err)
assert.Equal(t, 0, len(items))
}
// TestCompletionWithStringMethods exercises completion of string attribute
// methods after a dot, with prefixes of increasing length and in an assignment.
func TestCompletionWithStringMethods(t *testing.T) {
ctx := context.Background()
// Tests completion on no letters follows after dot(.)
items, err := getCompletionItemsList(ctx, analyzer, true,
completionPropURI, lsp.Position{Line: 6, Character: 18})
assert.Equal(t, nil, err)
assert.Equal(t, len(analyzer.Attributes["str"]), len(items))
assert.True(t, itemInList(items, "replace"))
assert.True(t, itemInList(items, "format"))
for _, i := range items {
assert.Equal(t, i.Kind, lsp.Function)
}
// Test completion with 1 starting character: f
items, err = getCompletionItemsList(ctx, analyzer, true,
completionPropURI, lsp.Position{Line: 7, Character: 19})
assert.Equal(t, nil, err)
assert.True(t, itemInList(items, "format"))
assert.True(t, itemInList(items, "find"))
assert.True(t, itemInList(items, "rfind"))
// Test completion with a three letters: for
items, err = getCompletionItemsList(ctx, analyzer, true,
completionPropURI, lsp.Position{Line: 8, Character: 21})
assert.Equal(t, nil, err)
assert.Equal(t, 1, len(items))
assert.Equal(t, "format", items[0].Label)
// Test completion with assignment
items, err = getCompletionItemsList(ctx, analyzer, true,
completionPropURI, lsp.Position{Line: 9, Character: 18})
assert.Equal(t, nil, err)
assert.Equal(t, 1, len(items))
assert.Equal(t, "format", items[0].Label)
}
// TestCompletionWithDictMethods exercises completion of dict attribute methods
// after a dot, including the InsertText produced for methods with arguments.
func TestCompletionWithDictMethods(t *testing.T) {
ctx := context.Background()
// Tests completion on no letters follows after dot(.)
items, err := getCompletionItemsList(ctx, analyzer, true,
completionPropURI, lsp.Position{Line: 11, Character: 24})
assert.Equal(t, nil, err)
assert.Equal(t, len(analyzer.Attributes["dict"]), len(items))
assert.True(t, itemInList(items, "get"))
assert.True(t, itemInList(items, "keys"))
assert.True(t, itemInList(items, "items"))
for _, i := range items {
assert.Equal(t, i.Kind, lsp.Function)
}
// Test completion with 1 starting character: k
items, err = getCompletionItemsList(ctx, analyzer, true,
completionPropURI, lsp.Position{Line: 12, Character: 15})
assert.Equal(t, nil, err)
assert.Equal(t, 1, len(items))
assert.Equal(t, "keys", items[0].Label)
assert.Equal(t, "keys()", items[0].InsertText)
// Test completion with a three letters: get
items, err = getCompletionItemsList(ctx, analyzer, true,
completionPropURI, lsp.Position{Line: 13, Character: 17})
assert.Equal(t, nil, err)
assert.Equal(t, 1, len(items))
assert.Equal(t, "get", items[0].Label)
assert.Equal(t, "get(key)", items[0].InsertText)
}
// TestCompletionWithBuildLabels exercises completion of build labels: a package
// prefix should suggest matching packages, and a target prefix a single target.
func TestCompletionWithBuildLabels(t *testing.T) {
ctx := context.Background()
items, err := getCompletionItemsList(ctx, analyzer, true,
completionLabelURI, lsp.Position{Line: 0, Character: 4})
assert.Equal(t, nil, err)
assert.True(t, itemInList(items, "//src/cache"))
for _, i := range items {
assert.True(t, strings.HasPrefix(i.Label, "//src"))
}
items, err = getCompletionItemsList(ctx, analyzer, true,
completionLabelURI, lsp.Position{Line: 1, Character: 12})
assert.Equal(t, nil, err)
assert.Equal(t, 1, len(items))
assert.Equal(t, "//src/query:query", items[0].Label)
}
// TestCompletionIncompleteFile currently only logs the parse result of an
// incomplete file and makes no assertions.
// NOTE(review): turn the logged values into real assertions once the expected
// behaviour for incomplete files is decided.
func TestCompletionIncompleteFile(t *testing.T) {
//TODO(BNM)
stmt, err := analyzer.AspStatementFromFile(completionURI)
t.Log(stmt)
t.Log(err)
}
/***************************************
* Helpers
***************************************/
// itemInList reports whether any completion item in itemList carries the given label.
func itemInList(itemList []*lsp.CompletionItem, targetLabel string) bool {
	found := false
	for i := 0; !found && i < len(itemList); i++ {
		found = itemList[i].Label == targetLabel
	}
	return found
}
| 1 | 8,545 | this should probably be named somewhere if you want to reuse it. | thought-machine-please | go |
@@ -191,6 +191,14 @@ const (
// path where we mount in the SSH key for connecting to the bare metal libvirt provisioning host.
LibvirtSSHPrivKeyPathEnvVar = "LIBVIRT_SSH_PRIV_KEY_PATH"
+ // BoundServiceAccountSigningKeyEnvVar contains the path to the bound service account signing key and
+ // is set in the install pod for AWS STS clusters.
+ BoundServiceAccountSigningKeyEnvVar = "AWS_BOUND_SA_SIGNING_KEY"
+
+ // BoundServiceAccountSigningKeyFile is the Secret key and projected filename where an AWS STS
+ // SA signing key will be projected into the install pod.
+ BoundServiceAccountSigningKeyFile = "bound-service-account-signing-key.key"
+
// FakeClusterInstallEnvVar is the environment variable Hive will set for the installmanager pod to request
// a fake install.
FakeClusterInstallEnvVar = "FAKE_INSTALL" | 1 | package constants
import (
apihelpers "github.com/openshift/hive/apis/helpers"
hivev1 "github.com/openshift/hive/apis/hive/v1"
)
const (
PlatformAWS = "aws"
PlatformAzure = "azure"
PlatformBaremetal = "baremetal"
PlatformAgentBaremetal = "agent-baremetal"
PlatformGCP = "gcp"
PlatformOpenStack = "openstack"
PlatformUnknown = "unknown"
PlatformVSphere = "vsphere"
mergedPullSecretSuffix = "merged-pull-secret"
// VeleroBackupEnvVar is the name of the environment variable used to tell the controller manager to enable velero backup integration.
VeleroBackupEnvVar = "HIVE_VELERO_BACKUP"
// VeleroNamespaceEnvVar is the name of the environment variable used to tell the controller manager which namespace velero backup objects should be created in.
VeleroNamespaceEnvVar = "HIVE_VELERO_NAMESPACE"
// DeprovisionsDisabledEnvVar is the name of the environment variable used to tell the controller manager to skip
// processing of any ClusterDeprovisions.
DeprovisionsDisabledEnvVar = "DEPROVISIONS_DISABLED"
// MinBackupPeriodSecondsEnvVar is the name of the environment variable used to tell the controller manager the minimum period of time between backups.
MinBackupPeriodSecondsEnvVar = "HIVE_MIN_BACKUP_PERIOD_SECONDS"
// InstallJobLabel is the label used for artifacts specific to Hive cluster installations.
InstallJobLabel = "hive.openshift.io/install"
// UninstallJobLabel is the label used for artifacts specific to Hive cluster deprovision.
UninstallJobLabel = "hive.openshift.io/uninstall"
// MachinePoolNameLabel is the label that is used to identify the MachinePool which owns a particular resource.
MachinePoolNameLabel = "hive.openshift.io/machine-pool-name"
// ClusterDeploymentNameLabel is the label that is used to identify a relationship to a given cluster deployment object.
ClusterDeploymentNameLabel = "hive.openshift.io/cluster-deployment-name"
// ClusterDeprovisionNameLabel is the label that is used to identify a relationship to a given cluster deprovision object.
ClusterDeprovisionNameLabel = "hive.openshift.io/cluster-deprovision-name"
// ClusterProvisionNameLabel is the label that is used to identify a relationship to a given cluster provision object.
ClusterProvisionNameLabel = "hive.openshift.io/cluster-provision-name"
// ClusterPoolNameLabel is the label that is used to signal that a namespace was created to house a
// ClusterDeployment created for a ClusterPool. The label is used to reap namespaces after the ClusterDeployment
// has been deleted.
ClusterPoolNameLabel = "hive.openshift.io/cluster-pool-name"
// SyncSetNameLabel is the label that is used to identify a relationship to a given syncset object.
SyncSetNameLabel = "hive.openshift.io/syncset-name"
// SelectorSyncSetNameLabel is the label that is used to identify a relationship to a given selector syncset object.
SelectorSyncSetNameLabel = "hive.openshift.io/selector-syncset-name"
// PVCTypeLabel is the label that is used to identify what a PVC is being used for.
PVCTypeLabel = "hive.openshift.io/pvc-type"
// PVCTypeInstallLogs is used as a value of PVCTypeLabel that says the PVC specifically stores installer logs.
PVCTypeInstallLogs = "installlogs"
// JobTypeLabel is the label that is used to identify what a Job is being used for.
JobTypeLabel = "hive.openshift.io/job-type"
// JobTypeImageSet is used as a value of JobTypeLabel that says the Job is specifically running to determine which imageset to use.
JobTypeImageSet = "imageset"
// JobTypeDeprovision is used as a value of JobTypeLabel that says the Job is specifically running the deprovisioner.
JobTypeDeprovision = "deprovision"
// JobTypeProvision is used as a value of JobTypeLabel that says the Job is specifically running the provisioner.
JobTypeProvision = "provision"
// DNSZoneTypeLabel is the label that is used to identify what a DNSZone is being used for.
DNSZoneTypeLabel = "hive.openshift.io/dnszone-type"
// DNSZoneTypeChild is used as a value of DNSZoneTypeLabel that says the DNSZone is specifically used as the forwarding zone for the target cluster.
DNSZoneTypeChild = "child"
// SecretTypeLabel is the label that is used to identify what a Secret is being used for.
SecretTypeLabel = "hive.openshift.io/secret-type"
// SecretTypeMergedPullSecret is used as a value of SecretTypeLabel that says the secret is specifically used for storing a pull secret.
SecretTypeMergedPullSecret = "merged-pull-secret"
// SecretTypeKubeConfig is used as a value of SecretTypeLabel that says the secret is specifically used for storing a kubeconfig.
SecretTypeKubeConfig = "kubeconfig"
// SecretTypeKubeAdminCreds is used as a value of SecretTypeLabel that says the secret is specifically used for storing kubeadmin credentials.
SecretTypeKubeAdminCreds = "kubeadmincreds"
// SyncSetTypeLabel is the label that is used to identify what a SyncSet is being used for.
SyncSetTypeLabel = "hive.openshift.io/syncset-type"
// SyncSetTypeControlPlaneCerts is used as a value of SyncSetTypeLabel that says the syncset is specifically used to distribute control plane certificates.
SyncSetTypeControlPlaneCerts = "controlplanecerts"
// SyncSetTypeRemoteIngress is used as a value of SyncSetTypeLabel that says the syncset is specifically used to distribute remote ingress information.
SyncSetTypeRemoteIngress = "remoteingress"
// SyncSetTypeIdentityProvider is used as a value of SyncSetTypeLabel that says the syncset is specifically used to distribute identity provider information.
SyncSetTypeIdentityProvider = "identityprovider"
// GlobalPullSecret is the environment variable for controllers to get the global pull secret
GlobalPullSecret = "GLOBAL_PULL_SECRET"
// DefaultHiveNamespace is the default namespace where core hive components will run. It is used if the environment variable is not defined.
DefaultHiveNamespace = "hive"
// HiveNamespaceEnvVar is the environment variable for the namespace where the core hive-controllers and hiveadmission will run.
// This is set on the deployments by the hive-operator which deploys them, based on the targetNamespace defined in HiveConfig.
// The default is defined above.
HiveNamespaceEnvVar = "HIVE_NS"
// CheckpointName is the name of the object in each namespace in which the namespace's backup information is stored.
CheckpointName = "hive"
// SyncsetPauseAnnotation is a annotation used by clusterDeployment, if it's true, then we will disable syncing to a specific cluster
SyncsetPauseAnnotation = "hive.openshift.io/syncset-pause"
// HiveManagedLabel is a label added to any resources we sync to the remote cluster to help identify that they are
// managed by Hive, and any manual changes may be undone the next time the resource is reconciled.
HiveManagedLabel = "hive.openshift.io/managed"
// DisableInstallLogPasswordRedactionAnnotation is an annotation used on ClusterDeployments to disable the installmanager
// functionality which refuses to print output if it appears to contain a password or sensitive info. This can be
// useful in scenarios where debugging is needed and important info is being redacted. Set to "true".
DisableInstallLogPasswordRedactionAnnotation = "hive.openshift.io/disable-install-log-password-redaction"
// PauseOnInstallFailureAnnotation is an annotation used on ClusterDeployments to trigger a sleep after an install
// failure for the specified duration. This will keep the install pod running and allow a user to rsh in for debug
// purposes. Examples: "1h", "20m".
PauseOnInstallFailureAnnotation = "hive.openshift.io/pause-on-install-failure"
// WaitForInstallCompleteExecutionsAnnotation is an annotation used on ClusterDeployments to set additional waits
// for the cluster provision to complete by running `openshift-install wait-for install-complete` command.
WaitForInstallCompleteExecutionsAnnotation = "hive.openshift.io/wait-for-install-complete-executions"
// ProtectedDeleteAnnotation is an annotation used on ClusterDeployments to indicate that the ClusterDeployment
// cannot be deleted. The annotation must be removed in order to delete the ClusterDeployment.
ProtectedDeleteAnnotation = "hive.openshift.io/protected-delete"
// ProtectedDeleteEnvVar is the name of the environment variable used to tell the controller manager whether
// protected delete is enabled.
ProtectedDeleteEnvVar = "PROTECTED_DELETE"
// RelocateAnnotation is an annotation used on ClusterDeployments and DNSZones to indicate that the resource
// is involved in a relocation between Hive instances.
// The value of the annotation has the format "{ClusterRelocate}/{Status}", where
// {ClusterRelocate} is the name of the ClusterRelocate that is driving the relocation and
// {Status} is the status of the relocate. The status is outgoing, completed, or incoming.
// An outgoing status indicates that the resource is on the source side of an in-progress relocate.
// A completed status indicates that the resource is on the source side of a completed relocate.
// An incoming status indicates that the resource is on the destination side of an in-progress relocate.
RelocateAnnotation = "hive.openshift.io/relocate"
// ManagedDomainsFileEnvVar if present, points to a simple text
// file that includes a valid managed domain per line. Cluster deployments
// requesting that their domains be managed must have a base domain
// that is a direct child of one of the valid domains.
ManagedDomainsFileEnvVar = "MANAGED_DOMAINS_FILE"
// ManagedDomainsVolumeName is the name of the volume that will point
// to the configmap containing the managed domain configuration.
ManagedDomainsVolumeName = "managed-domains"
// GCPCredentialsName is the name of the GCP credentials file or secret key.
GCPCredentialsName = "osServiceAccount.json"
// AzureCredentialsName is the name of the Azure credentials file or secret key.
AzureCredentialsName = "osServicePrincipal.json"
// AzureCredentialsEnvVar is the name of the environment variable pointing to the location
// where Azure credentials can be found.
AzureCredentialsEnvVar = "AZURE_AUTH_LOCATION"
// OpenStackCredentialsName is the name of the OpenStack credentials file.
OpenStackCredentialsName = "clouds.yaml"
// SSHPrivKeyPathEnvVar is the environment variable Hive will set for the installmanager pod to point to the
// path where we mount in the SSH key to be configured on the cluster hosts.
SSHPrivKeyPathEnvVar = "SSH_PRIV_KEY_PATH"
// LibvirtSSHPrivKeyPathEnvVar is the environment variable Hive will set for the installmanager pod to point to the
// path where we mount in the SSH key for connecting to the bare metal libvirt provisioning host.
LibvirtSSHPrivKeyPathEnvVar = "LIBVIRT_SSH_PRIV_KEY_PATH"
// FakeClusterInstallEnvVar is the environment variable Hive will set for the installmanager pod to request
// a fake install.
FakeClusterInstallEnvVar = "FAKE_INSTALL"
// ControlPlaneCertificateSuffix is the suffix used when naming objects having to do control plane certificates.
ControlPlaneCertificateSuffix = "cp-certs"
// ClusterIngressSuffix is the suffix used when naming objects having to do with cluster ingress.
ClusterIngressSuffix = "clusteringress"
// IdentityProviderSuffix is the suffix used when naming objects having to do with identity provider
IdentityProviderSuffix = "idp"
// KubeconfigSecretKey is the key used inside of a secret containing a kubeconfig
KubeconfigSecretKey = "kubeconfig"
// UsernameSecretKey is a key used to store a username inside of a secret containing username / password credentials
UsernameSecretKey = "username"
// PasswordSecretKey is a key used to store a password inside of a secret containing username / password credentials
PasswordSecretKey = "password"
// AWSRoute53Region is the region to use for route53 operations.
AWSRoute53Region = "us-east-1"
// AWSChinaRoute53Region is the region to use for AWS China route53 operations.
AWSChinaRoute53Region = "cn-northwest-1"
// AWSChinaRegionPrefix is the prefix for regions in AWS China.
AWSChinaRegionPrefix = "cn-"
// SSHPrivateKeySecretKey is the key we use in a Kubernetes Secret containing an SSH private key.
SSHPrivateKeySecretKey = "ssh-privatekey"
// RawKubeconfigSecretKey is the key we use in a Kubernetes Secret containing the raw (unmodified) form of
// an admin kubeconfig. (before Hive injects things such as additional CAs)
RawKubeconfigSecretKey = "raw-kubeconfig"
// AWSAccessKeyIDSecretKey is the key we use in a Kubernetes Secret containing AWS credentials for the access key ID.
AWSAccessKeyIDSecretKey = "aws_access_key_id"
// AWSSecretAccessKeySecretKey is the key we use in a Kubernetes Secret containing AWS credentials for the access key ID.
AWSSecretAccessKeySecretKey = "aws_secret_access_key"
// TLSCrtSecretKey is the key we use in a Kubernetes Secret containing a TLS certificate.
TLSCrtSecretKey = "tls.crt"
// TLSKeySecretKey is the key we use in a Kubernetes Secret containing a TLS certificate key.
TLSKeySecretKey = "tls.key"
// VSphereUsernameEnvVar is the environent variable specifying the vSphere username.
VSphereUsernameEnvVar = "GOVC_USERNAME"
// VSpherePasswordEnvVar is the environment variable specifying the vSphere password.
VSpherePasswordEnvVar = "GOVC_PASSWORD"
// VSphereVCenterEnvVar is the environment variable specifying the vSphere vCenter host.
VSphereVCenterEnvVar = "GOVC_HOST"
// VSphereTLSCACertsEnvVar is the environment variable containing : delimited paths to vSphere CA certificates.
VSphereTLSCACertsEnvVar = "GOVC_TLS_CA_CERTS"
// VSphereNetworkEnvVar is the environment variable specifying the vSphere network.
VSphereNetworkEnvVar = "GOVC_NETWORK"
// VSphereDataCenterEnvVar is the environment variable specifying the vSphere datacenter.
VSphereDataCenterEnvVar = "GOVC_DATACENTER"
// VSphereDataStoreEnvVar is the environment variable specifying the vSphere default datastore.
VSphereDataStoreEnvVar = "GOVC_DATASTORE"
// VersionMajorLabel is a label applied to ClusterDeployments to show the version of the cluster
// in the form "[MAJOR]".
VersionMajorLabel = "hive.openshift.io/version-major"
// VersionMajorMinorLabel is a label applied to ClusterDeployments to show the version of the cluster
// in the form "[MAJOR].[MINOR]".
VersionMajorMinorLabel = "hive.openshift.io/version-major-minor"
// VersionMajorMinorPatchLabel is a label applied to ClusterDeployments to show the version of the cluster
// in the form "[MAJOR].[MINOR].[PATCH]".
VersionMajorMinorPatchLabel = "hive.openshift.io/version-major-minor-patch"
// OvirtCredentialsName is the name of the oVirt credentials file.
OvirtCredentialsName = "ovirt-config.yaml"
// OvirtConfigEnvVar is the environment variable specifying the oVirt config path
OvirtConfigEnvVar = "OVIRT_CONFIG"
// AWSCredsMount is the location where the AWS credentials secret is mounted for uninstall pods.
AWSCredsMount = "/etc/aws-creds"
// InstallLogsUploadProviderEnvVar is used to specify which object store provider is being used.
InstallLogsUploadProviderEnvVar = "HIVE_INSTALL_LOGS_UPLOAD_PROVIDER"
// InstallLogsUploadProviderAWS is used to specify that AWS is the cloud provider to upload logs to.
InstallLogsUploadProviderAWS = "aws"
// InstallLogsCredentialsSecretRefEnvVar is the environment variable specifying what secret to use for storing logs.
InstallLogsCredentialsSecretRefEnvVar = "HIVE_INSTALL_LOGS_CREDENTIALS_SECRET"
// InstallLogsAWSRegionEnvVar is the environment variable specifying the region to use with S3
InstallLogsAWSRegionEnvVar = "HIVE_INSTALL_LOGS_AWS_REGION"
// InstallLogsAWSServiceEndpointEnvVar is the environment variable specifying the S3 endpoint to use.
InstallLogsAWSServiceEndpointEnvVar = "HIVE_INSTALL_LOGS_AWS_S3_URL"
// InstallLogsAWSS3BucketEnvVar is the environment variable specifying the S3 bucket to use.
InstallLogsAWSS3BucketEnvVar = "HIVE_INSTALL_LOGS_AWS_S3_BUCKET"
// HiveFakeClusterAnnotation can be set to true on a cluster deployment to create a fake cluster that never
// provisions resources, and all communication with the cluster will be faked.
HiveFakeClusterAnnotation = "hive.openshift.io/fake-cluster"
// ReconcileIDLen is the length of the random strings we generate for contextual loggers in controller
// Reconcile functions.
ReconcileIDLen = 8
// SyncSetMetricsGroupAnnotation can be applied to non-selector SyncSets to make them part of a
// group for which first applied metrics can be reported
SyncSetMetricsGroupAnnotation = "hive.openshift.io/syncset-metrics-group"
// ClusterClaimRemoveClusterAnnotation is used by the cluster claim controller to mark that the cluster
// that are previously claimed is no longer required and therefore should be removed/deprovisioned and removed
// from the pool.
ClusterClaimRemoveClusterAnnotation = "hive.openshift.io/remove-claimed-cluster-from-pool"
// HiveFeatureGatesEnabledEnvVar is the the environment variable specifying the comma separated list of
// feature gates that are enabled.
HiveFeatureGatesEnabledEnvVar = "HIVE_FEATURE_GATES_ENABLED"
// MachineManagementAnnotation
MachineManagementAnnotation = "hive.openshift.io/machine-management-cluster-name"
)
// GetMergedPullSecretName returns name for merged pull secret name per cluster deployment
func GetMergedPullSecretName(cd *hivev1.ClusterDeployment) string {
return apihelpers.GetResourceName(cd.Name, mergedPullSecretSuffix)
}
| 1 | 17,243 | needs a rename to not include AWS | openshift-hive | go |
@@ -15,13 +15,11 @@
*******************************************************************************/
#include "oneapi/dal/algo/rbf_kernel/backend/gpu/compute_kernel.hpp"
-#include "oneapi/dal/backend/interop/common_dpc.hpp"
-#include "oneapi/dal/backend/interop/table_conversion.hpp"
-
+#include "oneapi/dal/backend/primitives/reduction.hpp"
+#include "oneapi/dal/backend/primitives/blas.hpp"
+#include "oneapi/dal/backend/math.hpp"
#include "oneapi/dal/table/row_accessor.hpp"
-#include <daal/src/algorithms/kernel_function/oneapi/kernel_function_rbf_kernel_oneapi.h>
-
namespace oneapi::dal::rbf_kernel::backend {
using dal::backend::context_gpu; | 1 | /*******************************************************************************
* Copyright 2020-2021 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include "oneapi/dal/algo/rbf_kernel/backend/gpu/compute_kernel.hpp"
#include "oneapi/dal/backend/interop/common_dpc.hpp"
#include "oneapi/dal/backend/interop/table_conversion.hpp"
#include "oneapi/dal/table/row_accessor.hpp"
#include <daal/src/algorithms/kernel_function/oneapi/kernel_function_rbf_kernel_oneapi.h>
namespace oneapi::dal::rbf_kernel::backend {
using dal::backend::context_gpu;
using input_t = compute_input<task::compute>;
using result_t = compute_result<task::compute>;
using descriptor_t = detail::descriptor_base<task::compute>;
namespace daal_rbf_kernel = daal::algorithms::kernel_function::rbf;
namespace interop = dal::backend::interop;
template <typename Float>
using daal_rbf_kernel_t =
daal_rbf_kernel::internal::KernelImplRBFOneAPI<daal_rbf_kernel::defaultDense, Float>;
template <typename Float>
static result_t call_daal_kernel(const context_gpu& ctx,
const descriptor_t& desc,
const table& x,
const table& y) {
auto& queue = ctx.get_queue();
interop::execution_context_guard guard(queue);
const int64_t row_count_x = x.get_row_count();
const int64_t row_count_y = y.get_row_count();
dal::detail::check_mul_overflow(row_count_x, row_count_y);
auto arr_values =
array<Float>::empty(queue, row_count_x * row_count_y, sycl::usm::alloc::device);
const auto daal_x = interop::convert_to_daal_table(queue, x);
const auto daal_y = interop::convert_to_daal_table(queue, y);
const auto daal_values =
interop::convert_to_daal_table(queue, arr_values, row_count_x, row_count_y);
daal_rbf_kernel::Parameter daal_parameter(desc.get_sigma());
daal_rbf_kernel_t<Float>().compute(daal_x.get(),
daal_y.get(),
daal_values.get(),
&daal_parameter);
return result_t().set_values(
dal::detail::homogen_table_builder{}.reset(arr_values, row_count_x, row_count_y).build());
}
template <typename Float>
static result_t compute(const context_gpu& ctx, const descriptor_t& desc, const input_t& input) {
return call_daal_kernel<Float>(ctx, desc, input.get_x(), input.get_y());
}
template <typename Float>
struct compute_kernel_gpu<Float, method::dense, task::compute> {
result_t operator()(const context_gpu& ctx,
const descriptor_t& desc,
const input_t& input) const {
return compute<Float>(ctx, desc, input);
}
};
template struct compute_kernel_gpu<float, method::dense, task::compute>;
template struct compute_kernel_gpu<double, method::dense, task::compute>;
} // namespace oneapi::dal::rbf_kernel::backend
| 1 | 29,027 | Isn't sigma have `double` type? | oneapi-src-oneDAL | cpp |
@@ -977,6 +977,7 @@ TEST (rpc, wallet_create_seed)
nano::system system (24000, 1);
scoped_io_thread_name_change scoped_thread_name_io;
nano::raw_key seed;
+ seed.data = 1;
auto prv = nano::deterministic_key (seed, 0);
auto pub (nano::pub_key (prv));
auto node = system.nodes.front (); | 1 | #include <nano/core_test/testutil.hpp>
#include <nano/lib/ipc.hpp>
#include <nano/lib/rpcconfig.hpp>
#include <nano/lib/timer.hpp>
#include <nano/node/ipc.hpp>
#include <nano/node/json_handler.hpp>
#include <nano/node/node_rpc_config.hpp>
#include <nano/node/testing.hpp>
#include <nano/rpc/rpc.hpp>
#include <nano/rpc/rpc_request_processor.hpp>
#include <gtest/gtest.h>
#include <boost/beast.hpp>
#include <algorithm>
using namespace std::chrono_literals;
namespace
{
class test_response
{
public:
test_response (boost::property_tree::ptree const & request_a, boost::asio::io_context & io_ctx) :
request (request_a),
sock (io_ctx)
{
}
test_response (boost::property_tree::ptree const & request_a, uint16_t port, boost::asio::io_context & io_ctx) :
request (request_a),
sock (io_ctx)
{
run (port);
}
void run (uint16_t port)
{
sock.async_connect (nano::tcp_endpoint (boost::asio::ip::address_v6::loopback (), port), [this](boost::system::error_code const & ec) {
if (!ec)
{
std::stringstream ostream;
boost::property_tree::write_json (ostream, request);
req.method (boost::beast::http::verb::post);
req.target ("/");
req.version (11);
ostream.flush ();
req.body () = ostream.str ();
req.prepare_payload ();
boost::beast::http::async_write (sock, req, [this](boost::system::error_code const & ec, size_t bytes_transferred) {
if (!ec)
{
boost::beast::http::async_read (sock, sb, resp, [this](boost::system::error_code const & ec, size_t bytes_transferred) {
if (!ec)
{
std::stringstream body (resp.body ());
try
{
boost::property_tree::read_json (body, json);
status = 200;
}
catch (std::exception &)
{
status = 500;
}
}
else
{
status = 400;
};
});
}
else
{
status = 600;
}
});
}
else
{
status = 400;
}
});
}
boost::property_tree::ptree const & request;
boost::asio::ip::tcp::socket sock;
boost::property_tree::ptree json;
boost::beast::flat_buffer sb;
boost::beast::http::request<boost::beast::http::string_body> req;
boost::beast::http::response<boost::beast::http::string_body> resp;
std::atomic<int> status{ 0 };
};
void enable_ipc_transport_tcp (nano::ipc::ipc_config_tcp_socket & transport_tcp, uint16_t ipc_port)
{
transport_tcp.enabled = true;
transport_tcp.port = ipc_port;
}
void enable_ipc_transport_tcp (nano::ipc::ipc_config_tcp_socket & transport_tcp)
{
static nano::network_constants network_constants;
enable_ipc_transport_tcp (transport_tcp, network_constants.default_ipc_port);
}
void reset_confirmation_height (nano::block_store & store, nano::account const & account)
{
auto transaction = store.tx_begin_write ();
uint64_t confirmation_height;
store.confirmation_height_get (transaction, account, confirmation_height);
store.confirmation_height_clear (transaction, account, confirmation_height);
}
void check_block_response_count (nano::system & system, nano::rpc & rpc, boost::property_tree::ptree & request, uint64_t size_count)
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
ASSERT_EQ (size_count, response.json.get_child ("blocks").front ().second.size ());
}
class scoped_io_thread_name_change
{
public:
scoped_io_thread_name_change ()
{
renew ();
}
~scoped_io_thread_name_change ()
{
reset ();
}
void reset ()
{
nano::thread_role::set (nano::thread_role::name::unknown);
}
void renew ()
{
nano::thread_role::set (nano::thread_role::name::io);
}
};
}
TEST (rpc, account_balance)
{
nano::system system (24000, 1);
scoped_io_thread_name_change scoped_thread_name_io;
auto node = system.nodes.front ();
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "account_balance");
request.put ("account", nano::test_genesis_key.pub.to_account ());
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::string balance_text (response.json.get<std::string> ("balance"));
ASSERT_EQ ("340282366920938463463374607431768211455", balance_text);
std::string pending_text (response.json.get<std::string> ("pending"));
ASSERT_EQ ("0", pending_text);
}
TEST (rpc, account_block_count)
{
nano::system system (24000, 1);
scoped_io_thread_name_change scoped_thread_name_io;
auto node = system.nodes.front ();
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "account_block_count");
request.put ("account", nano::test_genesis_key.pub.to_account ());
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::string block_count_text (response.json.get<std::string> ("block_count"));
ASSERT_EQ ("1", block_count_text);
}
TEST (rpc, account_create)
{
nano::system system (24000, 1);
scoped_io_thread_name_change scoped_thread_name_io;
auto node = system.nodes.front ();
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "account_create");
request.put ("wallet", system.nodes[0]->wallets.items.begin ()->first.to_string ());
test_response response0 (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response0.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response0.status);
auto account_text0 (response0.json.get<std::string> ("account"));
nano::account account0;
ASSERT_FALSE (account0.decode_account (account_text0));
ASSERT_TRUE (system.wallet (0)->exists (account0));
uint64_t max_index (std::numeric_limits<uint32_t>::max ());
request.put ("index", max_index);
test_response response1 (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response1.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response1.status);
auto account_text1 (response1.json.get<std::string> ("account"));
nano::account account1;
ASSERT_FALSE (account1.decode_account (account_text1));
ASSERT_TRUE (system.wallet (0)->exists (account1));
request.put ("index", max_index + 1);
test_response response2 (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response2.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response2.status);
ASSERT_EQ (std::error_code (nano::error_common::invalid_index).message (), response2.json.get<std::string> ("error"));
}
TEST (rpc, account_weight)
{
nano::keypair key;
nano::system system (24000, 1);
nano::block_hash latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
auto & node1 (*system.nodes[0]);
nano::change_block block (latest, key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
ASSERT_EQ (nano::process_result::progress, node1.process (block).code);
scoped_io_thread_name_change scoped_thread_name_io;
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "account_weight");
request.put ("account", key.pub.to_account ());
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::string balance_text (response.json.get<std::string> ("weight"));
ASSERT_EQ ("340282366920938463463374607431768211455", balance_text);
}
TEST (rpc, wallet_contains)
{
nano::system system (24000, 1);
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
scoped_io_thread_name_change scoped_thread_name_io;
auto node = system.nodes.front ();
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
std::string wallet;
node->wallets.items.begin ()->first.encode_hex (wallet);
request.put ("wallet", wallet);
request.put ("action", "wallet_contains");
request.put ("account", nano::test_genesis_key.pub.to_account ());
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::string exists_text (response.json.get<std::string> ("exists"));
ASSERT_EQ ("1", exists_text);
}
TEST (rpc, wallet_doesnt_contain)
{
nano::system system (24000, 1);
scoped_io_thread_name_change scoped_thread_name_io;
auto node = system.nodes.front ();
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
std::string wallet;
node->wallets.items.begin ()->first.encode_hex (wallet);
request.put ("wallet", wallet);
request.put ("action", "wallet_contains");
request.put ("account", nano::test_genesis_key.pub.to_account ());
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::string exists_text (response.json.get<std::string> ("exists"));
ASSERT_EQ ("0", exists_text);
}
// "validate_account_number" should report a correctly encoded account string as valid.
TEST (rpc, validate_account_number)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "validate_account_number");
	request.put ("account", nano::test_genesis_key.pub.to_account ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	// Previously the HTTP status was never checked here; assert 200 for consistency
	// with validate_account_invalid and the other RPC tests in this file
	ASSERT_EQ (200, response.status);
	std::string exists_text (response.json.get<std::string> ("valid"));
	ASSERT_EQ ("1", exists_text);
}
// A tampered account string must fail "validate_account_number" with valid == 0.
TEST (rpc, validate_account_invalid)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	std::string account;
	nano::test_genesis_key.pub.encode_account (account);
	// Flip one bit of the first character so the encoding no longer decodes cleanly
	account[0] ^= 0x1;
	boost::property_tree::ptree request;
	request.put ("action", "validate_account_number");
	request.put ("account", account);
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	std::string exists_text (response.json.get<std::string> ("valid"));
	ASSERT_EQ ("0", exists_text);
}
// Happy-path "send": the returned block hash must exist in the ledger and become the new frontier.
TEST (rpc, send)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	std::string wallet;
	system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet);
	request.put ("wallet", wallet);
	request.put ("action", "send");
	request.put ("source", nano::test_genesis_key.pub.to_account ());
	// Sending genesis -> genesis keeps the test self-contained; the balance still dips by 100
	request.put ("destination", nano::test_genesis_key.pub.to_account ());
	request.put ("amount", "100");
	system.deadline_set (10s);
	// Helper thread keeps servicing the io context until the send is visible on the ledger,
	// so the RPC request below cannot deadlock waiting for work/processing
	boost::thread thread2 ([&system]() {
		while (system.nodes[0]->balance (nano::test_genesis_key.pub) == nano::genesis_amount)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
	});
	test_response response (request, rpc.config.port, system.io_ctx);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	std::string block_text (response.json.get<std::string> ("block"));
	nano::block_hash block;
	ASSERT_FALSE (block.decode_hex (block_text));
	ASSERT_TRUE (node->ledger.block_exists (block));
	ASSERT_EQ (node->latest (nano::test_genesis_key.pub), block);
	thread2.join ();
}
// "send" from an account that is not in the wallet must fail with account_not_found_wallet.
TEST (rpc, send_fail)
{
	nano::system system (24000, 1);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	std::string wallet;
	node->wallets.items.begin ()->first.encode_hex (wallet);
	request.put ("wallet", wallet);
	request.put ("action", "send");
	// The genesis key was never inserted into the wallet, so this source is unknown to it
	request.put ("source", nano::test_genesis_key.pub.to_account ());
	request.put ("destination", nano::test_genesis_key.pub.to_account ());
	request.put ("amount", "100");
	std::atomic<bool> done (false);
	system.deadline_set (10s);
	// Helper thread keeps the io context serviced until the response has been read
	boost::thread thread2 ([&system, &done]() {
		while (!done)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
	});
	test_response response (request, rpc.config.port, system.io_ctx);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	done = true;
	// RPC errors are reported in the JSON body with an HTTP 200 status; assert the
	// status too, as the other failure tests in this file do
	ASSERT_EQ (200, response.status);
	ASSERT_EQ (std::error_code (nano::error_common::account_not_found_wallet).message (), response.json.get<std::string> ("error"));
	thread2.join ();
}
// "send" with an explicit "work" field: bogus work is rejected, valid work for the frontier succeeds.
TEST (rpc, send_work)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	std::string wallet;
	system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet);
	request.put ("wallet", wallet);
	request.put ("action", "send");
	request.put ("source", nano::test_genesis_key.pub.to_account ());
	request.put ("destination", nano::test_genesis_key.pub.to_account ());
	request.put ("amount", "100");
	// "1" is not a valid work value for the genesis frontier
	request.put ("work", "1");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (10s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (std::error_code (nano::error_common::invalid_work).message (), response.json.get<std::string> ("error"));
	// Retry with properly generated work for the current frontier; this time it must succeed
	request.erase ("work");
	request.put ("work", nano::to_string_hex (*system.nodes[0]->work_generate_blocking (system.nodes[0]->latest (nano::test_genesis_key.pub))));
	test_response response2 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (10s);
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response2.status);
	std::string block_text (response2.json.get<std::string> ("block"));
	nano::block_hash block;
	ASSERT_FALSE (block.decode_hex (block_text));
	ASSERT_TRUE (system.nodes[0]->ledger.block_exists (block));
	ASSERT_EQ (system.nodes[0]->latest (nano::test_genesis_key.pub), block);
}
// With work_threads = 0 the node cannot generate work itself, so a "send" without
// a supplied "work" value must fail with disabled_work_generation.
TEST (rpc, send_work_disabled)
{
	nano::system system (24000, 0);
	nano::node_config node_config (24000, system.logging);
	// Disable local work generation entirely
	node_config.work_threads = 0;
	auto & node = *system.add_node (node_config);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	std::string wallet;
	system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet);
	request.put ("wallet", wallet);
	request.put ("action", "send");
	request.put ("source", nano::test_genesis_key.pub.to_account ());
	request.put ("destination", nano::test_genesis_key.pub.to_account ());
	request.put ("amount", "100");
	{
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (10s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		ASSERT_EQ (std::error_code (nano::error_common::disabled_work_generation).message (), response.json.get<std::string> ("error"));
	}
}
// "send" with an "id" field is idempotent: replaying the same id returns the original
// block without sending again, while a fresh id performs a new (here: failing) send.
TEST (rpc, send_idempotent)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	std::string wallet;
	system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet);
	request.put ("wallet", wallet);
	request.put ("action", "send");
	request.put ("source", nano::test_genesis_key.pub.to_account ());
	request.put ("destination", nano::account (0).to_account ());
	// Send 3/4 of the supply, leaving 1/4 behind; a second real send of the
	// same amount would therefore fail for insufficient balance
	request.put ("amount", (nano::genesis_amount - (nano::genesis_amount / 4)).convert_to<std::string> ());
	request.put ("id", "123abc");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	std::string block_text (response.json.get<std::string> ("block"));
	nano::block_hash block;
	ASSERT_FALSE (block.decode_hex (block_text));
	ASSERT_TRUE (system.nodes[0]->ledger.block_exists (block));
	ASSERT_EQ (system.nodes[0]->balance (nano::test_genesis_key.pub), nano::genesis_amount / 4);
	// Replaying the identical request (same id) must return the same block and not move funds
	test_response response2 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response2.status);
	ASSERT_EQ ("", response2.json.get<std::string> ("error", ""));
	ASSERT_EQ (block_text, response2.json.get<std::string> ("block"));
	ASSERT_EQ (system.nodes[0]->balance (nano::test_genesis_key.pub), nano::genesis_amount / 4);
	// A different id retries the send for real, which now fails: only 1/4 of the supply is left
	request.erase ("id");
	request.put ("id", "456def");
	test_response response3 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response3.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response3.status);
	ASSERT_EQ (std::error_code (nano::error_common::insufficient_balance).message (), response3.json.get<std::string> ("error"));
}
// "stop" shuts the node down; the test only waits until some response status arrives.
TEST (rpc, stop)
{
	nano::system system (24000, 1);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "stop");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	// No status assertion: the node may tear the connection down while stopping.
	// (Removed a stray ';' that followed the loop's closing brace.)
}
// "wallet_add" with a raw private key must insert the key and echo back its account.
TEST (rpc, wallet_add)
{
	nano::system system (24000, 1);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	nano::keypair key1;
	std::string key_text;
	key1.prv.data.encode_hex (key_text);
	boost::property_tree::ptree request;
	std::string wallet;
	system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet);
	request.put ("wallet", wallet);
	request.put ("action", "wallet_add");
	request.put ("key", key_text);
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// The response reports the account derived from the inserted key, and the wallet now holds it
	std::string account_text1 (response.json.get<std::string> ("account"));
	ASSERT_EQ (account_text1, key1.pub.to_account ());
	ASSERT_TRUE (system.wallet (0)->exists (key1.pub));
}
// A freshly created wallet has an empty password, so "password_valid" reports valid == 1.
TEST (rpc, wallet_password_valid)
{
	nano::system system (24000, 1);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	// Target the node's first (default) wallet
	std::string wallet_text;
	node->wallets.items.begin ()->first.encode_hex (wallet_text);
	boost::property_tree::ptree request;
	request.put ("action", "password_valid");
	request.put ("wallet", wallet_text);
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto valid_text (response.json.get<std::string> ("valid"));
	ASSERT_EQ (valid_text, "1");
}
// "password_change" re-encrypts the wallet; afterwards only the new password unlocks it.
TEST (rpc, wallet_password_change)
{
	nano::system system (24000, 1);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	std::string wallet;
	system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet);
	request.put ("wallet", wallet);
	request.put ("action", "password_change");
	request.put ("password", "test");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	std::string account_text1 (response.json.get<std::string> ("changed"));
	ASSERT_EQ (account_text1, "1");
	// End the io-thread-name scope before touching the wallet store directly from this thread
	scoped_thread_name_io.reset ();
	auto transaction (system.wallet (0)->wallets.tx_begin_write ());
	// Still unlocked; re-entering the OLD (empty) password must fail and lock the wallet,
	// while the NEW password "test" must unlock it again
	ASSERT_TRUE (system.wallet (0)->store.valid_password (transaction));
	ASSERT_TRUE (system.wallet (0)->enter_password (transaction, ""));
	ASSERT_FALSE (system.wallet (0)->store.valid_password (transaction));
	ASSERT_FALSE (system.wallet (0)->enter_password (transaction, "test"));
	ASSERT_TRUE (system.wallet (0)->store.valid_password (transaction));
}
// "password_enter" with the correct (empty) password must unlock the wallet (valid == 1).
TEST (rpc, wallet_password_enter)
{
	nano::system system (24000, 1);
	scoped_io_thread_name_change scoped_thread_name_io;
	nano::raw_key password_l;
	password_l.data.clear ();
	system.deadline_set (10s);
	// Wait until the wallet's stored password value becomes non-zero
	// (i.e. the wallet has finished establishing its session password)
	while (password_l.data == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
		system.wallet (0)->store.password.value (password_l);
	}
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	std::string wallet;
	system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet);
	request.put ("wallet", wallet);
	request.put ("action", "password_enter");
	request.put ("password", "");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	std::string account_text1 (response.json.get<std::string> ("valid"));
	ASSERT_EQ (account_text1, "1");
}
// "wallet_representative" on a fresh wallet should report the genesis account as representative.
TEST (rpc, wallet_representative)
{
	nano::system system (24000, 1);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	std::string wallet;
	system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet);
	request.put ("wallet", wallet);
	request.put ("action", "wallet_representative");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	std::string account_text1 (response.json.get<std::string> ("representative"));
	ASSERT_EQ (account_text1, nano::genesis_account.to_account ());
}
// "wallet_representative_set" must persist the new representative in the wallet store.
TEST (rpc, wallet_representative_set)
{
	nano::system system (24000, 1);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	std::string wallet;
	system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet);
	request.put ("wallet", wallet);
	// Use a brand new keypair as the representative to set
	nano::keypair key;
	request.put ("action", "wallet_representative_set");
	request.put ("representative", key.pub.to_account ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto transaction (system.nodes[0]->wallets.tx_begin_read ());
	ASSERT_EQ (key.pub, system.nodes[0]->wallets.items.begin ()->second->store.representative (transaction));
}
// "wallet_representative_set" with update_existing_accounts must also rewrite the
// representative of accounts already open in the ledger, not just the wallet default.
TEST (rpc, wallet_representative_set_force)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	std::string wallet;
	system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet);
	request.put ("wallet", wallet);
	nano::keypair key;
	request.put ("action", "wallet_representative_set");
	request.put ("representative", key.pub.to_account ());
	request.put ("update_existing_accounts", true);
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	{
		// The wallet-level default representative is updated synchronously
		auto transaction (system.nodes[0]->wallets.tx_begin_read ());
		ASSERT_EQ (key.pub, system.nodes[0]->wallets.items.begin ()->second->store.representative (transaction));
	}
	// Set a FRESH deadline for the ledger-update wait below; previously this loop
	// reused the earlier 5s deadline, which could already have elapsed and made
	// ASSERT_NO_ERROR fail spuriously
	system.deadline_set (10s);
	nano::account representative (0);
	while (representative != key.pub)
	{
		auto transaction (system.nodes[0]->store.tx_begin_read ());
		nano::account_info info;
		if (!system.nodes[0]->store.account_get (transaction, nano::test_genesis_key.pub, info))
		{
			representative = info.representative;
		}
		ASSERT_NO_ERROR (system.poll ());
	}
}
// "account_list" must enumerate exactly the accounts held by the wallet.
TEST (rpc, account_list)
{
	nano::system system (24000, 1);
	nano::keypair key2;
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	system.wallet (0)->insert_adhoc (key2.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	std::string wallet;
	system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet);
	request.put ("wallet", wallet);
	request.put ("action", "account_list");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// Decode every returned account string; each must be well-formed
	auto & accounts_node (response.json.get_child ("accounts"));
	std::vector<nano::account> accounts;
	for (auto i (accounts_node.begin ()), j (accounts_node.end ()); i != j; ++i)
	{
		auto account (i->second.get<std::string> (""));
		nano::account number;
		ASSERT_FALSE (number.decode_account (account));
		accounts.push_back (number);
	}
	// Both inserted keys (genesis and key2) must be present in the wallet
	ASSERT_EQ (2, accounts.size ());
	for (auto i (accounts.begin ()), j (accounts.end ()); i != j; ++i)
	{
		ASSERT_TRUE (system.wallet (0)->exists (*i));
	}
}
// "wallet_key_valid" reports 1 when the wallet's key material is accessible (wallet unlocked).
TEST (rpc, wallet_key_valid)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	std::string wallet;
	system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet);
	request.put ("wallet", wallet);
	request.put ("action", "wallet_key_valid");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	std::string exists_text (response.json.get<std::string> ("valid"));
	ASSERT_EQ ("1", exists_text);
}
// "wallet_create" without a seed must create a brand new wallet and return its id.
TEST (rpc, wallet_create)
{
	nano::system system (24000, 1);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "wallet_create");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// The returned id must decode as hex and be registered in the node's wallet map
	auto wallet_text (response.json.get<std::string> ("wallet"));
	nano::wallet_id created_id;
	ASSERT_FALSE (created_id.decode_hex (wallet_text));
	ASSERT_NE (node->wallets.items.end (), node->wallets.items.find (created_id));
}
// "wallet_create" with an explicit seed must store that seed and restore the
// first deterministic account derived from it.
TEST (rpc, wallet_create_seed)
{
	nano::system system (24000, 1);
	scoped_io_thread_name_change scoped_thread_name_io;
	nano::raw_key seed;
	// First deterministic key/account expected from this seed
	auto prv = nano::deterministic_key (seed, 0);
	auto pub (nano::pub_key (prv));
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "wallet_create");
	request.put ("seed", seed.data.to_string ());
	test_response response (request, rpc.config.port, system.io_ctx);
	// Previously this loop had no deadline and called system.poll () bare, so a
	// poll error or missing response would hang the test forever; bound it and
	// surface errors like every other test in this file
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	std::string wallet_text (response.json.get<std::string> ("wallet"));
	nano::wallet_id wallet_id;
	ASSERT_FALSE (wallet_id.decode_hex (wallet_text));
	auto existing (system.nodes[0]->wallets.items.find (wallet_id));
	ASSERT_NE (system.nodes[0]->wallets.items.end (), existing);
	{
		// The created wallet must carry exactly the seed we supplied
		auto transaction (system.nodes[0]->wallets.tx_begin_read ());
		nano::raw_key seed0;
		existing->second->store.seed (seed0, transaction);
		ASSERT_EQ (seed, seed0);
	}
	auto account_text (response.json.get<std::string> ("last_restored_account"));
	nano::account account;
	ASSERT_FALSE (account.decode_account (account_text));
	ASSERT_TRUE (existing->second->exists (account));
	ASSERT_EQ (pub, account);
	ASSERT_EQ ("1", response.json.get<std::string> ("restored_count"));
}
// "wallet_export" returns the wallet as JSON; re-importing that JSON into a fresh
// wallet_store must round-trip the inserted key.
TEST (rpc, wallet_export)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "wallet_export");
	request.put ("wallet", system.nodes[0]->wallets.items.begin ()->first.to_string ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	std::string wallet_json (response.json.get<std::string> ("json"));
	bool error (false);
	// End the io-thread-name scope before opening a write transaction from this thread
	scoped_thread_name_io.reset ();
	auto transaction (system.nodes[0]->wallets.tx_begin_write ());
	nano::kdf kdf;
	// Rebuild a wallet store from the exported JSON; it must contain the genesis key
	nano::wallet_store store (error, kdf, transaction, nano::genesis_account, 1, "0", wallet_json);
	ASSERT_FALSE (error);
	ASSERT_TRUE (store.exists (transaction, nano::test_genesis_key.pub));
}
// "wallet_destroy" must remove the wallet from the node's wallet map.
TEST (rpc, wallet_destroy)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto wallet_id (system.nodes[0]->wallets.items.begin ()->first);
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "wallet_destroy");
	request.put ("wallet", wallet_id.to_string ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	ASSERT_EQ (system.nodes[0]->wallets.items.end (), system.nodes[0]->wallets.items.find (wallet_id));
}
// "account_move" must transfer the listed accounts from the source wallet into the
// destination wallet, leaving the source empty.
TEST (rpc, account_move)
{
	nano::system system (24000, 1);
	auto wallet_id (system.nodes[0]->wallets.items.begin ()->first);
	auto destination (system.wallet (0));
	destination->insert_adhoc (nano::test_genesis_key.prv);
	nano::keypair key;
	// Create a second wallet holding only key.pub; this is the move source
	auto source_id = nano::random_wallet_id ();
	auto source (system.nodes[0]->wallets.create (source_id));
	source->insert_adhoc (key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "account_move");
	request.put ("wallet", wallet_id.to_string ());
	request.put ("source", source_id.to_string ());
	// "accounts" is a JSON array containing the single account to move
	boost::property_tree::ptree keys;
	boost::property_tree::ptree entry;
	entry.put ("", key.pub.to_account ());
	keys.push_back (std::make_pair ("", entry));
	request.add_child ("accounts", keys);
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	ASSERT_EQ ("1", response.json.get<std::string> ("moved"));
	// Destination now holds both keys; the source wallet must be empty
	ASSERT_TRUE (destination->exists (key.pub));
	ASSERT_TRUE (destination->exists (nano::test_genesis_key.pub));
	auto transaction (system.nodes[0]->wallets.tx_begin_read ());
	ASSERT_EQ (source->store.end (), source->store.begin (transaction));
}
// "block" on the genesis frontier must return non-empty contents and confirmed == true.
TEST (rpc, block)
{
	nano::system system (24000, 1);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "block");
	request.put ("hash", system.nodes[0]->latest (nano::genesis_account).to_string ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto contents (response.json.get<std::string> ("contents"));
	ASSERT_FALSE (contents.empty ());
	ASSERT_TRUE (response.json.get<bool> ("confirmed")); // Genesis block is confirmed by default
}
// "block_account" on the genesis block hash must return a decodable account string.
TEST (rpc, block_account)
{
	nano::system system (24000, 1);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	// Query by the well-known genesis block hash
	nano::genesis genesis;
	boost::property_tree::ptree request;
	request.put ("action", "block_account");
	request.put ("hash", genesis.hash ().to_string ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto account_text (response.json.get<std::string> ("account"));
	nano::account owner;
	ASSERT_FALSE (owner.decode_account (account_text));
}
// "chain" with an unbounded count must walk from the given block back to genesis,
// returning the blocks newest-first.
TEST (rpc, chain)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	nano::keypair key;
	auto genesis (system.nodes[0]->latest (nano::test_genesis_key.pub));
	ASSERT_FALSE (genesis.is_zero ());
	// Create one send block on top of genesis so the chain has two blocks
	auto block (system.wallet (0)->send_action (nano::test_genesis_key.pub, key.pub, 1));
	ASSERT_NE (nullptr, block);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "chain");
	request.put ("block", block->hash ().to_string ());
	// Maximum count => no truncation; the whole chain is returned
	request.put ("count", std::to_string (std::numeric_limits<uint64_t>::max ()));
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto & blocks_node (response.json.get_child ("blocks"));
	std::vector<nano::block_hash> blocks;
	for (auto i (blocks_node.begin ()), n (blocks_node.end ()); i != n; ++i)
	{
		blocks.push_back (nano::block_hash (i->second.get<std::string> ("")));
	}
	// Newest block first, then its predecessor (the genesis frontier)
	ASSERT_EQ (2, blocks.size ());
	ASSERT_EQ (block->hash (), blocks[0]);
	ASSERT_EQ (genesis, blocks[1]);
}
// RPC "chain" with count=1 should return only the starting block, even
// though more of the chain exists behind it.
TEST (rpc, chain_limit)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	nano::keypair destination;
	auto genesis (system.nodes[0]->latest (nano::test_genesis_key.pub));
	ASSERT_FALSE (genesis.is_zero ());
	auto send (system.wallet (0)->send_action (nano::test_genesis_key.pub, destination.pub, 1));
	ASSERT_NE (nullptr, send);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "chain");
	request.put ("block", send->hash ().to_string ());
	request.put ("count", 1);
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// Only the requested block should come back; genesis is cut off by the limit.
	std::vector<nano::block_hash> chain_hashes;
	for (auto & entry : response.json.get_child ("blocks"))
	{
		chain_hashes.emplace_back (entry.second.get<std::string> (""));
	}
	ASSERT_EQ (1, chain_hashes.size ());
	ASSERT_EQ (send->hash (), chain_hashes[0]);
}
// RPC "chain" with offset=1 should skip the starting block and return the
// remainder of the chain (here: just the genesis block).
TEST (rpc, chain_offset)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	nano::keypair key;
	auto genesis (system.nodes[0]->latest (nano::test_genesis_key.pub));
	ASSERT_FALSE (genesis.is_zero ());
	// One send on top of genesis gives a two-block chain to query.
	auto block (system.wallet (0)->send_action (nano::test_genesis_key.pub, key.pub, 1));
	ASSERT_NE (nullptr, block);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "chain");
	request.put ("block", block->hash ().to_string ());
	request.put ("count", std::to_string (std::numeric_limits<uint64_t>::max ()));
	request.put ("offset", 1);
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto & blocks_node (response.json.get_child ("blocks"));
	std::vector<nano::block_hash> blocks;
	for (auto i (blocks_node.begin ()), n (blocks_node.end ()); i != n; ++i)
	{
		blocks.push_back (nano::block_hash (i->second.get<std::string> ("")));
	}
	// Offset 1 skips the send block itself, leaving only genesis.
	ASSERT_EQ (1, blocks.size ());
	ASSERT_EQ (genesis, blocks[0]);
}
// RPC "frontiers" starting from account 0 with an unbounded count should
// return the frontier (head block hash) of every account in the ledger.
TEST (rpc, frontier)
{
	nano::system system (24000, 1);
	std::unordered_map<nano::account, nano::block_hash> source;
	{
		// Seed the ledger directly with 1000 accounts, each with a random frontier hash.
		auto transaction (system.nodes[0]->store.tx_begin_write ());
		for (auto i (0); i < 1000; ++i)
		{
			nano::keypair key;
			nano::block_hash hash;
			nano::random_pool::generate_block (hash.bytes.data (), hash.bytes.size ());
			source[key.pub] = hash;
			system.nodes[0]->store.confirmation_height_put (transaction, key.pub, 0);
			system.nodes[0]->store.account_put (transaction, key.pub, nano::account_info (hash, 0, 0, 0, 0, 0, nano::epoch::epoch_0));
		}
	}
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "frontiers");
	request.put ("account", nano::account (0).to_account ());
	request.put ("count", std::to_string (std::numeric_limits<uint64_t>::max ()));
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto & frontiers_node (response.json.get_child ("frontiers"));
	std::unordered_map<nano::account, nano::block_hash> frontiers;
	for (auto i (frontiers_node.begin ()), j (frontiers_node.end ()); i != j; ++i)
	{
		nano::account account;
		// Fail fast on malformed RPC output instead of silently ignoring decode errors.
		ASSERT_FALSE (account.decode_account (i->first));
		nano::block_hash frontier;
		ASSERT_FALSE (frontier.decode_hex (i->second.get<std::string> ("")));
		frontiers[account] = frontier;
	}
	// The genesis account is returned in addition to the 1000 seeded accounts.
	ASSERT_EQ (1, frontiers.erase (nano::test_genesis_key.pub));
	ASSERT_EQ (source, frontiers);
}
// RPC "frontiers" with count=100 should cap the number of returned entries
// even though the ledger holds 1000+ accounts.
TEST (rpc, frontier_limited)
{
	nano::system system (24000, 1);
	std::unordered_map<nano::account, nano::block_hash> source;
	{
		// Seed the ledger directly with 1000 accounts, each with a random frontier hash.
		auto transaction (system.nodes[0]->store.tx_begin_write ());
		for (auto i (0); i < 1000; ++i)
		{
			nano::keypair key;
			nano::block_hash hash;
			nano::random_pool::generate_block (hash.bytes.data (), hash.bytes.size ());
			source[key.pub] = hash;
			system.nodes[0]->store.confirmation_height_put (transaction, key.pub, 0);
			system.nodes[0]->store.account_put (transaction, key.pub, nano::account_info (hash, 0, 0, 0, 0, 0, nano::epoch::epoch_0));
		}
	}
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "frontiers");
	request.put ("account", nano::account (0).to_account ());
	request.put ("count", std::to_string (100));
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto & frontiers_node (response.json.get_child ("frontiers"));
	ASSERT_EQ (100, frontiers_node.size ());
}
// RPC "frontiers" should begin iteration at the requested account: asking
// for one entry starting at a known account must return exactly that account.
TEST (rpc, frontier_startpoint)
{
	nano::system system (24000, 1);
	std::unordered_map<nano::account, nano::block_hash> source;
	{
		// Seed the ledger directly with 1000 accounts, each with a random frontier hash.
		auto transaction (system.nodes[0]->store.tx_begin_write ());
		for (auto i (0); i < 1000; ++i)
		{
			nano::keypair key;
			nano::block_hash hash;
			nano::random_pool::generate_block (hash.bytes.data (), hash.bytes.size ());
			source[key.pub] = hash;
			system.nodes[0]->store.confirmation_height_put (transaction, key.pub, 0);
			system.nodes[0]->store.account_put (transaction, key.pub, nano::account_info (hash, 0, 0, 0, 0, 0, nano::epoch::epoch_0));
		}
	}
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "frontiers");
	request.put ("account", source.begin ()->first.to_account ());
	request.put ("count", std::to_string (1));
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto & frontiers_node (response.json.get_child ("frontiers"));
	ASSERT_EQ (1, frontiers_node.size ());
	ASSERT_EQ (source.begin ()->first.to_account (), frontiers_node.begin ()->first);
}
TEST (rpc, history)
{
nano::system system (24000, 1);
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
auto change (system.wallet (0)->change_action (nano::test_genesis_key.pub, nano::test_genesis_key.pub));
ASSERT_NE (nullptr, change);
auto send (system.wallet (0)->send_action (nano::test_genesis_key.pub, nano::test_genesis_key.pub, system.nodes[0]->config.receive_minimum.number ()));
ASSERT_NE (nullptr, send);
auto receive (system.wallet (0)->receive_action (*send, nano::test_genesis_key.pub, system.nodes[0]->config.receive_minimum.number ()));
ASSERT_NE (nullptr, receive);
auto node0 (system.nodes[0]);
nano::genesis genesis;
nano::state_block usend (nano::genesis_account, node0->latest (nano::genesis_account), nano::genesis_account, nano::genesis_amount - nano::Gxrb_ratio, nano::genesis_account, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.nodes[0]->work_generate_blocking (node0->latest (nano::genesis_account)));
nano::state_block ureceive (nano::genesis_account, usend.hash (), nano::genesis_account, nano::genesis_amount, usend.hash (), nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.nodes[0]->work_generate_blocking (usend.hash ()));
nano::state_block uchange (nano::genesis_account, ureceive.hash (), nano::keypair ().pub, nano::genesis_amount, 0, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.nodes[0]->work_generate_blocking (ureceive.hash ()));
{
auto transaction (node0->store.tx_begin_write ());
ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction, usend).code);
ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction, ureceive).code);
ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction, uchange).code);
}
scoped_io_thread_name_change scoped_thread_name_io;
enable_ipc_transport_tcp (node0->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node0, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "history");
request.put ("hash", uchange.hash ().to_string ());
request.put ("count", 100);
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::vector<std::tuple<std::string, std::string, std::string, std::string>> history_l;
auto & history_node (response.json.get_child ("history"));
for (auto i (history_node.begin ()), n (history_node.end ()); i != n; ++i)
{
history_l.push_back (std::make_tuple (i->second.get<std::string> ("type"), i->second.get<std::string> ("account"), i->second.get<std::string> ("amount"), i->second.get<std::string> ("hash")));
}
ASSERT_EQ (5, history_l.size ());
ASSERT_EQ ("receive", std::get<0> (history_l[0]));
ASSERT_EQ (ureceive.hash ().to_string (), std::get<3> (history_l[0]));
ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<1> (history_l[0]));
ASSERT_EQ (nano::Gxrb_ratio.convert_to<std::string> (), std::get<2> (history_l[0]));
ASSERT_EQ (5, history_l.size ());
ASSERT_EQ ("send", std::get<0> (history_l[1]));
ASSERT_EQ (usend.hash ().to_string (), std::get<3> (history_l[1]));
ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<1> (history_l[1]));
ASSERT_EQ (nano::Gxrb_ratio.convert_to<std::string> (), std::get<2> (history_l[1]));
ASSERT_EQ ("receive", std::get<0> (history_l[2]));
ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<1> (history_l[2]));
ASSERT_EQ (system.nodes[0]->config.receive_minimum.to_string_dec (), std::get<2> (history_l[2]));
ASSERT_EQ (receive->hash ().to_string (), std::get<3> (history_l[2]));
ASSERT_EQ ("send", std::get<0> (history_l[3]));
ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<1> (history_l[3]));
ASSERT_EQ (system.nodes[0]->config.receive_minimum.to_string_dec (), std::get<2> (history_l[3]));
ASSERT_EQ (send->hash ().to_string (), std::get<3> (history_l[3]));
ASSERT_EQ ("receive", std::get<0> (history_l[4]));
ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<1> (history_l[4]));
ASSERT_EQ (nano::genesis_amount.convert_to<std::string> (), std::get<2> (history_l[4]));
ASSERT_EQ (genesis.hash ().to_string (), std::get<3> (history_l[4]));
}
TEST (rpc, account_history)
{
nano::system system (24000, 1);
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
auto change (system.wallet (0)->change_action (nano::test_genesis_key.pub, nano::test_genesis_key.pub));
ASSERT_NE (nullptr, change);
auto send (system.wallet (0)->send_action (nano::test_genesis_key.pub, nano::test_genesis_key.pub, system.nodes[0]->config.receive_minimum.number ()));
ASSERT_NE (nullptr, send);
auto receive (system.wallet (0)->receive_action (*send, nano::test_genesis_key.pub, system.nodes[0]->config.receive_minimum.number ()));
ASSERT_NE (nullptr, receive);
auto node0 (system.nodes[0]);
nano::genesis genesis;
nano::state_block usend (nano::genesis_account, node0->latest (nano::genesis_account), nano::genesis_account, nano::genesis_amount - nano::Gxrb_ratio, nano::genesis_account, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.nodes[0]->work_generate_blocking (node0->latest (nano::genesis_account)));
nano::state_block ureceive (nano::genesis_account, usend.hash (), nano::genesis_account, nano::genesis_amount, usend.hash (), nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.nodes[0]->work_generate_blocking (usend.hash ()));
nano::state_block uchange (nano::genesis_account, ureceive.hash (), nano::keypair ().pub, nano::genesis_amount, 0, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.nodes[0]->work_generate_blocking (ureceive.hash ()));
{
auto transaction (node0->store.tx_begin_write ());
ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction, usend).code);
ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction, ureceive).code);
ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction, uchange).code);
}
scoped_io_thread_name_change scoped_thread_name_io;
enable_ipc_transport_tcp (node0->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node0, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
{
boost::property_tree::ptree request;
request.put ("action", "account_history");
request.put ("account", nano::genesis_account.to_account ());
request.put ("count", 100);
test_response response (request, rpc.config.port, system.io_ctx);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::vector<std::tuple<std::string, std::string, std::string, std::string, std::string>> history_l;
auto & history_node (response.json.get_child ("history"));
for (auto i (history_node.begin ()), n (history_node.end ()); i != n; ++i)
{
history_l.push_back (std::make_tuple (i->second.get<std::string> ("type"), i->second.get<std::string> ("account"), i->second.get<std::string> ("amount"), i->second.get<std::string> ("hash"), i->second.get<std::string> ("height")));
}
ASSERT_EQ (5, history_l.size ());
ASSERT_EQ ("receive", std::get<0> (history_l[0]));
ASSERT_EQ (ureceive.hash ().to_string (), std::get<3> (history_l[0]));
ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<1> (history_l[0]));
ASSERT_EQ (nano::Gxrb_ratio.convert_to<std::string> (), std::get<2> (history_l[0]));
ASSERT_EQ ("6", std::get<4> (history_l[0])); // change block (height 7) is skipped by account_history since "raw" is not set
ASSERT_EQ ("send", std::get<0> (history_l[1]));
ASSERT_EQ (usend.hash ().to_string (), std::get<3> (history_l[1]));
ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<1> (history_l[1]));
ASSERT_EQ (nano::Gxrb_ratio.convert_to<std::string> (), std::get<2> (history_l[1]));
ASSERT_EQ ("5", std::get<4> (history_l[1]));
ASSERT_EQ ("receive", std::get<0> (history_l[2]));
ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<1> (history_l[2]));
ASSERT_EQ (system.nodes[0]->config.receive_minimum.to_string_dec (), std::get<2> (history_l[2]));
ASSERT_EQ (receive->hash ().to_string (), std::get<3> (history_l[2]));
ASSERT_EQ ("4", std::get<4> (history_l[2]));
ASSERT_EQ ("send", std::get<0> (history_l[3]));
ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<1> (history_l[3]));
ASSERT_EQ (system.nodes[0]->config.receive_minimum.to_string_dec (), std::get<2> (history_l[3]));
ASSERT_EQ (send->hash ().to_string (), std::get<3> (history_l[3]));
ASSERT_EQ ("3", std::get<4> (history_l[3]));
ASSERT_EQ ("receive", std::get<0> (history_l[4]));
ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<1> (history_l[4]));
ASSERT_EQ (nano::genesis_amount.convert_to<std::string> (), std::get<2> (history_l[4]));
ASSERT_EQ (genesis.hash ().to_string (), std::get<3> (history_l[4]));
ASSERT_EQ ("1", std::get<4> (history_l[4])); // change block (height 2) is skipped
}
// Test count and reverse
{
boost::property_tree::ptree request;
request.put ("action", "account_history");
request.put ("account", nano::genesis_account.to_account ());
request.put ("reverse", true);
request.put ("count", 1);
test_response response (request, rpc.config.port, system.io_ctx);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
auto & history_node (response.json.get_child ("history"));
ASSERT_EQ (1, history_node.size ());
ASSERT_EQ ("1", history_node.begin ()->second.get<std::string> ("height"));
ASSERT_EQ (change->hash ().to_string (), response.json.get<std::string> ("next"));
}
// Test filtering
scoped_thread_name_io.reset ();
auto account2 (system.wallet (0)->deterministic_insert ());
auto send2 (system.wallet (0)->send_action (nano::test_genesis_key.pub, account2, system.nodes[0]->config.receive_minimum.number ()));
ASSERT_NE (nullptr, send2);
auto receive2 (system.wallet (0)->receive_action (*send2, account2, system.nodes[0]->config.receive_minimum.number ()));
scoped_thread_name_io.renew ();
// Test filter for send state blocks
ASSERT_NE (nullptr, receive2);
{
boost::property_tree::ptree request;
request.put ("action", "account_history");
request.put ("account", nano::test_genesis_key.pub.to_account ());
boost::property_tree::ptree other_account;
other_account.put ("", account2.to_account ());
boost::property_tree::ptree filtered_accounts;
filtered_accounts.push_back (std::make_pair ("", other_account));
request.add_child ("account_filter", filtered_accounts);
request.put ("count", 100);
test_response response (request, rpc.config.port, system.io_ctx);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
auto history_node (response.json.get_child ("history"));
ASSERT_EQ (history_node.size (), 2);
}
// Test filter for receive state blocks
{
boost::property_tree::ptree request;
request.put ("action", "account_history");
request.put ("account", account2.to_account ());
boost::property_tree::ptree other_account;
other_account.put ("", nano::test_genesis_key.pub.to_account ());
boost::property_tree::ptree filtered_accounts;
filtered_accounts.push_back (std::make_pair ("", other_account));
request.add_child ("account_filter", filtered_accounts);
request.put ("count", 100);
test_response response (request, rpc.config.port, system.io_ctx);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
auto history_node (response.json.get_child ("history"));
ASSERT_EQ (history_node.size (), 1);
}
}
TEST (rpc, history_count)
{
nano::system system (24000, 1);
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
auto change (system.wallet (0)->change_action (nano::test_genesis_key.pub, nano::test_genesis_key.pub));
ASSERT_NE (nullptr, change);
auto send (system.wallet (0)->send_action (nano::test_genesis_key.pub, nano::test_genesis_key.pub, system.nodes[0]->config.receive_minimum.number ()));
ASSERT_NE (nullptr, send);
auto receive (system.wallet (0)->receive_action (*send, nano::test_genesis_key.pub, system.nodes[0]->config.receive_minimum.number ()));
ASSERT_NE (nullptr, receive);
scoped_io_thread_name_change scoped_thread_name_io;
auto node = system.nodes.front ();
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "history");
request.put ("hash", receive->hash ().to_string ());
request.put ("count", 1);
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
auto & history_node (response.json.get_child ("history"));
ASSERT_EQ (1, history_node.size ());
}
TEST (rpc, process_block)
{
nano::system system (24000, 1);
scoped_io_thread_name_change scoped_thread_name_io;
nano::keypair key;
auto latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
auto & node1 (*system.nodes[0]);
nano::send_block send (latest, key.pub, 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "process");
std::string json;
send.serialize_json (json);
request.put ("block", json);
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
system.deadline_set (10s);
while (system.nodes[0]->latest (nano::test_genesis_key.pub) != send.hash ())
{
ASSERT_NO_ERROR (system.poll ());
}
std::string send_hash (response.json.get<std::string> ("hash"));
ASSERT_EQ (send.hash ().to_string (), send_hash);
}
request.put ("json_block", true);
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::error_code ec (nano::error_blocks::invalid_block);
ASSERT_EQ (ec.message (), response.json.get<std::string> ("error"));
}
}
TEST (rpc, process_json_block)
{
nano::system system (24000, 1);
scoped_io_thread_name_change scoped_thread_name_io;
nano::keypair key;
auto latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
auto & node1 (*system.nodes[0]);
nano::send_block send (latest, key.pub, 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "process");
boost::property_tree::ptree block_node;
send.serialize_json (block_node);
request.add_child ("block", block_node);
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::error_code ec (nano::error_blocks::invalid_block);
ASSERT_EQ (ec.message (), response.json.get<std::string> ("error"));
}
request.put ("json_block", true);
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
system.deadline_set (10s);
while (system.nodes[0]->latest (nano::test_genesis_key.pub) != send.hash ())
{
ASSERT_NO_ERROR (system.poll ());
}
std::string send_hash (response.json.get<std::string> ("hash"));
ASSERT_EQ (send.hash ().to_string (), send_hash);
}
}
TEST (rpc, process_block_with_work_watcher)
{
nano::system system;
nano::node_config node_config (24000, system.logging);
node_config.enable_voting = false;
node_config.work_watcher_period = 1s;
auto & node1 = *system.add_node (node_config);
nano::keypair key;
auto latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
auto send (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, latest, nano::test_genesis_key.pub, nano::genesis_amount - 100, nano::test_genesis_key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (latest)));
uint64_t difficulty1 (0);
nano::work_validate (*send, &difficulty1);
auto multiplier1 = nano::difficulty::to_multiplier (difficulty1, node1.network_params.network.publish_threshold);
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "process");
request.put ("work_watcher", true);
std::string json;
send->serialize_json (json);
request.put ("block", json);
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
system.deadline_set (10s);
while (system.nodes[0]->latest (nano::test_genesis_key.pub) != send->hash ())
{
ASSERT_NO_ERROR (system.poll ());
}
system.deadline_set (10s);
auto updated (false);
uint64_t updated_difficulty;
while (!updated)
{
nano::unique_lock<std::mutex> lock (node1.active.mutex);
//fill multipliers_cb and update active difficulty;
for (auto i (0); i < node1.active.multipliers_cb.size (); i++)
{
node1.active.multipliers_cb.push_back (multiplier1 * (1 + i / 100.));
}
node1.active.update_active_difficulty (lock);
auto const existing (node1.active.roots.find (send->qualified_root ()));
//if existing is junk the block has been confirmed already
ASSERT_NE (existing, node1.active.roots.end ());
updated = existing->difficulty != difficulty1;
updated_difficulty = existing->difficulty;
lock.unlock ();
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_GT (updated_difficulty, difficulty1);
}
TEST (rpc, process_block_no_work)
{
nano::system system (24000, 1);
scoped_io_thread_name_change scoped_thread_name_io;
nano::keypair key;
auto latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
auto & node1 (*system.nodes[0]);
nano::send_block send (latest, key.pub, 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
send.block_work_set (0);
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "process");
std::string json;
send.serialize_json (json);
request.put ("block", json);
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
ASSERT_FALSE (response.json.get<std::string> ("error", "").empty ());
}
TEST (rpc, process_republish)
{
nano::system system (24000, 2);
scoped_io_thread_name_change scoped_thread_name_io;
nano::keypair key;
auto latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
auto & node1 (*system.nodes[0]);
nano::send_block send (latest, key.pub, 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "process");
std::string json;
send.serialize_json (json);
request.put ("block", json);
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
system.deadline_set (10s);
while (system.nodes[1]->latest (nano::test_genesis_key.pub) != send.hash ())
{
ASSERT_NO_ERROR (system.poll ());
}
}
TEST (rpc, process_subtype_send)
{
nano::system system (24000, 2);
scoped_io_thread_name_change scoped_thread_name_io;
nano::keypair key;
auto latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
auto & node1 (*system.nodes[0]);
nano::state_block send (nano::genesis_account, latest, nano::genesis_account, nano::genesis_amount - nano::Gxrb_ratio, key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "process");
std::string json;
send.serialize_json (json);
request.put ("block", json);
request.put ("subtype", "receive");
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::error_code ec (nano::error_rpc::invalid_subtype_balance);
ASSERT_EQ (response.json.get<std::string> ("error"), ec.message ());
request.put ("subtype", "change");
test_response response2 (request, rpc.config.port, system.io_ctx);
while (response2.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response2.status);
ASSERT_EQ (response2.json.get<std::string> ("error"), ec.message ());
request.put ("subtype", "send");
test_response response3 (request, rpc.config.port, system.io_ctx);
while (response3.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response3.status);
ASSERT_EQ (send.hash ().to_string (), response3.json.get<std::string> ("hash"));
system.deadline_set (10s);
while (system.nodes[1]->latest (nano::test_genesis_key.pub) != send.hash ())
{
ASSERT_NO_ERROR (system.poll ());
}
}
// RPC "process" subtype validation for an open block: rejected when declared
// as "send" or "epoch", accepted as "open" and propagated to the second node.
// Fix: the response2/response3 poll loops had no deadline, so a failing
// assertion path could spin forever instead of timing out.
TEST (rpc, process_subtype_open)
{
	nano::system system (24000, 2);
	nano::keypair key;
	auto latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
	auto & node1 (*system.nodes[0]);
	nano::state_block send (nano::genesis_account, latest, nano::genesis_account, nano::genesis_amount - nano::Gxrb_ratio, key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
	{
		// Apply the send directly so the open block has a pending source.
		auto transaction (node1.store.tx_begin_write ());
		ASSERT_EQ (nano::process_result::progress, node1.ledger.process (transaction, send).code);
	}
	scoped_io_thread_name_change scoped_thread_name_io;
	node1.active.start (std::make_shared<nano::state_block> (send));
	nano::state_block open (key.pub, 0, key.pub, nano::Gxrb_ratio, send.hash (), key.prv, key.pub, *node1.work_generate_blocking (key.pub));
	enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "process");
	std::string json;
	open.serialize_json (json);
	request.put ("block", json);
	request.put ("subtype", "send");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// The block's balance increases, so subtype "send" is inconsistent.
	std::error_code ec (nano::error_rpc::invalid_subtype_balance);
	ASSERT_EQ (response.json.get<std::string> ("error"), ec.message ());
	request.put ("subtype", "epoch");
	test_response response2 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s); // was missing: without a deadline a failure here hung the test
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response2.status);
	ASSERT_EQ (response2.json.get<std::string> ("error"), ec.message ());
	request.put ("subtype", "open");
	test_response response3 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s); // was missing (see above)
	while (response3.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response3.status);
	// Correct subtype: the block is accepted and its hash returned.
	ASSERT_EQ (open.hash ().to_string (), response3.json.get<std::string> ("hash"));
	// The accepted open block should propagate to the second node.
	system.deadline_set (10s);
	while (system.nodes[1]->latest (key.pub) != open.hash ())
	{
		ASSERT_NO_ERROR (system.poll ());
	}
}
// RPC "process" with a subtype hint: a self-receive block must be rejected
// under the wrong subtypes ("send", "open") and accepted under "receive".
TEST (rpc, process_subtype_receive)
{
	nano::system system (24000, 2);
	auto latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
	auto & node1 (*system.nodes[0]);
	// Genesis sends to itself so a receive on the same chain becomes valid
	nano::state_block send (nano::genesis_account, latest, nano::genesis_account, nano::genesis_amount - nano::Gxrb_ratio, nano::test_genesis_key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
	{
		auto transaction (node1.store.tx_begin_write ());
		ASSERT_EQ (nano::process_result::progress, node1.ledger.process (transaction, send).code);
	}
	scoped_io_thread_name_change scoped_thread_name_io;
	node1.active.start (std::make_shared<nano::state_block> (send));
	nano::state_block receive (nano::test_genesis_key.pub, send.hash (), nano::test_genesis_key.pub, nano::genesis_amount, send.hash (), nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (send.hash ()));
	enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "process");
	std::string json;
	receive.serialize_json (json);
	request.put ("block", json);
	// Wrong subtype "send": balance increases, so a balance-mismatch error is expected
	request.put ("subtype", "send");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	std::error_code ec (nano::error_rpc::invalid_subtype_balance);
	ASSERT_EQ (response.json.get<std::string> ("error"), ec.message ());
	// Wrong subtype "open": the block has a previous, so it cannot be an open
	request.put ("subtype", "open");
	test_response response2 (request, rpc.config.port, system.io_ctx);
	// Deadline added so a missing reply fails the test instead of hanging it
	system.deadline_set (5s);
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response2.status);
	ec = nano::error_rpc::invalid_subtype_previous;
	ASSERT_EQ (response2.json.get<std::string> ("error"), ec.message ());
	// Correct subtype "receive": the block is processed and its hash returned
	request.put ("subtype", "receive");
	test_response response3 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response3.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response3.status);
	ASSERT_EQ (receive.hash ().to_string (), response3.json.get<std::string> ("hash"));
	// The processed block should also propagate to the second node
	system.deadline_set (10s);
	while (system.nodes[1]->latest (nano::test_genesis_key.pub) != receive.hash ())
	{
		ASSERT_NO_ERROR (system.poll ());
	}
}
// RPC "keepalive" pointed at a previously unknown endpoint should make the
// node establish a network channel to it.
TEST (rpc, keepalive)
{
	nano::system system (24000, 1);
	// Start a second node manually so node 0 does not know about it yet
	auto node1 (std::make_shared<nano::node> (system.io_ctx, 24001, nano::unique_path (), system.alarm, system.logging, system.work));
	node1->start ();
	system.nodes.push_back (node1);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "keepalive");
	auto address (boost::str (boost::format ("%1%") % node1->network.endpoint ().address ()));
	auto port (boost::str (boost::format ("%1%") % node1->network.endpoint ().port ()));
	request.put ("address", address);
	request.put ("port", port);
	// Precondition: no channel to node1 exists before the keepalive
	ASSERT_EQ (nullptr, system.nodes[0]->network.udp_channels.channel (node1->network.endpoint ()))
	ASSERT_EQ (0, system.nodes[0]->network.size ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// The keepalive should eventually create a channel to node1
	system.deadline_set (10s);
	while (system.nodes[0]->network.find_channel (node1->network.endpoint ()) == nullptr)
	{
		ASSERT_EQ (0, system.nodes[0]->network.size ());
		ASSERT_NO_ERROR (system.poll ());
	}
	node1->stop ();
}
// RPC "payment_init" on a freshly created wallet should report status "Ready".
TEST (rpc, payment_init)
{
	nano::system system (24000, 1);
	auto node1 (system.nodes[0]);
	auto wallet_id = nano::random_wallet_id ();
	auto wallet (node1->wallets.create (wallet_id));
	ASSERT_TRUE (node1->wallets.items.find (wallet_id) != node1->wallets.items.end ());
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node1->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "payment_init");
	request.put ("wallet", wallet_id.to_string ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	ASSERT_EQ ("Ready", response.json.get<std::string> ("status"));
}
// RPC "payment_begin" should reserve a wallet account (removing it from the
// free pool and generating work for it) and "payment_end" should release it
// back into the free pool.
TEST (rpc, payment_begin_end)
{
	nano::system system (24000, 1);
	auto node1 (system.nodes[0]);
	auto wallet_id = nano::random_wallet_id ();
	auto wallet (node1->wallets.create (wallet_id));
	ASSERT_TRUE (node1->wallets.items.find (wallet_id) != node1->wallets.items.end ());
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node1->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request1;
	request1.put ("action", "payment_begin");
	request1.put ("wallet", wallet_id.to_string ());
	test_response response1 (request1, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response1.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response1.status);
	// The reserved account must exist in the wallet
	auto account_text (response1.json.get<std::string> ("account"));
	nano::account account;
	ASSERT_FALSE (account.decode_account (account_text));
	ASSERT_TRUE (wallet->exists (account));
	nano::root root1;
	{
		auto transaction (node1->store.tx_begin_read ());
		root1 = node1->ledger.latest_root (transaction, account);
	}
	// Find a small work value that does NOT satisfy the threshold
	// (work_validate returns true when the work is invalid)
	uint64_t work (0);
	while (!nano::work_validate (root1, work))
	{
		++work;
		ASSERT_LT (work, 50);
	}
	// Wait until the wallet's background work generation stores valid work
	system.deadline_set (10s);
	while (nano::work_validate (root1, work))
	{
		auto ec = system.poll ();
		auto transaction (wallet->wallets.tx_begin_read ());
		ASSERT_FALSE (wallet->store.work_get (transaction, account, work));
		ASSERT_NO_ERROR (ec);
	}
	// payment_begin removed the account from the free pool
	ASSERT_EQ (wallet->free_accounts.end (), wallet->free_accounts.find (account));
	boost::property_tree::ptree request2;
	request2.put ("action", "payment_end");
	request2.put ("wallet", wallet_id.to_string ());
	request2.put ("account", account.to_account ());
	test_response response2 (request2, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response2.status);
	// payment_end returned the account to the free pool
	ASSERT_TRUE (wallet->exists (account));
	ASSERT_NE (wallet->free_accounts.end (), wallet->free_accounts.find (account));
	rpc.stop ();
	system.stop ();
}
// RPC "payment_end" on an account that still holds a balance (the genesis
// account here) must return an error.
TEST (rpc, payment_end_nonempty)
{
	nano::system system (24000, 1);
	auto node1 (system.nodes[0]);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	auto transaction (node1->wallets.tx_begin_read ());
	system.wallet (0)->init_free_accounts (transaction);
	auto wallet_id (node1->wallets.items.begin ()->first);
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node1->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request1;
	request1.put ("action", "payment_end");
	request1.put ("wallet", wallet_id.to_string ());
	request1.put ("account", nano::test_genesis_key.pub.to_account ());
	test_response response1 (request1, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response1.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response1.status);
	// Any non-empty error string signals the expected rejection
	ASSERT_FALSE (response1.json.get<std::string> ("error", "").empty ());
}
// RPC "payment_begin" must never hand out an account with a balance, so it
// cannot return the genesis account even though it is in the wallet.
TEST (rpc, payment_zero_balance)
{
	nano::system system (24000, 1);
	auto node1 (system.nodes[0]);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	auto transaction (node1->wallets.tx_begin_read ());
	system.wallet (0)->init_free_accounts (transaction);
	auto wallet_id (node1->wallets.items.begin ()->first);
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node1->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request1;
	request1.put ("action", "payment_begin");
	request1.put ("wallet", wallet_id.to_string ());
	test_response response1 (request1, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response1.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response1.status);
	auto account_text (response1.json.get<std::string> ("account"));
	nano::account account;
	ASSERT_FALSE (account.decode_account (account_text));
	// The returned account must not be the funded genesis account
	ASSERT_NE (nano::test_genesis_key.pub, account);
}
// RPC "payment_begin" after a matching "payment_end" should hand back the
// same (zero-balance) account that was just released.
TEST (rpc, payment_begin_reuse)
{
	nano::system system (24000, 1);
	auto node1 (system.nodes[0]);
	auto wallet_id = nano::random_wallet_id ();
	auto wallet (node1->wallets.create (wallet_id));
	ASSERT_TRUE (node1->wallets.items.find (wallet_id) != node1->wallets.items.end ());
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node1->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request1;
	request1.put ("action", "payment_begin");
	request1.put ("wallet", wallet_id.to_string ());
	test_response response1 (request1, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response1.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response1.status);
	// First payment_begin reserves an account (removed from the free pool)
	auto account_text (response1.json.get<std::string> ("account"));
	nano::account account;
	ASSERT_FALSE (account.decode_account (account_text));
	ASSERT_TRUE (wallet->exists (account));
	ASSERT_EQ (wallet->free_accounts.end (), wallet->free_accounts.find (account));
	boost::property_tree::ptree request2;
	request2.put ("action", "payment_end");
	request2.put ("wallet", wallet_id.to_string ());
	request2.put ("account", account.to_account ());
	test_response response2 (request2, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response2.status);
	// payment_end released the account back into the free pool
	ASSERT_TRUE (wallet->exists (account));
	ASSERT_NE (wallet->free_accounts.end (), wallet->free_accounts.find (account));
	test_response response3 (request1, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response3.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response3.status);
	// Fixed: read the account from response3 (the second payment_begin);
	// reading response1 here compared the first result with itself.
	auto account2_text (response3.json.get<std::string> ("account"));
	nano::account account2;
	ASSERT_FALSE (account2.decode_account (account2_text));
	// The second payment_begin must reuse the released account
	ASSERT_EQ (account, account2);
}
// RPC "payment_begin" on a locked (rekeyed, not unlocked) wallet must fail.
TEST (rpc, payment_begin_locked)
{
	nano::system system (24000, 1);
	auto node1 (system.nodes[0]);
	auto wallet_id = nano::random_wallet_id ();
	auto wallet (node1->wallets.create (wallet_id));
	{
		// Set a password and confirm the empty password no longer works
		auto transaction (wallet->wallets.tx_begin_write ());
		wallet->store.rekey (transaction, "1");
		ASSERT_TRUE (wallet->store.attempt_password (transaction, ""));
	}
	scoped_io_thread_name_change scoped_thread_name_io;
	ASSERT_TRUE (node1->wallets.items.find (wallet_id) != node1->wallets.items.end ());
	enable_ipc_transport_tcp (node1->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request1;
	request1.put ("action", "payment_begin");
	request1.put ("wallet", wallet_id.to_string ());
	test_response response1 (request1, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response1.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response1.status);
	// Any non-empty error string signals the expected rejection
	ASSERT_FALSE (response1.json.get<std::string> ("error", "").empty ());
}
// RPC "payment_wait" should return "nothing" when the amount does not arrive
// within the timeout and "success" once the account's balance reaches the
// requested amount.
TEST (rpc, payment_wait)
{
	nano::system system (24000, 1);
	auto node1 (system.nodes[0]);
	nano::keypair key;
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	system.wallet (0)->insert_adhoc (key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node1->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request1;
	request1.put ("action", "payment_wait");
	request1.put ("account", key.pub.to_account ());
	request1.put ("amount", nano::amount (nano::Mxrb_ratio).to_string_dec ());
	// Short timeout with no incoming payment: expect status "nothing"
	request1.put ("timeout", "100");
	test_response response1 (request1, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response1.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response1.status);
	ASSERT_EQ ("nothing", response1.json.get<std::string> ("status"));
	// Long timeout with payments arriving: expect status "success"
	request1.put ("timeout", "100000");
	scoped_thread_name_io.reset ();
	system.wallet (0)->send_action (nano::test_genesis_key.pub, key.pub, nano::Mxrb_ratio);
	// Schedule a second send shortly after so the doubled amount also arrives
	system.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (500), [&]() {
		system.nodes.front ()->worker.push_task ([&]() {
			system.wallet (0)->send_action (nano::test_genesis_key.pub, key.pub, nano::Mxrb_ratio);
		});
	});
	scoped_thread_name_io.renew ();
	test_response response2 (request1, rpc.config.port, system.io_ctx);
	// Deadline added so a missing reply fails the test instead of hanging it
	system.deadline_set (10s);
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response2.status);
	ASSERT_EQ ("success", response2.json.get<std::string> ("status"));
	// Waiting for double the amount also succeeds after the second send
	request1.put ("amount", nano::amount (nano::Mxrb_ratio * 2).to_string_dec ())
	test_response response3 (request1, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response3.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response3.status);
	// Fixed: assert on response3; the original re-checked response2 and
	// never validated the double-amount wait.
	ASSERT_EQ ("success", response3.json.get<std::string> ("status"));
}
// RPC "peers" should list both the real second node and a manually inserted
// UDP channel, each mapped to its protocol version.
TEST (rpc, peers)
{
	nano::system system (24000, 2);
	scoped_io_thread_name_change scoped_thread_name_io;
	nano::endpoint endpoint (boost::asio::ip::address_v6::from_string ("fc00::1"), 4000);
	auto node = system.nodes.front ();
	// Insert a fake peer so the list contains a known synthetic entry
	node->network.udp_channels.insert (endpoint, node->network_params.protocol.protocol_version);
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "peers");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto & peers_node (response.json.get_child ("peers"));
	ASSERT_EQ (2, peers_node.size ());
	ASSERT_EQ (std::to_string (node->network_params.protocol.protocol_version), peers_node.get<std::string> ("[::1]:24001"));
	// Previously "[::ffff:80.80.80.80]:4000", but IPv4 address cause "No such node thrown in the test body" issue with peers_node.get
	std::stringstream endpoint_text;
	endpoint_text << endpoint;
	ASSERT_EQ (std::to_string (node->network_params.protocol.protocol_version), peers_node.get<std::string> (endpoint_text.str ()));
}
// RPC "peers" with peer_details=true should include protocol_version and
// node_id per peer; a raw UDP channel without a handshake has an empty id.
TEST (rpc, peers_node_id)
{
	nano::system system (24000, 2);
	scoped_io_thread_name_change scoped_thread_name_io;
	nano::endpoint endpoint (boost::asio::ip::address_v6::from_string ("fc00::1"), 4000);
	auto node = system.nodes.front ();
	// Insert a fake peer that never completed a node-id handshake
	node->network.udp_channels.insert (endpoint, node->network_params.protocol.protocol_version);
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "peers");
	request.put ("peer_details", true);
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto & peers_node (response.json.get_child ("peers"));
	ASSERT_EQ (2, peers_node.size ());
	// The real second node reports its node id
	auto tree1 (peers_node.get_child ("[::1]:24001"));
	ASSERT_EQ (std::to_string (node->network_params.protocol.protocol_version), tree1.get<std::string> ("protocol_version"));
	ASSERT_EQ (system.nodes[1]->node_id.pub.to_node_id (), tree1.get<std::string> ("node_id"));
	std::stringstream endpoint_text;
	endpoint_text << endpoint;
	// The synthetic peer has no node id
	auto tree2 (peers_node.get_child (endpoint_text.str ()));
	ASSERT_EQ (std::to_string (node->network_params.protocol.protocol_version), tree2.get<std::string> ("protocol_version"));
	ASSERT_EQ ("", tree2.get<std::string> ("node_id"));
}
// RPC "pending": exercises the basic listing plus the sorting, threshold,
// source/min_version, and include_only_confirmed options against a single
// pending send of 100 raw.
TEST (rpc, pending)
{
	nano::system system (24000, 1);
	nano::keypair key1;
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	auto block1 (system.wallet (0)->send_action (nano::test_genesis_key.pub, key1.pub, 100));
	scoped_io_thread_name_change scoped_thread_name_io;
	// Wait for the send's election to finish so the block is confirmed
	system.deadline_set (5s);
	while (system.nodes[0]->active.active (*block1))
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "pending");
	request.put ("account", key1.pub.to_account ());
	request.put ("count", "100");
	{
		// Plain listing: the single pending block's hash
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto & blocks_node (response.json.get_child ("blocks"));
		ASSERT_EQ (1, blocks_node.size ());
		nano::block_hash hash (blocks_node.begin ()->second.get<std::string> (""));
		ASSERT_EQ (block1->hash (), hash);
	}
	request.put ("sorting", "true"); // Sorting test
	{
		// With sorting, hashes become keys and amounts become values
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto & blocks_node (response.json.get_child ("blocks"));
		ASSERT_EQ (1, blocks_node.size ());
		nano::block_hash hash (blocks_node.begin ()->first);
		ASSERT_EQ (block1->hash (), hash);
		std::string amount (blocks_node.begin ()->second.get<std::string> (""));
		ASSERT_EQ ("100", amount);
	}
	request.put ("threshold", "100"); // Threshold test
	{
		// Threshold equal to the amount: the block is still included
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto & blocks_node (response.json.get_child ("blocks"));
		ASSERT_EQ (1, blocks_node.size ());
		std::unordered_map<nano::block_hash, nano::uint128_union> blocks;
		for (auto i (blocks_node.begin ()), j (blocks_node.end ()); i != j; ++i)
		{
			nano::block_hash hash;
			hash.decode_hex (i->first);
			nano::uint128_union amount;
			amount.decode_dec (i->second.get<std::string> (""));
			blocks[hash] = amount;
			// source/min_version not requested, so they must be absent
			boost::optional<std::string> source (i->second.get_optional<std::string> ("source"));
			ASSERT_FALSE (source.is_initialized ());
			boost::optional<uint8_t> min_version (i->second.get_optional<uint8_t> ("min_version"));
			ASSERT_FALSE (min_version.is_initialized ());
		}
		ASSERT_EQ (blocks[block1->hash ()], 100);
	}
	request.put ("threshold", "101");
	{
		// Threshold above the amount: nothing is returned
		test_response response (request, rpc.config.port, system.io_ctx);
		// Deadline added for consistency with the sibling sections; without
		// it a missing reply would hang the test forever
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto & blocks_node (response.json.get_child ("blocks"));
		ASSERT_EQ (0, blocks_node.size ());
	}
	request.put ("threshold", "0");
	request.put ("source", "true");
	request.put ("min_version", "true");
	{
		// Requesting source and min_version adds both fields per block
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto & blocks_node (response.json.get_child ("blocks"));
		ASSERT_EQ (1, blocks_node.size ());
		std::unordered_map<nano::block_hash, nano::uint128_union> amounts;
		std::unordered_map<nano::block_hash, nano::account> sources;
		for (auto i (blocks_node.begin ()), j (blocks_node.end ()); i != j; ++i)
		{
			nano::block_hash hash;
			hash.decode_hex (i->first);
			amounts[hash].decode_dec (i->second.get<std::string> ("amount"));
			sources[hash].decode_account (i->second.get<std::string> ("source"));
			ASSERT_EQ (i->second.get<uint8_t> ("min_version"), 0);
		}
		ASSERT_EQ (amounts[block1->hash ()], 100);
		ASSERT_EQ (sources[block1->hash ()], nano::test_genesis_key.pub);
	}
	request.put ("account", key1.pub.to_account ());
	request.put ("source", "false");
	request.put ("min_version", "false");
	auto check_block_response_count = [&system, &request, &rpc](size_t size) {
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		ASSERT_EQ (size, response.json.get_child ("blocks").size ());
	};
	// include_only_confirmed: visible while confirmed, hidden after the
	// confirmation height is reset
	request.put ("include_only_confirmed", "true");
	check_block_response_count (1);
	scoped_thread_name_io.reset ();
	reset_confirmation_height (system.nodes.front ()->store, block1->account ());
	scoped_thread_name_io.renew ();
	check_block_response_count (0);
}
// RPC "search_pending" should make the wallet find and receive a pending
// send that was inserted directly into the ledger.
TEST (rpc, search_pending)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	auto wallet (system.nodes[0]->wallets.items.begin ()->first.to_string ());
	auto latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
	// Genesis sends to itself; the receive is left pending
	nano::send_block block (latest, nano::test_genesis_key.pub, nano::genesis_amount - system.nodes[0]->config.receive_minimum.number (), nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.nodes[0]->work_generate_blocking (latest));
	{
		auto transaction (system.nodes[0]->store.tx_begin_write ());
		ASSERT_EQ (nano::process_result::progress, system.nodes[0]->ledger.process (transaction, block).code);
	}
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "search_pending");
	request.put ("wallet", wallet);
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// The pending amount should be received, restoring the full balance
	system.deadline_set (10s);
	while (system.nodes[0]->balance (nano::test_genesis_key.pub) != nano::genesis_amount)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
}
// RPC "version" should report the RPC/store/protocol versions, vendor and
// network info, and the expected HTTP response headers.
TEST (rpc, version)
{
	nano::system system (24000, 1);
	auto node1 (system.nodes[0]);
	nano::keypair key;
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	system.wallet (0)->insert_adhoc (key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node1->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request1;
	request1.put ("action", "version");
	test_response response1 (request1, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response1.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	// The duplicated status assertion that followed "rpc_version" was removed
	ASSERT_EQ (200, response1.status);
	ASSERT_EQ ("1", response1.json.get<std::string> ("rpc_version"));
	{
		auto transaction (system.nodes[0]->store.tx_begin_read ());
		ASSERT_EQ (std::to_string (node1->store.version_get (transaction)), response1.json.get<std::string> ("store_version"));
	}
	ASSERT_EQ (std::to_string (node1->network_params.protocol.protocol_version), response1.json.get<std::string> ("protocol_version"));
	ASSERT_EQ (boost::str (boost::format ("Nano %1%") % NANO_VERSION_STRING), response1.json.get<std::string> ("node_vendor"));
	auto network_label (node1->network_params.network.get_current_network_as_string ());
	ASSERT_EQ (network_label, response1.json.get<std::string> ("network"));
	// The network identifier is the genesis open block's hash
	auto genesis_open (node1->latest (nano::test_genesis_key.pub));
	ASSERT_EQ (genesis_open.to_string (), response1.json.get<std::string> ("network_identifier"));
	ASSERT_EQ (BUILD_INFO, response1.json.get<std::string> ("build_info"));
	// Verify the CORS and content-negotiation headers of the HTTP response
	auto headers (response1.resp.base ());
	auto allow (headers.at ("Allow"));
	auto content_type (headers.at ("Content-Type"));
	auto access_control_allow_origin (headers.at ("Access-Control-Allow-Origin"));
	auto access_control_allow_methods (headers.at ("Access-Control-Allow-Methods"));
	auto access_control_allow_headers (headers.at ("Access-Control-Allow-Headers"));
	auto connection (headers.at ("Connection"));
	ASSERT_EQ ("POST, OPTIONS", allow);
	ASSERT_EQ ("application/json", content_type);
	ASSERT_EQ ("*", access_control_allow_origin);
	ASSERT_EQ (allow, access_control_allow_methods);
	ASSERT_EQ ("Accept, Accept-Language, Content-Language, Content-Type", access_control_allow_headers);
	ASSERT_EQ ("close", connection);
}
// RPC "work_generate" should return valid work for the given hash, with a
// difficulty and multiplier consistent with work_validate; the same holds
// when use_peers is enabled.
TEST (rpc, work_generate)
{
	nano::system system (24000, 1);
	auto node (system.nodes[0]);
	nano::keypair key;
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	system.wallet (0)->insert_adhoc (key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	nano::block_hash hash (1);
	boost::property_tree::ptree request;
	request.put ("action", "work_generate");
	request.put ("hash", hash.to_string ());
	// Shared verification: work must validate and the reported difficulty
	// and multiplier must match the locally computed values
	auto verify_response = [node, &rpc, &system](auto & request, auto & hash) {
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		ASSERT_EQ (hash.to_string (), response.json.get<std::string> ("hash"));
		auto work_text (response.json.get<std::string> ("work"));
		uint64_t work, result_difficulty;
		ASSERT_FALSE (nano::from_string_hex (work_text, work));
		ASSERT_FALSE (nano::work_validate (hash, work, &result_difficulty));
		auto response_difficulty_text (response.json.get<std::string> ("difficulty"));
		uint64_t response_difficulty;
		ASSERT_FALSE (nano::from_string_hex (response_difficulty_text, response_difficulty));
		ASSERT_EQ (result_difficulty, response_difficulty);
		auto multiplier = response.json.get<double> ("multiplier");
		ASSERT_NEAR (nano::difficulty::to_multiplier (result_difficulty, node->network_params.network.publish_threshold), multiplier, 1e-6);
	};
	verify_response (request, hash);
	// Repeat with distributed work requests enabled
	request.put ("use_peers", "true");
	verify_response (request, hash);
}
// RPC "work_generate" with an explicit difficulty: the result must meet the
// requested difficulty, and a request above the node's configured maximum
// must be rejected.
TEST (rpc, work_generate_difficulty)
{
	nano::system system;
	nano::node_config node_config (24000, system.logging);
	// Cap the accepted difficulty so the rejection path can be exercised
	node_config.max_work_generate_difficulty = 0xffff000000000000;
	auto node = system.add_node (node_config);
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	nano::block_hash hash (1);
	boost::property_tree::ptree request;
	request.put ("action", "work_generate");
	request.put ("hash", hash.to_string ());
	{
		// Moderately raised difficulty: work must meet at least that value
		uint64_t difficulty (0xfff0000000000000);
		request.put ("difficulty", nano::to_string_hex (difficulty));
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (10s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto work_text (response.json.get<std::string> ("work"));
		uint64_t work;
		ASSERT_FALSE (nano::from_string_hex (work_text, work));
		uint64_t result_difficulty;
		ASSERT_FALSE (nano::work_validate (hash, work, &result_difficulty));
		auto response_difficulty_text (response.json.get<std::string> ("difficulty"));
		uint64_t response_difficulty;
		ASSERT_FALSE (nano::from_string_hex (response_difficulty_text, response_difficulty));
		ASSERT_EQ (result_difficulty, response_difficulty);
		auto multiplier = response.json.get<double> ("multiplier");
		// Expected multiplier from base threshold, not from the given difficulty
		ASSERT_EQ (nano::difficulty::to_multiplier (result_difficulty, node->network_params.network.publish_threshold), multiplier);
		ASSERT_GE (result_difficulty, difficulty);
	}
	{
		// Difficulty at the configured maximum: still succeeds (longer deadline
		// because this work is harder to generate)
		uint64_t difficulty (0xffff000000000000);
		request.put ("difficulty", nano::to_string_hex (difficulty));
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (20s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto work_text (response.json.get<std::string> ("work"));
		uint64_t work;
		ASSERT_FALSE (nano::from_string_hex (work_text, work));
		uint64_t result_difficulty;
		ASSERT_FALSE (nano::work_validate (hash, work, &result_difficulty));
		ASSERT_GE (result_difficulty, difficulty);
	}
	{
		// Difficulty above the configured maximum: request must be rejected
		uint64_t difficulty (node->config.max_work_generate_difficulty + 1);
		request.put ("difficulty", nano::to_string_hex (difficulty));
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		std::error_code ec (nano::error_rpc::difficulty_limit);
		ASSERT_EQ (response.json.get<std::string> ("error"), ec.message ());
	}
}
TEST (rpc, work_generate_multiplier)
{
	// Verifies "work_generate" with the "multiplier" parameter: multiplier takes
	// precedence over "difficulty", bad values are rejected, and multipliers
	// above the node's configured maximum difficulty are rejected.
	nano::system system;
	nano::node_config node_config (24000, system.logging);
	node_config.max_work_generate_difficulty = 0xffff000000000000;
	auto node = system.add_node (node_config);
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	nano::block_hash hash (1);
	boost::property_tree::ptree request;
	request.put ("action", "work_generate");
	request.put ("hash", hash.to_string ());
	{
		// When both difficulty and multiplier are given, should use multiplier
		// Give base difficulty and very high multiplier to test
		request.put ("difficulty", nano::to_string_hex (0xff00000000000000));
		double multiplier{ 100.0 };
		request.put ("multiplier", multiplier);
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (10s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto work_text (response.json.get<std::string> ("work"));
		uint64_t work;
		ASSERT_FALSE (nano::from_string_hex (work_text, work));
		uint64_t result_difficulty;
		ASSERT_FALSE (nano::work_validate (hash, work, &result_difficulty));
		auto response_difficulty_text (response.json.get<std::string> ("difficulty"));
		uint64_t response_difficulty;
		ASSERT_FALSE (nano::from_string_hex (response_difficulty_text, response_difficulty));
		ASSERT_EQ (result_difficulty, response_difficulty);
		// The achieved multiplier can exceed, but never undershoot, the request.
		auto result_multiplier = response.json.get<double> ("multiplier");
		ASSERT_GE (result_multiplier, multiplier);
	}
	{
		// Negative multipliers are malformed and must produce a parse error.
		request.put ("multiplier", -1.5);
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		std::error_code ec (nano::error_rpc::bad_multiplier_format);
		ASSERT_EQ (response.json.get<std::string> ("error"), ec.message ());
	}
	{
		// A multiplier mapping beyond max_work_generate_difficulty is rejected.
		double max_multiplier (nano::difficulty::to_multiplier (node->config.max_work_generate_difficulty, node->network_params.network.publish_threshold));
		request.put ("multiplier", max_multiplier + 1);
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		std::error_code ec (nano::error_rpc::difficulty_limit);
		ASSERT_EQ (response.json.get<std::string> ("error"), ec.message ());
	}
}
TEST (rpc, work_cancel)
{
	// Races local work generation against the "work_cancel" RPC. The outer loop
	// retries until a cancellation lands before generation completes; a
	// successful cancellation delivers an empty optional to the work callback.
	nano::system system (24000, 1);
	auto & node1 (*system.nodes[0]);
	nano::keypair key;
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	system.wallet (0)->insert_adhoc (key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	nano::block_hash hash1 (1);
	boost::property_tree::ptree request1;
	request1.put ("action", "work_cancel");
	request1.put ("hash", hash1.to_string ());
	std::atomic<bool> done (false);
	system.deadline_set (10s);
	while (!done)
	{
		// done flips true only when generation is cancelled (no work returned).
		system.work.generate (hash1, [&done](boost::optional<uint64_t> work_a) {
			done = !work_a;
		});
		test_response response1 (request1, rpc.config.port, system.io_ctx);
		std::error_code ec;
		while (response1.status == 0)
		{
			ec = system.poll ();
		}
		ASSERT_EQ (200, response1.status);
		ASSERT_NO_ERROR (ec);
	}
}
TEST (rpc, work_peer_bad)
{
	// A malformed work peer (unspecified address, port 0) must not prevent the
	// node from producing valid work via its local fallback.
	nano::system system (24000, 2);
	auto & node1 = *system.nodes[0];
	auto & node2 = *system.nodes[1];
	nano::keypair wallet_key;
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	system.wallet (0)->insert_adhoc (wallet_key.prv);
	scoped_io_thread_name_change thread_name_guard;
	enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	// Register an unusable peer; generation should still succeed locally.
	node2.config.work_peers.push_back (std::make_pair (boost::asio::ip::address_v6::any ().to_string (), 0));
	nano::block_hash root (1);
	std::atomic<uint64_t> generated (0);
	node2.work_generate (root, [&generated](boost::optional<uint64_t> work_a) {
		ASSERT_TRUE (work_a.is_initialized ());
		generated = *work_a;
	});
	system.deadline_set (5s);
	while (nano::work_validate (root, generated))
	{
		ASSERT_NO_ERROR (system.poll ());
	}
}
TEST (rpc, work_peer_one)
{
	// With node1's RPC registered as a work peer, node2 can outsource work
	// generation and end up with valid work for the requested root.
	nano::system system (24000, 2);
	auto & node1 = *system.nodes[0];
	auto & node2 = *system.nodes[1];
	nano::keypair wallet_key;
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	system.wallet (0)->insert_adhoc (wallet_key.prv);
	scoped_io_thread_name_change thread_name_guard;
	enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	// Point node2 at node1's RPC endpoint as its work peer.
	node2.config.work_peers.push_back (std::make_pair (node1.network.endpoint ().address ().to_string (), rpc.config.port));
	nano::keypair root_key;
	uint64_t generated (0);
	node2.work_generate (root_key.pub, [&generated](boost::optional<uint64_t> work_a) {
		ASSERT_TRUE (work_a.is_initialized ());
		generated = *work_a;
	});
	system.deadline_set (5s);
	while (nano::work_validate (root_key.pub, generated))
	{
		ASSERT_NO_ERROR (system.poll ());
	}
}
TEST (rpc, work_peer_many)
{
	// node1 delegates work generation to three peers (node2..node4), each
	// exposing its own RPC server on a distinct port, and repeatedly checks
	// that the returned work validates for the requested root.
	nano::system system1 (24000, 1);
	nano::system system2 (24001, 1);
	nano::system system3 (24002, 1);
	nano::system system4 (24003, 1);
	auto & node1 (*system1.nodes[0]);
	auto & node2 (*system2.nodes[0]);
	auto & node3 (*system3.nodes[0]);
	auto & node4 (*system4.nodes[0]);
	nano::keypair key;
	// node2's RPC keeps the default port; node3 and node4 are offset below so
	// the three servers do not collide.
	nano::rpc_config config2 (true);
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node2.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server2 (node2, node_rpc_config);
	nano::ipc_rpc_processor ipc_rpc_processor2 (system2.io_ctx, config2);
	nano::rpc rpc2 (system2.io_ctx, config2, ipc_rpc_processor2);
	rpc2.start ();
	nano::rpc_config config3 (true);
	config3.port += 1;
	enable_ipc_transport_tcp (node3.config.ipc_config.transport_tcp, node3.network_params.network.default_ipc_port + 1);
	nano::ipc::ipc_server ipc_server3 (node3, node_rpc_config);
	nano::ipc_rpc_processor ipc_rpc_processor3 (system3.io_ctx, config3);
	nano::rpc rpc3 (system3.io_ctx, config3, ipc_rpc_processor3);
	rpc3.start ();
	nano::rpc_config config4 (true);
	config4.port += 2;
	enable_ipc_transport_tcp (node4.config.ipc_config.transport_tcp, node4.network_params.network.default_ipc_port + 2);
	nano::ipc::ipc_server ipc_server4 (node4, node_rpc_config);
	nano::ipc_rpc_processor ipc_rpc_processor4 (system4.io_ctx, config4);
	// Fix: rpc4 must run on node4's io context (was system2.io_ctx), matching
	// ipc_rpc_processor4; otherwise node4's RPC is serviced by system2's loop.
	nano::rpc rpc4 (system4.io_ctx, config4, ipc_rpc_processor4);
	rpc4.start ();
	node1.config.work_peers.push_back (std::make_pair (node2.network.endpoint ().address ().to_string (), rpc2.config.port));
	node1.config.work_peers.push_back (std::make_pair (node3.network.endpoint ().address ().to_string (), rpc3.config.port));
	node1.config.work_peers.push_back (std::make_pair (node4.network.endpoint ().address ().to_string (), rpc4.config.port));
	for (auto i (0); i < 10; ++i)
	{
		nano::keypair key1;
		uint64_t work (0);
		node1.work_generate (key1.pub, [&work](boost::optional<uint64_t> work_a) {
			ASSERT_TRUE (work_a.is_initialized ());
			work = *work_a;
		});
		// Bound the wait so a wedged peer fails the test instead of hanging it.
		system1.deadline_set (10s);
		while (nano::work_validate (key1.pub, work))
		{
			ASSERT_NO_ERROR (system1.poll ());
			system2.poll ();
			system3.poll ();
			system4.poll ();
		}
	}
}
TEST (rpc, block_count)
{
	// "block_count" on a fresh ledger: one block (genesis), nothing unchecked,
	// one cemented block.
	{
		nano::system system (24000, 1);
		auto & node1 (*system.nodes[0]);
		scoped_io_thread_name_change scoped_thread_name_io;
		enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
		nano::node_rpc_config node_rpc_config;
		nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
		nano::rpc_config rpc_config (true);
		nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
		nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
		rpc.start ();
		boost::property_tree::ptree request1;
		request1.put ("action", "block_count");
		{
			test_response response1 (request1, rpc.config.port, system.io_ctx);
			system.deadline_set (5s);
			while (response1.status == 0)
			{
				ASSERT_NO_ERROR (system.poll ());
			}
			ASSERT_EQ (200, response1.status);
			ASSERT_EQ ("1", response1.json.get<std::string> ("count"));
			ASSERT_EQ ("0", response1.json.get<std::string> ("unchecked"));
			ASSERT_EQ ("1", response1.json.get<std::string> ("cemented"));
		}
	}
	// Should be able to get all counts even when enable_control is false.
	{
		nano::system system (24000, 1);
		auto & node1 (*system.nodes[0]);
		// Consistency fix: the io thread name guard was missing in this scope,
		// unlike the scope above and every sibling test.
		scoped_io_thread_name_change scoped_thread_name_io;
		enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
		nano::node_rpc_config node_rpc_config;
		nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
		nano::rpc_config rpc_config (false);
		nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
		nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
		rpc.start ();
		boost::property_tree::ptree request1;
		request1.put ("action", "block_count");
		{
			test_response response1 (request1, rpc.config.port, system.io_ctx);
			system.deadline_set (5s);
			while (response1.status == 0)
			{
				ASSERT_NO_ERROR (system.poll ());
			}
			ASSERT_EQ (200, response1.status);
			ASSERT_EQ ("1", response1.json.get<std::string> ("count"));
			ASSERT_EQ ("0", response1.json.get<std::string> ("unchecked"));
			ASSERT_EQ ("1", response1.json.get<std::string> ("cemented"));
		}
	}
}
TEST (rpc, frontier_count)
{
	// A fresh ledger holds exactly one frontier: the genesis account's head.
	nano::system system (24000, 1);
	auto & node = *system.nodes[0];
	scoped_io_thread_name_change thread_name_guard;
	enable_ipc_transport_tcp (node.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "frontier_count");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	ASSERT_EQ ("1", response.json.get<std::string> ("count"));
}
TEST (rpc, account_count)
{
	// A fresh ledger contains exactly one account: genesis.
	nano::system system (24000, 1);
	auto & node = *system.nodes[0];
	scoped_io_thread_name_change thread_name_guard;
	enable_ipc_transport_tcp (node.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "account_count");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	ASSERT_EQ ("1", response.json.get<std::string> ("count"));
}
TEST (rpc, available_supply)
{
	// "available_supply" excludes the genesis balance and the burn account:
	// it starts at 0, counts amounts sent to normal accounts, and ignores
	// amounts sent to account 0 (burned).
	nano::system system (24000, 1);
	auto & node1 (*system.nodes[0]);
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request1;
	request1.put ("action", "available_supply");
	test_response response1 (request1, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response1.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response1.status);
	ASSERT_EQ ("0", response1.json.get<std::string> ("available"));
	// Drop the io-thread name guard while performing blocking wallet work on
	// this thread, then restore it before issuing the next RPC.
	scoped_thread_name_io.reset ();
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	nano::keypair key;
	auto block (system.wallet (0)->send_action (nano::test_genesis_key.pub, key.pub, 1));
	scoped_thread_name_io.renew ();
	test_response response2 (request1, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response2.status);
	ASSERT_EQ ("1", response2.json.get<std::string> ("available"));
	scoped_thread_name_io.reset ()
	auto block2 (system.wallet (0)->send_action (nano::test_genesis_key.pub, 0, 100)); // Sending to burning 0 account
	scoped_thread_name_io.renew ();
	test_response response3 (request1, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response3.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response3.status);
	// Burned funds do not increase available supply.
	ASSERT_EQ ("1", response3.json.get<std::string> ("available"));
}
TEST (rpc, mrai_to_raw)
{
	// 1 Mnano converts to exactly Mxrb_ratio raw.
	nano::system system (24000, 1);
	auto & node = *system.nodes[0];
	scoped_io_thread_name_change thread_name_guard;
	enable_ipc_transport_tcp (node.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "mrai_to_raw");
	request.put ("amount", "1");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	ASSERT_EQ (nano::Mxrb_ratio.convert_to<std::string> (), response.json.get<std::string> ("amount"));
}
TEST (rpc, mrai_from_raw)
{
	// Mxrb_ratio raw converts back to exactly 1 Mnano.
	nano::system system (24000, 1);
	auto & node = *system.nodes[0];
	scoped_io_thread_name_change thread_name_guard;
	enable_ipc_transport_tcp (node.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "mrai_from_raw");
	request.put ("amount", nano::Mxrb_ratio.convert_to<std::string> ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	ASSERT_EQ ("1", response.json.get<std::string> ("amount"));
}
TEST (rpc, krai_to_raw)
{
	// 1 knano converts to exactly kxrb_ratio raw.
	nano::system system (24000, 1);
	auto & node = *system.nodes[0];
	scoped_io_thread_name_change thread_name_guard;
	enable_ipc_transport_tcp (node.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "krai_to_raw");
	request.put ("amount", "1");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	ASSERT_EQ (nano::kxrb_ratio.convert_to<std::string> (), response.json.get<std::string> ("amount"));
}
TEST (rpc, krai_from_raw)
{
	// kxrb_ratio raw converts back to exactly 1 knano.
	nano::system system (24000, 1);
	auto & node = *system.nodes[0];
	scoped_io_thread_name_change thread_name_guard;
	enable_ipc_transport_tcp (node.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "krai_from_raw");
	request.put ("amount", nano::kxrb_ratio.convert_to<std::string> ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	ASSERT_EQ ("1", response.json.get<std::string> ("amount"));
}
TEST (rpc, nano_to_raw)
{
	// 1 nano converts to exactly xrb_ratio raw.
	nano::system system (24000, 1);
	auto & node = *system.nodes[0];
	scoped_io_thread_name_change thread_name_guard;
	enable_ipc_transport_tcp (node.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "nano_to_raw");
	request.put ("amount", "1");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	ASSERT_EQ (nano::xrb_ratio.convert_to<std::string> (), response.json.get<std::string> ("amount"));
}
TEST (rpc, nano_from_raw)
{
	// xrb_ratio raw converts back to exactly 1 nano.
	nano::system system (24000, 1);
	auto & node = *system.nodes[0];
	scoped_io_thread_name_change thread_name_guard;
	enable_ipc_transport_tcp (node.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "nano_from_raw");
	request.put ("amount", nano::xrb_ratio.convert_to<std::string> ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	ASSERT_EQ ("1", response.json.get<std::string> ("amount"));
}
TEST (rpc, account_representative)
{
	// The genesis account's representative is the genesis account itself.
	nano::system system (24000, 1);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change thread_name_guard;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("account", nano::genesis_account.to_account ());
	request.put ("action", "account_representative");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto representative_text (response.json.get<std::string> ("representative"));
	ASSERT_EQ (nano::genesis_account.to_account (), representative_text);
}
TEST (rpc, account_representative_set)
{
	// "account_representative_set" publishes a change block for the genesis
	// account pointing at a brand-new representative, and the block must be
	// present in the ledger afterwards.
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	nano::keypair rep;
	request.put ("account", nano::genesis_account.to_account ())
	request.put ("representative", rep.pub.to_account ());
	request.put ("wallet", system.nodes[0]->wallets.items.begin ()->first.to_string ());
	request.put ("action", "account_representative_set");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// The RPC returns the hash of the published change block.
	std::string block_text1 (response.json.get<std::string> ("block"));
	nano::block_hash hash;
	ASSERT_FALSE (hash.decode_hex (block_text1));
	ASSERT_FALSE (hash.is_zero ());
	auto transaction (system.nodes[0]->store.tx_begin_read ());
	ASSERT_TRUE (system.nodes[0]->store.block_exists (transaction, hash));
	// The stored block must carry the requested representative.
	ASSERT_EQ (rep.pub, system.nodes[0]->store.block_get (transaction, hash)->representative ());
}
TEST (rpc, account_representative_set_work_disabled)
{
	// With work generation disabled (work_threads = 0), setting a
	// representative must fail with disabled_work_generation rather than hang.
	nano::system system (24000, 0);
	nano::node_config node_config (24000, system.logging);
	node_config.work_threads = 0;
	auto & node = *system.add_node (node_config);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	nano::keypair rep;
	request.put ("account", nano::genesis_account.to_account ());
	request.put ("representative", rep.pub.to_account ());
	request.put ("wallet", system.nodes[0]->wallets.items.begin ()->first.to_string ());
	request.put ("action", "account_representative_set");
	{
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (10s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		ASSERT_EQ (std::error_code (nano::error_common::disabled_work_generation).message (), response.json.get<std::string> ("error"));
	}
}
TEST (rpc, bootstrap)
{
	// Seeds system1 with one extra send block, then asks system0 to bootstrap
	// from it and waits until both ledgers agree on the genesis frontier.
	nano::system system0 (24000, 1);
	nano::system system1 (24001, 1);
	auto latest (system1.nodes[0]->latest (nano::test_genesis_key.pub));
	nano::send_block send (latest, nano::genesis_account, 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system1.nodes[0]->work_generate_blocking (latest));
	{
		auto transaction (system1.nodes[0]->store.tx_begin_write ());
		ASSERT_EQ (nano::process_result::progress, system1.nodes[0]->ledger.process (transaction, send).code);
	}
	scoped_io_thread_name_change scoped_thread_name_io;
	auto & node = system0.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system0.io_ctx, rpc_config);
	nano::rpc rpc (system0.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "bootstrap");
	request.put ("address", "::ffff:127.0.0.1");
	request.put ("port", system1.nodes[0]->network.endpoint ().port ());
	test_response response (request, rpc.config.port, system0.io_ctx);
	// Fix: bound the wait and surface poll errors; the original loop had no
	// deadline and would spin forever if the RPC never responded.
	system0.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system0.poll ());
	}
	ASSERT_EQ (200, response.status);
	system1.deadline_set (10s);
	while (system0.nodes[0]->latest (nano::genesis_account) != system1.nodes[0]->latest (nano::genesis_account))
	{
		ASSERT_NO_ERROR (system0.poll ());
		ASSERT_NO_ERROR (system1.poll ());
	}
}
TEST (rpc, account_remove)
{
	// "account_remove" deletes a deterministic key from the wallet.
	nano::system system0 (24000, 1);
	auto key1 (system0.wallet (0)->deterministic_insert ());
	scoped_io_thread_name_change scoped_thread_name_io;
	ASSERT_TRUE (system0.wallet (0)->exists (key1));
	auto & node = system0.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system0.io_ctx, rpc_config);
	nano::rpc rpc (system0.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "account_remove");
	request.put ("wallet", system0.nodes[0]->wallets.items.begin ()->first.to_string ());
	request.put ("account", key1.to_account ());
	test_response response (request, rpc.config.port, system0.io_ctx);
	// Fix: bound the wait and surface poll errors; the original loop had no
	// deadline and would spin forever if the RPC never responded.
	system0.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system0.poll ());
	}
	ASSERT_EQ (200, response.status);
	ASSERT_FALSE (system0.wallet (0)->exists (key1));
}
TEST (rpc, representatives)
{
	// Only the genesis account holds voting weight on a fresh ledger, so the
	// representatives listing contains exactly that one account.
	nano::system system0 (24000, 1);
	auto & node = system0.nodes.front ();
	scoped_io_thread_name_change thread_name_guard;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system0.io_ctx, rpc_config);
	nano::rpc rpc (system0.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "representatives");
	test_response response (request, rpc.config.port, system0.io_ctx);
	while (response.status == 0)
	{
		system0.poll ();
	}
	ASSERT_EQ (200, response.status);
	// Each child key of "representatives" is an encoded account.
	auto & representatives_node (response.json.get_child ("representatives"));
	std::vector<nano::account> representatives;
	for (auto const & entry : representatives_node)
	{
		nano::account account;
		ASSERT_FALSE (account.decode_account (entry.first));
		representatives.push_back (account);
	}
	ASSERT_EQ (1, representatives.size ());
	ASSERT_EQ (nano::genesis_account, representatives[0]);
}
// wallet_seed is only available over IPC's unsafe encoding, and when running on test network
TEST (rpc, wallet_seed)
{
	// "wallet_seed" returns the wallet's seed; compare against the seed read
	// directly from the wallet store.
	nano::system system (24000, 1);
	nano::raw_key seed;
	{
		auto transaction (system.nodes[0]->wallets.tx_begin_read ());
		system.wallet (0)->store.seed (seed, transaction);
	}
	scoped_io_thread_name_change scoped_thread_name_io;
	auto & node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "wallet_seed");
	request.put ("wallet", system.nodes[0]->wallets.items.begin ()->first.to_string ());
	// Consistency fix: target the running server's port (rpc.config.port) like
	// every sibling test, rather than the local rpc_config copy.
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	{
		std::string seed_text (response.json.get<std::string> ("seed"));
		ASSERT_EQ (seed.data.to_string (), seed_text);
	}
}
TEST (rpc, wallet_change_seed)
{
	// "wallet_change_seed" replaces the wallet seed and restores the first
	// deterministic account; verify the stored seed changed and the restored
	// account matches the key derived locally from the same seed.
	nano::system system0 (24000, 1);
	nano::raw_key seed;
	{
		// Sanity: the default-constructed seed differs from the wallet's
		// current seed, so the change below is observable.
		auto transaction (system0.nodes[0]->wallets.tx_begin_read ());
		nano::raw_key seed0;
		system0.wallet (0)->store.seed (seed0, transaction);
		ASSERT_NE (seed, seed0);
	}
	scoped_io_thread_name_change scoped_thread_name_io;
	// Derive the expected first deterministic account (index 0) from the seed.
	auto prv = nano::deterministic_key (seed, 0);
	auto pub (nano::pub_key (prv));
	auto & node = system0.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system0.io_ctx, rpc_config);
	nano::rpc rpc (system0.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "wallet_change_seed");
	request.put ("wallet", system0.nodes[0]->wallets.items.begin ()->first.to_string ());
	request.put ("seed", seed.data.to_string ());
	test_response response (request, rpc.config.port, system0.io_ctx);
	system0.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system0.poll ());
	}
	ASSERT_EQ (200, response.status);
	{
		// The wallet store must now hold the new seed.
		auto transaction (system0.nodes[0]->wallets.tx_begin_read ());
		nano::raw_key seed0;
		system0.wallet (0)->store.seed (seed0, transaction);
		ASSERT_EQ (seed, seed0);
	}
	auto account_text (response.json.get<std::string> ("last_restored_account"));
	nano::account account;
	ASSERT_FALSE (account.decode_account (account_text));
	ASSERT_TRUE (system0.wallet (0)->exists (account));
	ASSERT_EQ (pub, account);
	ASSERT_EQ ("1", response.json.get<std::string> ("restored_count"));
}
TEST (rpc, wallet_frontiers)
{
	// A wallet holding only the genesis key reports exactly one frontier:
	// the current genesis head block.
	nano::system system0 (24000, 1);
	system0.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	scoped_io_thread_name_change thread_name_guard;
	auto & node = system0.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system0.io_ctx, rpc_config);
	nano::rpc rpc (system0.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "wallet_frontiers");
	request.put ("wallet", system0.nodes[0]->wallets.items.begin ()->first.to_string ());
	test_response response (request, rpc.config.port, system0.io_ctx);
	while (response.status == 0)
	{
		system0.poll ();
	}
	ASSERT_EQ (200, response.status);
	auto & frontiers_node (response.json.get_child ("frontiers"));
	std::vector<nano::account> frontiers;
	for (auto const & entry : frontiers_node)
	{
		frontiers.push_back (nano::account (entry.second.get<std::string> ("")));
	}
	ASSERT_EQ (1, frontiers.size ());
	ASSERT_EQ (system0.nodes[0]->latest (nano::genesis_account), frontiers[0]);
}
// Exercise the work_validate RPC: valid work, invalid (zero) work, and
// validation against explicit caller-supplied difficulty thresholds.
TEST (rpc, work_validate)
{
nano::network_params params;
nano::system system (24000, 1);
auto & node1 (*system.nodes[0]);
nano::keypair key;
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
system.wallet (0)->insert_adhoc (key.prv);
// NOTE(review): presumably renames the IO threads while in scope -- confirm
scoped_io_thread_name_change scoped_thread_name_io;
// Standard RPC-over-IPC test harness setup
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
nano::block_hash hash (1);
// work1 is generated for this hash, so it must validate below
uint64_t work1 (*node1.work_generate_blocking (hash));
boost::property_tree::ptree request;
request.put ("action", "work_validate");
request.put ("hash", hash.to_string ());
request.put ("work", nano::to_string_hex (work1));
{
// Valid work: valid=1, difficulty at or above the network publish
// threshold, and a consistent multiplier
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::string validate_text (response.json.get<std::string> ("valid"));
ASSERT_EQ ("1", validate_text);
std::string difficulty_text (response.json.get<std::string> ("difficulty"));
uint64_t difficulty;
ASSERT_FALSE (nano::from_string_hex (difficulty_text, difficulty));
ASSERT_GE (difficulty, params.network.publish_threshold);
double multiplier (response.json.get<double> ("multiplier"));
ASSERT_NEAR (multiplier, nano::difficulty::to_multiplier (difficulty, params.network.publish_threshold), 1e-6);
}
// Zero work cannot meet the publish threshold
uint64_t work2 (0);
request.put ("work", nano::to_string_hex (work2));
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::string validate_text (response.json.get<std::string> ("valid"));
ASSERT_EQ ("0", validate_text);
std::string difficulty_text (response.json.get<std::string> ("difficulty"));
uint64_t difficulty;
ASSERT_FALSE (nano::from_string_hex (difficulty_text, difficulty));
ASSERT_GE (params.network.publish_threshold, difficulty);
double multiplier (response.json.get<double> ("multiplier"));
ASSERT_NEAR (multiplier, nano::difficulty::to_multiplier (difficulty, params.network.publish_threshold), 1e-6);
}
// Cross-check with the library-level validator, then validate against the
// exact difficulty work1 achieved
uint64_t result_difficulty;
ASSERT_FALSE (nano::work_validate (hash, work1, &result_difficulty));
ASSERT_GE (result_difficulty, params.network.publish_threshold);
request.put ("work", nano::to_string_hex (work1));
request.put ("difficulty", nano::to_string_hex (result_difficulty));
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
bool validate (response.json.get<bool> ("valid"));
ASSERT_TRUE (validate);
}
// Against a much higher explicit difficulty, validity depends on whether
// work1's achieved difficulty happens to reach it
uint64_t difficulty4 (0xfff0000000000000);
request.put ("work", nano::to_string_hex (work1));
request.put ("difficulty", nano::to_string_hex (difficulty4));
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
bool validate (response.json.get<bool> ("valid"));
ASSERT_EQ (result_difficulty >= difficulty4, validate);
}
// Work generated specifically for difficulty4 must validate against it
uint64_t work3 (*node1.work_generate_blocking (hash, difficulty4));
request.put ("work", nano::to_string_hex (work3));
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
bool validate (response.json.get<bool> ("valid"));
ASSERT_TRUE (validate);
}
}
// Verify the successors RPC walks the chain forward from a block, and that
// "chain" with reverse=true yields an identical result.
TEST (rpc, successors)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	nano::keypair key;
	auto genesis (system.nodes[0]->latest (nano::test_genesis_key.pub));
	ASSERT_FALSE (genesis.is_zero ());
	auto block (system.wallet (0)->send_action (nano::test_genesis_key.pub, key.pub, 1));
	ASSERT_NE (nullptr, block);
	// NOTE(review): presumably renames the IO threads while in scope -- confirm
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	// Standard RPC-over-IPC test harness setup
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "successors");
	request.put ("block", genesis.to_string ());
	request.put ("count", std::to_string (std::numeric_limits<uint64_t>::max ()));
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto & blocks_node (response.json.get_child ("blocks"));
	std::vector<nano::block_hash> blocks;
	for (auto i (blocks_node.begin ()), n (blocks_node.end ()); i != n; ++i)
	{
		blocks.push_back (nano::block_hash (i->second.get<std::string> ("")));
	}
	// Genesis frontier plus the send block, oldest first
	ASSERT_EQ (2, blocks.size ());
	ASSERT_EQ (genesis, blocks[0]);
	ASSERT_EQ (block->hash (), blocks[1]);
	// RPC chain "reverse" option must produce the same output as successors
	request.put ("action", "chain");
	request.put ("reverse", "true");
	test_response response2 (request, rpc.config.port, system.io_ctx);
	// Fresh deadline for the second request; the first wait may have consumed
	// most of the previous allowance
	system.deadline_set (5s);
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response2.status);
	ASSERT_EQ (response.json, response2.json);
}
// bootstrap_any: trigger a bootstrap attempt from any peer and expect an
// empty "success" acknowledgement.
TEST (rpc, bootstrap_any)
{
	nano::system system0 (24000, 1);
	nano::system system1 (24001, 1);
	auto latest (system1.nodes[0]->latest (nano::test_genesis_key.pub));
	// Give system1 a block that system0's node does not have
	nano::send_block send (latest, nano::genesis_account, 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system1.nodes[0]->work_generate_blocking (latest));
	{
		auto transaction (system1.nodes[0]->store.tx_begin_write ());
		ASSERT_EQ (nano::process_result::progress, system1.nodes[0]->ledger.process (transaction, send).code);
	}
	// NOTE(review): presumably renames the IO threads while in scope -- confirm
	scoped_io_thread_name_change scoped_thread_name_io;
	auto & node = system0.nodes.front ();
	// Standard RPC-over-IPC test harness setup
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system0.io_ctx, rpc_config);
	nano::rpc rpc (system0.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "bootstrap_any");
	test_response response (request, rpc.config.port, system0.io_ctx);
	// Bound the wait so a missing response fails the test instead of hanging
	system0.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system0.poll ());
	}
	ASSERT_EQ (200, response.status);
	std::string success (response.json.get<std::string> ("success"));
	ASSERT_TRUE (success.empty ());
}
// republish: rebroadcast an existing block (and optionally preceding blocks
// and source chains) to peers, verifying the returned hash lists.
TEST (rpc, republish)
{
	nano::system system (24000, 2);
	nano::keypair key;
	nano::genesis genesis;
	auto & node1 (*system.nodes[0]);
	auto latest (node1.latest (nano::test_genesis_key.pub));
	nano::send_block send (latest, key.pub, 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
	// Check the process result here too, for parity with the open block below
	ASSERT_EQ (nano::process_result::progress, node1.process (send).code);
	nano::open_block open (send.hash (), key.pub, key.pub, key.prv, key.pub, *node1.work_generate_blocking (key.pub));
	ASSERT_EQ (nano::process_result::progress, node1.process (open).code);
	// NOTE(review): presumably renames the IO threads while in scope -- confirm
	scoped_io_thread_name_change scoped_thread_name_io;
	// Standard RPC-over-IPC test harness setup
	enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "republish");
	request.put ("hash", send.hash ().to_string ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// The republished send should propagate to the second node
	system.deadline_set (10s);
	while (system.nodes[1]->balance (nano::test_genesis_key.pub) == nano::genesis_amount)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	auto & blocks_node (response.json.get_child ("blocks"));
	std::vector<nano::block_hash> blocks;
	for (auto i (blocks_node.begin ()), n (blocks_node.end ()); i != n; ++i)
	{
		blocks.push_back (nano::block_hash (i->second.get<std::string> ("")));
	}
	ASSERT_EQ (1, blocks.size ());
	ASSERT_EQ (send.hash (), blocks[0]);
	// count=1 limits republishing to a single block starting at genesis
	request.put ("hash", genesis.hash ().to_string ());
	request.put ("count", 1);
	test_response response1 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response1.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response1.status);
	blocks_node = response1.json.get_child ("blocks");
	blocks.clear ();
	for (auto i (blocks_node.begin ()), n (blocks_node.end ()); i != n; ++i)
	{
		blocks.push_back (nano::block_hash (i->second.get<std::string> ("")));
	}
	ASSERT_EQ (1, blocks.size ());
	ASSERT_EQ (genesis.hash (), blocks[0]);
	// sources=2 also republishes the source chains of receive/open blocks
	request.put ("hash", open.hash ().to_string ());
	request.put ("sources", 2);
	test_response response2 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response2.status);
	blocks_node = response2.json.get_child ("blocks");
	blocks.clear ();
	for (auto i (blocks_node.begin ()), n (blocks_node.end ()); i != n; ++i)
	{
		blocks.push_back (nano::block_hash (i->second.get<std::string> ("")));
	}
	// Source chain (genesis, send) followed by the open block itself
	ASSERT_EQ (3, blocks.size ());
	ASSERT_EQ (genesis.hash (), blocks[0]);
	ASSERT_EQ (send.hash (), blocks[1]);
	ASSERT_EQ (open.hash (), blocks[2]);
}
// deterministic_key: derive an account from a seed and index via RPC and
// verify it matches the wallet's own deterministic inserts.
TEST (rpc, deterministic_key)
{
nano::system system0 (24000, 1);
nano::raw_key seed;
{
auto transaction (system0.nodes[0]->wallets.tx_begin_read ());
system0.wallet (0)->store.seed (seed, transaction);
}
// Insert three deterministic accounts (indices 0, 1, 2)
nano::account account0 (system0.wallet (0)->deterministic_insert ());
nano::account account1 (system0.wallet (0)->deterministic_insert ());
nano::account account2 (system0.wallet (0)->deterministic_insert ());
auto & node = system0.nodes.front ();
// NOTE(review): presumably renames the IO threads while in scope -- confirm
scoped_io_thread_name_change scoped_thread_name_io;
// Standard RPC-over-IPC test harness setup
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system0.io_ctx, rpc_config);
nano::rpc rpc (system0.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "deterministic_key");
request.put ("seed", seed.data.to_string ());
request.put ("index", "0");
test_response response0 (request, rpc.config.port, system0.io_ctx);
while (response0.status == 0)
{
system0.poll ();
}
ASSERT_EQ (200, response0.status);
// Index 0 must reproduce the first deterministic account
std::string validate_text (response0.json.get<std::string> ("account"));
ASSERT_EQ (account0.to_account (), validate_text);
request.put ("index", "2");
test_response response1 (request, rpc.config.port, system0.io_ctx);
while (response1.status == 0)
{
system0.poll ();
}
ASSERT_EQ (200, response1.status);
// Index 2 matches the third insert, not the second
validate_text = response1.json.get<std::string> ("account");
ASSERT_NE (account1.to_account (), validate_text);
ASSERT_EQ (account2.to_account (), validate_text);
}
// accounts_balances: the genesis account reports the full supply as its
// balance and nothing pending.
TEST (rpc, accounts_balances)
{
	nano::system system (24000, 1);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	// Request balances for a single account: the genesis account
	boost::property_tree::ptree request;
	request.put ("action", "accounts_balances");
	boost::property_tree::ptree account_entry;
	boost::property_tree::ptree accounts_list;
	account_entry.put ("", nano::test_genesis_key.pub.to_account ());
	accounts_list.push_back (std::make_pair ("", account_entry));
	request.add_child ("accounts", accounts_list);
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto & balances_node (response.json.get_child ("balances"));
	for (auto item (balances_node.begin ()); item != balances_node.end (); ++item)
	{
		ASSERT_EQ (nano::test_genesis_key.pub.to_account (), item->first);
		ASSERT_EQ ("340282366920938463463374607431768211455", item->second.get<std::string> ("balance"));
		ASSERT_EQ ("0", item->second.get<std::string> ("pending"));
	}
}
// accounts_frontiers: the genesis account's frontier matches the node's
// latest block for the genesis account.
TEST (rpc, accounts_frontiers)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	// Request frontiers for a single account: the genesis account
	boost::property_tree::ptree request;
	request.put ("action", "accounts_frontiers");
	boost::property_tree::ptree account_entry;
	boost::property_tree::ptree accounts_list;
	account_entry.put ("", nano::test_genesis_key.pub.to_account ());
	accounts_list.push_back (std::make_pair ("", account_entry));
	request.add_child ("accounts", accounts_list);
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto & frontiers_node (response.json.get_child ("frontiers"));
	for (auto item (frontiers_node.begin ()); item != frontiers_node.end (); ++item)
	{
		ASSERT_EQ (nano::test_genesis_key.pub.to_account (), item->first);
		ASSERT_EQ (system.nodes[0]->latest (nano::genesis_account), item->second.get<std::string> (""));
	}
}
// accounts_pending: pending blocks per requested account, exercising the
// count, sorting, threshold, source and include_only_confirmed options.
TEST (rpc, accounts_pending)
{
nano::system system (24000, 1);
nano::keypair key1;
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
auto block1 (system.wallet (0)->send_action (nano::test_genesis_key.pub, key1.pub, 100));
// NOTE(review): presumably renames the IO threads while in scope -- confirm
scoped_io_thread_name_change scoped_thread_name_io;
// Wait for the send's election to finish before querying
system.deadline_set (5s);
while (system.nodes[0]->active.active (*block1))
{
ASSERT_NO_ERROR (system.poll ());
}
auto node = system.nodes.front ();
// Standard RPC-over-IPC test harness setup
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "accounts_pending");
boost::property_tree::ptree entry;
boost::property_tree::ptree peers_l;
entry.put ("", key1.pub.to_account ());
peers_l.push_back (std::make_pair ("", entry));
request.add_child ("accounts", peers_l);
request.put ("count", "100");
{
// Default form: blocks are listed as bare hashes per account
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
for (auto & blocks : response.json.get_child ("blocks"))
{
std::string account_text (blocks.first);
ASSERT_EQ (key1.pub.to_account (), account_text);
nano::block_hash hash1 (blocks.second.begin ()->second.get<std::string> (""));
ASSERT_EQ (block1->hash (), hash1);
}
}
request.put ("sorting", "true"); // Sorting test
{
// With sorting, entries are keyed by hash with the amount as the value
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
for (auto & blocks : response.json.get_child ("blocks"))
{
std::string account_text (blocks.first);
ASSERT_EQ (key1.pub.to_account (), account_text);
nano::block_hash hash1 (blocks.second.begin ()->first);
ASSERT_EQ (block1->hash (), hash1);
std::string amount (blocks.second.begin ()->second.get<std::string> (""));
ASSERT_EQ ("100", amount);
}
}
request.put ("threshold", "100"); // Threshold test
{
// Threshold equal to the pending amount still includes the block; the
// source field must be absent unless requested
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::unordered_map<nano::block_hash, nano::uint128_union> blocks;
for (auto & pending : response.json.get_child ("blocks"))
{
std::string account_text (pending.first);
ASSERT_EQ (key1.pub.to_account (), account_text);
for (auto i (pending.second.begin ()), j (pending.second.end ()); i != j; ++i)
{
nano::block_hash hash;
hash.decode_hex (i->first);
nano::uint128_union amount;
amount.decode_dec (i->second.get<std::string> (""));
blocks[hash] = amount;
boost::optional<std::string> source (i->second.get_optional<std::string> ("source"));
ASSERT_FALSE (source.is_initialized ());
}
}
ASSERT_EQ (blocks[block1->hash ()], 100);
}
request.put ("source", "true");
{
// With source=true each entry carries the amount and the sending account
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::unordered_map<nano::block_hash, nano::uint128_union> amounts;
std::unordered_map<nano::block_hash, nano::account> sources;
for (auto & pending : response.json.get_child ("blocks"))
{
std::string account_text (pending.first);
ASSERT_EQ (key1.pub.to_account (), account_text);
for (auto i (pending.second.begin ()), j (pending.second.end ()); i != j; ++i)
{
nano::block_hash hash;
hash.decode_hex (i->first);
amounts[hash].decode_dec (i->second.get<std::string> ("amount"));
sources[hash].decode_account (i->second.get<std::string> ("source"));
}
}
ASSERT_EQ (amounts[block1->hash ()], 100);
ASSERT_EQ (sources[block1->hash ()], nano::test_genesis_key.pub);
}
request.put ("include_only_confirmed", "true");
check_block_response_count (system, rpc, request, 1);
scoped_thread_name_io.reset ();
// After clearing confirmation height the block is no longer confirmed, so
// the confirmed-only query must return nothing
reset_confirmation_height (system.nodes.front ()->store, block1->account ());
scoped_thread_name_io.renew ();
check_block_response_count (system, rpc, request, 0);
}
// blocks: requesting the genesis frontier hash returns non-empty block
// contents keyed by that hash.
TEST (rpc, blocks)
{
	nano::system system (24000, 1);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	// Request the contents of a single hash: the genesis account's frontier
	auto frontier (system.nodes[0]->latest (nano::genesis_account));
	boost::property_tree::ptree request;
	request.put ("action", "blocks");
	boost::property_tree::ptree hash_entry;
	boost::property_tree::ptree hashes_list;
	hash_entry.put ("", frontier.to_string ());
	hashes_list.push_back (std::make_pair ("", hash_entry));
	request.add_child ("hashes", hashes_list);
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	for (auto & item : response.json.get_child ("blocks"))
	{
		ASSERT_EQ (frontier.to_string (), item.first);
		ASSERT_FALSE (item.second.get<std::string> ("").empty ());
	}
}
// wallet_info: aggregate wallet statistics (balance, pending, account counts
// and the deterministic key index).
TEST (rpc, wallet_info)
{
nano::system system (24000, 1);
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
nano::keypair key;
system.wallet (0)->insert_adhoc (key.prv);
// Send 1 raw away so the genesis balance drops and 1 raw shows as pending
auto send (system.wallet (0)->send_action (nano::test_genesis_key.pub, key.pub, 1));
nano::account account (system.wallet (0)->deterministic_insert ());
{
auto transaction (system.nodes[0]->wallets.tx_begin_write ());
system.wallet (0)->store.erase (transaction, account);
}
// Re-insert after the erase; the deterministic index keeps advancing
account = system.wallet (0)->deterministic_insert ();
auto node = system.nodes.front ();
// NOTE(review): presumably renames the IO threads while in scope -- confirm
scoped_io_thread_name_change scoped_thread_name_io;
// Standard RPC-over-IPC test harness setup
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "wallet_info");
request.put ("wallet", system.nodes[0]->wallets.items.begin ()->first.to_string ());
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::string balance_text (response.json.get<std::string> ("balance"));
ASSERT_EQ ("340282366920938463463374607431768211454", balance_text);
std::string pending_text (response.json.get<std::string> ("pending"));
ASSERT_EQ ("1", pending_text);
// 2 adhoc keys (genesis + key) plus 1 deterministic account = 3 total
std::string count_text (response.json.get<std::string> ("accounts_count"));
ASSERT_EQ ("3", count_text);
std::string adhoc_count (response.json.get<std::string> ("adhoc_count"));
ASSERT_EQ ("2", adhoc_count);
std::string deterministic_count (response.json.get<std::string> ("deterministic_count"));
ASSERT_EQ ("1", deterministic_count);
// deterministic_insert was called twice, so the next index is 2
std::string index_text (response.json.get<std::string> ("deterministic_index"));
ASSERT_EQ ("2", index_text);
}
// wallet_balances: per-account balance/pending for all wallet accounts, with
// an optional minimum-balance threshold.
TEST (rpc, wallet_balances)
{
nano::system system0 (24000, 1);
system0.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
auto & node = system0.nodes.front ();
// NOTE(review): presumably renames the IO threads while in scope -- confirm
scoped_io_thread_name_change scoped_thread_name_io;
// Standard RPC-over-IPC test harness setup
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system0.io_ctx, rpc_config);
nano::rpc rpc (system0.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "wallet_balances");
request.put ("wallet", system0.nodes[0]->wallets.items.begin ()->first.to_string ());
test_response response (request, rpc.config.port, system0.io_ctx);
while (response.status == 0)
{
system0.poll ();
}
ASSERT_EQ (200, response.status);
// Only the genesis account is present, holding the full supply
for (auto & balances : response.json.get_child ("balances"))
{
std::string account_text (balances.first);
ASSERT_EQ (nano::test_genesis_key.pub.to_account (), account_text);
std::string balance_text (balances.second.get<std::string> ("balance"));
ASSERT_EQ ("340282366920938463463374607431768211455", balance_text);
std::string pending_text (balances.second.get<std::string> ("pending"));
ASSERT_EQ ("0", pending_text);
}
nano::keypair key;
// NOTE(review): reset/renew presumably suspend/reapply the IO thread naming
// around wallet operations performed on this thread -- confirm
scoped_thread_name_io.reset ();
system0.wallet (0)->insert_adhoc (key.prv);
// Send 1 raw to key; key's balance (0) falls below the threshold below
auto send (system0.wallet (0)->send_action (nano::test_genesis_key.pub, key.pub, 1));
scoped_thread_name_io.renew ();
request.put ("threshold", "2");
test_response response1 (request, rpc.config.port, system0.io_ctx);
while (response1.status == 0)
{
system0.poll ();
}
ASSERT_EQ (200, response1.status);
// With threshold=2 only the genesis account (balance >= 2) is reported
for (auto & balances : response1.json.get_child ("balances"))
{
std::string account_text (balances.first);
ASSERT_EQ (nano::test_genesis_key.pub.to_account (), account_text);
std::string balance_text (balances.second.get<std::string> ("balance"));
ASSERT_EQ ("340282366920938463463374607431768211454", balance_text);
std::string pending_text (balances.second.get<std::string> ("pending"));
ASSERT_EQ ("0", pending_text);
}
}
// pending_exists: report whether a hash is a pending (unreceived) send,
// optionally restricted to confirmed blocks.
TEST (rpc, pending_exists)
{
nano::system system (24000, 1);
nano::keypair key1;
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
auto hash0 (system.nodes[0]->latest (nano::genesis_account));
auto block1 (system.wallet (0)->send_action (nano::test_genesis_key.pub, key1.pub, 100));
// NOTE(review): presumably renames the IO threads while in scope -- confirm
scoped_io_thread_name_change scoped_thread_name_io;
// Wait for the send's election to finish before querying
system.deadline_set (5s);
while (system.nodes[0]->active.active (*block1))
{
ASSERT_NO_ERROR (system.poll ());
}
auto node = system.nodes.front ();
// Standard RPC-over-IPC test harness setup
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
// Issues the current request and asserts the "exists" field equals exists_a
auto pending_exists = [&system, &request, &rpc](const char * exists_a) {
test_response response0 (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response0.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response0.status);
std::string exists_text (response0.json.get<std::string> ("exists"));
ASSERT_EQ (exists_a, exists_text);
};
request.put ("action", "pending_exists");
// The genesis frontier is not a pending send
request.put ("hash", hash0.to_string ());
pending_exists ("0");
// The un-received send block is pending
request.put ("hash", block1->hash ().to_string ());
pending_exists ("1");
request.put ("include_only_confirmed", "true");
pending_exists ("1");
scoped_thread_name_io.reset ();
// After clearing confirmation height the block no longer counts as confirmed
reset_confirmation_height (system.nodes.front ()->store, block1->account ());
scoped_thread_name_io.renew ();
pending_exists ("0");
}
// wallet_pending: pending blocks for all wallet accounts, exercising the
// count, threshold, source, min_version and include_only_confirmed options.
TEST (rpc, wallet_pending)
{
	nano::system system0 (24000, 1);
	nano::keypair key1;
	system0.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	system0.wallet (0)->insert_adhoc (key1.prv);
	auto block1 (system0.wallet (0)->send_action (nano::test_genesis_key.pub, key1.pub, 100));
	auto iterations (0);
	// NOTE(review): presumably renames the IO threads while in scope -- confirm
	scoped_io_thread_name_change scoped_thread_name_io;
	// Wait (bounded) for the send's election to finish before querying
	while (system0.nodes[0]->active.active (*block1))
	{
		system0.poll ();
		++iterations;
		ASSERT_LT (iterations, 200);
	}
	auto & node = system0.nodes.front ();
	// Standard RPC-over-IPC test harness setup
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system0.io_ctx, rpc_config);
	nano::rpc rpc (system0.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "wallet_pending");
	request.put ("wallet", system0.nodes[0]->wallets.items.begin ()->first.to_string ());
	request.put ("count", "100");
	// Default form: one pending block, listed under key1's account
	test_response response (request, rpc.config.port, system0.io_ctx);
	while (response.status == 0)
	{
		system0.poll ();
	}
	ASSERT_EQ (200, response.status);
	ASSERT_EQ (1, response.json.get_child ("blocks").size ());
	for (auto & pending : response.json.get_child ("blocks"))
	{
		std::string account_text (pending.first);
		ASSERT_EQ (key1.pub.to_account (), account_text);
		nano::block_hash hash1 (pending.second.begin ()->second.get<std::string> (""));
		ASSERT_EQ (block1->hash (), hash1);
	}
	request.put ("threshold", "100"); // Threshold test
	test_response response0 (request, rpc.config.port, system0.io_ctx);
	while (response0.status == 0)
	{
		system0.poll ();
	}
	ASSERT_EQ (200, response0.status);
	std::unordered_map<nano::block_hash, nano::uint128_union> blocks;
	ASSERT_EQ (1, response0.json.get_child ("blocks").size ());
	for (auto & pending : response0.json.get_child ("blocks"))
	{
		std::string account_text (pending.first);
		ASSERT_EQ (key1.pub.to_account (), account_text);
		for (auto i (pending.second.begin ()), j (pending.second.end ()); i != j; ++i)
		{
			nano::block_hash hash;
			hash.decode_hex (i->first);
			nano::uint128_union amount;
			amount.decode_dec (i->second.get<std::string> (""));
			blocks[hash] = amount;
			// Without source/min_version options these fields must be absent
			boost::optional<std::string> source (i->second.get_optional<std::string> ("source"));
			ASSERT_FALSE (source.is_initialized ());
			boost::optional<uint8_t> min_version (i->second.get_optional<uint8_t> ("min_version"));
			ASSERT_FALSE (min_version.is_initialized ());
		}
	}
	ASSERT_EQ (blocks[block1->hash ()], 100);
	// A threshold above the pending amount filters the block out
	request.put ("threshold", "101");
	test_response response1 (request, rpc.config.port, system0.io_ctx);
	while (response1.status == 0)
	{
		system0.poll ();
	}
	ASSERT_EQ (200, response1.status);
	auto & pending1 (response1.json.get_child ("blocks"));
	ASSERT_EQ (0, pending1.size ());
	request.put ("threshold", "0");
	request.put ("source", "true");
	request.put ("min_version", "true");
	test_response response2 (request, rpc.config.port, system0.io_ctx);
	while (response2.status == 0)
	{
		system0.poll ();
	}
	ASSERT_EQ (200, response2.status);
	std::unordered_map<nano::block_hash, nano::uint128_union> amounts;
	std::unordered_map<nano::block_hash, nano::account> sources;
	// Check the source/min_version response (was mistakenly re-checking the
	// earlier threshold response)
	ASSERT_EQ (1, response2.json.get_child ("blocks").size ());
	for (auto & pending : response2.json.get_child ("blocks"))
	{
		std::string account_text (pending.first);
		ASSERT_EQ (key1.pub.to_account (), account_text);
		for (auto i (pending.second.begin ()), j (pending.second.end ()); i != j; ++i)
		{
			nano::block_hash hash;
			hash.decode_hex (i->first);
			amounts[hash].decode_dec (i->second.get<std::string> ("amount"));
			sources[hash].decode_account (i->second.get<std::string> ("source"));
			ASSERT_EQ (i->second.get<uint8_t> ("min_version"), 0);
		}
	}
	ASSERT_EQ (amounts[block1->hash ()], 100);
	ASSERT_EQ (sources[block1->hash ()], nano::test_genesis_key.pub);
	request.put ("include_only_confirmed", "true");
	check_block_response_count (system0, rpc, request, 1);
	scoped_thread_name_io.reset ();
	// Unconfirm the block; it must no longer appear when only confirmed
	// pending entries are requested
	reset_confirmation_height (system0.nodes.front ()->store, block1->account ());
	scoped_thread_name_io.renew ();
	{
		test_response response (request, rpc.config.port, system0.io_ctx);
		system0.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system0.poll ());
		}
		ASSERT_EQ (200, response.status);
		ASSERT_EQ (0, response.json.get_child ("blocks").size ());
	}
}
// receive_minimum: the RPC reports the node's configured receive minimum.
TEST (rpc, receive_minimum)
{
	nano::system system (24000, 1);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "receive_minimum");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// The reported amount must equal the configured minimum, decimal-encoded
	auto amount_text (response.json.get<std::string> ("amount"));
	ASSERT_EQ (system.nodes[0]->config.receive_minimum.to_string_dec (), amount_text);
}
// receive_minimum_set: the RPC updates the node's configured receive minimum.
TEST (rpc, receive_minimum_set)
{
	nano::system system (24000, 1);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "receive_minimum_set");
	request.put ("amount", "100");
	// Precondition: the configured minimum is not already 100
	ASSERT_NE (system.nodes[0]->config.receive_minimum.to_string_dec (), "100");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto success_text (response.json.get<std::string> ("success"));
	ASSERT_TRUE (success_text.empty ());
	// The configuration now reflects the new minimum
	ASSERT_EQ (system.nodes[0]->config.receive_minimum.to_string_dec (), "100");
}
// work_get: return the precomputed work value cached for a wallet account.
TEST (rpc, work_get)
{
nano::system system (24000, 1);
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
// Populate the wallet work cache for the genesis frontier
system.wallet (0)->work_cache_blocking (nano::test_genesis_key.pub, system.nodes[0]->latest (nano::test_genesis_key.pub));
auto node = system.nodes.front ();
// NOTE(review): presumably renames the IO threads while in scope -- confirm
scoped_io_thread_name_change scoped_thread_name_io;
// Standard RPC-over-IPC test harness setup
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "work_get");
request.put ("wallet", system.nodes[0]->wallets.items.begin ()->first.to_string ());
request.put ("account", nano::test_genesis_key.pub.to_account ());
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::string work_text (response.json.get<std::string> ("work"));
uint64_t work (1);
// Read the cached work value directly from the wallet store for comparison
auto transaction (system.nodes[0]->wallets.tx_begin_read ());
system.nodes[0]->wallets.items.begin ()->second->store.work_get (transaction, nano::genesis_account, work);
ASSERT_EQ (nano::to_string_hex (work), work_text);
}
TEST (rpc, wallet_work_get)
{
nano::system system (24000, 1);
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
system.wallet (0)->work_cache_blocking (nano::test_genesis_key.pub, system.nodes[0]->latest (nano::test_genesis_key.pub));
auto node = system.nodes.front ();
scoped_io_thread_name_change scoped_thread_name_io;
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "wallet_work_get");
request.put ("wallet", system.nodes[0]->wallets.items.begin ()->first.to_string ());
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
auto transaction (system.nodes[0]->wallets.tx_begin_read ());
for (auto & works : response.json.get_child ("works"))
{
std::string account_text (works.first);
ASSERT_EQ (nano::test_genesis_key.pub.to_account (), account_text);
std::string work_text (works.second.get<std::string> (""));
uint64_t work (1);
system.nodes[0]->wallets.items.begin ()->second->store.work_get (transaction, nano::genesis_account, work);
ASSERT_EQ (nano::to_string_hex (work), work_text);
}
}
TEST (rpc, work_set)
{
nano::system system (24000, 1);
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
uint64_t work0 (100);
auto node = system.nodes.front ();
scoped_io_thread_name_change scoped_thread_name_io;
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "work_set");
request.put ("wallet", system.nodes[0]->wallets.items.begin ()->first.to_string ());
request.put ("account", nano::test_genesis_key.pub.to_account ());
request.put ("work", nano::to_string_hex (work0));
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::string success (response.json.get<std::string> ("success"));
ASSERT_TRUE (success.empty ());
uint64_t work1 (1);
auto transaction (system.nodes[0]->wallets.tx_begin_read ());
system.nodes[0]->wallets.items.begin ()->second->store.work_get (transaction, nano::genesis_account, work1);
ASSERT_EQ (work1, work0);
}
TEST (rpc, search_pending_all)
{
nano::system system (24000, 1);
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
auto latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
nano::send_block block (latest, nano::test_genesis_key.pub, nano::genesis_amount - system.nodes[0]->config.receive_minimum.number (), nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.nodes[0]->work_generate_blocking (latest));
{
auto transaction (system.nodes[0]->store.tx_begin_write ());
ASSERT_EQ (nano::process_result::progress, system.nodes[0]->ledger.process (transaction, block).code);
}
scoped_io_thread_name_change scoped_thread_name_io;
auto node = system.nodes.front ();
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "search_pending_all");
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
system.deadline_set (10s);
while (system.nodes[0]->balance (nano::test_genesis_key.pub) != nano::genesis_amount)
{
ASSERT_NO_ERROR (system.poll ());
}
}
TEST (rpc, wallet_republish)
{
nano::system system (24000, 1);
nano::genesis genesis;
nano::keypair key;
while (key.pub < nano::test_genesis_key.pub)
{
nano::keypair key1;
key.pub = key1.pub;
key.prv.data = key1.prv.data;
}
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
system.wallet (0)->insert_adhoc (key.prv);
auto & node1 (*system.nodes[0]);
auto latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
nano::send_block send (latest, key.pub, 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
system.nodes[0]->process (send);
nano::open_block open (send.hash (), key.pub, key.pub, key.prv, key.pub, *node1.work_generate_blocking (key.pub));
ASSERT_EQ (nano::process_result::progress, system.nodes[0]->process (open).code);
scoped_io_thread_name_change scoped_thread_name_io;
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "wallet_republish");
request.put ("wallet", system.nodes[0]->wallets.items.begin ()->first.to_string ());
request.put ("count", 1);
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
auto & blocks_node (response.json.get_child ("blocks"));
std::vector<nano::block_hash> blocks;
for (auto i (blocks_node.begin ()), n (blocks_node.end ()); i != n; ++i)
{
blocks.push_back (nano::block_hash (i->second.get<std::string> ("")));
}
ASSERT_EQ (2, blocks.size ());
ASSERT_EQ (send.hash (), blocks[0]);
ASSERT_EQ (open.hash (), blocks[1]);
}
TEST (rpc, delegators)
{
nano::system system (24000, 1);
nano::keypair key;
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
system.wallet (0)->insert_adhoc (key.prv);
auto & node1 (*system.nodes[0]);
auto latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
nano::send_block send (latest, key.pub, 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
system.nodes[0]->process (send);
nano::open_block open (send.hash (), nano::test_genesis_key.pub, key.pub, key.prv, key.pub, *node1.work_generate_blocking (key.pub));
ASSERT_EQ (nano::process_result::progress, system.nodes[0]->process (open).code);
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
scoped_io_thread_name_change scoped_thread_name_io;
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "delegators");
request.put ("account", nano::test_genesis_key.pub.to_account ());
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
auto & delegators_node (response.json.get_child ("delegators"));
boost::property_tree::ptree delegators;
for (auto i (delegators_node.begin ()), n (delegators_node.end ()); i != n; ++i)
{
delegators.put ((i->first), (i->second.get<std::string> ("")));
}
ASSERT_EQ (2, delegators.size ());
ASSERT_EQ ("100", delegators.get<std::string> (nano::test_genesis_key.pub.to_account ()));
ASSERT_EQ ("340282366920938463463374607431768211355", delegators.get<std::string> (key.pub.to_account ()));
}
TEST (rpc, delegators_count)
{
nano::system system (24000, 1);
nano::keypair key;
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
system.wallet (0)->insert_adhoc (key.prv);
auto & node1 (*system.nodes[0]);
auto latest (node1.latest (nano::test_genesis_key.pub));
nano::send_block send (latest, key.pub, 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
node1.process (send);
nano::open_block open (send.hash (), nano::test_genesis_key.pub, key.pub, key.prv, key.pub, *node1.work_generate_blocking (key.pub));
ASSERT_EQ (nano::process_result::progress, system.nodes[0]->process (open).code);
scoped_io_thread_name_change scoped_thread_name_io;
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "delegators_count");
request.put ("account", nano::test_genesis_key.pub.to_account ());
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::string count (response.json.get<std::string> ("count"));
ASSERT_EQ ("2", count);
}
TEST (rpc, account_info)
{
nano::system system (24000, 1);
nano::keypair key;
nano::genesis genesis;
auto & node1 (*system.nodes[0]);
scoped_io_thread_name_change scoped_thread_name_io;
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "account_info");
request.put ("account", nano::account ().to_account ());
// Test for a non existing account
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
auto error (response.json.get_optional<std::string> ("error"));
ASSERT_TRUE (error.is_initialized ());
ASSERT_EQ (error.get (), std::error_code (nano::error_common::account_not_found).message ());
}
scoped_thread_name_io.reset ();
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
system.wallet (0)->insert_adhoc (key.prv);
auto latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
nano::send_block send (latest, key.pub, 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
system.nodes[0]->process (send);
auto time (nano::seconds_since_epoch ());
{
auto transaction = node1.store.tx_begin_write ();
node1.store.confirmation_height_put (transaction, nano::test_genesis_key.pub, 1);
}
scoped_thread_name_io.renew ();
request.put ("account", nano::test_genesis_key.pub.to_account ());
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::string frontier (response.json.get<std::string> ("frontier"));
ASSERT_EQ (send.hash ().to_string (), frontier);
std::string open_block (response.json.get<std::string> ("open_block"));
ASSERT_EQ (genesis.hash ().to_string (), open_block);
std::string representative_block (response.json.get<std::string> ("representative_block"));
ASSERT_EQ (genesis.hash ().to_string (), representative_block);
std::string balance (response.json.get<std::string> ("balance"));
ASSERT_EQ ("100", balance);
std::string modified_timestamp (response.json.get<std::string> ("modified_timestamp"));
ASSERT_LT (std::abs ((long)time - stol (modified_timestamp)), 5);
std::string block_count (response.json.get<std::string> ("block_count"));
ASSERT_EQ ("2", block_count);
std::string confirmation_height (response.json.get<std::string> ("confirmation_height"));
ASSERT_EQ ("1", confirmation_height);
ASSERT_EQ (0, response.json.get<uint8_t> ("account_version"));
boost::optional<std::string> weight (response.json.get_optional<std::string> ("weight"));
ASSERT_FALSE (weight.is_initialized ());
boost::optional<std::string> pending (response.json.get_optional<std::string> ("pending"));
ASSERT_FALSE (pending.is_initialized ());
boost::optional<std::string> representative (response.json.get_optional<std::string> ("representative"));
ASSERT_FALSE (representative.is_initialized ());
}
// Test for optional values
request.put ("weight", "true");
request.put ("pending", "1");
request.put ("representative", "1");
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
std::string weight2 (response.json.get<std::string> ("weight"));
ASSERT_EQ ("100", weight2);
std::string pending2 (response.json.get<std::string> ("pending"));
ASSERT_EQ ("0", pending2);
std::string representative2 (response.json.get<std::string> ("representative"));
ASSERT_EQ (nano::test_genesis_key.pub.to_account (), representative2);
}
}
/** Make sure we can use json block literals instead of string as input */
TEST (rpc, json_block_input)
{
nano::system system (24000, 1);
nano::keypair key;
system.wallet (0)->insert_adhoc (key.prv);
auto & node1 (*system.nodes[0]);
nano::state_block send (nano::genesis_account, node1.latest (nano::test_genesis_key.pub), nano::genesis_account, nano::genesis_amount - nano::Gxrb_ratio, key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
scoped_io_thread_name_change scoped_thread_name_io;
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "sign");
request.put ("json_block", "true");
std::string wallet;
system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet);
request.put ("wallet", wallet);
request.put ("account", key.pub.to_account ());
boost::property_tree::ptree json;
send.serialize_json (json);
request.add_child ("block", json);
test_response response (request, rpc.config.port, system.io_ctx);
while (response.status == 0)
{
system.poll ();
}
ASSERT_EQ (200, response.status);
bool json_error{ false };
nano::state_block block (json_error, response.json.get_child ("block"));
ASSERT_FALSE (json_error);
ASSERT_FALSE (nano::validate_message (key.pub, send.hash (), block.block_signature ()));
ASSERT_NE (block.block_signature (), send.block_signature ());
ASSERT_EQ (block.hash (), send.hash ());
}
/** Make sure we can receive json block literals instead of string as output */
TEST (rpc, json_block_output)
{
nano::system system (24000, 1);
nano::keypair key;
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
system.wallet (0)->insert_adhoc (key.prv);
auto & node1 (*system.nodes[0]);
auto latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
nano::send_block send (latest, key.pub, 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
system.nodes[0]->process (send);
scoped_io_thread_name_change scoped_thread_name_io;
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "block_info");
request.put ("json_block", "true");
request.put ("hash", send.hash ().to_string ());
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
// Make sure contents contains a valid JSON subtree instread of stringified json
bool json_error{ false };
nano::send_block send_from_json (json_error, response.json.get_child ("contents"));
ASSERT_FALSE (json_error);
}
TEST (rpc, blocks_info)
{
nano::system system (24000, 1);
auto node = system.nodes.front ();
scoped_io_thread_name_change scoped_thread_name_io;
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
auto check_blocks = [&system](test_response & response) {
for (auto & blocks : response.json.get_child ("blocks"))
{
std::string hash_text (blocks.first);
ASSERT_EQ (system.nodes[0]->latest (nano::genesis_account).to_string (), hash_text);
std::string account_text (blocks.second.get<std::string> ("block_account"));
ASSERT_EQ (nano::test_genesis_key.pub.to_account (), account_text);
std::string amount_text (blocks.second.get<std::string> ("amount"));
ASSERT_EQ (nano::genesis_amount.convert_to<std::string> (), amount_text);
std::string blocks_text (blocks.second.get<std::string> ("contents"));
ASSERT_FALSE (blocks_text.empty ());
boost::optional<std::string> pending (blocks.second.get_optional<std::string> ("pending"));
ASSERT_FALSE (pending.is_initialized ());
boost::optional<std::string> source (blocks.second.get_optional<std::string> ("source_account"));
ASSERT_FALSE (source.is_initialized ());
std::string balance_text (blocks.second.get<std::string> ("balance"));
ASSERT_EQ (nano::genesis_amount.convert_to<std::string> (), balance_text);
ASSERT_TRUE (blocks.second.get<bool> ("confirmed")); // Genesis block is confirmed by default
}
};
boost::property_tree::ptree request;
request.put ("action", "blocks_info");
boost::property_tree::ptree entry;
boost::property_tree::ptree hashes;
entry.put ("", system.nodes[0]->latest (nano::genesis_account).to_string ());
hashes.push_back (std::make_pair ("", entry));
request.add_child ("hashes", hashes);
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
check_blocks (response);
}
std::string random_hash = nano::block_hash ().to_string ();
entry.put ("", random_hash);
hashes.push_back (std::make_pair ("", entry));
request.erase ("hashes");
request.add_child ("hashes", hashes);
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
ASSERT_EQ (std::error_code (nano::error_blocks::not_found).message (), response.json.get<std::string> ("error"));
}
request.put ("include_not_found", "true");
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
check_blocks (response);
auto & blocks_not_found (response.json.get_child ("blocks_not_found"));
ASSERT_EQ (1, blocks_not_found.size ());
ASSERT_EQ (random_hash, blocks_not_found.begin ()->second.get<std::string> (""));
}
request.put ("source", "true");
request.put ("pending", "1");
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
for (auto & blocks : response.json.get_child ("blocks"))
{
std::string source (blocks.second.get<std::string> ("source_account"));
ASSERT_EQ ("0", source);
std::string pending (blocks.second.get<std::string> ("pending"));
ASSERT_EQ ("0", pending);
}
}
}
TEST (rpc, blocks_info_subtype)
{
nano::system system (24000, 1);
auto & node1 (*system.nodes[0]);
nano::keypair key;
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
system.wallet (0)->insert_adhoc (key.prv);
auto send (system.wallet (0)->send_action (nano::test_genesis_key.pub, nano::test_genesis_key.pub, nano::Gxrb_ratio));
ASSERT_NE (nullptr, send);
auto receive (system.wallet (0)->receive_action (*send, key.pub, nano::Gxrb_ratio));
ASSERT_NE (nullptr, receive);
auto change (system.wallet (0)->change_action (nano::test_genesis_key.pub, key.pub));
ASSERT_NE (nullptr, change);
scoped_io_thread_name_change scoped_thread_name_io;
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "blocks_info");
boost::property_tree::ptree hashes;
boost::property_tree::ptree entry;
entry.put ("", send->hash ().to_string ());
hashes.push_back (std::make_pair ("", entry));
entry.put ("", receive->hash ().to_string ());
hashes.push_back (std::make_pair ("", entry));
entry.put ("", change->hash ().to_string ());
hashes.push_back (std::make_pair ("", entry));
request.add_child ("hashes", hashes);
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
auto & blocks (response.json.get_child ("blocks"));
ASSERT_EQ (3, blocks.size ());
auto send_subtype (blocks.get_child (send->hash ().to_string ()).get<std::string> ("subtype"));
ASSERT_EQ (send_subtype, "send");
auto receive_subtype (blocks.get_child (receive->hash ().to_string ()).get<std::string> ("subtype"));
ASSERT_EQ (receive_subtype, "receive");
auto change_subtype (blocks.get_child (change->hash ().to_string ()).get<std::string> ("subtype"));
ASSERT_EQ (change_subtype, "change");
}
TEST (rpc, work_peers_all)
{
nano::system system (24000, 1);
auto & node1 (*system.nodes[0]);
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
scoped_io_thread_name_change scoped_thread_name_io;
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "work_peer_add");
request.put ("address", "::1");
request.put ("port", "0");
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::string success (response.json.get<std::string> ("success", ""));
ASSERT_TRUE (success.empty ());
boost::property_tree::ptree request1;
request1.put ("action", "work_peers");
test_response response1 (request1, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response1.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response1.status);
auto & peers_node (response1.json.get_child ("work_peers"));
std::vector<std::string> peers;
for (auto i (peers_node.begin ()), n (peers_node.end ()); i != n; ++i)
{
peers.push_back (i->second.get<std::string> (""));
}
ASSERT_EQ (1, peers.size ());
ASSERT_EQ ("::1:0", peers[0]);
boost::property_tree::ptree request2;
request2.put ("action", "work_peers_clear");
test_response response2 (request2, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response2.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response2.status);
success = response2.json.get<std::string> ("success", "");
ASSERT_TRUE (success.empty ());
test_response response3 (request1, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response3.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response3.status);
peers_node = response3.json.get_child ("work_peers");
ASSERT_EQ (0, peers_node.size ());
}
TEST (rpc, block_count_type)
{
nano::system system (24000, 1);
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
auto send (system.wallet (0)->send_action (nano::test_genesis_key.pub, nano::test_genesis_key.pub, system.nodes[0]->config.receive_minimum.number ()));
ASSERT_NE (nullptr, send);
auto receive (system.wallet (0)->receive_action (*send, nano::test_genesis_key.pub, system.nodes[0]->config.receive_minimum.number ()));
ASSERT_NE (nullptr, receive);
auto node = system.nodes.front ();
scoped_io_thread_name_change scoped_thread_name_io;
enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "block_count_type");
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
ASSERT_EQ (200, response.status);
std::string send_count (response.json.get<std::string> ("send"));
ASSERT_EQ ("0", send_count);
std::string receive_count (response.json.get<std::string> ("receive"));
ASSERT_EQ ("0", receive_count);
std::string open_count (response.json.get<std::string> ("open"));
ASSERT_EQ ("1", open_count);
std::string change_count (response.json.get<std::string> ("change"));
ASSERT_EQ ("0", change_count);
std::string state_count (response.json.get<std::string> ("state"));
ASSERT_EQ ("2", state_count);
}
TEST (rpc, ledger)
{
nano::system system (24000, 1);
nano::keypair key;
nano::genesis genesis;
system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
system.wallet (0)->insert_adhoc (key.prv);
auto & node1 (*system.nodes[0]);
auto latest (node1.latest (nano::test_genesis_key.pub));
auto genesis_balance (nano::genesis_amount);
auto send_amount (genesis_balance - 100);
genesis_balance -= send_amount;
nano::send_block send (latest, key.pub, genesis_balance, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
node1.process (send);
nano::open_block open (send.hash (), nano::test_genesis_key.pub, key.pub, key.prv, key.pub, *node1.work_generate_blocking (key.pub));
ASSERT_EQ (nano::process_result::progress, node1.process (open).code);
auto time (nano::seconds_since_epoch ());
scoped_io_thread_name_change scoped_thread_name_io;
enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
nano::node_rpc_config node_rpc_config;
nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
nano::rpc_config rpc_config (true);
nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "ledger");
request.put ("sorting", true);
request.put ("count", "1");
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
for (auto & account : response.json.get_child ("accounts"))
{
std::string account_text (account.first);
ASSERT_EQ (key.pub.to_account (), account_text);
std::string frontier (account.second.get<std::string> ("frontier"));
ASSERT_EQ (open.hash ().to_string (), frontier);
std::string open_block (account.second.get<std::string> ("open_block"));
ASSERT_EQ (open.hash ().to_string (), open_block);
std::string representative_block (account.second.get<std::string> ("representative_block"));
ASSERT_EQ (open.hash ().to_string (), representative_block);
std::string balance_text (account.second.get<std::string> ("balance"));
ASSERT_EQ (send_amount.convert_to<std::string> (), balance_text);
std::string modified_timestamp (account.second.get<std::string> ("modified_timestamp"));
ASSERT_LT (std::abs ((long)time - stol (modified_timestamp)), 5);
std::string block_count (account.second.get<std::string> ("block_count"));
ASSERT_EQ ("1", block_count);
boost::optional<std::string> weight (account.second.get_optional<std::string> ("weight"));
ASSERT_FALSE (weight.is_initialized ());
boost::optional<std::string> pending (account.second.get_optional<std::string> ("pending"));
ASSERT_FALSE (pending.is_initialized ());
boost::optional<std::string> representative (account.second.get_optional<std::string> ("representative"));
ASSERT_FALSE (representative.is_initialized ());
}
}
// Test for optional values
request.put ("weight", true);
request.put ("pending", true);
request.put ("representative", true);
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
for (auto & account : response.json.get_child ("accounts"))
{
boost::optional<std::string> weight (account.second.get_optional<std::string> ("weight"));
ASSERT_TRUE (weight.is_initialized ());
ASSERT_EQ ("0", weight.get ());
boost::optional<std::string> pending (account.second.get_optional<std::string> ("pending"));
ASSERT_TRUE (pending.is_initialized ());
ASSERT_EQ ("0", pending.get ());
boost::optional<std::string> representative (account.second.get_optional<std::string> ("representative"));
ASSERT_TRUE (representative.is_initialized ());
ASSERT_EQ (nano::test_genesis_key.pub.to_account (), representative.get ());
}
}
// Test threshold
request.put ("count", 2);
request.put ("threshold", genesis_balance + 1);
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
auto & accounts (response.json.get_child ("accounts"));
ASSERT_EQ (1, accounts.size ());
auto account (accounts.begin ());
ASSERT_EQ (key.pub.to_account (), account->first);
std::string balance_text (account->second.get<std::string> ("balance"));
ASSERT_EQ (send_amount.convert_to<std::string> (), balance_text);
}
auto send2_amount (50);
genesis_balance -= send2_amount;
nano::send_block send2 (send.hash (), key.pub, genesis_balance, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (send.hash ()));
scoped_thread_name_io.reset ();
node1.process (send2);
scoped_thread_name_io.renew ();
// When asking for pending, pending amount is taken into account for threshold so the account must show up
request.put ("count", 2);
request.put ("threshold", (send_amount + send2_amount).convert_to<std::string> ());
request.put ("pending", true);
{
test_response response (request, rpc.config.port, system.io_ctx);
system.deadline_set (5s);
while (response.status == 0)
{
ASSERT_NO_ERROR (system.poll ());
}
auto & accounts (response.json.get_child ("accounts"));
ASSERT_EQ (1, accounts.size ());
auto account (accounts.begin ());
ASSERT_EQ (key.pub.to_account (), account->first);
std::string balance_text (account->second.get<std::string> ("balance"));
ASSERT_EQ (send_amount.convert_to<std::string> (), balance_text);
std::string pending_text (account->second.get<std::string> ("pending"));
ASSERT_EQ (std::to_string (send2_amount), pending_text);
}
}
// RPC "accounts_create": creating a batch of accounts in a wallet must
// return exactly "count" entries, each a decodable account that exists
// in the wallet afterwards.
TEST (rpc, accounts_create)
{
	nano::system system (24000, 1);
	auto node (system.nodes.front ());
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "accounts_create");
	request.put ("wallet", system.nodes[0]->wallets.items.begin ()->first.to_string ());
	request.put ("count", "8");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto & created (response.json.get_child ("accounts"));
	ASSERT_EQ (8, created.size ());
	// Every returned account must decode and be present in the wallet
	for (auto const & entry : created)
	{
		auto account_text (entry.second.get<std::string> (""));
		nano::account account;
		ASSERT_FALSE (account.decode_account (account_text));
		ASSERT_TRUE (system.wallet (0)->exists (account));
	}
}
// Exercises RPC "block_create" for the legacy block types (send, open,
// change, receive): each block produced via RPC must hash identically to
// a locally-constructed reference block and be accepted by the ledger.
TEST (rpc, block_create)
{
	nano::system system (24000, 1);
	nano::keypair key;
	nano::genesis genesis;
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	system.wallet (0)->insert_adhoc (key.prv);
	auto & node1 (*system.nodes[0]);
	auto latest (node1.latest (nano::test_genesis_key.pub));
	auto send_work = *node1.work_generate_blocking (latest);
	// Reference blocks built locally; the RPC results must hash the same
	nano::send_block send (latest, key.pub, 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, send_work);
	auto open_work = *node1.work_generate_blocking (key.pub);
	nano::open_block open (send.hash (), nano::test_genesis_key.pub, key.pub, key.prv, key.pub, open_work);
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	// "send" block creation
	boost::property_tree::ptree request;
	request.put ("action", "block_create");
	request.put ("type", "send");
	request.put ("wallet", system.nodes[0]->wallets.items.begin ()->first.to_string ());
	request.put ("account", nano::test_genesis_key.pub.to_account ());
	request.put ("previous", latest.to_string ());
	request.put ("amount", "340282366920938463463374607431768211355");
	request.put ("destination", key.pub.to_account ());
	request.put ("work", nano::to_string_hex (send_work));
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	std::string send_hash (response.json.get<std::string> ("hash"));
	ASSERT_EQ (send.hash ().to_string (), send_hash);
	auto send_text (response.json.get<std::string> ("block"));
	boost::property_tree::ptree block_l;
	std::stringstream block_stream (send_text);
	boost::property_tree::read_json (block_stream, block_l);
	auto send_block (nano::deserialize_block_json (block_l));
	ASSERT_EQ (send.hash (), send_block->hash ());
	// Ledger mutations happen outside the io-thread-name scope
	scoped_thread_name_io.reset ();
	system.nodes[0]->process (send);
	scoped_thread_name_io.renew ();
	// "open" block creation, signed with a raw private key
	boost::property_tree::ptree request1;
	request1.put ("action", "block_create");
	request1.put ("type", "open");
	std::string key_text;
	key.prv.data.encode_hex (key_text);
	request1.put ("key", key_text);
	request1.put ("representative", nano::test_genesis_key.pub.to_account ());
	request1.put ("source", send.hash ().to_string ());
	request1.put ("work", nano::to_string_hex (open_work));
	test_response response1 (request1, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response1.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response1.status);
	std::string open_hash (response1.json.get<std::string> ("hash"));
	ASSERT_EQ (open.hash ().to_string (), open_hash);
	auto open_text (response1.json.get<std::string> ("block"));
	std::stringstream block_stream1 (open_text);
	boost::property_tree::read_json (block_stream1, block_l);
	auto open_block (nano::deserialize_block_json (block_l));
	ASSERT_EQ (open.hash (), open_block->hash ());
	scoped_thread_name_io.reset ();
	ASSERT_EQ (nano::process_result::progress, system.nodes[0]->process (open).code);
	scoped_thread_name_io.renew ()
;	// A different representative must yield a different open block hash
	request1.put ("representative", key.pub.to_account ());
	test_response response2 (request1, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response2.status);
	std::string open2_hash (response2.json.get<std::string> ("hash"));
	ASSERT_NE (open.hash ().to_string (), open2_hash); // different blocks with wrong representative
	// "change" block creation
	auto change_work = *node1.work_generate_blocking (open.hash ());
	nano::change_block change (open.hash (), key.pub, key.prv, key.pub, change_work);
	request1.put ("type", "change");
	request1.put ("work", nano::to_string_hex (change_work));
	test_response response4 (request1, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response4.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response4.status);
	std::string change_hash (response4.json.get<std::string> ("hash"));
	ASSERT_EQ (change.hash ().to_string (), change_hash);
	auto change_text (response4.json.get<std::string> ("block"));
	std::stringstream block_stream4 (change_text);
	boost::property_tree::read_json (block_stream4, block_l);
	auto change_block (nano::deserialize_block_json (block_l));
	ASSERT_EQ (change.hash (), change_block->hash ());
	scoped_thread_name_io.reset ();
	ASSERT_EQ (nano::process_result::progress, node1.process (change).code);
	nano::send_block send2 (send.hash (), key.pub, 0, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (send.hash ()));
	ASSERT_EQ (nano::process_result::progress, system.nodes[0]->process (send2).code);
	scoped_thread_name_io.renew ();
	// "receive" block creation
	boost::property_tree::ptree request2;
	request2.put ("action", "block_create");
	request2.put ("type", "receive");
	request2.put ("wallet", system.nodes[0]->wallets.items.begin ()->first.to_string ());
	request2.put ("account", key.pub.to_account ());
	request2.put ("source", send2.hash ().to_string ());
	request2.put ("previous", change.hash ().to_string ());
	request2.put ("work", nano::to_string_hex (*node1.work_generate_blocking (change.hash ())));
	test_response response5 (request2, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response5.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response5.status);
	// Fix: read the hash and block text from the receive response
	// (response5/receive_text). The previous code read them from the
	// change response (response4/change_text), so the final assertions
	// vacuously re-checked the already-processed change block and the
	// receive path was never exercised.
	std::string receive_hash (response5.json.get<std::string> ("hash"));
	auto receive_text (response5.json.get<std::string> ("block"));
	std::stringstream block_stream5 (receive_text);
	boost::property_tree::read_json (block_stream5, block_l);
	auto receive_block (nano::deserialize_block_json (block_l));
	ASSERT_EQ (receive_hash, receive_block->hash ().to_string ());
	// Process synchronously (instead of async process_active) so the
	// frontier is guaranteed to be updated before the final check.
	scoped_thread_name_io.reset ();
	ASSERT_EQ (nano::process_result::progress, system.nodes[0]->process (*receive_block).code);
	scoped_thread_name_io.renew ();
	latest = system.nodes[0]->latest (key.pub);
	ASSERT_EQ (receive_hash, latest.to_string ());
}
// RPC "block_create" with type "state": a wallet-signed state send from
// genesis must hash to the returned "hash" and be accepted by the ledger.
TEST (rpc, block_create_state)
{
	nano::system system (24000, 1);
	nano::keypair key;
	nano::genesis genesis;
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	// State send: genesis balance minus 1 Gxrb, linked to key.pub
	boost::property_tree::ptree request;
	request.put ("action", "block_create");
	request.put ("type", "state");
	request.put ("wallet", system.nodes[0]->wallets.items.begin ()->first.to_string ());
	request.put ("account", nano::test_genesis_key.pub.to_account ());
	request.put ("previous", genesis.hash ().to_string ());
	request.put ("representative", nano::test_genesis_key.pub.to_account ());
	request.put ("balance", (nano::genesis_amount - nano::Gxrb_ratio).convert_to<std::string> ());
	request.put ("link", key.pub.to_account ());
	request.put ("work", nano::to_string_hex (*system.nodes[0]->work_generate_blocking (genesis.hash ())));
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// The returned block must round-trip through JSON and keep its hash
	std::string state_hash (response.json.get<std::string> ("hash"));
	auto state_text (response.json.get<std::string> ("block"));
	std::stringstream block_stream (state_text);
	boost::property_tree::ptree block_l;
	boost::property_tree::read_json (block_stream, block_l);
	auto state_block (nano::deserialize_block_json (block_l));
	ASSERT_NE (nullptr, state_block);
	ASSERT_EQ (nano::block_type::state, state_block->type ());
	ASSERT_EQ (state_hash, state_block->hash ().to_string ());
	// Ledger mutation happens outside the io-thread-name scope
	scoped_thread_name_io.reset ();
	auto process_result (system.nodes[0]->process (*state_block));
	ASSERT_EQ (nano::process_result::progress, process_result.code);
}
// RPC "block_create" with type "state" opening an account: "previous" of 0
// plus a pending source yield an open-style state block, signed here with a
// raw private key ("key") instead of a wallet.
TEST (rpc, block_create_state_open)
{
	nano::system system (24000, 1);
	nano::keypair key;
	nano::genesis genesis;
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	// Fund the new account so there is a pending send to receive
	auto send_block (system.wallet (0)->send_action (nano::test_genesis_key.pub, key.pub, nano::Gxrb_ratio));
	ASSERT_NE (nullptr, send_block);
	boost::property_tree::ptree request;
	request.put ("action", "block_create");
	request.put ("type", "state");
	request.put ("key", key.prv.data.to_string ());
	request.put ("account", key.pub.to_account ());
	request.put ("previous", 0);
	request.put ("representative", nano::test_genesis_key.pub.to_account ());
	request.put ("balance", nano::Gxrb_ratio.convert_to<std::string> ());
	request.put ("link", send_block->hash ().to_string ());
	// For an open block, work is generated against the public key
	request.put ("work", nano::to_string_hex (*system.nodes[0]->work_generate_blocking (key.pub)));
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	std::string state_hash (response.json.get<std::string> ("hash"));
	auto state_text (response.json.get<std::string> ("block"));
	std::stringstream block_stream (state_text);
	boost::property_tree::ptree block_l;
	boost::property_tree::read_json (block_stream, block_l);
	auto state_block (nano::deserialize_block_json (block_l));
	ASSERT_NE (nullptr, state_block);
	ASSERT_EQ (nano::block_type::state, state_block->type ());
	ASSERT_EQ (state_hash, state_block->hash ().to_string ());
	// Account must be unopened before processing and opened afterwards
	ASSERT_TRUE (system.nodes[0]->latest (key.pub).is_zero ());
	scoped_thread_name_io.reset ();
	auto process_result (system.nodes[0]->process (*state_block));
	ASSERT_EQ (nano::process_result::progress, process_result.code);
	ASSERT_FALSE (system.nodes[0]->latest (key.pub).is_zero ());
}
// Missing "work" parameter should cause work to be generated for us.
// RPC "block_create" without a "work" field: the node must generate the
// work itself, for state blocks with and without a previous hash.
TEST (rpc, block_create_state_request_work)
{
	nano::genesis genesis;
	// Test work generation for state blocks both with and without previous (in the latter
	// case, the account will be used for work generation)
	std::vector<std::string> previous_test_input{ genesis.hash ().to_string (), std::string ("0") };
	for (auto previous : previous_test_input)
	{
		nano::system system (24000, 1);
		nano::keypair key;
		nano::genesis genesis;
		system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
		scoped_io_thread_name_change scoped_thread_name_io;
		// Note: no "work" field is set on the request
		boost::property_tree::ptree request;
		request.put ("action", "block_create");
		request.put ("type", "state");
		request.put ("wallet", system.nodes[0]->wallets.items.begin ()->first.to_string ());
		request.put ("account", nano::test_genesis_key.pub.to_account ());
		request.put ("representative", nano::test_genesis_key.pub.to_account ());
		request.put ("balance", (nano::genesis_amount - nano::Gxrb_ratio).convert_to<std::string> ());
		request.put ("link", key.pub.to_account ());
		request.put ("previous", previous);
		auto node = system.nodes.front ();
		enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
		nano::node_rpc_config node_rpc_config;
		nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
		nano::rpc_config rpc_config (true);
		nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
		nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
		rpc.start ();
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		boost::property_tree::ptree block_l;
		std::stringstream block_stream (response.json.get<std::string> ("block"));
		boost::property_tree::read_json (block_stream, block_l);
		auto block (nano::deserialize_block_json (block_l));
		ASSERT_NE (nullptr, block);
		// Returned block must carry valid proof-of-work
		ASSERT_FALSE (nano::work_validate (*block));
	}
}
// RPC "block_hash": hashing a serialized block through the RPC must match
// the locally computed block hash.
TEST (rpc, block_hash)
{
	nano::system system (24000, 1);
	nano::keypair key;
	auto latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
	auto & node1 (*system.nodes[0]);
	nano::send_block send (latest, key.pub, 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	// Serialize the block to JSON and send it to the RPC
	std::string block_json;
	send.serialize_json (block_json);
	boost::property_tree::ptree request;
	request.put ("action", "block_hash");
	request.put ("block", block_json);
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto hash_text (response.json.get<std::string> ("hash"));
	ASSERT_EQ (send.hash ().to_string (), hash_text);
}
// RPC "wallet_lock": locking a wallet must report locked == "1" and
// invalidate the wallet's stored password.
TEST (rpc, wallet_lock)
{
	nano::system system (24000, 1);
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	scoped_io_thread_name_change scoped_thread_name_io;
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	std::string wallet;
	system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet);
	// Precondition: wallet password is valid (wallet unlocked)
	{
		auto transaction (system.wallet (0)->wallets.tx_begin_read ());
		ASSERT_TRUE (system.wallet (0)->store.valid_password (transaction));
	}
	request.put ("wallet", wallet);
	request.put ("action", "wallet_lock");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	std::string account_text1 (response.json.get<std::string> ("locked"));
	ASSERT_EQ (account_text1, "1");
	// Postcondition: password no longer validates (wallet locked)
	auto transaction (system.wallet (0)->wallets.tx_begin_read ());
	ASSERT_FALSE (system.wallet (0)->store.valid_password (transaction));
}
// RPC "wallet_locked": a freshly created wallet is unlocked, so the RPC
// must report locked == "0".
TEST (rpc, wallet_locked)
{
	nano::system system (24000, 1);
	auto node (system.nodes.front ());
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	std::string wallet_id;
	system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet_id);
	boost::property_tree::ptree request;
	request.put ("action", "wallet_locked");
	request.put ("wallet", wallet_id);
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto locked_text (response.json.get<std::string> ("locked"));
	ASSERT_EQ ("0", locked_text);
}
// RPC "wallet_create": once the lmdb database limit is reached, creating
// another wallet must fail with the wallet_lmdb_max_dbs error.
TEST (rpc, wallet_create_fail)
{
	nano::system system (24000, 1);
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	// lmdb_max_dbs should be removed once the wallet store is refactored to support more wallets.
	// Create enough extra wallets that the next create exceeds the limit
	for (int i = 0; i < 127; i++)
	{
		node->wallets.create (nano::random_wallet_id ());
	}
	rpc.start ();
	scoped_io_thread_name_change scoped_thread_name_io;
	boost::property_tree::ptree request;
	request.put ("action", "wallet_create");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	// Expect the specific lmdb-max-dbs error message, not a success payload
	ASSERT_EQ (std::error_code (nano::error_common::wallet_lmdb_max_dbs).message (), response.json.get<std::string> ("error"));
}
// RPC "wallet_ledger": returns ledger data (frontier, open block, balance,
// timestamps, block count) for wallet accounts, honoring "sorting"/"count"
// and the optional weight/pending/representative fields.
TEST (rpc, wallet_ledger)
{
	nano::system system (24000, 1);
	nano::keypair key;
	nano::genesis genesis;
	system.wallet (0)->insert_adhoc (key.prv);
	auto & node1 (*system.nodes[0]);
	auto latest (system.nodes[0]->latest (nano::test_genesis_key.pub));
	// Open key.pub's account with a send + open pair processed directly
	nano::send_block send (latest, key.pub, 100, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *node1.work_generate_blocking (latest));
	system.nodes[0]->process (send);
	nano::open_block open (send.hash (), nano::test_genesis_key.pub, key.pub, key.prv, key.pub, *node1.work_generate_blocking (key.pub));
	ASSERT_EQ (nano::process_result::progress, node1.process (open).code);
	auto time (nano::seconds_since_epoch ());
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "wallet_ledger");
	request.put ("wallet", system.nodes[0]->wallets.items.begin ()->first.to_string ());
	request.put ("sorting", "1");
	request.put ("count", "1");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	for (auto & accounts : response.json.get_child ("accounts"))
	{
		std::string account_text (accounts.first);
		ASSERT_EQ (key.pub.to_account (), account_text);
		std::string frontier (accounts.second.get<std::string> ("frontier"));
		ASSERT_EQ (open.hash ().to_string (), frontier);
		std::string open_block (accounts.second.get<std::string> ("open_block"));
		ASSERT_EQ (open.hash ().to_string (), open_block);
		std::string representative_block (accounts.second.get<std::string> ("representative_block"));
		ASSERT_EQ (open.hash ().to_string (), representative_block);
		std::string balance_text (accounts.second.get<std::string> ("balance"));
		ASSERT_EQ ("340282366920938463463374607431768211355", balance_text);
		// Timestamp should be within a few seconds of when the blocks were processed
		std::string modified_timestamp (accounts.second.get<std::string> ("modified_timestamp"));
		ASSERT_LT (std::abs ((long)time - stol (modified_timestamp)), 5);
		std::string block_count (accounts.second.get<std::string> ("block_count"));
		ASSERT_EQ ("1", block_count);
		// Optional fields are omitted when not requested
		boost::optional<std::string> weight (accounts.second.get_optional<std::string> ("weight"));
		ASSERT_FALSE (weight.is_initialized ());
		boost::optional<std::string> pending (accounts.second.get_optional<std::string> ("pending"));
		ASSERT_FALSE (pending.is_initialized ());
		boost::optional<std::string> representative (accounts.second.get_optional<std::string> ("representative"));
		ASSERT_FALSE (representative.is_initialized ());
	}
	// Test for optional values
	request.put ("weight", "true");
	request.put ("pending", "1")
;	request.put ("representative", "false");
	test_response response2 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	for (auto & accounts : response2.json.get_child ("accounts"))
	{
		boost::optional<std::string> weight (accounts.second.get_optional<std::string> ("weight"));
		ASSERT_TRUE (weight.is_initialized ());
		ASSERT_EQ ("0", weight.get ());
		boost::optional<std::string> pending (accounts.second.get_optional<std::string> ("pending"));
		ASSERT_TRUE (pending.is_initialized ());
		ASSERT_EQ ("0", pending.get ());
		// "representative" was requested as "false", so it stays omitted
		boost::optional<std::string> representative (accounts.second.get_optional<std::string> ("representative"));
		ASSERT_FALSE (representative.is_initialized ());
	}
}
// RPC "wallet_add_watch": adds watch-only accounts to a wallet; a valid
// account succeeds, while a reserved/invalid public key must return
// bad_public_key.
TEST (rpc, wallet_add_watch)
{
	nano::system system (24000, 1);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	std::string wallet;
	system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet);
	request.put ("wallet", wallet);
	request.put ("action", "wallet_add_watch");
	// "accounts" is a JSON array of account strings
	boost::property_tree::ptree entry;
	boost::property_tree::ptree peers_l;
	entry.put ("", nano::test_genesis_key.pub.to_account ());
	peers_l.push_back (std::make_pair ("", entry));
	request.add_child ("accounts", peers_l);
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	std::string success (response.json.get<std::string> ("success"));
	ASSERT_TRUE (success.empty ());
	ASSERT_TRUE (system.wallet (0)->exists (nano::test_genesis_key.pub));
	// Make sure using special wallet key as pubkey fails
	nano::public_key bad_key (1);
	entry.put ("", bad_key.to_account ());
	peers_l.push_back (std::make_pair ("", entry));
	request.erase ("accounts");
	request.add_child ("accounts", peers_l);
	test_response response_error (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response_error.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response_error.status);
	std::error_code ec (nano::error_common::bad_public_key);
	ASSERT_EQ (response_error.json.get<std::string> ("error"), ec.message ());
}
// RPC "representatives_online": lists representatives currently observed
// online, with optional per-rep "weight" output and an "accounts" filter.
TEST (rpc, online_reps)
{
	nano::system system (24000, 2);
	nano::keypair key;
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	ASSERT_TRUE (system.nodes[1]->online_reps.online_stake () == system.nodes[1]->config.online_weight_minimum.number ());
	// Broadcast a block so node 1 observes the genesis rep voting/online
	auto send_block (system.wallet (0)->send_action (nano::test_genesis_key.pub, key.pub, nano::Gxrb_ratio));
	ASSERT_NE (nullptr, send_block);
	scoped_io_thread_name_change scoped_thread_name_io;
	system.deadline_set (10s);
	while (system.nodes[1]->online_reps.list ().empty ())
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	// The RPC is served by node 1, the observer
	enable_ipc_transport_tcp (system.nodes[1]->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*system.nodes[1], node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "representatives_online");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto representatives (response.json.get_child ("representatives"));
	auto item (representatives.begin ());
	ASSERT_NE (representatives.end (), item);
	ASSERT_EQ (nano::test_genesis_key.pub.to_account (), item->second.get<std::string> (""));
	// Without the "weight" option, no weight field is included
	boost::optional<std::string> weight (item->second.get_optional<std::string> ("weight"));
	ASSERT_FALSE (weight.is_initialized ());
	system.deadline_set (5s);
	while (system.nodes[1]->block (send_block->hash ()) == nullptr)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	//Test weight option
	request.put ("weight", "true");
	test_response response2 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	auto representatives2 (response2.json.get_child ("representatives"));
	auto item2 (representatives2.begin ());
	ASSERT_NE (representatives2.end (), item2);
	ASSERT_EQ (nano::test_genesis_key.pub.to_account (), item2->first)
;	auto weight2 (item2->second.get<std::string> ("weight"));
	ASSERT_EQ (system.nodes[1]->weight (nano::test_genesis_key.pub).convert_to<std::string> (), weight2);
	//Test accounts filter
	// Create a second online representative: fund it, receive, and switch
	// the genesis account's representative to it
	scoped_thread_name_io.reset ();
	auto new_rep (system.wallet (1)->deterministic_insert ());
	auto send (system.wallet (0)->send_action (nano::test_genesis_key.pub, new_rep, system.nodes[0]->config.receive_minimum.number ()));
	scoped_thread_name_io.renew ();
	ASSERT_NE (nullptr, send);
	system.deadline_set (5s);
	while (system.nodes[1]->block (send->hash ()) == nullptr)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	scoped_thread_name_io.reset ();
	auto receive (system.wallet (1)->receive_action (*send, new_rep, system.nodes[0]->config.receive_minimum.number ()));
	scoped_thread_name_io.renew ();
	ASSERT_NE (nullptr, receive);
	system.deadline_set (5s);
	while (system.nodes[1]->block (receive->hash ()) == nullptr)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	scoped_thread_name_io.reset ();
	auto change (system.wallet (0)->change_action (nano::test_genesis_key.pub, new_rep));
	scoped_thread_name_io.renew ();
	ASSERT_NE (nullptr, change);
	system.deadline_set (5s);
	while (system.nodes[1]->block (change->hash ()) == nullptr)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	system.deadline_set (5s);
	while (system.nodes[1]->online_reps.list ().size () != 2)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	// Filtering on new_rep must return only that representative
	boost::property_tree::ptree child_rep;
	child_rep.put ("", new_rep.to_account ());
	boost::property_tree::ptree filtered_accounts;
	filtered_accounts.push_back (std::make_pair ("", child_rep));
	request.add_child ("accounts", filtered_accounts);
	test_response response3 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response3.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	auto representatives3 (response3.json.get_child ("representatives"));
	auto item3 (representatives3.begin ());
	ASSERT_NE (representatives3.end (), item3);
	ASSERT_EQ (new_rep.to_account (), item3->first);
	ASSERT_EQ (representatives3.size (), 1);
	system.nodes[1]->stop ();
}
// If this test fails, try increasing the num_blocks size.
// RPC "confirmation_height_currently_processing": while a long chain is
// being confirmed, the RPC returns the hash being processed; once the
// confirmation completes, it returns an error instead.
TEST (rpc, confirmation_height_currently_processing)
{
	// The chains should be longer than the batch_write_size to test the amount of blocks confirmed is correct.
	nano::system system;
	nano::node_config node_config (24000, system.logging);
	node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled;
	auto node = system.add_node (node_config);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	// Do enough blocks to reliably call RPC before the confirmation height has finished
	auto previous_genesis_chain_hash = node->latest (nano::test_genesis_key.pub);
	{
		constexpr auto num_blocks = 1000;
		auto transaction = node->store.tx_begin_write ();
		// Build a long genesis chain of sends within one write transaction
		for (auto i = num_blocks; i > 0; --i)
		{
			nano::send_block send (previous_genesis_chain_hash, nano::genesis_account, nano::genesis_amount - nano::Gxrb_ratio + i + 1, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (previous_genesis_chain_hash));
			ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, send).code);
			previous_genesis_chain_hash = send.hash ();
		}
		nano::keypair key1;
		nano::send_block send (previous_genesis_chain_hash, key1.pub, nano::genesis_amount - nano::Gxrb_ratio - 1, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (previous_genesis_chain_hash));
		ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, send).code);
		previous_genesis_chain_hash = send.hash ();
	}
	scoped_io_thread_name_change scoped_thread_name_io;
	std::shared_ptr<nano::block> frontier;
	{
		auto transaction = node->store.tx_begin_read ();
		frontier = node->store.block_get (transaction, previous_genesis_chain_hash);
	}
	// Begin process for confirming the block (and setting confirmation height)
	node->block_confirm (frontier);
	boost::property_tree::ptree request;
	request.put ("action", "confirmation_height_currently_processing");
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	// Wait until the confirmation-height processor has picked up the frontier
	system.deadline_set (10s);
	while (!node->pending_confirmation_height.is_processing_block (previous_genesis_chain_hash))
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	// Make the request
	{
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (10s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto hash (response.json.get<std::string> ("hash"));
		ASSERT_EQ (frontier->hash ().to_string (), hash);
	}
	// Wait until confirmation has been set
	system.deadline_set (10s);
	while (true)
	{
		auto transaction = node->store.tx_begin_read ();
		if (node->ledger.block_confirmed (transaction, frontier->hash ()))
		{
			break;
		}
		ASSERT_NO_ERROR (system.poll ());
	}
	// Make the same request, it should now return an error
	{
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (10s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		std::error_code ec (nano::error_rpc::confirmation_height_not_processing);
		ASSERT_EQ (response.json.get<std::string> ("error"), ec.message ());
	}
}
// Confirm a wallet send, then verify the "confirmation_history" RPC reports
// the confirmed election with hash, tally, duration, time and request_count.
TEST (rpc, confirmation_history)
{
	nano::system system (24000, 1);
	nano::keypair key;
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	ASSERT_TRUE (system.nodes[0]->active.list_confirmed ().empty ());
	auto block (system.wallet (0)->send_action (nano::test_genesis_key.pub, key.pub, nano::Gxrb_ratio));
	scoped_io_thread_name_change scoped_thread_name_io;
	// Wait for the election on the send block to be confirmed
	system.deadline_set (10s);
	while (system.nodes[0]->active.list_confirmed ().empty ())
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "confirmation_history");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto representatives (response.json.get_child ("confirmations"));
	auto item (representatives.begin ());
	ASSERT_NE (representatives.end (), item);
	auto hash (item->second.get<std::string> ("hash"));
	auto tally (item->second.get<std::string> ("tally"));
	ASSERT_EQ (1, item->second.count ("duration"));
	ASSERT_EQ (1, item->second.count ("time"));
	ASSERT_EQ (1, item->second.count ("request_count"));
	ASSERT_EQ (block->hash ().to_string (), hash);
	nano::amount tally_num;
	// Use gtest assertions instead of assert (): assert () is compiled out in
	// NDEBUG (release) builds and aborts the whole test binary when it fires.
	// Also check the decode_dec error return instead of silently ignoring it.
	ASSERT_FALSE (tally_num.decode_dec (tally));
	ASSERT_TRUE (tally_num == nano::genesis_amount || tally_num == (nano::genesis_amount - nano::Gxrb_ratio));
	system.stop ();
}
// Confirm three wallet sends, then verify "confirmation_history" filtered by
// a specific hash returns exactly that one confirmation record.
TEST (rpc, confirmation_history_hash)
{
	nano::system system (24000, 1);
	nano::keypair key;
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	ASSERT_TRUE (system.nodes[0]->active.list_confirmed ().empty ());
	auto send1 (system.wallet (0)->send_action (nano::test_genesis_key.pub, key.pub, nano::Gxrb_ratio));
	auto send2 (system.wallet (0)->send_action (nano::test_genesis_key.pub, key.pub, nano::Gxrb_ratio));
	auto send3 (system.wallet (0)->send_action (nano::test_genesis_key.pub, key.pub, nano::Gxrb_ratio));
	scoped_io_thread_name_change scoped_thread_name_io;
	// Wait for all three elections to be confirmed
	system.deadline_set (10s);
	while (system.nodes[0]->active.list_confirmed ().size () != 3)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "confirmation_history");
	request.put ("hash", send2->hash ().to_string ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto representatives (response.json.get_child ("confirmations"));
	// Filtering by hash must narrow the result to a single confirmation
	ASSERT_EQ (representatives.size (), 1);
	auto item (representatives.begin ());
	ASSERT_NE (representatives.end (), item);
	auto hash (item->second.get<std::string> ("hash"));
	auto tally (item->second.get<std::string> ("tally"));
	ASSERT_FALSE (item->second.get<std::string> ("duration", "").empty ());
	ASSERT_FALSE (item->second.get<std::string> ("time", "").empty ());
	ASSERT_EQ (send2->hash ().to_string (), hash);
	nano::amount tally_num;
	// Use gtest assertions instead of assert (): assert () vanishes in NDEBUG
	// builds and aborts the binary instead of failing just this test.
	ASSERT_FALSE (tally_num.decode_dec (tally));
	ASSERT_TRUE (tally_num == nano::genesis_amount || tally_num == (nano::genesis_amount - nano::Gxrb_ratio) || tally_num == (nano::genesis_amount - 2 * nano::Gxrb_ratio) || tally_num == (nano::genesis_amount - 3 * nano::Gxrb_ratio));
	system.stop ();
}
// block_confirm on a block that is in the ledger but not yet confirmed
// should report that confirmation was started.
TEST (rpc, block_confirm)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	nano::genesis genesis;
	// Build a send state block on top of genesis and write it directly into
	// the ledger (bypassing normal block processing)
	auto send1 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, genesis.hash (), nano::test_genesis_key.pub, nano::genesis_amount - nano::Gxrb_ratio, nano::test_genesis_key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.nodes[0]->work_generate_blocking (genesis.hash ())));
	{
		auto transaction (system.nodes[0]->store.tx_begin_write ());
		ASSERT_EQ (nano::process_result::progress, system.nodes[0]->ledger.process (transaction, *send1).code);
	}
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp)
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "block_confirm");
	request.put ("hash", send1->hash ().to_string ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// "started" == "1" means the node accepted the request and began confirming
	ASSERT_EQ ("1", response.json.get<std::string> ("started"));
}
// Requesting block_confirm for a hash that is not in the ledger must return
// the blocks "not_found" error.
TEST (rpc, block_confirm_absent)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_cfg;
	nano::ipc::ipc_server ipc (*node, node_rpc_cfg);
	nano::rpc_config rpc_cfg (true);
	nano::ipc_rpc_processor processor (system.io_ctx, rpc_cfg);
	nano::rpc rpc (system.io_ctx, rpc_cfg, processor);
	rpc.start ();
	boost::property_tree::ptree req;
	req.put ("action", "block_confirm");
	req.put ("hash", "0");
	test_response resp (req, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (resp.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, resp.status);
	// Hash "0" does not exist, so the RPC must answer with not_found
	ASSERT_EQ (std::error_code (nano::error_blocks::not_found).message (), resp.json.get<std::string> ("error"));
}
// block_confirm on an already-confirmed block (genesis) should still report
// "started", add an entry to the confirmation history, and trigger the HTTP
// callback configured on the node.
TEST (rpc, block_confirm_confirmed)
{
	nano::system system (24000, 1);
	auto path (nano::unique_path ());
	// Build a second node manually so a callback target can be configured
	nano::node_config config;
	config.peering_port = 24001;
	config.callback_address = "localhost";
	config.callback_port = 24002;
	config.callback_target = "/";
	config.logging.init (path);
	auto node (std::make_shared<nano::node> (system.io_ctx, path, system.alarm, config, system.work));
	node->start ();
	system.nodes.push_back (node);
	nano::genesis genesis;
	{
		auto transaction (node->store.tx_begin_read ());
		// Genesis is confirmed from the start
		ASSERT_TRUE (node->ledger.block_confirmed (transaction, genesis.hash ()))
	}
	ASSERT_EQ (0, node->stats.count (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out));
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "block_confirm");
	request.put ("hash", genesis.hash ().to_string ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	ASSERT_EQ ("1", response.json.get<std::string> ("started"));
	// Check confirmation history
	auto confirmed (node->active.list_confirmed ());
	ASSERT_EQ (1, confirmed.size ());
	ASSERT_EQ (genesis.hash (), confirmed.begin ()->winner->hash ());
	// Check callback
	system.deadline_set (5s);
	while (node->stats.count (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out) == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	// Callback result is error because callback target port isn't listening
	ASSERT_EQ (1, node->stats.count (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out));
	node->stop ();
}
// The "node_id" RPC must expose the node's identity keypair in all three
// representations: raw private key, account encoding and node_id encoding.
TEST (rpc, node_id)
{
	nano::system system (24000, 1);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_cfg;
	nano::ipc::ipc_server ipc (*node, node_rpc_cfg);
	nano::rpc_config rpc_cfg (true);
	nano::ipc_rpc_processor processor (system.io_ctx, rpc_cfg);
	nano::rpc rpc (system.io_ctx, rpc_cfg, processor);
	rpc.start ();
	boost::property_tree::ptree req;
	req.put ("action", "node_id");
	test_response resp (req, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (resp.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, resp.status);
	// All three encodings must match the node's own identity keypair
	ASSERT_EQ (node->node_id.prv.data.to_string (), resp.json.get<std::string> ("private"));
	ASSERT_EQ (node->node_id.pub.to_account (), resp.json.get<std::string> ("as_account"));
	ASSERT_EQ (node->node_id.pub.to_node_id (), resp.json.get<std::string> ("node_id"));
}
// "stats_clear" must zero all stat counters and reset the last-reset timer.
TEST (rpc, stats_clear)
{
	nano::system system (24000, 1);
	nano::keypair key;
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_cfg;
	nano::ipc::ipc_server ipc (*node, node_rpc_cfg);
	nano::rpc_config rpc_cfg (true);
	nano::ipc_rpc_processor processor (system.io_ctx, rpc_cfg);
	nano::rpc rpc (system.io_ctx, rpc_cfg, processor);
	rpc.start ();
	// Bump one counter so there is something to clear
	node->stats.inc (nano::stat::type::ledger, nano::stat::dir::in);
	ASSERT_EQ (1, node->stats.count (nano::stat::type::ledger, nano::stat::dir::in));
	boost::property_tree::ptree req;
	req.put ("action", "stats_clear");
	test_response resp (req, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (resp.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	std::string success (resp.json.get<std::string> ("success"));
	ASSERT_TRUE (success.empty ());
	// Counter is cleared and the reset timestamp is fresh (within 5 units)
	ASSERT_EQ (0, node->stats.count (nano::stat::type::ledger, nano::stat::dir::in));
	ASSERT_LE (node->stats.last_reset ().count (), 5);
}
// The "unchecked" RPC should list blocks sitting in the unchecked table,
// both as serialized strings and (with json_block) as expanded JSON.
TEST (rpc, unchecked)
{
	nano::system system (24000, 1);
	nano::keypair key;
	auto & node (*system.nodes[0]);
	enable_ipc_transport_tcp (node.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	// Two state blocks for a brand-new account; presumably these land in the
	// unchecked table because their dependencies are unknown — the RPC below
	// expects both hashes to be reported
	auto open (std::make_shared<nano::state_block> (key.pub, 0, key.pub, 1, key.pub, key.prv, key.pub, *system.work.generate (key.pub)));
	auto open2 (std::make_shared<nano::state_block> (key.pub, 0, key.pub, 2, key.pub, key.prv, key.pub, *system.work.generate (key.pub)));
	node.process_active (open);
	node.process_active (open2);
	node.block_processor.flush ();
	boost::property_tree::ptree request;
	request.put ("action", "unchecked");
	request.put ("count", 2);
	{
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto & blocks (response.json.get_child ("blocks"));
		ASSERT_EQ (2, blocks.size ());
		ASSERT_EQ (1, blocks.count (open->hash ().to_string ()));
		ASSERT_EQ (1, blocks.count (open2->hash ().to_string ()));
	}
	// With json_block enabled each entry is an expanded JSON block object
	request.put ("json_block", true);
	{
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto & blocks (response.json.get_child ("blocks"));
		ASSERT_EQ (2, blocks.size ());
		auto & open_block (blocks.get_child (open->hash ().to_string ()));
		ASSERT_EQ ("state", open_block.get<std::string> ("type"));
	}
}
// The "unchecked_get" RPC should return the contents and modification
// timestamp of a single unchecked block, plain or as expanded JSON.
TEST (rpc, unchecked_get)
{
	nano::system system (24000, 1);
	nano::keypair key;
	auto & node (*system.nodes[0]);
	enable_ipc_transport_tcp (node.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	// State block for a new account, expected to be held as unchecked
	auto open (std::make_shared<nano::state_block> (key.pub, 0, key.pub, 1, key.pub, key.prv, key.pub, *system.work.generate (key.pub)));
	node.process_active (open);
	node.block_processor.flush ();
	boost::property_tree::ptree request;
	request.put ("action", "unchecked_get");
	request.put ("hash", open->hash ().to_string ());
	{
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		ASSERT_EQ (1, response.json.count ("contents"));
		// modified_timestamp must not be in the future
		auto timestamp (response.json.get<uint64_t> ("modified_timestamp"));
		ASSERT_LE (timestamp, nano::seconds_since_epoch ());
	}
	// Repeat with json_block: contents becomes an expanded JSON block object
	request.put ("json_block", true);
	{
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto & contents (response.json.get_child ("contents"));
		ASSERT_EQ ("state", contents.get<std::string> ("type"));
		auto timestamp (response.json.get<uint64_t> ("modified_timestamp"));
		ASSERT_LE (timestamp, nano::seconds_since_epoch ());
	}
}
// The "unopened" RPC lists accounts with pending funds that have no open
// block yet; this exercises the default call plus the account (start key),
// count and threshold filters.
TEST (rpc, unopened)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	// Two adjacent unopened account keys receiving 1 and 10 raw respectively
	nano::account account1 (1), account2 (account1.number () + 1);
	auto genesis (system.nodes[0]->latest (nano::test_genesis_key.pub));
	ASSERT_FALSE (genesis.is_zero ());
	auto send (system.wallet (0)->send_action (nano::test_genesis_key.pub, account1, 1));
	ASSERT_NE (nullptr, send);
	auto send2 (system.wallet (0)->send_action (nano::test_genesis_key.pub, account2, 10));
	ASSERT_NE (nullptr, send2);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	{
		// default request returns both unopened accounts with their balances
		boost::property_tree::ptree request;
		request.put ("action", "unopened");
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto & accounts (response.json.get_child ("accounts"));
		ASSERT_EQ (2, accounts.size ());
		ASSERT_EQ ("1", accounts.get<std::string> (account1.to_account ()));
		ASSERT_EQ ("10", accounts.get<std::string> (account2.to_account ()));
	}
	{
		// starting at second account should get a single result
		boost::property_tree::ptree request;
		request.put ("action", "unopened");
		request.put ("account", account2.to_account ());
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto & accounts (response.json.get_child ("accounts"));
		ASSERT_EQ (1, accounts.size ());
		ASSERT_EQ ("10", accounts.get<std::string> (account2.to_account ()));
	}
	{
		// starting at third account should get no results
		boost::property_tree::ptree request;
		request.put ("action", "unopened");
		request.put ("account", nano::account (account2.number () + 1).to_account ());
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto & accounts (response.json.get_child ("accounts"));
		ASSERT_EQ (0, accounts.size ());
	}
	{
		// using count=1 should get a single result
		boost::property_tree::ptree request;
		request.put ("action", "unopened");
		request.put ("count", "1");
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto & accounts (response.json.get_child ("accounts"));
		ASSERT_EQ (1, accounts.size ());
		ASSERT_EQ ("1", accounts.get<std::string> (account1.to_account ()));
	}
	{
		// using threshold at 5 should get a single result
		boost::property_tree::ptree request;
		request.put ("action", "unopened");
		request.put ("threshold", 5);
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto & accounts (response.json.get_child ("accounts"));
		ASSERT_EQ (1, accounts.size ());
		ASSERT_EQ ("10", accounts.get<std::string> (account2.to_account ()));
	}
}
// Funds pending on the burn account must not appear in "unopened" results.
TEST (rpc, unopened_burn)
{
	nano::system system (24000, 1);
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	auto genesis (system.nodes[0]->latest (nano::test_genesis_key.pub));
	ASSERT_FALSE (genesis.is_zero ());
	// Send 1 raw to the burn account
	auto send (system.wallet (0)->send_action (nano::test_genesis_key.pub, nano::burn_account, 1));
	ASSERT_NE (nullptr, send);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_cfg;
	nano::ipc::ipc_server ipc (*node, node_rpc_cfg);
	nano::rpc_config rpc_cfg (true);
	nano::ipc_rpc_processor processor (system.io_ctx, rpc_cfg);
	nano::rpc rpc (system.io_ctx, rpc_cfg, processor);
	rpc.start ();
	boost::property_tree::ptree req;
	req.put ("action", "unopened");
	test_response resp (req, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (resp.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, resp.status);
	// The burn account is excluded, so no unopened accounts are reported
	auto & accounts (resp.json.get_child ("accounts"));
	ASSERT_EQ (0, accounts.size ());
}
// With no pending sends at all, "unopened" must return an empty account set.
TEST (rpc, unopened_no_accounts)
{
	nano::system system (24000, 1);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_cfg;
	nano::ipc::ipc_server ipc (*node, node_rpc_cfg);
	nano::rpc_config rpc_cfg (true);
	nano::ipc_rpc_processor processor (system.io_ctx, rpc_cfg);
	nano::rpc rpc (system.io_ctx, rpc_cfg, processor);
	rpc.start ();
	boost::property_tree::ptree req;
	req.put ("action", "unopened");
	test_response resp (req, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (resp.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, resp.status);
	auto & accounts (resp.json.get_child ("accounts"));
	ASSERT_EQ (0, accounts.size ());
}
// "uptime" should report at least the number of seconds the node has run.
TEST (rpc, uptime)
{
	nano::system system (24000, 1);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_cfg;
	nano::ipc::ipc_server ipc (*node, node_rpc_cfg);
	nano::rpc_config rpc_cfg (true);
	nano::ipc_rpc_processor processor (system.io_ctx, rpc_cfg);
	nano::rpc rpc (system.io_ctx, rpc_cfg, processor);
	rpc.start ();
	boost::property_tree::ptree req;
	req.put ("action", "uptime");
	// Let a full second elapse so the reported uptime is at least 1
	std::this_thread::sleep_for (std::chrono::seconds (1));
	test_response resp (req, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (resp.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, resp.status);
	ASSERT_LE (1, resp.json.get<int> ("seconds"));
}
// "wallet_history" should return, newest first, the send/receive entries of
// all wallet accounts with their local timestamps, ending at genesis.
TEST (rpc, wallet_history)
{
	nano::system system (24000, 1);
	auto node0 (system.nodes[0]);
	nano::genesis genesis;
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	// Record a timestamp before each block so the local_timestamp fields can
	// be checked; the 1s sleeps keep the three timestamps distinct
	auto timestamp1 (nano::seconds_since_epoch ());
	auto send (system.wallet (0)->send_action (nano::test_genesis_key.pub, nano::test_genesis_key.pub, node0->config.receive_minimum.number ()));
	ASSERT_NE (nullptr, send);
	std::this_thread::sleep_for (std::chrono::milliseconds (1000));
	auto timestamp2 (nano::seconds_since_epoch ());
	auto receive (system.wallet (0)->receive_action (*send, nano::test_genesis_key.pub, node0->config.receive_minimum.number ()));
	ASSERT_NE (nullptr, receive);
	nano::keypair key;
	std::this_thread::sleep_for (std::chrono::milliseconds (1000));
	auto timestamp3 (nano::seconds_since_epoch ());
	auto send2 (system.wallet (0)->send_action (nano::test_genesis_key.pub, key.pub, node0->config.receive_minimum.number ()));
	ASSERT_NE (nullptr, send2);
	// NOTE(review): this deadline is set but no poll loop follows before the
	// response wait below resets it — possibly leftover; confirm intent
	system.deadline_set (10s);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "wallet_history");
	request.put ("wallet", node0->wallets.items.begin ()->first.to_string ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// Collect (type, account, amount, hash, block_account, local_timestamp)
	// tuples in response order (newest entry first)
	std::vector<std::tuple<std::string, std::string, std::string, std::string, std::string, std::string>> history_l;
	auto & history_node (response.json.get_child ("history"));
	for (auto i (history_node.begin ()), n (history_node.end ()); i != n; ++i)
	{
		history_l.push_back (std::make_tuple (i->second.get<std::string> ("type"), i->second.get<std::string> ("account"), i->second.get<std::string> ("amount"), i->second.get<std::string> ("hash"), i->second.get<std::string> ("block_account"), i->second.get<std::string> ("local_timestamp")));
	}
	ASSERT_EQ (4, history_l.size ());
	// Entry 0: most recent block, the send to key
	ASSERT_EQ ("send", std::get<0> (history_l[0]));
	ASSERT_EQ (key.pub.to_account (), std::get<1> (history_l[0]));
	ASSERT_EQ (node0->config.receive_minimum.to_string_dec (), std::get<2> (history_l[0]));
	ASSERT_EQ (send2->hash ().to_string (), std::get<3> (history_l[0]));
	ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<4> (history_l[0]));
	ASSERT_EQ (std::to_string (timestamp3), std::get<5> (history_l[0]));
	// Entry 1: the receive back into the genesis account
	ASSERT_EQ ("receive", std::get<0> (history_l[1]));
	ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<1> (history_l[1]));
	ASSERT_EQ (node0->config.receive_minimum.to_string_dec (), std::get<2> (history_l[1]));
	ASSERT_EQ (receive->hash ().to_string (), std::get<3> (history_l[1]));
	ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<4> (history_l[1]));
	ASSERT_EQ (std::to_string (timestamp2), std::get<5> (history_l[1]));
	// Entry 2: the self-send
	ASSERT_EQ ("send", std::get<0> (history_l[2]));
	ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<1> (history_l[2]));
	ASSERT_EQ (node0->config.receive_minimum.to_string_dec (), std::get<2> (history_l[2]));
	ASSERT_EQ (send->hash ().to_string (), std::get<3> (history_l[2]));
	ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<4> (history_l[2]));
	ASSERT_EQ (std::to_string (timestamp1), std::get<5> (history_l[2]));
	// Genesis block
	ASSERT_EQ ("receive", std::get<0> (history_l[3]));
	ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<1> (history_l[3]));
	ASSERT_EQ (nano::genesis_amount.convert_to<std::string> (), std::get<2> (history_l[3]));
	ASSERT_EQ (genesis.hash ().to_string (), std::get<3> (history_l[3]));
	ASSERT_EQ (nano::test_genesis_key.pub.to_account (), std::get<4> (history_l[3]));
}
// The "sign" RPC with a raw hash is disabled by default and must return an
// error; after setting enable_sign_hash it must produce a valid signature.
TEST (rpc, sign_hash)
{
	nano::system system (24000, 1);
	nano::keypair key;
	auto & node1 (*system.nodes[0]);
	nano::state_block send (nano::genesis_account, node1.latest (nano::test_genesis_key.pub), nano::genesis_account, nano::genesis_amount - nano::Gxrb_ratio, key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "sign");
	request.put ("hash", send.hash ().to_string ());
	request.put ("key", key.prv.data.to_string ());
	test_response response (request, rpc.config.port, system.io_ctx);
	// Bound the wait and surface poll errors, as the sibling RPC tests do,
	// instead of potentially spinning forever on a hung response
	system.deadline_set (10s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// Signing raw hashes is disabled by default
	std::error_code ec (nano::error_rpc::sign_hash_disabled);
	ASSERT_EQ (response.json.get<std::string> ("error"), ec.message ());
	node_rpc_config.enable_sign_hash = true;
	test_response response2 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (10s);
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response2.status);
	// Once enabled, the returned signature must verify against key.pub
	nano::signature signature;
	std::string signature_text (response2.json.get<std::string> ("signature"));
	ASSERT_FALSE (signature.decode_hex (signature_text));
	ASSERT_FALSE (nano::validate_message (key.pub, send.hash (), signature));
}
// Signing a full block via the "sign" RPC with a wallet account should
// return a block with the same hash but a different, valid signature.
TEST (rpc, sign_block)
{
	nano::system system (24000, 1);
	nano::keypair key;
	system.wallet (0)->insert_adhoc (key.prv);
	auto & node1 (*system.nodes[0]);
	// Block originally signed with the genesis key; the RPC re-signs it with key
	nano::state_block send (nano::genesis_account, node1.latest (nano::test_genesis_key.pub), nano::genesis_account, nano::genesis_amount - nano::Gxrb_ratio, key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node1.config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (node1, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "sign");
	std::string wallet;
	system.nodes[0]->wallets.items.begin ()->first.encode_hex (wallet);
	request.put ("wallet", wallet);
	request.put ("account", key.pub.to_account ());
	std::string json;
	send.serialize_json (json);
	request.put ("block", json);
	test_response response (request, rpc.config.port, system.io_ctx);
	// Bound the wait and surface poll errors, as the sibling RPC tests do,
	// instead of potentially spinning forever on a hung response
	system.deadline_set (10s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	auto contents (response.json.get<std::string> ("block"));
	boost::property_tree::ptree block_l;
	std::stringstream block_stream (contents);
	boost::property_tree::read_json (block_stream, block_l);
	auto block (nano::deserialize_block_json (block_l));
	// Re-signed with key.prv: signature verifies against key.pub, differs from
	// the original genesis-key signature, and the hash is unchanged
	ASSERT_FALSE (nano::validate_message (key.pub, send.hash (), block->block_signature ()));
	ASSERT_NE (block->block_signature (), send.block_signature ());
	ASSERT_EQ (block->hash (), send.hash ());
}
// The "stats" RPC with type "objects" should expose container counts,
// including the vote uniquer's vote count seeded below.
TEST (rpc, memory_stats)
{
	nano::system system (24000, 1);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	// Preliminary test adding to the vote uniquer and checking json output is correct
	nano::keypair key;
	auto block (std::make_shared<nano::state_block> (0, 0, 0, 0, 0, key.prv, key.pub, 0));
	std::vector<nano::block_hash> hashes;
	hashes.push_back (block->hash ());
	auto vote (std::make_shared<nano::vote> (key.pub, key.prv, 0, hashes));
	node->vote_uniquer.unique (vote);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "stats");
	request.put ("type", "objects");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// Exactly the one vote inserted above should be counted
	ASSERT_EQ (response.json.get_child ("node").get_child ("vote_uniquer").get_child ("votes").get<std::string> ("count"), "1");
}
// "block_info" confirmation status: invalid hashes and missing blocks return
// errors; an unconfirmed block reports confirmed=false; a block processed
// through the normal pipeline eventually reports confirmed=true.
TEST (rpc, block_confirmed)
{
	nano::system system (24000, 1);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	// Malformed hash -> invalid_block_hash error
	boost::property_tree::ptree request;
	request.put ("action", "block_info");
	request.put ("hash", "bad_hash1337");
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	ASSERT_EQ (std::error_code (nano::error_blocks::invalid_block_hash).message (), response.json.get<std::string> ("error"));
	// Well-formed but nonexistent hash -> not_found error
	request.put ("hash", "0");
	test_response response1 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response1.status == 0)
	{
		// Check the poll result so a deadline expiry fails the test instead
		// of being silently swallowed and looping forever
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response1.status);
	ASSERT_EQ (std::error_code (nano::error_blocks::not_found).message (), response1.json.get<std::string> ("error"));
	scoped_thread_name_io.reset ();
	system.wallet (0)->insert_adhoc (nano::test_genesis_key.prv);
	nano::keypair key;
	system.wallet (0)->insert_adhoc (key.prv);
	// Open an account directly in the ledger
	{
		auto transaction = node->store.tx_begin_write ();
		nano::block_hash latest (node->latest (nano::test_genesis_key.pub));
		nano::send_block send1 (latest, key.pub, 300, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (latest));
		ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, send1).code);
		nano::open_block open1 (send1.hash (), nano::genesis_account, key.pub, key.prv, key.pub, *system.work.generate (key.pub));
		ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, open1).code);
	}
	scoped_thread_name_io.renew ();
	// This should not be confirmed
	nano::block_hash latest (node->latest (nano::test_genesis_key.pub));
	request.put ("hash", latest.to_string ());
	test_response response2 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response2.status);
	ASSERT_FALSE (response2.json.get<bool> ("confirmed"));
	// Create and process a new send block
	auto send = std::make_shared<nano::send_block> (latest, key.pub, 10, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (latest));
	node->process_active (send);
	node->block_processor.flush ();
	// Wait until the confirmation height has been set
	system.deadline_set (10s);
	while (true)
	{
		auto transaction = node->store.tx_begin_read ();
		if (node->ledger.block_confirmed (transaction, send->hash ()))
		{
			break;
		}
		ASSERT_NO_ERROR (system.poll ());
	}
	// Should no longer be processing the block after confirmation is set
	ASSERT_FALSE (node->pending_confirmation_height.is_processing_block (send->hash ()));
	// Requesting confirmation for this should now succeed
	request.put ("hash", send->hash ().to_string ());
	test_response response3 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response3.status == 0)
	{
		// Use the ASSERT_NO_ERROR idiom used throughout these tests rather
		// than ASSERT_FALSE on the raw error_code
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response3.status);
	ASSERT_TRUE (response3.json.get<bool> ("confirmed"));
}
#if !NANO_ROCKSDB
// Exercises the "database_txn_tracker" RPC action in three stages:
// 1) tracking disabled -> tracking_not_enabled error,
// 2) tracking enabled but invalid time arguments -> invalid_amount error,
// 3) tracking enabled with a deliberately long-lived read transaction ->
//    the tracker reports it (thread name, hold time, read/write flag, stack).
TEST (rpc, database_txn_tracker)
{
	// First try when database tracking is disabled
	{
		nano::system system (24000, 1);
		auto node = system.nodes.front ();
		scoped_io_thread_name_change scoped_thread_name_io;
		enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
		nano::node_rpc_config node_rpc_config;
		nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
		nano::rpc_config rpc_config (true);
		nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
		nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
		rpc.start ();
		boost::property_tree::ptree request;
		request.put ("action", "database_txn_tracker");
		{
			test_response response (request, rpc.config.port, system.io_ctx);
			system.deadline_set (5s);
			while (response.status == 0)
			{
				ASSERT_NO_ERROR (system.poll ());
			}
			ASSERT_EQ (200, response.status);
			std::error_code ec (nano::error_common::tracking_not_enabled);
			ASSERT_EQ (response.json.get<std::string> ("error"), ec.message ());
		}
	}
	// Now try enabling it but with invalid amounts
	nano::system system;
	nano::node_config node_config (24000, system.logging);
	node_config.diagnostics_config.txn_tracking.enable = true;
	auto node = system.add_node (node_config);
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	// clang-format off
	// Helper: issue the current request and expect an invalid_amount error.
	auto check_not_correct_amount = [&system, &request, &rpc_port = rpc.config.port]() {
		test_response response (request, rpc_port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		std::error_code ec (nano::error_common::invalid_amount);
		ASSERT_EQ (response.json.get<std::string> ("error"), ec.message ());
	};
	// clang-format on
	request.put ("action", "database_txn_tracker");
	request.put ("min_read_time", "not a time");
	check_not_correct_amount ();
	// Read is valid now, but write isn't
	request.put ("min_read_time", "1000");
	request.put ("min_write_time", "bad time");
	check_not_correct_amount ();
	// Now try where times are large unattainable numbers
	request.put ("min_read_time", "1000000");
	request.put ("min_write_time", "1000000");
	std::promise<void> keep_txn_alive_promise;
	std::promise<void> txn_created_promise;
	// clang-format off
	std::thread thread ([&store = node->store, &keep_txn_alive_promise, &txn_created_promise]() {
		// Use rpc_process_container as a placeholder as this thread is only instantiated by the daemon so won't be used
		nano::thread_role::set (nano::thread_role::name::rpc_process_container);
		// Create a read transaction to test
		auto read_tx = store.tx_begin_read ();
		// Sleep so that the read transaction has been alive for at least 1 seconds. A write lock is not used in this test as it can cause a deadlock with
		// other writes done in the background
		std::this_thread::sleep_for (1s);
		txn_created_promise.set_value ();
		keep_txn_alive_promise.get_future ().wait ();
	});
	// clang-format on
	txn_created_promise.get_future ().wait ();
	// Adjust minimum read time so that it can detect the read transaction being opened
	request.put ("min_read_time", "1000");
	test_response response (request, rpc.config.port, system.io_ctx);
	// It can take a long time to generate stack traces
	system.deadline_set (30s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// Release the background thread's read transaction now that we have a reply.
	keep_txn_alive_promise.set_value ();
	// Flatten the JSON into (thread, time_held_open, write, frames) tuples for the assertions below.
	std::vector<std::tuple<std::string, std::string, std::string, std::vector<std::tuple<std::string, std::string, std::string, std::string>>>> json_l;
	auto & json_node (response.json.get_child ("txn_tracking"));
	for (auto & stat : json_node)
	{
		auto & stack_trace = stat.second.get_child ("stacktrace");
		std::vector<std::tuple<std::string, std::string, std::string, std::string>> frames_json_l;
		for (auto & frame : stack_trace)
		{
			frames_json_l.emplace_back (frame.second.get<std::string> ("name"), frame.second.get<std::string> ("address"), frame.second.get<std::string> ("source_file"), frame.second.get<std::string> ("source_line"));
		}
		json_l.emplace_back (stat.second.get<std::string> ("thread"), stat.second.get<std::string> ("time_held_open"), stat.second.get<std::string> ("write"), std::move (frames_json_l));
	}
	ASSERT_EQ (1, json_l.size ());
	auto thread_name = nano::thread_role::get_string (nano::thread_role::name::rpc_process_container);
	// Should only have a read transaction
	ASSERT_EQ (thread_name, std::get<0> (json_l.front ()));
	ASSERT_LE (1000u, boost::lexical_cast<unsigned> (std::get<1> (json_l.front ())));
	ASSERT_EQ ("false", std::get<2> (json_l.front ()));
	// Due to results being different for different compilers/build options we cannot reliably check the contents.
	// The best we can do is just check that there are entries.
	ASSERT_TRUE (!std::get<3> (json_l.front ()).empty ());
	thread.join ();
}
#endif
// Exercises the "active_difficulty" RPC action: verifies the network minimum,
// the averaged multiplier and current difficulty, and the optional
// "include_trend" flag which returns the recent multiplier history.
TEST (rpc, active_difficulty)
{
	nano::system system (24000, 1);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "active_difficulty");
	// Seed the trend buffer with known multipliers while holding the
	// active-transactions lock, so the averaged value is predictable.
	nano::unique_lock<std::mutex> lock (node->active.mutex);
	node->active.multipliers_cb.push_front (1.5);
	node->active.multipliers_cb.push_front (4.2);
	// Also pushes 1.0 to the front of multipliers_cb
	node->active.update_active_difficulty (lock);
	lock.unlock ();
	auto trend_size (node->active.multipliers_cb.size ());
	ASSERT_NE (0, trend_size);
	// Every other slot in the buffer holds the default multiplier of 1.0.
	auto expected_multiplier{ (1.5 + 4.2 + (trend_size - 2) * 1) / trend_size };
	{
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		ASSERT_EQ (200, response.status);
		auto network_minimum_text (response.json.get<std::string> ("network_minimum"));
		uint64_t network_minimum;
		ASSERT_FALSE (nano::from_string_hex (network_minimum_text, network_minimum));
		ASSERT_EQ (node->network_params.network.publish_threshold, network_minimum);
		auto multiplier (response.json.get<double> ("multiplier"));
		ASSERT_NEAR (expected_multiplier, multiplier, 1e-6);
		auto network_current_text (response.json.get<std::string> ("network_current"));
		uint64_t network_current;
		ASSERT_FALSE (nano::from_string_hex (network_current_text, network_current));
		ASSERT_EQ (nano::difficulty::from_multiplier (expected_multiplier, node->network_params.network.publish_threshold), network_current);
		// Without "include_trend" no trend child should be present.
		ASSERT_EQ (response.json.not_found (), response.json.find ("difficulty_trend"));
	}
	// Test include_trend optional
	request.put ("include_trend", true);
	{
		test_response response (request, rpc.config.port, system.io_ctx);
		system.deadline_set (5s);
		while (response.status == 0)
		{
			ASSERT_NO_ERROR (system.poll ());
		}
		auto trend_opt (response.json.get_child_optional ("difficulty_trend"));
		ASSERT_TRUE (trend_opt.is_initialized ());
		auto & trend (trend_opt.get ());
		ASSERT_EQ (trend_size, trend.size ());
		system.deadline_set (5s);
		bool done = false;
		while (!done)
		{
			// Look for the sequence 4.2, 1.5; we don't know where as the active transaction request loop may prepend values concurrently
			double values[2]{ 4.2, 1.5 };
			auto it = std::search (trend.begin (), trend.end (), values, values + 2, [](auto a, double b) {
				return a.second.template get<double> ("") == b;
			});
			done = it != trend.end ();
			ASSERT_NO_ERROR (system.poll ());
		}
	}
}
// This is mainly to check for threading issues with TSAN
TEST (rpc, simultaneous_calls)
{
	// This tests simultaneous calls to the same node in different threads
	nano::system system (24000, 1);
	scoped_io_thread_name_change scoped_thread_name_io;
	auto node = system.nodes.front ();
	nano::thread_runner runner (system.io_ctx, node->config.io_threads);
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	// Multiple IPC connections so requests can genuinely run in parallel.
	rpc_config.rpc_process.num_ipc_connections = 8;
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "account_block_count");
	request.put ("account", nano::test_genesis_key.pub.to_account ());
	constexpr auto num = 100;
	std::array<std::unique_ptr<test_response>, num> test_responses;
	for (int i = 0; i < num; ++i)
	{
		test_responses[i] = std::make_unique<test_response> (request, system.io_ctx);
	}
	// Fire all requests from separate detached threads; the promise is
	// fulfilled when the last one has been submitted.
	std::promise<void> promise;
	std::atomic<int> count{ num };
	for (int i = 0; i < num; ++i)
	{
		// clang-format off
		std::thread ([&test_responses, &promise, &count, i, port = rpc.config.port ]() {
			test_responses[i]->run (port);
			if (--count == 0)
			{
				promise.set_value ();
			}
		})
		.detach ();
		// clang-format on
	}
	promise.get_future ().wait ();
	system.deadline_set (60s);
	while (std::any_of (test_responses.begin (), test_responses.end (), [](const auto & test_response) { return test_response->status == 0; }))
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	// Every call must succeed and agree on the genesis block count.
	for (int i = 0; i < num; ++i)
	{
		ASSERT_EQ (200, test_responses[i]->status);
		std::string block_count_text (test_responses[i]->json.get<std::string> ("block_count"));
		ASSERT_EQ ("1", block_count_text);
	}
	// Tear down in dependency order before joining the io threads.
	rpc.stop ();
	system.stop ();
	ipc_server.stop ();
	system.io_ctx.stop ();
	runner.join ();
}
// This tests that the inprocess RPC (i.e without using IPC) works correctly
TEST (rpc, in_process)
{
	nano::system system (24000, 1);
	auto node = system.nodes.front ();
	scoped_io_thread_name_change scoped_thread_name_io;
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::rpc_config rpc_config (true);
	nano::node_rpc_config node_rpc_config;
	// inprocess_rpc_handler dispatches requests straight into the node,
	// bypassing the IPC transport used by the other RPC tests.
	nano::inprocess_rpc_handler inprocess_rpc_handler (*node, node_rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, inprocess_rpc_handler);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "account_balance");
	request.put ("account", nano::test_genesis_key.pub.to_account ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// Genesis account holds the full supply with nothing pending.
	std::string balance_text (response.json.get<std::string> ("balance"));
	ASSERT_EQ ("340282366920938463463374607431768211455", balance_text);
	std::string pending_text (response.json.get<std::string> ("pending"));
	ASSERT_EQ ("0", pending_text);
}
// Round-trips a fully customized rpc_config through JSON: every field is set
// to a non-default value, verified to differ from a default-constructed
// config, then deserialized into it and verified equal.
TEST (rpc_config, serialization)
{
	nano::rpc_config config1;
	config1.address = boost::asio::ip::address_v6::any ();
	config1.port = 10;
	config1.enable_control = true;
	config1.max_json_depth = 10;
	config1.rpc_process.io_threads = 2;
	config1.rpc_process.ipc_address = boost::asio::ip::address_v6::any ();
	config1.rpc_process.ipc_port = 2000;
	config1.rpc_process.num_ipc_connections = 99;
	nano::jsonconfig tree;
	config1.serialize_json (tree);
	nano::rpc_config config2;
	// Sanity check: defaults must differ from the values chosen above,
	// otherwise the equality assertions below would be vacuous.
	ASSERT_NE (config2.address, config1.address);
	ASSERT_NE (config2.port, config1.port);
	ASSERT_NE (config2.enable_control, config1.enable_control);
	ASSERT_NE (config2.max_json_depth, config1.max_json_depth);
	ASSERT_NE (config2.rpc_process.io_threads, config1.rpc_process.io_threads);
	ASSERT_NE (config2.rpc_process.ipc_address, config1.rpc_process.ipc_address);
	ASSERT_NE (config2.rpc_process.ipc_port, config1.rpc_process.ipc_port);
	ASSERT_NE (config2.rpc_process.num_ipc_connections, config1.rpc_process.num_ipc_connections);
	bool upgraded{ false };
	config2.deserialize_json (upgraded, tree);
	ASSERT_EQ (config2.address, config1.address);
	ASSERT_EQ (config2.port, config1.port);
	ASSERT_EQ (config2.enable_control, config1.enable_control);
	ASSERT_EQ (config2.max_json_depth, config1.max_json_depth);
	ASSERT_EQ (config2.rpc_process.io_threads, config1.rpc_process.io_threads);
	ASSERT_EQ (config2.rpc_process.ipc_address, config1.rpc_process.ipc_address);
	ASSERT_EQ (config2.rpc_process.ipc_port, config1.rpc_process.ipc_port);
	ASSERT_EQ (config2.rpc_process.num_ipc_connections, config1.rpc_process.num_ipc_connections);
}
// Deserializing a legacy node-embedded "rpc" JSON section must migrate its
// settings into a standalone rpc config file in the data directory.
TEST (rpc_config, migrate)
{
	nano::jsonconfig rpc;
	rpc.put ("address", "::1");
	rpc.put ("port", 11111);
	bool updated = false;
	auto data_path = nano::unique_path ();
	boost::filesystem::create_directory (data_path);
	nano::node_rpc_config nano_rpc_config;
	nano_rpc_config.deserialize_json (updated, rpc, data_path);
	// The migration counts as an upgrade of the node config.
	ASSERT_TRUE (updated);
	// Check that the rpc config file is created
	auto rpc_path = nano::get_rpc_config_path (data_path);
	nano::rpc_config rpc_config;
	nano::jsonconfig json;
	updated = false;
	// Reading the freshly written file should succeed without further upgrades.
	ASSERT_FALSE (json.read_and_update (rpc_config, rpc_path));
	ASSERT_FALSE (updated);
	// The migrated file must carry over the legacy port value.
	ASSERT_EQ (rpc_config.port, 11111);
}
// account_info must accept the deprecated (dash-separated) account format,
// still return real account data for it, and flag the request via the
// "deprecated_account_format" response field.
TEST (rpc, deprecated_account_format)
{
	nano::system system (24000, 1);
	nano::genesis genesis;
	auto node = system.nodes.front ();
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "account_info");
	request.put ("account", nano::test_genesis_key.pub.to_account ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// A correctly formatted account must not be flagged as deprecated.
	boost::optional<std::string> deprecated_account_format (response.json.get_optional<std::string> ("deprecated_account_format"));
	ASSERT_FALSE (deprecated_account_format.is_initialized ());
	// Mangle the account string into the deprecated dash-separated format.
	std::string account_text (nano::test_genesis_key.pub.to_account ());
	account_text[4] = '-';
	request.put ("account", account_text);
	test_response response2 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response2.status);
	// Check the deprecated-format reply itself (the original read
	// response.json here, which merely re-asserted the first reply).
	std::string frontier (response2.json.get<std::string> ("frontier"));
	ASSERT_EQ (genesis.hash ().to_string (), frontier);
	boost::optional<std::string> deprecated_account_format2 (response2.json.get_optional<std::string> ("deprecated_account_format"));
	ASSERT_TRUE (deprecated_account_format2.is_initialized ());
}
// Exercises the "epoch_upgrade" RPC action: builds a ledger containing an
// opened account, a pending-only account, a burn destination and a max-value
// destination, upgrades everything to epoch 1 and then to epoch 2, and checks
// the per-account epoch after each pass.
TEST (rpc, epoch_upgrade)
{
	nano::system system (24000, 1);
	auto node = system.nodes.front ();
	nano::keypair key1, key2, key3;
	nano::genesis genesis;
	// Epoch blocks must be signed by the configured epoch signer, which on
	// the test network is the genesis key.
	nano::keypair epoch_signer (nano::test_genesis_key);
	auto send1 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, genesis.hash (), nano::test_genesis_key.pub, nano::genesis_amount - 1, key1.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (genesis.hash ()))); // to opened account
	ASSERT_EQ (nano::process_result::progress, node->process (*send1).code);
	auto send2 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, send1->hash (), nano::test_genesis_key.pub, nano::genesis_amount - 2, key2.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (send1->hash ()))); // to unopened account (pending)
	ASSERT_EQ (nano::process_result::progress, node->process (*send2).code);
	auto send3 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, send2->hash (), nano::test_genesis_key.pub, nano::genesis_amount - 3, 0, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (send2->hash ()))); // to burn (0)
	ASSERT_EQ (nano::process_result::progress, node->process (*send3).code);
	nano::account max_account (std::numeric_limits<nano::uint256_t>::max ());
	auto send4 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, send3->hash (), nano::test_genesis_key.pub, nano::genesis_amount - 4, max_account, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (send3->hash ()))); // to max account
	ASSERT_EQ (nano::process_result::progress, node->process (*send4).code);
	auto open (std::make_shared<nano::state_block> (key1.pub, 0, key1.pub, 1, send1->hash (), key1.prv, key1.pub, *system.work.generate (key1.pub)));
	ASSERT_EQ (nano::process_result::progress, node->process (*open).code);
	// Check accounts epochs
	{
		auto transaction (node->store.tx_begin_read ());
		ASSERT_EQ (2, node->store.account_count (transaction));
		for (auto i (node->store.latest_begin (transaction)); i != node->store.latest_end (); ++i)
		{
			nano::account_info info (i->second);
			ASSERT_EQ (info.epoch (), nano::epoch::epoch_0);
		}
	}
	enable_ipc_transport_tcp (node->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "epoch_upgrade");
	request.put ("epoch", 1);
	request.put ("key", epoch_signer.prv.data.to_string ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	ASSERT_EQ ("1", response.json.get<std::string> ("started"));
	// The upgrade runs asynchronously; wait until the two previously
	// unopened destinations (key2 and max_account) gain epoch-open blocks.
	system.deadline_set (5s);
	bool done (false);
	while (!done)
	{
		auto transaction (node->store.tx_begin_read ());
		done = (4 == node->store.account_count (transaction));
		ASSERT_NO_ERROR (system.poll ());
	}
	// Check upgrade
	{
		auto transaction (node->store.tx_begin_read ());
		ASSERT_EQ (4, node->store.account_count (transaction));
		for (auto i (node->store.latest_begin (transaction)); i != node->store.latest_end (); ++i)
		{
			nano::account_info info (i->second);
			ASSERT_EQ (info.epoch (), nano::epoch::epoch_1);
		}
		ASSERT_TRUE (node->store.account_exists (transaction, key1.pub));
		ASSERT_TRUE (node->store.account_exists (transaction, key2.pub));
		ASSERT_TRUE (node->store.account_exists (transaction, std::numeric_limits<nano::uint256_t>::max ()));
		// The burn account (0) must never be opened by an upgrade.
		ASSERT_FALSE (node->store.account_exists (transaction, 0));
	}
	// Epoch 2 upgrade
	auto genesis_latest (node->latest (nano::test_genesis_key.pub));
	auto send5 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, genesis_latest, nano::test_genesis_key.pub, nano::genesis_amount - 5, 0, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (genesis_latest))); // to burn (0)
	ASSERT_EQ (nano::process_result::progress, node->process (*send5).code);
	auto send6 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, send5->hash (), nano::test_genesis_key.pub, nano::genesis_amount - 6, key1.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (send5->hash ()))); // to key1 (again)
	ASSERT_EQ (nano::process_result::progress, node->process (*send6).code);
	auto key1_latest (node->latest (key1.pub));
	auto send7 (std::make_shared<nano::state_block> (key1.pub, key1_latest, key1.pub, 0, key3.pub, key1.prv, key1.pub, *system.work.generate (key1_latest))); // to key3
	ASSERT_EQ (nano::process_result::progress, node->process (*send7).code);
	{
		// Check pending entry
		auto transaction (node->store.tx_begin_read ());
		nano::pending_info info;
		ASSERT_FALSE (node->store.pending_get (transaction, nano::pending_key (key3.pub, send7->hash ()), info));
		ASSERT_EQ (nano::epoch::epoch_1, info.epoch);
	}
	request.put ("epoch", 2);
	test_response response2 (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response2.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response2.status);
	ASSERT_EQ ("1", response2.json.get<std::string> ("started"));
	// key3 only has a pending entry, so the epoch-2 upgrade opens it,
	// bringing the account count to 5.
	system.deadline_set (5s);
	bool done2 (false);
	while (!done2)
	{
		auto transaction (node->store.tx_begin_read ());
		done2 = (5 == node->store.account_count (transaction));
		ASSERT_NO_ERROR (system.poll ());
	}
	// Check upgrade
	{
		auto transaction (node->store.tx_begin_read ());
		ASSERT_EQ (5, node->store.account_count (transaction));
		for (auto i (node->store.latest_begin (transaction)); i != node->store.latest_end (); ++i)
		{
			nano::account_info info (i->second);
			ASSERT_EQ (info.epoch (), nano::epoch::epoch_2);
		}
		ASSERT_TRUE (node->store.account_exists (transaction, key1.pub));
		ASSERT_TRUE (node->store.account_exists (transaction, key2.pub));
		ASSERT_TRUE (node->store.account_exists (transaction, key3.pub));
		ASSERT_TRUE (node->store.account_exists (transaction, std::numeric_limits<nano::uint256_t>::max ()));
		ASSERT_FALSE (node->store.account_exists (transaction, 0));
	}
}
// Requesting account_info for an account unknown to the node should return an
// error while also kicking off a lazy bootstrap that eventually pulls the
// account's chain from a peer.
TEST (rpc, account_lazy_start)
{
	nano::system system;
	nano::node_flags node_flags;
	// Disable legacy bootstrap so only the lazy path can fetch the blocks.
	node_flags.disable_legacy_bootstrap = true;
	auto node1 = system.add_node (nano::node_config (24000, system.logging), node_flags);
	nano::genesis genesis;
	nano::keypair key;
	// Generating test chain
	auto send1 (std::make_shared<nano::state_block> (nano::test_genesis_key.pub, genesis.hash (), nano::test_genesis_key.pub, nano::genesis_amount - nano::Gxrb_ratio, key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (genesis.hash ())));
	ASSERT_EQ (nano::process_result::progress, node1->process (*send1).code);
	auto open (std::make_shared<nano::open_block> (send1->hash (), key.pub, key.pub, key.prv, key.pub, *system.work.generate (key.pub)));
	ASSERT_EQ (nano::process_result::progress, node1->process (*open).code);
	// Start lazy bootstrap with account
	auto node2 = system.add_node (nano::node_config (24001, system.logging), node_flags);
	node2->network.udp_channels.insert (node1->network.endpoint (), node1->network_params.protocol.protocol_version);
	enable_ipc_transport_tcp (node2->config.ipc_config.transport_tcp);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node2, node_rpc_config);
	nano::rpc_config rpc_config (true);
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "account_info");
	request.put ("account", key.pub.to_account ());
	test_response response (request, rpc.config.port, system.io_ctx);
	system.deadline_set (5s);
	while (response.status == 0)
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	ASSERT_EQ (200, response.status);
	// node2 doesn't have the account yet, so the RPC itself reports an error.
	boost::optional<std::string> account_error (response.json.get_optional<std::string> ("error"));
	ASSERT_TRUE (account_error.is_initialized ());
	// Check processed blocks
	system.deadline_set (10s);
	while (node2->bootstrap_initiator.in_progress ())
	{
		ASSERT_NO_ERROR (system.poll ());
	}
	node2->block_processor.flush ();
	// The lazy bootstrap triggered by the failed lookup fetched the chain.
	ASSERT_TRUE (node2->ledger.block_exists (send1->hash ()));
	ASSERT_TRUE (node2->ledger.block_exists (open->hash ()));
}
| 1 | 16,017 | May be better use nano::keypair that generates ramdom value? | nanocurrency-nano-node | cpp |
@@ -25,7 +25,11 @@ const isPromiseLike = require('../utils').isPromiseLike;
function addToOperationsList(bulkOperation, docType, document) {
// Get the bsonSize
const bsonSize = bson.calculateObjectSize(document, {
- checkKeys: false
+ checkKeys: false,
+
+ // Since we don't know what the user selected for BSON options here,
+ // err on the safe side, and check the size with ignoreUndefined: false.
+ ignoreUndefined: false
});
// Throw error if the doc is bigger than the max BSON size | 1 | 'use strict';
const common = require('./common');
const BulkOperationBase = common.BulkOperationBase;
const utils = require('../utils');
const toError = utils.toError;
const handleCallback = utils.handleCallback;
const BulkWriteResult = common.BulkWriteResult;
const Batch = common.Batch;
const mergeBatchResults = common.mergeBatchResults;
const executeOperation = utils.executeOperation;
const MongoWriteConcernError = require('mongodb-core').MongoWriteConcernError;
const handleMongoWriteConcernError = require('./common').handleMongoWriteConcernError;
const bson = common.bson;
const isPromiseLike = require('../utils').isPromiseLike;
/**
 * Add to internal list of Operations
 *
 * Validates the document's size, rolls the current batch over when adding it
 * would exceed server limits, and appends it to the current batch while
 * updating the bulk operation's running counters.
 *
 * @param {OrderedBulkOperation} bulkOperation
 * @param {number} docType number indicating the document type
 * @param {object} document
 * @return {OrderedBulkOperation}
 */
function addToOperationsList(bulkOperation, docType, document) {
  // Reject arrays up front, before any bulk state (insertedIds, batches,
  // counters) is mutated, so a throw leaves the operation consistent.
  if (Array.isArray(document)) {
    throw toError('operation passed in cannot be an Array');
  }

  // Get the bsonSize
  const bsonSize = bson.calculateObjectSize(document, {
    checkKeys: false,

    // Since we don't know what the user selected for BSON options here,
    // err on the safe side, and check the size with ignoreUndefined: false.
    ignoreUndefined: false
  });

  // Throw error if the doc is bigger than the max BSON size
  if (bsonSize >= bulkOperation.s.maxBatchSizeBytes)
    throw toError('document is larger than the maximum size ' + bulkOperation.s.maxBatchSizeBytes);

  // Create a new batch object if we don't have a current one
  if (bulkOperation.s.currentBatch == null)
    bulkOperation.s.currentBatch = new Batch(docType, bulkOperation.s.currentIndex);

  const maxKeySize = bulkOperation.s.maxKeySize;

  // Check if we need to create a new batch: the server's per-batch count or
  // byte limit would be exceeded, or the operation type changed (an ordered
  // bulk must keep same-type operations grouped to preserve order).
  if (
    bulkOperation.s.currentBatchSize + 1 >= bulkOperation.s.maxWriteBatchSize ||
    bulkOperation.s.currentBatchSizeBytes + maxKeySize + bsonSize >=
      bulkOperation.s.maxBatchSizeBytes ||
    bulkOperation.s.currentBatch.batchType !== docType
  ) {
    // Save the batch to the execution stack
    bulkOperation.s.batches.push(bulkOperation.s.currentBatch);

    // Create a new batch
    bulkOperation.s.currentBatch = new Batch(docType, bulkOperation.s.currentIndex);

    // Reset the current size trackers
    bulkOperation.s.currentBatchSize = 0;
    bulkOperation.s.currentBatchSizeBytes = 0;
  }

  // Record the (possibly driver-generated) _id for inserts so callers can
  // correlate results with their original documents.
  if (docType === common.INSERT) {
    bulkOperation.s.bulkResult.insertedIds.push({
      index: bulkOperation.s.currentIndex,
      _id: document._id
    });
  }

  bulkOperation.s.currentBatch.originalIndexes.push(bulkOperation.s.currentIndex);
  bulkOperation.s.currentBatch.operations.push(document);
  bulkOperation.s.currentBatchSize += 1;
  bulkOperation.s.currentBatchSizeBytes += maxKeySize + bsonSize;
  bulkOperation.s.currentIndex += 1;

  // Return bulkOperation
  return bulkOperation;
}
/**
 * Create a new OrderedBulkOperation instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @property {number} length Get the number of operations in the bulk.
 * @return {OrderedBulkOperation} a OrderedBulkOperation instance.
 */
class OrderedBulkOperation extends BulkOperationBase {
  constructor(topology, collection, options) {
    // Copy into a fresh object (handles a missing `options` too) so the
    // caller's options object is not mutated when the internal
    // addToOperationsList hook is attached.
    options = Object.assign({}, options, { addToOperationsList });
    super(topology, collection, options, true);
  }

  /**
   * The callback format for results
   * @callback OrderedBulkOperation~resultCallback
   * @param {MongoError} error An error instance representing the error during the execution.
   * @param {BulkWriteResult} result The bulk write result.
   */

  /**
   * Execute the ordered bulk operation
   *
   * @method
   * @param {object} [options] Optional settings.
   * @param {(number|string)} [options.w] The write concern.
   * @param {number} [options.wtimeout] The write concern timeout.
   * @param {boolean} [options.j=false] Specify a journal write concern.
   * @param {boolean} [options.fsync=false] Specify a file sync write concern.
   * @param {OrderedBulkOperation~resultCallback} [callback] The result callback
   * @throws {MongoError}
   * @return {Promise} returns Promise if no callback passed
   */
  execute(_writeConcern, options, callback) {
    const ret = this.bulkExecute(_writeConcern, options, callback);
    // bulkExecute returns a (possibly rejected) promise when validation
    // failed or no callback was supplied; pass it straight through.
    if (isPromiseLike(ret)) {
      return ret;
    }

    options = ret.options;
    callback = ret.callback;

    return executeOperation(this.s.topology, executeCommands, [this, options, callback]);
  }
}
/**
 * Execute next write command in a chain
 *
 * Pops one batch off the queue, runs it, merges its result into the bulk
 * result, and recurses until the queue is empty or an error stops the chain.
 *
 * @param {OrderedBulkOperation} bulkOperation
 * @param {object} options
 * @param {function} callback
 */
function executeCommands(bulkOperation, options, callback) {
  // Nothing left to run: surface the accumulated result.
  if (bulkOperation.s.batches.length === 0) {
    return handleCallback(callback, null, new BulkWriteResult(bulkOperation.s.bulkResult));
  }

  // Ordered execution of the command
  const batch = bulkOperation.s.batches.shift();

  function resultHandler(err, result) {
    // A driver-level error (anything carrying `driver` or `message`) that is
    // not a write concern error terminates the whole chain immediately.
    const isDriverError = err && (err.driver || err.message);
    if (isDriverError && !(err instanceof MongoWriteConcernError)) {
      return handleCallback(callback, err);
    }

    // If we have an error
    if (err) err.ok = 0;

    if (err instanceof MongoWriteConcernError) {
      return handleMongoWriteConcernError(batch, bulkOperation.s.bulkResult, true, err, callback);
    }

    // Merge the results together
    const writeResult = new BulkWriteResult(bulkOperation.s.bulkResult);
    const mergeResult = mergeBatchResults(true, batch, bulkOperation.s.bulkResult, err, result);
    if (mergeResult != null) {
      return handleCallback(callback, null, writeResult);
    }

    // Ordered bulks stop at the first write error.
    if (bulkOperation.handleWriteError(callback, writeResult)) return;

    // Execute the next command in line
    executeCommands(bulkOperation, options, callback);
  }

  bulkOperation.finalOptionsHandler({ options, batch, resultHandler }, callback);
}
/**
 * Returns an ordered bulk operation object (factory used by the driver)
 * @ignore
 */
function initializeOrderedBulkOp(topology, collection, options) {
  return new OrderedBulkOperation(topology, collection, options);
}

// Expose the class alongside the factory for internal consumers.
initializeOrderedBulkOp.OrderedBulkOperation = OrderedBulkOperation;
module.exports = initializeOrderedBulkOp;
module.exports.Bulk = OrderedBulkOperation;
@@ -257,6 +257,10 @@ public class SyncManager {
break;
}
updateSync(sync, SyncState.Status.DONE, 100, callback);
+ } catch (RestClient.RefreshTokenRevokedException re) {
+ logger.e(this, "runSync", re);
+ // Do not do anything - let the logout go through!
+
} catch (Exception e) {
logger.e(this, "runSync", e);
// Update status to failed | 1 | /*
* Copyright (c) 2014-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.smartsync.manager;
import android.util.Log;
import com.salesforce.androidsdk.accounts.UserAccount;
import com.salesforce.androidsdk.analytics.EventBuilderHelper;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.auth.HttpAccess;
import com.salesforce.androidsdk.rest.ApiVersionStrings;
import com.salesforce.androidsdk.rest.RestClient;
import com.salesforce.androidsdk.rest.RestRequest;
import com.salesforce.androidsdk.rest.RestResponse;
import com.salesforce.androidsdk.smartstore.app.SmartStoreSDKManager;
import com.salesforce.androidsdk.smartstore.store.SmartStore;
import com.salesforce.androidsdk.smartstore.store.SmartStore.SmartStoreException;
import com.salesforce.androidsdk.smartsync.app.SmartSyncSDKManager;
import com.salesforce.androidsdk.smartsync.target.AdvancedSyncUpTarget;
import com.salesforce.androidsdk.smartsync.target.SyncDownTarget;
import com.salesforce.androidsdk.smartsync.target.SyncUpTarget;
import com.salesforce.androidsdk.smartsync.util.SyncManagerLogger;
import com.salesforce.androidsdk.smartsync.util.SyncOptions;
import com.salesforce.androidsdk.smartsync.util.SyncState;
import com.salesforce.androidsdk.smartsync.util.SyncState.MergeMode;
import com.salesforce.androidsdk.util.JSONObjectHelper;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
* Sync Manager
*/
public class SyncManager {
// Constants
private static final int UNCHANGED = -1;
private static final String TAG = "SyncManager";
// For user agent
private static final String SMART_SYNC = "SmartSync";
private static final String FEATURE_SMART_SYNC = "SY";
// Static member
private static Map<String, SyncManager> INSTANCES = new HashMap<String, SyncManager>();
// Members
private Set<Long> runningSyncIds = new HashSet<Long>();
public final String apiVersion;
private final ExecutorService threadPool = Executors.newFixedThreadPool(1);
private SmartStore smartStore;
private RestClient restClient;
private SyncManagerLogger logger;
/**
* Private constructor
* @param smartStore
*/
private SyncManager(SmartStore smartStore, RestClient restClient) {
apiVersion = ApiVersionStrings.getVersionNumber(SalesforceSDKManager.getInstance().getAppContext());
this.smartStore = smartStore;
this.restClient = restClient;
this.logger = new SyncManagerLogger(Log.INFO);
SyncState.setupSyncsSoupIfNeeded(smartStore);
}
/**
* Returns the instance of this class associated with current user.
*
* @return Instance of this class.
*/
public static synchronized SyncManager getInstance() {
return getInstance(null, null);
}
/**
* Returns the instance of this class associated with this user account.
*
* @param account User account.
* @return Instance of this class.
*/
public static synchronized SyncManager getInstance(UserAccount account) {
return getInstance(account, null);
}
/**
* Returns the instance of this class associated with this user and community.
* Sync manager returned is ready to use.
*
* @param account User account.
* @param communityId Community ID.
* @return Instance of this class.
*/
public static synchronized SyncManager getInstance(UserAccount account, String communityId) {
return getInstance(account, communityId, null);
}
/**
* Returns the instance of this class associated with this user, community and smartstore.
*
* @param account User account. Pass null to user current user.
* @param communityId Community ID. Pass null if not applicable
* @param smartStore SmartStore instance. Pass null to use current user default smartstore.
*
* @return Instance of this class.
*/
public static synchronized SyncManager getInstance(UserAccount account, String communityId, SmartStore smartStore) {
if (account == null) {
account = SmartStoreSDKManager.getInstance().getUserAccountManager().getCurrentUser();
}
if (smartStore == null) {
smartStore = SmartSyncSDKManager.getInstance().getSmartStore(account, communityId);
}
String uniqueId = (account != null ? account.getUserId() : "") + ":"
+ smartStore.getDatabase().getPath();
SyncManager instance = INSTANCES.get(uniqueId);
if (instance == null) {
RestClient restClient = null;
/*
* If account is still null, there is no user logged in, which means, the default
* RestClient should be set to the unauthenticated RestClient instance.
*/
if (account == null) {
restClient = SalesforceSDKManager.getInstance().getClientManager().peekUnauthenticatedRestClient();
} else {
restClient = SalesforceSDKManager.getInstance().getClientManager().peekRestClient(account);
}
instance = new SyncManager(smartStore, restClient);
INSTANCES.put(uniqueId, instance);
}
SalesforceSDKManager.getInstance().registerUsedAppFeature(FEATURE_SMART_SYNC);
return instance;
}
/**
* Resets all the sync managers
*/
public static synchronized void reset() {
INSTANCES.clear();
}
/**
* Get details of a sync state
* @param syncId
* @return
* @throws JSONException
*/
public SyncState getSyncStatus(long syncId) throws JSONException {
return SyncState.byId(smartStore, syncId);
}
/**
* Create and run a sync down that will overwrite any modified records
* @param target
* @param soupName
* @param callback
* @return
* @throws JSONException
*/
public SyncState syncDown(SyncDownTarget target, String soupName, SyncUpdateCallback callback) throws JSONException {
SyncOptions options = SyncOptions.optionsForSyncDown(MergeMode.OVERWRITE);
return syncDown(target, options, soupName, callback);
}
/**
* Create and run a sync down
* @param target
* @param options
*@param soupName
* @param callback
* @return
* @throws JSONException
*/
public SyncState syncDown(SyncDownTarget target, SyncOptions options, String soupName, SyncUpdateCallback callback) throws JSONException {
SyncState sync = SyncState.createSyncDown(smartStore, target, options, soupName);
logger.d(this, "syncDown:", sync);
runSync(sync, callback);
return sync;
}
/**
* Re-run sync but only fetch new/modified records
* @param syncId
* @param callback
* @throws JSONException
*/
public SyncState reSync(long syncId, SyncUpdateCallback callback) throws JSONException {
if (runningSyncIds.contains(syncId)) {
throw new SmartSyncException("Cannot run reSync:" + syncId + ": still running");
}
SyncState sync = SyncState.byId(smartStore, syncId);
if (sync == null) {
throw new SmartSyncException("Cannot run reSync:" + syncId + ": no sync found");
}
if (sync.getType() != SyncState.Type.syncDown) {
throw new SmartSyncException("Cannot run reSync:" + syncId + ": wrong type:" + sync.getType());
}
sync.setTotalSize(-1);
logger.d(this, "reSync:", sync);
runSync(sync, callback);
return sync;
}
/**
* Run a sync
* @param sync
* @param callback
*/
public void runSync(final SyncState sync, final SyncUpdateCallback callback) {
updateSync(sync, SyncState.Status.RUNNING, 0, callback);
threadPool.execute(new Runnable() {
@Override
public void run() {
try {
switch (sync.getType()) {
case syncDown:
syncDown(sync, callback);
break;
case syncUp:
syncUp(sync, callback);
break;
}
updateSync(sync, SyncState.Status.DONE, 100, callback);
} catch (Exception e) {
logger.e(this, "runSync", e);
// Update status to failed
updateSync(sync, SyncState.Status.FAILED, UNCHANGED, callback);
}
}
});
}
/**
* Create and run a sync up
* @param target
* @param options
* @param soupName
* @param callback
* @return
* @throws JSONException
*/
public SyncState syncUp(SyncUpTarget target, SyncOptions options, String soupName, SyncUpdateCallback callback) throws JSONException {
SyncState sync = SyncState.createSyncUp(smartStore, target, options, soupName);
logger.d(this, "syncUp:", sync);
runSync(sync, callback);
return sync;
}
/**
* Removes local copies of records that have been deleted on the server
* or do not match the query results on the server anymore.
*
* @param syncId Sync ID.
* @throws JSONException
* @throws IOException
*/
public void cleanResyncGhosts(long syncId) throws JSONException, IOException {
if (runningSyncIds.contains(syncId)) {
throw new SmartSyncException("Cannot run cleanResyncGhosts:" + syncId + ": still running");
}
final SyncState sync = SyncState.byId(smartStore, syncId);
if (sync == null) {
throw new SmartSyncException("Cannot run cleanResyncGhosts:" + syncId + ": no sync found");
}
if (sync.getType() != SyncState.Type.syncDown) {
throw new SmartSyncException("Cannot run cleanResyncGhosts:" + syncId + ": wrong type:" + sync.getType());
}
logger.d(this, "cleanResyncGhosts:", sync);
final String soupName = sync.getSoupName();
final SyncDownTarget target = (SyncDownTarget) sync.getTarget();
// Ask target to clean up ghosts
final int localIdSize = target.cleanGhosts(this, soupName);
threadPool.execute(new Runnable() {
@Override
public void run() {
final JSONObject attributes = new JSONObject();
if (localIdSize > 0) {
try {
attributes.put("numRecords", localIdSize);
attributes.put("syncId", sync.getId());
attributes.put("syncTarget", target.getClass().getName());
EventBuilderHelper.createAndStoreEventSync("cleanResyncGhosts", null, TAG, attributes);
} catch (JSONException e) {
Log.e(TAG, "Unexpected json error for cleanResyncGhosts sync tag: " + sync.getId(), e);
}
}
}
});
}
/**
* Update sync with new status, progress, totalSize
* @param sync
* @param status
* @param progress pass -1 to keep the current value
* @param callback
*/
private void updateSync(SyncState sync, SyncState.Status status, int progress, SyncUpdateCallback callback) {
try {
sync.setStatus(status);
if (progress != UNCHANGED) {
sync.setProgress(progress);
}
switch (status) {
case NEW:
break;
case RUNNING:
runningSyncIds.add(sync.getId());
break;
case DONE:
case FAILED:
int totalSize = sync.getTotalSize();
final JSONObject attributes = new JSONObject();
try {
if (totalSize > 0) {
attributes.put("numRecords", totalSize);
}
attributes.put("syncId", sync.getId());
attributes.put("syncTarget", sync.getTarget().getClass().getName());
} catch (JSONException e) {
logger.e(this, "Exception thrown while building attributes", e);
}
EventBuilderHelper.createAndStoreEvent(sync.getType().name(), null, TAG, attributes);
runningSyncIds.remove(sync.getId());
break;
}
sync.save(smartStore);
} catch (JSONException e) {
logger.e(this, "Unexpected json error for sync: " + sync.getId(), e);
} catch (SmartStoreException e) {
logger.e(this, "Unexpected smart store error for sync: " + sync.getId(), e);
}
finally {
callback.onUpdate(sync);
}
}
private void syncUp(SyncState sync, SyncUpdateCallback callback) throws Exception {
final String soupName = sync.getSoupName();
final SyncUpTarget target = (SyncUpTarget) sync.getTarget();
final SyncOptions options = sync.getOptions();
final Set<String> dirtyRecordIds = target.getIdsOfRecordsToSyncUp(this, soupName);
int totalSize = dirtyRecordIds.size();
sync.setTotalSize(totalSize);
updateSync(sync, SyncState.Status.RUNNING, 0, callback);
int i = 0;
for (final String id : dirtyRecordIds) {
JSONObject record = target.getFromLocalStore(this, soupName, id);
syncUpOneRecord(target, soupName, record, options);
// Updating status
int progress = (i + 1) * 100 / totalSize;
if (progress < 100) {
updateSync(sync, SyncState.Status.RUNNING, progress, callback);
}
// Incrementing i
i++;
}
}
private void syncUpOneRecord(SyncUpTarget target, String soupName,
JSONObject record, SyncOptions options) throws JSONException, IOException {
logger.d(this, "syncUpOneRecord", record);
/*
* Checks if we are attempting to sync up a record that has been updated
* on the server AFTER the client's last sync down. If the merge mode
* passed in tells us to leave the record alone under these
* circumstances, we will do nothing and return here.
*/
final MergeMode mergeMode = options.getMergeMode();
if (mergeMode == MergeMode.LEAVE_IF_CHANGED &&
!target.isNewerThanServer(this, record)) {
// Nothing to do for this record
logger.d(this, "syncUpOneRecord: Record not synced since client does not have the latest from server:", record);
return;
}
// Advanced sync up target take it from here
if (target instanceof AdvancedSyncUpTarget) {
((AdvancedSyncUpTarget) target).syncUpRecord(this, record, options.getFieldlist(), options.getMergeMode());
return;
}
// Do we need to do a create, update or delete
boolean locallyDeleted = target.isLocallyDeleted(record);
boolean locallyCreated = target.isLocallyCreated(record);
boolean locallyUpdated = target.isLocallyUpdated(record);
Action action = null;
if (locallyDeleted)
action = Action.delete;
else if (locallyCreated)
action = Action.create;
else if (locallyUpdated)
action = Action.update;
if (action == null) {
// Nothing to do for this record
return;
}
// Create/update/delete record on server and update smartstore
String recordServerId;
int statusCode;
switch (action) {
case create:
recordServerId = target.createOnServer(this, record, options.getFieldlist());
if (recordServerId != null) {
record.put(target.getIdFieldName(), recordServerId);
target.cleanAndSaveInLocalStore(this, soupName, record);
}
break;
case delete:
statusCode = (locallyCreated
? HttpURLConnection.HTTP_NOT_FOUND // if locally created it can't exist on the server - we don't need to actually do the deleteOnServer call
: target.deleteOnServer(this, record));
if (RestResponse.isSuccess(statusCode) || statusCode == HttpURLConnection.HTTP_NOT_FOUND) {
target.deleteFromLocalStore(this, soupName, record);
}
break;
case update:
statusCode = target.updateOnServer(this, record, options.getFieldlist());
if (RestResponse.isSuccess(statusCode)) {
target.cleanAndSaveInLocalStore(this, soupName, record);
}
// Handling remotely deleted records
else if (statusCode == HttpURLConnection.HTTP_NOT_FOUND) {
if (mergeMode == MergeMode.OVERWRITE) {
recordServerId = target.createOnServer(this, record, options.getFieldlist());
if (recordServerId != null) {
record.put(target.getIdFieldName(), recordServerId);
target.cleanAndSaveInLocalStore(this, soupName, record);
}
}
else {
// Leave local record alone
}
}
break;
}
}
private void syncDown(SyncState sync, SyncUpdateCallback callback) throws Exception {
String soupName = sync.getSoupName();
SyncDownTarget target = (SyncDownTarget) sync.getTarget();
MergeMode mergeMode = sync.getMergeMode();
long maxTimeStamp = sync.getMaxTimeStamp();
JSONArray records = target.startFetch(this, maxTimeStamp);
int countSaved = 0;
int totalSize = target.getTotalSize();
sync.setTotalSize(totalSize);
updateSync(sync, SyncState.Status.RUNNING, 0, callback);
final String idField = sync.getTarget().getIdFieldName();
// Get ids of records to leave alone
Set<String> idsToSkip = null;
if (mergeMode == MergeMode.LEAVE_IF_CHANGED) {
idsToSkip = target.getIdsToSkip(this, soupName);
}
while (records != null) {
// Figure out records to save
JSONArray recordsToSave = idsToSkip == null ? records : removeWithIds(records, idsToSkip, idField);
// Save to smartstore.
target.saveRecordsToLocalStore(this, soupName, recordsToSave);
countSaved += records.length();
maxTimeStamp = Math.max(maxTimeStamp, target.getLatestModificationTimeStamp(records));
// Update sync status.
if (countSaved < totalSize) {
updateSync(sync, SyncState.Status.RUNNING, countSaved*100 / totalSize, callback);
}
// Fetch next records, if any.
records = target.continueFetch(this);
}
sync.setMaxTimeStamp(maxTimeStamp);
}
private JSONArray removeWithIds(JSONArray records, Set<String> idsToSkip, String idField) throws JSONException {
JSONArray arr = new JSONArray();
for (int i = 0; i < records.length(); i++) {
JSONObject record = records.getJSONObject(i);
// Keep ?
String id = JSONObjectHelper.optString(record, idField);
if (id == null || !idsToSkip.contains(id)) {
arr.put(record);
}
}
return arr;
}
/**
* Send request after adding user-agent header that says SmartSync
* @param restRequest
* @return
* @throws IOException
*/
public RestResponse sendSyncWithSmartSyncUserAgent(RestRequest restRequest) throws IOException {
logger.d(this, "sendSyncWithSmartSyncUserAgent:request", restRequest);
RestResponse restResponse = restClient.sendSync(restRequest, new HttpAccess.UserAgentInterceptor(SalesforceSDKManager.getInstance().getUserAgent(SMART_SYNC)));
if (restResponse.isSuccess()) {
logger.d(this, "sendSyncWithSmartSyncUserAgent:response", restResponse);
}
else {
logger.w(this, "sendSyncWithSmartSyncUserAgent:response", restResponse);
}
return restResponse;
}
/**
* @return SmartStore used by this SyncManager
*/
public SmartStore getSmartStore() {
return smartStore;
}
/**
* return SyncManagerLogger used by this SyncManager
*/
public SyncManagerLogger getLogger() {
return logger;
}
/**
* Enum for action
*
*/
public enum Action {
create,
update,
delete
}
/**
* Exception thrown by smart sync manager
*
*/
public static class SmartSyncException extends RuntimeException {
public SmartSyncException(String message) {
super(message);
}
public SmartSyncException(Throwable e) {
super(e);
}
private static final long serialVersionUID = 1L;
}
/**
* Sets the rest client to be used.
*
* @param restClient
*/
public void setRestClient(RestClient restClient) {
this.restClient = restClient;
}
/**
* @return rest client in use
*/
public RestClient getRestClient() {
return this.restClient;
}
/**
* Callback to get sync status udpates
*/
public interface SyncUpdateCallback {
void onUpdate(SyncState sync);
}
}
| 1 | 15,984 | The catch (Exception e) block that follows does an updateSync, which will get a SmartStore instance (while logging out is taking place) - and then terrible things will happen - a database gets created for the outgoing user that won't be openable by the returning user causing the app to crash at logout. This is a somewhat superficial fix - SmartStore should not return an instance for an outgoing user. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -27,6 +27,11 @@ func TestCodestar_WaitUntilStatusAvailable(t *testing.T) {
connection := &CodeStar{}
connectionARN := "mockConnectionARN"
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ m := mocks.NewMockapi(ctrl)
+ m.EXPECT().GetConnection(gomock.Any()).AnyTimes()
+
// WHEN
err := connection.WaitUntilConnectionStatusAvailable(ctx, connectionARN)
| 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package codestar
import (
"context"
"errors"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/codestarconnections"
"github.com/aws/copilot-cli/internal/pkg/aws/codestar/mocks"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
)
func TestCodestar_WaitUntilStatusAvailable(t *testing.T) {
t.Run("times out if connection status not changed to available in allotted time", func(t *testing.T) {
// GIVEN
ctx, cancel := context.WithDeadline(context.Background(), time.Now())
defer cancel()
connection := &CodeStar{}
connectionARN := "mockConnectionARN"
// WHEN
err := connection.WaitUntilConnectionStatusAvailable(ctx, connectionARN)
// THEN
require.EqualError(t, err, "timed out waiting for connection mockConnectionARN status to change from PENDING to AVAILABLE")
})
t.Run("returns a wrapped error on GetConnection call failure", func(t *testing.T) {
// GIVEN
ctrl := gomock.NewController(t)
defer ctrl.Finish()
m := mocks.NewMockapi(ctrl)
m.EXPECT().GetConnection(gomock.Any()).Return(nil, errors.New("some error"))
connection := &CodeStar{
client: m,
}
connectionARN := "mockConnectionARN"
// WHEN
err := connection.WaitUntilConnectionStatusAvailable(context.Background(), connectionARN)
// THEN
require.EqualError(t, err, "get connection details for mockConnectionARN: some error")
})
t.Run("waits until connection status is returned as 'available' and exits gracefully", func(t *testing.T) {
// GIVEN
ctrl := gomock.NewController(t)
defer ctrl.Finish()
m := mocks.NewMockapi(ctrl)
connection := &CodeStar{
client: m,
}
connectionARN := "mockConnectionARN"
m.EXPECT().GetConnection(&codestarconnections.GetConnectionInput{
ConnectionArn: aws.String(connectionARN),
}).Return(
&codestarconnections.GetConnectionOutput{Connection: &codestarconnections.Connection{
ConnectionStatus: aws.String(codestarconnections.ConnectionStatusAvailable),
},
}, nil)
// WHEN
err := connection.WaitUntilConnectionStatusAvailable(context.Background(), connectionARN)
// THEN
require.NoError(t, err)
})
}
func TestCodeStar_GetConnectionARN(t *testing.T) {
t.Run("returns wrapped error if ListConnections is unsuccessful", func(t *testing.T) {
// GIVEN
ctrl := gomock.NewController(t)
m := mocks.NewMockapi(ctrl)
m.EXPECT().ListConnections(gomock.Any()).Return(nil, errors.New("some error"))
connection := &CodeStar{
client: m,
}
// WHEN
ARN, err := connection.GetConnectionARN("someConnectionName")
// THEN
require.EqualError(t, err, "get list of connections in AWS account: some error")
require.Equal(t, "", ARN)
})
t.Run("returns an error if no connections in the account match the one in the pipeline manifest", func(t *testing.T) {
// GIVEN
connectionName := "string cheese"
ctrl := gomock.NewController(t)
m := mocks.NewMockapi(ctrl)
m.EXPECT().ListConnections(gomock.Any()).Return(
&codestarconnections.ListConnectionsOutput{
Connections: []*codestarconnections.Connection{
{ConnectionName: aws.String("gouda")},
{ConnectionName: aws.String("fontina")},
{ConnectionName: aws.String("brie")},
},
}, nil)
connection := &CodeStar{
client: m,
}
// WHEN
ARN, err := connection.GetConnectionARN(connectionName)
// THEN
require.Equal(t, "", ARN)
require.EqualError(t, err, "cannot find a connectionARN associated with string cheese")
})
t.Run("returns a match", func(t *testing.T) {
// GIVEN
connectionName := "string cheese"
ctrl := gomock.NewController(t)
m := mocks.NewMockapi(ctrl)
m.EXPECT().ListConnections(gomock.Any()).Return(
&codestarconnections.ListConnectionsOutput{
Connections: []*codestarconnections.Connection{
{
ConnectionName: aws.String("gouda"),
ConnectionArn: aws.String("notThisOne"),
},
{
ConnectionName: aws.String("string cheese"),
ConnectionArn: aws.String("thisCheesyFakeARN"),
},
{
ConnectionName: aws.String("fontina"),
ConnectionArn: aws.String("norThisOne"),
},
},
}, nil)
connection := &CodeStar{
client: m,
}
// WHEN
ARN, err := connection.GetConnectionARN(connectionName)
// THEN
require.Equal(t, "thisCheesyFakeARN", ARN)
require.NoError(t, err)
})
t.Run("checks all connections and returns a match when paginated", func(t *testing.T) {
// GIVEN
connectionName := "string cheese"
mockNextToken := "next"
ctrl := gomock.NewController(t)
m := mocks.NewMockapi(ctrl)
m.EXPECT().ListConnections(gomock.Any()).Return(
&codestarconnections.ListConnectionsOutput{
Connections: []*codestarconnections.Connection{
{
ConnectionName: aws.String("gouda"),
ConnectionArn: aws.String("notThisOne"),
},
{
ConnectionName: aws.String("fontina"),
ConnectionArn: aws.String("thisCheesyFakeARN"),
},
},
NextToken: &mockNextToken,
}, nil)
m.EXPECT().ListConnections(&codestarconnections.ListConnectionsInput{
NextToken: &mockNextToken,
}).Return(
&codestarconnections.ListConnectionsOutput{
Connections: []*codestarconnections.Connection{
{
ConnectionName: aws.String("string cheese"),
ConnectionArn: aws.String("thisOne"),
},
},
}, nil)
connection := &CodeStar{
client: m,
}
// WHEN
ARN, err := connection.GetConnectionARN(connectionName)
// THEN
require.Equal(t, "thisOne", ARN)
require.NoError(t, err)
})
}
| 1 | 16,846 | huh does this work without a `Return`? | aws-copilot-cli | go |
@@ -134,11 +134,7 @@ module RSpec
# no-op, required metadata has already been set by the `skip`
# method.
rescue Exception => e
- if pending?
- metadata[:execution_result][:pending_exception] = e
- else
- set_exception(e)
- end
+ set_exception(e)
ensure
run_after_each
end | 1 | module RSpec
module Core
# Wrapper for an instance of a subclass of {ExampleGroup}. An instance of
# `Example` is returned by the {ExampleGroup#example example} method
# exposed to examples, {Hooks#before before} and {Hooks#after after} hooks,
# and yielded to {Hooks#around around} hooks.
#
# Useful for configuring logging and/or taking some action based
# on the state of an example's metadata.
#
# @example
#
# RSpec.configure do |config|
# config.before do
# log example.description
# end
#
# config.after do
# log example.description
# end
#
# config.around do |ex|
# log example.description
# ex.run
# end
# end
#
# shared_examples "auditable" do
# it "does something" do
# log "#{example.full_description}: #{auditable.inspect}"
# auditable.should do_something
# end
# end
#
# @see ExampleGroup
class Example
# @private
#
# Used to define methods that delegate to this example's metadata
def self.delegate_to_metadata(*keys)
keys.each { |key| define_method(key) { @metadata[key] } }
end
delegate_to_metadata :execution_result, :file_path, :full_description,
:location, :pending, :skip
# Returns the string submitted to `example` or its aliases (e.g.
# `specify`, `it`, etc). If no string is submitted (e.g. `it { is_expected.to
# do_something }`) it returns the message generated by the matcher if
# there is one, otherwise returns a message including the location of the
# example.
def description
description = metadata[:description].to_s.empty? ?
"example at #{location}" :
metadata[:description]
RSpec.configuration.format_docstrings_block.call(description)
end
# @attr_reader
#
# Returns the first exception raised in the context of running this
# example (nil if no exception is raised)
attr_reader :exception
# @attr_reader
#
# Returns the metadata object associated with this example.
attr_reader :metadata
# @attr_reader
# @private
#
# Returns the example_group_instance that provides the context for
# running this example.
attr_reader :example_group_instance
# @attr_accessor
# @private
attr_accessor :clock
# Creates a new instance of Example.
# @param example_group_class the subclass of ExampleGroup in which this Example is declared
# @param description the String passed to the `it` method (or alias)
# @param metadata additional args passed to `it` to be used as metadata
# @param example_block the block of code that represents the example
def initialize(example_group_class, description, metadata, example_block=nil)
@example_group_class, @options, @example_block = example_group_class, metadata, example_block
@metadata = @example_group_class.metadata.for_example(description, metadata)
@example_group_instance = @exception = nil
@clock = RSpec::Core::Time
end
# @deprecated access options via metadata instead
def options
@options
end
# Returns the example group class that provides the context for running
# this example.
def example_group
@example_group_class
end
alias_method :pending?, :pending
alias_method :skipped?, :skip
# @api private
# instance_evals the block passed to the constructor in the context of
# the instance of {ExampleGroup}.
# @param example_group_instance the instance of an ExampleGroup subclass
def run(example_group_instance, reporter)
@example_group_instance = example_group_instance
RSpec.current_example = self
start(reporter)
begin
if skipped?
Pending.mark_pending! self, skip
elsif !RSpec.configuration.dry_run?
with_around_each_hooks do
begin
run_before_each
@example_group_instance.instance_exec(self, &@example_block)
if pending?
Pending.mark_fixed! self
raise Pending::PendingExampleFixedError,
'Expected example to fail since it is pending, but it passed.',
metadata[:caller]
end
rescue Pending::SkipDeclaredInExample
# no-op, required metadata has already been set by the `skip`
# method.
rescue Exception => e
if pending?
metadata[:execution_result][:pending_exception] = e
else
set_exception(e)
end
ensure
run_after_each
end
end
end
rescue Exception => e
set_exception(e)
ensure
@example_group_instance.instance_variables.each do |ivar|
@example_group_instance.instance_variable_set(ivar, nil)
end
@example_group_instance = nil
begin
assign_generated_description
rescue Exception => e
set_exception(e, "while assigning the example description")
end
end
finish(reporter)
ensure
RSpec.current_example = nil
end
# Wraps a `Proc` and exposes a `run` method for use in {Hooks#around
# around} hooks.
#
# @note Procsy, itself, is not a public API, but we're documenting it
# here to document how to interact with the object yielded to an
# `around` hook.
#
# @example
#
# RSpec.configure do |c|
# c.around do |ex| # Procsy which wraps the example
# if ex.metadata[:key] == :some_value && some_global_condition
# raise "some message"
# end
# ex.run # run delegates to ex.call
# end
# end
class Procsy
# The `metadata` of the {Example} instance.
attr_reader :metadata
Proc.public_instance_methods(false).each do |name|
define_method(name) { |*a, &b| @proc.__send__(name, *a, &b) }
end
alias run call
def initialize(metadata, &block)
@metadata = metadata
@proc = block
end
# @api private
def wrap(&block)
self.class.new(metadata, &block)
end
end
# @private
def any_apply?(filters)
metadata.any_apply?(filters)
end
# @private
def all_apply?(filters)
@metadata.all_apply?(filters) || @example_group_class.all_apply?(filters)
end
# @private
def around_each_hooks
@around_each_hooks ||= example_group.hooks.around_each_hooks_for(self)
end
# @private
#
# Used internally to set an exception in an after hook, which
# captures the exception but doesn't raise it.
def set_exception(exception, context=nil)
if @exception && context != :dont_print
# An error has already been set; we don't want to override it,
# but we also don't want silence the error, so let's print it.
msg = <<-EOS
An error occurred #{context}
#{exception.class}: #{exception.message}
occurred at #{exception.backtrace.first}
EOS
RSpec.configuration.reporter.message(msg)
end
@exception ||= exception
end
# @private
#
# Used internally to set an exception and fail without actually executing
# the example when an exception is raised in before(:all).
def fail_with_exception(reporter, exception)
start(reporter)
set_exception(exception)
finish(reporter)
end
# @private
def instance_exec_with_rescue(context = nil, &block)
@example_group_instance.instance_exec_with_rescue(self, context, &block)
end
# @private
def instance_exec(*args, &block)
@example_group_instance.instance_exec(*args, &block)
end
private
def with_around_each_hooks(&block)
if around_each_hooks.empty?
yield
else
@example_group_class.hooks.run(:around, :each, self, Procsy.new(metadata, &block))
end
rescue Exception => e
set_exception(e, "in an around(:each) hook")
end
def start(reporter)
reporter.example_started(self)
record :started_at => clock.now
end
def finish(reporter)
pending_message = metadata[:execution_result][:pending_message]
if @exception
record_finished 'failed', :exception => @exception
reporter.example_failed self
false
elsif pending_message
record_finished 'pending', :pending_message => pending_message
reporter.example_pending self
true
else
record_finished 'passed'
reporter.example_passed self
true
end
end
def record_finished(status, results={})
finished_at = clock.now
record results.merge(
:status => status,
:finished_at => finished_at,
:run_time => (finished_at - execution_result[:started_at]).to_f
)
end
def run_before_each
@example_group_instance.setup_mocks_for_rspec
@example_group_class.hooks.run(:before, :each, self)
end
def run_after_each
@example_group_class.hooks.run(:after, :each, self)
verify_mocks
rescue Exception => e
set_exception(e, "in an after(:each) hook")
ensure
@example_group_instance.teardown_mocks_for_rspec
end
def verify_mocks
@example_group_instance.verify_mocks_for_rspec
rescue Exception => e
if metadata[:execution_result][:pending_message]
metadata[:execution_result][:pending_fixed] = false
metadata[:pending] = true
@exception = nil
else
set_exception(e, :dont_print)
end
end
def assign_generated_description
return unless RSpec.configuration.expecting_with_rspec?
if metadata[:description_args].empty?
metadata[:description_args] << RSpec::Matchers.generated_description
end
RSpec::Matchers.clear_generated_description
end
def record(results={})
execution_result.update(results)
end
# Returns the reason given for skipping this example, falling back to the
# default "no reason given" message when skip was not set to a String.
def skip_message
  String === skip ? skip : Pending::NO_REASON_GIVEN
end
end
end
end
| 1 | 11,750 | Don't think we want this line, right? (Plus "failing" is spelled wrong). | rspec-rspec-core | rb |
@@ -105,6 +105,10 @@ public class FeedItemMenuHandler {
setItemVisibility(menu, R.id.remove_item, fileDownloaded);
+ if (selectedItem.getFeed().isLocalFeed()) {
+ setItemVisibility(menu, R.id.share_item, false);
+ }
+
return true;
}
| 1 | package de.danoeh.antennapod.menuhandler;
import android.content.Context;
import android.os.Handler;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import com.google.android.material.snackbar.Snackbar;
import androidx.annotation.NonNull;
import androidx.fragment.app.Fragment;
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.activity.MainActivity;
import de.danoeh.antennapod.core.feed.FeedItem;
import de.danoeh.antennapod.core.feed.FeedMedia;
import de.danoeh.antennapod.core.preferences.GpodnetPreferences;
import de.danoeh.antennapod.core.preferences.PlaybackPreferences;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.service.playback.PlaybackService;
import de.danoeh.antennapod.core.storage.DBWriter;
import de.danoeh.antennapod.core.sync.SyncService;
import de.danoeh.antennapod.core.sync.model.EpisodeAction;
import de.danoeh.antennapod.core.util.FeedItemUtil;
import de.danoeh.antennapod.core.util.IntentUtils;
import de.danoeh.antennapod.core.util.ShareUtils;
import de.danoeh.antennapod.dialog.ShareDialog;
/**
 * Handles interactions with the FeedItemMenu.
 *
 * <p>All members are static; this class is a stateless utility and is never
 * instantiated.
 */
public class FeedItemMenuHandler {

    private static final String TAG = "FeedItemMenuHandler";

    private FeedItemMenuHandler() {
    }

    /**
     * This method should be called in the prepare-methods of menus. It changes
     * the visibility of the menu items depending on a FeedItem's attributes.
     *
     * @param menu         An instance of Menu
     * @param selectedItem The FeedItem for which the menu is supposed to be prepared
     * @return Returns true if selectedItem is not null.
     */
    public static boolean onPrepareMenu(Menu menu, FeedItem selectedItem) {
        if (menu == null || selectedItem == null) {
            return false;
        }
        boolean hasMedia = selectedItem.getMedia() != null;
        boolean isPlaying = hasMedia && selectedItem.getState() == FeedItem.State.PLAYING;

        // Skipping only makes sense while the episode is playing.
        if (!isPlaying) {
            setItemVisibility(menu, R.id.skip_episode_item, false);
        }

        // Queue actions: show exactly one of add/remove, and only offer
        // "add" for items that actually have media.
        boolean isInQueue = selectedItem.isTagged(FeedItem.TAG_QUEUE);
        if (!isInQueue) {
            setItemVisibility(menu, R.id.remove_from_queue_item, false);
        }
        if (!(!isInQueue && selectedItem.getMedia() != null)) {
            setItemVisibility(menu, R.id.add_to_queue_item, false);
        }

        if (!ShareUtils.hasLinkToShare(selectedItem)) {
            setItemVisibility(menu, R.id.visit_website_item, false);
        }
        if (selectedItem.getFeed().isLocalFeed()) {
            // Local feeds have no website to visit.
            setItemVisibility(menu, R.id.visit_website_item, false);
        }

        boolean fileDownloaded = hasMedia && selectedItem.getMedia().fileExists();
        setItemVisibility(menu, R.id.remove_new_flag_item, selectedItem.isNew());
        // Show exactly one of "mark read"/"mark unread".
        if (selectedItem.isPlayed()) {
            setItemVisibility(menu, R.id.mark_read_item, false);
        } else {
            setItemVisibility(menu, R.id.mark_unread_item, false);
        }

        // Resetting the position is only useful if there is one to reset.
        if (selectedItem.getMedia() == null || selectedItem.getMedia().getPosition() == 0) {
            setItemVisibility(menu, R.id.reset_position, false);
        }

        // Auto-download toggles: hidden entirely when auto-download is off or
        // the file already exists; otherwise show the opposite of the current
        // per-item setting.
        if (!UserPreferences.isEnableAutodownload() || fileDownloaded) {
            setItemVisibility(menu, R.id.activate_auto_download, false);
            setItemVisibility(menu, R.id.deactivate_auto_download, false);
        } else if (selectedItem.getAutoDownload()) {
            setItemVisibility(menu, R.id.activate_auto_download, false);
        } else {
            setItemVisibility(menu, R.id.deactivate_auto_download, false);
        }

        // Display proper strings when item has no media
        if (!hasMedia && !selectedItem.isPlayed()) {
            setItemTitle(menu, R.id.mark_read_item, R.string.mark_read_no_media_label);
        }
        if (!hasMedia && selectedItem.isPlayed()) {
            setItemTitle(menu, R.id.mark_unread_item, R.string.mark_unread_label_no_media);
        }

        boolean isFavorite = selectedItem.isTagged(FeedItem.TAG_FAVORITE);
        setItemVisibility(menu, R.id.add_to_favorites_item, !isFavorite);
        setItemVisibility(menu, R.id.remove_from_favorites_item, isFavorite);

        setItemVisibility(menu, R.id.remove_item, fileDownloaded);
        return true;
    }

    /**
     * Used to set the visibility of a menu item.
     * This method also does some null-checking so that neither menu nor the menu item are null
     * in order to prevent nullpointer exceptions.
     * @param menu The menu that should be used
     * @param menuId The id of the menu item that will be used
     * @param visibility The new visibility status of given menu item
     * */
    private static void setItemVisibility(Menu menu, int menuId, boolean visibility) {
        if (menu == null) {
            return;
        }
        MenuItem item = menu.findItem(menuId);
        if (item != null) {
            item.setVisible(visibility);
        }
    }

    /**
     * This method allows to replace the String of a menu item with a different one.
     * Like {@link #setItemVisibility(Menu, int, boolean)}, it is null-safe with
     * respect to both the menu and the menu item.
     * @param menu Menu item that should be used
     * @param id The id of the string that is going to be replaced.
     * @param noMedia The id of the new String that is going to be used.
     * */
    public static void setItemTitle(Menu menu, int id, int noMedia) {
        if (menu == null) {
            return;
        }
        MenuItem item = menu.findItem(id);
        if (item != null) {
            item.setTitle(noMedia);
        }
    }

    /**
     * The same method as {@link #onPrepareMenu(Menu, FeedItem)}, but lets the
     * caller also specify a list of menu items that should not be shown.
     *
     * @param excludeIds Menu item that should be excluded
     * @return true if selectedItem is not null.
     */
    public static boolean onPrepareMenu(Menu menu, FeedItem selectedItem, int... excludeIds) {
        if (menu == null || selectedItem == null) {
            return false;
        }
        boolean rc = onPrepareMenu(menu, selectedItem);
        if (rc && excludeIds != null) {
            for (int id : excludeIds) {
                setItemVisibility(menu, id, false);
            }
        }
        return rc;
    }

    /**
     * Default menu handling for the given FeedItem.
     *
     * A Fragment instance, (rather than the more generic Context), is needed as a parameter
     * to support some UI operations, e.g., creating a Snackbar.
     *
     * @return true if the menu item was recognized and handled, false otherwise.
     */
    public static boolean onMenuItemClicked(@NonNull Fragment fragment, int menuItemId,
                                            @NonNull FeedItem selectedItem) {
        @NonNull Context context = fragment.requireContext();
        switch (menuItemId) {
            case R.id.skip_episode_item:
                IntentUtils.sendLocalBroadcast(context, PlaybackService.ACTION_SKIP_CURRENT_EPISODE);
                break;
            case R.id.remove_item:
                DBWriter.deleteFeedMediaOfItem(context, selectedItem.getMedia().getId());
                break;
            case R.id.remove_new_flag_item:
                removeNewFlagWithUndo(fragment, selectedItem);
                break;
            case R.id.mark_read_item:
                selectedItem.setPlayed(true);
                DBWriter.markItemPlayed(selectedItem, FeedItem.PLAYED, true);
                if (GpodnetPreferences.loggedIn()) {
                    FeedMedia media = selectedItem.getMedia();
                    // not all items have media, Gpodder only cares about those that do
                    if (media != null) {
                        // Report the episode as fully played to gpodder.net.
                        EpisodeAction actionPlay = new EpisodeAction.Builder(selectedItem, EpisodeAction.PLAY)
                                .currentTimestamp()
                                .started(media.getDuration() / 1000)
                                .position(media.getDuration() / 1000)
                                .total(media.getDuration() / 1000)
                                .build();
                        SyncService.enqueueEpisodeAction(context, actionPlay);
                    }
                }
                break;
            case R.id.mark_unread_item:
                selectedItem.setPlayed(false);
                DBWriter.markItemPlayed(selectedItem, FeedItem.UNPLAYED, false);
                if (GpodnetPreferences.loggedIn() && selectedItem.getMedia() != null) {
                    EpisodeAction actionNew = new EpisodeAction.Builder(selectedItem, EpisodeAction.NEW)
                            .currentTimestamp()
                            .build();
                    SyncService.enqueueEpisodeAction(context, actionNew);
                }
                break;
            case R.id.add_to_queue_item:
                DBWriter.addQueueItem(context, selectedItem);
                break;
            case R.id.remove_from_queue_item:
                DBWriter.removeQueueItem(context, true, selectedItem);
                break;
            case R.id.add_to_favorites_item:
                DBWriter.addFavoriteItem(selectedItem);
                break;
            case R.id.remove_from_favorites_item:
                DBWriter.removeFavoriteItem(selectedItem);
                break;
            case R.id.reset_position:
                selectedItem.getMedia().setPosition(0);
                if (PlaybackPreferences.getCurrentlyPlayingFeedMediaId() == selectedItem.getMedia().getId()) {
                    // Stop playback if this episode is the one currently playing.
                    PlaybackPreferences.writeNoMediaPlaying();
                    IntentUtils.sendLocalBroadcast(context, PlaybackService.ACTION_SHUTDOWN_PLAYBACK_SERVICE);
                }
                DBWriter.markItemPlayed(selectedItem, FeedItem.UNPLAYED, true);
                break;
            case R.id.activate_auto_download:
                selectedItem.setAutoDownload(true);
                DBWriter.setFeedItemAutoDownload(selectedItem, true);
                break;
            case R.id.deactivate_auto_download:
                selectedItem.setAutoDownload(false);
                DBWriter.setFeedItemAutoDownload(selectedItem, false);
                break;
            case R.id.visit_website_item:
                IntentUtils.openInBrowser(context, FeedItemUtil.getLinkWithFallback(selectedItem));
                break;
            case R.id.share_item:
                ShareDialog shareDialog = ShareDialog.newInstance(selectedItem);
                shareDialog.show((fragment.getActivity().getSupportFragmentManager()), "ShareEpisodeDialog");
                break;
            default:
                Log.d(TAG, "Unknown menuItemId: " + menuItemId);
                return false;
        }
        // Refresh menu state

        return true;
    }

    /**
     * Remove new flag with additional UI logic to allow undo with Snackbar.
     *
     * Undo is useful for Remove new flag, given there is no UI to undo it otherwise
     * ,i.e., there is (context) menu item for add new flag
     */
    public static void removeNewFlagWithUndo(@NonNull Fragment fragment, FeedItem item) {
        if (item == null) {
            return;
        }

        Log.d(TAG, "removeNewFlagWithUndo(" + item.getId() + ")");
        // we're marking it as unplayed since the user didn't actually play it
        // but they don't want it considered 'NEW' anymore
        DBWriter.markItemPlayed(FeedItem.UNPLAYED, item.getId());

        // Deferred auto-delete: only runs if the user does not press "undo"
        // before the Snackbar disappears.
        final Handler h = new Handler(fragment.requireContext().getMainLooper());
        final Runnable r = () -> {
            FeedMedia media = item.getMedia();
            if (media != null && media.hasAlmostEnded() && UserPreferences.isAutoDelete()) {
                DBWriter.deleteFeedMediaOfItem(fragment.requireContext(), media.getId());
            }
        };
        Snackbar snackbar = ((MainActivity) fragment.getActivity()).showSnackbarAbovePlayer(
                R.string.removed_new_flag_label, Snackbar.LENGTH_LONG)
                .setAction(fragment.getString(R.string.undo), v -> {
                    DBWriter.markItemPlayed(FeedItem.NEW, item.getId());
                    // don't forget to cancel the thing that's going to remove the media
                    h.removeCallbacks(r);
                });
        // Schedule slightly past the Snackbar duration so undo always wins the race.
        h.postDelayed(r, (int) Math.ceil(snackbar.getDuration() * 1.05f));
    }
}
| 1 | 17,035 | I think it would be more clear to have local-feed-hiding all in one place (bottom of this method?). Further up the method, there already is some code that hides the website icon, for example. | AntennaPod-AntennaPod | java |
@@ -199,14 +199,6 @@ class BackendMenuBuilder implements BackendMenuBuilderInterface
],
]);
- $menu->getChild('Maintenance')->addChild('Fixtures', [
- 'uri' => '',
- 'extras' => [
- 'name' => $t->trans('caption.fixtures_dummy_content'),
- 'icon' => 'fa-hat-wizard',
- ],
- ]);
-
$menu->getChild('Maintenance')->addChild('Clear the cache', [
'uri' => $this->urlGenerator->generate('bolt_clear_cache'),
'extras' => [ | 1 | <?php
declare(strict_types=1);
namespace Bolt\Menu;
use Bolt\Configuration\Config;
use Bolt\Content\ContentType;
use Bolt\Repository\ContentRepository;
use Bolt\Twig\ContentExtension;
use Knp\Menu\FactoryInterface;
use Knp\Menu\ItemInterface;
use Symfony\Component\Routing\Generator\UrlGeneratorInterface;
use Symfony\Contracts\Translation\TranslatorInterface;
/**
 * Builds the back-end (admin) sidebar menu and serializes it to the array
 * structure consumed by the front-end.
 */
class BackendMenuBuilder implements BackendMenuBuilderInterface
{
    /** Maximum number of latest records shown per content type in the submenu. */
    public const MAX_LATEST_RECORDS = 5;

    /** @var FactoryInterface */
    private $menuFactory;

    /** @var Config */
    private $config;

    /** @var ContentRepository */
    private $contentRepository;

    /** @var UrlGeneratorInterface */
    private $urlGenerator;

    /** @var TranslatorInterface */
    private $translator;

    /** @var ContentExtension */
    private $contentExtension;

    public function __construct(
        FactoryInterface $menuFactory,
        Config $config,
        ContentRepository $contentRepository,
        UrlGeneratorInterface $urlGenerator,
        TranslatorInterface $translator,
        ContentExtension $contentExtension
    ) {
        $this->menuFactory = $menuFactory;
        $this->config = $config;
        $this->contentRepository = $contentRepository;
        $this->urlGenerator = $urlGenerator;
        $this->translator = $translator;
        $this->contentExtension = $contentExtension;
    }

    /**
     * Assembles the full admin menu tree: dashboard, one entry per content
     * type, and the Configuration / Maintenance / File Management submenus.
     */
    private function createAdminMenu(): ItemInterface
    {
        $t = $this->translator;

        $menu = $this->menuFactory->createItem('root');

        $menu->addChild('Dashboard', [
            'uri' => $this->urlGenerator->generate('bolt_dashboard'),
            'extras' => [
                'name' => $t->trans('caption.dashboard'),
                'icon' => 'fa-tachometer-alt',
            ],
        ]);

        $menu->addChild('Content', ['extras' => [
            'name' => $t->trans('caption.content'),
            'type' => 'separator',
            'icon' => 'fa-file',
        ]]);

        // One menu entry per configured content type, each with a submenu of
        // its latest records.
        /** @var ContentType[] $contentTypes */
        $contentTypes = $this->config->get('contenttypes');
        foreach ($contentTypes as $contentType) {
            $menu->addChild($contentType->getSlug(), [
                'uri' => $this->urlGenerator->generate('bolt_content_overview', ['contentType' => $contentType->getSlug()]),
                'extras' => [
                    'name' => $contentType['name'],
                    'singular_name' => $contentType['singular_name'],
                    'slug' => $contentType->getSlug(),
                    'singular_slug' => $contentType['singular_slug'],
                    'icon' => $contentType['icon_many'],
                    'link_new' => $this->urlGenerator->generate('bolt_content_new', ['contentType' => $contentType->getSlug()]),
                    'singleton' => $contentType['singleton'],
                    'active' => $contentType->getSlug() === 'pages',
                    'submenu' => $this->getLatestRecords($contentType),
                ],
            ]);
        }

        $menu->addChild('Settings', ['extras' => [
            'name' => $t->trans('caption.settings'),
            'type' => 'separator',
            'icon' => 'fa-wrench',
        ]]);

        // Configuration submenu
        $configuration = $menu->addChild('Configuration', ['extras' => [
            'name' => $t->trans('caption.configuration'),
            'icon' => 'fa-sliders-h',
        ]]);

        $configuration->addChild('Users & Permissions', [
            'uri' => $this->urlGenerator->generate('bolt_users'),
            'extras' => [
                'name' => $t->trans('caption.users_permissions'),
                'icon' => 'fa-users',
            ],
        ]);

        $configuration->addChild('Main configuration', [
            'uri' => $this->urlGenerator->generate('bolt_file_edit', [
                'area' => 'config',
                'file' => '/bolt/config.yaml',
            ]),
            'extras' => [
                'name' => $t->trans('caption.main_configuration'),
                'icon' => 'fa-cog',
            ],
        ]);

        $configuration->addChild('ContentTypes', [
            'uri' => $this->urlGenerator->generate('bolt_file_edit', [
                'area' => 'config',
                'file' => '/bolt/contenttypes.yaml',
            ]),
            'extras' => [
                'name' => $t->trans('caption.contenttypes'),
                'icon' => 'fa-object-group',
            ],
        ]);

        $configuration->addChild('Taxonomies', [
            'uri' => $this->urlGenerator->generate('bolt_file_edit', [
                'area' => 'config',
                'file' => '/bolt/taxonomy.yaml',
            ]),
            'extras' => [
                'name' => $t->trans('caption.taxonomies'),
                'icon' => 'fa-tags',
            ],
        ]);

        $configuration->addChild('Menu set up', [
            'uri' => $this->urlGenerator->generate('bolt_file_edit', [
                'area' => 'config',
                'file' => '/bolt/menu.yaml',
            ]),
            'extras' => [
                'name' => $t->trans('caption.menu_setup'),
                // NOTE(review): 'separator' on a submenu child looks unintentional
                // (it is kept for behavioral parity) — confirm whether it belongs here.
                'type' => 'separator',
                'icon' => 'fa-list',
            ],
        ]);

        $configuration->addChild('Routing set up', [
            'uri' => $this->urlGenerator->generate('bolt_file_edit', [
                'area' => 'config',
                'file' => '/routes.yaml',
            ]),
            'extras' => [
                'name' => $t->trans('caption.routing_setup'),
                'icon' => 'fa-directions',
            ],
        ]);

        $configuration->addChild('All configuration files', [
            'uri' => $this->urlGenerator->generate('bolt_filemanager', ['area' => 'config']),
            'extras' => [
                'name' => $t->trans('caption.all_configuration_files'),
                'icon' => 'fa-cogs',
            ],
        ]);

        // Maintenance submenu
        $maintenance = $menu->addChild('Maintenance', ['extras' => [
            'name' => $t->trans('caption.maintenance'),
            'icon' => 'fa-tools',
        ]]);

        $maintenance->addChild('Bolt API', [
            'uri' => $this->urlGenerator->generate('api_entrypoint'),
            'extras' => [
                'name' => $t->trans('caption.api'),
                'icon' => 'fa-code',
            ],
        ]);

        $maintenance->addChild('Check database', [
            'uri' => '',
            'extras' => [
                'name' => $t->trans('caption.check_database'),
                'icon' => 'fa-database',
            ],
        ]);

        $maintenance->addChild('Fixtures', [
            'uri' => '',
            'extras' => [
                'name' => $t->trans('caption.fixtures_dummy_content'),
                'icon' => 'fa-hat-wizard',
            ],
        ]);

        $maintenance->addChild('Clear the cache', [
            'uri' => $this->urlGenerator->generate('bolt_clear_cache'),
            'extras' => [
                'name' => $t->trans('caption.clear_cache'),
                'icon' => 'fa-eraser',
            ],
        ]);

        $maintenance->addChild('Installation checks', [
            'uri' => '',
            'extras' => [
                'name' => $t->trans('caption.installation_checks'),
                'icon' => 'fa-clipboard-check',
            ],
        ]);

        $maintenance->addChild('Translations: Messages', [
            'uri' => $this->urlGenerator->generate('translation_index'),
            'extras' => [
                'name' => $t->trans('caption.translations'),
                'icon' => 'fa-language',
            ],
        ]);

        $maintenance->addChild('Extensions', [
            'uri' => '',
            'extras' => [
                'name' => $t->trans('caption.extensions'),
                'icon' => 'fa-plug',
            ],
        ]);

        // @todo When we're close to stable release, make this less prominent
        $maintenance->addChild('The Kitchensink', [
            'uri' => $this->urlGenerator->generate('bolt_kitchensink'),
            'extras' => [
                'name' => $t->trans('caption.kitchensink'),
                'icon' => 'fa-bath',
            ],
        ]);

        $maintenance->addChild('About Bolt', [
            'uri' => $this->urlGenerator->generate('bolt_about'),
            'extras' => [
                'name' => $t->trans('caption.about_bolt'),
                'icon' => 'fa-award',
            ],
        ]);

        // File Management submenu
        $fileManagement = $menu->addChild('File Management', ['extras' => [
            'name' => $t->trans('caption.file_management'),
            'icon' => 'fa-folder-open',
        ]]);

        $fileManagement->addChild('Uploaded files', [
            'uri' => $this->urlGenerator->generate('bolt_filemanager', ['area' => 'files']),
            'extras' => [
                'name' => $t->trans('caption.uploaded_files'),
                'icon' => 'fa-archive',
            ],
        ]);

        $fileManagement->addChild('View/edit Templates', [
            'uri' => $this->urlGenerator->generate('bolt_filemanager', ['area' => 'themes']),
            'extras' => [
                'name' => $t->trans('caption.view_edit_templates'),
                'icon' => 'fa-scroll',
            ],
        ]);

        return $menu;
    }

    /**
     * Returns up to MAX_LATEST_RECORDS recent records of the given content
     * type, as plain arrays suitable for the front-end submenu.
     */
    private function getLatestRecords(ContentType $contentType): array
    {
        $records = $this->contentRepository->findLatest($contentType, self::MAX_LATEST_RECORDS);

        $result = [];
        foreach ($records as $record) {
            $result[] = [
                'id' => $record->getId(),
                'name' => $this->contentExtension->getTitle($record),
                'link' => $this->contentExtension->getLink($record),
                'editLink' => $this->contentExtension->getEditLink($record),
                'icon' => $record->getIcon(),
            ];
        }

        return $result;
    }

    /**
     * Flattens the menu tree built by createAdminMenu() into the nested array
     * structure the front-end expects.
     */
    public function buildAdminMenu(): array
    {
        $menu = $this->createAdminMenu()->getChildren();

        $menuData = [];

        foreach ($menu as $child) {
            $submenu = [];

            if ($child->hasChildren()) {
                foreach ($child->getChildren() as $submenuChild) {
                    $submenu[] = [
                        'name' => $submenuChild->getExtra('name') ?: $submenuChild->getLabel(),
                        'icon' => $submenuChild->getExtra('icon'),
                        'editLink' => $submenuChild->getUri(),
                        'active' => $submenuChild->getExtra('active'),
                    ];
                }
            } else {
                // Content-type entries carry a prebuilt "latest records" submenu.
                $submenu = $child->getExtra('submenu');
            }

            $menuData[] = [
                'name' => $child->getExtra('name') ?: $child->getLabel(),
                'singular_name' => $child->getExtra('singular_name'),
                'slug' => $child->getExtra('slug'),
                'singular_slug' => $child->getExtra('singular_slug'),
                'icon' => $child->getExtra('icon'),
                'link' => $child->getUri(),
                'link_new' => $child->getExtra('link_new'),
                'singleton' => $child->getExtra('singleton'),
                'type' => $child->getExtra('type'),
                'active' => $child->getExtra('active'),
                'submenu' => $submenu,
            ];
        }

        return $menuData;
    }
}
| 1 | 11,654 | Why remove this one? It doesn't work yet, but we'll add it sooner or later. | bolt-core | php |
@@ -304,12 +304,12 @@ type docstruct struct {
DocstoreRevision interface{}
Etag interface{}
- I int `docstore:"i"`
- U uint `docstore:"u"`
- F float64 `docstore:"f"`
- St string `docstore:"st"`
- B bool `docstore:"b"`
- M map[string]interface{} `docstore:"m"`
+ I int
+ U uint
+ F float64
+ St string
+ B bool
+ M map[string]interface{}
}
func nonexistentDoc() docmap { return docmap{KeyField: "doesNotExist"} } | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package drivertest provides a conformance test for implementations of
// driver.
package drivertest // import "gocloud.dev/docstore/drivertest"
import (
"context"
"errors"
"fmt"
"io"
"math"
"reflect"
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"gocloud.dev/docstore"
ds "gocloud.dev/docstore"
"gocloud.dev/docstore/driver"
"gocloud.dev/gcerrors"
)
// Harness describes the functionality test harnesses must provide to run
// conformance tests.
type Harness interface {
	// MakeCollection makes a driver.Collection for testing.
	// The collection should have a single primary key field of type string named
	// drivertest.KeyField.
	MakeCollection(context.Context) (driver.Collection, error)

	// MakeTwoKeyCollection makes a driver.Collection for testing.
	// The collection will consist entirely of HighScore structs (see below), whose
	// two primary key fields are "Game" and "Player", both strings. Use
	// drivertest.HighScoreKey as the key function.
	MakeTwoKeyCollection(ctx context.Context) (driver.Collection, error)

	// MakeAlternateRevisionFieldCollection makes a driver.Collection for testing.
	// The collection should behave like the one returned from MakeCollection, except
	// that the revision field should be drivertest.AlternateRevisionField.
	MakeAlternateRevisionFieldCollection(context.Context) (driver.Collection, error)

	// BeforeDoTypes should return a list of values whose types are valid for the as
	// function given to BeforeDo. For example, if the provider converts Get actions
	// to *GetRequests and write actions to *WriteRequests, then BeforeDoTypes should
	// return []interface{}{&GetRequest{}, &WriteRequest{}}.
	// TODO(jba): consider splitting these by action kind.
	BeforeDoTypes() []interface{}

	// BeforeQueryTypes should return a list of values whose types are valid for the as
	// function given to BeforeQuery.
	BeforeQueryTypes() []interface{}

	// Close closes resources used by the harness.
	Close()
}
// HarnessMaker describes functions that construct a harness for running tests.
// It is called exactly once per test; Harness.Close() will be called when the
// test is complete.
type HarnessMaker func(ctx context.Context, t *testing.T) (Harness, error)
// UnsupportedType is an enum for types not supported by native codecs. We chose
// to describe this negatively (types that aren't supported rather than types
// that are) to make the more inclusive cases easier to write. A driver can
// return nil for CodecTester.UnsupportedTypes, then add values from this enum
// one by one until all tests pass.
type UnsupportedType int

// These are the types known to be unsupported by one or more drivers. Each
// value corresponds to a type-specific conformance test that is skipped for
// drivers reporting the type as unsupported.
const (
	// Uint: the native codec doesn't support any unsigned integer type.
	Uint UnsupportedType = iota
	// Arrays: the native codec doesn't support arrays.
	Arrays
	// NanosecondTimes: the native codec doesn't support full time precision.
	NanosecondTimes
	// BinarySet: the native codec doesn't support [][]byte.
	BinarySet
)
// CodecTester describes functions that encode and decode values using both the
// docstore codec for a provider, and that provider's own "native" codec. The
// conformance tests use it to verify the two codecs round-trip compatibly.
type CodecTester interface {
	// UnsupportedTypes reports which types the native codec cannot handle.
	UnsupportedTypes() []UnsupportedType
	// NativeEncode encodes a value with the provider's native codec.
	NativeEncode(interface{}) (interface{}, error)
	// NativeDecode decodes value into dest with the provider's native codec.
	NativeDecode(value, dest interface{}) error
	// DocstoreEncode encodes a value with the docstore driver codec.
	DocstoreEncode(interface{}) (interface{}, error)
	// DocstoreDecode decodes value into dest with the docstore driver codec.
	DocstoreDecode(value, dest interface{}) error
}
// AsTest represents a test of As functionality.
type AsTest interface {
	// Name should return a descriptive name for the test.
	Name() string
	// CollectionCheck will be called to allow verification of Collection.As.
	CollectionCheck(coll *docstore.Collection) error
	// QueryCheck will be called after calling Query. It should call As on the
	// iterator and verify the results.
	QueryCheck(it *docstore.DocumentIterator) error
	// ErrorCheck is called to allow verification of Collection.ErrorAs.
	ErrorCheck(c *docstore.Collection, err error) error
}
// verifyAsFailsOnNil is an AsTest that is always appended to the
// driver-supplied As tests: it checks that every As/ErrorAs variant rejects
// a nil target rather than succeeding vacuously.
type verifyAsFailsOnNil struct{}

func (verifyAsFailsOnNil) Name() string {
	return "verify As returns false when passed nil"
}
// CollectionCheck confirms that Collection.As reports failure for a nil target.
func (verifyAsFailsOnNil) CollectionCheck(coll *docstore.Collection) error {
	if ok := coll.As(nil); !ok {
		return nil
	}
	return errors.New("want Collection.As to return false when passed nil")
}
// QueryCheck confirms that DocumentIterator.As reports failure for a nil target.
func (verifyAsFailsOnNil) QueryCheck(it *docstore.DocumentIterator) error {
	if ok := it.As(nil); !ok {
		return nil
	}
	return errors.New("want DocumentIterator.As to return false when passed nil")
}
// ErrorCheck verifies that Collection.ErrorAs panics when passed a nil target.
// The deferred recover turns the expected panic into success; reaching the
// final return without panicking is the failure case, reported via the named
// result ret.
func (v verifyAsFailsOnNil) ErrorCheck(c *docstore.Collection, err error) (ret error) {
	defer func() {
		if recover() == nil {
			ret = errors.New("want ErrorAs to panic when passed nil")
		}
	}()
	c.ErrorAs(err, nil)
	return nil
}
// RunConformanceTests runs conformance tests for provider implementations of docstore.
func RunConformanceTests(t *testing.T, newHarness HarnessMaker, ct CodecTester, asTests []AsTest) {
	// Codec tests exercise encoding/decoding only; they need no collection.
	t.Run("TypeDrivenCodec", func(t *testing.T) { testTypeDrivenDecode(t, ct) })
	t.Run("BlindCodec", func(t *testing.T) { testBlindDecode(t, ct) })

	// Single-key collection tests. Each is run twice by withCollection: once
	// with the default revision field and once with the alternate one.
	t.Run("Create", func(t *testing.T) { withCollection(t, newHarness, testCreate) })
	t.Run("Put", func(t *testing.T) { withCollection(t, newHarness, testPut) })
	t.Run("Replace", func(t *testing.T) { withCollection(t, newHarness, testReplace) })
	t.Run("Get", func(t *testing.T) { withCollection(t, newHarness, testGet) })
	t.Run("Delete", func(t *testing.T) { withCollection(t, newHarness, testDelete) })
	t.Run("Update", func(t *testing.T) { withCollection(t, newHarness, testUpdate) })
	t.Run("Data", func(t *testing.T) { withCollection(t, newHarness, testData) })
	t.Run("MultipleActions", func(t *testing.T) { withCollection(t, newHarness, testMultipleActions) })
	t.Run("UnorderedActions", func(t *testing.T) { withCollection(t, newHarness, testUnorderedActions) })
	t.Run("GetQueryKeyField", func(t *testing.T) { withCollection(t, newHarness, testGetQueryKeyField) })

	// Query tests require the two-key (Game, Player) collection.
	t.Run("GetQuery", func(t *testing.T) { withTwoKeyCollection(t, newHarness, testGetQuery) })
	t.Run("DeleteQuery", func(t *testing.T) { withTwoKeyCollection(t, newHarness, testDeleteQuery) })
	t.Run("UpdateQuery", func(t *testing.T) { withTwoKeyCollection(t, newHarness, testUpdateQuery) })

	t.Run("BeforeDo", func(t *testing.T) { testBeforeDo(t, newHarness) })
	t.Run("BeforeQuery", func(t *testing.T) { testBeforeQuery(t, newHarness) })

	// In addition to any driver-supplied As tests, always verify that As
	// fails when passed nil.
	asTests = append(asTests, verifyAsFailsOnNil{})
	t.Run("As", func(t *testing.T) {
		for _, st := range asTests {
			if st.Name() == "" {
				t.Fatalf("AsTest.Name is required")
			}
			t.Run(st.Name(), func(t *testing.T) {
				withTwoKeyCollection(t, newHarness, func(t *testing.T, coll *docstore.Collection) {
					testAs(t, coll, st)
				})
			})
		}
	})
}
// withHarnessAndCollection creates a harness and a single-key collection,
// clears the collection, and invokes f with both. All resources are released
// when f returns.
func withHarnessAndCollection(t *testing.T, newHarness HarnessMaker, f func(*testing.T, context.Context, Harness, *ds.Collection)) {
	ctx := context.Background()
	harness, err := newHarness(ctx, t)
	if err != nil {
		t.Fatal(err)
	}
	defer harness.Close()

	drvColl, err := harness.MakeCollection(ctx)
	if err != nil {
		t.Fatal(err)
	}
	collection := ds.NewCollection(drvColl)
	defer collection.Close()

	clearCollection(t, collection)
	f(t, ctx, harness, collection)
}
// withCollection runs f twice: as subtest "StdRev" against a collection using
// the default revision field, and as subtest "AltRev" against a collection
// using AlternateRevisionField.
func withCollection(t *testing.T, newHarness HarnessMaker, f func(*testing.T, *ds.Collection, string)) {
	withHarnessAndCollection(t, newHarness, func(t *testing.T, ctx context.Context, h Harness, coll *ds.Collection) {
		// First pass: standard revision field.
		t.Run("StdRev", func(t *testing.T) { f(t, coll, ds.DefaultRevisionField) })

		// Second pass: a fresh collection configured with the alternate
		// revision field name.
		dc, err := h.MakeAlternateRevisionFieldCollection(ctx)
		if err != nil {
			t.Fatal(err)
		}
		altColl := ds.NewCollection(dc)
		defer altColl.Close()
		clearCollection(t, altColl)
		t.Run("AltRev", func(t *testing.T) { f(t, altColl, AlternateRevisionField) })
	})
}
// withTwoKeyCollection creates the two-key (Game, Player) collection, clears
// it, and invokes f with it. Resources are released when f returns.
func withTwoKeyCollection(t *testing.T, newHarness HarnessMaker, f func(*testing.T, *ds.Collection)) {
	ctx := context.Background()
	harness, err := newHarness(ctx, t)
	if err != nil {
		t.Fatal(err)
	}
	defer harness.Close()

	drvColl, err := harness.MakeTwoKeyCollection(ctx)
	if err != nil {
		t.Fatal(err)
	}
	collection := ds.NewCollection(drvColl)
	defer collection.Close()

	clearCollection(t, collection)
	f(t, collection)
}
// KeyField is the primary key field for the main test collection.
const KeyField = "name"

// AlternateRevisionField is used for testing the option to provide a different
// name for the revision field.
const AlternateRevisionField = "Etag"

// docmap is the map form of a test document.
type docmap = map[string]interface{}
// newDoc returns an empty document of the same dynamic kind as doc (map or
// struct) that carries only the key field, suitable as a Get target. It
// returns nil for any other type.
func newDoc(doc interface{}) interface{} {
	if m, ok := doc.(docmap); ok {
		return docmap{KeyField: m[KeyField]}
	}
	if s, ok := doc.(*docstruct); ok {
		return &docstruct{Name: s.Name}
	}
	return nil
}
// key extracts the primary key value from a map or struct test document,
// returning nil for any other type.
func key(doc interface{}) interface{} {
	if m, ok := doc.(docmap); ok {
		return m[KeyField]
	}
	if s, ok := doc.(*docstruct); ok {
		return s.Name
	}
	return nil
}
// setKey stores key as the primary key of a map or struct test document;
// other types are ignored.
func setKey(doc, key interface{}) {
	if m, ok := doc.(docmap); ok {
		m[KeyField] = key
	} else if s, ok := doc.(*docstruct); ok {
		s.Name = key
	}
}
// revision extracts the revision value stored under revField from a map or
// struct test document. For structs, the DocstoreRevision field holds the
// default revision and Etag holds the alternate one. Returns nil for any
// other type.
func revision(doc interface{}, revField string) interface{} {
	if m, ok := doc.(docmap); ok {
		return m[revField]
	}
	if s, ok := doc.(*docstruct); ok {
		if revField == docstore.DefaultRevisionField {
			return s.DocstoreRevision
		}
		return s.Etag
	}
	return nil
}
// setRevision stores rev under revField in a map or struct test document;
// other types are ignored. For structs, DocstoreRevision holds the default
// revision and Etag holds the alternate one.
func setRevision(doc, rev interface{}, revField string) {
	if m, ok := doc.(docmap); ok {
		m[revField] = rev
		return
	}
	if s, ok := doc.(*docstruct); ok {
		if revField == docstore.DefaultRevisionField {
			s.DocstoreRevision = rev
		} else {
			s.Etag = rev
		}
	}
}
// docstruct is the struct form of a test document, mirroring the fields the
// tests use in docmap documents. Name is the primary key; DocstoreRevision
// and Etag hold the revision under the default and alternate revision field
// names, respectively.
type docstruct struct {
	Name interface{} `docstore:"name"` // primary key (KeyField)

	DocstoreRevision interface{} // revision under the default field name
	Etag             interface{} // revision under AlternateRevisionField

	I  int                    `docstore:"i"`
	U  uint                   `docstore:"u"`
	F  float64                `docstore:"f"`
	St string                 `docstore:"st"`
	B  bool                   `docstore:"b"`
	M  map[string]interface{} `docstore:"m"`
}
func nonexistentDoc() docmap { return docmap{KeyField: "doesNotExist"} }
// testCreate verifies Create semantics: new documents (map or struct, with or
// without a caller-supplied key) succeed and get a revision; creating an
// existing document fails with AlreadyExists; supplying a revision to Create
// is an InvalidArgument.
func testCreate(t *testing.T, coll *ds.Collection, revField string) {
	ctx := context.Background()
	for _, tc := range []struct {
		name    string
		doc     interface{}
		wantErr gcerrors.ErrorCode
	}{
		{
			name: "named map",
			doc:  docmap{KeyField: "testCreateMap", "b": true},
		},
		{
			// Same key as the previous case, so Create must fail.
			name:    "existing",
			doc:     docmap{KeyField: "testCreateMap"},
			wantErr: gcerrors.AlreadyExists,
		},
		{
			// No key: the driver must generate one.
			name: "unnamed map",
			doc:  docmap{"b": true},
		},
		{
			name: "named struct",
			doc:  &docstruct{Name: "testCreateStruct", B: true},
		},
		{
			name: "unnamed struct",
			doc:  &docstruct{B: true},
		},
		{
			// A revision on Create is invalid: the document cannot exist yet.
			name:    "with revision",
			doc:     docmap{KeyField: "testCreate2", revField: 0},
			wantErr: gcerrors.InvalidArgument,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			if tc.wantErr == gcerrors.OK {
				// Success path: Create must fill in the revision field, and a
				// subsequent Get must round-trip the document exactly.
				checkNoRevisionField(t, tc.doc, revField)
				if err := coll.Create(ctx, tc.doc); err != nil {
					t.Fatalf("Create: %v", err)
				}
				checkHasRevisionField(t, tc.doc, revField)
				got := newDoc(tc.doc)
				if err := coll.Get(ctx, got); err != nil {
					t.Fatalf("Get: %v", err)
				}
				if diff := cmpDiff(got, tc.doc); diff != "" {
					t.Fatal(diff)
				}
			} else {
				err := coll.Create(ctx, tc.doc)
				checkCode(t, err, tc.wantErr)
			}
		})
	}
}
// testPut verifies Put semantics: a Put of a new document creates it, a Put
// of an existing document replaces it, and a Put of a document that carries a
// revision behaves like Replace (failing if the document does not exist).
func testPut(t *testing.T, coll *ds.Collection, revField string) {
	ctx := context.Background()
	must := func(err error) {
		t.Helper()
		if err != nil {
			t.Fatal(err)
		}
	}

	// Revisions captured from the "replace" cases, reused below to check that
	// a revision-carrying Put against a missing key fails.
	var maprev, strmap interface{}
	for _, tc := range []struct {
		name string
		doc  interface{}
		rev  bool
	}{
		{
			name: "create map",
			doc:  docmap{KeyField: "testPutMap", "b": true},
		},
		{
			name: "create struct",
			doc:  &docstruct{Name: "testPutStruct", B: true},
		},
		{
			name: "replace map",
			doc:  docmap{KeyField: "testPutMap", "b": false},
			rev:  true,
		},
		{
			name: "replace struct",
			doc:  &docstruct{Name: "testPutStruct", B: false},
			rev:  true,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			checkNoRevisionField(t, tc.doc, revField)
			must(coll.Put(ctx, tc.doc))
			// Put must fill in the revision field, and Get must round-trip
			// the document exactly.
			checkHasRevisionField(t, tc.doc, revField)
			got := newDoc(tc.doc)
			must(coll.Get(ctx, got))
			if diff := cmpDiff(got, tc.doc); diff != "" {
				t.Fatalf(diff)
			}
			if tc.rev {
				// Remember the revision for the wrong-key cases below.
				switch v := tc.doc.(type) {
				case docmap:
					maprev = v[revField]
				case *docstruct:
					if revField == docstore.DefaultRevisionField {
						strmap = v.DocstoreRevision
					} else {
						strmap = v.Etag
					}
				}
			}
		})
	}

	// Putting a doc with a revision field is the same as replace, meaning
	// it will fail if the document doesn't exist.
	for _, tc := range []struct {
		name string
		doc  interface{}
	}{
		{
			name: "replace map wrong key",
			doc:  docmap{KeyField: "testPutMap2", revField: maprev},
		},
		{
			name: "replace struct wrong key",
			doc:  &docstruct{Name: "testPutStruct2", DocstoreRevision: strmap, Etag: strmap},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			err := coll.Put(ctx, tc.doc)
			// Drivers are allowed to report either NotFound or FailedPrecondition.
			if c := gcerrors.Code(err); c != gcerrors.NotFound && c != gcerrors.FailedPrecondition {
				t.Errorf("got %v, want NotFound or FailedPrecondition", err)
			}
		})
	}

	// Shared revision-field checks, with Put as the write operation.
	t.Run("revision", func(t *testing.T) {
		testRevisionField(t, coll, revField, func(doc interface{}) error {
			return coll.Put(ctx, doc)
		})
	})
}
// testReplace verifies that Replace overwrites an existing document and fails
// with NotFound for a nonexistent one, and checks revision preconditions.
func testReplace(t *testing.T, coll *ds.Collection, revField string) {
	ctx := context.Background()
	must := func(err error) {
		t.Helper()
		if err != nil {
			t.Fatal(err)
		}
	}
	for _, tc := range []struct {
		name       string
		doc1, doc2 interface{}
	}{
		{
			name: "replace map",
			doc1: docmap{KeyField: "testReplaceMap", "s": "a"},
			doc2: docmap{KeyField: "testReplaceMap", "s": "b"},
		},
		{
			name: "replace struct",
			doc1: &docstruct{Name: "testReplaceStruct", St: "a"},
			doc2: &docstruct{Name: "testReplaceStruct", St: "b"},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			must(coll.Put(ctx, tc.doc1))
			checkNoRevisionField(t, tc.doc2, revField)
			must(coll.Replace(ctx, tc.doc2))
			checkHasRevisionField(t, tc.doc2, revField)
			got := newDoc(tc.doc2)
			must(coll.Get(ctx, got))
			// diff is data, not a format string: use Fatal, not Fatalf, so
			// any '%' inside the diff is not misinterpreted (go vet printf).
			if diff := cmpDiff(got, tc.doc2); diff != "" {
				t.Fatal(diff)
			}
		})
	}
	// Can't replace a nonexistent doc.
	checkCode(t, coll.Replace(ctx, nonexistentDoc()), gcerrors.NotFound)
	t.Run("revision", func(t *testing.T) {
		testRevisionField(t, coll, revField, func(doc interface{}) error {
			return coll.Replace(ctx, doc)
		})
	})
}
// checkNoRevisionField fails the test if doc carries a non-nil revision
// field named revField.
func checkNoRevisionField(t *testing.T, doc interface{}, revField string) {
	t.Helper()
	d, err := driver.NewDocument(doc)
	if err != nil {
		t.Fatal(err)
	}
	rev, _ := d.GetField(revField)
	if rev != nil {
		t.Fatal("doc has revision field")
	}
}
// checkHasRevisionField fails the test unless doc carries a non-nil revision
// field named revField.
func checkHasRevisionField(t *testing.T, doc interface{}, revField string) {
	t.Helper()
	d, err := driver.NewDocument(doc)
	if err != nil {
		t.Fatal(err)
	}
	rev, err := d.GetField(revField)
	if err != nil || rev == nil {
		t.Fatalf("doc missing revision field (error = %v)", err)
	}
}
// testGet verifies Get, both full-document reads and reads restricted to a
// set of field paths, for map and struct documents. A Get on a nonexistent
// document must return NotFound.
func testGet(t *testing.T, coll *ds.Collection, revField string) {
	ctx := context.Background()
	must := func(err error) {
		t.Helper()
		if err != nil {
			t.Fatal(err)
		}
	}
	for _, tc := range []struct {
		name string
		doc  interface{}
		fps  []docstore.FieldPath // field paths to Get; empty means the whole doc
		want interface{}          // expected result; nil means tc.doc itself
	}{
		// If Get is called with no field paths, the full document is populated.
		{
			name: "get map",
			doc: docmap{
				KeyField: "testGetMap",
				"s":      "a string",
				"i":      int64(95),
				"f":      32.3,
				"m":      map[string]interface{}{"a": "one", "b": "two"},
			},
		},
		{
			name: "get struct",
			doc: &docstruct{
				Name: "testGetStruct",
				St:   "a string",
				I:    95,
				F:    32.3,
				M:    map[string]interface{}{"a": "one", "b": "two"},
			},
		},
		// If Get is called with field paths, the resulting document has only those fields.
		{
			name: "get map with field path",
			doc: docmap{
				KeyField: "testGetMapFP",
				"s":      "a string",
				"i":      int64(95),
				"f":      32.3,
				"m":      map[string]interface{}{"a": "one", "b": "two"},
			},
			fps: []docstore.FieldPath{"f", "m.b"},
			want: docmap{
				KeyField: "testGetMapFP",
				"f":      32.3,
				"m":      map[string]interface{}{"b": "two"},
			},
		},
		{
			name: "get struct with field path",
			doc: &docstruct{
				Name: "testGetStruct",
				St:   "a string",
				I:    95,
				F:    32.3,
				M:    map[string]interface{}{"a": "one", "b": "two"},
			},
			fps: []docstore.FieldPath{"st", "m.a"},
			want: &docstruct{
				Name: "testGetStruct",
				St:   "a string",
				M:    map[string]interface{}{"a": "one"},
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			must(coll.Put(ctx, tc.doc))
			got := newDoc(tc.doc)
			must(coll.Get(ctx, got, tc.fps...))
			if tc.want == nil {
				tc.want = tc.doc
			}
			// Copy the revision the provider assigned into want so the diff
			// compares only the data fields.
			setRevision(tc.want, revision(got, revField), revField)
			if diff := cmpDiff(got, tc.want); diff != "" {
				t.Error("Get with field paths:\n", diff)
			}
		})
	}
	err := coll.Get(ctx, nonexistentDoc())
	checkCode(t, err, gcerrors.NotFound)
}
// testDelete verifies Delete: a plain delete removes the document, a delete
// with a stale revision fails with FailedPrecondition, and deleting a
// nonexistent document is not an error. The cases are order-dependent: each
// "wrong rev" case reuses the revision saved by the preceding delete.
func testDelete(t *testing.T, coll *ds.Collection, revField string) {
	ctx := context.Background()
	// rev holds the revision of the most recently deleted document.
	var rev interface{}
	for _, tc := range []struct {
		name    string
		doc     interface{}
		wantErr gcerrors.ErrorCode
	}{
		{
			name: "delete map",
			doc:  docmap{KeyField: "testDeleteMap"},
		},
		{
			name:    "delete map wrong rev",
			doc:     docmap{KeyField: "testDeleteMap", "b": true},
			wantErr: gcerrors.FailedPrecondition,
		},
		{
			name: "delete struct",
			doc:  &docstruct{Name: "testDeleteStruct"},
		},
		{
			name:    "delete struct wrong rev",
			doc:     &docstruct{Name: "testDeleteStruct", B: true},
			wantErr: gcerrors.FailedPrecondition,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			if err := coll.Put(ctx, tc.doc); err != nil {
				t.Fatal(err)
			}
			if tc.wantErr == gcerrors.OK {
				rev = revision(tc.doc, revField)
				if err := coll.Delete(ctx, tc.doc); err != nil {
					t.Fatal(err)
				}
				// The document should no longer exist.
				if err := coll.Get(ctx, tc.doc); err == nil {
					t.Error("want error, got nil")
				}
			} else {
				// Give the freshly re-created document the stale revision
				// saved above; the delete must then fail.
				setRevision(tc.doc, rev, revField)
				checkCode(t, coll.Delete(ctx, tc.doc), gcerrors.FailedPrecondition)
			}
		})
	}
	// Delete doesn't fail if the doc doesn't exist.
	if err := coll.Delete(ctx, nonexistentDoc()); err != nil {
		t.Errorf("delete nonexistent doc: want nil, got %v", err)
	}
}
// testUpdate verifies Update: setting fields, deleting fields (nil mod),
// and Increment mods (including incrementing a missing field), for both map
// and struct documents. It also checks the error cases: updating a
// nonexistent document and an invalid Increment argument.
func testUpdate(t *testing.T, coll *ds.Collection, revField string) {
	// TODO(jba): test an increment-only update.
	ctx := context.Background()
	for _, tc := range []struct {
		name string
		doc  interface{}
		mods ds.Mods
		want interface{}
	}{
		{
			name: "update map",
			doc:  docmap{KeyField: "testUpdateMap", "a": "A", "b": "B", "n": 3.5, "i": 1},
			mods: ds.Mods{
				"a": "X",
				"b": nil,
				"c": "C",
				"n": docstore.Increment(-1),
				"i": docstore.Increment(2.5),
				"m": docstore.Increment(3),
			},
			want: docmap{KeyField: "testUpdateMap", "a": "X", "c": "C", "n": 2.5, "i": 3.5, "m": int64(3)},
		},
		{
			name: "update struct",
			doc:  &docstruct{Name: "testUpdateStruct", St: "st", I: 1, F: 3.5},
			mods: ds.Mods{
				"st": "str",
				"i":  nil,
				"u":  docstore.Increment(4),
				"f":  docstore.Increment(-3),
			},
			want: &docstruct{Name: "testUpdateStruct", St: "str", U: 4, F: 0.5},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			if err := coll.Put(ctx, tc.doc); err != nil {
				t.Fatal(err)
			}
			// Clear the revision set by Put so the Update below is
			// unconditional, then check Update sets it again.
			setRevision(tc.doc, nil, revField)
			got := newDoc(tc.doc)
			checkNoRevisionField(t, tc.doc, revField)
			errs := coll.Actions().Update(tc.doc, tc.mods).Get(got).Do(ctx)
			if errs != nil {
				t.Fatal(errs)
			}
			checkHasRevisionField(t, tc.doc, revField)
			setRevision(tc.want, revision(got, revField), revField)
			if diff := cmp.Diff(got, tc.want); diff != "" {
				t.Error(diff)
			}
		})
	}
	// Can't update a nonexistent doc.
	if err := coll.Update(ctx, nonexistentDoc(), ds.Mods{"x": "y"}); err == nil {
		t.Error("nonexistent document: got nil, want error")
	}
	// Bad increment value.
	err := coll.Update(ctx, docmap{KeyField: "update invalid"}, ds.Mods{"x": ds.Increment("3")})
	checkCode(t, err, gcerrors.InvalidArgument)
	t.Run("revision", func(t *testing.T) {
		testRevisionField(t, coll, revField, func(doc interface{}) error {
			return coll.Update(ctx, doc, ds.Mods{"s": "c"})
		})
	})
}
// Test that:
// - Writing a document with a revision field succeeds if the document hasn't changed.
// - Writing a document with a revision field fails if the document has changed.
// write is the operation under test (Put, Replace or Update), applied to a
// document that carries a revision field.
func testRevisionField(t *testing.T, coll *ds.Collection, revField string, write func(interface{}) error) {
	ctx := context.Background()
	must := func(err error) {
		t.Helper()
		if err != nil {
			t.Fatal(err)
		}
	}
	for _, tc := range []struct {
		name string
		doc  interface{}
	}{
		{
			name: "map revision",
			doc:  docmap{KeyField: "testRevisionMap", "s": "a"},
		},
		{
			name: "struct revision",
			doc:  &docstruct{Name: "testRevisionStruct", St: "a"},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			must(coll.Put(ctx, tc.doc))
			got := newDoc(tc.doc)
			must(coll.Get(ctx, got))
			rev := revision(got, revField)
			if rev == nil {
				t.Fatal("missing revision field")
			}
			// A write should succeed, because the document hasn't changed since it was gotten.
			if err := write(tc.doc); err != nil {
				t.Fatalf("write with revision field got %v, want nil", err)
			}
			// This write should fail: got's revision field hasn't changed, but the stored document has.
			err := write(got)
			if c := gcerrors.Code(err); c != gcerrors.FailedPrecondition && c != gcerrors.NotFound {
				t.Errorf("write with old revision field: got %v, wanted FailedPrecondition or NotFound", err)
			}
		})
	}
}
// testData verifies round-tripping of basic Go data types through a Put/Get
// cycle: all integer types come back as int64, float32 widens to float64,
// and byte slices are preserved.
func testData(t *testing.T, coll *ds.Collection, revField string) {
	// All Go integer types are supported, but they all come back as int64.
	ctx := context.Background()
	for _, test := range []struct {
		in, want interface{}
	}{
		{int(-1), int64(-1)},
		{int8(-8), int64(-8)},
		{int16(-16), int64(-16)},
		{int32(-32), int64(-32)},
		{int64(-64), int64(-64)},
		{uint(1), int64(1)},
		{uint8(8), int64(8)},
		{uint16(16), int64(16)},
		{uint32(32), int64(32)},
		{uint64(64), int64(64)},
		{float32(3.5), float64(3.5)},
		{[]byte{0, 1, 2}, []byte{0, 1, 2}},
	} {
		doc := docmap{KeyField: "testData", "val": test.in}
		got := docmap{KeyField: doc[KeyField]}
		if errs := coll.Actions().Put(doc).Get(got).Do(ctx); errs != nil {
			t.Fatal(errs)
		}
		want := docmap{
			"val":    test.want,
			KeyField: doc[KeyField],
			revField: got[revField],
		}
		// Compare field count and the "val" field separately so the
		// provider-assigned revision value itself is not compared.
		if len(got) != len(want) {
			t.Errorf("%v: got %v, want %v", test.in, got, want)
		} else if g := got["val"]; !cmp.Equal(g, test.want) {
			t.Errorf("%v: got %v (%T), want %v (%T)", test.in, g, g, test.want, test.want)
		}
	}
	// TODO: strings: valid vs. invalid unicode
}
// Fixture timestamps for testing time round-tripping at different precisions.
var (
	// A time with non-zero milliseconds, but zero nanoseconds.
	milliTime = time.Date(2019, time.March, 27, 0, 0, 0, 5*1e6, time.UTC)
	// A time with non-zero nanoseconds.
	nanoTime = time.Date(2019, time.March, 27, 0, 0, 0, 5*1e6+7, time.UTC)
)
// Test that encoding from a struct and then decoding into the same struct works properly.
// The decoding is "type-driven" because the decoder knows the expected type of the value
// it is decoding--it is the type of a struct field.
// Types the provider's native codec cannot handle (reported by
// ct.UnsupportedTypes) are skipped or, for nanosecond times, checked for
// millisecond rounding instead.
func testTypeDrivenDecode(t *testing.T, ct CodecTester) {
	if ct == nil {
		t.Skip("no CodecTester")
	}
	// check round-trips in through encode, out through decode, and requires
	// the result to equal the input exactly.
	check := func(in, dec interface{}, encode func(interface{}) (interface{}, error), decode func(interface{}, interface{}) error) {
		t.Helper()
		enc, err := encode(in)
		if err != nil {
			t.Fatalf("%+v", err)
		}
		if err := decode(enc, dec); err != nil {
			t.Fatalf("%+v", err)
		}
		if diff := cmp.Diff(in, dec); diff != "" {
			t.Error(diff)
		}
	}
	s := "bar"
	dsrt := &docstoreRoundTrip{
		N:  nil,
		I:  1,
		U:  2,
		F:  2.5,
		St: "foo",
		B:  true,
		L:  []int{3, 4, 5},
		A:  [2]int{6, 7},
		M:  map[string]bool{"a": true, "b": false},
		By: []byte{6, 7, 8},
		P:  &s,
		T:  milliTime,
	}
	check(dsrt, &docstoreRoundTrip{}, ct.DocstoreEncode, ct.DocstoreDecode)
	// Test native-to-docstore and docstore-to-native round trips with a smaller set
	// of types.
	nm := &nativeMinimal{
		N:  nil,
		I:  1,
		F:  2.5,
		St: "foo",
		B:  true,
		L:  []int{3, 4, 5},
		M:  map[string]bool{"a": true, "b": false},
		By: []byte{6, 7, 8},
		P:  &s,
		T:  milliTime,
		LF: []float64{18.8, -19.9, 20},
		LS: []string{"foo", "bar"},
	}
	check(nm, &nativeMinimal{}, ct.DocstoreEncode, ct.NativeDecode)
	check(nm, &nativeMinimal{}, ct.NativeEncode, ct.DocstoreDecode)
	// Test various other types, unless they are unsupported.
	unsupported := map[UnsupportedType]bool{}
	for _, u := range ct.UnsupportedTypes() {
		unsupported[u] = true
	}
	// Unsigned integers.
	if !unsupported[Uint] {
		type Uint struct {
			U uint
		}
		u := &Uint{10}
		check(u, &Uint{}, ct.DocstoreEncode, ct.NativeDecode)
		check(u, &Uint{}, ct.NativeEncode, ct.DocstoreDecode)
	}
	// Arrays.
	if !unsupported[Arrays] {
		type Arrays struct {
			A [2]int
		}
		a := &Arrays{[2]int{13, 14}}
		check(a, &Arrays{}, ct.DocstoreEncode, ct.NativeDecode)
		check(a, &Arrays{}, ct.NativeEncode, ct.DocstoreDecode)
	}
	// Nanosecond-precision time.
	type NT struct {
		T time.Time
	}
	nt := &NT{nanoTime}
	if unsupported[NanosecondTimes] {
		// Expect rounding to the nearest millisecond.
		check := func(encode func(interface{}) (interface{}, error), decode func(interface{}, interface{}) error) {
			enc, err := encode(nt)
			if err != nil {
				t.Fatalf("%+v", err)
			}
			var got NT
			if err := decode(enc, &got); err != nil {
				t.Fatalf("%+v", err)
			}
			want := nt.T.Round(time.Millisecond)
			if !got.T.Equal(want) {
				t.Errorf("got %v, want %v", got.T, want)
			}
		}
		check(ct.DocstoreEncode, ct.NativeDecode)
		check(ct.NativeEncode, ct.DocstoreDecode)
	} else {
		// Expect perfect round-tripping of nanosecond times.
		check(nt, &NT{}, ct.DocstoreEncode, ct.NativeDecode)
		check(nt, &NT{}, ct.NativeEncode, ct.DocstoreDecode)
	}
	// Binary sets.
	if !unsupported[BinarySet] {
		type BinarySet struct {
			B [][]byte
		}
		b := &BinarySet{[][]byte{{15}, {16}, {17}}}
		check(b, &BinarySet{}, ct.DocstoreEncode, ct.NativeDecode)
		check(b, &BinarySet{}, ct.NativeEncode, ct.DocstoreDecode)
	}
}
// testBlindDecode checks decoding into an interface{}, where the decoder
// cannot know the target type and must pick a Go type that faithfully
// represents the value (the AsInterface method of driver.Decoder). Because
// different providers may legitimately choose different Go types, each case
// is compared against a set of acceptable values.
func testBlindDecode(t *testing.T, ct CodecTester) {
	if ct == nil {
		t.Skip("no CodecTester")
	}
	t.Run("DocstoreEncode", func(t *testing.T) {
		testBlindDecode1(t, ct.DocstoreEncode, ct.DocstoreDecode)
	})
	t.Run("NativeEncode", func(t *testing.T) {
		testBlindDecode1(t, ct.NativeEncode, ct.DocstoreDecode)
	})
}
// testBlindDecode1 runs the blind-decode cases for one encode/decode pair.
// Each input is accepted if the decoded value equals want or want2 (where
// provider type choices legitimately differ, e.g. int32 vs. int64).
func testBlindDecode1(t *testing.T, encode func(interface{}) (interface{}, error), decode func(_, _ interface{}) error) {
	// Encode and decode expect a document, so use this struct to hold the values.
	type S struct{ X interface{} }
	for _, test := range []struct {
		in    interface{} // the value to be encoded
		want  interface{} // one possibility
		want2 interface{} // a second possibility
	}{
		{in: nil, want: nil},
		{in: true, want: true},
		{in: "foo", want: "foo"},
		{in: 'c', want: 'c', want2: int64('c')},
		{in: int(3), want: int32(3), want2: int64(3)},
		{in: int8(3), want: int32(3), want2: int64(3)},
		{in: int(-3), want: int32(-3), want2: int64(-3)},
		{in: int64(math.MaxInt32 + 1), want: int64(math.MaxInt32 + 1)},
		{in: float32(1.5), want: float64(1.5)},
		{in: float64(1.5), want: float64(1.5)},
		{in: []byte{1, 2}, want: []byte{1, 2}},
		{in: []int{1, 2},
			want:  []interface{}{int32(1), int32(2)},
			want2: []interface{}{int64(1), int64(2)}},
		{in: []float32{1.5, 2.5}, want: []interface{}{float64(1.5), float64(2.5)}},
		{in: []float64{1.5, 2.5}, want: []interface{}{float64(1.5), float64(2.5)}},
		{in: milliTime, want: milliTime, want2: "2019-03-27T00:00:00.005Z"},
		{in: []time.Time{milliTime},
			want:  []interface{}{milliTime},
			want2: []interface{}{"2019-03-27T00:00:00.005Z"},
		},
		{in: map[string]int{"a": 1},
			want:  map[string]interface{}{"a": int64(1)},
			want2: map[string]interface{}{"a": int32(1)},
		},
		{in: map[string][]byte{"a": {1, 2}}, want: map[string]interface{}{"a": []byte{1, 2}}},
	} {
		enc, err := encode(&S{test.in})
		if err != nil {
			t.Fatalf("encoding %T: %v", test.in, err)
		}
		var got S
		if err := decode(enc, &got); err != nil {
			t.Fatalf("decoding %T: %v", test.in, err)
		}
		// Accept a match against any of the allowed alternatives.
		matched := false
		wants := []interface{}{test.want}
		if test.want2 != nil {
			wants = append(wants, test.want2)
		}
		for _, w := range wants {
			if cmp.Equal(got.X, w) {
				matched = true
				break
			}
		}
		if !matched {
			t.Errorf("%T: got %#v (%T), not equal to %#v or %#v", test.in, got.X, got.X, test.want, test.want2)
		}
	}
}
// A round trip with the docstore codec should work for all docstore-supported types,
// regardless of native driver support.
// Field names are abbreviations of their types (N = nil pointer, I = int,
// U = uint, F = float, St = string, B = bool, By = bytes, L = list/slice,
// A = array, M = map, P = pointer, T = time).
type docstoreRoundTrip struct {
	N  *int
	I  int
	U  uint
	F  float64
	St string
	B  bool
	By []byte
	L  []int
	A  [2]int
	M  map[string]bool
	P  *string
	T  time.Time
}
// TODO(jba): add more fields: structs; embedding.
// All native codecs should support these types. If one doesn't, remove it from this
// struct and make a new single-field struct for it.
// Field naming follows docstoreRoundTrip; LF and LS are lists of floats and
// strings respectively.
type nativeMinimal struct {
	N  *int
	I  int
	F  float64
	St string
	B  bool
	By []byte
	L  []int
	M  map[string]bool
	P  *string
	T  time.Time
	LF []float64
	LS []string
}
// The following is the schema for the collection used for query testing.
// It is loosely borrowed from the DynamoDB documentation.
// It is rich enough to require indexes for some providers.
// A HighScore records one user's high score in a particular game.
// The primary key fields are Game and Player.
type HighScore struct {
	Game   string
	Player string
	Score  int
	Time   time.Time
	// DocstoreRevision receives the provider-assigned revision on writes.
	DocstoreRevision interface{}
}
// newHighScore returns a pointer to a zero HighScore, for use as a document
// factory by the iteration helpers.
func newHighScore() interface{} {
	return &HighScore{}
}
// HighScoreKey constructs a single primary key from a HighScore struct
// by joining the Game and Player fields with "|".
func HighScoreKey(doc docstore.Document) interface{} {
	hs := doc.(*HighScore)
	return hs.Game + "|" + hs.Player
}
// String renders a HighScore as "game|player=score@mm/dd".
func (hs *HighScore) String() string {
	return fmt.Sprintf("%s|%s=%d@%s", hs.Game, hs.Player, hs.Score, hs.Time.Format("01/02"))
}
func date(month, day int) time.Time {
return time.Date(2019, time.Month(month), day, 0, 0, 0, 0, time.UTC)
}
// Game names used by the query-test fixtures.
const (
	game1 = "Praise All Monsters"
	game2 = "Zombie DMV"
	game3 = "Days Gone"
)
// queryDocuments is the shared fixture set for the query tests. The final
// nil is the DocstoreRevision field, filled in by the provider on write.
var queryDocuments = []*HighScore{
	{game1, "pat", 49, date(3, 13), nil},
	{game1, "mel", 60, date(4, 10), nil},
	{game1, "andy", 81, date(2, 1), nil},
	{game1, "fran", 33, date(3, 19), nil},
	{game2, "pat", 120, date(4, 1), nil},
	{game2, "billie", 111, date(4, 10), nil},
	{game2, "mel", 190, date(4, 18), nil},
	{game2, "fran", 33, date(3, 20), nil},
}
// addQueryDocuments populates coll with a copy of every document in
// queryDocuments, failing the test on any error.
func addQueryDocuments(t *testing.T, coll *ds.Collection) {
	actions := coll.Actions()
	for _, doc := range queryDocuments {
		// Put a copy so the shared fixtures are never mutated.
		cp := *doc
		actions.Put(&cp)
	}
	if err := actions.Do(context.Background()); err != nil {
		t.Fatalf("%+v", err)
	}
}
// testGetQueryKeyField queries the key field of a collection that has one.
// (The collection used for testGetQuery uses a key function rather than a key field.)
// It also verifies that a query with selected fields still returns the key
// and revision fields.
func testGetQueryKeyField(t *testing.T, coll *ds.Collection, revField string) {
	ctx := context.Background()
	docs := []docmap{
		{KeyField: "qkf1", "a": "one"},
		{KeyField: "qkf2", "a": "two"},
		{KeyField: "qkf3", "a": "three"},
	}
	al := coll.Actions()
	for _, d := range docs {
		al.Put(d)
	}
	if err := al.Do(ctx); err != nil {
		t.Fatal(err)
	}
	iter := coll.Query().Where(KeyField, "<", "qkf3").Get(ctx)
	defer iter.Stop()
	got := mustCollect(ctx, t, iter)
	want := docs[:2]
	diff := cmpDiff(got, want, cmpopts.SortSlices(sortByKeyField))
	if diff != "" {
		t.Error(diff)
	}
	// Test that queries with selected fields always return the key and revision fields.
	iter = coll.Query().Get(ctx, "a")
	defer iter.Stop()
	got = mustCollect(ctx, t, iter)
	// Check the query results, not the input docs: the point is that the
	// provider returns the revision field even though only "a" was selected.
	for _, d := range got {
		checkHasRevisionField(t, d, revField)
	}
	diff = cmpDiff(got, docs, cmpopts.SortSlices(sortByKeyField))
	if diff != "" {
		t.Error(diff)
	}
}
// sortByKeyField reports whether d1's key field sorts before d2's.
func sortByKeyField(d1, d2 docmap) bool {
	k1 := d1[KeyField].(string)
	k2 := d2[KeyField].(string)
	return k1 < k2
}
// testGetQuery exercises query filters (=, >, <, >=), field selection,
// OrderBy in both directions, and Limit, against the HighScore fixtures.
// Each case's want function filters (and may project) queryDocuments to
// produce the expected result set.
func testGetQuery(t *testing.T, coll *ds.Collection) {
	ctx := context.Background()
	addQueryDocuments(t, coll)
	// Query filters should have the same behavior when doing string and number
	// comparison.
	tests := []struct {
		name   string
		q      *ds.Query
		fields []docstore.FieldPath        // fields to get
		want   func(*HighScore) bool       // filters queryDocuments
		before func(x, y *HighScore) bool  // if present, checks result order
	}{
		{
			name: "All",
			q:    coll.Query(),
			want: func(*HighScore) bool { return true },
		},
		{
			name: "Game",
			q:    coll.Query().Where("Game", "=", game2),
			want: func(h *HighScore) bool { return h.Game == game2 },
		},
		{
			name: "Score",
			q:    coll.Query().Where("Score", ">", 100),
			want: func(h *HighScore) bool { return h.Score > 100 },
		},
		{
			name: "Player",
			q:    coll.Query().Where("Player", "=", "billie"),
			want: func(h *HighScore) bool { return h.Player == "billie" },
		},
		{
			name: "GamePlayer",
			q:    coll.Query().Where("Player", "=", "andy").Where("Game", "=", game1),
			want: func(h *HighScore) bool { return h.Player == "andy" && h.Game == game1 },
		},
		{
			name: "PlayerScore",
			q:    coll.Query().Where("Player", "=", "pat").Where("Score", "<", 100),
			want: func(h *HighScore) bool { return h.Player == "pat" && h.Score < 100 },
		},
		{
			name: "GameScore",
			q:    coll.Query().Where("Game", "=", game1).Where("Score", ">=", 50),
			want: func(h *HighScore) bool { return h.Game == game1 && h.Score >= 50 },
		},
		{
			name: "PlayerTime",
			q:    coll.Query().Where("Player", "=", "mel").Where("Time", ">", date(4, 1)),
			want: func(h *HighScore) bool { return h.Player == "mel" && h.Time.After(date(4, 1)) },
		},
		{
			name: "ScoreTime",
			q:    coll.Query().Where("Score", ">=", 50).Where("Time", ">", date(4, 1)),
			want: func(h *HighScore) bool { return h.Score >= 50 && h.Time.After(date(4, 1)) },
		},
		{
			name:   "AllByPlayerAsc",
			q:      coll.Query().OrderBy("Player", docstore.Ascending),
			want:   func(h *HighScore) bool { return true },
			before: func(h1, h2 *HighScore) bool { return h1.Player < h2.Player },
		},
		{
			name:   "AllByPlayerDesc",
			q:      coll.Query().OrderBy("Player", docstore.Descending),
			want:   func(h *HighScore) bool { return true },
			before: func(h1, h2 *HighScore) bool { return h1.Player > h2.Player },
		},
		{
			name: "GameByPlayerAsc",
			// We need a filter on Player, and it can't be the empty string (DynamoDB limitation).
			// So pick any string that sorts less than all valid player names.
			q: coll.Query().Where("Game", "=", game1).Where("Player", ">", ".").
				OrderBy("Player", docstore.Ascending),
			want:   func(h *HighScore) bool { return h.Game == game1 },
			before: func(h1, h2 *HighScore) bool { return h1.Player < h2.Player },
		},
		{
			// Same as above, but descending.
			name: "GameByPlayerDesc",
			q: coll.Query().Where("Game", "=", game1).Where("Player", ">", ".").
				OrderBy("Player", docstore.Descending),
			want:   func(h *HighScore) bool { return h.Game == game1 },
			before: func(h1, h2 *HighScore) bool { return h1.Player > h2.Player },
		},
		// TODO(jba): add more OrderBy tests.
		{
			name:   "AllWithKeyFields",
			q:      coll.Query(),
			fields: []docstore.FieldPath{"Game", "Player"},
			want: func(h *HighScore) bool {
				// Zero the unselected fields so the comparison matches the
				// projected results.
				h.Score = 0
				h.Time = time.Time{}
				return true
			},
		},
		{
			name:   "AllWithScore",
			q:      coll.Query(),
			fields: []docstore.FieldPath{"Game", "Player", "Score"},
			want: func(h *HighScore) bool {
				h.Time = time.Time{}
				return true
			},
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			got, err := collectHighScores(ctx, tc.q.Get(ctx, tc.fields...))
			if err != nil {
				t.Fatal(err)
			}
			// Every result must carry a revision; clear it before comparing.
			for _, g := range got {
				if g.DocstoreRevision == nil {
					t.Errorf("%v missing DocstoreRevision", g)
				} else {
					g.DocstoreRevision = nil
				}
			}
			want := filterHighScores(queryDocuments, tc.want)
			_, err = tc.q.Plan()
			if err != nil {
				t.Fatal(err)
			}
			diff := cmp.Diff(got, want, cmpopts.SortSlices(func(h1, h2 *HighScore) bool {
				return h1.Game+"|"+h1.Player < h2.Game+"|"+h2.Player
			}))
			if diff != "" {
				t.Fatal(diff)
			}
			if tc.before != nil {
				// Verify that the results are sorted according to tc.less.
				for i := 1; i < len(got); i++ {
					if tc.before(got[i], got[i-1]) {
						t.Errorf("%s at %d sorts before previous %s", got[i], i, got[i-1])
					}
				}
			}
			// We can't assume anything about the query plan. Just verify that Plan returns
			// successfully.
			if _, err := tc.q.Plan(KeyField); err != nil {
				t.Fatal(err)
			}
		})
	}
	t.Run("Limit", func(t *testing.T) {
		// For limit, we can't be sure which documents will be returned, only their count.
		limitQ := coll.Query().Limit(2)
		got := mustCollectHighScores(ctx, t, limitQ.Get(ctx))
		if len(got) != 2 {
			t.Errorf("got %v, wanted two documents", got)
		}
	})
}
// testDeleteQuery exercises Query.Delete with various filters, and checks
// that combining Limit with Delete is rejected.
func testDeleteQuery(t *testing.T, coll *ds.Collection) {
	ctx := context.Background()
	addQueryDocuments(t, coll)
	// Note: these tests are cumulative. If the first test deletes a document, that
	// change will persist for the second test.
	tests := []struct {
		name string
		q    *ds.Query
		want func(*HighScore) bool // filters queryDocuments
	}{
		{
			name: "Player",
			q:    coll.Query().Where("Player", "=", "andy"),
			want: func(h *HighScore) bool { return h.Player != "andy" },
		},
		{
			name: "Score",
			q:    coll.Query().Where("Score", ">", 100),
			want: func(h *HighScore) bool { return h.Score <= 100 },
		},
		{
			name: "All",
			q:    coll.Query(),
			want: func(h *HighScore) bool { return false },
		},
		// TODO(jba): add a case that requires Firestore to evaluate filters on the client.
	}
	// prevWant tracks the surviving documents across the cumulative cases.
	prevWant := queryDocuments
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			if err := tc.q.Delete(ctx); err != nil {
				t.Fatal(err)
			}
			got := mustCollectHighScores(ctx, t, coll.Query().Get(ctx))
			for _, g := range got {
				g.DocstoreRevision = nil
			}
			want := filterHighScores(prevWant, tc.want)
			prevWant = want
			diff := cmp.Diff(got, want, cmpopts.SortSlices(func(h1, h2 *HighScore) bool {
				return h1.Game+"|"+h1.Player < h2.Game+"|"+h2.Player
			}))
			if diff != "" {
				t.Error(diff)
			}
		})
	}
	// Using Limit with DeleteQuery should be an error.
	err := coll.Query().Where("Player", "=", "mel").Limit(1).Delete(ctx)
	if err == nil {
		t.Fatal("want error for Limit, got nil")
	}
}
// testUpdateQuery exercises Query.Update: all documents matching the filter
// get the mods applied (setting Score, deleting Time), others are untouched.
func testUpdateQuery(t *testing.T, coll *ds.Collection) {
	ctx := context.Background()
	addQueryDocuments(t, coll)
	err := coll.Query().Where("Player", "=", "fran").Update(ctx, docstore.Mods{"Score": 13, "Time": nil})
	if err != nil {
		t.Fatal(err)
	}
	got := mustCollectHighScores(ctx, t, coll.Query().Get(ctx))
	for _, g := range got {
		g.DocstoreRevision = nil
	}
	// Build the expectation by applying the same mods to the fixtures.
	want := filterHighScores(queryDocuments, func(h *HighScore) bool {
		if h.Player == "fran" {
			h.Score = 13
			h.Time = time.Time{}
		}
		return true
	})
	diff := cmp.Diff(got, want, cmpopts.SortSlices(func(h1, h2 *HighScore) bool {
		return h1.Game+"|"+h1.Player < h2.Game+"|"+h2.Player
	}))
	if diff != "" {
		t.Error(diff)
	}
}
// filterHighScores returns copies of the elements of hs for which f reports
// true. Each element is copied before f is applied, so f may freely mutate
// its argument without affecting hs.
func filterHighScores(hs []*HighScore, f func(*HighScore) bool) []*HighScore {
	var kept []*HighScore
	for _, h := range hs {
		cp := *h
		if f(&cp) {
			kept = append(kept, &cp)
		}
	}
	return kept
}
// clearCollection deletes every document in coll, reporting failure via
// fataler (typically a *testing.T or *testing.B).
func clearCollection(fataler interface{ Fatalf(string, ...interface{}) }, coll *docstore.Collection) {
	err := coll.Query().Delete(context.Background())
	if err != nil {
		fataler.Fatalf("%+v", err)
	}
}
// forEach drains iter, allocating each target document with create and
// passing the populated document to handle. It stops silently at io.EOF and
// returns the first other error from the iterator or from handle.
func forEach(ctx context.Context, iter *ds.DocumentIterator, create func() interface{}, handle func(interface{}) error) error {
	for {
		doc := create()
		switch err := iter.Next(ctx, doc); {
		case err == io.EOF:
			return nil
		case err != nil:
			return err
		}
		if err := handle(doc); err != nil {
			return err
		}
	}
}
// mustCollect drains iter into a slice of docmaps, failing the test on error.
func mustCollect(ctx context.Context, t *testing.T, iter *ds.DocumentIterator) []docmap {
	var out []docmap
	err := forEach(ctx, iter,
		func() interface{} { return docmap{} },
		func(m interface{}) error {
			out = append(out, m.(docmap))
			return nil
		})
	if err != nil {
		t.Fatal(err)
	}
	return out
}
// mustCollectHighScores drains iter into a slice of HighScores, failing the
// test on error.
func mustCollectHighScores(ctx context.Context, t *testing.T, iter *ds.DocumentIterator) []*HighScore {
	scores, err := collectHighScores(ctx, iter)
	if err != nil {
		t.Fatal(err)
	}
	return scores
}
// collectHighScores drains iter into a slice of HighScores.
func collectHighScores(ctx context.Context, iter *ds.DocumentIterator) ([]*HighScore, error) {
	var scores []*HighScore
	add := func(h interface{}) error {
		scores = append(scores, h.(*HighScore))
		return nil
	}
	if err := forEach(ctx, iter, newHighScore, add); err != nil {
		return nil, err
	}
	return scores, nil
}
// testMultipleActions runs a single action list mixing Creates, Puts and
// Gets, verifies the reads see the writes, then deletes everything in a
// second list.
func testMultipleActions(t *testing.T, coll *ds.Collection, revField string) {
	ctx := context.Background()
	docs := []docmap{
		{KeyField: "testMultipleActions1", "s": "a"},
		{KeyField: "testMultipleActions2", "s": "b"},
		{KeyField: "testMultipleActions3", "s": "c"},
		{KeyField: "testMultipleActions4", "s": "d"},
		{KeyField: "testMultipleActions5", "s": "e"},
		{KeyField: "testMultipleActions6", "s": "f"},
		{KeyField: "testMultipleActions7", "s": "g"},
		{KeyField: "testMultipleActions8", "s": "h"},
		{KeyField: "testMultipleActions9", "s": "i"},
		{KeyField: "testMultipleActions10", "s": "j"},
		{KeyField: "testMultipleActions11", "s": "k"},
		{KeyField: "testMultipleActions12", "s": "l"},
	}
	actions := coll.Actions()
	// Writes: half via Create, half via Put.
	for i := 0; i < 6; i++ {
		actions.Create(docs[i])
	}
	for i := 6; i < len(docs); i++ {
		actions.Put(docs[i])
	}
	// Reads: Get each document back, selecting only the "s" field.
	gots := make([]docmap, len(docs))
	for i, doc := range docs {
		gots[i] = docmap{KeyField: doc[KeyField]}
		actions.Get(gots[i], docstore.FieldPath("s"))
	}
	if err := actions.Do(ctx); err != nil {
		t.Fatal(err)
	}
	for i, got := range gots {
		if diff := cmpDiff(got, docs[i]); diff != "" {
			t.Error(diff)
		}
	}
	// Deletes
	dels := coll.Actions()
	for _, got := range gots {
		dels.Delete(docmap{KeyField: got[KeyField]})
	}
	if err := dels.Do(ctx); err != nil {
		t.Fatal(err)
	}
}
// testUnorderedActions runs action lists whose operations may execute in any
// order, including a final list where some actions fail; it checks that the
// resulting ActionListError reports exactly the expected per-index errors.
// The exact sequence of Creates/Puts/Replaces/Deletes is the point of the
// test; do not reorder.
func testUnorderedActions(t *testing.T, coll *ds.Collection, revField string) {
	ctx := context.Background()
	must := func(err error) {
		t.Helper()
		if err != nil {
			t.Fatal(err)
		}
	}
	var docs []docmap
	for i := 0; i < 9; i++ {
		docs = append(docs, docmap{KeyField: fmt.Sprintf("testUnorderedActions%d", i), "s": fmt.Sprint(i)})
	}
	// compare checks gots against wants ignoring the revision field values.
	compare := func(gots, wants []docmap) {
		t.Helper()
		for i := 0; i < len(gots); i++ {
			got := gots[i]
			want := clone(wants[i])
			want[revField] = got[revField]
			if !cmp.Equal(got, want) {
				t.Errorf("index #%d:\ngot  %v\nwant %v", i, got, want)
			}
		}
	}
	// Put the first three docs.
	actions := coll.Actions()
	for i := 0; i < 6; i++ {
		actions.Create(docs[i])
	}
	must(actions.Do(ctx))
	// Replace the first three and put six more.
	actions = coll.Actions()
	for i := 0; i < 3; i++ {
		docs[i]["s"] = fmt.Sprintf("%d'", i)
		actions.Replace(docs[i])
	}
	for i := 3; i < 9; i++ {
		actions.Put(docs[i])
	}
	must(actions.Do(ctx))
	// Delete the first three, get the second three, and put three more.
	gdocs := []docmap{
		{KeyField: docs[3][KeyField]},
		{KeyField: docs[4][KeyField]},
		{KeyField: docs[5][KeyField]},
	}
	actions = coll.Actions()
	actions.Update(docs[6], ds.Mods{"s": "6'"})
	actions.Get(gdocs[0])
	actions.Delete(docs[0])
	actions.Delete(docs[1])
	actions.Update(docs[7], ds.Mods{"s": "7'"})
	actions.Get(gdocs[1])
	actions.Delete(docs[2])
	actions.Get(gdocs[2])
	actions.Update(docs[8], ds.Mods{"s": "8'"})
	must(actions.Do(ctx))
	compare(gdocs, docs[3:6])
	// At this point, the existing documents are 3 - 9.
	// Get the first four, try to create one that already exists, delete a
	// nonexistent doc, and put one. Only the Get of #3, the Delete and the Put
	// should succeed.
	actions = coll.Actions()
	for _, doc := range []docmap{
		{KeyField: docs[0][KeyField]},
		{KeyField: docs[1][KeyField]},
		{KeyField: docs[2][KeyField]},
		{KeyField: docs[3][KeyField]},
	} {
		actions.Get(doc)
	}
	docs[4][revField] = nil
	actions.Create(docs[4]) // create existing doc
	actions.Put(docs[5])
	// TODO(jba): Understand why the following line is necessary for dynamo but not the others.
	docs[0][revField] = nil
	actions.Delete(docs[0]) // delete nonexistent doc
	err := actions.Do(ctx)
	if err == nil {
		t.Fatal("want error, got nil")
	}
	alerr, ok := err.(docstore.ActionListError)
	if !ok {
		t.Fatalf("got %v (%T), want ActionListError", alerr, alerr)
	}
	// Check each failed action's index against the expectation above.
	for _, e := range alerr {
		switch i := e.Index; i {
		case 3, 5, 6:
			t.Errorf("index %d: got %v, want nil", i, e.Err)
		case 4, -1: // -1 for mongodb issue, see https://jira.mongodb.org/browse/GODRIVER-1028
			if ec := gcerrors.Code(e.Err); ec != gcerrors.AlreadyExists &&
				ec != gcerrors.FailedPrecondition { // TODO(shantuo): distinguish this case for dyanmo
				t.Errorf("index 4: create an existing document: got %v, want error", e.Err)
			}
		default:
			if gcerrors.Code(e.Err) != gcerrors.NotFound {
				t.Errorf("index %d: got %v, want NotFound", i, e.Err)
			}
		}
	}
}
// Verify that BeforeDo is invoked, and its as function behaves as expected.
func testBeforeDo(t *testing.T, newHarness HarnessMaker) {
	withHarnessAndCollection(t, newHarness, func(t *testing.T, ctx context.Context, h Harness, coll *ds.Collection) {
		var called bool
		// beforeDo is the callback under test: it must reject nil and accept
		// at least one of the harness's declared BeforeDo types.
		beforeDo := func(asFunc func(interface{}) bool) error {
			called = true
			if asFunc(nil) {
				return errors.New("asFunc returned true when called with nil, want false")
			}
			// At least one of the expected types must return true. Special case: if
			// there are no types, then the as function never returns true, so skip the
			// check.
			if len(h.BeforeDoTypes()) > 0 {
				found := false
				for _, b := range h.BeforeDoTypes() {
					v := reflect.New(reflect.TypeOf(b)).Interface()
					if asFunc(v) {
						found = true
						break
					}
				}
				if !found {
					return errors.New("none of the BeforeDoTypes works with the as function")
				}
			}
			return nil
		}
		// check runs one action type through both a failing and a passing
		// BeforeDo callback.
		check := func(f func(*ds.ActionList)) {
			t.Helper()
			// First, verify that if a BeforeDo function returns an error, so does ActionList.Do.
			// We depend on that for the rest of the test.
			al := coll.Actions().BeforeDo(func(func(interface{}) bool) error { return errors.New("") })
			f(al)
			if err := al.Do(ctx); err == nil {
				t.Error("beforeDo returning error: got nil from Do, want error")
				return
			}
			called = false
			al = coll.Actions().BeforeDo(beforeDo)
			f(al)
			if err := al.Do(ctx); err != nil {
				t.Error(err)
				return
			}
			if !called {
				t.Error("BeforeDo function never called")
			}
		}
		doc := docmap{KeyField: "testBeforeDo"}
		check(func(l *docstore.ActionList) { l.Create(doc) })
		check(func(l *docstore.ActionList) { l.Replace(doc) })
		check(func(l *docstore.ActionList) { l.Put(doc) })
		check(func(l *docstore.ActionList) { l.Update(doc, docstore.Mods{"a": 1}) })
		check(func(l *docstore.ActionList) { l.Get(doc) })
		check(func(l *docstore.ActionList) { l.Delete(doc) })
	})
}
// Verify that BeforeQuery is invoked, and its as function behaves as expected.
func testBeforeQuery(t *testing.T, newHarness HarnessMaker) {
withHarnessAndCollection(t, newHarness, func(t *testing.T, ctx context.Context, h Harness, coll *ds.Collection) {
var called bool
beforeQuery := func(asFunc func(interface{}) bool) error {
called = true
if asFunc(nil) {
return errors.New("asFunc returned true when called with nil, want false")
}
// At least one of the expected types must return true. Special case: if
// there are no types, then the as function never returns true, so skip the
// check.
if len(h.BeforeQueryTypes()) > 0 {
found := false
for _, b := range h.BeforeQueryTypes() {
v := reflect.New(reflect.TypeOf(b)).Interface()
if asFunc(v) {
found = true
break
}
}
if !found {
return errors.New("none of the BeforeQueryTypes works with the as function")
}
}
return nil
}
iter := coll.Query().BeforeQuery(beforeQuery).Get(ctx)
if err := iter.Next(ctx, docmap{}); err != io.EOF {
t.Fatalf("got %v, wanted io.EOF", err)
}
if !called {
t.Error("BeforeQuery function never called for Get")
}
called = false
if err := coll.Query().BeforeQuery(beforeQuery).Delete(ctx); err != nil {
t.Fatal(err)
}
if !called {
t.Error("BeforeQuery function never called for Delete")
}
called = false
if err := coll.Query().BeforeQuery(beforeQuery).Update(ctx, ds.Mods{"a": 1}); err != nil {
t.Fatal(err)
}
if !called {
t.Error("BeforeQuery function never called for Update")
}
})
}
func testAs(t *testing.T, coll *ds.Collection, st AsTest) {
// Verify Collection.As
if err := st.CollectionCheck(coll); err != nil {
t.Error(err)
}
ctx := context.Background()
// Query
qs := []*docstore.Query{
coll.Query().Where("Game", "=", game3),
// Note: don't use filter on Player, the test table has Player as the
// partition key of a Global Secondary Index, which doesn't support
// ConsistentRead mode, which is what the As test does in its BeforeQuery
// function.
coll.Query().Where("Score", ">", 50),
}
for _, q := range qs {
iter := q.Get(ctx)
if err := st.QueryCheck(iter); err != nil {
t.Error(err)
}
}
// ErrorCheck
doc := &HighScore{game3, "steph", 24, date(4, 25), nil}
if err := coll.Create(ctx, doc); err != nil {
t.Fatal(err)
}
doc.DocstoreRevision = nil
if err := coll.Create(ctx, doc); err == nil {
t.Fatal("got nil error from creating an existing item, want an error")
} else {
if alerr, ok := err.(docstore.ActionListError); ok {
for _, aerr := range alerr {
if checkerr := st.ErrorCheck(coll, aerr.Err); checkerr != nil {
t.Error(checkerr)
}
}
} else if checkerr := st.ErrorCheck(coll, err); checkerr != nil {
t.Error(checkerr)
}
}
}
func clone(m docmap) docmap {
r := docmap{}
for k, v := range m {
r[k] = v
}
return r
}
func cmpDiff(a, b interface{}, opts ...cmp.Option) string {
// Firestore revisions can be protos.
return cmp.Diff(a, b, append([]cmp.Option{cmp.Comparer(proto.Equal)}, opts...)...)
}
func checkCode(t *testing.T, err error, code gcerrors.ErrorCode) {
t.Helper()
if gcerrors.Code(err) != code {
t.Errorf("got %v, want %s", err, code)
}
}
| 1 | 18,864 | Why did you remove the struct tags? | google-go-cloud | go |
@@ -178,6 +178,11 @@ class FinalStatus(Reporter, AggregatorListener, FunctionalAggregatorListener):
def __dump_xml(self, filename):
self.log.info("Dumping final status as XML: %s", filename)
root = etree.Element("FinalStatus")
+ report_info = get_bza_report_info(self.engine, self.log)
+ if report_info:
+ link, text = report_info[0]
+ report_element = etree.Element("BlazeMeterReport", link=link, name=text)
+ root.append(report_element)
if self.last_sec:
for label, kpiset in iteritems(self.last_sec[DataPoint.CUMULATIVE]):
root.append(self.__get_xml_summary(label, kpiset)) | 1 | """
Basics of reporting capabilities
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import csv
import os
import time
from collections import Counter, OrderedDict
from datetime import datetime
from bzt import TaurusInternalException, TaurusConfigError
from bzt.engine import Reporter
from bzt.modules.aggregator import DataPoint, KPISet, AggregatorListener, ResultsProvider
from bzt.modules.blazemeter import BlazeMeterUploader, CloudProvisioning
from bzt.modules.functional import FunctionalAggregator, FunctionalAggregatorListener
from bzt.modules.passfail import PassFailStatus
from bzt.six import etree, iteritems, string_types
from bzt.utils import get_full_path
class FinalStatus(Reporter, AggregatorListener, FunctionalAggregatorListener):
"""
A reporter that prints short statistics on test end
"""
def __init__(self):
super(FinalStatus, self).__init__()
self.last_sec = None
self.cumulative_results = None
self.start_time = time.time()
self.end_time = None
def startup(self):
self.start_time = time.time()
def prepare(self):
super(FinalStatus, self).prepare()
if isinstance(self.engine.aggregator, ResultsProvider):
self.engine.aggregator.add_listener(self)
elif isinstance(self.engine.aggregator, FunctionalAggregator):
self.engine.aggregator.add_listener(self)
def aggregated_second(self, data):
"""
Just store the latest info
:type data: bzt.modules.aggregator.DataPoint
"""
self.last_sec = data
def aggregated_results(self, results, cumulative_results):
"""
Just store the latest info
:type cumulative_results: bzt.modules.functional.ResultsTree
:type results: bzt.modules.functional.ResultsTree
"""
self.cumulative_results = cumulative_results
def post_process(self):
"""
Log basic stats
"""
super(FinalStatus, self).post_process()
self.end_time = time.time()
if self.parameters.get("test-duration", True):
self.__report_duration()
if self.last_sec:
summary_kpi = self.last_sec[DataPoint.CUMULATIVE][""]
if self.parameters.get("summary", True):
self.__report_samples_count(summary_kpi)
if self.parameters.get("percentiles", True):
self.__report_percentiles(summary_kpi)
if self.parameters.get("failed-labels", False):
self.__report_failed_labels(self.last_sec[DataPoint.CUMULATIVE])
if self.parameters.get("dump-xml", None):
self.__dump_xml(self.parameters.get("dump-xml"))
if self.parameters.get("dump-csv", None):
self.__dump_csv(self.parameters.get("dump-csv"))
elif self.cumulative_results:
self.__report_summary()
report_mode = self.parameters.get("report-tests", "failed")
if report_mode == "failed":
self.__report_failed_tests()
else:
self.__report_all_tests()
def __plural(self, count, noun):
return noun + 's' if count > 1 else noun
def __report_all_tests(self):
for test_suite in self.cumulative_results.test_suites():
for case in self.cumulative_results.test_cases(test_suite):
full_name = case.test_suite + "." + case.test_case
self.log.info("Test %s - %s", full_name, case.status)
print_trace = self.parameters.get("print-stacktrace", True)
if print_trace and case.error_trace:
self.log.info("Stacktrace:\n%s", case.error_trace)
def __report_failed_tests(self):
for test_suite in self.cumulative_results.test_suites():
for case in self.cumulative_results.test_cases(test_suite):
if case.status in ("FAILED", "BROKEN"):
full_name = case.test_suite + "." + case.test_case
self.log.info("Test %s failed:\n%s", full_name, case.error_trace)
def __report_summary(self):
status_counter = Counter()
for test_suite in self.cumulative_results.test_suites():
for case in self.cumulative_results.test_cases(test_suite):
status_counter[case.status] += 1
total = sum(count for _, count in iteritems(status_counter))
self.log.info("Total: %s %s", total, self.__plural(total, 'test')) # FIXME: it's actually not tests, but test cases
def __report_samples_count(self, summary_kpi_set):
"""
reports samples count
"""
if summary_kpi_set[KPISet.SAMPLE_COUNT]:
err_rate = 100 * summary_kpi_set[KPISet.FAILURES] / float(summary_kpi_set[KPISet.SAMPLE_COUNT])
self.log.info("Samples count: %s, %.2f%% failures", summary_kpi_set[KPISet.SAMPLE_COUNT], err_rate)
def __report_percentiles(self, summary_kpi_set):
"""
reports percentiles
"""
fmt = "Average times: total %.3f, latency %.3f, connect %.3f"
self.log.info(fmt, summary_kpi_set[KPISet.AVG_RESP_TIME], summary_kpi_set[KPISet.AVG_LATENCY],
summary_kpi_set[KPISet.AVG_CONN_TIME])
for key in sorted(summary_kpi_set[KPISet.PERCENTILES].keys(), key=float):
self.log.info("Percentile %.1f%%: %.3f", float(key), summary_kpi_set[KPISet.PERCENTILES][key])
def __report_failed_labels(self, cumulative):
"""
reports failed labels
"""
report_template = "%d failed samples: %s"
sorted_labels = sorted(cumulative.keys())
for sample_label in sorted_labels:
if sample_label != "":
failed_samples_count = cumulative[sample_label]['fail']
if failed_samples_count:
self.log.info(report_template, failed_samples_count, sample_label)
def __report_duration(self):
"""
asks executors start_time and end_time, provides time delta
"""
date_start = datetime.fromtimestamp(int(self.start_time))
date_end = datetime.fromtimestamp(int(self.end_time))
self.log.info("Test duration: %s", date_end - date_start)
def __dump_xml(self, filename):
self.log.info("Dumping final status as XML: %s", filename)
root = etree.Element("FinalStatus")
if self.last_sec:
for label, kpiset in iteritems(self.last_sec[DataPoint.CUMULATIVE]):
root.append(self.__get_xml_summary(label, kpiset))
with open(get_full_path(filename), 'wb') as fhd:
tree = etree.ElementTree(root)
tree.write(fhd, pretty_print=True, encoding="UTF-8", xml_declaration=True)
def __get_xml_summary(self, label, kpiset):
elem = etree.Element("Group", label=label)
for kpi_name, kpi_val in iteritems(kpiset):
if kpi_name in ('errors', 'rt'):
continue
if isinstance(kpi_val, dict):
for param_name, param_val in iteritems(kpi_val):
elem.append(self.__get_kpi_xml(kpi_name, param_val, param_name))
else:
elem.append(self.__get_kpi_xml(kpi_name, kpi_val))
return elem
def __get_kpi_xml(self, kpi_name, kpi_val, param=None):
kpi = etree.Element(kpi_name)
kpi.attrib['value'] = self.__val_to_str(kpi_val)
elm_name = etree.Element("name")
elm_name.text = kpi_name
if param is not None:
kpi.attrib['param'] = self.__val_to_str(param)
elm_name.text += "/" + param
kpi.append(elm_name)
elm_value = etree.Element("value")
elm_value.text = self.__val_to_str(kpi_val)
kpi.append(elm_value)
return kpi
def __val_to_str(self, kpi_val):
if isinstance(kpi_val, float):
return '%.5f' % kpi_val
elif isinstance(kpi_val, int):
return '%d' % kpi_val
elif isinstance(kpi_val, string_types):
return kpi_val
else:
raise TaurusInternalException("Unhandled kpi type: %s" % type(kpi_val))
def __dump_csv(self, filename):
self.log.info("Dumping final status as CSV: %s", filename)
# FIXME: what if there's no last_sec
with open(get_full_path(filename), 'wt') as fhd:
writer = csv.DictWriter(fhd, self.__get_csv_dict('', self.last_sec[DataPoint.CUMULATIVE]['']).keys())
writer.writeheader()
for label, kpiset in iteritems(self.last_sec[DataPoint.CUMULATIVE]):
writer.writerow(self.__get_csv_dict(label, kpiset))
def __get_csv_dict(self, label, kpiset):
kpi_copy = copy.deepcopy(kpiset)
# sort label
res = OrderedDict()
for key in sorted(kpi_copy.keys()):
res[key] = kpi_copy[key]
for level, val in iteritems(kpiset[KPISet.PERCENTILES]):
res['perc_%s' % level] = val
for rcd, val in iteritems(kpiset[KPISet.RESP_CODES]):
res['rc_%s' % rcd] = val
for key in res:
if isinstance(res[key], float):
res[key] = "%.5f" % res[key]
del res['errors']
del res['rt']
del res['rc']
del res['perc']
res['label'] = label
return res
class JUnitXMLReporter(Reporter, AggregatorListener):
"""
A reporter that exports results in Jenkins JUnit XML format.
"""
def __init__(self):
super(JUnitXMLReporter, self).__init__()
self.last_second = None
self.report_file_path = None
def prepare(self):
if isinstance(self.engine.aggregator, ResultsProvider):
self.engine.aggregator.add_listener(self)
def aggregated_second(self, data):
self.last_second = data
def post_process(self):
"""
Get report data, generate xml report.
"""
filename = self.parameters.get("filename", None)
if not filename:
filename = self.engine.create_artifact(XUnitFileWriter.REPORT_FILE_NAME, XUnitFileWriter.REPORT_FILE_EXT)
self.parameters["filename"] = filename # reflect it in effective config
test_data_source = self.parameters.get("data-source", "sample-labels")
if test_data_source == "sample-labels":
if not self.last_second:
self.log.warning("No last second data to generate XUnit.xml")
else:
writer = XUnitFileWriter(self.engine, 'sample_labels')
self.process_sample_labels(writer)
writer.save_report(filename)
elif test_data_source == "pass-fail":
writer = XUnitFileWriter(self.engine, 'bzt_pass_fail')
self.process_pass_fail(writer)
writer.save_report(filename)
else:
raise TaurusConfigError("Unsupported data source: %s" % test_data_source)
self.report_file_path = filename # TODO: just for backward compatibility, remove later
def process_sample_labels(self, xunit):
"""
:type xunit: XUnitFileWriter
"""
labels = self.last_second[DataPoint.CUMULATIVE]
for key in sorted(labels.keys()):
if key == "": # skip total label
continue
errors = []
for er_dict in labels[key][KPISet.ERRORS]:
err_message = str(er_dict["rc"])
err_type = str(er_dict["msg"])
err_desc = "total errors of this type:" + str(er_dict["cnt"])
err_element = etree.Element("error", message=err_message, type=err_type)
err_element.text = err_desc
errors.append(err_element)
xunit.add_test_case(key, errors)
def process_pass_fail(self, xunit):
"""
:type xunit: XUnitFileWriter
"""
mods = self.engine.reporters + self.engine.services # TODO: remove it after passfail is only reporter
pass_fail_objects = [_x for _x in mods if isinstance(_x, PassFailStatus)]
self.log.debug("Processing passfail objects: %s", pass_fail_objects)
fail_criteria = []
for pf_obj in pass_fail_objects:
if pf_obj.criteria:
for _fc in pf_obj.criteria:
fail_criteria.append(_fc)
for fc_obj in fail_criteria:
if 'label' in fc_obj.config:
data = (fc_obj.config['subject'], fc_obj.config['label'], fc_obj.config['condition'],
fc_obj.config['threshold'])
tpl = "%s of %s%s%s"
else:
data = (fc_obj.config['subject'], fc_obj.config['condition'], fc_obj.config['threshold'])
tpl = "%s%s%s"
if fc_obj.config['timeframe']:
tpl += " for %s"
data += (fc_obj.config['timeframe'],)
disp_name = tpl % data
if fc_obj.is_triggered and fc_obj.fail:
errors = [etree.Element("error", message=str(fc_obj), type="pass/fail criteria triggered")]
else:
errors = ()
xunit.add_test_case(disp_name, errors)
class XUnitFileWriter(object):
REPORT_FILE_NAME = "xunit"
REPORT_FILE_EXT = ".xml"
def __init__(self, engine, suite_name):
"""
:type engine: bzt.engine.Engine
:type suite_name: str
"""
super(XUnitFileWriter, self).__init__()
self.engine = engine
self.log = engine.log.getChild(self.__class__.__name__)
self.test_suite = etree.Element("testsuite", name=suite_name, package="bzt")
bza_report_info = self.get_bza_report_info()
self.class_name = bza_report_info[0][1] if bza_report_info else "bzt-" + str(self.__hash__())
self.report_urls = [info_item[0] for info_item in bza_report_info]
def get_bza_report_info(self):
"""
:return: [(url, test), (url, test), ...]
"""
result = []
if isinstance(self.engine.provisioning, CloudProvisioning):
cloud_prov = self.engine.provisioning
report_url = "Cloud report link: %s\n" % cloud_prov.results_url
test_name = cloud_prov.settings.get('test', None)
result.append((report_url, test_name if test_name is not None else report_url))
else:
# FIXME: reworking it all
bza_reporters = [_x for _x in self.engine.reporters if isinstance(_x, BlazeMeterUploader)]
""":type : list[bzt.modules.blazemeter.BlazeMeterUploader]"""
for bza_reporter in bza_reporters:
if bza_reporter.results_url:
report_url = "BlazeMeter report link: %s\n" % bza_reporter.results_url
test_name = bza_reporter.parameters.get("test", None)
result.append((report_url, test_name if test_name is not None else report_url))
if len(result) > 1:
self.log.warning("More than one blazemeter reporter found")
return result
def save_report(self, fname):
"""
:type fname: str
"""
try:
if os.path.exists(fname):
self.log.warning("File %s already exists, it will be overwritten", fname)
else:
dirname = os.path.dirname(fname)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
etree_obj = etree.ElementTree(self.test_suite)
self.log.info("Writing JUnit XML report into: %s", fname)
with open(get_full_path(fname), 'wb') as _fds:
etree_obj.write(_fds, xml_declaration=True, encoding="UTF-8", pretty_print=True)
except BaseException:
raise TaurusInternalException("Cannot create file %s" % fname)
def add_test_case(self, case_name, children=()):
"""
:type case_name: str
:type children: list[bzt.six.etree.Element]
"""
test_case = etree.Element("testcase", classname=self.class_name, name=case_name)
if self.report_urls:
system_out_etree = etree.SubElement(test_case, "system-out")
system_out_etree.text = "".join(self.report_urls)
for child in children:
test_case.append(child)
self.test_suite.append(test_case)
| 1 | 14,333 | Let's be neutral with tag names. Let's just have "ReportURL" | Blazemeter-taurus | py |
@@ -265,7 +265,8 @@ func TestEmptySpanData(t *testing.T) {
func TestSpanData(t *testing.T) {
// Full test of span data transform.
- startTime := time.Now()
+ // March 31, 2020 5:01:26 1234nanos (UTC)
+ startTime := time.Unix(1585674086, 1234)
endTime := startTime.Add(10 * time.Second)
spanData := &export.SpanData{
SpanContext: core.SpanContext{ | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transform
import (
"strconv"
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/google/go-cmp/cmp"
tracepb "github.com/open-telemetry/opentelemetry-proto/gen/go/trace/v1"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"go.opentelemetry.io/otel/api/core"
apitrace "go.opentelemetry.io/otel/api/trace"
export "go.opentelemetry.io/otel/sdk/export/trace"
"go.opentelemetry.io/otel/sdk/resource"
)
func TestSpanKind(t *testing.T) {
for _, test := range []struct {
kind apitrace.SpanKind
expected tracepb.Span_SpanKind
}{
{
apitrace.SpanKindInternal,
tracepb.Span_INTERNAL,
},
{
apitrace.SpanKindClient,
tracepb.Span_CLIENT,
},
{
apitrace.SpanKindServer,
tracepb.Span_SERVER,
},
{
apitrace.SpanKindProducer,
tracepb.Span_PRODUCER,
},
{
apitrace.SpanKindConsumer,
tracepb.Span_CONSUMER,
},
{
apitrace.SpanKind(-1),
tracepb.Span_SPAN_KIND_UNSPECIFIED,
},
} {
assert.Equal(t, test.expected, spanKind(test.kind))
}
}
func TestNilSpanEvent(t *testing.T) {
assert.Nil(t, spanEvents(nil))
}
func TestEmptySpanEvent(t *testing.T) {
assert.Nil(t, spanEvents([]export.Event{}))
}
func TestSpanEvent(t *testing.T) {
attrs := []core.KeyValue{core.Key("one").Int(1), core.Key("two").Int(2)}
now := time.Now()
got := spanEvents([]export.Event{
{
Name: "test 1",
Attributes: []core.KeyValue{},
Time: now,
},
{
Name: "test 2",
Attributes: attrs,
Time: now,
},
})
if !assert.Len(t, got, 2) {
return
}
uNow := uint64(now.Nanosecond())
assert.Equal(t, &tracepb.Span_Event{Name: "test 1", Attributes: nil, TimeUnixNano: uNow}, got[0])
// Do not test Attributes directly, just that the return value goes to the correct field.
assert.Equal(t, &tracepb.Span_Event{Name: "test 2", Attributes: Attributes(attrs), TimeUnixNano: uNow}, got[1])
}
func TestExcessiveSpanEvents(t *testing.T) {
e := make([]export.Event, maxMessageEventsPerSpan+1)
for i := 0; i < maxMessageEventsPerSpan+1; i++ {
e[i] = export.Event{Name: strconv.Itoa(i)}
}
assert.Len(t, e, maxMessageEventsPerSpan+1)
got := spanEvents(e)
assert.Len(t, got, maxMessageEventsPerSpan)
// Ensure the drop order.
assert.Equal(t, strconv.Itoa(maxMessageEventsPerSpan-1), got[len(got)-1].Name)
}
func TestNilLinks(t *testing.T) {
assert.Nil(t, links(nil))
}
func TestEmptyLinks(t *testing.T) {
assert.Nil(t, links([]apitrace.Link{}))
}
func TestLinks(t *testing.T) {
attrs := []core.KeyValue{core.Key("one").Int(1), core.Key("two").Int(2)}
l := []apitrace.Link{
{},
{
SpanContext: core.EmptySpanContext(),
Attributes: attrs,
},
}
got := links(l)
// Make sure we get the same number back first.
if !assert.Len(t, got, 2) {
return
}
// Empty should be empty.
expected := &tracepb.Span_Link{
TraceId: []uint8{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
SpanId: []uint8{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
}
assert.Equal(t, expected, got[0])
// Do not test Attributes directly, just that the return value goes to the correct field.
expected.Attributes = Attributes(attrs)
assert.Equal(t, expected, got[1])
// Changes to our links should not change the produced links.
l[1].TraceID[0] = byte(0x1)
l[1].SpanID[0] = byte(0x1)
assert.Equal(t, expected, got[1])
}
func TestStatus(t *testing.T) {
for _, test := range []struct {
grpcCode codes.Code
message string
otlpStatus tracepb.Status_StatusCode
}{
{
codes.OK,
"test OK",
tracepb.Status_Ok,
},
{
codes.Canceled,
//nolint
"test CANCELLED",
//nolint
tracepb.Status_Cancelled,
},
{
codes.Unknown,
"test UNKNOWN",
tracepb.Status_UnknownError,
},
{
codes.InvalidArgument,
"test INVALID_ARGUMENT",
tracepb.Status_InvalidArgument,
},
{
codes.DeadlineExceeded,
"test DEADLINE_EXCEEDED",
tracepb.Status_DeadlineExceeded,
},
{
codes.NotFound,
"test NOT_FOUND",
tracepb.Status_NotFound,
},
{
codes.AlreadyExists,
"test ALREADY_EXISTS",
tracepb.Status_AlreadyExists,
},
{
codes.PermissionDenied,
"test PERMISSION_DENIED",
tracepb.Status_PermissionDenied,
},
{
codes.ResourceExhausted,
"test RESOURCE_EXHAUSTED",
tracepb.Status_ResourceExhausted,
},
{
codes.FailedPrecondition,
"test FAILED_PRECONDITION",
tracepb.Status_FailedPrecondition,
},
{
codes.Aborted,
"test ABORTED",
tracepb.Status_Aborted,
},
{
codes.OutOfRange,
"test OUT_OF_RANGE",
tracepb.Status_OutOfRange,
},
{
codes.Unimplemented,
"test UNIMPLEMENTED",
tracepb.Status_Unimplemented,
},
{
codes.Internal,
"test INTERNAL",
tracepb.Status_InternalError,
},
{
codes.Unavailable,
"test UNAVAILABLE",
tracepb.Status_Unavailable,
},
{
codes.DataLoss,
"test DATA_LOSS",
tracepb.Status_DataLoss,
},
{
codes.Unauthenticated,
"test UNAUTHENTICATED",
tracepb.Status_Unauthenticated,
},
} {
expected := &tracepb.Status{Code: test.otlpStatus, Message: test.message}
assert.Equal(t, expected, status(test.grpcCode, test.message))
}
}
func TestNilSpan(t *testing.T) {
assert.Nil(t, span(nil))
}
func TestNilSpanData(t *testing.T) {
assert.Nil(t, SpanData(nil))
}
func TestEmptySpanData(t *testing.T) {
assert.Nil(t, SpanData(nil))
}
func TestSpanData(t *testing.T) {
// Full test of span data transform.
startTime := time.Now()
endTime := startTime.Add(10 * time.Second)
spanData := &export.SpanData{
SpanContext: core.SpanContext{
TraceID: core.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F},
SpanID: core.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8},
},
SpanKind: apitrace.SpanKindServer,
ParentSpanID: core.SpanID{0xEF, 0xEE, 0xED, 0xEC, 0xEB, 0xEA, 0xE9, 0xE8},
Name: "span data to span data",
StartTime: startTime,
EndTime: endTime,
MessageEvents: []export.Event{
{Time: startTime,
Attributes: []core.KeyValue{
core.Key("CompressedByteSize").Uint64(512),
},
},
{Time: endTime,
Attributes: []core.KeyValue{
core.Key("MessageEventType").String("Recv"),
},
},
},
Links: []apitrace.Link{
{
SpanContext: core.SpanContext{
TraceID: core.TraceID{0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF},
SpanID: core.SpanID{0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7},
TraceFlags: 0,
},
Attributes: []core.KeyValue{
core.Key("LinkType").String("Parent"),
},
},
{
SpanContext: core.SpanContext{
TraceID: core.TraceID{0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF},
SpanID: core.SpanID{0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7},
TraceFlags: 0,
},
Attributes: []core.KeyValue{
core.Key("LinkType").String("Child"),
},
},
},
StatusCode: codes.Internal,
StatusMessage: "utterly unrecognized",
HasRemoteParent: true,
Attributes: []core.KeyValue{
core.Key("timeout_ns").Int64(12e9),
},
DroppedAttributeCount: 1,
DroppedMessageEventCount: 2,
DroppedLinkCount: 3,
Resource: resource.New(core.Key("rk1").String("rv1"), core.Key("rk2").Int64(5)),
}
// Not checking resource as the underlying map of our Resource makes
// ordering impossible to guarantee on the output. The Resource
// transform function has unit tests that should suffice.
expectedSpan := &tracepb.Span{
TraceId: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F},
SpanId: []byte{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8},
ParentSpanId: []byte{0xEF, 0xEE, 0xED, 0xEC, 0xEB, 0xEA, 0xE9, 0xE8},
Name: spanData.Name,
Kind: tracepb.Span_SERVER,
StartTimeUnixNano: uint64(startTime.Nanosecond()),
EndTimeUnixNano: uint64(endTime.Nanosecond()),
Status: status(spanData.StatusCode, spanData.StatusMessage),
Events: spanEvents(spanData.MessageEvents),
Links: links(spanData.Links),
Attributes: Attributes(spanData.Attributes),
DroppedAttributesCount: 1,
DroppedEventsCount: 2,
DroppedLinksCount: 3,
}
got := SpanData([]*export.SpanData{spanData})
if !assert.Len(t, got, 1) {
return
}
// Break the span down as large diffs can be hard to read.
actualSpans := got[0].GetInstrumentationLibrarySpans()
if !assert.Len(t, actualSpans, 1) && !assert.Len(t, actualSpans[0].Spans, 1) {
return
}
actualSpan := actualSpans[0].Spans[0]
if diff := cmp.Diff(expectedSpan, actualSpan, cmp.Comparer(proto.Equal)); diff != "" {
t.Fatalf("transformed span differs %v\n", diff)
}
}
| 1 | 11,729 | Use an explicit time to ensure conversion is not copy-paste and wrong. | open-telemetry-opentelemetry-go | go |
@@ -804,7 +804,11 @@ void handle_lookup_account_poll(GUI_RPC_CONN& grc) {
grc.lookup_account_op.error_num
);
} else {
- grc.mfout.printf("%s", grc.lookup_account_op.reply.c_str());
+ const char *p = grc.lookup_account_op.reply.c_str();
+ const char *q = strstr(p, "<account_out");
+ if (!q) q = strstr(p, "<error");
+ if (!q) q = "<account_out/>\n";
+ grc.mfout.printf("%s", q);
}
}
| 1 | // This file is part of BOINC.
// http://boinc.berkeley.edu
// Copyright (C) 2008 University of California
//
// BOINC is free software; you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License
// as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// BOINC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with BOINC. If not, see <http://www.gnu.org/licenses/>.
// GUI RPC server side (the actual RPCs)
#include "cpp.h"
#ifdef __APPLE__
#include <Carbon/Carbon.h>
#endif
#ifdef _WIN32
#include "boinc_win.h"
#else
#include "config.h"
#include <cstdio>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#if HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#if HAVE_SYS_UN_H
#include <sys/un.h>
#endif
#include <vector>
#include <cstring>
#if HAVE_NETINET_IN_H
#include <netinet/in.h>
#endif
#if HAVE_NETINET_TCP_H
#include <netinet/tcp.h>
#endif
#if HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
#endif
#ifdef _MSC_VER
#define snprintf _snprintf
#endif
#include "error_numbers.h"
#include "filesys.h"
#include "network.h"
#include "parse.h"
#include "str_replace.h"
#include "str_util.h"
#include "url.h"
#include "util.h"
#include "client_state.h"
#include "client_msgs.h"
#include "client_state.h"
#include "cs_proxy.h"
#include "cs_notice.h"
#include "file_names.h"
#include "project.h"
#include "result.h"
using std::string;
using std::vector;
static void auth_failure(MIOFILE& fout) {
fout.printf("<unauthorized/>\n");
}
void GUI_RPC_CONN::handle_auth1(MIOFILE& fout) {
sprintf(nonce, "%f", dtime());
fout.printf("<nonce>%s</nonce>\n", nonce);
}
int GUI_RPC_CONN::handle_auth2(char* buf, MIOFILE& fout) {
char nonce_hash[256], nonce_hash_correct[256], buf2[512];
if (!parse_str(buf, "<nonce_hash>", nonce_hash, 256)) {
auth_failure(fout);
return ERR_AUTHENTICATOR;
}
snprintf(buf2, sizeof(buf2), "%s%s", nonce, gstate.gui_rpcs.password);
md5_block((const unsigned char*)buf2, (int)strlen(buf2), nonce_hash_correct);
if (strcmp(nonce_hash, nonce_hash_correct)) {
auth_failure(fout);
return ERR_AUTHENTICATOR;
}
fout.printf("<authorized/>\n");
auth_needed = false;
return 0;
}
// client passes its version, but ignore it for now
//
static void handle_exchange_versions(GUI_RPC_CONN& grc) {
grc.mfout.printf(
"<server_version>\n"
" <major>%d</major>\n"
" <minor>%d</minor>\n"
" <release>%d</release>\n"
"</server_version>\n",
BOINC_MAJOR_VERSION,
BOINC_MINOR_VERSION,
BOINC_RELEASE
);
}
static void handle_get_simple_gui_info(GUI_RPC_CONN& grc) {
unsigned int i;
grc.mfout.printf("<simple_gui_info>\n");
for (i=0; i<gstate.projects.size(); i++) {
PROJECT* p = gstate.projects[i];
p->write_state(grc.mfout, true);
}
gstate.write_tasks_gui(grc.mfout, true);
grc.mfout.printf("</simple_gui_info>\n");
}
static void handle_get_project_status(GUI_RPC_CONN& grc) {
unsigned int i;
grc.mfout.printf("<projects>\n");
for (i=0; i<gstate.projects.size(); i++) {
PROJECT* p = gstate.projects[i];
p->write_state(grc.mfout, true);
}
grc.mfout.printf("</projects>\n");
}
// Return a disk-usage summary:
// total/free space on the volume, per-project usage,
// non-project BOINC usage, and the allowed usage per prefs.
//
static void handle_get_disk_usage(GUI_RPC_CONN& grc) {
    unsigned int i;
    double size, boinc_non_project, d_allowed, boinc_total;

    grc.mfout.printf("<disk_usage_summary>\n");
    int retval = get_filesystem_info(
        gstate.host_info.d_total, gstate.host_info.d_free
    );
    if (retval) {
        msg_printf(0, MSG_INTERNAL_ERROR,
            "get_filesystem_info(): %s", boincerror(retval)
        );
    }
    // usage by the data dir and locale dir, excluding project dirs
    //
    dir_size(".", boinc_non_project, false);
    dir_size("locale", size, false);
    boinc_non_project += size;
#ifdef __APPLE__
    if (gstate.launched_by_manager) {
        // If launched by Manager, get Manager's size on disk
        ProcessSerialNumber managerPSN;
        FSRef ourFSRef;
        char path[MAXPATHLEN];
        double manager_size = 0.0;
        OSStatus err;
        err = GetProcessForPID(getppid(), &managerPSN);
        if (! err) err = GetProcessBundleLocation(&managerPSN, &ourFSRef);
        if (! err) err = FSRefMakePath (&ourFSRef, (UInt8*)path, sizeof(path));
        if (! err) dir_size(path, manager_size, true);
        if (! err) boinc_non_project += manager_size;
    }
#endif
    boinc_total = boinc_non_project;
    gstate.get_disk_usages();
    for (i=0; i<gstate.projects.size(); i++) {
        PROJECT* p = gstate.projects[i];
        grc.mfout.printf(
            "<project>\n"
            "  <master_url>%s</master_url>\n"
            "  <disk_usage>%f</disk_usage>\n"
            "</project>\n",
            p->master_url, p->disk_usage
        );
        boinc_total += p->disk_usage;
    }
    d_allowed = gstate.allowed_disk_usage(gstate.total_disk_usage);
    grc.mfout.printf(
        "<d_total>%f</d_total>\n"
        "<d_free>%f</d_free>\n"
        "<d_boinc>%f</d_boinc>\n"
        "<d_allowed>%f</d_allowed>\n",
        gstate.host_info.d_total,
        gstate.host_info.d_free,
        boinc_non_project,
        d_allowed
    );
    grc.mfout.printf("</disk_usage_summary>\n");
}
// Look up a project by master URL.
// On failure, write an <error> reply and return NULL.
//
static PROJECT* get_project(GUI_RPC_CONN& grc, string url) {
    if (url.empty()) {
        grc.mfout.printf("<error>Missing project URL</error>\n");
        return 0;
    }
    PROJECT* p = gstate.lookup_project(url.c_str());
    if (!p) {
        grc.mfout.printf("<error>No such project</error>\n");
        return 0 ;
    }
    return p;
}

// Parse a <project_url> element from the request,
// then look up the corresponding project (NULL + <error> on failure).
//
static PROJECT* get_project_parse(GUI_RPC_CONN& grc) {
    string url;
    while (!grc.xp.get_tag()) {
        if (grc.xp.parse_string("project_url", url)) continue;
    }
    return get_project(grc, url);
}
// Reset a project: discard its files and tasks, then re-fetch.
//
static void handle_project_reset(GUI_RPC_CONN& grc) {
    PROJECT* p = get_project_parse(grc);
    if (!p) return;
    gstate.set_client_state_dirty("Project modified by user");
    gstate.request_schedule_cpus("project reset by user");
    gstate.request_work_fetch("project reset by user");
    gstate.reset_project(p, false);
    grc.mfout.printf("<success/>\n");
}

// Suspend all activity for a project.
//
static void handle_project_suspend(GUI_RPC_CONN& grc) {
    PROJECT* p = get_project_parse(grc);
    if (!p) return;
    gstate.set_client_state_dirty("Project modified by user");
    msg_printf(p, MSG_INFO, "project suspended by user");
    p->suspend();
    grc.mfout.printf("<success/>\n");
}

// Resume activity for a previously suspended project.
//
static void handle_project_resume(GUI_RPC_CONN& grc) {
    PROJECT* p = get_project_parse(grc);
    if (!p) return;
    gstate.set_client_state_dirty("Project modified by user");
    msg_printf(p, MSG_INFO, "project resumed by user");
    p->resume();
    grc.mfout.printf("<success/>\n");
}

// Detach from a project.
// Refused if the project was attached via an account manager;
// in that case detaching must go through the AM web site.
//
static void handle_project_detach(GUI_RPC_CONN& grc) {
    PROJECT* p = get_project_parse(grc);
    if (!p) return;
    gstate.set_client_state_dirty("Project modified by user");
    if (p->attached_via_acct_mgr) {
        msg_printf(p, MSG_INFO,
            "This project must be detached using the account manager web site."
        );
        grc.mfout.printf("<error>must detach using account manager</error>");
        return;
    }
    gstate.detach_project(p);
    gstate.request_schedule_cpus("project detached by user");
    gstate.request_work_fetch("project detached by user");
    grc.mfout.printf("<success/>\n");
}

// Request an immediate scheduler RPC to a project
// (clears its RPC backoff as well).
//
static void handle_project_update(GUI_RPC_CONN& grc) {
    PROJECT* p = get_project_parse(grc);
    if (!p) return;
    gstate.set_client_state_dirty("Project modified by user");
    msg_printf(p, MSG_INFO, "update requested by user");
    p->sched_rpc_pending = RPC_REASON_USER_REQ;
    p->min_rpc_time = 0;
#if 1
    rss_feeds.trigger_fetch(p);
#endif
    gstate.request_work_fetch("project updated by user");
    grc.mfout.printf("<success/>\n");
}

// Stop fetching new tasks for a project ("no new tasks").
//
static void handle_project_nomorework(GUI_RPC_CONN& grc) {
     PROJECT* p = get_project_parse(grc);
    if (!p) return;
    gstate.set_client_state_dirty("Project modified by user");
    msg_printf(p, MSG_INFO, "work fetch suspended by user");
    p->dont_request_more_work = true;
    grc.mfout.printf("<success/>\n");
}

// Re-enable task fetch for a project.
//
static void handle_project_allowmorework(GUI_RPC_CONN& grc) {
    PROJECT* p = get_project_parse(grc);
    if (!p) return;
    gstate.set_client_state_dirty("Project modified by user");
    msg_printf(p, MSG_INFO, "work fetch resumed by user");
    p->dont_request_more_work = false;
    gstate.request_work_fetch("project work fetch resumed by user");
    grc.mfout.printf("<success/>\n");
}

// Mark a project to be detached once its current tasks finish;
// also stops further work fetch for it.
//
static void handle_project_detach_when_done(GUI_RPC_CONN& grc) {
    PROJECT* p = get_project_parse(grc);
    if (!p) return;
    gstate.set_client_state_dirty("Project modified by user");
    msg_printf(p, MSG_INFO, "detach when done set by user");
    p->detach_when_done = true;
    p->dont_request_more_work = true;
    grc.mfout.printf("<success/>\n");
}
// Clear a project's "detach when done" flag and resume work fetch.
//
static void handle_project_dont_detach_when_done(GUI_RPC_CONN& grc) {
    PROJECT* p = get_project_parse(grc);
    if (!p) return;
    gstate.set_client_state_dirty("Project modified by user");
    msg_printf(p, MSG_INFO, "detach when done cleared by user");
    // Bug fix: this handler previously assigned true here,
    // making it impossible to clear the flag via this RPC
    // (compare handle_project_detach_when_done, which sets it).
    p->detach_when_done = false;
    p->dont_request_more_work = false;
    grc.mfout.printf("<success/>\n");
}
// Set the CPU run mode (always / never / auto / restore),
// optionally for a limited duration (seconds; 0 = indefinitely).
//
static void handle_set_run_mode(GUI_RPC_CONN& grc) {
    double duration = 0;
    bool btemp;
    int mode=-1;      // -1 means "no mode tag seen"
    while (!grc.xp.get_tag()) {
        if (grc.xp.parse_double("duration", duration)) continue;
        if (grc.xp.parse_bool("always", btemp)) {
            mode = RUN_MODE_ALWAYS;
            continue;
        }
        if (grc.xp.parse_bool("never", btemp)) {
            mode = RUN_MODE_NEVER;
            continue;
        }
        if (grc.xp.parse_bool("auto", btemp)) {
            mode = RUN_MODE_AUTO;
            continue;
        }
        if (grc.xp.parse_bool("restore", btemp)) {
            mode = RUN_MODE_RESTORE;
            continue;
        }
    }
    if (mode < 0) {
        grc.mfout.printf("<error>Missing mode</error>\n");
        return;
    }
    gstate.cpu_run_mode.set(mode, duration);
    grc.mfout.printf("<success/>\n");
}

// Set the GPU run mode; same request format as set_run_mode.
// Also re-runs the CPU scheduler since GPU jobs may start/stop.
//
static void handle_set_gpu_mode(GUI_RPC_CONN& grc) {
    double duration = 0;
    bool btemp;
    int mode=-1;      // -1 means "no mode tag seen"
    while (!grc.xp.get_tag()) {
        if (grc.xp.parse_double("duration", duration)) continue;
        if (grc.xp.parse_bool("always", btemp)) {
            mode = RUN_MODE_ALWAYS;
            continue;
        }
        if (grc.xp.parse_bool("never", btemp)) {
            mode = RUN_MODE_NEVER;
            continue;
        }
        if (grc.xp.parse_bool("auto", btemp)) {
            mode = RUN_MODE_AUTO;
            continue;
        }
        if (grc.xp.parse_bool("restore", btemp)) {
            mode = RUN_MODE_RESTORE;
            continue;
        }
    }
    if (mode < 0) {
        grc.mfout.printf("<error>Missing mode</error>\n");
        return;
    }
    gstate.gpu_run_mode.set(mode, duration);
    gstate.request_schedule_cpus("GPU mode changed");
    grc.mfout.printf("<success/>\n");
}
// On Android, get product name, OS name, OS version, and MAC addr from GUI,
//
static void handle_set_host_info(GUI_RPC_CONN& grc) {
    while (!grc.xp.get_tag()) {
        if (grc.xp.match_tag("host_info")) {
            HOST_INFO hi;
            int retval = hi.parse(grc.xp);
            if (retval) {
                grc.mfout.printf("<error>host_info parse error</error>\n");
                return;
            }
            // take over only the fields the GUI actually supplied
            //
            if (strlen(hi.product_name)) {
                safe_strcpy(gstate.host_info.product_name, hi.product_name);
            }
            // this will always be "Android"
            //
            if (strlen(hi.os_name)) {
                safe_strcpy(gstate.host_info.os_name, hi.os_name);
            }
            // We already have the Linux kernel version;
            // append the Android version.
            //
            if (strlen(hi.os_version)) {
                // guard against appending twice on repeated RPCs
                if (!strstr(gstate.host_info.os_version, "Android")) {
                    safe_strcat(gstate.host_info.os_version, " (Android ");
                    safe_strcat(gstate.host_info.os_version, hi.os_version);
                    safe_strcat(gstate.host_info.os_version, ")");
                }
            }
            grc.mfout.printf("<success/>\n");
            gstate.set_client_state_dirty("set_host_info RPC");
            return;
        }
    }
    grc.mfout.printf("<error>Missing host_info</error>\n");
}
// Set the network run mode; same request format as set_run_mode.
//
static void handle_set_network_mode(GUI_RPC_CONN& grc) {
    double duration = 0;
    bool btemp;
    int mode=-1;      // -1 means "no mode tag seen"
    while (!grc.xp.get_tag()) {
        if (grc.xp.parse_double("duration", duration)) continue;
        if (grc.xp.parse_bool("always", btemp)) {
            mode = RUN_MODE_ALWAYS;
            continue;
        }
        if (grc.xp.parse_bool("never", btemp)) {
            mode = RUN_MODE_NEVER;
            continue;
        }
        if (grc.xp.parse_bool("auto", btemp)) {
            mode = RUN_MODE_AUTO;
            continue;
        }
        if (grc.xp.parse_bool("restore", btemp)) {
            mode = RUN_MODE_RESTORE;
            continue;
        }
    }
    if (mode < 0) {
        grc.mfout.printf("<error>Missing mode</error>\n");
        return;
    }
    // user is turning network on/off explicitly,
    // so disable the "5 minute grace period" mechanism
    //
    gstate.gui_rpcs.time_of_last_rpc_needing_network = 0;
    gstate.network_run_mode.set(mode, duration);
    grc.mfout.printf("<success/>\n");
}

// Start CPU benchmarks now.
//
static void handle_run_benchmarks(GUI_RPC_CONN& grc) {
    gstate.start_cpu_benchmarks();
    grc.mfout.printf("<success/>\n");
}
// Store proxy settings supplied by the GUI and make them take effect.
//
static void handle_set_proxy_settings(GUI_RPC_CONN& grc) {
    gui_proxy_info.parse(grc.xp);
    gstate.set_client_state_dirty("Set proxy settings RPC");
    grc.mfout.printf("<success/>\n");
    select_proxy_info();

    // tell running apps to reread app_info file (for F@h)
    //
    gstate.active_tasks.request_reread_app_info();
}

// Return the current GUI-supplied proxy settings.
//
static void handle_get_proxy_settings(GUI_RPC_CONN& grc) {
    gui_proxy_info.write(grc.mfout);
}

// params:
// [ <seqno>n</seqno> ]
// return only msgs with seqno > n; if absent or zero, return all
//
static void handle_get_messages(GUI_RPC_CONN& grc) {
    int seqno=0;
    bool translatable = false;
    while (!grc.xp.get_tag()) {
        if (grc.xp.parse_int("seqno", seqno)) continue;
        if (grc.xp.parse_bool("translatable", translatable)) continue;
    }
    message_descs.write(seqno, grc.mfout, translatable);
}

// Return the sequence number of the newest message.
//
static void handle_get_message_count(GUI_RPC_CONN& grc) {
    grc.mfout.printf("<seqno>%d</seqno>\n", message_descs.highest_seqno());
}
// <retry_file_transfer>
//    <project_url>XXX</project_url>
//    <filename>XXX</filename>
// </retry_file_transfer>
//
// Common implementation of retry/abort for a pending file transfer.
// op is "retry" or "abort".
//
static void handle_file_transfer_op(GUI_RPC_CONN& grc, const char* op) {
    string project_url, filename;
    while (!grc.xp.get_tag()) {
        if (grc.xp.parse_string("filename", filename)) continue;
        if (grc.xp.parse_string("project_url", project_url)) continue;
    }
    PROJECT* p = get_project(grc, project_url);
    if (!p) return;

    if (filename.empty()) {
        grc.mfout.printf("<error>Missing filename</error>\n");
        return;
    }
    
    FILE_INFO* f = gstate.lookup_file_info(p, filename.c_str());
    if (!f) {
        grc.mfout.printf("<error>No such file</error>\n");
        return;
    }
    
    PERS_FILE_XFER* pfx = f->pers_file_xfer;
    if (!pfx) {
        grc.mfout.printf("<error>No such transfer waiting</error>\n");
        return;
    }

    if (!strcmp(op, "retry")) {
        pfx->next_request_time = 0;
            // leave file-level backoff mode
        f->project->file_xfer_backoff(pfx->is_upload).file_xfer_succeeded();
            // and leave project-level backoff mode
    } else if (!strcmp(op, "abort")) {
        f->pers_file_xfer->abort();
    } else {
        grc.mfout.printf("<error>unknown op</error>\n");
        return;
    }
    gstate.set_client_state_dirty("File transfer RPC");
    grc.mfout.printf("<success/>\n");
}

// Retry a pending file transfer immediately.
//
static void handle_retry_file_transfer(GUI_RPC_CONN& grc) {
    handle_file_transfer_op(grc, "retry");
}

// Abort a pending file transfer.
//
static void handle_abort_file_transfer(GUI_RPC_CONN& grc) {
    handle_file_transfer_op(grc, "abort");
}
// Common implementation of suspend/resume/abort on a task.
// op is "suspend", "resume", or "abort".
//
static void handle_result_op(GUI_RPC_CONN& grc, const char* op) {
    RESULT* rp;
    char result_name[256];
    ACTIVE_TASK* atp;
    string project_url;
    safe_strcpy(result_name, "");

    while (!grc.xp.get_tag()) {
        if (grc.xp.parse_str("name", result_name, sizeof(result_name))) continue;
        if (grc.xp.parse_string("project_url", project_url)) continue;
    }
    PROJECT* p = get_project(grc, project_url);
    if (!p) return;

    if (!strlen(result_name)) {
        grc.mfout.printf("<error>Missing result name</error>\n");
        return;
    }

    rp = gstate.lookup_result(p, result_name);
    if (!rp) {
        grc.mfout.printf("<error>no such result</error>\n");
        return;
    }

    if (!strcmp(op, "abort")) {
        msg_printf(p, MSG_INFO, "task %s aborted by user", result_name);
        atp = gstate.lookup_active_task_by_result(rp);
        if (atp) {
            // running (or preempted) task: abort through the task manager
            atp->abort_task(EXIT_ABORTED_VIA_GUI, "aborted by user");
        } else {
            rp->abort_inactive(EXIT_ABORTED_VIA_GUI);
        }
        gstate.request_work_fetch("result aborted by user");
    } else if (!strcmp(op, "suspend")) {
        msg_printf(p, MSG_INFO, "task %s suspended by user", result_name);
        rp->suspended_via_gui = true;
        gstate.request_work_fetch("task suspended by user");
    } else if (!strcmp(op, "resume")) {
        msg_printf(p, MSG_INFO, "task %s resumed by user", result_name);
        rp->suspended_via_gui = false;
    }
    gstate.request_schedule_cpus("task suspended, resumed or aborted by user");
    gstate.set_client_state_dirty("Result RPC");
    grc.mfout.printf("<success/>\n");
}

// Suspend a task.
//
static void handle_suspend_result(GUI_RPC_CONN& grc) {
    handle_result_op(grc, "suspend");
}

// Resume a suspended task.
//
static void handle_resume_result(GUI_RPC_CONN& grc) {
    handle_result_op(grc, "resume");
}

// Abort a task.
//
static void handle_abort_result(GUI_RPC_CONN& grc) {
    handle_result_op(grc, "abort");
}
// Return full host hardware/OS information.
//
static void handle_get_host_info(GUI_RPC_CONN& grc) {
    gstate.host_info.write(grc.mfout, true, true);
}

// Return the suspend reason and the list of currently scheduled
// (i.e. running) tasks; used by the screensaver to pick graphics.
//
static void handle_get_screensaver_tasks(GUI_RPC_CONN& grc) {
    unsigned int i;
    ACTIVE_TASK* atp;
    grc.mfout.printf(
        "<handle_get_screensaver_tasks>\n"
        "    <suspend_reason>%d</suspend_reason>\n",
        gstate.suspend_reason
    );
    for (i=0; i<gstate.active_tasks.active_tasks.size(); i++) {
        atp = gstate.active_tasks.active_tasks[i];
        if (atp->scheduler_state == CPU_SCHED_SCHEDULED) {
            atp->result->write_gui(grc.mfout);
        }
    }
    grc.mfout.printf("</handle_get_screensaver_tasks>\n");
}

// Request orderly client shutdown (acted on in the main loop).
//
static void handle_quit(GUI_RPC_CONN& grc) {
    gstate.requested_exit = true;
    grc.mfout.printf("<success/>\n");
}
// Return account manager URL/name and whether we hold credentials.
// Login name and password hash themselves are never sent.
//
static void handle_acct_mgr_info(GUI_RPC_CONN& grc) {
    grc.mfout.printf(
        "<acct_mgr_info>\n"
        "   <acct_mgr_url>%s</acct_mgr_url>\n"
        "   <acct_mgr_name>%s</acct_mgr_name>\n",
        gstate.acct_mgr_info.master_url,
        gstate.acct_mgr_info.project_name
    );

    if (strlen(gstate.acct_mgr_info.login_name)) {
        grc.mfout.printf("   <have_credentials/>\n");
    }

    if (gstate.acct_mgr_info.cookie_required) {
        grc.mfout.printf("   <cookie_required/>\n");
        grc.mfout.printf(
            "   <cookie_failure_url>%s</cookie_failure_url>\n",
            gstate.acct_mgr_info.cookie_failure_url
        );
    }

    grc.mfout.printf("</acct_mgr_info>\n");
}

// Return per-project credit statistics history.
//
static void handle_get_statistics(GUI_RPC_CONN& grc) {
    grc.mfout.printf("<statistics>\n");
    for (unsigned int i=0; i<gstate.projects.size(); i++) {
        PROJECT* p = gstate.projects[i];
        p->write_statistics(grc.mfout);
    }
    grc.mfout.printf("</statistics>\n");
}
// Return the client's overall status snapshot:
// network status, suspend reasons, and the current/permanent/timed
// run modes for CPU, GPU, and network.
//
static void handle_get_cc_status(GUI_RPC_CONN& grc) {
    grc.mfout.printf(
        "<cc_status>\n"
        "   <network_status>%d</network_status>\n"
        "   <ams_password_error>%d</ams_password_error>\n"
        "   <task_suspend_reason>%d</task_suspend_reason>\n"
        "   <task_mode>%d</task_mode>\n"
        "   <task_mode_perm>%d</task_mode_perm>\n"
        "   <task_mode_delay>%f</task_mode_delay>\n"
        "   <gpu_suspend_reason>%d</gpu_suspend_reason>\n"
        "   <gpu_mode>%d</gpu_mode>\n"
        "   <gpu_mode_perm>%d</gpu_mode_perm>\n"
        "   <gpu_mode_delay>%f</gpu_mode_delay>\n"
        "   <network_suspend_reason>%d</network_suspend_reason>\n"
        "   <network_mode>%d</network_mode>\n"
        "   <network_mode_perm>%d</network_mode_perm>\n"
        "   <network_mode_delay>%f</network_mode_delay>\n"
        "   <disallow_attach>%d</disallow_attach>\n"
        "   <simple_gui_only>%d</simple_gui_only>\n"
        "   <max_event_log_lines>%d</max_event_log_lines>\n",
        net_status.network_status(),
        gstate.acct_mgr_info.password_error?1:0,
        gstate.suspend_reason,
        gstate.cpu_run_mode.get_current(),
        gstate.cpu_run_mode.get_perm(),
        gstate.cpu_run_mode.delay(),
        gpu_suspend_reason,
        gstate.gpu_run_mode.get_current(),
        gstate.gpu_run_mode.get_perm(),
        gstate.gpu_run_mode.delay(),
        gstate.network_suspend_reason,
        gstate.network_run_mode.get_current(),
        gstate.network_run_mode.get_perm(),
        gstate.network_run_mode.delay(),
        cc_config.disallow_attach?1:0,
        cc_config.simple_gui_only?1:0,
        cc_config.max_event_log_lines
    );
    // tell an auto-update-launched manager to quit, exactly once
    //
    if (grc.au_mgr_state == AU_MGR_QUIT_REQ) {
        grc.mfout.printf(
            "   <manager_must_quit>1</manager_must_quit>\n"
        );
        grc.au_mgr_state = AU_MGR_QUIT_SENT;
    }
    grc.mfout.printf(
        "</cc_status>\n"
    );
}
// GUI reports that network connectivity is now available.
//
static void handle_network_available(GUI_RPC_CONN& grc) {
    net_status.network_available();
    grc.mfout.printf("<success/>\n");
}

// Return the contents of the project init file (used by installers
// to pre-select a project).
//
static void handle_get_project_init_status(GUI_RPC_CONN& grc) {
    // If we're already attached to the project specified in the
    // project init file, delete the file.
    //
    for (unsigned i=0; i<gstate.projects.size(); i++) {
        PROJECT* p = gstate.projects[i];
        if (!strcmp(p->master_url, gstate.project_init.url)) {
            gstate.project_init.remove();
            break;
        }
    }

    grc.mfout.printf(
        "<get_project_init_status>\n"
        "    <url>%s</url>\n"
        "    <name>%s</name>\n"
        "    <team_name>%s</team_name>\n"
        "    <setup_cookie>%s</setup_cookie>\n"
        "    %s\n"
        "    %s\n"
        "</get_project_init_status>\n",
        gstate.project_init.url,
        gstate.project_init.name,
        gstate.project_init.team_name,
        gstate.project_init.setup_cookie,
        strlen(gstate.project_init.account_key)?"<has_account_key/>":"",
        gstate.project_init.embedded?"<embedded/>":""
    );
}
// Start an async fetch of a project's config (get_project_config.php).
// Result is retrieved via get_project_config_poll.
//
void handle_get_project_config(GUI_RPC_CONN& grc) {
    string url;

    while (!grc.xp.get_tag()) {
        if (grc.xp.parse_string("url", url)) continue;
    }
    if (url.empty()) {
        grc.mfout.printf("<error>no url</error>\n");
        return;
    }

    canonicalize_master_url(url);
    grc.get_project_config_op.do_rpc(url);
    grc.mfout.printf("<success/>\n");
}

// Poll for the result of a previous get_project_config.
//
void handle_get_project_config_poll(GUI_RPC_CONN& grc) {
    if (grc.get_project_config_op.error_num) {
        grc.mfout.printf(
            "<project_config>\n"
            "    <error_num>%d</error_num>\n"
            "</project_config>\n",
            grc.get_project_config_op.error_num
        );
    } else {
        // strip any HTTP headers/whitespace preceding the XML
        const char *p = grc.get_project_config_op.reply.c_str();
        const char *q = strstr(p, "<project_config");
        if (!q) q = "<project_config/>\n";
        grc.mfout.printf("%s", q);
    }
}
// Start an async account lookup RPC to a project.
// Requires URL + email + password hash, unless the project uses
// server-assigned cookies. Result via lookup_account_poll.
//
void handle_lookup_account(GUI_RPC_CONN& grc) {
    ACCOUNT_IN ai;
    // removed unused local "MIOFILE in;" (never referenced)

    ai.parse(grc.xp);
    if ((!ai.url.size() || !ai.email_addr.size() || !ai.passwd_hash.size()) && !ai.server_assigned_cookie) {
        grc.mfout.printf("<error>missing URL, email address, or password</error>\n");
        return;
    }

    grc.lookup_account_op.do_rpc(ai);
    grc.mfout.printf("<success/>\n");
}
// Poll for the result of a previous lookup_account.
//
void handle_lookup_account_poll(GUI_RPC_CONN& grc) {
    if (grc.lookup_account_op.error_num) {
        grc.mfout.printf(
            "<account_out>\n"
            "    <error_num>%d</error_num>\n"
            "</account_out>\n",
            grc.lookup_account_op.error_num
        );
    } else {
        grc.mfout.printf("%s", grc.lookup_account_op.reply.c_str());
    }
}

// Start an async account-creation RPC to a project.
// Result via create_account_poll.
//
void handle_create_account(GUI_RPC_CONN& grc) {
    ACCOUNT_IN ai;

    ai.parse(grc.xp);

    grc.create_account_op.do_rpc(ai);
    grc.mfout.printf("<success/>\n");
}

// Poll for the result of a previous create_account.
//
void handle_create_account_poll(GUI_RPC_CONN& grc) {
    if (grc.create_account_op.error_num) {
        grc.mfout.printf(
            "<account_out>\n"
            "    <error_num>%d</error_num>\n"
            "</account_out>\n",
            grc.create_account_op.error_num
        );
    } else {
        grc.mfout.printf("%s", grc.create_account_op.reply.c_str());
    }
}
// Attach to a project, given URL + authenticator
// (or taken from project_init.xml when use_config_file is set).
// Attach progress/messages are retrieved via project_attach_poll.
//
static void handle_project_attach(GUI_RPC_CONN& grc) {
    string url, authenticator, project_name;
    bool use_config_file = false;
    bool already_attached = false;
    unsigned int i;
    int retval;

    while (!grc.xp.get_tag()) {
        if (grc.xp.parse_bool("use_config_file", use_config_file)) continue;
        if (grc.xp.parse_string("project_url", url)) continue;
        if (grc.xp.parse_string("authenticator", authenticator)) continue;
        if (grc.xp.parse_string("project_name", project_name)) continue;
    }

    // Get URL/auth from project_init.xml?
    //
    if (use_config_file) {
        if (!strlen(gstate.project_init.url)) {
            grc.mfout.printf("<error>Missing URL</error>\n");
            return;
        }

        if (!strlen(gstate.project_init.account_key)) {
            grc.mfout.printf("<error>Missing authenticator</error>\n");
            return;
        }

        url = gstate.project_init.url;
        authenticator = gstate.project_init.account_key;
    } else {
        if (url.empty()) {
            grc.mfout.printf("<error>Missing URL</error>\n");
            return;
        }

        if (authenticator.empty()) {
            grc.mfout.printf("<error>Missing authenticator</error>\n");
            return;
        }
    }

    // reject if already attached to this URL
    //
    for (i=0; i<gstate.projects.size(); i++) {
        PROJECT* p = gstate.projects[i];
        if (url == p->master_url) already_attached = true;
    }

    if (already_attached) {
        grc.mfout.printf("<error>Already attached to project</error>\n");
        return;
    }

    // clear messages from previous attach to project.
    //
    gstate.project_attach.messages.clear();
    gstate.project_attach.error_num = gstate.add_project(
        url.c_str(), authenticator.c_str(), project_name.c_str(), false
    );

    // if project_init.xml refers to this project,
    // delete the file, otherwise we'll just
    // reattach the next time the client starts
    //
    if (!strcmp(url.c_str(), gstate.project_init.url)) {
        retval = gstate.project_init.remove();
        if (retval) {
            msg_printf(NULL, MSG_INTERNAL_ERROR,
                "Can't delete project init file: %s", boincerror(retval)
            );
        }
    }
    
    grc.mfout.printf("<success/>\n");
}
// Poll for messages/status of a pending project attach.
//
static void handle_project_attach_poll(GUI_RPC_CONN& grc) {
    unsigned int i;
    grc.mfout.printf(
        "<project_attach_reply>\n"
    );
    for (i=0; i<gstate.project_attach.messages.size(); i++) {
        grc.mfout.printf(
            "    <message>%s</message>\n",
            gstate.project_attach.messages[i].c_str()
        );
    }
    grc.mfout.printf(
        "    <error_num>%d</error_num>\n",
        gstate.project_attach.error_num
    );
    grc.mfout.printf(
        "</project_attach_reply>\n"
    );
}
// Start an RPC to an account manager.
// Credentials come from the request, or from the client's stored
// AM info when use_config_file is set. A plain-text password is
// hashed with md5(password + lowercase(name)); a value of the form
// "hash:XXX" is taken as an already-computed hash.
// Result via acct_mgr_rpc_poll.
//
static void handle_acct_mgr_rpc(GUI_RPC_CONN& grc) {
    string url, name, password;
    string password_hash, name_lc;
    bool use_config_file = false;
    bool bad_arg = false;
    bool url_found=false, name_found=false, password_found = false;

    while (!grc.xp.get_tag()) {
        if (grc.xp.parse_string("url", url)) {
            url_found = true;
            continue;
        }
        if (grc.xp.parse_string("name", name)) {
            name_found = true;
            continue;
        }
        if (grc.xp.parse_string("password", password)) {
            password_found = true;
            continue;
        }
        if (grc.xp.parse_bool("use_config_file", use_config_file)) continue;
    }
    if (!use_config_file) {
        bad_arg = !url_found || !name_found || !password_found;
        if (!bad_arg) {
            name_lc = name;
            downcase_string(name_lc);
            if (!starts_with(password, "hash:")) {
                password_hash = md5_string(password+name_lc);
            } else {
                // Remove 'hash:'
                password_hash = password.substr(5);
            }
        }
    } else {
        if (!strlen(gstate.acct_mgr_info.master_url)) {
            bad_arg = true;
            msg_printf(NULL, MSG_INTERNAL_ERROR,
                "Account manager info missing from config file"
            );
        } else {
            url = gstate.acct_mgr_info.master_url;
            name = gstate.acct_mgr_info.login_name;
            password_hash = gstate.acct_mgr_info.password_hash;
        }
    }

    if (bad_arg) {
        grc.mfout.printf("<error>bad arg</error>\n");
    } else {
        gstate.acct_mgr_op.do_rpc(url, name, password_hash, true);
        grc.mfout.printf("<success/>\n");
    }
}

// Poll for the result of a previous acct_mgr_rpc.
//
static void handle_acct_mgr_rpc_poll(GUI_RPC_CONN& grc) {
    grc.mfout.printf(
        "<acct_mgr_rpc_reply>\n"
    );
    if (gstate.acct_mgr_op.error_str.size()) {
        grc.mfout.printf(
            "    <message>%s</message>\n",
            gstate.acct_mgr_op.error_str.c_str()
        );
    }
    grc.mfout.printf(
        "    <error_num>%d</error_num>\n",
        gstate.acct_mgr_op.error_num
    );
    grc.mfout.printf(
        "</acct_mgr_rpc_reply>\n"
    );
}
// Return the newest known client version and its download URL
// (empty strings if we're up to date).
//
static void handle_get_newer_version(GUI_RPC_CONN& grc) {
    grc.mfout.printf(
        "<newer_version>%s</newer_version>\n"
        "<download_url>%s</download_url>\n",
        gstate.newer_version.c_str(),
        cc_config.client_download_url.c_str()
    );
}

// Return the contents of the global prefs file, parsed for this
// host's venue.
//
static void handle_get_global_prefs_file(GUI_RPC_CONN& grc) {
    GLOBAL_PREFS p;
    bool found;
    int retval = p.parse_file(
        GLOBAL_PREFS_FILE_NAME, gstate.main_host_venue, found
    );
    if (retval) {
        grc.mfout.printf("<error>%d</error>\n", retval);
        return;
    }
    p.write(grc.mfout);
}

// Return the prefs currently in effect (after overrides).
//
static void handle_get_global_prefs_working(GUI_RPC_CONN& grc) {
    gstate.global_prefs.write(grc.mfout);
}

// Return the raw contents of the prefs override file, if any.
//
static void handle_get_global_prefs_override(GUI_RPC_CONN& grc) {
    string s;
    int retval = read_file_string(GLOBAL_PREFS_OVERRIDE_FILE, s);
    if (!retval) {
        strip_whitespace(s);
        grc.mfout.printf("%s\n", s.c_str());
    } else {
        grc.mfout.printf("<error>no prefs override file</error>\n");
    }
}
// Write (or, if the request body is empty, delete) the global
// prefs override file. Takes effect after read_global_prefs_override.
//
static void handle_set_global_prefs_override(GUI_RPC_CONN& grc) {
    int retval;
    char buf[65536];

    retval = grc.xp.element_contents("</set_global_prefs_override>", buf, sizeof(buf));
    if (!retval) {
        if (strlen(buf)) {
            FILE* f = boinc_fopen(GLOBAL_PREFS_OVERRIDE_FILE, "w");
            if (f) {
                fprintf(f, "%s\n", buf);
                fclose(f);
                retval = 0;
            } else {
                retval = ERR_FOPEN;
            }
        } else {
            // empty contents: remove any existing override
            retval = boinc_delete_file(GLOBAL_PREFS_OVERRIDE_FILE);
        }
    }
    if (retval) {
        grc.mfout.printf("<status>%d</status>\n", retval);
    } else {
        grc.mfout.printf("<success/>\n");
    }
}
// Return the raw contents of cc_config.xml.
// NOTE(review): silently returns nothing if the file is missing;
// unlike get_global_prefs_override, no <error> is emitted.
//
static void handle_get_cc_config(GUI_RPC_CONN& grc) {
    string s;
    int retval = read_file_string(CONFIG_FILE, s);
    if (!retval) {
        strip_whitespace(s);
        grc.mfout.printf("%s\n", s.c_str());
    }
}

// Emit the contents of the all-projects list file, if present.
//
static void read_all_projects_list_file(GUI_RPC_CONN& grc) {
    string s;
    int retval = read_file_string(ALL_PROJECTS_LIST_FILENAME, s);
    if (!retval) {
        strip_whitespace(s);
        grc.mfout.printf("%s\n", s.c_str());
    }
}

// Return the client's entire state (projects, apps, tasks, ...).
//
static void handle_get_state(GUI_RPC_CONN& grc) {
    gstate.write_state_gui(grc.mfout);
}
// Write (or, if the request body is empty, delete) cc_config.xml.
// Takes effect after a read_cc_config RPC or client restart.
//
static void handle_set_cc_config(GUI_RPC_CONN& grc) {
    int retval;
    char buf[65536];

    retval = grc.xp.element_contents("</set_cc_config>", buf, sizeof(buf));
    if (!retval) {
        if (strlen(buf)) {
            FILE* f = boinc_fopen(CONFIG_FILE, "w");
            if (f) {
                fprintf(f, "%s\n", buf);
                fclose(f);
                retval = 0;
            } else {
                retval = ERR_FOPEN;
            }
        } else {
            // empty contents: remove any existing config file
            retval = boinc_delete_file(CONFIG_FILE);
        }
    }
    if (retval) {
        grc.mfout.printf("<status>%d</status>\n", retval);
    } else {
        grc.mfout.printf("<success/>\n");
    }
}
// Return notices with seqno greater than the given one (all if 0).
//
static void handle_get_notices(GUI_RPC_CONN& grc) {
    int seqno = 0;
    while (!grc.xp.get_tag()) {
        if (grc.xp.parse_int("seqno", seqno)) continue;
    }
    notices.write(seqno, grc, false);
}

// Same, but only notices flagged as public (unauthenticated RPC).
//
static void handle_get_notices_public(GUI_RPC_CONN& grc) {
    int seqno = 0;
    while (!grc.xp.get_tag()) {
        if (grc.xp.parse_int("seqno", seqno)) continue;
    }
    notices.write(seqno, grc, true);
}

// Return the list of recently completed tasks.
//
static void handle_get_old_results(GUI_RPC_CONN& grc) {
    print_old_results(grc.mfout);
}

// Return current tasks; optionally only those with active slots.
//
static void handle_get_results(GUI_RPC_CONN& grc) {
    bool active_only = false;
    while (!grc.xp.get_tag()) {
        if (grc.xp.parse_bool("active_only", active_only)) continue;
    }
    grc.mfout.printf("<results>\n");
    gstate.write_tasks_gui(grc.mfout, active_only);
    grc.mfout.printf("</results>\n");
}

// Return the downloaded master list of all BOINC projects.
//
static void handle_get_all_projects_list(GUI_RPC_CONN& grc) {
    read_all_projects_list_file(grc);
}

// Return current file transfers.
//
static void handle_get_file_transfers(GUI_RPC_CONN& grc) {
    gstate.write_file_transfers_gui(grc.mfout);
}

// Re-read global prefs (including any override) and reschedule.
//
static void handle_read_global_prefs_override(GUI_RPC_CONN& grc) {
    grc.mfout.printf("<success/>\n");
    gstate.read_global_prefs();
    gstate.request_schedule_cpus("Preferences override");
    gstate.request_work_fetch("Preferences override");
}

// Re-read cc_config.xml (and app_config.xml files) and apply them.
//
static void handle_read_cc_config(GUI_RPC_CONN& grc) {
    grc.mfout.printf("<success/>\n");
    read_config_file(false);
    cc_config.show();
    log_flags.show();
    gstate.set_ncpus();
    process_gpu_exclusions();

    // also reread app_config.xml files
    //
    check_app_config();

    gstate.request_schedule_cpus("Core client configuration");
    gstate.request_work_fetch("Core client configuration");
    set_no_rsc_config();
}

// Return per-day network transfer history.
//
static void handle_get_daily_xfer_history(GUI_RPC_CONN& grc) {
    daily_xfer_history.write_xml(grc.mfout);
}
// Return true iff buf contains a complete HTTP POST request:
// it starts with "POST", carries a "Content-Length: " header,
// and at least that many bytes follow the blank line ending the
// headers. Used to decide whether more data must be read first.
//
static bool complete_post_request(char* buf) {
    if (strncmp(buf, "POST", 4) != 0) {
        return false;
    }
    char* hdr = strstr(buf, "Content-Length: ");
    if (!hdr) {
        return false;
    }
    int content_len = atoi(hdr + strlen("Content-Length: "));
    char* body = strstr(hdr, "\r\n\r\n");
    if (!body) {
        return false;
    }
    body += 4;      // skip past the blank line to the request body
    return (int)strlen(body) >= content_len;
}
// Store the GUI's language code (persisted in client state).
//
static void handle_set_language(GUI_RPC_CONN& grc) {
    while (!grc.xp.get_tag()) {
        if (grc.xp.parse_str("language", gstate.language, sizeof(gstate.language))) {
            gstate.set_client_state_dirty("set_language");
            grc.mfout.printf("<success/>\n");
            return;
        }
    }
    grc.mfout.printf("<error>no language found</error>\n");
}
// Android GUI reports device status (power source, battery, WiFi,
// user activity). Stored in gstate for scheduling decisions.
//
static void handle_report_device_status(GUI_RPC_CONN& grc) {
    DEVICE_STATUS d;
    while (!grc.xp.get_tag()) {
        if (grc.xp.match_tag("device_status")) {
            int retval = d.parse(grc.xp);
            if (log_flags.android_debug) {
                if (retval) {
                    msg_printf(0, MSG_INFO,
                        "report_device_status RPC parse failed: %d", retval
                    );
                } else {
                    msg_printf(0, MSG_INFO,
                        "Android device status:"
                    );
                    msg_printf(0, MSG_INFO,
                        "On AC: %s; on USB: %s; on WiFi: %s; user active: %s",
                        d.on_ac_power?"yes":"no",
                        d.on_usb_power?"yes":"no",
                        d.wifi_online?"yes":"no",
                        d.user_active?"yes":"no"
                    );
                    msg_printf(0, MSG_INFO,
                        "Battery: charge pct: %f; temp %f state %s",
                        d.battery_charge_pct,
                        d.battery_temperature_celsius,
                        battery_state_string(d.battery_state)
                    );
                }
            }
            if (!retval) {
                // if the GUI reported a device name, use it
                //
                if (strlen(d.device_name)) {
                    if (strcmp(d.device_name, gstate.host_info.domain_name)) {
                        safe_strcpy(gstate.host_info.domain_name, d.device_name);
                        gstate.set_client_state_dirty("Device name changed");
                    }
                }
                gstate.device_status = d;
                gstate.device_status_time = gstate.now;
                grc.mfout.printf("<success/>\n");
                return;
            }
        }
    }
    grc.mfout.printf("<error/>\n");
}
// Parse a <device_status> element.
// Returns 0 on success, ERR_XML_PARSE if the closing tag
// was never seen.
//
int DEVICE_STATUS::parse(XML_PARSER& xp) {
    while (!xp.get_tag()) {
        if (xp.match_tag("/device_status")) {
            return 0;
        }
        if (xp.parse_bool("on_ac_power", on_ac_power)) continue;
        if (xp.parse_bool("on_usb_power", on_usb_power)) continue;
        if (xp.parse_double("battery_charge_pct", battery_charge_pct)) continue;
        if (xp.parse_int("battery_state", battery_state)) continue;
        if (xp.parse_double("battery_temperature_celsius", battery_temperature_celsius)) continue;
        if (xp.parse_bool("wifi_online", wifi_online)) continue;
        if (xp.parse_bool("user_active", user_active)) continue;
        if (xp.parse_str("device_name", device_name, sizeof(device_name))) continue;
    }
    return ERR_XML_PARSE;
}
// Some of the RPCs have empty-element request messages.
// We accept both <foo/> and <foo></foo>
//
#define match_req(buf, tag) (match_tag(buf, "<" tag ">") || match_tag(buf, "<" tag "/>"))

typedef void (*GUI_RPC_HANDLER)(GUI_RPC_CONN&);

// Dispatch-table entry: maps a request tag to its handler
// plus per-RPC policy flags.
//
struct GUI_RPC {
    const char* req_tag;
    char alt_req_tag[256];       // req_tag with trailing "/" (empty-element form)
    GUI_RPC_HANDLER handler;
    bool auth_required;
        // operations that require authentication only for non-local clients.
        // Use this only for information that should be available to people
        // sharing this computer (e.g. what jobs are running)
        // but not for anything sensitive (passwords etc.)
    bool enable_network;
        // RPCs that should enable network communication for 5 minutes,
        // overriding other factors.
        // Things like attaching projects, etc.
    bool read_only;         // doesn't modify the client's data structs

    GUI_RPC(const char* req, GUI_RPC_HANDLER h, bool ar, bool en, bool ro) {
        req_tag = req;
        safe_strcpy(alt_req_tag, req);
        safe_strcat(alt_req_tag, "/");
        handler = h;
        auth_required = ar;
        enable_network = en;
        read_only = ro;
    }
};

// The RPC dispatch table. Column legend:
//                                               local auth required
//                                                       enable network
//                                                               read-only
GUI_RPC gui_rpcs[] = {
    GUI_RPC("exchange_versions", handle_exchange_versions, false, false, true),
    GUI_RPC("get_all_projects_list", handle_get_all_projects_list, false, false, true),
    GUI_RPC("get_cc_status", handle_get_cc_status, false, false, true),
    GUI_RPC("get_disk_usage", handle_get_disk_usage, false, false, true),
    GUI_RPC("get_daily_xfer_history", handle_get_daily_xfer_history,
                                                           false, false, true),
    GUI_RPC("get_file_transfers", handle_get_file_transfers, false, false, true),
    GUI_RPC("get_host_info", handle_get_host_info, false, false, true),
    GUI_RPC("get_messages", handle_get_messages, false, false, true),
    GUI_RPC("get_message_count", handle_get_message_count, false, false, true),
    GUI_RPC("get_newer_version", handle_get_newer_version, false, false, true),
    GUI_RPC("get_notices_public", handle_get_notices_public, false, false, true),
    GUI_RPC("get_old_results", handle_get_old_results, false, false, true),
    GUI_RPC("get_project_status", handle_get_project_status, false, false, true),
    GUI_RPC("get_results", handle_get_results, false, false, true),
    GUI_RPC("get_screensaver_tasks", handle_get_screensaver_tasks, false, false, true),
    GUI_RPC("get_simple_gui_info", handle_get_simple_gui_info, false, false, true),
    GUI_RPC("get_state", handle_get_state, false, false, true),
    GUI_RPC("get_statistics", handle_get_statistics, false, false, true),

    // ops requiring local auth start here

    GUI_RPC("abort_file_transfer", handle_abort_file_transfer, true, false, false),
    GUI_RPC("abort_result", handle_abort_result, true, false, false),
    GUI_RPC("acct_mgr_info", handle_acct_mgr_info, true, false, true),
    GUI_RPC("get_cc_config", handle_get_cc_config, true, false, false),
    GUI_RPC("get_global_prefs_file", handle_get_global_prefs_file, true, false, false),
    GUI_RPC("get_global_prefs_override", handle_get_global_prefs_override,
                                                            true, false, false),
    GUI_RPC("get_global_prefs_working", handle_get_global_prefs_working,
                                                            true, false, false),
    GUI_RPC("get_notices", handle_get_notices, true, false, true),
    GUI_RPC("get_project_init_status", handle_get_project_init_status,
                                                            true, false, false),
    GUI_RPC("get_proxy_settings", handle_get_proxy_settings, true, false, true),
    GUI_RPC("network_available", handle_network_available, true, false, false),
    GUI_RPC("project_allowmorework", handle_project_allowmorework, true, false, false),
    GUI_RPC("project_detach", handle_project_detach, true, false, false),
    GUI_RPC("project_detach_when_done", handle_project_detach_when_done,
                                                            true, false, false),
    GUI_RPC("project_dont_detach_when_done", handle_project_dont_detach_when_done,
                                                            true, false, false),
    GUI_RPC("project_nomorework", handle_project_nomorework, true, false, false),
    GUI_RPC("project_resume", handle_project_resume, true, false, false),
    GUI_RPC("project_suspend", handle_project_suspend, true, false, false),
    GUI_RPC("quit", handle_quit, true, false, false),
    GUI_RPC("read_cc_config", handle_read_cc_config, true, false, false),
    GUI_RPC("read_global_prefs_override", handle_read_global_prefs_override,
                                                            true, false, false),
    GUI_RPC("report_device_status", handle_report_device_status, true, false, false),
    GUI_RPC("resume_result", handle_resume_result, true, false, false),
    GUI_RPC("run_benchmarks", handle_run_benchmarks, true, false, false),
    GUI_RPC("set_cc_config", handle_set_cc_config, true, false, false),
    GUI_RPC("set_global_prefs_override", handle_set_global_prefs_override,
                                                            true, false, false),
    GUI_RPC("set_gpu_mode", handle_set_gpu_mode, true, false, false),
    GUI_RPC("set_host_info", handle_set_host_info, true, false, false),
    GUI_RPC("set_language", handle_set_language, true, false, false),
    GUI_RPC("set_network_mode", handle_set_network_mode, true, false, false),
    GUI_RPC("set_proxy_settings", handle_set_proxy_settings, true, false, false),
    GUI_RPC("set_run_mode", handle_set_run_mode, true, false, false),
    GUI_RPC("suspend_result", handle_suspend_result, true, false, false),

    // ops requiring temporary network access start here

    GUI_RPC("acct_mgr_rpc", handle_acct_mgr_rpc, true, true, false),
    GUI_RPC("acct_mgr_rpc_poll", handle_acct_mgr_rpc_poll, true, true, false),
    GUI_RPC("create_account", handle_create_account, true, true, false),
    GUI_RPC("create_account_poll", handle_create_account_poll, true, true, false),
    GUI_RPC("get_project_config", handle_get_project_config, true, true, false),
    GUI_RPC("get_project_config_poll", handle_get_project_config_poll,
                                                            true, true, false),
    GUI_RPC("lookup_account", handle_lookup_account, true, true, false),
    GUI_RPC("lookup_account_poll", handle_lookup_account_poll, true, true, false),
    GUI_RPC("project_attach", handle_project_attach, true, true, false),
    GUI_RPC("project_attach_poll", handle_project_attach_poll, true, true, false),
    GUI_RPC("project_reset", handle_project_reset, true, true, false),
    GUI_RPC("project_update", handle_project_update, true, true, false),
    GUI_RPC("retry_file_transfer", handle_retry_file_transfer, true, true, false),
};
// return nonzero only if we need to close the connection
//
static int handle_rpc_aux(GUI_RPC_CONN& grc) {
int retval = 0;
grc.mfin.init_buf_read(grc.request_msg);
if (grc.xp.get_tag()) return ERR_XML_PARSE; // parse <boinc_gui_rpc_request>
if (grc.xp.get_tag()) return ERR_XML_PARSE; // parse the request tag
for (unsigned int i=0; i<sizeof(gui_rpcs)/sizeof(GUI_RPC); i++) {
GUI_RPC& gr = gui_rpcs[i];
if (!grc.xp.match_tag(gr.req_tag) && !grc.xp.match_tag(gr.alt_req_tag)) {
continue;
}
if (gr.auth_required && grc.auth_needed) {
auth_failure(grc.mfout);
if (grc.sent_unauthorized) {
retval = ERR_AUTHENTICATOR;
}
grc.sent_unauthorized = true;
return retval;
}
if (gr.enable_network) {
gstate.gui_rpcs.time_of_last_rpc_needing_network = gstate.now;
}
(*gr.handler)(grc);
return 0;
}
grc.mfout.printf("<error>unrecognized op: %s</error>\n", grc.xp.parsed_tag);
return 0;
}
// return nonzero only if we need to close the connection
//
int GUI_RPC_CONN::handle_rpc() {
int n, retval=0;
char* p;
int left = GUI_RPC_REQ_MSG_SIZE - request_nbytes;
#ifdef _WIN32
n = recv(sock, request_msg+request_nbytes, left, 0);
#else
n = read(sock, request_msg+request_nbytes, left);
#endif
if (n <= 0) {
request_nbytes = 0;
return ERR_READ;
}
request_nbytes += n;
// buffer full?
if (request_nbytes >= GUI_RPC_REQ_MSG_SIZE) {
request_nbytes = 0;
return ERR_READ;
}
request_msg[request_nbytes] = 0;
if (!strncmp(request_msg, "OPTIONS", 7)) {
char buf[1024];
snprintf(buf, sizeof(buf),
"HTTP/1.1 200 OK\n"
"Server: BOINC client\n"
"Access-Control-Allow-Origin: *\n"
"Access-Control-Allow-Methods: POST, GET, OPTIONS\n"
"Content-Length: 0\n"
"Keep-Alive: timeout=2, max=100\n"
"Connection: Keep-Alive\n"
"Content-Type: text/plain\n\n"
);
send(sock, buf, (int)strlen(buf), 0);
request_nbytes = 0;
if (log_flags.gui_rpc_debug) {
msg_printf(0, MSG_INFO,
"[gui_rpc] processed OPTIONS"
);
}
return 0;
}
bool http_request;
if (complete_post_request(request_msg)) {
http_request = true;
} else {
p = strchr(request_msg, 3);
if (p) {
*p = 0;
http_request = false;
} else {
if (log_flags.gui_rpc_debug) {
msg_printf(0, MSG_INFO,
"[gui_rpc] partial GUI RPC Command = '%s'\n", request_msg
);
}
return 0;
}
}
request_nbytes = 0;
if (log_flags.gui_rpc_debug) {
msg_printf(0, MSG_INFO,
"[gui_rpc] GUI RPC Command = '%s'\n", request_msg
);
}
// Policy:
// - the first auth failure gets an error message; after that, disconnect
// - if we get an unexpected auth1 or auth2, disconnect
mfout.printf("<boinc_gui_rpc_reply>\n");
if (match_req(request_msg, "auth1")) {
if (got_auth1 && auth_needed) {
retval = ERR_AUTHENTICATOR;
} else {
handle_auth1(mfout);
got_auth1 = true;
}
} else if (match_req(request_msg, "auth2")) {
if ((!got_auth1 || got_auth2) && auth_needed) {
retval = ERR_AUTHENTICATOR;
} else {
retval = handle_auth2(request_msg, mfout);
got_auth2 = true;
}
} else if (auth_needed && !is_local) {
auth_failure(mfout);
if (sent_unauthorized) {
retval = ERR_AUTHENTICATOR;
}
sent_unauthorized = true;
} else {
retval = handle_rpc_aux(*this);
}
mfout.printf("</boinc_gui_rpc_reply>\n\003");
mout.get_buf(p, n);
if (http_request) {
char buf[1024];
snprintf(buf, sizeof(buf),
"HTTP/1.1 200 OK\n"
"Date: Fri, 31 Dec 1999 23:59:59 GMT\n"
"Server: BOINC client\n"
"Connection: close\n"
"Content-Type: text/xml; charset=utf-8\n"
"Content-Length: %d\n\n"
"<?xml version=\"1.0\" encoding=\"ISO-8859-1\" ?>\n",
n
);
send(sock, buf, (int)strlen(buf), 0);
}
if (p) {
send(sock, p, n, 0);
p[n-1]=0; // replace 003 with NULL
if (log_flags.gui_rpc_debug) {
if (n > 128) p[128] = 0;
msg_printf(0, MSG_INFO,
"[gui_rpc] GUI RPC reply: '%s'\n", p
);
}
free(p);
}
return retval;
}
| 1 | 8,603 | The logic now is to look for `<account_out` if that is present return the string starting from there. If it is not present look for `<error` and return the string from there (which can produce malformed XML because you do not check if the closing tag matches). If this is also not found return an empty `<account_out/>`. This `<error>` output is also not consistent with the other two functions you mentioned. Do the other two RPCs not report an error? | BOINC-boinc | php |
@@ -0,0 +1,12 @@
+class Api::V1::VideosController < ApiController
+ def index
+ show = find_show
+ render json: { videos: show.videos }
+ end
+
+ private
+
+ def find_show
+ Show.friendly.find(params[:show_id])
+ end
+end | 1 | 1 | 18,394 | Use nested module/class definitions instead of compact style. | thoughtbot-upcase | rb |
|
@@ -56,6 +56,7 @@ var individualTestTimeout = 10 * time.Second
func kbfsOpsInit(t *testing.T, changeMd bool) (mockCtrl *gomock.Controller,
config *ConfigMock, ctx context.Context, cancel context.CancelFunc) {
+ t.Log("Init KBFSOps")
ctr := NewSafeTestReporter(t)
mockCtrl = gomock.NewController(ctr)
config = NewConfigMock(mockCtrl, ctr) | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"bytes"
"fmt"
"math/rand"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-codec/codec"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfshash"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
)
type CheckBlockOps struct {
BlockOps
tr gomock.TestReporter
}
var _ BlockOps = (*CheckBlockOps)(nil)
func (cbo *CheckBlockOps) Ready(ctx context.Context, kmd KeyMetadata,
block Block) (id kbfsblock.ID, plainSize int, readyBlockData ReadyBlockData,
err error) {
id, plainSize, readyBlockData, err = cbo.BlockOps.Ready(ctx, kmd, block)
encodedSize := readyBlockData.GetEncodedSize()
if plainSize > encodedSize {
cbo.tr.Errorf("expected plainSize <= encodedSize, got plainSize = %d, "+
"encodedSize = %d", plainSize, encodedSize)
}
return
}
type tCtxIDType int
const (
tCtxID tCtxIDType = iota
)
// Time out individual tests after 10 seconds.
var individualTestTimeout = 10 * time.Second
func kbfsOpsInit(t *testing.T, changeMd bool) (mockCtrl *gomock.Controller,
config *ConfigMock, ctx context.Context, cancel context.CancelFunc) {
ctr := NewSafeTestReporter(t)
mockCtrl = gomock.NewController(ctr)
config = NewConfigMock(mockCtrl, ctr)
config.SetCodec(kbfscodec.NewMsgpack())
blockops := &CheckBlockOps{config.mockBops, ctr}
config.SetBlockOps(blockops)
kbfsops := NewKBFSOpsStandard(config)
config.SetKBFSOps(kbfsops)
config.SetNotifier(kbfsops)
// Use real caches, to avoid the overhead of tracking cache calls.
// Each test is expected to check the cache for correctness at the
// end of the test.
config.SetBlockCache(NewBlockCacheStandard(100, 1<<30))
config.SetDirtyBlockCache(NewDirtyBlockCacheStandard(wallClock{},
config.MakeLogger(""), 5<<20, 10<<20, 5<<20))
config.mockBcache = nil
config.mockDirtyBcache = nil
if changeMd {
// Give different values for the MD Id so we can test that it
// is properly cached
config.mockCrypto.EXPECT().MakeMdID(gomock.Any()).AnyTimes().
Return(fakeMdID(2), nil)
} else {
config.mockCrypto.EXPECT().MakeMdID(gomock.Any()).AnyTimes().
Return(fakeMdID(1), nil)
}
// These tests don't rely on external notifications at all, so ignore any
// goroutine attempting to register:
c := make(chan error, 1)
config.mockMdserv.EXPECT().RegisterForUpdate(gomock.Any(),
gomock.Any(), gomock.Any()).AnyTimes().Return(c, nil)
config.mockMdserv.EXPECT().OffsetFromServerTime().
Return(time.Duration(0), true).AnyTimes()
// None of these tests depend on time
config.mockClock.EXPECT().Now().AnyTimes().Return(time.Now())
// Ignore Notify calls for now
config.mockRep.EXPECT().Notify(gomock.Any(), gomock.Any()).AnyTimes()
// Ignore Archive calls for now
config.mockBops.EXPECT().Archive(gomock.Any(), gomock.Any(),
gomock.Any()).AnyTimes().Return(nil)
// Ignore Archive calls for now
config.mockBops.EXPECT().Archive(gomock.Any(), gomock.Any(),
gomock.Any()).AnyTimes().Return(nil)
// Ignore key bundle ID creation calls for now
config.mockCrypto.EXPECT().MakeTLFWriterKeyBundleID(gomock.Any()).
AnyTimes().Return(TLFWriterKeyBundleID{}, nil)
config.mockCrypto.EXPECT().MakeTLFReaderKeyBundleID(gomock.Any()).
AnyTimes().Return(TLFReaderKeyBundleID{}, nil)
// Ignore favorites
config.mockKbpki.EXPECT().FavoriteList(gomock.Any()).AnyTimes().
Return(nil, nil)
config.mockKbpki.EXPECT().FavoriteAdd(gomock.Any(), gomock.Any()).
AnyTimes().Return(nil)
interposeDaemonKBPKI(config, "alice", "bob", "charlie")
timeoutCtx, cancel := context.WithTimeout(
context.Background(), individualTestTimeout)
initSuccess := false
defer func() {
if !initSuccess {
cancel()
}
}()
// make the context identifiable, to verify that it is passed
// correctly to the observer
id := rand.Int()
ctx, err := NewContextWithCancellationDelayer(NewContextReplayable(
timeoutCtx, func(ctx context.Context) context.Context {
return context.WithValue(ctx, tCtxID, id)
}))
if err != nil {
t.Fatal(err)
}
initSuccess = true
return mockCtrl, config, ctx, cancel
}
func kbfsTestShutdown(mockCtrl *gomock.Controller, config *ConfigMock,
ctx context.Context, cancel context.CancelFunc) {
config.ctr.CheckForFailures()
config.KBFSOps().(*KBFSOpsStandard).Shutdown(ctx)
if config.mockDirtyBcache == nil {
if err := config.DirtyBlockCache().Shutdown(); err != nil {
// Ignore error; some tests intentionally leave around dirty data.
}
}
cancel()
if err := CleanupCancellationDelayer(ctx); err != nil {
panic(err)
}
mockCtrl.Finish()
}
// kbfsOpsInitNoMocks returns a config that doesn't use any mocks. The
// shutdown call is kbfsTestShutdownNoMocks.
func kbfsOpsInitNoMocks(t *testing.T, users ...libkb.NormalizedUsername) (
*ConfigLocal, keybase1.UID, context.Context, context.CancelFunc) {
config := MakeTestConfigOrBust(t, users...)
config.SetRekeyWithPromptWaitTime(individualTestTimeout)
timeoutCtx, cancel := context.WithTimeout(
context.Background(), individualTestTimeout)
initSuccess := false
defer func() {
if !initSuccess {
cancel()
}
}()
ctx, err := NewContextWithCancellationDelayer(NewContextReplayable(
timeoutCtx, func(c context.Context) context.Context {
return c
}))
if err != nil {
t.Fatal(err)
}
_, currentUID, err := config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
t.Fatal(err)
}
initSuccess = true
return config, currentUID, ctx, cancel
}
func kbfsTestShutdownNoMocks(t *testing.T, config *ConfigLocal,
ctx context.Context, cancel context.CancelFunc) {
CheckConfigAndShutdown(ctx, t, config)
cancel()
CleanupCancellationDelayer(ctx)
}
// TODO: Get rid of all users of this.
func kbfsTestShutdownNoMocksNoCheck(t *testing.T, config *ConfigLocal,
ctx context.Context, cancel context.CancelFunc) {
config.Shutdown(ctx)
cancel()
CleanupCancellationDelayer(ctx)
}
func checkBlockCache(t *testing.T, config *ConfigMock, id tlf.ID,
expectedCleanBlocks []kbfsblock.ID,
expectedDirtyBlocks map[BlockPointer]BranchName) {
bcache := config.BlockCache().(*BlockCacheStandard)
// make sure the LRU consists of exactly the right set of clean blocks
for _, id := range expectedCleanBlocks {
_, ok := bcache.cleanTransient.Get(id)
if !ok {
t.Errorf("BlockCache missing clean block %v at the end of the test",
id)
}
}
if bcache.cleanTransient.Len() != len(expectedCleanBlocks) {
t.Errorf("BlockCache has extra clean blocks at end of test")
}
// make sure the dirty cache consists of exactly the right set of
// dirty blocks
dirtyBcache := config.DirtyBlockCache().(*DirtyBlockCacheStandard)
for ptr, branch := range expectedDirtyBlocks {
_, err := dirtyBcache.Get(id, ptr, branch)
if err != nil {
t.Errorf("BlockCache missing dirty block %v, branch %s at "+
"the end of the test: err %+v", ptr, branch, err)
}
if !dirtyBcache.IsDirty(id, ptr, branch) {
t.Errorf("BlockCache has incorrectly clean block %v, branch %s at "+
"the end of the test: err %+v", ptr, branch, err)
}
}
if len(dirtyBcache.cache) != len(expectedDirtyBlocks) {
t.Errorf("BlockCache has extra dirty blocks at end of test")
}
}
func TestKBFSOpsGetFavoritesSuccess(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
handle1 := parseTlfHandleOrBust(t, config, "alice", false)
handle2 := parseTlfHandleOrBust(t, config, "alice,bob", false)
// dup for testing
handles := []*TlfHandle{handle1, handle2, handle2}
for _, h := range handles {
config.KeybaseService().FavoriteAdd(
context.Background(), h.ToFavorite().toKBFolder(false))
}
// The favorites list contains our own public dir by default, even
// if KBPKI doesn't return it.
handle3 := parseTlfHandleOrBust(t, config, "alice", true)
handles = append(handles, handle3)
handles2, err := config.KBFSOps().GetFavorites(ctx)
if err != nil {
t.Errorf("Got error on favorites: %+v", err)
}
if len(handles2) != len(handles)-1 {
t.Errorf("Got bad handles back: %v", handles2)
}
}
func TestKBFSOpsGetFavoritesFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
err := errors.New("Fake fail")
// Replace the old one (added in init function)
config.mockKbpki = NewMockKBPKI(mockCtrl)
config.SetKBPKI(config.mockKbpki)
// expect one call to favorites, and fail it
config.mockKbpki.EXPECT().FavoriteList(gomock.Any()).Return(nil, err)
if _, err2 := config.KBFSOps().GetFavorites(ctx); err2 != err {
t.Errorf("Got bad error on favorites: %+v", err2)
}
}
func getOps(config Config, id tlf.ID) *folderBranchOps {
return config.KBFSOps().(*KBFSOpsStandard).
getOpsNoAdd(FolderBranch{id, MasterBranch})
}
// createNewRMD creates a new RMD for the given name. Returns its ID
// and handle also.
func createNewRMD(t *testing.T, config Config, name string, public bool) (
tlf.ID, *TlfHandle, *RootMetadata) {
id := tlf.FakeID(1, public)
h := parseTlfHandleOrBust(t, config, name, public)
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
return id, h, rmd
}
func makeImmutableRMDForTest(t *testing.T, config Config, rmd *RootMetadata,
mdID MdID) ImmutableRootMetadata {
key, err := config.KBPKI().GetCurrentVerifyingKey(context.Background())
require.NoError(t, err)
// We have to fake out the signature here because most tests
// in this file modify the returned value, invalidating any
// real signatures. TODO: Fix all the tests in this file to
// not do so, and then just use MakeImmutableRootMetadata.
if brmdv2, ok := rmd.bareMd.(*BareRootMetadataV2); ok {
vk := brmdv2.WriterMetadataSigInfo.VerifyingKey
require.True(t, vk == (kbfscrypto.VerifyingKey{}) || vk == key,
"Writer signature %s with unexpected non-nil verifying key != %s",
brmdv2.WriterMetadataSigInfo, key)
brmdv2.WriterMetadataSigInfo = kbfscrypto.SignatureInfo{
VerifyingKey: key,
}
}
return MakeImmutableRootMetadata(rmd, key, mdID, time.Now())
}
// injectNewRMD creates a new RMD and makes sure the existing ops for
// its ID has as its head that RMD.
func injectNewRMD(t *testing.T, config *ConfigMock) (
keybase1.UID, tlf.ID, *RootMetadata) {
id, h, rmd := createNewRMD(t, config, "alice", false)
var keyGen KeyGen
if id.IsPublic() {
keyGen = PublicKeyGen
} else {
keyGen = 1
}
rmd.data.Dir = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: BlockPointer{
KeyGen: keyGen,
DataVer: 1,
},
EncodedSize: 1,
},
}
rmd.fakeInitialRekey(config.Codec(), config.Crypto())
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(
t, config, rmd, fakeMdID(tlf.FakeIDByte(id)))
rmd.SetSerializedPrivateMetadata(make([]byte, 1))
config.Notifier().RegisterForChanges(
[]FolderBranch{{id, MasterBranch}}, config.observer)
uid := h.FirstResolvedWriter()
rmd.data.Dir.Creator = uid
return uid, id, rmd
}
func TestKBFSOpsGetRootNodeCacheSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
_, id, rmd := injectNewRMD(t, config)
rmd.data.Dir.BlockPointer.ID = kbfsblock.FakeID(1)
rmd.data.Dir.Type = Dir
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
n, ei, h, err := ops.getRootNode(ctx)
require.NoError(t, err)
assert.False(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
assert.Equal(t, id, p.Tlf)
require.Equal(t, 1, len(p.path))
assert.Equal(t, rmd.data.Dir.ID, p.path[0].ID)
assert.Equal(t, rmd.data.Dir.EntryInfo, ei)
assert.Equal(t, rmd.GetTlfHandle(), h)
// Trigger identify.
lState := makeFBOLockState()
_, err = ops.getMDLocked(ctx, lState, mdReadNeedIdentify)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
}
func TestKBFSOpsGetRootNodeReIdentify(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
_, id, rmd := injectNewRMD(t, config)
rmd.data.Dir.BlockPointer.ID = kbfsblock.FakeID(1)
rmd.data.Dir.Type = Dir
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
n, ei, h, err := ops.getRootNode(ctx)
require.NoError(t, err)
assert.False(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
assert.Equal(t, id, p.Tlf)
require.Equal(t, 1, len(p.path))
assert.Equal(t, rmd.data.Dir.ID, p.path[0].ID)
assert.Equal(t, rmd.data.Dir.EntryInfo, ei)
assert.Equal(t, rmd.GetTlfHandle(), h)
// Trigger identify.
lState := makeFBOLockState()
_, err = ops.getMDLocked(ctx, lState, mdReadNeedIdentify)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
// Mark everything for reidentifying, and wait for it to finish
// before checking.
kop := config.KBFSOps().(*KBFSOpsStandard)
returnCh := make(chan struct{})
kop.reIdentifyControlChan <- returnCh
<-returnCh
assert.False(t, fboIdentityDone(ops))
// Trigger new identify.
lState = makeFBOLockState()
_, err = ops.getMDLocked(ctx, lState, mdReadNeedIdentify)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
}
// fboIdentityDone is needed to avoid data races.
func fboIdentityDone(fbo *folderBranchOps) bool {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
return fbo.identifyDone
}
type failIdentifyKBPKI struct {
KBPKI
identifyErr error
}
func (kbpki failIdentifyKBPKI) Identify(ctx context.Context, assertion, reason string) (UserInfo, error) {
return UserInfo{}, kbpki.identifyErr
}
func TestKBFSOpsGetRootNodeCacheIdentifyFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
_, id, rmd := injectNewRMD(t, config)
rmd.data.Dir.BlockPointer.ID = kbfsblock.FakeID(1)
rmd.data.Dir.Type = Dir
ops := getOps(config, id)
expectedErr := errors.New("Identify failure")
config.SetKBPKI(failIdentifyKBPKI{config.KBPKI(), expectedErr})
// Trigger identify.
lState := makeFBOLockState()
_, err := ops.getMDLocked(ctx, lState, mdReadNeedIdentify)
assert.Equal(t, expectedErr, err)
assert.False(t, fboIdentityDone(ops))
}
func expectBlock(config *ConfigMock, kmd KeyMetadata, blockPtr BlockPointer, block Block, err error) {
config.mockBops.EXPECT().Get(gomock.Any(), kmdMatcher{kmd},
ptrMatcher{blockPtr}, gomock.Any(), gomock.Any()).
Do(func(ctx context.Context, kmd KeyMetadata,
blockPtr BlockPointer, getBlock Block, lifetime BlockCacheLifetime) {
switch v := getBlock.(type) {
case *FileBlock:
*v = *block.(*FileBlock)
case *DirBlock:
*v = *block.(*DirBlock)
}
config.BlockCache().Put(blockPtr, kmd.TlfID(), getBlock, lifetime)
}).Return(err)
}
// ptrMatcher implements the gomock.Matcher interface to compare
// BlockPointer objects. We don't care about some of the fields in a
// pointer for the purposes of these tests.
type ptrMatcher struct {
ptr BlockPointer
}
// Matches implements the Matcher interface for ptrMatcher.
func (p ptrMatcher) Matches(x interface{}) bool {
xPtr, ok := x.(BlockPointer)
if !ok {
return false
}
return (xPtr.ID == p.ptr.ID && xPtr.RefNonce == p.ptr.RefNonce)
}
// String implements the Matcher interface for ptrMatcher.
func (p ptrMatcher) String() string {
return fmt.Sprintf("Matches BlockPointer %v", p.ptr)
}
func fillInNewMD(t *testing.T, config *ConfigMock, rmd *RootMetadata) {
if !rmd.TlfID().IsPublic() {
rmd.fakeInitialRekey(config.Codec(), config.Crypto())
}
rootPtr := BlockPointer{
ID: kbfsblock.FakeID(42),
KeyGen: 1,
DataVer: 1,
}
rmd.data.Dir = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: rootPtr,
EncodedSize: 5,
},
EntryInfo: EntryInfo{
Type: Dir,
Size: 3,
},
}
return
}
func testKBFSOpsGetRootNodeCreateNewSuccess(t *testing.T, public bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", public)
fillInNewMD(t, config, rmd)
// create a new MD
config.mockMdops.EXPECT().GetUnmergedForTLF(
gomock.Any(), id, gomock.Any()).Return(ImmutableRootMetadata{}, nil)
irmd := makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
config.mockMdops.EXPECT().GetForTLF(gomock.Any(), id).Return(irmd, nil)
config.mockMdcache.EXPECT().Put(irmd).Return(nil)
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
n, ei, h, err := ops.getRootNode(ctx)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
require.Equal(t, id, p.Tlf)
require.Equal(t, 1, len(p.path))
require.Equal(t, rmd.data.Dir.ID, p.path[0].ID)
require.Equal(t, rmd.data.Dir.EntryInfo, ei)
require.Equal(t, rmd.GetTlfHandle(), h)
}
func TestKBFSOpsGetRootNodeCreateNewSuccessPublic(t *testing.T) {
testKBFSOpsGetRootNodeCreateNewSuccess(t, true)
}
func TestKBFSOpsGetRootNodeCreateNewSuccessPrivate(t *testing.T) {
testKBFSOpsGetRootNodeCreateNewSuccess(t, false)
}
func TestKBFSOpsGetRootMDForHandleExisting(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
rmd.data.Dir = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: BlockPointer{
ID: kbfsblock.FakeID(1),
},
EncodedSize: 15,
},
EntryInfo: EntryInfo{
Type: Dir,
Size: 10,
Mtime: 1,
Ctime: 2,
},
}
config.mockMdops.EXPECT().GetForHandle(gomock.Any(), h, Unmerged).Return(
tlf.ID{}, ImmutableRootMetadata{}, nil)
config.mockMdops.EXPECT().GetForHandle(gomock.Any(), h, Merged).Return(
tlf.ID{}, makeImmutableRMDForTest(t, config, rmd, fakeMdID(1)), nil)
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(2))
config.mockBops.EXPECT().Prefetcher().Return(newBlockPrefetcher(nil))
n, ei, err :=
config.KBFSOps().GetOrCreateRootNode(ctx, h, MasterBranch)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
if p.Tlf != id {
t.Errorf("Got bad dir id back: %v", p.Tlf)
} else if len(p.path) != 1 {
t.Errorf("Got bad MD back: path size %d", len(p.path))
} else if p.path[0].ID != rmd.data.Dir.ID {
t.Errorf("Got bad MD back: root ID %v", p.path[0].ID)
} else if ei.Type != Dir {
t.Error("Got bad MD non-dir rootID back")
} else if ei.Size != 10 {
t.Errorf("Got bad MD Size back: %d", ei.Size)
} else if ei.Mtime != 1 {
t.Errorf("Got bad MD MTime back: %d", ei.Mtime)
} else if ei.Ctime != 2 {
t.Errorf("Got bad MD CTime back: %d", ei.Ctime)
}
}
// rmd should really be a ReadOnlyRootMetadata or *BareRootMetadata in
// the helper functions below, but all the callers would have to go
// md.ReadOnly(), which doesn't buy us much in tests.
func makeBP(id kbfsblock.ID, kmd KeyMetadata, config Config,
u keybase1.UID) BlockPointer {
return BlockPointer{
ID: id,
KeyGen: kmd.LatestKeyGeneration(),
DataVer: DefaultNewBlockDataVersion(false),
Context: kbfsblock.Context{
Creator: u,
// Refnonces not needed; explicit refnonce
// testing happens elsewhere.
},
}
}
func makeBI(id kbfsblock.ID, kmd KeyMetadata, config Config,
u keybase1.UID, encodedSize uint32) BlockInfo {
return BlockInfo{
BlockPointer: makeBP(id, kmd, config, u),
EncodedSize: encodedSize,
}
}
func makeIFP(id kbfsblock.ID, kmd KeyMetadata, config Config,
u keybase1.UID, encodedSize uint32, off int64) IndirectFilePtr {
return IndirectFilePtr{
BlockInfo{
BlockPointer: makeBP(id, kmd, config, u),
EncodedSize: encodedSize,
},
off,
false,
codec.UnknownFieldSetHandler{},
}
}
func makeBIFromID(id kbfsblock.ID, user keybase1.UID) BlockInfo {
return BlockInfo{
BlockPointer: BlockPointer{
ID: id, KeyGen: 1, DataVer: 1,
Context: kbfsblock.Context{
Creator: user,
},
},
EncodedSize: 1,
}
}
func nodeFromPath(t *testing.T, ops *folderBranchOps, p path) Node {
var prevNode Node
// populate the node cache with all the nodes we'll need
for _, pathNode := range p.path {
n, err := ops.nodeCache.GetOrCreate(pathNode.BlockPointer,
pathNode.Name, prevNode)
if err != nil {
t.Fatal(err)
}
prevNode = n
}
return prevNode
}
func testPutBlockInCache(
t *testing.T, config *ConfigMock, ptr BlockPointer, id tlf.ID,
block Block) {
err := config.BlockCache().Put(ptr, id, block, TransientEntry)
require.NoError(t, err)
}
func TestKBFSOpsGetBaseDirChildrenCacheSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["a"] = DirEntry{EntryInfo: EntryInfo{Type: File}}
dirBlock.Children["b"] = DirEntry{EntryInfo: EntryInfo{Type: Dir}}
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
testPutBlockInCache(t, config, node.BlockPointer, id, dirBlock)
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
children, err := config.KBFSOps().GetDirChildren(ctx, n)
if err != nil {
t.Errorf("Got error on getdir: %+v", err)
} else if len(children) != 2 {
t.Errorf("Got bad children back: %v", children)
}
for c, ei := range children {
if de, ok := dirBlock.Children[c]; !ok {
t.Errorf("No such child: %s", c)
} else if de.EntryInfo != ei {
t.Errorf("Wrong EntryInfo for child %s: %v", c, ei)
}
}
}
func TestKBFSOpsGetBaseDirChildrenUncachedSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
dirBlock := NewDirBlock().(*DirBlock)
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key
expectBlock(config, rmd, blockPtr, dirBlock, nil)
if _, err := config.KBFSOps().GetDirChildren(ctx, n); err != nil {
t.Errorf("Got error on getdir: %+v", err)
}
}
func TestKBFSOpsGetBaseDirChildrenUncachedFailNonReader(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id := tlf.FakeID(1, false)
h := parseTlfHandleOrBust(t, config, "bob#alice", false)
// Hack around access check in ParseTlfHandle.
h.resolvedReaders = nil
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
_, uid, err := config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
t.Fatal(err)
}
rootID := kbfsblock.FakeID(42)
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
// won't even try getting the block if the user isn't a reader
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
expectedErr := NewReadAccessError(h, "alice", "/keybase/private/bob#alice")
if _, err := config.KBFSOps().GetDirChildren(ctx, n); err == nil {
t.Errorf("Got no expected error on getdir")
} else if err != expectedErr {
t.Errorf("Got unexpected error on root MD: %+v", err)
}
}
func TestKBFSOpsGetBaseDirChildrenUncachedFailMissingBlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
dirBlock := NewDirBlock().(*DirBlock)
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key, then
// fail block fetch
err := NoSuchBlockError{rootID}
expectBlock(config, rmd, blockPtr, dirBlock, err)
if _, err2 := config.KBFSOps().GetDirChildren(ctx, n); err2 == nil {
t.Errorf("Got no expected error on getdir")
} else if err2 != err {
t.Errorf("Got unexpected error on root MD: %+v", err)
}
}
func TestKBFSOpsGetNestedDirChildrenCacheSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["a"] = DirEntry{EntryInfo: EntryInfo{Type: Exec}}
dirBlock.Children["b"] = DirEntry{EntryInfo: EntryInfo{Type: Sym}}
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
bNode := pathNode{makeBP(bID, rmd, config, u), "b"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode, bNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, bNode.BlockPointer, id, dirBlock)
children, err := config.KBFSOps().GetDirChildren(ctx, n)
if err != nil {
t.Errorf("Got error on getdir: %+v", err)
} else if len(children) != 2 {
t.Errorf("Got bad children back: %v", children)
}
for c, ei := range children {
if de, ok := dirBlock.Children[c]; !ok {
t.Errorf("No such child: %s", c)
} else if de.EntryInfo != ei {
t.Errorf("Wrong EntryInfo for child %s: %v", c, ei)
}
}
}
func TestKBFSOpsLookupSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
bn, ei, err := config.KBFSOps().Lookup(ctx, n, "b")
if err != nil {
t.Errorf("Error on Lookup: %+v", err)
}
bPath := ops.nodeCache.PathFromNode(bn)
expectedBNode := pathNode{makeBP(bID, rmd, config, u), "b"}
expectedBNode.KeyGen = 1
if ei != dirBlock.Children["b"].EntryInfo {
t.Errorf("Lookup returned a bad entry info: %v vs %v",
ei, dirBlock.Children["b"].EntryInfo)
} else if bPath.path[2] != expectedBNode {
t.Errorf("Bad path node after lookup: %v vs %v",
bPath.path[2], expectedBNode)
}
}
// TestKBFSOpsLookupSymlinkSuccess checks that looking up a symlink
// entry returns its EntryInfo but a nil node, since symlinks have no
// block of their own.
func TestKBFSOpsLookupSymlinkSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	id, h, rmd := createNewRMD(t, config, "alice", false)
	ops := getOps(config, id)
	ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
	u := h.FirstResolvedWriter()
	rootID := kbfsblock.FakeID(42)
	aID := kbfsblock.FakeID(43)
	bID := kbfsblock.FakeID(44)
	// "b" is a symlink entry inside directory "a".
	dirBlock := NewDirBlock().(*DirBlock)
	dirBlock.Children["b"] = DirEntry{
		BlockInfo: makeBIFromID(bID, u),
		EntryInfo: EntryInfo{
			Type: Sym,
		},
	}
	node := pathNode{makeBP(rootID, rmd, config, u), "p"}
	aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
	n := nodeFromPath(t, ops, p)
	testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
	bn, ei, err := config.KBFSOps().Lookup(ctx, n, "b")
	if err != nil {
		t.Errorf("Error on Lookup: %+v", err)
	}
	if ei != dirBlock.Children["b"].EntryInfo {
		t.Errorf("Lookup returned a bad directory entry: %v vs %v",
			ei, dirBlock.Children["b"].EntryInfo)
	} else if bn != nil {
		// Symlinks must not produce a Node.
		t.Errorf("Node for symlink is not nil: %v", bn)
	}
}
// TestKBFSOpsLookupNoSuchNameFail checks that looking up a name that
// does not exist in the directory yields a NoSuchNameError.
func TestKBFSOpsLookupNoSuchNameFail(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	id, h, rmd := createNewRMD(t, config, "alice", false)
	ops := getOps(config, id)
	ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
	u := h.FirstResolvedWriter()
	rootID := kbfsblock.FakeID(42)
	aID := kbfsblock.FakeID(43)
	bID := kbfsblock.FakeID(44)
	// Only "b" exists; the test looks up "c".
	dirBlock := NewDirBlock().(*DirBlock)
	dirBlock.Children["b"] = DirEntry{
		BlockInfo: makeBIFromID(bID, u),
		EntryInfo: EntryInfo{
			Type: Dir,
		},
	}
	node := pathNode{makeBP(rootID, rmd, config, u), "p"}
	aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
	n := nodeFromPath(t, ops, p)
	testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
	expectedErr := NoSuchNameError{"c"}
	_, _, err := config.KBFSOps().Lookup(ctx, n, "c")
	if err == nil {
		t.Error("No error as expected on Lookup")
	} else if err != expectedErr {
		t.Errorf("Unexpected error after bad Lookup: %+v", err)
	}
}
// TestKBFSOpsLookupNewDataVersionFail checks that looking up an entry
// whose DataVer is newer than this client understands fails with a
// NewDataVersionError naming the offending path.
func TestKBFSOpsLookupNewDataVersionFail(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	id, h, rmd := createNewRMD(t, config, "alice", false)
	ops := getOps(config, id)
	ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
	u := h.FirstResolvedWriter()
	rootID := kbfsblock.FakeID(42)
	aID := kbfsblock.FakeID(43)
	bID := kbfsblock.FakeID(44)
	dirBlock := NewDirBlock().(*DirBlock)
	bInfo := makeBIFromID(bID, u)
	// A data version far beyond what the client supports.
	bInfo.DataVer = 10
	dirBlock.Children["b"] = DirEntry{
		BlockInfo: bInfo,
		EntryInfo: EntryInfo{
			Type: Dir,
		},
	}
	node := pathNode{makeBP(rootID, rmd, config, u), "p"}
	aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
	bNode := pathNode{makeBP(bID, rmd, config, u), "b"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
	n := nodeFromPath(t, ops, p)
	testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
	// The error should describe the full path down to "b".
	expectedErr := &NewDataVersionError{
		path{FolderBranch{Tlf: id}, []pathNode{node, aNode, bNode}},
		bInfo.DataVer,
	}
	_, _, err := config.KBFSOps().Lookup(ctx, n, "b")
	if err == nil {
		t.Error("No expected error found on lookup")
	} else if err.Error() != expectedErr.Error() {
		t.Errorf("Unexpected error after bad lookup: %+v", err)
	}
}
// TestKBFSOpsStatSuccess checks that Stat on a node returns the
// EntryInfo stored in its parent directory's block.
func TestKBFSOpsStatSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	id, h, rmd := createNewRMD(t, config, "alice", false)
	ops := getOps(config, id)
	ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
	u := h.FirstResolvedWriter()
	rootID := kbfsblock.FakeID(42)
	aID := kbfsblock.FakeID(43)
	bID := kbfsblock.FakeID(44)
	dirBlock := NewDirBlock().(*DirBlock)
	dirBlock.Children["b"] = DirEntry{
		BlockInfo: makeBIFromID(bID, u),
		EntryInfo: EntryInfo{
			Type: Dir,
		},
	}
	node := pathNode{makeBP(rootID, rmd, config, u), "p"}
	aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
	bNode := pathNode{makeBP(bID, rmd, config, u), "b"}
	// Stat is on "b", so its parent "a" must be cached.
	p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode, bNode}}
	n := nodeFromPath(t, ops, p)
	testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
	ei, err := config.KBFSOps().Stat(ctx, n)
	if err != nil {
		t.Errorf("Error on Stat: %+v", err)
	}
	if ei != dirBlock.Children["b"].EntryInfo {
		t.Errorf("Stat returned a bad entry info: %v vs %v",
			ei, dirBlock.Children["b"].EntryInfo)
	}
}
// shimMDOps wraps a real MDOps for tests, overriding Put and
// PutUnmerged so MD writes are signed with a local user key instead
// of going through the full MD flow.  isUnmerged selects which of the
// two put methods the test expects to be called.
type shimMDOps struct {
	isUnmerged bool
	codec      kbfscodec.Codec
	crypto     cryptoPure
	kbpki      KBPKI
	MDOps
}
// signAndMakeMdID signs rmd's writer metadata with the current user's
// local signing key and returns the resulting MD ID.  Shared by Put
// and PutUnmerged, which previously duplicated this sequence verbatim
// and differed only in their merge-status handling and the fake
// serialized private metadata they install.
func (s shimMDOps) signAndMakeMdID(
	ctx context.Context, rmd *RootMetadata) (MdID, error) {
	username, _, err := s.kbpki.GetCurrentUserInfo(ctx)
	if err != nil {
		return MdID{}, err
	}
	signingKey := MakeLocalUserSigningKeyOrBust(username)
	err = rmd.bareMd.SignWriterMetadataInternally(
		ctx, s.codec, kbfscrypto.SigningKeySigner{Key: signingKey})
	if err != nil {
		return MdID{}, err
	}
	return s.crypto.MakeMdID(rmd.bareMd)
}

// Put implements MDOps for shimMDOps.  It fails with a conflict error
// when the shim is configured as unmerged; otherwise it installs fake
// serialized private metadata, signs the MD locally, and returns its ID.
func (s shimMDOps) Put(ctx context.Context, rmd *RootMetadata) (MdID, error) {
	if s.isUnmerged {
		return MdID{}, MDServerErrorConflictRevision{}
	}
	rmd.SetSerializedPrivateMetadata([]byte{0x1})
	return s.signAndMakeMdID(ctx, rmd)
}

// PutUnmerged implements MDOps for shimMDOps.  It panics if the shim
// was not configured as unmerged (the test wired it up wrong);
// otherwise it installs fake serialized private metadata, signs the
// MD locally, and returns its ID.
func (s shimMDOps) PutUnmerged(ctx context.Context, rmd *RootMetadata) (MdID, error) {
	if !s.isUnmerged {
		panic("Unexpected PutUnmerged call")
	}
	rmd.SetSerializedPrivateMetadata([]byte{0x2})
	return s.signAndMakeMdID(ctx, rmd)
}
// expectSyncBlockHelper registers the mock expectations for a sync of
// the blocks along path p: each block from the tail up to (but not
// including) index skipSync is Ready()'d and Put() to the block
// server, and if skipSync == 0 the resulting MD is signed (via a
// shimMDOps) and cached, capturing it in *newRmd.  If newEntry is
// true the path is extended with a node for name.  refBytes and
// unrefBytes are the expected byte counts before the helper adds its
// own contributions.  Returns the expected post-sync path and the
// last gomock call, so callers can chain ordered expectations.
func expectSyncBlockHelper(
	t *testing.T, config *ConfigMock, lastCall *gomock.Call,
	uid keybase1.UID, id tlf.ID, name string, p path, kmd KeyMetadata,
	newEntry bool, skipSync int, refBytes uint64, unrefBytes uint64,
	newRmd *ImmutableRootMetadata, newBlockIDs []kbfsblock.ID, isUnmerged bool) (
	path, *gomock.Call) {
	// construct new path
	newPath := path{
		FolderBranch{Tlf: id},
		make([]pathNode, 0, len(p.path)+1),
	}
	for _, node := range p.path {
		newPath.path = append(newPath.path, pathNode{Name: node.Name})
	}
	if newEntry {
		// one extra node for the new entry
		newPath.path = append(newPath.path, pathNode{Name: name})
	}
	// all MD is embedded for now
	config.mockBsplit.EXPECT().ShouldEmbedBlockChanges(gomock.Any()).
		AnyTimes().Return(true)
	// By convention for these tests, the old blocks along the path
	// all have EncodedSize == 1.
	unrefBytes += uint64(len(p.path) * 1)
	lastID := p.tailPointer().ID
	// Walk from the tail back toward the root, expecting one
	// Ready+Put per block being synced.  New IDs are derived
	// deterministically from the old tail ID so callers can predict
	// them.
	for i := len(newPath.path) - 1; i >= skipSync; i-- {
		newID := kbfsblock.FakeIDMul(lastID, 2)
		newBuf := []byte{byte(i)}
		refBytes += uint64(len(newBuf))
		lastID = newID
		readyBlockData := ReadyBlockData{
			buf: newBuf,
		}
		call := config.mockBops.EXPECT().Ready(gomock.Any(), kmdMatcher{kmd},
			gomock.Any()).Return(newID, len(newBuf), readyBlockData, nil)
		if lastCall != nil {
			// Preserve ordering relative to the caller's prior expectations.
			call = call.After(lastCall)
		}
		lastCall = call
		newPath.path[i].ID = newID
		newBlockIDs[i] = newID
		config.mockBserv.EXPECT().Put(gomock.Any(), kmd.TlfID(), newID,
			gomock.Any(), readyBlockData.buf, readyBlockData.serverHalf).
			Return(nil)
	}
	if skipSync == 0 {
		// sign the MD and put it
		oldMDOps := config.MDOps()
		if oldShim, ok := oldMDOps.(shimMDOps); ok {
			// A shim is already installed; it must agree on merge status.
			if oldShim.isUnmerged != isUnmerged {
				t.Fatal("old shim with different isUnmerged")
			}
		} else {
			mdOps := shimMDOps{
				isUnmerged,
				config.Codec(),
				config.Crypto(),
				config.KBPKI(),
				oldMDOps,
			}
			config.SetMDOps(mdOps)
		}
		config.mockMdcache.EXPECT().Put(gomock.Any()).
			Do(func(rmd ImmutableRootMetadata) {
				// Capture the new MD for the caller's later checks.
				*newRmd = rmd
				// Check that the ref bytes are correct.
				if rmd.RefBytes() != refBytes {
					t.Errorf("Unexpected refbytes: %d vs %d",
						rmd.RefBytes(), refBytes)
				}
				if rmd.UnrefBytes() != unrefBytes {
					t.Errorf("Unexpected unrefbytes: %d vs %d",
						rmd.UnrefBytes(), unrefBytes)
				}
			}).Return(nil)
	}
	return newPath, lastCall
}
// expectSyncBlock is expectSyncBlockHelper for the merged (normal)
// case.
func expectSyncBlock(
	t *testing.T, config *ConfigMock, lastCall *gomock.Call,
	uid keybase1.UID, id tlf.ID, name string, p path, kmd KeyMetadata,
	newEntry bool, skipSync int, refBytes uint64, unrefBytes uint64,
	newRmd *ImmutableRootMetadata, newBlockIDs []kbfsblock.ID) (path, *gomock.Call) {
	return expectSyncBlockHelper(t, config, lastCall, uid, id, name, p, kmd,
		newEntry, skipSync, refBytes, unrefBytes, newRmd, newBlockIDs, false)
}

// expectSyncBlockUnmerged is expectSyncBlockHelper for the unmerged
// (conflict-branch) case.
func expectSyncBlockUnmerged(
	t *testing.T, config *ConfigMock, lastCall *gomock.Call,
	uid keybase1.UID, id tlf.ID, name string, p path, kmd KeyMetadata,
	newEntry bool, skipSync int, refBytes uint64, unrefBytes uint64,
	newRmd *ImmutableRootMetadata, newBlockIDs []kbfsblock.ID) (path, *gomock.Call) {
	return expectSyncBlockHelper(t, config, lastCall, uid, id, name, p, kmd,
		newEntry, skipSync, refBytes, unrefBytes, newRmd, newBlockIDs, true)
}
// getBlockFromCache fetches ptr from the dirty block cache first,
// falling back to the clean block cache; it fails the test if neither
// cache has the block.
func getBlockFromCache(t *testing.T, config Config, id tlf.ID, ptr BlockPointer,
	branch BranchName) Block {
	block, err := config.DirtyBlockCache().Get(id, ptr, branch)
	if err == nil {
		return block
	}
	block, err = config.BlockCache().Get(ptr)
	if err == nil {
		return block
	}
	t.Errorf("Couldn't find block %v, branch %s in the cache after test: "+
		"%+v", ptr, branch, err)
	return nil
}
// getDirBlockFromCache fetches ptr from the caches and asserts that
// the cached block is a *DirBlock, failing the test otherwise.
func getDirBlockFromCache(t *testing.T, config Config, id tlf.ID,
	ptr BlockPointer, branch BranchName) *DirBlock {
	cached := getBlockFromCache(t, config, id, ptr, branch)
	dblock, isDir := cached.(*DirBlock)
	if !isDir {
		t.Errorf("Cached block %v, branch %s was not a DirBlock", ptr, branch)
	}
	return dblock
}

// getFileBlockFromCache fetches ptr from the caches and asserts that
// the cached block is a *FileBlock, failing the test otherwise.
func getFileBlockFromCache(t *testing.T, config Config, id tlf.ID,
	ptr BlockPointer, branch BranchName) *FileBlock {
	cached := getBlockFromCache(t, config, id, ptr, branch)
	fblock, isFile := cached.(*FileBlock)
	if !isFile {
		t.Errorf("Cached block %v, branch %s was not a FileBlock", ptr, branch)
	}
	return fblock
}
// checkNewPath verifies a path produced by a create/rename operation:
// that at least one batch notification was sent with the right
// context, that newPath matches expectedPath node-for-node, and that
// the chain of directory entries named by blocks points correctly and
// has mtime/ctime set only on the nodes the operation should have
// touched.  newName is the created entry's name ("" for ops that
// don't create one); rename tweaks the ctime/mtime expectations for
// the final entry.
func checkNewPath(t *testing.T, ctx context.Context, config Config,
	newPath path, expectedPath path, rmd ReadOnlyRootMetadata, blocks []kbfsblock.ID,
	entryType EntryType, newName string, rename bool) {
	// TODO: check that the observer updates match the expectedPath as
	// well (but need to handle the rename case where there can be
	// multiple updates). For now, just check that there's at least
	// one update.
	if len(config.(*ConfigMock).observer.batchChanges) < 1 {
		t.Errorf("No batch notifications sent, at least one expected")
	}
	if ctx.Value(tCtxID) != config.(*ConfigMock).observer.ctx.Value(tCtxID) {
		t.Errorf("Wrong context value passed in batch notify: %v",
			config.(*ConfigMock).observer.ctx.Value(tCtxID))
	}
	if len(newPath.path) != len(expectedPath.path) {
		t.Errorf("Unexpected new path length: %d", len(newPath.path))
		return
	}
	if newPath.Tlf != expectedPath.Tlf {
		t.Errorf("Unexpected topdir in new path: %s",
			newPath.Tlf)
	}
	// check all names and IDs
	for i, node := range newPath.path {
		eNode := expectedPath.path[i]
		if node.ID != eNode.ID {
			t.Errorf("Wrong id on new path[%d]: %v vs. %v", i, node, eNode)
		}
		if node.Name != eNode.Name {
			t.Errorf("Wrong name on new path[%d]: %v vs. %v", i, node, eNode)
		}
	}
	// all the entries should point correctly and have the right times set
	currDe := rmd.data.Dir
	for i, id := range blocks {
		var timeSet bool
		if newName != "" {
			// only the last 2 nodes should have their times changed
			timeSet = i > len(blocks)-3
		} else {
			// only the last node should have its times changed
			timeSet = i > len(blocks)-2
		}
		// for a rename, the last entry only changes ctime
		if (!rename || i != len(blocks)-1) && (currDe.Mtime != 0) != timeSet {
			t.Errorf("mtime was wrong (%d): %d", i, currDe.Mtime)
		}
		if (currDe.Ctime != 0) != timeSet {
			t.Errorf("ctime was wrong (%d): %d", i, currDe.Ctime)
		}
		if i < len(expectedPath.path) {
			eID := expectedPath.path[i].ID
			if currDe.ID != eID {
				t.Errorf("Entry does not point to %v, but to %v",
					eID, currDe.ID)
			}
		}
		if i < len(blocks)-1 {
			// Descend into the next directory block in the chain.
			var nextName string
			if i+1 >= len(expectedPath.path) {
				// new symlinks don't have an entry in the path
				nextName = newName
			} else {
				nextName = expectedPath.path[i+1].Name
			}
			// TODO: update BlockPointer for refnonces when we start deduping
			dblock := getDirBlockFromCache(t, config, newPath.Tlf,
				makeBP(id, rmd.RootMetadata, config, rmd.data.Dir.Creator), newPath.Branch)
			nextDe, ok := dblock.Children[nextName]
			if !ok {
				t.Errorf("No entry (%d) for %s", i, nextName)
			}
			currDe = nextDe
		} else if newName != "" {
			// The last entry is the newly-created one; check its type.
			if currDe.Type != entryType {
				t.Errorf("New entry has wrong type %s, expected %s",
					currDe.Type, entryType)
			}
		}
		if (currDe.Type != File && currDe.Type != Exec) && currDe.Size == 0 {
			t.Errorf("Type %s unexpectedly has 0 size (%d)", currDe.Type, i)
		}
	}
}
// checkBPs verifies that bps has the same length as expectedBPs and
// contains every pointer in expectedBPs; kind labels the failure
// messages (e.g. "Refs", "Unrefs").  Membership is checked via a set
// rather than the previous quadratic nested-loop scan.
func checkBPs(t *testing.T, bps []BlockPointer, expectedBPs []BlockPointer,
	kind string) {
	if len(expectedBPs) != len(bps) {
		t.Errorf("Unexpected %s size: %d vs %d",
			kind, len(bps), len(expectedBPs))
	}
	// BlockPointer is comparable (it's compared with == elsewhere in
	// this file), so it can be a map key.
	have := make(map[BlockPointer]bool, len(bps))
	for _, ptr := range bps {
		have[ptr] = true
	}
	for _, ptr := range expectedBPs {
		if !have[ptr] {
			t.Errorf("Missing expected %s block: %v", kind, ptr)
		}
	}
}
// checkOp verifies that op's ref blocks, unref blocks, and block
// updates match the expected slices (order-insensitive membership,
// with length equality).
func checkOp(t *testing.T, op OpCommon, refs []BlockPointer,
	unrefs []BlockPointer, updates []blockUpdate) {
	checkBPs(t, op.RefBlocks, refs, "Refs")
	checkBPs(t, op.UnrefBlocks, unrefs, "Unrefs")
	if len(updates) != len(op.Updates) {
		t.Errorf("Unexpected updates size: %d vs %d",
			len(op.Updates), len(updates))
	}
	contains := func(u blockUpdate) bool {
		for _, candidate := range op.Updates {
			if candidate == u {
				return true
			}
		}
		return false
	}
	for _, want := range updates {
		if !contains(want) {
			t.Errorf("Missing expected block update: %v", want)
		}
	}
}
// testCreateEntrySuccess exercises the happy path of creating "a/b"
// of the given entry type (File, Exec, Dir, or Sym) and verifies the
// resulting path, caches, and the createOp recorded in the new MD.
func testCreateEntrySuccess(t *testing.T, entryType EntryType) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	rmd.data.Dir.ID = rootID
	rmd.data.Dir.Type = Dir
	aID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(aID, uid),
		EntryInfo: EntryInfo{
			Type: Dir,
		},
	}
	aBlock := NewDirBlock().(*DirBlock)
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	// creating "a/b"
	testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	// sync block; symlinks don't add a new path node
	var newRmd ImmutableRootMetadata
	blocks := make([]kbfsblock.ID, 3)
	expectedPath, _ :=
		expectSyncBlock(t, config, nil, uid, id, "b", p, rmd,
			entryType != Sym, 0, 0, 0, &newRmd, blocks)
	var newN Node
	var err error
	switch entryType {
	case File:
		newN, _, err = config.KBFSOps().CreateFile(ctx, n, "b", false, NoExcl)
	case Exec:
		newN, _, err = config.KBFSOps().CreateFile(ctx, n, "b", true, NoExcl)
	case Dir:
		newN, _, err = config.KBFSOps().CreateDir(ctx, n, "b")
	case Sym:
		_, err = config.KBFSOps().CreateLink(ctx, n, "b", "c")
		newN = n
	}
	newP := ops.nodeCache.PathFromNode(newN)
	if err != nil {
		t.Errorf("Got error on create: %+v", err)
	}
	require.NotNil(t, newRmd)
	checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
		entryType, "b", false)
	b1 := getDirBlockFromCache(t, config, id, newP.path[1].BlockPointer,
		newP.Branch)
	if entryType == Sym {
		// Symlinks should record their target path but no block.
		de := b1.Children["b"]
		if de.Type != Sym {
			t.Error("Entry is not a symbolic link")
		}
		if de.SymPath != "c" {
			t.Errorf("Symbolic path points to the wrong thing: %s", de.SymPath)
		}
		blocks = blocks[:len(blocks)-1] // discard fake block for symlink
	} else if entryType != Dir {
		// Newly-created files start empty.
		de := b1.Children["b"]
		if de.Size != 0 {
			t.Errorf("New file has non-zero size: %d", de.Size)
		}
	}
	checkBlockCache(t, config, id, append(blocks, rootID, aID), nil)
	// make sure the createOp is correct
	co, ok := newRmd.data.Changes.Ops[0].(*createOp)
	if !ok {
		t.Errorf("Couldn't find the createOp")
	}
	var refBlocks []BlockPointer
	if entryType != Sym {
		refBlocks = append(refBlocks, newP.path[2].BlockPointer)
	}
	updates := []blockUpdate{
		{rmd.data.Dir.BlockPointer, newP.path[0].BlockPointer},
	}
	checkOp(t, co.OpCommon, refBlocks, nil, updates)
	dirUpdate := blockUpdate{rootBlock.Children["a"].BlockPointer,
		newP.path[1].BlockPointer}
	if co.Dir != dirUpdate {
		t.Errorf("Incorrect dir update in op: %v vs. %v", co.Dir, dirUpdate)
	} else if co.NewName != "b" {
		t.Errorf("Incorrect name in op: %v", co.NewName)
	} else if co.Type != entryType {
		t.Errorf("Incorrect entry type in op: %v", co.Type)
	}
}
// Per-entry-type wrappers around testCreateEntrySuccess.
func TestKBFSOpsCreateDirSuccess(t *testing.T) {
	testCreateEntrySuccess(t, Dir)
}

func TestKBFSOpsCreateFileSuccess(t *testing.T) {
	testCreateEntrySuccess(t, File)
}

func TestKBFSOpsCreateExecFileSuccess(t *testing.T) {
	testCreateEntrySuccess(t, Exec)
}

func TestKBFSOpsCreateLinkSuccess(t *testing.T) {
	testCreateEntrySuccess(t, Sym)
}
// testCreateEntryFailDupName checks that creating an entry whose name
// already exists in the directory fails with NameExistsError, for
// both the directory and symlink creation code paths.
func testCreateEntryFailDupName(t *testing.T, isDir bool) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	u, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	aID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(aID, u),
		EntryInfo: EntryInfo{
			Type: Dir,
		},
	}
	node := pathNode{makeBP(rootID, rmd, config, u), "p"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	// creating "a", which already exists in the root block
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	expectedErr := NameExistsError{"a"}
	var err error
	// dir and link have different checks for dup name
	if isDir {
		_, _, err = config.KBFSOps().CreateDir(ctx, n, "a")
	} else {
		_, err = config.KBFSOps().CreateLink(ctx, n, "a", "b")
	}
	if err == nil {
		t.Errorf("Got no expected error on create")
	} else if err != expectedErr {
		t.Errorf("Got unexpected error on create: %+v", err)
	}
}
// Wrappers exercising both creation code paths for duplicate names.
func TestCreateDirFailDupName(t *testing.T) {
	testCreateEntryFailDupName(t, true)
}

func TestCreateLinkFailDupName(t *testing.T) {
	testCreateEntryFailDupName(t, false)
}
// testCreateEntryFailNameTooLong checks that creating an entry whose
// name exceeds config.maxNameBytes fails with NameTooLongError, for
// both the directory and symlink creation code paths.
func testCreateEntryFailNameTooLong(t *testing.T, isDir bool) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	u, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	rootBlock := NewDirBlock().(*DirBlock)
	node := pathNode{makeBP(rootID, rmd, config, u), "p"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	// Force the limit below the 3-byte name used here.
	config.maxNameBytes = 2
	name := "aaa"
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	expectedErr := NameTooLongError{name, config.maxNameBytes}
	var err error
	// Dir and link creation use separate code paths, so exercise both.
	if isDir {
		_, _, err = config.KBFSOps().CreateDir(ctx, n, name)
	} else {
		_, err = config.KBFSOps().CreateLink(ctx, n, name, "b")
	}
	if err == nil {
		t.Errorf("Got no expected error on create")
	} else if err != expectedErr {
		t.Errorf("Got unexpected error on create: %+v", err)
	}
}
// Wrappers exercising both creation code paths for over-long names.
func TestCreateDirFailNameTooLong(t *testing.T) {
	testCreateEntryFailNameTooLong(t, true)
}

func TestCreateLinkFailNameTooLong(t *testing.T) {
	testCreateEntryFailNameTooLong(t, false)
}
// testCreateEntryFailDirTooBig checks that creating an entry in a
// directory whose size would exceed config.maxDirBytes fails with
// DirTooBigError, for both the directory and symlink creation code
// paths.
func testCreateEntryFailDirTooBig(t *testing.T, isDir bool) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	u, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	rootBlock := NewDirBlock().(*DirBlock)
	node := pathNode{makeBP(rootID, rmd, config, u), "p"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	// Leave only 2 bytes of headroom, less than any new entry needs.
	rmd.data.Dir.Size = 10
	config.maxDirBytes = 12
	name := "aaa"
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	var err error
	// Dir and link creation use separate code paths, so exercise both.
	if isDir {
		_, _, err = config.KBFSOps().CreateDir(ctx, n, name)
	} else {
		_, err = config.KBFSOps().CreateLink(ctx, n, name, "b")
	}
	if err == nil {
		t.Errorf("Got no expected error on create")
	} else if _, ok := err.(DirTooBigError); !ok {
		t.Errorf("Got unexpected error on create: %+v", err)
	}
}
// Wrappers exercising both creation code paths for over-full dirs.
func TestCreateDirFailDirTooBig(t *testing.T) {
	testCreateEntryFailDirTooBig(t, true)
}

func TestCreateLinkFailDirTooBig(t *testing.T) {
	testCreateEntryFailDirTooBig(t, false)
}
// testCreateEntryFailKBFSPrefix checks that creating an entry whose
// name starts with the reserved ".kbfs" prefix fails with
// DisallowedPrefixError, for every entry type's creation path.
func testCreateEntryFailKBFSPrefix(t *testing.T, et EntryType) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	u, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	aID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(aID, u),
		EntryInfo: EntryInfo{
			Type: Dir,
		},
	}
	node := pathNode{makeBP(rootID, rmd, config, u), "p"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	name := ".kbfs_status"
	expectedErr := DisallowedPrefixError{name, ".kbfs"}
	var err error
	// Each entry type has its own creation entry point; exercise all.
	switch et {
	case Dir:
		_, _, err = config.KBFSOps().CreateDir(ctx, n, name)
	case Sym:
		_, err = config.KBFSOps().CreateLink(ctx, n, name, "a")
	case Exec:
		_, _, err = config.KBFSOps().CreateFile(ctx, n, name, true, NoExcl)
	case File:
		_, _, err = config.KBFSOps().CreateFile(ctx, n, name, false, NoExcl)
	}
	if err == nil {
		t.Errorf("Got no expected error on create")
	} else if err != expectedErr {
		t.Errorf("Got unexpected error on create: %+v", err)
	}
}
// Per-entry-type wrappers around testCreateEntryFailKBFSPrefix.
func TestCreateDirFailKBFSPrefix(t *testing.T) {
	testCreateEntryFailKBFSPrefix(t, Dir)
}

func TestCreateFileFailKBFSPrefix(t *testing.T) {
	testCreateEntryFailKBFSPrefix(t, File)
}

func TestCreateExecFailKBFSPrefix(t *testing.T) {
	testCreateEntryFailKBFSPrefix(t, Exec)
}

func TestCreateLinkFailKBFSPrefix(t *testing.T) {
	testCreateEntryFailKBFSPrefix(t, Sym)
}
// TODO: Currently only the remove tests use makeDirTree(),
// makeFile(), et al. Make the other tests use these functions, too.
// makeDirTree creates a block tree for the given path components and
// returns the DirEntry for the root block, a path, and the
// corresponding list of blocks.  If n components are given, then the
// path will have n+1 nodes (one extra for the root node), and there
// will be n+1 corresponding blocks.
func makeDirTree(id tlf.ID, uid keybase1.UID, components ...string) (
	DirEntry, path, []*DirBlock) {
	nextIDByte := byte(0x10)
	newBlockID := func() kbfsblock.ID {
		blockID := kbfsblock.FakeID(nextIDByte)
		nextIDByte++
		return blockID
	}
	// Root block first.
	rootInfo := makeBIFromID(newBlockID(), uid)
	rootEntry := DirEntry{
		BlockInfo: rootInfo,
		EntryInfo: EntryInfo{
			Type: Dir,
		},
	}
	nodes := []pathNode{{rootInfo.BlockPointer, "{root}"}}
	blocks := []*DirBlock{NewDirBlock().(*DirBlock)}
	// One directory block per path component, each hooked into its
	// parent's Children map.
	for _, component := range components {
		bi := makeBIFromID(newBlockID(), uid)
		parent := blocks[len(blocks)-1]
		parent.Children[component] = DirEntry{
			BlockInfo: bi,
			EntryInfo: EntryInfo{
				Type: Dir,
			},
		}
		nodes = append(nodes, pathNode{bi.BlockPointer, component})
		blocks = append(blocks, NewDirBlock().(*DirBlock))
	}
	return rootEntry, path{FolderBranch{Tlf: id}, nodes}, blocks
}
// makeFile adds a file entry named name (type File or Exec only) to
// parentDirBlock, deriving its block ID from dir's tail pointer, and
// returns the extended path plus a fresh empty file block.
func makeFile(dir path, parentDirBlock *DirBlock, name string, et EntryType) (
	path, *FileBlock) {
	if et != File && et != Exec {
		panic(fmt.Sprintf("Unexpected type %s", et))
	}
	tail := dir.tailPointer()
	bi := makeBIFromID(kbfsblock.FakeIDAdd(tail.ID, 1), tail.Creator)
	parentDirBlock.Children[name] = DirEntry{
		BlockInfo: bi,
		EntryInfo: EntryInfo{
			Type: et,
		},
	}
	return dir.ChildPath(name, bi.BlockPointer), NewFileBlock().(*FileBlock)
}
// makeDir adds a subdirectory entry named name to parentDirBlock,
// deriving its block ID from dir's tail pointer, and returns the
// extended path plus a fresh empty directory block.
func makeDir(dir path, parentDirBlock *DirBlock, name string) (
	path, *DirBlock) {
	tail := dir.tailPointer()
	bi := makeBIFromID(kbfsblock.FakeIDAdd(tail.ID, 1), tail.Creator)
	parentDirBlock.Children[name] = DirEntry{
		BlockInfo: bi,
		EntryInfo: EntryInfo{
			Type: Dir,
		},
	}
	return dir.ChildPath(name, bi.BlockPointer), NewDirBlock().(*DirBlock)
}
// makeSym adds a symlink entry named name to parentDirBlock.
// Symlinks have no block of their own, so only the entry type is
// recorded (dir is accepted for signature symmetry with makeFile and
// makeDir but is unused).
func makeSym(dir path, parentDirBlock *DirBlock, name string) {
	entry := DirEntry{
		EntryInfo: EntryInfo{
			Type: Sym,
		},
	}
	parentDirBlock.Children[name] = entry
}
// checkRmOp verifies the rmOp recorded in newRmd after removing
// entryName: its unref blocks, the block updates for every node along
// the changed path, the parent-dir update, and the removed name.
func checkRmOp(t *testing.T, entryName string, newRmd ReadOnlyRootMetadata,
	dirPath, newDirPath path, unrefBlocks []BlockPointer) {
	// make sure the rmOp is correct
	ro, ok := newRmd.data.Changes.Ops[0].(*rmOp)
	require.True(t, ok)
	// Every node above the parent dir should have an old->new update.
	var updates []blockUpdate
	for i := 0; i < len(dirPath.path)-1; i++ {
		updates = append(updates, blockUpdate{
			dirPath.path[i].BlockPointer,
			newDirPath.path[i].BlockPointer,
		})
	}
	checkOp(t, ro.OpCommon, nil, unrefBlocks, updates)
	dirUpdate := blockUpdate{
		dirPath.tailPointer(), newDirPath.tailPointer(),
	}
	require.Equal(t, dirUpdate, ro.Dir)
	require.Equal(t, entryName, ro.OldName)
}
// testKBFSOpsRemoveFileSuccess removes a single-block file (File or
// Exec) from a nested directory and verifies the new path, caches,
// and the recorded rmOp.
func testKBFSOpsRemoveFileSuccess(t *testing.T, et EntryType) {
	if et != File && et != Exec {
		t.Fatalf("Unexpected type %s", et)
	}
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootEntry, dirPath, dirBlocks :=
		makeDirTree(id, uid, "a", "b", "c", "d")
	rmd.data.Dir = rootEntry
	// Prime cache with all dir blocks.
	for i, dirBlock := range dirBlocks {
		testPutBlockInCache(
			t, config, dirPath.path[i].BlockPointer, id, dirBlock)
	}
	parentDirBlock := dirBlocks[len(dirBlocks)-1]
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, dirPath)
	entryName := "file"
	if et == Exec {
		entryName += ".exe"
	}
	p, block := makeFile(dirPath, parentDirBlock, entryName, et)
	testPutBlockInCache(t, config, p.tailPointer(), id, block)
	// sync block
	var newRmd ImmutableRootMetadata
	blockIDs := make([]kbfsblock.ID, len(dirPath.path))
	// a block of size 1 is being unreferenced
	var unrefBytes uint64 = 1
	expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
		dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)
	err := config.KBFSOps().RemoveEntry(ctx, n, entryName)
	require.NoError(t, err)
	newDirPath := ops.nodeCache.PathFromNode(n)
	checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(),
		blockIDs, et, "", false)
	// The entry must be gone from the new parent block.
	newParentDirBlock := getDirBlockFromCache(
		t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
	_, ok := newParentDirBlock.Children[entryName]
	require.False(t, ok)
	for _, n := range p.path {
		blockIDs = append(blockIDs, n.ID)
	}
	checkBlockCache(t, config, id, blockIDs, nil)
	unrefBlocks := []BlockPointer{p.tailPointer()}
	checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, unrefBlocks)
}
// Wrappers exercising removal of plain and executable files.
func TestKBFSOpsRemoveFileSuccess(t *testing.T) {
	testKBFSOpsRemoveFileSuccess(t, File)
}

func TestKBFSOpsRemoveExecSuccess(t *testing.T) {
	testKBFSOpsRemoveFileSuccess(t, Exec)
}
// TestKBFSOpsRemoveDirSuccess removes an empty subdirectory and
// verifies the new path, caches, and the recorded rmOp.
func TestKBFSOpsRemoveDirSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	entryName := "dir"
	rootEntry, p, blocks := makeDirTree(
		id, uid, "a", "b", "c", "d", entryName)
	rmd.data.Dir = rootEntry
	// Prime cache with all blocks.
	for i, block := range blocks {
		testPutBlockInCache(
			t, config, p.path[i].BlockPointer, id, block)
	}
	// Operate on the parent of the directory being removed.
	dirPath := *p.parentPath()
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, dirPath)
	// sync block
	var newRmd ImmutableRootMetadata
	blockIDs := make([]kbfsblock.ID, len(dirPath.path))
	// a block of size 1 is being unreferenced
	var unrefBytes uint64 = 1
	expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
		dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)
	err := config.KBFSOps().RemoveDir(ctx, n, entryName)
	require.NoError(t, err)
	newDirPath := ops.nodeCache.PathFromNode(n)
	checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(),
		blockIDs, Dir, "", false)
	// The entry must be gone from the new parent block.
	newParentBlock := getDirBlockFromCache(
		t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
	_, ok := newParentBlock.Children[entryName]
	require.False(t, ok)
	for _, n := range p.path {
		blockIDs = append(blockIDs, n.ID)
	}
	checkBlockCache(t, config, id, blockIDs, nil)
	unrefBlocks := []BlockPointer{p.tailPointer()}
	checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, unrefBlocks)
}
// TestKBFSOpsRemoveSymSuccess removes a symlink entry and verifies
// the new path, caches, and the recorded rmOp; since symlinks have no
// block, nothing gets unreferenced.
func TestKBFSOpsRemoveSymSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootEntry, dirPath, dirBlocks :=
		makeDirTree(id, uid, "a", "b", "c", "d")
	rmd.data.Dir = rootEntry
	// Prime cache with all dir blocks.
	for i, dirBlock := range dirBlocks {
		testPutBlockInCache(
			t, config, dirPath.path[i].BlockPointer, id, dirBlock)
	}
	parentDirBlock := dirBlocks[len(dirBlocks)-1]
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, dirPath)
	entryName := "sym"
	makeSym(dirPath, parentDirBlock, entryName)
	// sync block
	var newRmd ImmutableRootMetadata
	blockIDs := make([]kbfsblock.ID, len(dirPath.path))
	// No block is being unreferenced (symlinks have none).
	var unrefBytes uint64
	expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
		dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)
	err := config.KBFSOps().RemoveEntry(ctx, n, entryName)
	require.NoError(t, err)
	newDirPath := ops.nodeCache.PathFromNode(n)
	checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(),
		blockIDs, Sym, "", false)
	// The entry must be gone from the new parent block.
	newParentDirBlock := getDirBlockFromCache(
		t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
	_, ok := newParentDirBlock.Children[entryName]
	require.False(t, ok)
	for _, n := range dirPath.path {
		blockIDs = append(blockIDs, n.ID)
	}
	checkBlockCache(t, config, id, blockIDs, nil)
	checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, nil)
}
// TestKBFSOpRemoveMultiBlockFileSuccess removes a file backed by an
// indirect top block with four leaf blocks, and verifies that all
// five blocks end up unreferenced in the rmOp.  The top block is
// deliberately left out of the cache so removal has to fetch it via
// BlockOps.
func TestKBFSOpRemoveMultiBlockFileSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootEntry, dirPath, dirBlocks :=
		makeDirTree(id, uid, "a", "b", "c", "d")
	rmd.data.Dir = rootEntry
	// Prime cache with all dir blocks.
	for i, dirBlock := range dirBlocks {
		testPutBlockInCache(
			t, config, dirPath.path[i].BlockPointer, id, dirBlock)
	}
	parentDirBlock := dirBlocks[len(dirBlocks)-1]
	entryName := "multiBlockFile"
	lastBID := dirPath.tailPointer().ID
	fileBID := kbfsblock.FakeIDAdd(lastBID, 1)
	fileBI := makeBIFromID(fileBID, dirPath.tailPointer().Creator)
	parentDirBlock.Children[entryName] = DirEntry{
		BlockInfo: fileBI,
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	// TODO: Write a helper function for making a file with
	// indirect blocks and use it in other tests.
	bid1 := kbfsblock.FakeIDAdd(lastBID, 2)
	bid2 := kbfsblock.FakeIDAdd(lastBID, 3)
	bid3 := kbfsblock.FakeIDAdd(lastBID, 4)
	bid4 := kbfsblock.FakeIDAdd(lastBID, 5)
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.IsInd = true
	// Four leaves of 5 bytes each, at offsets 0, 5, 10, 15.
	fileBlock.IPtrs = []IndirectFilePtr{
		makeIFP(bid1, rmd, config, uid, 5, 0),
		makeIFP(bid2, rmd, config, uid, 5, 5),
		makeIFP(bid3, rmd, config, uid, 5, 10),
		makeIFP(bid4, rmd, config, uid, 5, 15),
	}
	block1 := NewFileBlock().(*FileBlock)
	block1.Contents = []byte{5, 4, 3, 2, 1}
	block2 := NewFileBlock().(*FileBlock)
	block2.Contents = []byte{10, 9, 8, 7, 6}
	block3 := NewFileBlock().(*FileBlock)
	block3.Contents = []byte{15, 14, 13, 12, 11}
	block4 := NewFileBlock().(*FileBlock)
	block4.Contents = []byte{20, 19, 18, 17, 16}
	fileBP := makeBP(fileBID, rmd, config, uid)
	p := dirPath.ChildPath(entryName, fileBP)
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, dirPath)
	// let the top block be uncached, so we have to fetch it from BlockOps.
	expectBlock(config, rmd, fileBP, fileBlock, nil)
	testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
	testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
	testPutBlockInCache(t, config, fileBlock.IPtrs[2].BlockPointer, id, block3)
	testPutBlockInCache(t, config, fileBlock.IPtrs[3].BlockPointer, id, block4)
	// sync block
	blockIDs := make([]kbfsblock.ID, len(dirPath.path))
	unrefBytes := uint64(1 + 4*5) // fileBlock + 4 indirect blocks
	var newRmd ImmutableRootMetadata
	expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
		dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)
	err := config.KBFSOps().RemoveEntry(ctx, n, entryName)
	require.NoError(t, err)
	newDirPath := ops.nodeCache.PathFromNode(n)
	checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(), blockIDs,
		File, "", false)
	// The entry must be gone from the new parent block.
	newParentDirBlock := getDirBlockFromCache(
		t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
	_, ok := newParentDirBlock.Children[entryName]
	require.False(t, ok)
	for _, n := range p.path {
		blockIDs = append(blockIDs, n.ID)
	}
	blockIDs = append(blockIDs, bid1, bid2, bid3, bid4)
	checkBlockCache(t, config, id, blockIDs, nil)
	// Both the top block and all four leaves must be unreferenced.
	unrefBlocks := []BlockPointer{
		fileBP,
		fileBlock.IPtrs[0].BlockPointer,
		fileBlock.IPtrs[1].BlockPointer,
		fileBlock.IPtrs[2].BlockPointer,
		fileBlock.IPtrs[3].BlockPointer,
	}
	checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, unrefBlocks)
}
// TestRemoveDirFailNonEmpty verifies that removing a directory that still
// has children fails with DirNotEmptyError.
func TestRemoveDirFailNonEmpty(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)

	uid, id, rmd := injectNewRMD(t, config)
	rootEntry, p, blocks := makeDirTree(id, uid, "a", "b", "c", "d", "e")
	rmd.data.Dir = rootEntry

	// Make every block along the path readable from the cache.
	for i := range blocks {
		testPutBlockInCache(t, config, p.path[i].BlockPointer, id, blocks[i])
	}

	ops := getOps(config, id)
	// Operate on the grandparent of the leaf, i.e. the directory "c".
	n := nodeFromPath(t, ops, *p.parentPath().parentPath())

	// "d" still contains "e", so the removal must be rejected.
	expectedErr := DirNotEmptyError{p.parentPath().tailName()}
	err := config.KBFSOps().RemoveDir(ctx, n, "d")
	require.Equal(t, expectedErr, err)
}
// testKBFSOpsRemoveFileMissingBlockSuccess checks that removing a file (or
// exec) entry still succeeds when the file's block no longer exists on the
// block server: the entry is dropped from the parent directory and the
// dangling pointer is still recorded as unreferenced by the resulting rmOp.
func testKBFSOpsRemoveFileMissingBlockSuccess(t *testing.T, et EntryType) {
	if et != File && et != Exec {
		t.Fatalf("Unexpected type %s", et)
	}
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)

	uid, id, rmd := injectNewRMD(t, config)

	rootEntry, dirPath, dirBlocks :=
		makeDirTree(id, uid, "a", "b", "c", "d")
	rmd.data.Dir = rootEntry

	// Prime cache with all dir blocks.
	for i, dirBlock := range dirBlocks {
		testPutBlockInCache(
			t, config, dirPath.path[i].BlockPointer, id, dirBlock)
	}
	parentDirBlock := dirBlocks[len(dirBlocks)-1]

	ops := getOps(config, id)
	n := nodeFromPath(t, ops, dirPath)

	entryName := "file"
	if et == Exec {
		entryName += ".exe"
	}
	p, _ := makeFile(dirPath, parentDirBlock, entryName, et)
	// The file's own block is reported missing by the server.
	// The operation might be retried several times.
	config.mockBops.EXPECT().Get(
		gomock.Any(), gomock.Any(), p.tailPointer(),
		gomock.Any(), gomock.Any()).Return(kbfsblock.BServerErrorBlockNonExistent{}).MinTimes(1)

	// sync block
	var newRmd ImmutableRootMetadata
	blockIDs := make([]kbfsblock.ID, len(dirPath.path))
	// a block of size 1 is being unreferenced
	var unrefBytes uint64 = 1
	expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
		dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)

	err := config.KBFSOps().RemoveEntry(ctx, n, entryName)
	require.NoError(t, err)

	newDirPath := ops.nodeCache.PathFromNode(n)
	checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(),
		blockIDs, File, "", false)
	// The removed entry must no longer appear in the parent directory.
	newParentDirBlock := getDirBlockFromCache(
		t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
	_, ok := newParentDirBlock.Children[entryName]
	require.False(t, ok)

	for _, n := range dirPath.path {
		blockIDs = append(blockIDs, n.ID)
	}
	checkBlockCache(t, config, id, blockIDs, nil)
	// Even though the block was missing, its pointer is still unreferenced.
	unrefBlocks := []BlockPointer{p.tailPointer()}
	checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, unrefBlocks)
}
// TestKBFSOpsRemoveFileMissingBlockSuccess runs the missing-block removal
// test for a regular file entry.
func TestKBFSOpsRemoveFileMissingBlockSuccess(t *testing.T) {
	testKBFSOpsRemoveFileMissingBlockSuccess(t, File)
}
// TestKBFSOpsRemoveExecMissingBlockSuccess runs the missing-block removal
// test for an executable file entry.
func TestKBFSOpsRemoveExecMissingBlockSuccess(t *testing.T) {
	testKBFSOpsRemoveFileMissingBlockSuccess(t, Exec)
}
// TestKBFSOpsRemoveDirMissingBlock checks that removing a directory entry
// whose block is missing from the block server still succeeds, with the
// dangling pointer recorded as unreferenced by the resulting rmOp.
func TestKBFSOpsRemoveDirMissingBlock(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)

	uid, id, rmd := injectNewRMD(t, config)

	entryName := "dir"
	rootEntry, p, blocks := makeDirTree(id, uid, "a", "b", "c", "d", entryName)
	rmd.data.Dir = rootEntry

	// Prime cache with all directory blocks except the leaf's, which
	// the server will report as missing below.
	for i := 0; i < len(blocks)-1; i++ {
		testPutBlockInCache(
			t, config, p.path[i].BlockPointer, id, blocks[i])
	}

	dirPath := *p.parentPath()
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, dirPath)

	// The operation might be retried several times.
	config.mockBops.EXPECT().Get(
		gomock.Any(), gomock.Any(), p.tailPointer(),
		gomock.Any(), gomock.Any()).Return(kbfsblock.BServerErrorBlockNonExistent{}).MinTimes(1)

	// sync block
	var newRmd ImmutableRootMetadata
	blockIDs := make([]kbfsblock.ID, len(dirPath.path))
	// a block of size 1 is being unreferenced
	var unrefBytes uint64 = 1
	expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
		dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)

	err := config.KBFSOps().RemoveDir(ctx, n, entryName)
	require.NoError(t, err)

	newDirPath := ops.nodeCache.PathFromNode(n)
	checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(),
		blockIDs, Dir, "", false)
	// The removed entry must no longer appear in the parent directory.
	newParentDirBlock := getDirBlockFromCache(
		t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
	_, ok := newParentDirBlock.Children[entryName]
	require.False(t, ok)

	for _, n := range dirPath.path {
		blockIDs = append(blockIDs, n.ID)
	}
	checkBlockCache(t, config, id, blockIDs, nil)
	// Even though the block was missing, its pointer is still unreferenced.
	unrefBlocks := []BlockPointer{p.tailPointer()}
	checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, unrefBlocks)
}
// TestRemoveDirFailNoSuchName verifies that removing a nonexistent child
// fails with NoSuchNameError.
func TestRemoveDirFailNoSuchName(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)

	uid, id, rmd := injectNewRMD(t, config)
	rootEntry, p, blocks := makeDirTree(id, uid, "a", "b", "c", "d", "e")
	rmd.data.Dir = rootEntry

	// Make every block along the path readable from the cache.
	for i := range blocks {
		testPutBlockInCache(t, config, p.path[i].BlockPointer, id, blocks[i])
	}

	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)

	// There is no child called "nonexistent" anywhere in the tree.
	expectedErr := NoSuchNameError{"nonexistent"}
	err := config.KBFSOps().RemoveDir(ctx, n, "nonexistent")
	require.Equal(t, expectedErr, err)
}
// TestRenameInDirSuccess renames "a/b" to "a/c" within the same directory
// and verifies the new path, the updated parent block, the single batch
// notification, and the contents of the resulting renameOp (whose NewDir is
// empty because source and destination directories are the same).
func TestRenameInDirSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)

	uid, id, rmd := injectNewRMD(t, config)

	rootID := kbfsblock.FakeID(41)
	rmd.data.Dir.ID = rootID
	aID := kbfsblock.FakeID(42)
	bID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(aID, uid),
		EntryInfo: EntryInfo{
			Type: Dir,
		},
	}
	aBlock := NewDirBlock().(*DirBlock)
	aBlock.Children["b"] = DirEntry{
		BlockInfo: makeBIFromID(bID, uid),
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)

	// renaming "a/b" to "a/c"
	testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)

	// sync block
	var newRmd ImmutableRootMetadata
	blocks := make([]kbfsblock.ID, 3)
	expectedPath, _ :=
		expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false,
			0, 0, 0, &newRmd, blocks)

	err := config.KBFSOps().Rename(ctx, n, "b", n, "c")
	if err != nil {
		t.Errorf("Got error on rename: %+v", err)
	}
	newP := ops.nodeCache.PathFromNode(n)
	checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
		File, "c", true)
	// The old name must be gone, and exactly one notification batched.
	b1 := getDirBlockFromCache(
		t, config, id, newP.path[1].BlockPointer, newP.Branch)
	if _, ok := b1.Children["b"]; ok {
		t.Errorf("entry for b is still around after rename")
	} else if len(config.observer.batchChanges) != 1 {
		t.Errorf("Expected 1 batch notification, got %d",
			len(config.observer.batchChanges))
	}
	blocks = blocks[:len(blocks)-1] // the last block is never in the cache
	checkBlockCache(t, config, id, append(blocks, rootID, aID), nil)

	// make sure the renameOp is correct
	ro, ok := newRmd.data.Changes.Ops[0].(*renameOp)
	if !ok {
		t.Errorf("Couldn't find the renameOp")
	}
	updates := []blockUpdate{
		{rmd.data.Dir.BlockPointer, newP.path[0].BlockPointer},
	}
	checkOp(t, ro.OpCommon, nil, nil, updates)
	oldDirUpdate := blockUpdate{aNode.BlockPointer, newP.path[1].BlockPointer}
	newDirUpdate := blockUpdate{}
	if ro.OldDir != oldDirUpdate {
		t.Errorf("Incorrect old dir update in op: %v vs. %v", ro.OldDir,
			oldDirUpdate)
	} else if ro.OldName != "b" {
		t.Errorf("Incorrect old name in op: %v", ro.OldName)
	} else if ro.NewDir != newDirUpdate {
		t.Errorf("Incorrect new dir update in op: %v (expected empty)",
			ro.NewDir)
	} else if ro.NewName != "c" {
		t.Errorf("Incorrect name in op: %v", ro.NewName)
	}
}
// TestRenameInDirOverEntrySuccess renames "a/b" onto the already-existing
// entry "a/c" in the same directory, and verifies that the overwritten
// entry's block is unreferenced by the resulting renameOp.
func TestRenameInDirOverEntrySuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)

	uid, id, rmd := injectNewRMD(t, config)

	rootID := kbfsblock.FakeID(41)
	rmd.data.Dir.ID = rootID
	aID := kbfsblock.FakeID(42)
	bID := kbfsblock.FakeID(43)
	cID := kbfsblock.FakeID(44)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(aID, uid),
		EntryInfo: EntryInfo{
			Type: Dir,
		},
	}
	aBlock := NewDirBlock().(*DirBlock)
	aBlock.Children["b"] = DirEntry{
		BlockInfo: makeBIFromID(bID, uid),
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	aBlock.Children["c"] = DirEntry{
		BlockInfo: makeBIFromID(cID, uid),
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	cBlock := NewFileBlock().(*FileBlock)
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
	cNode := pathNode{makeBP(cID, rmd, config, uid), "c"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)

	// renaming "a/b" to "a/c"
	testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	testPutBlockInCache(t, config, cNode.BlockPointer, id, cBlock)

	// sync block
	var newRmd ImmutableRootMetadata
	blocks := make([]kbfsblock.ID, 3)
	// the overwritten entry "c" (size 1) gets unreferenced
	unrefBytes := uint64(1)
	expectedPath, _ :=
		expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false,
			0, 0, unrefBytes, &newRmd, blocks)

	err := config.KBFSOps().Rename(ctx, n, "b", n, "c")
	if err != nil {
		t.Errorf("Got error on rename: %+v", err)
	}
	newP := ops.nodeCache.PathFromNode(n)
	checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
		File, "c", true)
	// The old name must be gone, and exactly one notification batched.
	b1 := getDirBlockFromCache(
		t, config, id, newP.path[1].BlockPointer, newP.Branch)
	if _, ok := b1.Children["b"]; ok {
		t.Errorf("entry for b is still around after rename")
	} else if len(config.observer.batchChanges) != 1 {
		t.Errorf("Expected 1 batch notification, got %d",
			len(config.observer.batchChanges))
	}
	blocks = blocks[:len(blocks)-1] // the last block is never in the cache
	checkBlockCache(t, config, id, append(blocks, rootID, aID, cID), nil)

	// make sure the renameOp is correct, including the unref of c's block
	ro, ok := newRmd.data.Changes.Ops[0].(*renameOp)
	if !ok {
		t.Errorf("Couldn't find the renameOp")
	}
	updates := []blockUpdate{
		{rmd.data.Dir.BlockPointer, newP.path[0].BlockPointer},
	}
	checkOp(t, ro.OpCommon, nil, []BlockPointer{cNode.BlockPointer}, updates)
	oldDirUpdate := blockUpdate{aNode.BlockPointer, newP.path[1].BlockPointer}
	newDirUpdate := blockUpdate{}
	if ro.OldDir != oldDirUpdate {
		t.Errorf("Incorrect old dir update in op: %v vs. %v", ro.OldDir,
			oldDirUpdate)
	} else if ro.OldName != "b" {
		t.Errorf("Incorrect old name in op: %v", ro.OldName)
	} else if ro.NewDir != newDirUpdate {
		t.Errorf("Incorrect new dir update in op: %v (expected empty)",
			ro.NewDir)
	} else if ro.NewName != "c" {
		t.Errorf("Incorrect name in op: %v", ro.NewName)
	}
}
// TestRenameInRootSuccess renames "a" to "b" directly in the root directory
// and verifies the new path, the updated root block, the single batch
// notification, and the resulting renameOp.
func TestRenameInRootSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)

	uid, id, rmd := injectNewRMD(t, config)

	rootID := kbfsblock.FakeID(41)
	rmd.data.Dir.ID = rootID
	aID := kbfsblock.FakeID(42)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(aID, uid),
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)

	// renaming "a" to "b"
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)

	// sync block
	var newRmd ImmutableRootMetadata
	blocks := make([]kbfsblock.ID, 2)
	expectedPath, _ :=
		expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false,
			0, 0, 0, &newRmd, blocks)

	err := config.KBFSOps().Rename(ctx, n, "a", n, "b")
	if err != nil {
		t.Errorf("Got error on rename: %+v", err)
	}
	newP := ops.nodeCache.PathFromNode(n)
	checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
		File, "b", true)
	// The old name must be gone, and exactly one notification batched.
	b0 := getDirBlockFromCache(
		t, config, id, newP.path[0].BlockPointer, newP.Branch)
	if _, ok := b0.Children["a"]; ok {
		t.Errorf("entry for a is still around after rename")
	} else if len(config.observer.batchChanges) != 1 {
		t.Errorf("Expected 1 batch notification, got %d",
			len(config.observer.batchChanges))
	}
	blocks = blocks[:len(blocks)-1] // the last block is never in the cache
	checkBlockCache(t, config, id, append(blocks, rootID), nil)

	// make sure the renameOp is correct
	ro, ok := newRmd.data.Changes.Ops[0].(*renameOp)
	if !ok {
		t.Errorf("Couldn't find the renameOp")
	}
	checkOp(t, ro.OpCommon, nil, nil, nil)
	oldDirUpdate := blockUpdate{rmd.data.Dir.BlockPointer,
		newP.path[0].BlockPointer}
	newDirUpdate := blockUpdate{}
	if ro.OldDir != oldDirUpdate {
		t.Errorf("Incorrect old dir update in op: %v vs. %v", ro.OldDir,
			oldDirUpdate)
	} else if ro.OldName != "a" {
		t.Errorf("Incorrect old name in op: %v", ro.OldName)
	} else if ro.NewDir != newDirUpdate {
		t.Errorf("Incorrect new dir update in op: %v (expected empty)",
			ro.NewDir)
	} else if ro.NewName != "b" {
		t.Errorf("Incorrect name in op: %v", ro.NewName)
	}
}
// TestRenameAcrossDirsSuccess renames "a/b" to "d/c", where "a" and "d" are
// sibling directories under the root.  It verifies both resulting paths,
// the two batch notifications, and the renameOp's OldDir/NewDir updates.
func TestRenameAcrossDirsSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)

	uid, id, rmd := injectNewRMD(t, config)

	rootID := kbfsblock.FakeID(41)
	rmd.data.Dir.ID = rootID
	aID := kbfsblock.FakeID(42)
	bID := kbfsblock.FakeID(43)
	rmd.data.Dir.ID = rootID
	rmd.data.Dir.Type = Dir
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(aID, uid),
		EntryInfo: EntryInfo{
			Type: Dir,
		},
	}
	aBlock := NewDirBlock().(*DirBlock)
	aBlock.Children["b"] = DirEntry{
		BlockInfo: makeBIFromID(bID, uid),
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
	p1 := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
	ops := getOps(config, id)
	n1 := nodeFromPath(t, ops, p1)

	// Build the sibling destination directory "d".
	dID := kbfsblock.FakeID(40)
	rootBlock.Children["d"] = DirEntry{
		BlockInfo: makeBIFromID(dID, uid),
		EntryInfo: EntryInfo{
			Type: Dir,
		},
	}
	dBlock := NewDirBlock().(*DirBlock)
	dNode := pathNode{makeBP(dID, rmd, config, uid), "d"}
	p2 := path{FolderBranch{Tlf: id}, []pathNode{node, dNode}}
	n2 := nodeFromPath(t, ops, p2)

	// renaming "a/b" to "d/c"
	testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
	testPutBlockInCache(t, config, dNode.BlockPointer, id, dBlock)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)

	// sync block; two syncs are expected, one per affected directory path
	var newRmd ImmutableRootMetadata
	blocks1 := make([]kbfsblock.ID, 2)
	expectedPath1, lastCall :=
		expectSyncBlock(t, config, nil, uid, id, "", p1, rmd, false,
			1, 0, 0, nil, blocks1)
	blocks2 := make([]kbfsblock.ID, 3)
	refBytes := uint64(1)   // need to include directory "a"
	unrefBytes := uint64(1) // need to include directory "a"
	expectedPath2, _ :=
		expectSyncBlock(t, config, lastCall, uid, id, "", p2, rmd, false, 0,
			refBytes, unrefBytes, &newRmd, blocks2)
	// fix up old expected path's common ancestor
	expectedPath1.path[0].ID = expectedPath2.path[0].ID

	err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c")
	if err != nil {
		t.Errorf("Got error on rename: %+v", err)
	}
	newP1 := ops.nodeCache.PathFromNode(n1)
	newP2 := ops.nodeCache.PathFromNode(n2)

	// fix up blocks1 -- the first partial sync stops at aBlock, and
	// checkNewPath expects {rootBlock, aBlock}
	blocks1 = []kbfsblock.ID{blocks2[0], blocks1[0]}
	checkNewPath(t, ctx, config, newP1, expectedPath1, newRmd.ReadOnly(), blocks1,
		File, "", true)
	checkNewPath(t, ctx, config, newP2, expectedPath2, newRmd.ReadOnly(), blocks2,
		File, "c", true)
	// The old name must be gone, and two notifications batched.
	b0 := getDirBlockFromCache(
		t, config, id, newP1.path[0].BlockPointer, newP1.Branch)
	if _, ok := b0.Children["b"]; ok {
		t.Errorf("entry for b is still around after rename")
	} else if len(config.observer.batchChanges) != 2 {
		t.Errorf("Expected 2 batch notifications, got %d",
			len(config.observer.batchChanges))
	}
	blocks2 = blocks2[:len(blocks2)-1] // the last block is never in the cache
	checkBlockCache(t, config, id,
		append(blocks2, rootID, aID, dID, blocks1[0]), nil)

	// make sure the renameOp is correct
	ro, ok := newRmd.data.Changes.Ops[0].(*renameOp)
	if !ok {
		t.Errorf("Couldn't find the renameOp")
	}
	updates := []blockUpdate{
		{rmd.data.Dir.BlockPointer, newP1.path[0].BlockPointer},
	}
	checkOp(t, ro.OpCommon, nil, nil, updates)
	oldDirUpdate := blockUpdate{aNode.BlockPointer, newP1.path[1].BlockPointer}
	newDirUpdate := blockUpdate{dNode.BlockPointer, newP2.path[1].BlockPointer}
	if ro.OldDir != oldDirUpdate {
		t.Errorf("Incorrect old dir update in op: %v vs. %v", ro.OldDir,
			oldDirUpdate)
	} else if ro.OldName != "b" {
		t.Errorf("Incorrect old name in op: %v", ro.OldName)
	} else if ro.NewDir != newDirUpdate {
		t.Errorf("Incorrect new dir update in op: %v vs. %v",
			ro.NewDir, newDirUpdate)
	} else if ro.NewName != "c" {
		t.Errorf("Incorrect name in op: %v", ro.NewName)
	}
}
// TestRenameAcrossPrefixSuccess renames "a/b" to "a/d/c", where the source
// directory ("a") is a strict prefix of the destination directory ("a/d").
// It verifies that the two resulting paths share the updated prefix, that
// "a"'s times were bumped, and that caches/notifications are as expected.
func TestRenameAcrossPrefixSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)

	uid, id, rmd := injectNewRMD(t, config)

	rootID := kbfsblock.FakeID(41)
	rmd.data.Dir.ID = rootID
	aID := kbfsblock.FakeID(42)
	bID := kbfsblock.FakeID(43)
	dID := kbfsblock.FakeID(40)
	rmd.data.Dir.ID = rootID
	rmd.data.Dir.Type = Dir
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(aID, uid),
		EntryInfo: EntryInfo{
			Type: Dir,
		},
	}
	aBlock := NewDirBlock().(*DirBlock)
	aBlock.Children["b"] = DirEntry{
		BlockInfo: makeBIFromID(bID, uid),
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	aBlock.Children["d"] = DirEntry{
		BlockInfo: makeBIFromID(dID, uid),
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	dBlock := NewDirBlock().(*DirBlock)
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
	dNode := pathNode{makeBP(dID, rmd, config, uid), "d"}
	p1 := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
	p2 := path{FolderBranch{Tlf: id}, []pathNode{node, aNode, dNode}}
	ops := getOps(config, id)
	n1 := nodeFromPath(t, ops, p1)
	n2 := nodeFromPath(t, ops, p2)

	// renaming "a/b" to "a/d/c"
	// the common ancestor and its parent will be changed once and then re-read
	testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
	testPutBlockInCache(t, config, dNode.BlockPointer, id, dBlock)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)

	// sync block
	var newRmd ImmutableRootMetadata
	blocks := make([]kbfsblock.ID, 4)
	expectedPath2, _ :=
		expectSyncBlock(t, config, nil, uid, id, "", p2, rmd, false,
			0, 0, 0, &newRmd, blocks)

	err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c")
	if err != nil {
		t.Errorf("Got error on rename: %+v", err)
	}
	newP1 := ops.nodeCache.PathFromNode(n1)
	newP2 := ops.nodeCache.PathFromNode(n2)

	// Both paths must share the updated common prefix.
	if newP1.path[0].ID != newP2.path[0].ID {
		t.Errorf("New old path not a prefix of new new path")
	}
	if newP1.path[1].ID != newP2.path[1].ID {
		t.Errorf("New old path not a prefix of new new path")
	}
	b0 := getDirBlockFromCache(
		t, config, id, newP1.path[0].BlockPointer, newP1.Branch)
	if b0.Children["a"].Mtime == 0 {
		t.Errorf("a's mtime didn't change")
	}
	if b0.Children["a"].Ctime == 0 {
		t.Errorf("a's ctime didn't change")
	}
	// now change the times back so checkNewPath below works without hacking
	aDe := b0.Children["a"]
	aDe.Mtime = 0
	aDe.Ctime = 0
	b0.Children["a"] = aDe

	checkNewPath(t, ctx, config, newP2, expectedPath2, newRmd.ReadOnly(), blocks,
		File, "c", true)
	// The old name must be gone, and two notifications batched.
	b1 := getDirBlockFromCache(
		t, config, id, newP1.path[1].BlockPointer, newP1.Branch)
	if _, ok := b1.Children["b"]; ok {
		t.Errorf("entry for b is still around after rename")
	} else if len(config.observer.batchChanges) != 2 {
		t.Errorf("Expected 2 batch notifications, got %d",
			len(config.observer.batchChanges))
	}
	blocks = blocks[:len(blocks)-1] // the last block is never in the cache
	checkBlockCache(t, config, id,
		append(blocks, rootID, aID, dID), nil)
}
// TestRenameAcrossOtherPrefixSuccess renames "a/d/b" to "a/c", where the
// destination directory ("a") is a strict prefix of the source directory
// ("a/d").  It verifies that both resulting paths share the updated common
// prefix, that the affected directories' times were bumped, and that the
// block cache and notifications end up in the expected state.
//
// Fixes from review: the b0 (root block) checks previously reported
// "d's mtime/ctime didn't change" while actually inspecting child "a", and
// the Rename error message said "removal".
func TestRenameAcrossOtherPrefixSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)

	uid, id, rmd := injectNewRMD(t, config)

	rootID := kbfsblock.FakeID(41)
	aID := kbfsblock.FakeID(42)
	bID := kbfsblock.FakeID(43)
	dID := kbfsblock.FakeID(40)
	rmd.data.Dir.ID = rootID
	rmd.data.Dir.Type = Dir
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(aID, uid),
		EntryInfo: EntryInfo{
			Type: Dir,
		},
	}
	aBlock := NewDirBlock().(*DirBlock)
	aBlock.Children["d"] = DirEntry{
		BlockInfo: makeBIFromID(dID, uid),
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	dBlock := NewDirBlock().(*DirBlock)
	dBlock.Children["b"] = DirEntry{
		BlockInfo: makeBIFromID(bID, uid),
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
	dNode := pathNode{makeBP(dID, rmd, config, uid), "d"}
	p1 := path{FolderBranch{Tlf: id}, []pathNode{node, aNode, dNode}}
	p2 := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
	ops := getOps(config, id)
	n1 := nodeFromPath(t, ops, p1)
	n2 := nodeFromPath(t, ops, p2)

	// renaming "a/d/b" to "a/c"
	testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
	testPutBlockInCache(t, config, dNode.BlockPointer, id, dBlock)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)

	// sync block; two syncs are expected, one per affected directory path
	var newRmd ImmutableRootMetadata
	blocks1 := make([]kbfsblock.ID, 3)
	expectedPath1, lastCall :=
		expectSyncBlock(t, config, nil, uid, id, "", p1, rmd, false,
			2, 0, 0, &newRmd, blocks1)
	blocks2 := make([]kbfsblock.ID, 3)
	refBytes := uint64(1)   // need to include directory "d"
	unrefBytes := uint64(1) // need to include directory "d"
	expectedPath2, _ :=
		expectSyncBlock(t, config, lastCall, uid, id, "", p2, rmd, false, 0,
			refBytes, unrefBytes, &newRmd, blocks2)
	// the new path is a prefix of the old path
	expectedPath1.path[0].ID = expectedPath2.path[0].ID
	expectedPath1.path[1].ID = expectedPath2.path[1].ID

	err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c")
	if err != nil {
		t.Errorf("Got error on rename: %+v", err)
	}
	newP1 := ops.nodeCache.PathFromNode(n1)
	newP2 := ops.nodeCache.PathFromNode(n2)

	// Both paths must share the updated common prefix.
	if newP2.path[0].ID != newP1.path[0].ID {
		t.Errorf("New old path not a prefix of new new path")
	}
	if newP2.path[1].ID != newP1.path[1].ID {
		t.Errorf("New old path not a prefix of new new path")
	}
	b1 := getDirBlockFromCache(
		t, config, id, newP1.path[1].BlockPointer, newP1.Branch)
	if b1.Children["d"].Mtime == 0 {
		t.Errorf("d's mtime didn't change")
	}
	if b1.Children["d"].Ctime == 0 {
		t.Errorf("d's ctime didn't change")
	}
	b0 := getDirBlockFromCache(
		t, config, id, newP1.path[0].BlockPointer, newP1.Branch)
	if b0.Children["a"].Mtime == 0 {
		t.Errorf("a's mtime didn't change")
	}
	if b0.Children["a"].Ctime == 0 {
		t.Errorf("a's ctime didn't change")
	}

	checkNewPath(t, ctx, config, newP1, expectedPath1, newRmd.ReadOnly(), blocks2,
		File, "c", true)
	// The old name must be gone, and two notifications batched.
	b2 := getDirBlockFromCache(
		t, config, id, newP1.path[2].BlockPointer, newP1.Branch)
	if _, ok := b2.Children["b"]; ok {
		t.Errorf("entry for b is still around after rename")
	} else if len(config.observer.batchChanges) != 2 {
		t.Errorf("Expected 2 batch notifications, got %d",
			len(config.observer.batchChanges))
	}
	blocks2 = blocks2[:len(blocks2)-1] // the last block is never in the cache
	checkBlockCache(t, config, id,
		append(blocks2, rootID, aID, dID, blocks1[2]), nil)
}
// TestRenameFailAcrossTopLevelFolders verifies that renaming between two
// different top-level folders is rejected with RenameAcrossDirsError.
func TestRenameFailAcrossTopLevelFolders(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)

	id1 := tlf.FakeID(1, false)
	h1 := parseTlfHandleOrBust(t, config, "alice,bob", false)
	rmd1, err := makeInitialRootMetadata(config.MetadataVersion(), id1, h1)
	require.NoError(t, err)

	id2 := tlf.FakeID(2, false)
	h2 := parseTlfHandleOrBust(t, config, "alice,bob,charlie", false)
	rmd2, err := makeInitialRootMetadata(config.MetadataVersion(), id2, h2)
	require.NoError(t, err)

	uid1 := h2.ResolvedWriters()[0]
	uid2 := h2.ResolvedWriters()[2]

	// Build a one-level path inside each folder.
	rootID1 := kbfsblock.FakeID(41)
	aID1 := kbfsblock.FakeID(42)
	node1 := pathNode{makeBP(rootID1, rmd1, config, uid1), "p"}
	aNode1 := pathNode{makeBP(aID1, rmd1, config, uid1), "a"}
	p1 := path{FolderBranch{Tlf: id1}, []pathNode{node1, aNode1}}
	ops1 := getOps(config, id1)
	n1 := nodeFromPath(t, ops1, p1)

	rootID2 := kbfsblock.FakeID(38)
	aID2 := kbfsblock.FakeID(39)
	node2 := pathNode{makeBP(rootID2, rmd2, config, uid2), "p"}
	aNode2 := pathNode{makeBP(aID2, rmd2, config, uid2), "a"}
	p2 := path{FolderBranch{Tlf: id2}, []pathNode{node2, aNode2}}
	ops2 := getOps(config, id2)
	n2 := nodeFromPath(t, ops2, p2)

	expectedErr := RenameAcrossDirsError{}
	err = config.KBFSOps().Rename(ctx, n1, "b", n2, "c")
	switch {
	case err == nil:
		t.Errorf("Got no expected error on rename")
	case err.Error() != expectedErr.Error():
		t.Errorf("Got unexpected error on rename: %+v", err)
	}
}
// TestRenameFailAcrossBranches verifies that renaming between different
// branches of the same folder is rejected with RenameAcrossDirsError.
func TestRenameFailAcrossBranches(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)

	id1 := tlf.FakeID(1, false)
	h1 := parseTlfHandleOrBust(t, config, "alice,bob", false)
	rmd1, err := makeInitialRootMetadata(config.MetadataVersion(), id1, h1)
	require.NoError(t, err)

	uid1 := h1.FirstResolvedWriter()
	rootID1 := kbfsblock.FakeID(41)
	aID1 := kbfsblock.FakeID(42)
	node1 := pathNode{makeBP(rootID1, rmd1, config, uid1), "p"}
	aNode1 := pathNode{makeBP(aID1, rmd1, config, uid1), "a"}

	// Same nodes, but one path on the default branch and one on "test".
	p1 := path{FolderBranch{Tlf: id1}, []pathNode{node1, aNode1}}
	p2 := path{FolderBranch{id1, "test"}, []pathNode{node1, aNode1}}
	ops1 := getOps(config, id1)
	n1 := nodeFromPath(t, ops1, p1)
	ops2 := config.KBFSOps().(*KBFSOpsStandard).getOpsNoAdd(
		FolderBranch{id1, "test"})
	n2 := nodeFromPath(t, ops2, p2)

	expectedErr := RenameAcrossDirsError{}
	err = config.KBFSOps().Rename(ctx, n1, "b", n2, "c")
	switch {
	case err == nil:
		t.Errorf("Got no expected error on rename")
	case err.Error() != expectedErr.Error():
		t.Errorf("Got unexpected error on rename: %+v", err)
	}
}
// TestKBFSOpsCacheReadFullSuccess reads an entire single-block file whose
// block is already present in the block cache.
func TestKBFSOpsCacheReadFullSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)

	u, id, rmd := injectNewRMD(t, config)

	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	node := pathNode{makeBP(rootID, rmd, config, u), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	pNode := nodeFromPath(t, ops, p)

	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)

	// Read the whole file starting at offset 0.
	want := fileBlock.Contents
	dest := make([]byte, len(want))
	nRead, err := config.KBFSOps().Read(ctx, pNode, dest, 0)
	if err != nil {
		t.Errorf("Got error on read: %+v", err)
	} else if nRead != int64(len(want)) {
		t.Errorf("Read the wrong number of bytes: %d", nRead)
	} else if !bytes.Equal(dest, want) {
		t.Errorf("Read bad contents: %v", dest)
	}
}
// TestKBFSOpsCacheReadPartialSuccess reads a 4-byte window at offset 2 of
// a cached single-block file.
func TestKBFSOpsCacheReadPartialSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)

	u, id, rmd := injectNewRMD(t, config)

	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	node := pathNode{makeBP(rootID, rmd, config, u), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	pNode := nodeFromPath(t, ops, p)

	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)

	// Read 4 bytes starting at offset 2; expect Contents[2:6].
	dest := make([]byte, 4)
	nRead, err := config.KBFSOps().Read(ctx, pNode, dest, 2)
	if err != nil {
		t.Errorf("Got error on read: %+v", err)
	} else if nRead != 4 {
		t.Errorf("Read the wrong number of bytes: %d", nRead)
	} else if !bytes.Equal(dest, fileBlock.Contents[2:6]) {
		t.Errorf("Read bad contents: %v", dest)
	}
}
// TestKBFSOpsCacheReadFullMultiBlockSuccess reads all 20 bytes of a file
// made of four indirect blocks (5 bytes each), all present in the cache.
func TestKBFSOpsCacheReadFullMultiBlockSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)

	u, id, rmd := injectNewRMD(t, config)

	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	id1 := kbfsblock.FakeID(44)
	id2 := kbfsblock.FakeID(45)
	id3 := kbfsblock.FakeID(46)
	id4 := kbfsblock.FakeID(47)
	// Indirect top block: four children at offsets 0, 5, 10, 15.
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.IsInd = true
	fileBlock.IPtrs = []IndirectFilePtr{
		makeIFP(id1, rmd, config, u, 0, 0),
		makeIFP(id2, rmd, config, u, 6, 5),
		makeIFP(id3, rmd, config, u, 7, 10),
		makeIFP(id4, rmd, config, u, 8, 15),
	}
	block1 := NewFileBlock().(*FileBlock)
	block1.Contents = []byte{5, 4, 3, 2, 1}
	block2 := NewFileBlock().(*FileBlock)
	block2.Contents = []byte{10, 9, 8, 7, 6}
	block3 := NewFileBlock().(*FileBlock)
	block3.Contents = []byte{15, 14, 13, 12, 11}
	block4 := NewFileBlock().(*FileBlock)
	block4.Contents = []byte{20, 19, 18, 17, 16}
	node := pathNode{makeBP(rootID, rmd, config, u), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, u), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	pNode := nodeFromPath(t, ops, p)

	// Every block, including the indirect top block, is cached.
	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
	testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
	testPutBlockInCache(t, config, fileBlock.IPtrs[2].BlockPointer, id, block3)
	testPutBlockInCache(t, config, fileBlock.IPtrs[3].BlockPointer, id, block4)

	// Expect the concatenation of all four child blocks.
	n := 20
	dest := make([]byte, n, n)
	fullContents := append(block1.Contents, block2.Contents...)
	fullContents = append(fullContents, block3.Contents...)
	fullContents = append(fullContents, block4.Contents...)
	if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil {
		t.Errorf("Got error on read: %+v", err)
	} else if n2 != int64(n) {
		t.Errorf("Read the wrong number of bytes: %d", n2)
	} else if !bytes.Equal(dest, fullContents) {
		t.Errorf("Read bad contents: %v", dest)
	}
}
// TestKBFSOpsCacheReadPartialMultiBlockSuccess reads a 10-byte window at
// offset 3 of a four-block file; the read spans blocks 1-3, so block 4 is
// intentionally never put in the cache.
func TestKBFSOpsCacheReadPartialMultiBlockSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)

	u, id, rmd := injectNewRMD(t, config)

	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	id1 := kbfsblock.FakeID(44)
	id2 := kbfsblock.FakeID(45)
	id3 := kbfsblock.FakeID(46)
	id4 := kbfsblock.FakeID(47)
	// Indirect top block: four children at offsets 0, 5, 10, 15.
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.IsInd = true
	fileBlock.IPtrs = []IndirectFilePtr{
		makeIFP(id1, rmd, config, u, 0, 0),
		makeIFP(id2, rmd, config, u, 6, 5),
		makeIFP(id3, rmd, config, u, 7, 10),
		makeIFP(id4, rmd, config, u, 8, 15),
	}
	block1 := NewFileBlock().(*FileBlock)
	block1.Contents = []byte{5, 4, 3, 2, 1}
	block2 := NewFileBlock().(*FileBlock)
	block2.Contents = []byte{10, 9, 8, 7, 6}
	block3 := NewFileBlock().(*FileBlock)
	block3.Contents = []byte{15, 14, 13, 12, 11}
	block4 := NewFileBlock().(*FileBlock)
	block4.Contents = []byte{20, 19, 18, 17, 16}
	node := pathNode{makeBP(rootID, rmd, config, u), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, u), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	pNode := nodeFromPath(t, ops, p)

	// Only blocks 1-3 are cached; the read never touches block 4.
	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
	testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
	testPutBlockInCache(t, config, fileBlock.IPtrs[2].BlockPointer, id, block3)

	// Expect bytes [3, 13): tail of block1, all of block2, head of block3.
	n := 10
	dest := make([]byte, n, n)
	contents := append(block1.Contents[3:], block2.Contents...)
	contents = append(contents, block3.Contents[:3]...)
	if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 3); err != nil {
		t.Errorf("Got error on read: %+v", err)
	} else if n2 != int64(n) {
		t.Errorf("Read the wrong number of bytes: %d", n2)
	} else if !bytes.Equal(dest, contents) {
		t.Errorf("Read bad contents: %v", dest)
	}
}
// TestKBFSOpsCacheReadFailPastEnd checks that reading at an offset at or
// beyond the end of a (cached) 10-byte file returns 0 bytes with no
// error, rather than failing.
func TestKBFSOpsCacheReadFailPastEnd(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	u, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	node := pathNode{makeBP(rootID, rmd, config, u), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	pNode := nodeFromPath(t, ops, p)
	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	dest := make([]byte, 4, 4)
	// Offset 10 is exactly the end of the file, so the read should
	// return 0 bytes successfully.
	if n, err := config.KBFSOps().Read(ctx, pNode, dest, 10); err != nil {
		t.Errorf("Got error on read: %+v", err)
	} else if n != 0 {
		t.Errorf("Read the wrong number of bytes: %d", n)
	}
}
// TestKBFSOpsServerReadFullSuccess reads an entire file whose block is
// not in the cache, so it must be fetched via the (mocked) block server,
// and checks that the full contents come back.
func TestKBFSOpsServerReadFullSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	u, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	node := pathNode{makeBP(rootID, rmd, config, u), "p"}
	fileBlockPtr := makeBP(fileID, rmd, config, u)
	fileNode := pathNode{fileBlockPtr, "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	pNode := nodeFromPath(t, ops, p)
	// cache miss means fetching metadata and getting read key
	expectBlock(config, rmd, fileBlockPtr, fileBlock, nil)
	n := len(fileBlock.Contents)
	dest := make([]byte, n, n)
	if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil {
		t.Errorf("Got error on read: %+v", err)
	} else if n2 != int64(n) {
		t.Errorf("Read the wrong number of bytes: %d", n2)
	} else if !bytes.Equal(dest, fileBlock.Contents) {
		t.Errorf("Read bad contents: %v", dest)
	}
}
// TestKBFSOpsServerReadFailNoSuchBlock checks that when the block fetch
// from the server fails with NoSuchBlockError, Read propagates exactly
// that error to the caller.
func TestKBFSOpsServerReadFailNoSuchBlock(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	u, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	node := pathNode{makeBP(rootID, rmd, config, u), "p"}
	fileBlockPtr := makeBP(fileID, rmd, config, u)
	fileNode := pathNode{fileBlockPtr, "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	pNode := nodeFromPath(t, ops, p)
	// cache miss means fetching metadata and getting read key; make the
	// fetch itself fail.
	err := NoSuchBlockError{rootID}
	expectBlock(config, rmd, fileBlockPtr, fileBlock, err)
	n := len(fileBlock.Contents)
	dest := make([]byte, n, n)
	if _, err2 := config.KBFSOps().Read(ctx, pNode, dest, 0); err2 == nil {
		t.Errorf("Got no expected error")
	} else if err2 != err {
		t.Errorf("Got unexpected error: %+v", err2)
	}
}
// checkSyncOp verifies that the given syncOp records the expected
// unreferenced file pointer and exactly the expected write ranges.
//
// The nil check and the length check are fatal: the original code used
// t.Error for both, which does not stop execution, so a nil op would
// panic on the so.File.Unref dereference and a short so.Writes slice
// would panic with an index-out-of-range in the comparison loop.
func checkSyncOp(t *testing.T, codec kbfscodec.Codec,
	so *syncOp, filePtr BlockPointer, writes []WriteRange) {
	if so == nil {
		// Must stop here; the checks below dereference so.
		t.Fatal("No sync info for written file!")
	}
	if so.File.Unref != filePtr {
		t.Errorf("Unexpected unref file in sync op: %v vs %v",
			so.File.Unref, filePtr)
	}
	if len(so.Writes) != len(writes) {
		// Must stop here; the loop below indexes so.Writes by the
		// length of writes.
		t.Fatalf("Unexpected number of writes: %v (expected %v)",
			len(so.Writes), len(writes))
	}
	for i, w := range writes {
		writeEqual, err := kbfscodec.Equal(codec, so.Writes[i], w)
		if err != nil {
			t.Fatal(err)
		}
		if !writeEqual {
			t.Errorf("Unexpected write: %v vs %v", so.Writes[i], w)
		}
	}
}
// checkSyncOpInCache looks up the in-progress syncOp for the given file
// pointer in the folder's unref cache and verifies it via checkSyncOp.
//
// The missing-entry check is fatal: the original used t.Error, which
// does not stop execution, so the subsequent si.op access would panic
// on the map's zero value.
func checkSyncOpInCache(t *testing.T, codec kbfscodec.Codec,
	ops *folderBranchOps, filePtr BlockPointer, writes []WriteRange) {
	// check the in-progress syncOp
	si, ok := ops.blocks.unrefCache[filePtr.Ref()]
	if !ok {
		t.Fatal("No sync info for written file!")
	}
	checkSyncOp(t, codec, si.op, filePtr, writes)
}
// updateWithDirtyEntries is a test helper that calls
// updateWithDirtyEntriesLocked while holding blockLock for reading, as
// that locked variant requires.
func updateWithDirtyEntries(ctx context.Context, ops *folderBranchOps,
	lState *lockState, block *DirBlock) (*DirBlock, error) {
	ops.blocks.blockLock.RLock(lState)
	defer ops.blocks.blockLock.RUnlock(lState)
	return ops.blocks.updateWithDirtyEntriesLocked(ctx, lState, block)
}
// TestKBFSOpsWriteNewBlockSuccess writes 10 bytes at offset 0 into an
// empty file and checks the resulting dirty block contents, the
// local-change notification (path length and context propagation), the
// updated dir entry's writer and size, the dirty-block cache state, and
// the in-progress syncOp's write range.
func TestKBFSOpsWriteNewBlockSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["f"] = DirEntry{
		BlockInfo: BlockInfo{
			BlockPointer: makeBP(fileID, rmd, config, uid),
			EncodedSize:  1,
		},
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	fileBlock := NewFileBlock().(*FileBlock)
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	// The mocked splitter copies the whole write into the single block
	// (no split).
	config.mockBsplit.EXPECT().CopyUntilSplit(
		gomock.Any(), gomock.Any(), data, int64(0)).
		Do(func(block *FileBlock, lb bool, data []byte, off int64) {
			block.Contents = data
		}).Return(int64(len(data)))
	if err := config.KBFSOps().Write(ctx, n, data, 0); err != nil {
		t.Errorf("Got error on write: %+v", err)
	}
	newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
		p.Branch)
	newRootBlock := getDirBlockFromCache(
		t, config, id, node.BlockPointer, p.Branch)
	lState := makeFBOLockState()
	// Apply pending dirty dir entries so the size/writer checks below
	// see the post-write entry.
	newRootBlock, err := updateWithDirtyEntries(ctx, ops, lState, newRootBlock)
	require.NoError(t, err)
	if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
		len(p.path) {
		t.Errorf("Missing or incorrect local update during write: %v",
			config.observer.localChange)
	} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
		t.Errorf("Wrong context value passed in local notify: %v",
			config.observer.ctx.Value(tCtxID))
	} else if !bytes.Equal(data, newFileBlock.Contents) {
		t.Errorf("Wrote bad contents: %v", data)
	} else if newRootBlock.Children["f"].GetWriter() != uid {
		t.Errorf("Wrong last writer: %v",
			newRootBlock.Children["f"].GetWriter())
	} else if newRootBlock.Children["f"].Size != uint64(len(data)) {
		t.Errorf("Wrong size for written file: %d",
			newRootBlock.Children["f"].Size)
	}
	checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
		map[BlockPointer]BranchName{
			fileNode.BlockPointer: p.Branch,
		})
	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
		[]WriteRange{{Off: 0, Len: uint64(len(data))}})
}
// TestKBFSOpsWriteExtendSuccess writes 5 bytes at offset 5, exactly at
// the end of a 5-byte file, extending it to 10 bytes, and checks the
// dirty block contents, the local notification, the cache state, and
// the recorded syncOp write range.
func TestKBFSOpsWriteExtendSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["f"] = DirEntry{
		BlockInfo: BlockInfo{
			BlockPointer: makeBP(fileID, rmd, config, uid),
			EncodedSize:  1,
		},
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.Contents = []byte{1, 2, 3, 4, 5}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	data := []byte{6, 7, 8, 9, 10}
	expectedFullData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	// The mocked splitter appends the new data to the existing block.
	config.mockBsplit.EXPECT().CopyUntilSplit(
		gomock.Any(), gomock.Any(), data, int64(5)).
		Do(func(block *FileBlock, lb bool, data []byte, off int64) {
			block.Contents = expectedFullData
		}).Return(int64(len(data)))
	if err := config.KBFSOps().Write(ctx, n, data, 5); err != nil {
		t.Errorf("Got error on write: %+v", err)
	}
	newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
		p.Branch)
	if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
		len(p.path) {
		t.Errorf("Missing or incorrect local update during write: %v",
			config.observer.localChange)
	} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
		t.Errorf("Wrong context value passed in local notify: %v",
			config.observer.ctx.Value(tCtxID))
	} else if !bytes.Equal(expectedFullData, newFileBlock.Contents) {
		t.Errorf("Wrote bad contents: %v", data)
	}
	checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
		map[BlockPointer]BranchName{
			fileNode.BlockPointer: p.Branch,
		})
	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
		[]WriteRange{{Off: 5, Len: uint64(len(data))}})
}
// TestKBFSOpsWritePastEndSuccess writes 5 bytes at offset 7, two bytes
// past the end of a 5-byte file; the expected result contains a
// zero-filled hole at offsets 5-6 followed by the new data.
func TestKBFSOpsWritePastEndSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["f"] = DirEntry{
		BlockInfo: BlockInfo{
			BlockPointer: makeBP(fileID, rmd, config, uid),
			EncodedSize:  1,
		},
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.Contents = []byte{1, 2, 3, 4, 5}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	data := []byte{6, 7, 8, 9, 10}
	// Note the two zero bytes filling the gap between the old end (5)
	// and the write offset (7).
	expectedFullData := []byte{1, 2, 3, 4, 5, 0, 0, 6, 7, 8, 9, 10}
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	config.mockBsplit.EXPECT().CopyUntilSplit(
		gomock.Any(), gomock.Any(), data, int64(7)).
		Do(func(block *FileBlock, lb bool, data []byte, off int64) {
			block.Contents = expectedFullData
		}).Return(int64(len(data)))
	if err := config.KBFSOps().Write(ctx, n, data, 7); err != nil {
		t.Errorf("Got error on write: %+v", err)
	}
	newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
		p.Branch)
	if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
		len(p.path) {
		t.Errorf("Missing or incorrect local update during write: %v",
			config.observer.localChange)
	} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
		t.Errorf("Wrong context value passed in local notify: %v",
			config.observer.ctx.Value(tCtxID))
	} else if !bytes.Equal(expectedFullData, newFileBlock.Contents) {
		t.Errorf("Wrote bad contents: %v", data)
	}
	checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
		map[BlockPointer]BranchName{
			fileNode.BlockPointer: p.Branch,
		})
	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
		[]WriteRange{{Off: 7, Len: uint64(len(data))}})
}
// TestKBFSOpsWriteCauseSplit writes 10 bytes at offset 1 into an empty
// file while the mocked block splitter only accepts 5 bytes per copy,
// forcing the file to split into an indirect parent with two new leaf
// blocks.  It then verifies both leaves' contents, the parent's
// indirect pointers and offsets, the updated entry size, the dirty
// cache state, and the recorded syncOp.
//
// Fix: the error message for the second pointer's offset check said
// "block 5"; it checks block 2 (IPtrs[1]).
func TestKBFSOpsWriteCauseSplit(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["f"] = DirEntry{
		BlockInfo: BlockInfo{
			BlockPointer: makeBP(fileID, rmd, config, uid),
			EncodedSize:  1,
		},
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.Contents = []byte{}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	newData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	// Offset 1 leaves an implicit zero byte at offset 0.
	expectedFullData := append([]byte{0}, newData...)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	// First splitter call copies only the first half (5 bytes), which
	// triggers the split.
	config.mockBsplit.EXPECT().CopyUntilSplit(
		gomock.Any(), gomock.Any(), newData, int64(1)).
		Do(func(block *FileBlock, lb bool, data []byte, off int64) {
			block.Contents = append([]byte{0}, data[0:5]...)
		}).Return(int64(5))
	id1 := kbfsblock.FakeID(44)
	id2 := kbfsblock.FakeID(45)
	// Temporary IDs for the new left and right leaf blocks.
	config.mockCrypto.EXPECT().MakeTemporaryBlockID().Return(id1, nil)
	config.mockCrypto.EXPECT().MakeTemporaryBlockID().Return(id2, nil)
	// Second splitter call copies the remaining half into the new right
	// block.
	config.mockBsplit.EXPECT().CopyUntilSplit(
		gomock.Any(), gomock.Any(), newData[5:10], int64(0)).
		Do(func(block *FileBlock, lb bool, data []byte, off int64) {
			block.Contents = data
		}).Return(int64(5))
	if err := config.KBFSOps().Write(ctx, n, newData, 1); err != nil {
		t.Errorf("Got error on write: %+v", err)
	}
	b, _ := config.BlockCache().Get(node.BlockPointer)
	newRootBlock := b.(*DirBlock)
	lState := makeFBOLockState()
	newRootBlock, err := updateWithDirtyEntries(ctx, ops, lState, newRootBlock)
	require.NoError(t, err)
	// Fetch the dirty parent (now indirect) and both new leaf blocks.
	b, _ = config.DirtyBlockCache().Get(id, fileNode.BlockPointer, p.Branch)
	pblock := b.(*FileBlock)
	b, _ = config.DirtyBlockCache().Get(id, makeBP(id1, rmd, config, uid),
		p.Branch)
	block1 := b.(*FileBlock)
	b, _ = config.DirtyBlockCache().Get(id, makeBP(id2, rmd, config, uid),
		p.Branch)
	block2 := b.(*FileBlock)
	if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
		len(p.path) {
		t.Errorf("Missing or incorrect local update during write: %v",
			config.observer.localChange)
	} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
		t.Errorf("Wrong context value passed in local notify: %v",
			config.observer.ctx.Value(tCtxID))
	} else if !bytes.Equal(expectedFullData[0:6], block1.Contents) {
		t.Errorf("Wrote bad contents to block 1: %v", block1.Contents)
	} else if !bytes.Equal(expectedFullData[6:11], block2.Contents) {
		t.Errorf("Wrote bad contents to block 2: %v", block2.Contents)
	} else if !pblock.IsInd {
		t.Errorf("Parent block is not indirect!")
	} else if len(pblock.IPtrs) != 2 {
		t.Errorf("Wrong number of pointers in pblock: %v", pblock.IPtrs)
	} else if pblock.IPtrs[0].ID != id1 {
		t.Errorf("Parent block has wrong id for block 1: %v (vs. %v)",
			pblock.IPtrs[0].ID, id1)
	} else if pblock.IPtrs[1].ID != id2 {
		t.Errorf("Parent block has wrong id for block 2: %v",
			pblock.IPtrs[1].ID)
	} else if pblock.IPtrs[0].Off != 0 {
		t.Errorf("Parent block has wrong offset for block 1: %d",
			pblock.IPtrs[0].Off)
	} else if pblock.IPtrs[1].Off != 6 {
		t.Errorf("Parent block has wrong offset for block 2: %d",
			pblock.IPtrs[1].Off)
	} else if newRootBlock.Children["f"].Size != uint64(11) {
		t.Errorf("Wrong size for written file: %d",
			newRootBlock.Children["f"].Size)
	}
	checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
		map[BlockPointer]BranchName{
			fileNode.BlockPointer:        p.Branch,
			pblock.IPtrs[0].BlockPointer: p.Branch,
			pblock.IPtrs[1].BlockPointer: p.Branch,
		})
	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
		[]WriteRange{{Off: 1, Len: uint64(len(newData))}})
}
// mergeUnrefCache is a test helper that merges the file's pending
// unref-cache sync info into the given RootMetadata, holding blockLock
// for reading while it does so.
func mergeUnrefCache(
	ops *folderBranchOps, lState *lockState, file path, md *RootMetadata) {
	ops.blocks.blockLock.RLock(lState)
	defer ops.blocks.blockLock.RUnlock(lState)
	ops.blocks.unrefCache[file.tailPointer().Ref()].mergeUnrefCache(md)
}
// TestKBFSOpsWriteOverMultipleBlocks writes 5 bytes at offset 2 of an
// indirect file whose data straddles two 5-byte leaf blocks, so the
// write updates the tail of block 1 and the head of block 2.  It checks
// both updated leaves, the local notification, the syncOp write range,
// and the dirty cache state.
//
// Fixes: the failure messages for bad leaf contents printed the
// pre-write block1/block2 contents instead of the actual post-write
// newBlock1/newBlock2 contents being compared; a stale commented-out
// gomock expectation was removed.
func TestKBFSOpsWriteOverMultipleBlocks(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	id1 := kbfsblock.FakeID(44)
	id2 := kbfsblock.FakeID(45)
	rootBlock := NewDirBlock().(*DirBlock)
	filePtr := BlockPointer{
		ID: fileID, KeyGen: 1, DataVer: 1,
		Context: kbfsblock.Context{
			Creator: uid,
		},
	}
	rootBlock.Children["f"] = DirEntry{
		BlockInfo: BlockInfo{
			BlockPointer: filePtr,
			EncodedSize:  1,
		},
		EntryInfo: EntryInfo{
			Size: 10,
		},
	}
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.IsInd = true
	fileBlock.IPtrs = []IndirectFilePtr{
		makeIFP(id1, rmd, config, uid, 5, 0),
		makeIFP(id2, rmd, config, uid, 6, 5),
	}
	block1 := NewFileBlock().(*FileBlock)
	block1.Contents = []byte{5, 4, 3, 2, 1}
	block2 := NewFileBlock().(*FileBlock)
	block2.Contents = []byte{10, 9, 8, 7, 6}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	data := []byte{1, 2, 3, 4, 5}
	expectedFullData := []byte{5, 4, 1, 2, 3, 4, 5, 8, 7, 6}
	so, err := newSyncOp(filePtr)
	require.NoError(t, err)
	rmd.AddOp(so)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
	testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
	// First splitter call: the first 3 bytes of the write land in the
	// tail of block 1.
	config.mockBsplit.EXPECT().CopyUntilSplit(
		gomock.Any(), gomock.Any(), []byte{1, 2, 3}, int64(2)).
		Do(func(block *FileBlock, lb bool, data []byte, off int64) {
			block.Contents = append(block1.Contents[0:2], data[0:3]...)
		}).Return(int64(3))
	// Second splitter call: the remaining 2 bytes overwrite the head of
	// block 2.
	config.mockBsplit.EXPECT().CopyUntilSplit(
		gomock.Any(), gomock.Any(), data[3:], int64(0)).
		Do(func(block *FileBlock, lb bool, data []byte, off int64) {
			block.Contents = append(data, block2.Contents[2:]...)
		}).Return(int64(2))
	if err := config.KBFSOps().Write(ctx, n, data, 2); err != nil {
		t.Errorf("Got error on write: %+v", err)
	}
	newBlock1 := getFileBlockFromCache(t, config, id,
		fileBlock.IPtrs[0].BlockPointer, p.Branch)
	newBlock2 := getFileBlockFromCache(t, config, id,
		fileBlock.IPtrs[1].BlockPointer, p.Branch)
	if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
		len(p.path) {
		t.Errorf("Missing or incorrect local update during write: %v",
			config.observer.localChange)
	} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
		t.Errorf("Wrong context value passed in local notify: %v",
			config.observer.ctx.Value(tCtxID))
	} else if !bytes.Equal(expectedFullData[0:5], newBlock1.Contents) {
		t.Errorf("Wrote bad contents to block 1: %v", newBlock1.Contents)
	} else if !bytes.Equal(expectedFullData[5:10], newBlock2.Contents) {
		t.Errorf("Wrote bad contents to block 2: %v", newBlock2.Contents)
	}
	lState := makeFBOLockState()
	// merge the unref cache to make it easy to check for changes
	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
		[]WriteRange{{Off: 2, Len: uint64(len(data))}})
	mergeUnrefCache(ops, lState, p, rmd)
	checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID, id1, id2},
		map[BlockPointer]BranchName{
			fileNode.BlockPointer:           p.Branch,
			fileBlock.IPtrs[0].BlockPointer: p.Branch,
			fileBlock.IPtrs[1].BlockPointer: p.Branch,
		})
}
// TestKBFSOpsWriteFailTooBig checks that a write which would push the
// file past the configured maximum file size (10-byte file + 3-byte
// write at offset 10, with maxFileBytes = 12) fails with
// FileTooBigError.
func TestKBFSOpsWriteFailTooBig(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["f"] = DirEntry{
		BlockInfo: BlockInfo{
			BlockPointer: makeBP(fileID, rmd, config, uid),
			EncodedSize:  1,
		},
		EntryInfo: EntryInfo{
			Type: File,
			Size: 10,
		},
	}
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.Contents = []byte{1, 2, 3, 4, 5}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	data := []byte{6, 7, 8}
	// 10 (current size) + 3 (write) > 12, so the write must be refused.
	config.maxFileBytes = 12
	err := config.KBFSOps().Write(ctx, n, data, 10)
	if err == nil {
		t.Errorf("Got no expected error on Write")
	} else if _, ok := err.(FileTooBigError); !ok {
		t.Errorf("Got unexpected error on Write: %+v", err)
	}
}
// Read tests check the same error cases, so no need for similar write
// error tests
// TestKBFSOpsTruncateToZeroSuccess truncates a 10-byte file to length 0
// and checks that the dirty block is empty, the dir entry's size and
// writer are updated, the local notification fires, and the syncOp
// records a zero-length write at offset 0.
func TestKBFSOpsTruncateToZeroSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["f"] = DirEntry{
		BlockInfo: BlockInfo{
			BlockPointer: makeBP(fileID, rmd, config, uid),
			EncodedSize:  1,
		},
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	data := []byte{}
	if err := config.KBFSOps().Truncate(ctx, n, 0); err != nil {
		t.Errorf("Got error on truncate: %+v", err)
	}
	newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
		p.Branch)
	newRootBlock := getDirBlockFromCache(
		t, config, id, node.BlockPointer, p.Branch)
	lState := makeFBOLockState()
	// Apply pending dirty dir entries so the size/writer checks see the
	// post-truncate entry.
	newRootBlock, err := updateWithDirtyEntries(ctx, ops, lState, newRootBlock)
	require.NoError(t, err)
	if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
		len(p.path) {
		t.Errorf("Missing or incorrect local update during truncate: %v",
			config.observer.localChange)
	} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
		t.Errorf("Wrong context value passed in local notify: %v",
			config.observer.ctx.Value(tCtxID))
	} else if !bytes.Equal(data, newFileBlock.Contents) {
		t.Errorf("Wrote bad contents: %v", newFileBlock.Contents)
	} else if newRootBlock.Children["f"].GetWriter() != uid {
		t.Errorf("Wrong last writer: %v",
			newRootBlock.Children["f"].GetWriter())
	} else if newRootBlock.Children["f"].Size != 0 {
		t.Errorf("Wrong size for written file: %d",
			newRootBlock.Children["f"].Size)
	}
	checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
		map[BlockPointer]BranchName{
			fileNode.BlockPointer: p.Branch,
		})
	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
		[]WriteRange{{Off: 0, Len: 0}})
}
// TestKBFSOpsTruncateSameSize truncates a 10-byte file to its current
// size and checks that this is a no-op: no error, no local-change
// notification, unchanged contents, and nothing marked dirty.
func TestKBFSOpsTruncateSameSize(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	u, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["f"] = DirEntry{
		BlockInfo: makeBIFromID(fileID, u),
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	node := pathNode{makeBP(rootID, rmd, config, u), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	data := fileBlock.Contents
	if err := config.KBFSOps().Truncate(ctx, n, 10); err != nil {
		t.Errorf("Got error on truncate: %+v", err)
	} else if config.observer.localChange != nil {
		t.Errorf("Unexpected local update during truncate: %v",
			config.observer.localChange)
	} else if !bytes.Equal(data, fileBlock.Contents) {
		t.Errorf("Wrote bad contents: %v", data)
	}
	// nil dirty-block map: nothing should have been marked dirty.
	checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID}, nil)
}
// TestKBFSOpsTruncateSmallerSuccess truncates a 10-byte single-block
// file down to 5 bytes and checks the shortened dirty block contents,
// the local notification, the dirty cache state, and the syncOp record
// (a zero-length write at the truncation point).
func TestKBFSOpsTruncateSmallerSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["f"] = DirEntry{
		BlockInfo: BlockInfo{
			BlockPointer: makeBP(fileID, rmd, config, uid),
			EncodedSize:  1,
		},
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	data := []byte{1, 2, 3, 4, 5}
	if err := config.KBFSOps().Truncate(ctx, n, 5); err != nil {
		t.Errorf("Got error on truncate: %+v", err)
	}
	newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
		p.Branch)
	if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
		len(p.path) {
		t.Errorf("Missing or incorrect local update during truncate: %v",
			config.observer.localChange)
	} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
		t.Errorf("Wrong context value passed in local notify: %v",
			config.observer.ctx.Value(tCtxID))
	} else if !bytes.Equal(data, newFileBlock.Contents) {
		t.Errorf("Wrote bad contents: %v", data)
	}
	checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
		map[BlockPointer]BranchName{
			fileNode.BlockPointer: p.Branch,
		})
	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
		[]WriteRange{{Off: 5, Len: 0}})
}
// TestKBFSOpsTruncateShortensLastBlock truncates a 10-byte indirect
// file (two 5-byte leaves) to 7 bytes, which shortens the second leaf
// to 2 bytes without removing it.  It checks both leaves, the parent's
// pointer count, the unref byte accounting, and the dirty cache state.
func TestKBFSOpsTruncateShortensLastBlock(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	id1 := kbfsblock.FakeID(44)
	id2 := kbfsblock.FakeID(45)
	rootBlock := NewDirBlock().(*DirBlock)
	fileInfo := makeBIFromID(fileID, uid)
	rootBlock.Children["f"] = DirEntry{
		BlockInfo: fileInfo,
		EntryInfo: EntryInfo{
			Size: 10,
		},
	}
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.IsInd = true
	fileBlock.IPtrs = []IndirectFilePtr{
		makeIFP(id1, rmd, config, uid, 5, 0),
		makeIFP(id2, rmd, config, uid, 6, 5),
	}
	block1 := NewFileBlock().(*FileBlock)
	block1.Contents = []byte{5, 4, 3, 2, 1}
	block2 := NewFileBlock().(*FileBlock)
	block2.Contents = []byte{10, 9, 8, 7, 6}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	so, err := newSyncOp(fileInfo.BlockPointer)
	require.NoError(t, err)
	rmd.AddOp(so)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
	testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
	// After truncating to 7, the second leaf keeps only its first two
	// bytes.
	data2 := []byte{10, 9}
	if err := config.KBFSOps().Truncate(ctx, n, 7); err != nil {
		t.Errorf("Got error on truncate: %+v", err)
	}
	newPBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
		p.Branch)
	newBlock1 := getFileBlockFromCache(t, config, id,
		fileBlock.IPtrs[0].BlockPointer, p.Branch)
	newBlock2 := getFileBlockFromCache(t, config, id,
		fileBlock.IPtrs[1].BlockPointer, p.Branch)
	lState := makeFBOLockState()
	// merge unref changes so we can easily check the block changes
	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
		[]WriteRange{{Off: 7, Len: 0}})
	mergeUnrefCache(ops, lState, p, rmd)
	if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
		len(p.path) {
		t.Errorf("Missing or incorrect local update during truncate: %v",
			config.observer.localChange)
	} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
		t.Errorf("Wrong context value passed in local notify: %v",
			config.observer.ctx.Value(tCtxID))
	} else if !bytes.Equal(block1.Contents, newBlock1.Contents) {
		t.Errorf("Wrote bad contents for block 1: %v", newBlock1.Contents)
	} else if !bytes.Equal(data2, newBlock2.Contents) {
		t.Errorf("Wrote bad contents for block 2: %v", newBlock2.Contents)
	} else if len(newPBlock.IPtrs) != 2 {
		t.Errorf("Wrong number of indirect pointers: %d", len(newPBlock.IPtrs))
	} else if rmd.UnrefBytes() != 0+6 {
		// The file ID and the last block were modified and marked dirty.
		t.Errorf("Truncated block not correctly unref'd, unrefBytes = %d",
			rmd.UnrefBytes())
	}
	checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID, id1, id2},
		map[BlockPointer]BranchName{
			fileNode.BlockPointer:           p.Branch,
			fileBlock.IPtrs[1].BlockPointer: p.Branch,
		})
}
// TestKBFSOpsTruncateRemovesABlock truncates a 10-byte indirect file
// (two 5-byte leaves) down to 4 bytes, which drops the second leaf
// entirely and shortens the first.  It checks the remaining leaf, the
// parent's pointer count, the unref byte accounting (both old blocks
// unreferenced), and the dirty cache state.
func TestKBFSOpsTruncateRemovesABlock(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	id1 := kbfsblock.FakeID(44)
	id2 := kbfsblock.FakeID(45)
	rootBlock := NewDirBlock().(*DirBlock)
	fileInfo := makeBIFromID(fileID, uid)
	rootBlock.Children["f"] = DirEntry{
		BlockInfo: fileInfo,
		EntryInfo: EntryInfo{
			Size: 10,
		},
	}
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.IsInd = true
	fileBlock.IPtrs = []IndirectFilePtr{
		makeIFP(id1, rmd, config, uid, 5, 0),
		makeIFP(id2, rmd, config, uid, 6, 5),
	}
	block1 := NewFileBlock().(*FileBlock)
	block1.Contents = []byte{5, 4, 3, 2, 1}
	block2 := NewFileBlock().(*FileBlock)
	block2.Contents = []byte{10, 9, 8, 7, 6}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	so, err := newSyncOp(fileInfo.BlockPointer)
	require.NoError(t, err)
	rmd.AddOp(so)
	// block2 is never cached: the truncate drops it without needing its
	// contents.
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
	data := []byte{5, 4, 3, 2}
	if err := config.KBFSOps().Truncate(ctx, n, 4); err != nil {
		t.Errorf("Got error on truncate: %+v", err)
	}
	newPBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
		p.Branch)
	newBlock1 := getFileBlockFromCache(t, config, id,
		fileBlock.IPtrs[0].BlockPointer, p.Branch)
	lState := makeFBOLockState()
	// merge unref changes so we can easily check the block changes
	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
		[]WriteRange{{Off: 4, Len: 0}})
	mergeUnrefCache(ops, lState, p, rmd)
	if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
		len(p.path) {
		t.Errorf("Missing or incorrect local update during truncate: %v",
			config.observer.localChange)
	} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
		t.Errorf("Wrong context value passed in local notify: %v",
			config.observer.ctx.Value(tCtxID))
	} else if !bytes.Equal(data, newBlock1.Contents) {
		t.Errorf("Wrote bad contents: %v", newBlock1.Contents)
	} else if len(newPBlock.IPtrs) != 1 {
		t.Errorf("Wrong number of indirect pointers: %d", len(newPBlock.IPtrs))
	} else if rmd.UnrefBytes() != 0+5+6 {
		// The fileid and both blocks were all modified and marked dirty
		t.Errorf("Truncated block not correctly unref'd, unrefBytes = %d",
			rmd.UnrefBytes())
	}
	checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID, id1},
		map[BlockPointer]BranchName{
			fileNode.BlockPointer:           p.Branch,
			fileBlock.IPtrs[0].BlockPointer: p.Branch,
		})
}
// TestKBFSOpsTruncateBiggerSuccess checks that truncating a file to a
// size larger than its current length zero-fills the extension via the
// block splitter, fires a local change notification, and records the
// growth as a sync-op write covering only the newly added bytes.
func TestKBFSOpsTruncateBiggerSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	fileID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["f"] = DirEntry{
		BlockInfo: BlockInfo{
			BlockPointer: makeBP(fileID, rmd, config, uid),
			EncodedSize:  1,
		},
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.Contents = []byte{1, 2, 3, 4, 5}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	// The grow from 5 to 10 bytes is expected to copy five zero bytes
	// onto the end of the existing block through the splitter.
	config.mockBsplit.EXPECT().CopyUntilSplit(
		gomock.Any(), gomock.Any(), []byte{0, 0, 0, 0, 0}, int64(5)).
		Do(func(block *FileBlock, lb bool, data []byte, off int64) {
			block.Contents = append(block.Contents, data...)
		}).Return(int64(5))
	data := []byte{1, 2, 3, 4, 5, 0, 0, 0, 0, 0}
	if err := config.KBFSOps().Truncate(ctx, n, 10); err != nil {
		t.Errorf("Got error on truncate: %+v", err)
	}
	newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
		p.Branch)
	if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
		len(p.path) {
		t.Errorf("Missing or incorrect local update during truncate: %v",
			config.observer.localChange)
	} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
		t.Errorf("Wrong context value passed in local notify: %v",
			config.observer.ctx.Value(tCtxID))
	} else if !bytes.Equal(data, newFileBlock.Contents) {
		// Log the actual bytes written, not the expected `data` slice
		// (the previous message printed the expected value, which would
		// never reveal what the mismatch actually was).
		t.Errorf("Wrote bad contents: %v", newFileBlock.Contents)
	}
	checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
		map[BlockPointer]BranchName{
			fileNode.BlockPointer: p.Branch,
		})
	// A truncate past the end of the file actually translates into a
	// write for the difference
	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
		[]WriteRange{{Off: 5, Len: 5}})
}
// testSetExSuccess exercises KBFSOps().SetEx(ctx, n, ex) for every
// entry type.  Only File<->Exec transitions should produce a batch
// change and a new MD with a setAttrOp; Sym/Dir entries and no-op
// transitions (File&&!ex, Exec&&ex) must leave everything untouched.
func testSetExSuccess(t *testing.T, entryType EntryType, ex bool) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, entryType != Sym)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	rmd.data.Dir.ID = rootID
	aID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(aID, uid),
		EntryInfo: EntryInfo{
			Size: 1,
			Type: entryType,
		},
	}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	expectedChanges := 1
	// SetEx() should do nothing when the exec status doesn't change.
	if entryType == Sym || entryType == Dir || (entryType == File && !ex) ||
		(entryType == Exec && ex) {
		expectedChanges = 0
	}
	var expectedPath path
	var newRmd ImmutableRootMetadata
	var blocks []kbfsblock.ID
	if expectedChanges > 0 {
		// sync block
		blocks = make([]kbfsblock.ID, 2)
		expectedPath, _ = expectSyncBlock(t, config, nil, uid, id, "",
			*p.parentPath(), rmd, false, 0, 0, 0, &newRmd, blocks)
		expectedPath.path = append(expectedPath.path, aNode)
	}
	// SetEx() should only change the type of File and Exec.
	var expectedType EntryType
	if entryType == File && ex {
		expectedType = Exec
	} else if entryType == Exec && !ex {
		expectedType = File
	} else {
		expectedType = entryType
	}
	// chmod a+x a
	err := config.KBFSOps().SetEx(ctx, n, ex)
	if err != nil {
		t.Errorf("Got unexpected error on setex: %+v", err)
	}
	newP := ops.nodeCache.PathFromNode(n)
	if expectedChanges != len(config.observer.batchChanges) {
		t.Errorf("got changed=%d, expected %d",
			len(config.observer.batchChanges), expectedChanges)
	} else {
		if blocks != nil {
			rootBlock = getDirBlockFromCache(
				t, config, id, newP.path[0].BlockPointer, newP.Branch)
		}
		if rootBlock.Children["a"].Type != expectedType {
			t.Errorf("a has type %s, expected %s",
				rootBlock.Children["a"].Type, expectedType)
		} else if expectedChanges > 0 {
			// SetEx() should always change the ctime of
			// non-symlinks.
			// pretend it's a rename so only ctime gets checked
			checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
				expectedType, "", true)
		}
	}
	if expectedChanges > 0 {
		blocks = blocks[:len(blocks)-1] // last block is never in the cache
	}
	checkBlockCache(t, config, id, append(blocks, rootID), nil)
	if expectedChanges > 0 {
		// make sure the setAttrOp is correct
		sao, ok := newRmd.data.Changes.Ops[0].(*setAttrOp)
		if !ok {
			// Fatal, not Errorf: on a failed type assertion sao is nil
			// and the checkOp/sao.Dir accesses below would panic with a
			// nil dereference instead of failing cleanly.
			t.Fatalf("Couldn't find the setAttrOp")
		}
		checkOp(t, sao.OpCommon, nil, nil, nil)
		dirUpdate := blockUpdate{rmd.data.Dir.BlockPointer,
			newP.path[0].BlockPointer}
		if sao.Dir != dirUpdate {
			t.Errorf("Incorrect dir update in op: %v vs. %v", sao.Dir,
				dirUpdate)
		} else if sao.Name != "a" {
			t.Errorf("Incorrect name in op: %v", sao.Name)
		} else if sao.Attr != exAttr {
			t.Errorf("Incorrect attr in op: %v", sao.Attr)
		}
	}
}
// The wrappers below run testSetExSuccess over the full matrix of
// entry types and target exec bits; see that helper for what each
// combination is expected to do.
func TestSetExFileSuccess(t *testing.T) {
	testSetExSuccess(t, File, true)
}
func TestSetNoExFileSuccess(t *testing.T) {
	testSetExSuccess(t, File, false)
}
func TestSetExExecSuccess(t *testing.T) {
	testSetExSuccess(t, Exec, true)
}
func TestSetNoExExecSuccess(t *testing.T) {
	testSetExSuccess(t, Exec, false)
}
// Directories and symlinks must be no-ops for SetEx.
func TestSetExDirSuccess(t *testing.T) {
	testSetExSuccess(t, Dir, true)
}
func TestSetNoExDirSuccess(t *testing.T) {
	testSetExSuccess(t, Dir, false)
}
func TestSetExSymSuccess(t *testing.T) {
	testSetExSuccess(t, Sym, true)
}
func TestSetNoExSymSuccess(t *testing.T) {
	testSetExSuccess(t, Sym, false)
}
// TestSetExFailNoSuchName verifies that SetEx on a name that is not
// present in its parent directory block fails with NoSuchNameError.
func TestSetExFailNoSuchName(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	rmd.data.Dir.ID = rootID
	aID := kbfsblock.FakeID(43)
	// The root block deliberately has no children, so "a" can't be found.
	rootBlock := NewDirBlock().(*DirBlock)
	rootNode := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	missingNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{rootNode, missingNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	testPutBlockInCache(t, config, rootNode.BlockPointer, id, rootBlock)
	expectedErr := NoSuchNameError{p.tailName()}
	// chmod a+x on the missing entry.
	err := config.KBFSOps().SetEx(ctx, n, true)
	switch {
	case err == nil:
		t.Errorf("Got no expected error on setex")
	case err != expectedErr:
		t.Errorf("Got unexpected error on setex: %+v", err)
	}
}
// Other SetEx failure cases are all the same as any other block sync
// TestSetMtimeSuccess checks that SetMtime with a non-nil time writes
// the new mtime into the entry's dir block, syncs a new root block,
// and records a matching setAttrOp (mtimeAttr on "a") in the new MD.
func TestSetMtimeSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	rmd.data.Dir.ID = rootID
	aID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(aID, uid),
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	// sync block
	var newRmd ImmutableRootMetadata
	blocks := make([]kbfsblock.ID, 2)
	expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
		*p.parentPath(), rmd, false, 0, 0, 0, &newRmd, blocks)
	expectedPath.path = append(expectedPath.path, aNode)
	newMtime := time.Now()
	err := config.KBFSOps().SetMtime(ctx, n, &newMtime)
	if err != nil {
		t.Errorf("Got unexpected error on setmtime: %+v", err)
	}
	newP := ops.nodeCache.PathFromNode(n)
	b0 := getDirBlockFromCache(
		t, config, id, newP.path[0].BlockPointer, newP.Branch)
	if b0.Children["a"].Mtime != newMtime.UnixNano() {
		t.Errorf("a has wrong mtime: %v", b0.Children["a"].Mtime)
	} else {
		checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
			Exec, "", false)
	}
	blocks = blocks[:len(blocks)-1] // last block is never in the cache
	checkBlockCache(t, config, id, append(blocks, rootID), nil)
	// make sure the setAttrOp is correct
	sao, ok := newRmd.data.Changes.Ops[0].(*setAttrOp)
	if !ok {
		// Fatal, not Errorf: sao would be nil here and the checkOp
		// call below would panic instead of reporting a clean failure.
		t.Fatalf("Couldn't find the setAttrOp")
	}
	checkOp(t, sao.OpCommon, nil, nil, nil)
	dirUpdate := blockUpdate{rmd.data.Dir.BlockPointer,
		newP.path[0].BlockPointer}
	if sao.Dir != dirUpdate {
		t.Errorf("Incorrect dir update in op: %v vs. %v", sao.Dir,
			dirUpdate)
	} else if sao.Name != "a" {
		t.Errorf("Incorrect name in op: %v", sao.Name)
	} else if sao.Attr != mtimeAttr {
		t.Errorf("Incorrect attr in op: %v", sao.Attr)
	}
}
// TestSetMtimeNull verifies that passing a nil mtime to SetMtime is a
// no-op: the entry keeps its old mtime, the node's path is unchanged,
// and nothing ends up in the block cache.
func TestSetMtimeNull(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	aID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	oldMtime := time.Now().UnixNano()
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(aID, uid),
		EntryInfo: EntryInfo{
			Type:  File,
			Mtime: oldMtime,
		},
	}
	rootNode := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{rootNode, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	err := config.KBFSOps().SetMtime(ctx, n, nil)
	if err != nil {
		t.Errorf("Got unexpected error on null setmtime: %+v", err)
	}
	newP := ops.nodeCache.PathFromNode(n)
	switch {
	case rootBlock.Children["a"].Mtime != oldMtime:
		t.Errorf("a has wrong mtime: %v", rootBlock.Children["a"].Mtime)
	case newP.path[0].ID != p.path[0].ID:
		t.Errorf("Got back a changed path for null setmtime test: %v", newP)
	}
	checkBlockCache(t, config, id, nil, nil)
}
// TestMtimeFailNoSuchName verifies that SetMtime on a name missing
// from its parent directory block fails with NoSuchNameError.
func TestMtimeFailNoSuchName(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	rmd.data.Dir.ID = rootID
	aID := kbfsblock.FakeID(43)
	// Empty root block, so the lookup of "a" must fail.
	rootBlock := NewDirBlock().(*DirBlock)
	rootNode := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	missingNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{rootNode, missingNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	testPutBlockInCache(t, config, rootNode.BlockPointer, id, rootBlock)
	expectedErr := NoSuchNameError{p.tailName()}
	newMtime := time.Now()
	err := config.KBFSOps().SetMtime(ctx, n, &newMtime)
	switch {
	case err == nil:
		t.Errorf("Got no expected error on setmtime")
	case err != expectedErr:
		t.Errorf("Got unexpected error on setmtime: %+v", err)
	}
}
// getOrCreateSyncInfo is a test helper that looks up (or creates) the
// syncInfo for the given dir entry, taking and releasing blockLock
// around the folderBlockOps call so callers don't manage the lock.
func getOrCreateSyncInfo(
	ops *folderBranchOps, lState *lockState, de DirEntry) (*syncInfo, error) {
	ops.blocks.blockLock.Lock(lState)
	defer ops.blocks.blockLock.Unlock(lState)
	return ops.blocks.getOrCreateSyncInfoLocked(lState, de)
}
// makeBlockStateDirty is a test helper that marks the block at ptr as
// dirty in the folder's dirty-file state for path p (under blockLock),
// so that subsequent sync logic treats the block as needing a write.
func makeBlockStateDirty(config Config, kmd KeyMetadata, p path,
	ptr BlockPointer) {
	ops := getOps(config, kmd.TlfID())
	lState := makeFBOLockState()
	ops.blocks.blockLock.Lock(lState)
	defer ops.blocks.blockLock.Unlock(lState)
	df := ops.blocks.getOrCreateDirtyFileLocked(lState, p)
	df.setBlockDirty(ptr)
}
// SetMtime failure cases are all the same as any other block sync
// testSyncDirtySuccess dirties a single file block, syncs it (either
// merged or unmerged, per isUnmerged), and checks the resulting path,
// block cache contents, and the syncOp recorded in the new MD
// (including the propagated {Off: 0, Len: 10} write range).
func testSyncDirtySuccess(t *testing.T, isUnmerged bool) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	rmd.data.Dir.ID = rootID
	aID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(aID, uid),
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	aBlock := NewFileBlock().(*FileBlock)
	aBlock.Contents = []byte{1, 2, 3, 4, 5}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	lState := makeFBOLockState()
	si, err := getOrCreateSyncInfo(ops, lState, rootBlock.Children["a"])
	require.NoError(t, err)
	si.op.addWrite(0, 10)
	// fsync a
	config.DirtyBlockCache().Put(id, aNode.BlockPointer, p.Branch, aBlock)
	makeBlockStateDirty(config, rmd, p, aNode.BlockPointer)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	// TODO: put a dirty DE entry in the cache, to test that the new
	// root block has the correct file size.
	// sync block
	var newRmd ImmutableRootMetadata
	blocks := make([]kbfsblock.ID, 2)
	var expectedPath path
	if isUnmerged {
		// Turn off the conflict resolver to avoid unexpected mock
		// calls. Recreate the input channel to make sure the later
		// Shutdown() call works.
		ops.cr.Pause()
		expectedPath, _ = expectSyncBlockUnmerged(t, config, nil, uid, id,
			"", p, rmd, false, 0, 0, 0, &newRmd, blocks)
	} else {
		expectedPath, _ = expectSyncBlock(t, config, nil, uid, id, "", p,
			rmd, false, 0, 0, 0, &newRmd, blocks)
	}
	err = config.KBFSOps().Sync(ctx, n)
	if err != nil {
		t.Errorf("Got unexpected error on sync: %+v", err)
	}
	newP := ops.nodeCache.PathFromNode(n)
	checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
		Exec, "", false)
	checkBlockCache(t, config, id, append(blocks, rootID), nil)
	// check the sync op
	so, ok := newRmd.data.Changes.Ops[0].(*syncOp)
	if !ok {
		// Fatal, not Errorf: so is nil on a failed assertion, and the
		// checkOp/so.File accesses below would panic.
		t.Fatalf("Couldn't find the syncOp")
	}
	updates := []blockUpdate{
		{rmd.data.Dir.BlockPointer, newP.path[0].BlockPointer},
	}
	checkOp(t, so.OpCommon, nil, nil, updates)
	fileUpdate := blockUpdate{aNode.BlockPointer, newP.path[1].BlockPointer}
	if so.File != fileUpdate {
		t.Errorf("Incorrect file update in op: %v vs. %v", so.File,
			fileUpdate)
	}
	// make sure the write is propagated
	checkSyncOp(t, config.Codec(), so,
		aNode.BlockPointer, []WriteRange{{Off: 0, Len: 10}})
}
// TestSyncDirtySuccess runs the dirty-sync scenario on the merged branch.
func TestSyncDirtySuccess(t *testing.T) {
	testSyncDirtySuccess(t, false)
}
// TestSyncDirtyUnmergedSuccess runs the same scenario unmerged.
func TestSyncDirtyUnmergedSuccess(t *testing.T) {
	testSyncDirtySuccess(t, true)
}
// TestSyncCleanSuccess verifies that syncing a file with no dirty
// blocks is a no-op: the node's path comes back unchanged node by
// node, and nothing is written into the block cache.
func TestSyncCleanSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	rmd.data.Dir.ID = rootID
	aID := kbfsblock.FakeID(43)
	rootNode := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{rootNode, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	// fsync the clean file.
	err := config.KBFSOps().Sync(ctx, n)
	if err != nil {
		t.Errorf("Got unexpected error on sync: %+v", err)
	}
	newP := ops.nodeCache.PathFromNode(n)
	switch {
	case len(newP.path) != len(p.path):
		// Expected the exact same path back.
		t.Errorf("Got a different length path back: %v", newP)
	default:
		for i, pn := range newP.path {
			if pn != p.path[i] {
				t.Errorf("Node %d differed: %v", i, pn)
			}
		}
	}
	checkBlockCache(t, config, id, nil, nil)
}
// expectSyncDirtyBlock sets up mock expectations for syncing a single
// dirty file block at ptr: the splitter reports splitAt, block ops
// Ready() the block into a buffer padded by padSize, and the result
// is Put to the block server.  It returns the gomock Call for the
// Ready() expectation so callers can order later calls after it.
// opsLockHeld indicates blockLock is already held, so the dirty-state
// marking must be skipped here (the caller did it under the lock).
func expectSyncDirtyBlock(config *ConfigMock, kmd KeyMetadata,
	p path, ptr BlockPointer, block *FileBlock, splitAt int64,
	padSize int, opsLockHeld bool) *gomock.Call {
	branch := MasterBranch
	// Tests run either with a mocked dirty cache (expectations) or a
	// real one (actually insert the block).
	if config.mockDirtyBcache != nil {
		config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(), ptrMatcher{ptr},
			branch).AnyTimes().Return(true)
		config.mockDirtyBcache.EXPECT().Get(gomock.Any(), ptrMatcher{ptr},
			branch).AnyTimes().Return(block, nil)
	} else {
		config.DirtyBlockCache().Put(p.Tlf, ptr, branch, block)
	}
	if !opsLockHeld {
		makeBlockStateDirty(config, kmd, p, ptr)
	}
	c1 := config.mockBsplit.EXPECT().CheckSplit(block).Return(splitAt)
	// Tests assume the synced block's new ID is old ID + 100.
	newID := kbfsblock.FakeIDAdd(ptr.ID, 100)
	// Ideally, we'd use the size of block.Contents at the time
	// that Ready() is called, but GoMock isn't expressive enough
	// for that.
	newEncBuf := make([]byte, len(block.Contents)+padSize)
	readyBlockData := ReadyBlockData{
		buf: newEncBuf,
	}
	c2 := config.mockBops.EXPECT().Ready(gomock.Any(), kmdMatcher{kmd}, block).
		After(c1).Return(newID, len(block.Contents), readyBlockData, nil)
	newPtr := BlockPointer{ID: newID}
	if config.mockBcache != nil {
		config.mockBcache.EXPECT().Put(ptrMatcher{newPtr}, kmd.TlfID(), block, PermanentEntry).Return(nil)
		config.mockBcache.EXPECT().DeletePermanent(newID).Return(nil)
	} else {
		// Nothing to do, since the cache entry is added and
		// removed.
	}
	config.mockBserv.EXPECT().Put(gomock.Any(), kmd.TlfID(), newID,
		gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
	return c2
}
// TestSyncDirtyMultiBlocksSuccess syncs an indirect file where two of
// four leaf blocks (2 and 4, one owned by a different writer) are
// dirty, then checks the updated indirect pointers, ref/unref byte
// accounting, cache contents, and the recorded syncOp.
func TestSyncDirtyMultiBlocksSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	rmd.data.Dir.ID = rootID
	fileID := kbfsblock.FakeID(43)
	id1 := kbfsblock.FakeID(44)
	id2 := kbfsblock.FakeID(45)
	id3 := kbfsblock.FakeID(46)
	id4 := kbfsblock.FakeID(47)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(fileID, uid),
		EntryInfo: EntryInfo{
			Size: 20,
		},
	}
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.IsInd = true
	fileBlock.IPtrs = []IndirectFilePtr{
		makeIFP(id1, rmd, config, uid, 5, 0),
		makeIFP(id2, rmd, config,
			keybase1.MakeTestUID(0), 0, 5),
		makeIFP(id3, rmd, config, uid, 7, 10),
		makeIFP(id4, rmd, config, uid, 0, 15),
	}
	block1 := NewFileBlock().(*FileBlock)
	block1.Contents = []byte{5, 4, 3, 2, 1}
	block2 := NewFileBlock().(*FileBlock)
	block2.Contents = []byte{10, 9, 8, 7, 6}
	block3 := NewFileBlock().(*FileBlock)
	block3.Contents = []byte{15, 14, 13, 12, 11}
	block4 := NewFileBlock().(*FileBlock)
	block4.Contents = []byte{20, 19, 18, 17, 16}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, uid), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	lState := makeFBOLockState()
	si, err := getOrCreateSyncInfo(ops, lState, rootBlock.Children["a"])
	require.NoError(t, err)
	// add the dirty blocks to the unref list
	si.op.addWrite(5, 5)
	si.op.addWrite(15, 5)
	si.unrefs = append(si.unrefs,
		makeBI(id2, rmd, config, keybase1.MakeTestUID(0), 5),
		makeBI(id4, rmd, config, keybase1.MakeTestUID(0), 5))
	// fsync a, only block 2 is dirty
	config.DirtyBlockCache().Put(id, fileNode.BlockPointer, p.Branch, fileBlock)
	makeBlockStateDirty(config, rmd, p, fileNode.BlockPointer)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	// the split is good
	pad2 := 5
	pad4 := 8
	expectSyncDirtyBlock(config, rmd, p, fileBlock.IPtrs[1].BlockPointer,
		block2, int64(0), pad2, false)
	expectSyncDirtyBlock(config, rmd, p, fileBlock.IPtrs[3].BlockPointer,
		block4, int64(0), pad4, false)
	// sync 2 blocks, plus their pad sizes
	refBytes := uint64((len(block2.Contents) + pad2) +
		(len(block4.Contents) + pad4))
	unrefBytes := uint64(5 + 5) // blocks 1 and 3
	var newRmd ImmutableRootMetadata
	blocks := make([]kbfsblock.ID, 2)
	expectedPath, _ :=
		expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false, 0,
			refBytes, unrefBytes, &newRmd, blocks)
	err = config.KBFSOps().Sync(ctx, n)
	if err != nil {
		t.Errorf("Got unexpected error on sync: %+v", err)
	}
	newP := ops.nodeCache.PathFromNode(n)
	if fileBlock.IPtrs[0].EncodedSize != 5 {
		t.Errorf("Indirect pointer encoded size1 wrong: %d", fileBlock.IPtrs[0].EncodedSize)
	} else if fileBlock.IPtrs[1].GetWriter() != uid {
		t.Errorf("Got unexpected writer: %s", fileBlock.IPtrs[1].GetWriter())
	} else if fileBlock.IPtrs[1].EncodedSize != 10 {
		t.Errorf("Indirect pointer encoded size2 wrong: %d", fileBlock.IPtrs[1].EncodedSize)
	} else if fileBlock.IPtrs[2].EncodedSize != 7 {
		t.Errorf("Indirect pointer encoded size3 wrong: %d", fileBlock.IPtrs[2].EncodedSize)
	} else if fileBlock.IPtrs[3].EncodedSize != 13 {
		t.Errorf("Indirect pointer encoded size4 wrong: %d", fileBlock.IPtrs[3].EncodedSize)
	} else {
		checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
			Exec, "", false)
	}
	checkBlockCache(t, config, id,
		append(blocks, rootID, fileBlock.IPtrs[1].ID, fileBlock.IPtrs[3].ID),
		nil)
	// check the sync op
	so, ok := newRmd.data.Changes.Ops[0].(*syncOp)
	if !ok {
		// Fatal, not Errorf: so would be nil below and checkOp would
		// panic rather than fail cleanly.
		t.Fatalf("Couldn't find the syncOp")
	}
	refBlocks := []BlockPointer{fileBlock.IPtrs[1].BlockPointer,
		fileBlock.IPtrs[3].BlockPointer}
	unrefBlocks := []BlockPointer{
		makeBP(id2, rmd, config, keybase1.MakeTestUID(0)),
		makeBP(id4, rmd, config, keybase1.MakeTestUID(0)),
	}
	updates := []blockUpdate{
		{rmd.data.Dir.BlockPointer, newP.path[0].BlockPointer},
	}
	checkOp(t, so.OpCommon, refBlocks, unrefBlocks, updates)
	fileUpdate := blockUpdate{fileNode.BlockPointer, newP.path[1].BlockPointer}
	if so.File != fileUpdate {
		t.Errorf("Incorrect file update in op: %v vs. %v", so.File,
			fileUpdate)
	}
}
// TestSyncDirtyDupBlockSuccess syncs a dirty file "b" whose contents
// are identical to an existing clean file "a".  The sync should
// deduplicate: no new block is stored for b; instead a block
// reference with a non-zero refnonce is added to a's block, and the
// original creator is preserved in the new block pointer.
func TestSyncDirtyDupBlockSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	rmd.data.Dir.ID = rootID
	aID := kbfsblock.FakeID(43)
	bID := kbfsblock.FakeID(44)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(aID, uid),
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	rootBlock.Children["b"] = DirEntry{
		BlockInfo: makeBIFromID(bID, uid),
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	aBlock := NewFileBlock().(*FileBlock)
	aBlock.Contents = []byte{1, 2, 3, 4, 5}
	bBlock := NewFileBlock().(*FileBlock)
	bBlock.Contents = aBlock.Contents
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
	bNode := pathNode{makeBP(bID, rmd, config, uid), "b"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, bNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	lState := makeFBOLockState()
	si, err := getOrCreateSyncInfo(ops, lState, rootBlock.Children["b"])
	require.NoError(t, err)
	si.op.addWrite(0, 10)
	config.DirtyBlockCache().Put(id, bNode.BlockPointer, p.Branch, bBlock)
	makeBlockStateDirty(config, rmd, p, bNode.BlockPointer)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
	readyBlockData := ReadyBlockData{
		buf: []byte{6, 7, 8, 9, 10, 11, 12},
	}
	config.mockBops.EXPECT().Ready(gomock.Any(), kmdMatcher{rmd}, bBlock).
		Return(bID, len(bBlock.Contents), readyBlockData, nil)
	refNonce := kbfsblock.RefNonce{1}
	config.mockCrypto.EXPECT().MakeBlockRefNonce().AnyTimes().
		Return(refNonce, nil)
	// sync block (but skip the last block)
	var newRmd ImmutableRootMetadata
	blocks := make([]kbfsblock.ID, 1)
	unrefBytes := uint64(1) // unref'd block b
	refBytes := uint64(len(readyBlockData.buf))
	rootP := path{FolderBranch: p.FolderBranch, path: []pathNode{p.path[0]}}
	expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "", rootP,
		rmd, false, 0, refBytes, unrefBytes, &newRmd, blocks)
	blocks = append(blocks, bID)
	// manually add b
	expectedPath.path = append(expectedPath.path,
		pathNode{BlockPointer{ID: aID}, "b"})
	// TODO: build a context matcher that can check that the
	// refnonce is nonzero.
	config.mockBserv.EXPECT().AddBlockReference(gomock.Any(), rmd.TlfID(),
		expectedPath.path[1].ID, gomock.Any()).Return(nil)
	// fsync b
	err = config.KBFSOps().Sync(ctx, n)
	if err != nil {
		t.Errorf("Got unexpected error on sync: %+v", err)
	}
	newP := ops.nodeCache.PathFromNode(n)
	checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
		Exec, "", false)
	// block b shouldn't be anywhere in the cache
	checkBlockCache(t, config, id, append(blocks[0:1], rootID, aID), nil)
	// make sure the new blockpointer for b has a non-zero refnonce,
	// marking it as a dup
	if newP.path[1].RefNonce == kbfsblock.ZeroRefNonce {
		t.Errorf("Block was not caught as a dup: %v", newP.path[1])
	}
	if newP.path[1].Creator != aNode.GetWriter() {
		t.Errorf("Creator was not successfully propagated: saw %v, expected %v",
			newP.path[1].Creator, aNode.GetWriter())
	}
	// check the sync op
	so, ok := newRmd.data.Changes.Ops[0].(*syncOp)
	if !ok {
		// Fatal, not Errorf: so would be nil below and checkOp would
		// panic rather than fail cleanly.
		t.Fatalf("Couldn't find the syncOp")
	}
	updates := []blockUpdate{
		{rmd.data.Dir.BlockPointer, newP.path[0].BlockPointer},
	}
	checkOp(t, so.OpCommon, nil, nil, updates)
	fileUpdate := blockUpdate{bNode.BlockPointer, newP.path[1].BlockPointer}
	if so.File != fileUpdate {
		t.Errorf("Incorrect file update in op: %v vs. %v", so.File,
			fileUpdate)
	}
	// make sure the write is propagated
	checkSyncOp(t, config.Codec(), so,
		bNode.BlockPointer, []WriteRange{{Off: 0, Len: 10}})
}
// putAndCleanAnyBlock expects any transient Put into the mocked clean
// block cache, and for each such Put wires up follow-on expectations:
// the block becomes a miss in the dirty cache and a hit in the clean
// cache, and deletions from the dirty cache on p.Branch succeed.
func putAndCleanAnyBlock(config *ConfigMock, p path) {
	config.mockBcache.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any(), TransientEntry).
		Do(func(ptr BlockPointer, tlf tlf.ID, block Block, lifetime BlockCacheLifetime) {
			config.mockDirtyBcache.EXPECT().
				Get(gomock.Any(), ptrMatcher{BlockPointer{ID: ptr.ID}},
					p.Branch).AnyTimes().Return(nil, NoSuchBlockError{ptr.ID})
			config.mockBcache.EXPECT().
				Get(ptrMatcher{BlockPointer{ID: ptr.ID}}).
				AnyTimes().Return(block, nil)
		}).AnyTimes().Return(nil)
	config.mockDirtyBcache.EXPECT().Delete(gomock.Any(), gomock.Any(),
		p.Branch).AnyTimes().Return(nil)
}
// TestSyncDirtyMultiBlocksSplitInBlockSuccess syncs an indirect file
// where the block splitter forces re-splitting: block 2's tail spills
// into block 3, and block 4 splits into a brand-new fifth block.  It
// then checks all five resulting indirect pointers, their offsets and
// encoded sizes, and the final block contents.
func TestSyncDirtyMultiBlocksSplitInBlockSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	// we need to mock the bcache for this test, because we have to
	// capture new file blocks that are created as they are written to
	// the cache (in order to expect calls on them)
	config.mockBcache = NewMockBlockCache(mockCtrl)
	config.SetBlockCache(config.mockBcache)
	config.mockDirtyBcache = NewMockDirtyBlockCache(mockCtrl)
	config.SetDirtyBlockCache(config.mockDirtyBcache)
	config.mockDirtyBcache.EXPECT().UpdateSyncingBytes(gomock.Any(),
		gomock.Any()).AnyTimes()
	config.mockDirtyBcache.EXPECT().BlockSyncFinished(gomock.Any(),
		gomock.Any()).AnyTimes()
	config.mockDirtyBcache.EXPECT().SyncFinished(gomock.Any(), gomock.Any())
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	rmd.data.Dir.ID = rootID
	fileID := kbfsblock.FakeID(43)
	id1 := kbfsblock.FakeID(44)
	id2 := kbfsblock.FakeID(45)
	id3 := kbfsblock.FakeID(46)
	id4 := kbfsblock.FakeID(47)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(fileID, uid),
		EntryInfo: EntryInfo{
			Size: 20,
		},
	}
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.IsInd = true
	fileBlock.IPtrs = []IndirectFilePtr{
		makeIFP(id1, rmd, config, uid, 10, 0),
		makeIFP(id2, rmd, config, uid, 0, 5),
		makeIFP(id3, rmd, config, uid, 0, 10),
		makeIFP(id4, rmd, config, uid, 0, 15),
	}
	block1 := NewFileBlock().(*FileBlock)
	block1.Contents = []byte{5, 4, 3, 2, 1}
	block2 := NewFileBlock().(*FileBlock)
	block2.Contents = []byte{10, 9, 8, 7, 6}
	block3 := NewFileBlock().(*FileBlock)
	block3.Contents = []byte{15, 14, 13, 12, 11}
	block4 := NewFileBlock().(*FileBlock)
	block4.Contents = []byte{20, 19, 18, 17, 16}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, uid), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	lState := makeFBOLockState()
	// Check the error, consistent with every other caller of
	// getOrCreateSyncInfo in this file (it was previously ignored).
	_, err := getOrCreateSyncInfo(ops, lState, rootBlock.Children["a"])
	require.NoError(t, err)
	// fsync a, only block 2 is dirty
	config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
		ptrMatcher{fileBlock.IPtrs[0].BlockPointer},
		p.Branch).AnyTimes().Return(false)
	makeBlockStateDirty(config, rmd, p, fileNode.BlockPointer)
	config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
		ptrMatcher{fileBlock.IPtrs[2].BlockPointer},
		p.Branch).Return(false)
	config.mockDirtyBcache.EXPECT().Get(gomock.Any(),
		ptrMatcher{fileBlock.IPtrs[2].BlockPointer}, p.Branch).Return(nil,
		NoSuchBlockError{fileBlock.IPtrs[2].BlockPointer.ID})
	config.mockBcache.EXPECT().Get(ptrMatcher{fileBlock.IPtrs[2].BlockPointer}).
		Return(block3, nil)
	config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
		ptrMatcher{fileBlock.IPtrs[3].BlockPointer},
		p.Branch).AnyTimes().Return(false)
	config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
		ptrMatcher{node.BlockPointer}, p.Branch).AnyTimes().Return(true)
	makeBlockStateDirty(config, rmd, p, node.BlockPointer)
	config.mockDirtyBcache.EXPECT().Get(gomock.Any(),
		ptrMatcher{node.BlockPointer}, p.Branch).
		AnyTimes().Return(rootBlock, nil)
	config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
		ptrMatcher{fileNode.BlockPointer}, p.Branch).AnyTimes().Return(true)
	config.mockDirtyBcache.EXPECT().Get(gomock.Any(),
		ptrMatcher{fileNode.BlockPointer}, p.Branch).
		AnyTimes().Return(fileBlock, nil)
	// no matching pointers
	config.mockBcache.EXPECT().CheckForKnownPtr(gomock.Any(), gomock.Any()).
		AnyTimes().Return(BlockPointer{}, nil)
	// the split is in the middle
	pad2 := 0
	pad3 := 14
	extraBytesFor3 := 2
	expectSyncDirtyBlock(config, rmd, p, fileBlock.IPtrs[1].BlockPointer,
		block2, int64(len(block2.Contents)-extraBytesFor3), pad2, false)
	// this causes block 3 to be updated
	var newBlock3 *FileBlock
	config.mockDirtyBcache.EXPECT().Put(gomock.Any(),
		fileBlock.IPtrs[2].BlockPointer, p.Branch, gomock.Any()).
		Do(func(id tlf.ID, ptr BlockPointer, branch BranchName, block Block) {
			newBlock3 = block.(*FileBlock)
			// id3 syncs just fine
			config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
				ptrMatcher{ptr}, branch).AnyTimes().Return(true)
			expectSyncDirtyBlock(config, rmd, p, ptr, newBlock3, int64(0), pad3,
				true)
		}).Return(nil)
	// id4 is the final block, and the split causes a new block to be made
	pad4 := 9
	pad5 := 1
	c4 := expectSyncDirtyBlock(config, rmd, p, fileBlock.IPtrs[3].BlockPointer,
		block4, int64(3), pad4, false)
	var newID5 kbfsblock.ID
	var newBlock5 *FileBlock
	id5 := kbfsblock.FakeID(48)
	config.mockCrypto.EXPECT().MakeTemporaryBlockID().Return(id5, nil)
	config.mockDirtyBcache.EXPECT().Put(gomock.Any(),
		ptrMatcher{BlockPointer{ID: id5}}, p.Branch, gomock.Any()).
		Do(func(id tlf.ID, ptr BlockPointer, branch BranchName, block Block) {
			newID5 = ptr.ID
			newBlock5 = block.(*FileBlock)
			// id5 syncs just fine
			expectSyncDirtyBlock(config, rmd, p, ptr, newBlock5, int64(0), pad5,
				true)
			config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
				ptrMatcher{ptr}, branch).AnyTimes().Return(true)
		}).Return(nil)
	// The parent is dirtied too since the pointers changed
	config.mockDirtyBcache.EXPECT().Put(gomock.Any(), fileNode.BlockPointer,
		p.Branch, gomock.Any()).AnyTimes().Return(nil)
	// sync block contents and their padding sizes
	refBytes := uint64((len(block2.Contents) + pad2) +
		(len(block3.Contents) + extraBytesFor3 + pad3) +
		(len(block4.Contents) + pad4) + pad5)
	unrefBytes := uint64(0) // no encoded sizes on dirty blocks
	var newRmd ImmutableRootMetadata
	blocks := make([]kbfsblock.ID, 2)
	expectedPath, _ :=
		expectSyncBlock(t, config, c4, uid, id, "", p, rmd, false, 0,
			refBytes, unrefBytes, &newRmd, blocks)
	putAndCleanAnyBlock(config, p)
	newID2 := kbfsblock.FakeIDAdd(id2, 100)
	newID3 := kbfsblock.FakeIDAdd(id3, 100)
	newID4 := kbfsblock.FakeIDAdd(id4, 100)
	if err := config.KBFSOps().Sync(ctx, n); err != nil {
		t.Errorf("Got unexpected error on sync: %+v", err)
	}
	newP := ops.nodeCache.PathFromNode(n)
	if len(fileBlock.IPtrs) != 5 {
		t.Errorf("Wrong number of indirect pointers: %d", len(fileBlock.IPtrs))
	} else if fileBlock.IPtrs[0].ID != id1 {
		t.Errorf("Indirect pointer id1 wrong: %v", fileBlock.IPtrs[0].ID)
	} else if fileBlock.IPtrs[0].EncodedSize != 10 {
		t.Errorf("Indirect pointer encoded size1 wrong: %d", fileBlock.IPtrs[0].EncodedSize)
	} else if fileBlock.IPtrs[0].Off != 0 {
		t.Errorf("Indirect pointer off1 wrong: %d", fileBlock.IPtrs[0].Off)
	} else if fileBlock.IPtrs[1].ID != newID2 {
		t.Errorf("Indirect pointer id2 wrong: %v", fileBlock.IPtrs[1].ID)
	} else if fileBlock.IPtrs[1].EncodedSize != 5 {
		t.Errorf("Indirect pointer encoded size2 wrong: %d", fileBlock.IPtrs[1].EncodedSize)
	} else if fileBlock.IPtrs[1].Off != 5 {
		t.Errorf("Indirect pointer off2 wrong: %d", fileBlock.IPtrs[1].Off)
	} else if fileBlock.IPtrs[2].ID != newID3 {
		t.Errorf("Indirect pointer id3 wrong: %v", fileBlock.IPtrs[2].ID)
	} else if fileBlock.IPtrs[2].EncodedSize != 21 {
		t.Errorf("Indirect pointer encoded size3 wrong: %d", fileBlock.IPtrs[2].EncodedSize)
	} else if fileBlock.IPtrs[2].Off != 8 {
		t.Errorf("Indirect pointer off3 wrong: %d", fileBlock.IPtrs[2].Off)
	} else if fileBlock.IPtrs[3].ID != newID4 {
		t.Errorf("Indirect pointer id4 wrong: %v", fileBlock.IPtrs[3].ID)
	} else if fileBlock.IPtrs[3].EncodedSize != 14 {
		t.Errorf("Indirect pointer encoded size4 wrong: %d", fileBlock.IPtrs[3].EncodedSize)
	} else if fileBlock.IPtrs[3].Off != 15 {
		t.Errorf("Indirect pointer off4 wrong: %d", fileBlock.IPtrs[3].Off)
	} else if fileBlock.IPtrs[4].ID != kbfsblock.FakeIDAdd(newID5, 100) {
		t.Errorf("Indirect pointer id5 wrong: %v", fileBlock.IPtrs[4].ID)
	} else if fileBlock.IPtrs[4].EncodedSize != 1 {
		t.Errorf("Indirect pointer encoded size5 wrong: %d", fileBlock.IPtrs[4].EncodedSize)
	} else if fileBlock.IPtrs[4].Off != 18 {
		t.Errorf("Indirect pointer off5 wrong: %d", fileBlock.IPtrs[4].Off)
	} else if !bytes.Equal([]byte{10, 9, 8}, block2.Contents) {
		t.Errorf("Block 2 has the wrong data: %v", block2.Contents)
	} else if !bytes.Equal(
		[]byte{7, 6, 15, 14, 13, 12, 11}, newBlock3.Contents) {
		t.Errorf("Block 3 has the wrong data: %v", newBlock3.Contents)
	} else if !bytes.Equal([]byte{20, 19, 18}, block4.Contents) {
		t.Errorf("Block 4 has the wrong data: %v", block4.Contents)
	} else if !bytes.Equal([]byte{17, 16}, newBlock5.Contents) {
		t.Errorf("Block 5 has the wrong data: %v", newBlock5.Contents)
	} else {
		checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
			Exec, "", false)
	}
}
// TestSyncDirtyMultiBlocksCopyNextBlockSuccess syncs an indirect file
// whose splitter merges dirty data forward: block 2 is fully absorbed
// into block 1 (so its pointer disappears), part of block 4 is absorbed
// into block 3, and the remainder of block 4 becomes a new dirty block.
// Afterward the pointer list must shrink from 4 entries to 3 with the
// expected IDs, sizes, offsets, and block contents.
func TestSyncDirtyMultiBlocksCopyNextBlockSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	// we need to mock the bcache for this test, because we have to
	// capture new file blocks that are created as they are written to
	// the cache (in order to expect calls on them)
	config.mockBcache = NewMockBlockCache(mockCtrl)
	config.SetBlockCache(config.mockBcache)
	config.mockDirtyBcache = NewMockDirtyBlockCache(mockCtrl)
	config.SetDirtyBlockCache(config.mockDirtyBcache)
	config.mockDirtyBcache.EXPECT().UpdateSyncingBytes(gomock.Any(),
		gomock.Any()).AnyTimes()
	config.mockDirtyBcache.EXPECT().BlockSyncFinished(gomock.Any(),
		gomock.Any()).AnyTimes()
	config.mockDirtyBcache.EXPECT().SyncFinished(gomock.Any(), gomock.Any())
	// Fixture: one file "a" made of 4 indirect blocks at offsets 0/5/10/15.
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	rmd.data.Dir.ID = rootID
	fileID := kbfsblock.FakeID(43)
	id1 := kbfsblock.FakeID(44)
	id2 := kbfsblock.FakeID(45)
	id3 := kbfsblock.FakeID(46)
	id4 := kbfsblock.FakeID(47)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(fileID, uid),
		EntryInfo: EntryInfo{
			Size: 20,
		},
	}
	fileBlock := NewFileBlock().(*FileBlock)
	fileBlock.IsInd = true
	fileBlock.IPtrs = []IndirectFilePtr{
		makeIFP(id1, rmd, config, uid, 0, 0),
		makeIFP(id2, rmd, config, uid, 10, 5),
		makeIFP(id3, rmd, config, uid, 0, 10),
		makeIFP(id4, rmd, config, uid, 15, 15),
	}
	block1 := NewFileBlock().(*FileBlock)
	block1.Contents = []byte{5, 4, 3, 2, 1}
	block2 := NewFileBlock().(*FileBlock)
	block2.Contents = []byte{10, 9, 8, 7, 6}
	block3 := NewFileBlock().(*FileBlock)
	block3.Contents = []byte{15, 14, 13, 12, 11}
	block4 := NewFileBlock().(*FileBlock)
	block4.Contents = []byte{20, 19, 18, 17, 16}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, uid), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	lState := makeFBOLockState()
	getOrCreateSyncInfo(ops, lState, rootBlock.Children["a"])
	// fsync a, only block 2 is dirty
	config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
		ptrMatcher{fileNode.BlockPointer}, p.Branch).AnyTimes().Return(true)
	makeBlockStateDirty(config, rmd, p, fileNode.BlockPointer)
	config.mockDirtyBcache.EXPECT().Get(gomock.Any(),
		ptrMatcher{fileNode.BlockPointer}, p.Branch).AnyTimes().Return(fileBlock, nil)
	config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
		ptrMatcher{node.BlockPointer}, p.Branch).AnyTimes().Return(true)
	makeBlockStateDirty(config, rmd, p, node.BlockPointer)
	config.mockDirtyBcache.EXPECT().Get(gomock.Any(),
		ptrMatcher{node.BlockPointer}, p.Branch).
		AnyTimes().Return(rootBlock, nil)
	// Blocks 2 and 4 are clean: they miss the dirty cache and come from
	// the regular block cache instead.
	config.mockDirtyBcache.EXPECT().Get(gomock.Any(),
		ptrMatcher{fileBlock.IPtrs[1].BlockPointer}, p.Branch).Return(nil,
		NoSuchBlockError{fileBlock.IPtrs[1].BlockPointer.ID})
	config.mockBcache.EXPECT().Get(ptrMatcher{fileBlock.IPtrs[1].BlockPointer}).
		Return(block2, nil)
	config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
		ptrMatcher{fileBlock.IPtrs[1].BlockPointer},
		p.Branch).AnyTimes().Return(false)
	config.mockDirtyBcache.EXPECT().Get(gomock.Any(),
		ptrMatcher{fileBlock.IPtrs[3].BlockPointer}, p.Branch).Return(nil,
		NoSuchBlockError{fileBlock.IPtrs[3].BlockPointer.ID})
	config.mockBcache.EXPECT().Get(ptrMatcher{fileBlock.IPtrs[3].BlockPointer}).
		Return(block4, nil)
	config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
		ptrMatcher{fileBlock.IPtrs[3].BlockPointer},
		p.Branch).Return(false)
	// no matching pointers
	config.mockBcache.EXPECT().CheckForKnownPtr(gomock.Any(), gomock.Any()).
		AnyTimes().Return(BlockPointer{}, nil)
	// the split is in the middle
	pad1 := 14
	expectSyncDirtyBlock(config, rmd, p, fileBlock.IPtrs[0].BlockPointer,
		block1, int64(-1), pad1, false)
	// this causes block 2 to be copied from (copy whole block)
	config.mockBsplit.EXPECT().CopyUntilSplit(
		gomock.Any(), gomock.Any(), block2.Contents, int64(5)).
		Do(func(block *FileBlock, lb bool, data []byte, off int64) {
			block.Contents = append(block.Contents, data...)
		}).Return(int64(5))
	// now block 2 is empty, and should be deleted
	// block 3 is dirty too, just copy part of block 4
	pad3 := 10
	split4At := int64(3)
	pad4 := 15
	expectSyncDirtyBlock(config, rmd, p, fileBlock.IPtrs[2].BlockPointer,
		block3, int64(-1), pad3, false)
	config.mockBsplit.EXPECT().CopyUntilSplit(
		gomock.Any(), gomock.Any(), block4.Contents, int64(5)).
		Do(func(block *FileBlock, lb bool, data []byte, off int64) {
			block.Contents = append(block.Contents, data[:3]...)
		}).Return(split4At)
	var newBlock4 *FileBlock
	config.mockDirtyBcache.EXPECT().Put(gomock.Any(),
		fileBlock.IPtrs[3].BlockPointer, p.Branch, gomock.Any()).
		Do(func(id tlf.ID, ptr BlockPointer, branch BranchName, block Block) {
			newBlock4 = block.(*FileBlock)
			// now block 4 is dirty, but it's the end of the line,
			// so nothing else to do
			expectSyncDirtyBlock(config, rmd, p, ptr, newBlock4, int64(-1),
				pad4, true)
			config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
				ptrMatcher{ptr}, branch).AnyTimes().Return(false)
		}).Return(nil)
	// The parent is dirtied too since the pointers changed
	config.mockDirtyBcache.EXPECT().Put(gomock.Any(), fileNode.BlockPointer,
		p.Branch, gomock.Any()).AnyTimes().Return(nil)
	// sync block
	refBytes := uint64((len(block1.Contents) + pad1) +
		(len(block3.Contents) + pad3) +
		(len(block4.Contents) - int(split4At) + pad4))
	unrefBytes := uint64(10 + 15) // id2 and id4
	var newRmd ImmutableRootMetadata
	blocks := make([]kbfsblock.ID, 2)
	expectedPath, _ :=
		expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false, 0,
			refBytes, unrefBytes, &newRmd, blocks)
	putAndCleanAnyBlock(config, p)
	newID1 := kbfsblock.FakeIDAdd(id1, 100)
	newID3 := kbfsblock.FakeIDAdd(id3, 100)
	newID4 := kbfsblock.FakeIDAdd(id4, 100)
	if err := config.KBFSOps().Sync(ctx, n); err != nil {
		t.Errorf("Got unexpected error on sync: %+v", err)
	}
	// Verify: 3 surviving pointers with merged contents.
	newP := ops.nodeCache.PathFromNode(n)
	if len(fileBlock.IPtrs) != 3 {
		t.Errorf("Wrong number of indirect pointers: %d", len(fileBlock.IPtrs))
	} else if fileBlock.IPtrs[0].ID != newID1 {
		t.Errorf("Indirect pointer id1 wrong: %v", fileBlock.IPtrs[0].ID)
	} else if fileBlock.IPtrs[0].EncodedSize != 19 {
		t.Errorf("Indirect pointer encoded size1 wrong: %d", fileBlock.IPtrs[0].EncodedSize)
	} else if fileBlock.IPtrs[0].Off != 0 {
		t.Errorf("Indirect pointer off1 wrong: %d", fileBlock.IPtrs[0].Off)
	} else if fileBlock.IPtrs[1].ID != newID3 {
		t.Errorf("Indirect pointer id3 wrong: %v", fileBlock.IPtrs[1].ID)
	} else if fileBlock.IPtrs[1].EncodedSize != 15 {
		t.Errorf("Indirect pointer encoded size3 wrong: %d", fileBlock.IPtrs[1].EncodedSize)
	} else if fileBlock.IPtrs[1].Off != 10 {
		t.Errorf("Indirect pointer off3 wrong: %d", fileBlock.IPtrs[1].Off)
	} else if fileBlock.IPtrs[2].ID != newID4 {
		t.Errorf("Indirect pointer id4 wrong: %v", fileBlock.IPtrs[2].ID)
	} else if fileBlock.IPtrs[2].EncodedSize != 17 {
		t.Errorf("Indirect pointer encoded size4 wrong: %d", fileBlock.IPtrs[2].EncodedSize)
	} else if fileBlock.IPtrs[2].Off != 18 {
		t.Errorf("Indirect pointer off4 wrong: %d", fileBlock.IPtrs[2].Off)
	} else if !bytes.Equal([]byte{5, 4, 3, 2, 1, 10, 9, 8, 7, 6},
		block1.Contents) {
		t.Errorf("Block 1 has the wrong data: %v", block1.Contents)
	} else if !bytes.Equal(
		[]byte{15, 14, 13, 12, 11, 20, 19, 18}, block3.Contents) {
		t.Errorf("Block 3 has the wrong data: %v", block3.Contents)
	} else if !bytes.Equal([]byte{17, 16}, newBlock4.Contents) {
		t.Errorf("Block 4 has the wrong data: %v", newBlock4.Contents)
	} else {
		checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
			Exec, "", false)
	}
}
// TestSyncDirtyWithBlockChangePointerSuccess syncs a dirty file when the
// block changes cannot be embedded in the MD (ShouldEmbedBlockChanges is
// forced to false), so a separate block-changes block must be readied and
// written to the block server, and the new MD must point at it.
func TestSyncDirtyWithBlockChangePointerSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	rootID := kbfsblock.FakeID(42)
	rmd.data.Dir.ID = rootID
	aID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["a"] = DirEntry{
		BlockInfo: makeBIFromID(aID, uid),
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	aBlock := NewFileBlock().(*FileBlock)
	aBlock.Contents = []byte{1, 2, 3, 4, 5}
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	lState := makeFBOLockState()
	getOrCreateSyncInfo(ops, lState, rootBlock.Children["a"])
	// fsync a
	config.DirtyBlockCache().Put(id, aNode.BlockPointer, p.Branch, aBlock)
	makeBlockStateDirty(config, rmd, p, aNode.BlockPointer)
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	// override the AnyTimes expect call done by default in expectSyncBlock()
	config.mockBsplit.EXPECT().ShouldEmbedBlockChanges(gomock.Any()).
		AnyTimes().Return(false)
	// sync block
	refBytes := uint64(1) // 1 new block changes block
	var newRmd ImmutableRootMetadata
	blocks := make([]kbfsblock.ID, 2)
	expectedPath, lastCall := expectSyncBlock(t, config, nil, uid, id, "", p,
		rmd, false, 0, refBytes, 0, &newRmd, blocks)
	// expected calls for block changes block
	changeBlockID := kbfsblock.FakeID(253)
	changePlainSize := 1
	changeBuf := []byte{253}
	changeReadyBlockData := ReadyBlockData{
		buf: changeBuf,
	}
	tempBCID := kbfsblock.FakeID(252)
	config.mockCrypto.EXPECT().MakeTemporaryBlockID().Return(tempBCID, nil)
	_ = config.mockBops.EXPECT().Ready(gomock.Any(), kmdMatcher{rmd},
		gomock.Any()).Return(changeBlockID, changePlainSize,
		changeReadyBlockData, nil).After(lastCall)
	config.mockBserv.EXPECT().Put(gomock.Any(), rmd.TlfID(), changeBlockID,
		gomock.Any(), changeReadyBlockData.buf,
		changeReadyBlockData.serverHalf).Return(nil)
	// For now, fake the amount copied by using a large number, since
	// we don't have easy access here to the actual encoded data. The
	// exact return value doesn't matter as long as it's large enough.
	config.mockBsplit.EXPECT().CopyUntilSplit(
		gomock.Any(), gomock.Any(), gomock.Any(), int64(0)).
		Do(func(block *FileBlock, lb bool, data []byte, off int64) {
			block.Contents = data
		}).Return(int64(100 * 1024 * 1024))
	if err := config.KBFSOps().Sync(ctx, n); err != nil {
		t.Errorf("Got unexpected error on sync: %+v", err)
	}
	// The synced MD must reference the separate block-changes block.
	newP := ops.nodeCache.PathFromNode(n)
	if newRmd.data.cachedChanges.Info.ID != changeBlockID {
		t.Errorf("Got unexpected changeBlocks pointer: %v vs %v",
			newRmd.data.cachedChanges.Info.ID, changeBlockID)
	} else {
		checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
			Exec, "", false)
	}
	checkBlockCache(t, config, id, append(blocks, rootID, changeBlockID), nil)
}
// TestKBFSOpsStatRootSuccess checks that Stat on the TLF root node
// succeeds against a freshly-injected head MD.
func TestKBFSOpsStatRootSuccess(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	tlfID, handle, rmd := createNewRMD(t, config, "alice", false)
	ops := getOps(config, tlfID)
	ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
	writer := handle.FirstResolvedWriter()
	rootID := kbfsblock.FakeID(42)
	rootNode := pathNode{makeBP(rootID, rmd, config, writer), "p"}
	rootPath := path{FolderBranch{Tlf: tlfID}, []pathNode{rootNode}}
	statTarget := nodeFromPath(t, ops, rootPath)
	if _, err := config.KBFSOps().Stat(ctx, statTarget); err != nil {
		t.Errorf("Error on Stat: %+v", err)
	}
}
// TestKBFSOpsFailingRootOps checks that mutating metadata operations on
// the TLF root node are rejected with InvalidParentPathError.
func TestKBFSOpsFailingRootOps(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	tlfID, handle, rmd := createNewRMD(t, config, "alice", false)
	ops := getOps(config, tlfID)
	ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
	writer := handle.FirstResolvedWriter()
	rootID := kbfsblock.FakeID(42)
	rmd.data.Dir.BlockPointer = makeBP(rootID, rmd, config, writer)
	rootNode := pathNode{rmd.data.Dir.BlockPointer, "p"}
	rootPath := path{FolderBranch{Tlf: tlfID}, []pathNode{rootNode}}
	n := nodeFromPath(t, ops, rootPath)
	// Each op on the root must fail with InvalidParentPathError.
	expectInvalidParent := func(op string, err error) {
		if _, ok := err.(InvalidParentPathError); !ok {
			t.Errorf("Unexpected error on "+op+": %+v", err)
		}
	}
	// TODO: Make sure Read, Write, and Truncate fail also with
	// InvalidPathError{}.
	expectInvalidParent("SetEx", config.KBFSOps().SetEx(ctx, n, true))
	expectInvalidParent("SetMtime", config.KBFSOps().SetMtime(ctx, n, &time.Time{}))
	// TODO: Sync succeeds, but it should fail. Fix this!
}
// testBGObserver signals on its channel whenever a batch-change
// notification arrives, letting tests block until a background flush
// has happened.
type testBGObserver struct {
	c chan<- struct{}
}
// LocalChange implements the observer interface; local (unsynced) writes
// are not interesting to these tests, so it does nothing.
func (t *testBGObserver) LocalChange(ctx context.Context, node Node,
	write WriteRange) {
	// ignore
}
// BatchChanges implements the observer interface by signaling the test
// goroutine that a batch of changes (e.g. a background sync) landed.
// Note this send blocks until the test receives from the channel.
func (t *testBGObserver) BatchChanges(ctx context.Context,
	changes []NodeChange) {
	t.c <- struct{}{}
}
// TlfHandleChange implements the observer interface; handle changes are
// intentionally ignored by this test observer.
func (t *testBGObserver) TlfHandleChange(ctx context.Context,
	newHandle *TlfHandle) {
	// Nothing to do.
}
// Tests that the background flusher will sync a dirty file if the
// application does not.
func TestKBFSOpsBackgroundFlush(t *testing.T) {
	mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
	defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
	uid, id, rmd := injectNewRMD(t, config)
	// Make sure all MDs get different MD IDs, as otherwise
	// setHeadLocked will panic).
	injectShimCrypto(config)
	// Fixture: a single file "f" under the root.
	rootID := kbfsblock.FakeID(42)
	rmd.data.Dir.ID = rootID
	fileID := kbfsblock.FakeID(43)
	rootBlock := NewDirBlock().(*DirBlock)
	rootBlock.Children["f"] = DirEntry{
		BlockInfo: BlockInfo{
			BlockPointer: makeBP(fileID, rmd, config, uid),
			EncodedSize: 1,
		},
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	fileBlock := NewFileBlock().(*FileBlock)
	node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
	fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
	p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
	ops := getOps(config, id)
	n := nodeFromPath(t, ops, p)
	data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	config.mockBsplit.EXPECT().CopyUntilSplit(
		gomock.Any(), gomock.Any(), data, int64(0)).
		Do(func(block *FileBlock, lb bool, data []byte, off int64) {
			block.Contents = data
		}).Return(int64(len(data)))
	if err := config.KBFSOps().Write(ctx, n, data, 0); err != nil {
		t.Errorf("Got error on write: %+v", err)
	}
	// expect a sync to happen in the background
	var newRmd ImmutableRootMetadata
	blocks := make([]kbfsblock.ID, 2)
	expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false, 0, 0, 0,
		&newRmd, blocks)
	// The observer's channel send lets us block until the flusher runs.
	c := make(chan struct{})
	observer := &testBGObserver{c}
	config.Notifier().RegisterForChanges([]FolderBranch{{id, MasterBranch}},
		observer)
	// start the background flusher
	go ops.backgroundFlusher(1 * time.Millisecond)
	// Make sure we get the notification
	<-c
	// Make sure we get a sync even if we overwrite (not extend) the file
	data[1] = 0
	config.mockBsplit.EXPECT().CopyUntilSplit(
		gomock.Any(), gomock.Any(), data, int64(0)).
		Do(func(block *FileBlock, lb bool, data []byte, off int64) {
			block.Contents = data
		}).Return(int64(len(data)))
	// expect another sync to happen in the background
	var newRmd2 ImmutableRootMetadata
	blocks = make([]kbfsblock.ID, 2)
	expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false, 0, 0, 0,
		&newRmd2, blocks)
	if err := config.KBFSOps().Write(ctx, n, data, 0); err != nil {
		t.Errorf("Got error on write: %+v", err)
	}
	<-c
}
// TestKBFSOpsWriteRenameStat verifies that a node's Stat info is
// unchanged by renaming the underlying file.
func TestKBFSOpsWriteRenameStat(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
	// TODO: Use kbfsTestShutdownNoMocks.
	defer kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
	// create a file.
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
	if err != nil {
		t.Fatalf("Couldn't create file: %+v", err)
	}
	// Write to it.
	if err := kbfsOps.Write(ctx, fileNode, []byte{1}, 0); err != nil {
		t.Fatalf("Couldn't write to file: %+v", err)
	}
	// Stat it.
	before, err := kbfsOps.Stat(ctx, fileNode)
	if err != nil {
		t.Fatalf("Couldn't stat file: %+v", err)
	}
	if before.Size != 1 {
		t.Errorf("Stat size %d unexpectedly not 1", before.Size)
	}
	// Rename it.
	if err := kbfsOps.Rename(ctx, rootNode, "a", rootNode, "b"); err != nil {
		t.Fatalf("Couldn't rename; %+v", err)
	}
	// Stat it again; the entry info must be identical.
	after, err := kbfsOps.Stat(ctx, fileNode)
	if err != nil {
		t.Fatalf("Couldn't stat file: %+v", err)
	}
	if before != after {
		t.Errorf("Entry info unexpectedly changed from %+v to %+v", before, after)
	}
}
// TestKBFSOpsWriteRenameGetDirChildren verifies that the entry info
// reported by GetDirChildren after a rename matches the pre-rename Stat.
func TestKBFSOpsWriteRenameGetDirChildren(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
	// TODO: Use kbfsTestShutdownNoMocks.
	defer kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
	// create a file.
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
	if err != nil {
		t.Fatalf("Couldn't create file: %+v", err)
	}
	// Write to it.
	if err := kbfsOps.Write(ctx, fileNode, []byte{1}, 0); err != nil {
		t.Fatalf("Couldn't write to file: %+v", err)
	}
	// Stat it.
	before, err := kbfsOps.Stat(ctx, fileNode)
	if err != nil {
		t.Fatalf("Couldn't stat file: %+v", err)
	}
	if before.Size != 1 {
		t.Errorf("Stat size %d unexpectedly not 1", before.Size)
	}
	// Rename it.
	if err := kbfsOps.Rename(ctx, rootNode, "a", rootNode, "b"); err != nil {
		t.Fatalf("Couldn't rename; %+v", err)
	}
	// Get the stats via GetDirChildren.
	children, err := kbfsOps.GetDirChildren(ctx, rootNode)
	if err != nil {
		t.Fatalf("Couldn't stat file: %+v", err)
	}
	if before != children["b"] {
		t.Errorf("Entry info unexpectedly changed from %+v to %+v",
			before, children["b"])
	}
}
// TestKBFSOpsCreateFileWithArchivedBlock makes sure that creating a new
// file whose initial block ID matches a previously-archived block still
// succeeds (the create is forced to retry with a new reference).
func TestKBFSOpsCreateFileWithArchivedBlock(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
	defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
	// create a file.
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
	kbfsOps := config.KBFSOps()
	_, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
	if err != nil {
		t.Fatalf("Couldn't create file: %+v", err)
	}
	// Remove the file, which will archive the block
	err = kbfsOps.RemoveEntry(ctx, rootNode, "a")
	if err != nil {
		t.Fatalf("Couldn't remove file: %+v", err)
	}
	// Wait for the archiving to finish
	err = kbfsOps.SyncFromServerForTesting(ctx, rootNode.GetFolderBranch())
	if err != nil {
		// Include the cause — the original message dropped err entirely,
		// which made this failure undebuggable and was inconsistent with
		// every other Fatalf in this file.
		t.Fatalf("Couldn't sync from server: %+v", err)
	}
	// Create a second file, which will use the same initial block ID
	// from the cache, even though it's been archived, and will be
	// forced to try again.
	_, _, err = kbfsOps.CreateFile(ctx, rootNode, "b", false, NoExcl)
	if err != nil {
		t.Fatalf("Couldn't create second file: %+v", err)
	}
}
// TestKBFSOpsMultiBlockSyncWithArchivedBlock writes and rewrites a
// multi-block file so its original blocks get archived, then rewrites
// the original first block's data and makes sure the sync still works
// even though that block's ID was previously archived.
func TestKBFSOpsMultiBlockSyncWithArchivedBlock(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
	defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
	// make blocks small
	blockSize := int64(5)
	config.BlockSplitter().(*BlockSplitterSimple).maxSize = blockSize
	// create a file.
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
	if err != nil {
		t.Fatalf("Couldn't create file: %+v", err)
	}
	// Write a few blocks
	data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	err = kbfsOps.Write(ctx, fileNode, data, 0)
	if err != nil {
		t.Fatalf("Couldn't write file: %+v", err)
	}
	err = kbfsOps.Sync(ctx, fileNode)
	if err != nil {
		t.Fatalf("Couldn't sync file: %+v", err)
	}
	// Now overwrite those blocks to archive them
	newData := []byte{11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
	err = kbfsOps.Write(ctx, fileNode, newData, 0)
	if err != nil {
		t.Fatalf("Couldn't write file: %+v", err)
	}
	err = kbfsOps.Sync(ctx, fileNode)
	if err != nil {
		t.Fatalf("Couldn't sync file: %+v", err)
	}
	// Wait for the archiving to finish
	err = kbfsOps.SyncFromServerForTesting(ctx, rootNode.GetFolderBranch())
	if err != nil {
		// Include the cause — the original message dropped err,
		// inconsistent with every other Fatalf in this file.
		t.Fatalf("Couldn't sync from server: %+v", err)
	}
	// Now write the original first block, which has been archived,
	// and make sure it works.
	err = kbfsOps.Write(ctx, fileNode, data[0:blockSize], 0)
	if err != nil {
		t.Fatalf("Couldn't write file: %+v", err)
	}
	err = kbfsOps.Sync(ctx, fileNode)
	if err != nil {
		t.Fatalf("Couldn't sync file: %+v", err)
	}
}
// corruptBlockServer wraps a BlockServer and corrupts every block it
// serves (see its Get override), so that block-hash verification on the
// client must fail.
type corruptBlockServer struct {
	BlockServer
}
// Get fetches the block from the wrapped server and appends a trailing
// zero byte, so the returned data no longer matches the block ID's hash.
func (cbs corruptBlockServer) Get(
	ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context) (
	[]byte, kbfscrypto.BlockCryptKeyServerHalf, error) {
	buf, serverHalf, err := cbs.BlockServer.Get(ctx, tlfID, id, context)
	if err != nil {
		return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
	}
	corrupted := append(buf, 0)
	return corrupted, serverHalf, nil
}
// TestKBFSOpsFailToReadUnverifiableBlock ensures that a block whose data
// was tampered with on the server fails hash verification on read from
// a second device.
func TestKBFSOpsFailToReadUnverifiableBlock(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
	defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
	config.SetBlockServer(&corruptBlockServer{
		BlockServer: config.BlockServer(),
	})
	// create a file.
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
	if _, _, err := config.KBFSOps().CreateFile(
		ctx, rootNode, "a", false, NoExcl); err != nil {
		t.Fatalf("Couldn't create file: %+v", err)
	}
	// Read using a different "device"
	config2 := ConfigAsUser(config, "test_user")
	defer CheckConfigAndShutdown(ctx, t, config2)
	// Shutdown the mdserver explicitly before the state checker tries to run
	defer config2.MDServer().Shutdown()
	rootNode2 := GetRootNodeOrBust(ctx, t, config2, "test_user", false)
	// Lookup the file, which should fail on block ID verification
	_, _, lookupErr := config2.KBFSOps().Lookup(ctx, rootNode2, "a")
	if _, ok := errors.Cause(lookupErr).(kbfshash.HashMismatchError); !ok {
		t.Fatalf("Could unexpectedly lookup the file: %+v", lookupErr)
	}
}
// Test that the size of a single empty block doesn't change. If this
// test ever fails, consult max or strib before merging.
func TestKBFSOpsEmptyTlfSize(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
	defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
	// Create a TLF.
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
	status, _, err := config.KBFSOps().FolderStatus(ctx,
		rootNode.GetFolderBranch())
	if err != nil {
		t.Fatalf("Couldn't get folder status: %+v", err)
	}
	// 313 bytes is the canonical encoded size of an empty TLF's root
	// block; a change means the block encoding changed on the wire.
	if status.DiskUsage != 313 {
		t.Fatalf("Disk usage of an empty TLF is no longer 313. " +
			"Talk to max or strib about why this matters.")
	}
}
// cryptoFixedTlf wraps a Crypto implementation so that every "random"
// TLF ID it generates is actually the fixed, caller-chosen ID, letting a
// test force two configs onto the same TLF ID.
type cryptoFixedTlf struct {
	Crypto
	tlf tlf.ID
}
// MakeRandomTlfID always returns the fixed TLF ID; isPublic is ignored.
func (c cryptoFixedTlf) MakeRandomTlfID(isPublic bool) (tlf.ID, error) {
	return c.tlf, nil
}
// TestKBFSOpsMaliciousMDServerRange tries to trick KBFSOps into
// accepting bad MDs.
func TestKBFSOpsMaliciousMDServerRange(t *testing.T) {
	config1, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "mallory")
	// TODO: Use kbfsTestShutdownNoMocks.
	defer kbfsTestShutdownNoMocksNoCheck(t, config1, ctx, cancel)
	// Create alice's TLF.
	rootNode1 := GetRootNodeOrBust(ctx, t, config1, "alice", false)
	fb1 := rootNode1.GetFolderBranch()
	kbfsOps1 := config1.KBFSOps()
	_, _, err := kbfsOps1.CreateFile(ctx, rootNode1, "dummy.txt", false, NoExcl)
	require.NoError(t, err)
	// Create mallory's fake TLF using the same TLF ID as alice's.
	// cryptoFixedTlf forces mallory's config to "randomly" pick fb1.Tlf.
	config2 := ConfigAsUser(config1, "mallory")
	crypto2 := cryptoFixedTlf{config2.Crypto(), fb1.Tlf}
	config2.SetCrypto(crypto2)
	mdserver2, err := NewMDServerMemory(mdServerLocalConfigAdapter{config2})
	require.NoError(t, err)
	config2.MDServer().Shutdown()
	config2.SetMDServer(mdserver2)
	config2.SetMDCache(NewMDCacheStandard(1))
	rootNode2 := GetRootNodeOrBust(ctx, t, config2, "alice,mallory", false)
	require.Equal(t, fb1.Tlf, rootNode2.GetFolderBranch().Tlf)
	kbfsOps2 := config2.KBFSOps()
	// Add some operations to get mallory's TLF to have a higher
	// MetadataVersion.
	_, _, err = kbfsOps2.CreateFile(
		ctx, rootNode2, "dummy.txt", false, NoExcl)
	require.NoError(t, err)
	err = kbfsOps2.RemoveEntry(ctx, rootNode2, "dummy.txt")
	require.NoError(t, err)
	// Now route alice's TLF to mallory's MD server.
	config1.SetMDServer(mdserver2.copy(mdServerLocalConfigAdapter{config1}))
	// Simulate the server triggering alice to update.
	config1.SetKeyCache(NewKeyCacheStandard(1))
	err = kbfsOps1.SyncFromServerForTesting(ctx, fb1)
	// TODO: We can actually fake out the PrevRoot pointer, too
	// and then we'll be caught by the handle check. But when we
	// have MDOps do the handle check, that'll trigger first.
	require.IsType(t, MDPrevRootMismatch{}, err)
}
| 1 | 14,996 | Do you think we need to keep this in, or is it just leftover personal debugging? Seems like it wouldn't be too useful most of the time, but I don't care too much if you want to keep it. | keybase-kbfs | go |
@@ -95,7 +95,7 @@ public class FreezeTimersOverlay extends Overlay
{
final Player player = playerDespawned.getPlayer();
// All despawns ok: death, teleports, log out, runs away from screen
- if (config.showPlayers() | config.showNpcs() | config.FreezeTimers() | config.Veng() | config.TB())
+ if (config.showPlayers() | config.showNpcs() | config.FreezeTimers() | config.Veng())
{
this.remove(player);
} | 1 | /*
* Copyright (c) 2019, ganom <https://github.com/Ganom>
* Copyright (c) 2019, kyle <https://github.com/kyleeld>
* Copyright (c) 2019, pklite <https://github.com/pklite/pklite>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.client.plugins.freezetimers;
import com.google.common.eventbus.Subscribe;
import java.awt.Color;
import static java.awt.Color.RED;
import static java.awt.Color.WHITE;
import java.awt.Dimension;
import java.awt.Font;
import java.awt.Graphics2D;
import java.awt.Polygon;
import java.awt.image.BufferedImage;
import java.util.Map;
import java.util.HashMap;
import javax.inject.Inject;
import net.runelite.api.Actor;
import net.runelite.api.Client;
import net.runelite.api.events.PlayerDespawned;
import net.runelite.api.GraphicID;
import net.runelite.api.Player;
import net.runelite.api.Point;
import net.runelite.client.ui.FontManager;
import net.runelite.client.ui.overlay.Overlay;
import net.runelite.client.ui.overlay.OverlayLayer;
import net.runelite.client.ui.overlay.OverlayPosition;
import net.runelite.client.ui.overlay.OverlayPriority;
import net.runelite.client.ui.overlay.OverlayUtil;
import net.runelite.client.util.ImageUtil;
public class FreezeTimersOverlay extends Overlay
{
	// Cached freeze info per actor, keyed by actor name (see remove()).
	private final Map<String, FreezeInfo> freezes = new HashMap<>();
	private final FreezeTimersConfig config;
	private final Client client;
	// Bold font used when drawing timer text next to the status icons.
	private final Font timerFont = FontManager.getRunescapeBoldFont().deriveFont(14.0f);
	// Status icons rendered alongside the countdown text.
	private final BufferedImage FREEZE_IMAGE = ImageUtil.getResourceStreamFromClass(getClass(), "freeze.png");
	private final BufferedImage TB_IMAGE = ImageUtil.getResourceStreamFromClass(getClass(), "teleblock.png");
	private final BufferedImage VENG_IMAGE = ImageUtil.getResourceStreamFromClass(getClass(), "veng.png");
	@Inject
	private Timers timers;
	// NOTE(review): lock, finishedAtTest and player appear unused in the
	// visible portion of this class — confirm before removing.
	private boolean lock;
	private long finishedAtTest;
	private Actor player;
	/**
	 * Builds the overlay with its injected config and client.
	 * Rendered dynamically over actors, above most overlays but under
	 * widgets so the timers never cover interface windows.
	 */
	@Inject
	public FreezeTimersOverlay(FreezeTimersConfig config, Client client)
	{
		this.config = config;
		this.client = client;
		setPriority(OverlayPriority.HIGHEST);
		setPosition(OverlayPosition.DYNAMIC);
		setLayer(OverlayLayer.UNDER_WIDGETS);
	}
@Override
public Dimension render(Graphics2D graphics)
{
if (config.showPlayers())
{
client.getPlayers().forEach((p) -> renderOverlayFor(graphics, p));
}
if (config.showNpcs())
{
client.getNpcs().forEach((npc) -> renderOverlayFor(graphics, npc));
}
return null;
}
@Subscribe
public void onPlayerDespawned(PlayerDespawned playerDespawned)
{
final Player player = playerDespawned.getPlayer();
// All despawns ok: death, teleports, log out, runs away from screen
if (config.showPlayers() | config.showNpcs() | config.FreezeTimers() | config.Veng() | config.TB())
{
this.remove(player);
}
}
	/**
	 * Forgets any cached freeze info for the given actor (keyed by name).
	 *
	 * @param actor the actor whose cached timer state should be discarded
	 */
	public void remove(Actor actor)
	{
		freezes.remove(actor.getName());
	}
private void renderOverlayFor(Graphics2D g, Actor actor)
{
if (timers.areAllTimersZero(actor))
{
return;
}
int overlaysDrawn = 0;
if (drawFreezeOverlay(g, actor, overlaysDrawn) && config.FreezeTimers())
{
overlaysDrawn++;
}
if (drawTBOverlay(g, actor, overlaysDrawn) && config.TB())
{
overlaysDrawn++;
}
if (drawVengOverlay(g, actor, overlaysDrawn) && config.Veng())
{
overlaysDrawn++;
}
}
private boolean drawFreezeOverlay(Graphics2D g, Actor actor, int overlaysDrawn)
{
long currentTick = System.currentTimeMillis();
if (timers.getTimerEnd(actor, TimerType.FREEZE) <= currentTick)
{
return false;
}
long finishedAt = timers.getTimerEnd(actor, TimerType.FREEZE);
String text = processTickCounter(finishedAt);
int test = Integer.parseInt(text);
Point poi = actor.getCanvasTextLocation(g, text, 0);
int xpoi = poi.getX();
int ypoi = poi.getY();
Point FixedPoint = new Point(xpoi, ypoi);
if (config.noImage())
{
if (test > 3)
{
renderTextLocation(g, text, config.textSize(), config.fontStyle().getFont(), Color.WHITE, FixedPoint);
}
else
{
renderTextLocation(g, text, config.textSize(), config.fontStyle().getFont(), Color.YELLOW, FixedPoint);
}
}
else
{
renderActorText(g, actor, text, overlaysDrawn, FREEZE_IMAGE);
}
return true;
}
	/**
	 * Draws the teleblock timer for the actor, either as cyan text (offset
	 * and prefixed with " | " when it shares space with a freeze or veng
	 * timer) or next to the teleblock icon.
	 *
	 * @return true if a teleblock overlay was drawn
	 */
	private boolean drawTBOverlay(Graphics2D g, Actor actor, int overlaysDrawn)
	{
		long currentTick = System.currentTimeMillis();
		if (!config.TB())
		{
			return false;
		}
		if (timers.getTimerEnd(actor, TimerType.TELEBLOCK) <= currentTick)
		{
			return false;
		}
		long finishedAt = timers.getTimerEnd(actor, TimerType.TELEBLOCK);
		String text = processTickCounter(finishedAt);
		Point poi = actor.getCanvasTextLocation(g, text, 0);
		// Shift right so the teleblock text doesn't overlap other timers.
		int xpoi = poi.getX() + 20;
		int ypoi = poi.getY();
		Point FixedPoint = new Point(xpoi, ypoi);
		if (config.noImage())
		{
			// NOTE(review): when the freeze timer is active the text is
			// drawn at the offset point with a " | " separator; the veng
			// branch below can draw the same text a second time — confirm
			// this double-draw is intentional.
			if (timers.getTimerEnd(actor, TimerType.FREEZE) <= currentTick)
			{
				renderTextLocation(g, text, config.textSize(), config.fontStyle().getFont(), Color.CYAN, poi);
			}
			if (timers.getTimerEnd(actor, TimerType.FREEZE) >= currentTick)
			{
				renderTextLocation(g, " | " + text, config.textSize(), config.fontStyle().getFont(), Color.CYAN, FixedPoint);
			}
			if (timers.getTimerEnd(actor, TimerType.VENG) >= currentTick)
			{
				renderTextLocation(g, " | " + text, config.textSize(), config.fontStyle().getFont(), Color.CYAN, FixedPoint);
			}
		}
		else
		{
			renderActorText(g, actor, text, overlaysDrawn, TB_IMAGE);
		}
		return true;
	}
// Draws the remaining vengeance timer text above the actor (or an icon plus
// text when image rendering is enabled) and, while the vengeance graphic is
// playing, highlights the actor's tile and name in red.
// Returns true when the overlay was drawn.
private boolean drawVengOverlay(Graphics2D g, Actor actor, int overlaysDrawn)
{
	long currentTick = System.currentTimeMillis();
	if (!config.Veng())
	{
		// Vengeance overlay disabled in config.
		return false;
	}
	if (timers.getTimerEnd(actor, TimerType.VENG) <= currentTick)
	{
		// Vengeance timer already expired; nothing to draw.
		return false;
	}
	long finishedAt = timers.getTimerEnd(actor, TimerType.VENG);
	String text = processTickCounter(finishedAt);
	Point poi = actor.getCanvasTextLocation(g, text, 0);
	// Shift left by 20px so the vengeance text does not overlap the freeze text.
	int xpoi = poi.getX() - 20;
	int ypoi = poi.getY();
	Point FixedPoint = new Point(xpoi, ypoi);
	if (config.noImage())
	{
		if (timers.getTimerEnd(actor, TimerType.FREEZE) <= currentTick)
		{
			renderTextLocation(g, text, config.textSize(), config.fontStyle().getFont(), Color.RED, poi);
		}
		if (timers.getTimerEnd(actor, TimerType.FREEZE) >= currentTick)
		{
			renderTextLocation(g, text + " | ", config.textSize(), config.fontStyle().getFont(), Color.RED, FixedPoint);
		}
		if (timers.getTimerEnd(actor, TimerType.TELEBLOCK) >= currentTick)
		{
			renderTextLocation(g, text + " | ", config.textSize(), config.fontStyle().getFont(), Color.RED, FixedPoint);
		}
	}
	else
	{
		renderActorText(g, actor, text, overlaysDrawn, VENG_IMAGE);
	}
	if (actor.getGraphic() == GraphicID.VENGEANCE || actor.getGraphic() == GraphicID.VENGEANCE_OTHER)
	{
		g.setColor(RED);
		Polygon poly = actor.getCanvasTilePoly();
		// BUG FIX: the tile polygon can be null (e.g. actor off-screen); the old
		// code dereferenced poly.getBounds2D() outside this null check and threw
		// a NullPointerException. Both render calls now sit inside the guard.
		if (poly != null)
		{
			OverlayUtil.renderPolygon(g, poly, RED);
			OverlayUtil.renderTextLocation(g, new Point((int) poly.getBounds2D().getCenterX(),
				(int) poly.getBounds2D().getCenterY()), actor.getName(), RED);
		}
	}
	return true;
}
// Draws text plus the given status image next to the actor, stacking each
// successive overlay 18px lower so multiple timers do not overlap.
private void renderActorText(Graphics2D g, Actor actor, String text, int overlaysDrawn, BufferedImage image)
{
	g.setFont(timerFont);
	g.setColor(WHITE);
	// Vertical slot for this overlay, plus the user-configured horizontal offset.
	int verticalSlot = overlaysDrawn * 18;
	int horizontalShift = config.offset();
	renderActorTextAndImage(g, actor, text, Color.WHITE, image, verticalSlot, horizontalShift);
}
// Draws txtString at canvasPoint in an Arial font of the given style/size,
// with a one-pixel black drop shadow behind it for readability.
// Does nothing when canvasPoint is null.
private void renderTextLocation(Graphics2D graphics, String txtString, int fontSize, int fontStyle, Color fontColor, Point canvasPoint)
{
	graphics.setFont(new Font("Arial", fontStyle, fontSize));
	if (canvasPoint == null)
	{
		return;
	}
	int x = canvasPoint.getX();
	int y = canvasPoint.getY();
	// Shadow first (offset one pixel down-right), then the foreground text on top.
	OverlayUtil.renderTextLocation(graphics, new Point(x + 1, y + 1), txtString, Color.BLACK);
	OverlayUtil.renderTextLocation(graphics, new Point(x, y), txtString, fontColor);
}
// Draws image with its top-left corner at imgLoc.
public void renderImageLocation(Graphics2D graphics, Point imgLoc, BufferedImage image)
{
	graphics.drawImage(image, imgLoc.getX(), imgLoc.getY(), null);
}
// Draws the status image at the actor's canvas image location (shifted by the
// given offsets), then draws text immediately to the image's right, vertically
// aligned with the bottom of the image.
public void renderActorTextAndImage(Graphics2D graphics, Actor actor, String text, Color color,
	BufferedImage image, int yOffset, int xOffset)
{
	Point textLocation = new Point(actor.getCanvasImageLocation(image, 0).getX() + xOffset,
		actor.getCanvasImageLocation(image, 0).getY() + yOffset);
	renderImageLocation(graphics, textLocation, image);
	// The offset parameters are deliberately reused below with new meanings:
	// xOffset becomes the horizontal gap past the image (width + 1px), and
	// yOffset the image-height minus rendered-text-height difference.
	xOffset = image.getWidth() + 1;
	yOffset = (image.getHeight() - (int) graphics.getFontMetrics().getStringBounds(text, graphics).getHeight());
	textLocation = new Point(textLocation.getX() + xOffset, textLocation.getY() + image.getHeight() - yOffset);
	net.runelite.client.ui.overlay.OverlayUtil.renderTextLocation(graphics, textLocation, text, color);
}
// Formats the time remaining until finishedAt (epoch millis) as "SS" or "M:SS",
// zero-padding seconds to two digits. The remainder is rounded up to the next
// whole second so the counter never reads 0 while time remains.
private String processTickCounter(long finishedAt)
{
	long remainingMs = finishedAt - System.currentTimeMillis();
	if (remainingMs < 0)
	{
		// Guard against an already-expired timer producing text like "0-3".
		remainingMs = 0;
	}
	long seconds = remainingMs / 1000 + 1;
	int minutes = (int) (seconds / 60);
	seconds %= 60;
	// %02d replaces the old manual "seconds > 9 ? ... : \"0\" + seconds" padding
	// and the redundant trailing `+ ""` concatenation.
	String text = String.format("%02d", seconds);
	return minutes > 0 ? minutes + ":" + text : text;
}
}
| 1 | 14,967 | showNPCs is not needed | open-osrs-runelite | java |
@@ -0,0 +1,12 @@
+package ssm
+
+import "fmt"
+
+// ErrParameterAlreadyExists occurs when the parameter with name already existed.
+type ErrParameterAlreadyExists struct {
+ name string
+}
+
+func (e *ErrParameterAlreadyExists) Error() string {
+ return fmt.Sprintf("parameter %s already exists", e.name)
+} | 1 | 1 | 17,483 | Docstring for public function pls | aws-copilot-cli | go |
|
@@ -1539,3 +1539,16 @@ func TestSSIReApplyDuration(t *testing.T) {
}
}
}
+
+func TestFilterApplyError(t *testing.T) {
+ err1 := "error when creating \"/tmp/apply-475927931\": namespaces \"openshift-am-config\" not found"
+ expectedFilteredErr1 := "namespaces \"openshift-am-config\" not found"
+ if filterApplyError(err1) != expectedFilteredErr1 {
+ t.Fatalf("expected temporary file to be trimmed from error message")
+ }
+
+ err2 := "unable to parse \"[ { \\\"op\\\": \\\"replace\\\", \\\"path\\\": \\\"/data/foo\\\", \\\"value\\\": \\\"baz-json\\\" ]\": yaml: did not find expected ',' or '}'"
+ if filterApplyError(err2) != err2 {
+ t.Fatalf("expected error message to be unchanged")
+ }
+} | 1 | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package syncsetinstance
import (
"context"
"crypto/md5"
"encoding/json"
"fmt"
"strings"
"testing"
"time"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/openshift/hive/pkg/apis"
"github.com/openshift/hive/pkg/apis/helpers"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1alpha1"
"github.com/openshift/hive/pkg/constants"
controllerutils "github.com/openshift/hive/pkg/controller/utils"
"github.com/openshift/hive/pkg/resource"
)
// Fixture identifiers shared across the syncsetinstance controller tests.
const (
	// testName is both the ClusterDeployment fixture name and the data key
	// used inside test secrets.
	testName = "foo"
	// testNamespace is the namespace every fixture object lives in.
	testNamespace = "default"
	// adminKubeconfigSecret names the secret holding the admin kubeconfig.
	adminKubeconfigSecret = "foo-admin-kubeconfig"
	// adminKubeconfigSecretKey is the key within that secret.
	adminKubeconfigSecretKey = "kubeconfig"
)
// init raises the log level so failing tests include the controller's debug
// output, making reconcile flows easier to diagnose.
func init() {
	log.SetLevel(log.DebugLevel)
}
// TestSyncSetReconcile exercises the SyncSetInstance reconcile loop end to end
// against a fake client and a fake dynamic client. Each table entry seeds a
// ClusterDeployment plus a (selector)syncset and an optional prior status,
// runs one reconcile, and then validates the resulting SyncSetInstance status,
// the set of objects deleted on the target cluster, and whether the instance
// itself was removed.
func TestSyncSetReconcile(t *testing.T) {
	apis.AddToScheme(scheme.Scheme)
	// Truncate to whole seconds so comparisons against round-tripped
	// metav1.Time values (which have second resolution) stay stable.
	tenMinutesAgo := time.Unix(metav1.NewTime(time.Now().Add(-10*time.Minute)).Unix(), 0)
	tests := []struct {
		name                   string                       // test case name
		status                 hivev1.SyncSetInstanceStatus // pre-existing instance status, if any
		syncSet                *hivev1.SyncSet              // syncset present in the fake client
		deletedSyncSet         *hivev1.SyncSet              // syncset the instance references but which no longer exists
		selectorSyncSet        *hivev1.SelectorSyncSet      // selectorsyncset present in the fake client
		deletedSelectorSyncSet *hivev1.SelectorSyncSet      // selectorsyncset that no longer exists
		existingObjs           []runtime.Object             // extra objects seeded into the fake client
		clusterDeployment      *hivev1.ClusterDeployment    // overrides the default fixture when set
		validate               func(*testing.T, *hivev1.SyncSetInstance)
		isDeleted              bool // whether the instance carries a deletion timestamp
		expectDeleted          []deletedItemInfo
		expectSSIDeleted       bool
		expectErr              bool
	}{
		{
			name:    "Create single resource successfully",
			syncSet: testSyncSetWithResources("ss1", testCM("cm1", "foo", "bar")),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				validateSyncSetInstanceStatus(t, ssi.Status,
					successfulResourceStatus(testCM("cm1", "foo", "bar")))
			},
		},
		{
			name:    "Create multiple resources successfully",
			syncSet: testSyncSetWithResources("foo", testCMs("bar", 5)...),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				validateSyncSetInstanceStatus(t, ssi.Status,
					successfulResourceStatus(testCMs("bar", 5)...))
			},
		},
		{
			name:    "Update single resource",
			status:  successfulResourceStatus(testCM("cm1", "key1", "value1")),
			syncSet: testSyncSetWithResources("ss1", testCM("cm1", "key1", "value***changed")),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				validateSyncSetInstanceStatus(t, ssi.Status,
					successfulResourceStatus(testCM("cm1", "key1", "value***changed")))
			},
		},
		{
			name: "Update only resources that have changed",
			status: successfulResourceStatusWithTime(
				[]runtime.Object{
					testCM("cm1", "key1", "value1"),
					testCM("cm2", "key2", "value2"),
					testCM("cm3", "key3", "value3"),
				},
				metav1.NewTime(tenMinutesAgo)),
			syncSet: testSyncSetWithResources("aaa",
				testCM("cm1", "key1", "value1"),
				testCM("cm2", "key2", "value***changed"),
				testCM("cm3", "key3", "value3"),
			),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				validateSyncSetInstanceStatus(t, ssi.Status, successfulResourceStatus(
					testCM("cm1", "key1", "value1"),
					testCM("cm2", "key2", "value***changed"),
					testCM("cm3", "key3", "value3"),
				))
				// Unchanged resources must keep their old probe/transition times.
				unchanged := []hivev1.SyncStatus{
					ssi.Status.Resources[0],
					ssi.Status.Resources[2],
				}
				for _, ss := range unchanged {
					if ss.Conditions[0].LastProbeTime.Time.Unix() != tenMinutesAgo.Unix() {
						t.Errorf("unexpected condition last probe time for resource %s/%s. Got: %v, Expected: %v", ss.Namespace, ss.Name, ss.Conditions[0].LastProbeTime.Time, tenMinutesAgo)
					}
					if ss.Conditions[0].LastTransitionTime.Time.Unix() != tenMinutesAgo.Unix() {
						t.Errorf("unexpected condition last transition time for resource %s/%s. Got: %v, Expected: %v", ss.Namespace, ss.Name, ss.Conditions[0].LastTransitionTime.Time, tenMinutesAgo)
					}
				}
				changed := ssi.Status.Resources[1]
				if changed.Conditions[0].LastProbeTime.Time.Unix() <= tenMinutesAgo.Unix() {
					t.Errorf("unexpected condition last probe time for resource %s/%s. Got: %v, Expected a more recent time", changed.Namespace, changed.Name, changed.Conditions[0].LastProbeTime.Time)
				}
				// The last transition time should not have changed because we went from successful to successful
				if changed.Conditions[0].LastTransitionTime.Time.Unix() != tenMinutesAgo.Unix() {
					t.Errorf("unexpected condition last transition time for resource %s/%s. Got: %v, Expected: %v", changed.Namespace, changed.Name, changed.Conditions[0].LastTransitionTime.Time, tenMinutesAgo)
				}
			},
		},
		{
			// "info-error" is a magic name the fake helper treats as an Info failure.
			name: "Check for failed info call, set condition",
			syncSet: testSyncSetWithResources("foo",
				testCM("cm1", "key1", "value1"),
				testCM("info-error", "key2", "value2"),
			),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				validateUnknownObjectCondition(t, ssi.Status)
			},
			expectErr: true,
		},
		{
			// "apply-error" is a magic name the fake helper treats as an Apply failure.
			name: "Stop applying resources when error occurs",
			syncSet: testSyncSetWithResources("foo",
				testCM("cm1", "key1", "value1"),
				testCM("cm2", "key2", "value2"),
				testCM("apply-error", "key3", "value3"),
				testCM("cm4", "key4", "value4"),
			),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				status := successfulResourceStatus(
					testCM("cm1", "key1", "value1"),
					testCM("cm2", "key2", "value2"),
				)
				status.Resources = append(status.Resources, applyFailedResourceStatus("foo",
					testCM("apply-error", "key3", "value3"),
				).Resources...)
				validateSyncSetInstanceStatus(t, ssi.Status, status)
			},
			expectErr: true,
		},
		{
			name: "Stop applying resources when have annotation: hive.openshift.io/syncset-pause=true",
			clusterDeployment: func() *hivev1.ClusterDeployment {
				cd := testClusterDeployment()
				cd.Annotations = map[string]string{}
				cd.Annotations[constants.SyncsetPauseAnnotation] = "true"
				return cd
			}(),
			syncSet: testSyncSetWithResources("foo",
				testCM("cm1", "key1", "value1"),
			),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				if ssi.Status.Conditions != nil {
					t.Fatalf("conditions should be nil")
				}
			},
			expectErr: false,
		},
		{
			name: "selectorsyncset: apply single resource",
			selectorSyncSet: testSelectorSyncSetWithResources("foo",
				testCM("cm1", "key1", "value1"),
			),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				validateSyncSetInstanceStatus(t, ssi.Status, successfulResourceStatus(
					testCM("cm1", "key1", "value1"),
				))
			},
		},
		{
			name:    "Apply single patch successfully",
			syncSet: testSyncSetWithPatches("ss1", testSyncObjectPatch("foo", "bar", "baz", "v1", "AlwaysApply", "value1")),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				validateSyncSetInstanceStatus(t, ssi.Status, successfulPatchStatus(
					[]hivev1.SyncObjectPatch{
						testSyncObjectPatch("foo", "bar", "baz", "v1", "AlwaysApply", "value1"),
					}))
			},
		},
		{
			name: "Apply multiple patches successfully",
			syncSet: testSyncSetWithPatches("ss1",
				testSyncObjectPatch("foo", "bar", "baz", "v1", "AlwaysApply", "value1"),
				testSyncObjectPatch("chicken", "potato", "stew", "v1", "AlwaysApply", "value2"),
			),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				validateSyncSetInstanceStatus(t, ssi.Status, successfulPatchStatus(
					[]hivev1.SyncObjectPatch{
						testSyncObjectPatch("foo", "bar", "baz", "v1", "AlwaysApply", "value1"),
						testSyncObjectPatch("chicken", "potato", "stew", "v1", "AlwaysApply", "value2"),
					},
				))
			},
		},
		{
			name: "Reapply single patch",
			status: successfulPatchStatus([]hivev1.SyncObjectPatch{
				testSyncObjectPatch("foo", "bar", "baz", "v1", "ApplyOnce", "value1"),
			}),
			syncSet: testSyncSetWithPatches("ss1",
				testSyncObjectPatch("foo", "bar", "baz", "v1", "ApplyOnce", "value1***changed"),
			),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				validateSyncSetInstanceStatus(t, ssi.Status, successfulPatchStatus(
					[]hivev1.SyncObjectPatch{
						testSyncObjectPatch("foo", "bar", "baz", "v1", "ApplyOnce", "value1***changed"),
					},
				))
			},
		},
		{
			// "patch-error" is a magic patch value the fake helper fails on.
			name: "Stop applying patches when error occurs",
			syncSet: testSyncSetWithPatches("ss1",
				testSyncObjectPatch("thing1", "bar", "baz", "v1", "AlwaysApply", "value1"),
				testSyncObjectPatch("thing2", "bar", "baz", "v1", "AlwaysApply", "value1"),
				testSyncObjectPatch("thing3", "bar", "baz", "v1", "AlwaysApply", "patch-error"),
				testSyncObjectPatch("thing4", "bar", "baz", "v1", "AlwaysApply", "value1"),
			),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				status := successfulPatchStatus([]hivev1.SyncObjectPatch{
					testSyncObjectPatch("thing1", "bar", "baz", "v1", "AlwaysApply", "value1"),
					testSyncObjectPatch("thing2", "bar", "baz", "v1", "AlwaysApply", "value1"),
				})
				status.Patches = append(status.Patches, failedPatchStatus("ss1", []hivev1.SyncObjectPatch{
					testSyncObjectPatch("thing3", "bar", "baz", "v1", "AlwaysApply", "patch-error"),
				}).Patches...)
				validateSyncSetInstanceStatus(t, ssi.Status, status)
			},
			expectErr: true,
		},
		{
			// Resources are applied before patches, so a resource failure means
			// no patch status should appear at all.
			name: "No patches applied when resource error occurs",
			syncSet: testSyncSet("ss1",
				[]runtime.Object{
					testCM("cm1", "key1", "value1"),
					testCM("cm2", "key2", "value2"),
					testCM("apply-error", "key3", "value3"),
					testCM("cm4", "key4", "value4"),
				},
				[]hivev1.SyncObjectPatch{
					testSyncObjectPatch("thing1", "bar", "baz", "v1", "AlwaysApply", "value1"),
					testSyncObjectPatch("thing2", "bar", "baz", "v1", "AlwaysApply", "value2"),
				}),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				status := successfulResourceStatus(
					testCM("cm1", "key1", "value1"),
					testCM("cm2", "key2", "value2"),
				)
				status.Resources = append(status.Resources, applyFailedResourceStatus("ss1",
					testCM("apply-error", "key3", "value3"),
				).Resources...)
				validateSyncSetInstanceStatus(t, ssi.Status, status)
			},
			expectErr: true,
		},
		{
			// In Sync apply mode, resources dropped from the syncset must be
			// deleted from the target cluster.
			name: "resource sync mode, remove resources",
			status: successfulResourceStatus(
				testCM("cm1", "key1", "value1"),
				testCM("cm2", "key2", "value2"),
				testCM("cm3", "key3", "value3"),
				testCM("cm4", "key4", "value4"),
			),
			syncSet: func() *hivev1.SyncSet {
				ss := testSyncSetWithResources("aaa",
					testCM("cm1", "key1", "value1"),
					testCM("cm3", "key3", "value3"),
				)
				ss.Spec.ResourceApplyMode = hivev1.SyncResourceApplyMode
				return ss
			}(),
			expectDeleted: []deletedItemInfo{
				deletedItem("cm2", "ConfigMap"),
				deletedItem("cm4", "ConfigMap"),
			},
		},
		{
			name: "delete syncset instance when syncset doesn't exist",
			deletedSyncSet: testSyncSetWithResources("aaa",
				testCM("cm1", "key1", "value1"),
			),
			expectSSIDeleted: true,
		},
		{
			name: "delete syncset instance when selectorsyncset doesn't exist",
			deletedSelectorSyncSet: testSelectorSyncSetWithResources("aaa",
				testCM("cm1", "key1", "value1"),
			),
			expectSSIDeleted: true,
		},
		{
			name: "cleanup deleted syncset resources",
			deletedSyncSet: func() *hivev1.SyncSet {
				ss := testSyncSetWithResources("aaa",
					testCM("cm1", "key1", "value1"),
					testCM("cm1", "key1", "value1"),
				)
				ss.Spec.ResourceApplyMode = hivev1.SyncResourceApplyMode
				return ss
			}(),
			isDeleted: true,
			status: successfulResourceStatus(
				testCM("cm1", "key1", "value1"),
				testCM("cm2", "key2", "value2"),
			),
			expectDeleted: []deletedItemInfo{
				deletedItem("cm1", "ConfigMap"),
				deletedItem("cm2", "ConfigMap"),
			},
		},
		{
			name: "Apply single SecretReference successfully",
			existingObjs: []runtime.Object{
				testSecret("foo", "bar"),
			},
			syncSet: testSyncSetWithSecretReferences("ss1", testSecretRef("foo")),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				validateSyncSetInstanceStatus(t, ssi.Status,
					successfulSecretReferenceStatus(testSecret("foo", "bar")))
			},
		},
		{
			name:      "Local SecretReference secret does not exist",
			syncSet:   testSyncSetWithSecretReferences("ss1", testSecretRef("foo")),
			expectErr: true,
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				status := hivev1.SyncSetInstanceStatus{}
				status.SecretReferences = append(status.SecretReferences, applyFailedSecretReferenceStatus("ss1",
					testSecret("foo", "bar"),
				).SecretReferences...)
				validateSyncSetInstanceStatus(t, ssi.Status, status)
			},
		},
		{
			name: "Local SecretReference secret has OwnerReference",
			existingObjs: []runtime.Object{
				testSecretWithOwner("foo", "bar"),
			},
			syncSet: testSyncSetWithSecretReferences("ss1", testSecretRef("foo")),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				validateSyncSetInstanceStatus(t, ssi.Status,
					successfulSecretReferenceStatus(
						testSecret("foo", "bar"),
					))
			},
		},
		{
			name: "Apply multiple SecretReferences successfully",
			existingObjs: []runtime.Object{
				testSecret("foo", "bar"),
				testSecret("baz", "bar"),
			},
			syncSet: testSyncSetWithSecretReferences("ss1", testSecretRef("foo"), testSecretRef("baz")),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				validateSyncSetInstanceStatus(t, ssi.Status,
					successfulSecretReferenceStatus(
						testSecret("foo", "bar"),
						testSecret("baz", "bar"),
					))
			},
		},
		{
			name: "Update single secret",
			existingObjs: []runtime.Object{
				testSecret("foo", "data_value***changed"),
			},
			status:  successfulSecretReferenceStatus(testSecret("key", "data_value")),
			syncSet: testSyncSetWithSecretReferences("ss1", testSecretRef("foo")),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				validateSyncSetInstanceStatus(t, ssi.Status,
					successfulSecretReferenceStatus(testSecret("foo", "data_value***changed")))
			},
		},
		{
			name: "Update only secrets that have changed",
			existingObjs: []runtime.Object{
				testSecret("s1", "value1"),
				testSecret("s2", "value2***changed"),
				testSecret("s3", "value3"),
			},
			status: successfulSecretReferenceStatusWithTime(
				[]runtime.Object{
					testSecret("s1", "value1"),
					testSecret("s2", "value2"),
					testSecret("s3", "value3"),
				},
				metav1.NewTime(tenMinutesAgo)),
			syncSet: testSyncSetWithSecretReferences("aaa",
				testSecretRef("s1"),
				testSecretRef("s2"),
				testSecretRef("s3"),
			),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				validateSyncSetInstanceStatus(t, ssi.Status, successfulSecretReferenceStatus(
					testSecret("s1", "value1"),
					testSecret("s2", "value2***changed"),
					testSecret("s3", "value3"),
				))
				// Unchanged secrets must keep their old probe/transition times.
				unchanged := []hivev1.SyncStatus{
					ssi.Status.SecretReferences[0],
					ssi.Status.SecretReferences[2],
				}
				for _, ss := range unchanged {
					if ss.Conditions[0].LastProbeTime.Time.Unix() != tenMinutesAgo.Unix() {
						t.Errorf("unexpected condition last probe time for resource %s/%s. Got: %v, Expected: %v", ss.Namespace, ss.Name, ss.Conditions[0].LastProbeTime.Time, tenMinutesAgo)
					}
					if ss.Conditions[0].LastTransitionTime.Time.Unix() != tenMinutesAgo.Unix() {
						t.Errorf("unexpected condition last transition time for resource %s/%s. Got: %v, Expected: %v", ss.Namespace, ss.Name, ss.Conditions[0].LastTransitionTime.Time, tenMinutesAgo)
					}
				}
				changed := ssi.Status.SecretReferences[1]
				if changed.Conditions[0].LastProbeTime.Time.Unix() <= tenMinutesAgo.Unix() {
					t.Errorf("unexpected condition last probe time for resource %s/%s. Got: %v, Expected a more recent time", changed.Namespace, changed.Name, changed.Conditions[0].LastProbeTime.Time)
				}
				// The last transition time should not have changed because we went from successful to successful
				if changed.Conditions[0].LastTransitionTime.Time.Unix() != tenMinutesAgo.Unix() {
					t.Errorf("unexpected condition last transition time for resource %s/%s. Got: %v, Expected: %v", changed.Namespace, changed.Name, changed.Conditions[0].LastTransitionTime.Time, tenMinutesAgo)
				}
			},
		},
		{
			name: "resource sync mode, remove secrets",
			existingObjs: []runtime.Object{
				testSecret("foo1", "bar"),
				testSecret("foo2", "bar"),
				testSecret("foo3", "bar"),
				testSecret("foo4", "bar"),
			},
			status: successfulSecretReferenceStatus(
				testSecret("foo1", "bar"),
				testSecret("foo2", "bar"),
				testSecret("foo3", "bar"),
				testSecret("foo4", "bar"),
			),
			syncSet: func() *hivev1.SyncSet {
				ss := testSyncSetWithSecretReferences("aaa",
					testSecretRef("foo1"),
					testSecretRef("foo3"),
				)
				ss.Spec.ResourceApplyMode = hivev1.SyncResourceApplyMode
				return ss
			}(),
			expectDeleted: []deletedItemInfo{
				deletedItem("foo2", secretsResource),
				deletedItem("foo4", secretsResource),
			},
		},
		{
			name: "selectorsyncset: apply single secret",
			existingObjs: []runtime.Object{
				testSecret("s1", "value1"),
			},
			selectorSyncSet: testMatchingSelectorSyncSetWithSecretReferences("foo",
				testSecretRef("s1"),
			),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				validateSyncSetInstanceStatus(t, ssi.Status, successfulSecretReferenceStatus(
					testSecret("s1", "value1"),
				))
			},
		},
		{
			// "delete-error" is a magic name the fake dynamic client fails to delete.
			name: "syncset: fail to delete resource",
			status: successfulResourceStatus(
				testCM("cm1", "key1", "value1"),
				testCM("delete-error", "key2", "value2"),
			),
			syncSet: func() *hivev1.SyncSet {
				ss := testSyncSetWithResources("aaa", testCM("cm1", "key1", "value1"))
				ss.Spec.ResourceApplyMode = hivev1.SyncResourceApplyMode
				return ss
			}(),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				status := successfulResourceStatus(
					testCM("cm1", "key1", "value1"),
				)
				status.Resources = append(status.Resources, deleteFailedResourceStatus("aaa",
					testCM("delete-error", "key2", "value2"),
				).Resources...)
				validateSyncSetInstanceStatus(t, ssi.Status, status)
			},
		},
		{
			name: "syncset: fail to delete secret reference secret",
			existingObjs: []runtime.Object{
				testSecret("foo", "bar"),
			},
			status: successfulSecretReferenceStatus(
				testSecret("foo", "bar"),
				testSecret("delete-error", "baz"),
			),
			syncSet: func() *hivev1.SyncSet {
				ss := testSyncSetWithSecretReferences("aaa", testSecretRef("foo"))
				ss.Spec.ResourceApplyMode = hivev1.SyncResourceApplyMode
				return ss
			}(),
			validate: func(t *testing.T, ssi *hivev1.SyncSetInstance) {
				status := successfulSecretReferenceStatus(testSecret("foo", "bar"))
				status.SecretReferences = append(status.SecretReferences, deleteFailedSecretReferenceStatus("aaa",
					testSecret("delete-error", "baz"),
				).SecretReferences...)
				validateSyncSetInstanceStatus(t, ssi.Status, status)
			},
		},
		{
			name: "cleanup deleted syncset secrets",
			deletedSyncSet: func() *hivev1.SyncSet {
				ss := testSyncSetWithSecretReferences("aaa",
					testSecretRef("foo"),
					testSecretRef("bar"),
				)
				ss.Spec.ResourceApplyMode = hivev1.SyncResourceApplyMode
				return ss
			}(),
			isDeleted: true,
			status: successfulSecretReferenceStatus(
				testSecret("foo", "bar"),
				testSecret("bar", "baz"),
			),
			expectDeleted: []deletedItemInfo{
				deletedItem("foo", secretsResource),
				deletedItem("bar", secretsResource),
			},
		},
	}

	for _, test := range tests {
		apis.AddToScheme(scheme.Scheme)
		t.Run(test.name, func(t *testing.T) {
			var ssi *hivev1.SyncSetInstance
			cd := testClusterDeployment()
			if test.clusterDeployment != nil {
				cd = test.clusterDeployment
			}
			runtimeObjs := []runtime.Object{cd, kubeconfigSecret()}
			runtimeObjs = append(runtimeObjs, test.existingObjs...)
			// Build the instance from whichever (selector)syncset the case
			// supplies; "deleted" variants are NOT seeded into the fake client
			// so the controller sees a dangling reference.
			switch {
			case test.deletedSyncSet != nil:
				ssi = syncSetInstanceForSyncSet(cd, test.deletedSyncSet)
			case test.deletedSelectorSyncSet != nil:
				ssi = syncSetInstanceForSelectorSyncSet(cd, test.deletedSelectorSyncSet)
			case test.syncSet != nil:
				ssi = syncSetInstanceForSyncSet(cd, test.syncSet)
				runtimeObjs = append(runtimeObjs, test.syncSet)
			case test.selectorSyncSet != nil:
				ssi = syncSetInstanceForSelectorSyncSet(cd, test.selectorSyncSet)
				runtimeObjs = append(runtimeObjs, test.selectorSyncSet)
			}
			controllerutils.AddFinalizer(ssi, hivev1.FinalizerSyncSetInstance)
			ssi.Status = test.status

			if test.isDeleted {
				now := metav1.Now()
				ssi.DeletionTimestamp = &now
			}

			runtimeObjs = append(runtimeObjs, ssi)
			fakeClient := fake.NewFakeClient(runtimeObjs...)

			dynamicClient := &fakeDynamicClient{}

			helper := &fakeHelper{t: t}
			r := &ReconcileSyncSetInstance{
				Client:         fakeClient,
				scheme:         scheme.Scheme,
				logger:         log.WithField("controller", "syncset"),
				applierBuilder: helper.newHelper,
				hash:           fakeHashFunc(t),
				dynamicClientBuilder: func(string, string) (dynamic.Interface, error) {
					return dynamicClient, nil
				},
			}

			_, err := r.Reconcile(reconcile.Request{
				NamespacedName: types.NamespacedName{
					Name:      ssi.Name,
					Namespace: ssi.Namespace,
				},
			})

			if !test.expectErr && err != nil {
				t.Fatalf("unexpected error: %v", err)
			} else if test.expectErr && err == nil {
				t.Fatal("expected error not returned")
			}
			validateDeletedItems(t, dynamicClient.deletedItems, test.expectDeleted)
			if test.expectSSIDeleted {
				result := &hivev1.SyncSetInstance{}
				err = fakeClient.Get(context.TODO(), types.NamespacedName{Name: ssi.Name, Namespace: ssi.Namespace}, result)
				if !errors.IsNotFound(err) {
					t.Errorf("expected syncset instance to be deleted")
				}
				return
			}
			if test.validate != nil {
				result := &hivev1.SyncSetInstance{}
				err = fakeClient.Get(context.TODO(), types.NamespacedName{Name: ssi.Name, Namespace: ssi.Namespace}, result)
				if err != nil {
					t.Fatalf("unexpected error: %v", err)
				}
				test.validate(t, result)
			}
		})
	}
}
// sameDeletedItem reports whether two deleted-item records identify the same
// object: same name, namespace, and group/version/resource.
func sameDeletedItem(a, b deletedItemInfo) bool {
	switch {
	case a.name != b.name:
		return false
	case a.namespace != b.namespace:
		return false
	case a.group != b.group:
		return false
	case a.version != b.version:
		return false
	case a.resource != b.resource:
		return false
	}
	return true
}
// validateDeletedItems asserts that the actually deleted items match the
// expected set, ignoring order. Duplicates must be matched one-for-one. The
// test fails on any count mismatch or on any actual item with no expectation.
func validateDeletedItems(t *testing.T, actual, expected []deletedItemInfo) {
	if len(actual) != len(expected) {
		t.Errorf("unexpected number of deleted items, actual: %d, expected: %d", len(actual), len(expected))
		return
	}
	// Work on a copy: the old code removed matched entries with
	// append(expected[:i], expected[i+1:]...), which writes into the caller's
	// backing array and silently corrupts the caller's slice.
	remaining := make([]deletedItemInfo, len(expected))
	copy(remaining, expected)
	for _, item := range actual {
		index := -1
		for i, expectedItem := range remaining {
			if sameDeletedItem(item, expectedItem) {
				index = i
				break
			}
		}
		if index == -1 {
			t.Errorf("unexpected deleted item: %#v", item)
			return
		}
		// Remove the matched entry so each expectation is consumed once.
		remaining = append(remaining[:index], remaining[index+1:]...)
	}
}
// testClusterDeployment returns an installed ClusterDeployment fixture in the
// test namespace, labeled region=us-east-1 so selector syncsets can match it,
// with its admin kubeconfig secret reference populated.
func testClusterDeployment() *hivev1.ClusterDeployment {
	return &hivev1.ClusterDeployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      testName,
			Namespace: testNamespace,
			Labels:    map[string]string{"region": "us-east-1"},
		},
		Spec: hivev1.ClusterDeploymentSpec{
			Installed: true,
		},
		Status: hivev1.ClusterDeploymentStatus{
			AdminKubeconfigSecret: corev1.LocalObjectReference{Name: adminKubeconfigSecret},
		},
	}
}
// testSyncSet builds a SyncSet fixture named name in the test namespace that
// targets the test ClusterDeployment and carries the given raw resources and
// patches.
func testSyncSet(name string, resources []runtime.Object, patches []hivev1.SyncObjectPatch) *hivev1.SyncSet {
	ss := &hivev1.SyncSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: testNamespace,
		},
		Spec: hivev1.SyncSetSpec{
			ClusterDeploymentRefs: []corev1.LocalObjectReference{
				{
					Name: testName,
				},
			},
		},
	}
	for _, r := range resources {
		ss.Spec.Resources = append(ss.Spec.Resources, runtime.RawExtension{
			Object: r,
		})
	}
	// Append all patches at once instead of looping one element at a time.
	ss.Spec.Patches = append(ss.Spec.Patches, patches...)
	return ss
}
// testSyncSetWithResources builds a SyncSet named name in the test namespace
// that targets the test ClusterDeployment and embeds each given object as a
// raw resource.
func testSyncSetWithResources(name string, resources ...runtime.Object) *hivev1.SyncSet {
	ss := &hivev1.SyncSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: testNamespace,
		},
		Spec: hivev1.SyncSetSpec{
			ClusterDeploymentRefs: []corev1.LocalObjectReference{{Name: testName}},
		},
	}
	for _, resource := range resources {
		ss.Spec.Resources = append(ss.Spec.Resources, runtime.RawExtension{Object: resource})
	}
	return ss
}
// testSyncSetWithPatches builds a SyncSet named name in the test namespace
// that targets the test ClusterDeployment and carries the given patches.
func testSyncSetWithPatches(name string, patches ...hivev1.SyncObjectPatch) *hivev1.SyncSet {
	ss := &hivev1.SyncSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: testNamespace,
		},
		Spec: hivev1.SyncSetSpec{
			ClusterDeploymentRefs: []corev1.LocalObjectReference{
				{
					Name: testName,
				},
			},
		},
	}
	// Append the whole slice at once instead of looping one element at a time.
	ss.Spec.Patches = append(ss.Spec.Patches, patches...)
	return ss
}
// testSyncSetWithSecretReferences builds a SyncSet named name in the test
// namespace that targets the test ClusterDeployment and carries the given
// secret references.
func testSyncSetWithSecretReferences(name string, refs ...hivev1.SecretReference) *hivev1.SyncSet {
	ss := &hivev1.SyncSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: testNamespace,
		},
		Spec: hivev1.SyncSetSpec{
			ClusterDeploymentRefs: []corev1.LocalObjectReference{{Name: testName}},
		},
	}
	ss.Spec.SecretReferences = append(ss.Spec.SecretReferences, refs...)
	return ss
}
// testSyncObjectPatch builds a merge-type SyncObjectPatch fixture targeting
// the object identified by name/namespace/kind/apiVersion, with a patch body
// embedding value.
// NOTE(review): the patch body uses single quotes and is missing a quote after
// 'key, so it is not valid JSON — the fake applier never parses it, but
// confirm this fixture shape is intentional.
func testSyncObjectPatch(name, namespace, kind, apiVersion string, applyMode hivev1.SyncSetPatchApplyMode, value string) hivev1.SyncObjectPatch {
	patch := fmt.Sprintf("{'spec': {'key: '%v'}}", value)
	return hivev1.SyncObjectPatch{
		Name:       name,
		Namespace:  namespace,
		Kind:       kind,
		APIVersion: apiVersion,
		ApplyMode:  applyMode,
		Patch:      patch,
		PatchType:  "merge",
	}
}
// testSelectorSyncSetWithResources builds a SelectorSyncSet named name whose
// selector matches clusters labeled region=us-east-1 (i.e. the test
// ClusterDeployment fixture) and embeds each object as a raw resource.
func testSelectorSyncSetWithResources(name string, resources ...runtime.Object) *hivev1.SelectorSyncSet {
	sss := &hivev1.SelectorSyncSet{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: hivev1.SelectorSyncSetSpec{
			ClusterDeploymentSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{"region": "us-east-1"},
			},
		},
	}
	for _, resource := range resources {
		sss.Spec.Resources = append(sss.Spec.Resources, runtime.RawExtension{Object: resource})
	}
	return sss
}
// testMatchingSelectorSyncSetWithSecretReferences builds a SelectorSyncSet
// whose selector (region=us-east-1) matches the test ClusterDeployment fixture.
func testMatchingSelectorSyncSetWithSecretReferences(name string, refs ...hivev1.SecretReference) *hivev1.SelectorSyncSet {
	return testSelectorSyncSetWithSecretReferences(name, map[string]string{"region": "us-east-1"}, refs...)
}
// testSelectorSyncSetWithSecretReferences builds a SelectorSyncSet named name
// with the given cluster selector labels and secret references.
func testSelectorSyncSetWithSecretReferences(name string, matchLabels map[string]string, refs ...hivev1.SecretReference) *hivev1.SelectorSyncSet {
	sss := &hivev1.SelectorSyncSet{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: hivev1.SelectorSyncSetSpec{
			ClusterDeploymentSelector: metav1.LabelSelector{MatchLabels: matchLabels},
		},
	}
	sss.Spec.SecretReferences = append(sss.Spec.SecretReferences, refs...)
	return sss
}
// testCM returns a ConfigMap in the test namespace with a single key/value
// data entry and a "hash" annotation derived from that entry (consumed by the
// fake hash function to detect content changes).
func testCM(name, key, value string) runtime.Object {
	cm := &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "ConfigMap",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Namespace:   testNamespace,
			Annotations: map[string]string{"hash": key + "=" + value},
		},
	}
	cm.Data = map[string]string{key: value}
	return cm
}
// deletedItem builds the expected record for a core/v1 object of the given
// resource type deleted from the target cluster in the test namespace.
func deletedItem(name, resource string) deletedItemInfo {
	item := deletedItemInfo{name: name, resource: resource}
	item.namespace = testNamespace
	item.group = ""
	item.version = "v1"
	return item
}
// testCMs returns count ConfigMaps named "<prefix>-<i>", each carrying a
// distinct "<prefix>-key-<i>" / "<prefix>-value-<i>" data pair.
func testCMs(prefix string, count int) []runtime.Object {
	result := make([]runtime.Object, 0, count)
	for i := 0; i < count; i++ {
		name := fmt.Sprintf("%s-%d", prefix, i)
		key := fmt.Sprintf("%s-key-%d", prefix, i)
		value := fmt.Sprintf("%s-value-%d", prefix, i)
		result = append(result, testCM(name, key, value))
	}
	return result
}
// testSecretRef returns a SecretReference whose source and target both point
// at the secret called name in the test namespace.
func testSecretRef(name string) hivev1.SecretReference {
	ref := corev1.ObjectReference{
		Name:       name,
		Namespace:  testNamespace,
		APIVersion: "v1",
	}
	return hivev1.SecretReference{Source: ref, Target: ref}
}
// testSecret returns a core/v1 Secret in the test namespace whose single data
// entry (keyed by testName) holds data.
func testSecret(name, data string) *corev1.Secret {
	s := &corev1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: "v1",
		},
	}
	s.Name = name
	s.Namespace = testNamespace
	s.Data = map[string][]byte{testName: []byte(data)}
	return s
}
// testSecretWithOwner returns a testSecret that already carries an unrelated
// owner reference, for verifying the controller leaves owned secrets usable.
func testSecretWithOwner(name, data string) *corev1.Secret {
	owned := testSecret(name, data)
	owned.OwnerReferences = append(owned.OwnerReferences, metav1.OwnerReference{
		APIVersion: "foo.io/v1",
		Kind:       "Foo",
	})
	return owned
}
// kubeconfigSecret returns the admin kubeconfig secret the controller reads
// to build clients for the target cluster.
func kubeconfigSecret() *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      adminKubeconfigSecret,
			Namespace: testNamespace,
		},
		Data: map[string][]byte{adminKubeconfigSecretKey: []byte("foo")},
	}
}
// applyFailedResourceStatus builds the SyncSetObjectStatus expected when
// applying each of the given resources failed: one SyncStatus per resource
// carrying a true ApplyFailure condition stamped with the current time.
func applyFailedResourceStatus(name string, resources ...runtime.Object) hivev1.SyncSetObjectStatus {
	now := metav1.Now()
	failed := hivev1.SyncCondition{
		Type:               hivev1.ApplyFailureSyncCondition,
		Status:             corev1.ConditionTrue,
		LastTransitionTime: now,
		LastProbeTime:      now,
	}
	status := hivev1.SyncSetObjectStatus{Name: name}
	for _, r := range resources {
		accessor, _ := meta.Accessor(r)
		gvk := r.GetObjectKind().GroupVersionKind()
		status.Resources = append(status.Resources, hivev1.SyncStatus{
			APIVersion: gvk.GroupVersion().String(),
			Kind:       gvk.Kind,
			Resource:   gvk.Kind,
			Name:       accessor.GetName(),
			Namespace:  accessor.GetNamespace(),
			Hash:       objectHash(accessor),
			Conditions: []hivev1.SyncCondition{failed},
		})
	}
	return status
}
func applyFailedSecretReferenceStatus(name string, secrets ...runtime.Object) hivev1.SyncSetObjectStatus {
conditionTime := metav1.Now()
status := hivev1.SyncSetObjectStatus{
Name: name,
}
for _, s := range secrets {
obj, _ := meta.Accessor(s)
status.SecretReferences = append(status.SecretReferences, hivev1.SyncStatus{
APIVersion: s.GetObjectKind().GroupVersionKind().GroupVersion().String(),
Kind: s.GetObjectKind().GroupVersionKind().Kind,
Resource: secretsResource,
Name: obj.GetName(),
Namespace: obj.GetNamespace(),
Hash: "",
Conditions: []hivev1.SyncCondition{
{
Type: hivev1.ApplyFailureSyncCondition,
Status: corev1.ConditionTrue,
LastTransitionTime: conditionTime,
LastProbeTime: conditionTime,
},
},
})
}
return status
}
// deleteFailedResourceStatus returns a SyncSetObjectStatus in which each
// resource was applied successfully but subsequently failed to delete
// (ApplySuccess=True and DeletionFailed=True).
func deleteFailedResourceStatus(name string, resources ...runtime.Object) hivev1.SyncSetObjectStatus {
	conditionTime := metav1.Now()
	status := hivev1.SyncSetObjectStatus{
		Name: name,
	}
	for _, r := range resources {
		obj, _ := meta.Accessor(r)
		status.Resources = append(status.Resources, hivev1.SyncStatus{
			APIVersion: r.GetObjectKind().GroupVersionKind().GroupVersion().String(),
			Kind:       r.GetObjectKind().GroupVersionKind().Kind,
			Resource:   r.GetObjectKind().GroupVersionKind().Kind,
			Name:       obj.GetName(),
			Namespace:  obj.GetNamespace(),
			Hash:       objectHash(obj),
			Conditions: []hivev1.SyncCondition{
				{
					Type:               hivev1.ApplySuccessSyncCondition,
					Status:             corev1.ConditionTrue,
					LastTransitionTime: conditionTime,
					LastProbeTime:      conditionTime,
				},
				{
					Type:               hivev1.DeletionFailedSyncCondition,
					Status:             corev1.ConditionTrue,
					LastTransitionTime: conditionTime,
					LastProbeTime:      conditionTime,
				},
			},
		})
	}
	return status
}

// deleteFailedSecretReferenceStatus is the secret-reference analogue of
// deleteFailedResourceStatus; here the hash is the secret's checksum as
// computed by controllerutils.GetChecksumOfObject.
func deleteFailedSecretReferenceStatus(name string, secrets ...runtime.Object) hivev1.SyncSetObjectStatus {
	conditionTime := metav1.Now()
	status := hivev1.SyncSetObjectStatus{
		Name: name,
	}
	for _, s := range secrets {
		obj, _ := meta.Accessor(s)
		hash, _ := controllerutils.GetChecksumOfObject(s)
		status.SecretReferences = append(status.SecretReferences, hivev1.SyncStatus{
			APIVersion: s.GetObjectKind().GroupVersionKind().GroupVersion().String(),
			Kind:       s.GetObjectKind().GroupVersionKind().Kind,
			Resource:   secretsResource,
			Name:       obj.GetName(),
			Namespace:  obj.GetNamespace(),
			Hash:       hash,
			Conditions: []hivev1.SyncCondition{
				{
					Type:               hivev1.ApplySuccessSyncCondition,
					Status:             corev1.ConditionTrue,
					LastTransitionTime: conditionTime,
					LastProbeTime:      conditionTime,
				},
				{
					Type:               hivev1.DeletionFailedSyncCondition,
					Status:             corev1.ConditionTrue,
					LastTransitionTime: conditionTime,
					LastProbeTime:      conditionTime,
				},
			},
		})
	}
	return status
}
// failedPatchStatus returns a SyncSetObjectStatus in which each patch carries
// an ApplyFailure=True condition; the hash is the md5 of the JSON-encoded patch.
func failedPatchStatus(name string, patches []hivev1.SyncObjectPatch) hivev1.SyncSetObjectStatus {
	conditionTime := metav1.Now()
	status := hivev1.SyncSetObjectStatus{
		Name: name,
	}
	for _, p := range patches {
		b, _ := json.Marshal(p)
		status.Patches = append(status.Patches, hivev1.SyncStatus{
			APIVersion: p.APIVersion,
			Kind:       p.Kind,
			Name:       p.Name,
			Namespace:  p.Namespace,
			Hash:       fmt.Sprintf("%x", md5.Sum(b)),
			Conditions: []hivev1.SyncCondition{
				{
					Type:               hivev1.ApplyFailureSyncCondition,
					Status:             corev1.ConditionTrue,
					LastTransitionTime: conditionTime,
					LastProbeTime:      conditionTime,
				},
			},
		})
	}
	return status
}
// successfulResourceStatus returns an instance status in which every resource
// is recorded as successfully applied just now.
func successfulResourceStatus(resources ...runtime.Object) hivev1.SyncSetInstanceStatus {
	return successfulResourceStatusWithTime(resources, metav1.Now())
}

// successfulResourceStatusWithTime is like successfulResourceStatus but lets the
// caller control the condition timestamps (useful for reapply-interval tests).
func successfulResourceStatusWithTime(resources []runtime.Object, conditionTime metav1.Time) hivev1.SyncSetInstanceStatus {
	status := hivev1.SyncSetInstanceStatus{}
	for _, r := range resources {
		obj, _ := meta.Accessor(r)
		status.Resources = append(status.Resources, hivev1.SyncStatus{
			APIVersion: r.GetObjectKind().GroupVersionKind().GroupVersion().String(),
			Kind:       r.GetObjectKind().GroupVersionKind().Kind,
			Resource:   r.GetObjectKind().GroupVersionKind().Kind,
			Name:       obj.GetName(),
			Namespace:  obj.GetNamespace(),
			Hash:       objectHash(obj),
			Conditions: []hivev1.SyncCondition{
				{
					Type:               hivev1.ApplySuccessSyncCondition,
					Status:             corev1.ConditionTrue,
					LastTransitionTime: conditionTime,
					LastProbeTime:      conditionTime,
				},
			},
		})
	}
	return status
}

// successfulSecretReferenceStatus returns an instance status in which every
// secret reference is recorded as successfully applied just now.
func successfulSecretReferenceStatus(secrets ...runtime.Object) hivev1.SyncSetInstanceStatus {
	return successfulSecretReferenceStatusWithTime(secrets, metav1.Now())
}

// successfulSecretReferenceStatusWithTime is the caller-controlled-timestamp
// variant; the hash is the secret's checksum from controllerutils.
func successfulSecretReferenceStatusWithTime(secrets []runtime.Object, conditionTime metav1.Time) hivev1.SyncSetInstanceStatus {
	status := hivev1.SyncSetInstanceStatus{}
	for _, s := range secrets {
		obj, _ := meta.Accessor(s)
		hash, _ := controllerutils.GetChecksumOfObject(s)
		status.SecretReferences = append(status.SecretReferences, hivev1.SyncStatus{
			APIVersion: s.GetObjectKind().GroupVersionKind().GroupVersion().String(),
			Kind:       s.GetObjectKind().GroupVersionKind().Kind,
			Resource:   secretsResource,
			Name:       obj.GetName(),
			Namespace:  obj.GetNamespace(),
			Hash:       hash,
			Conditions: []hivev1.SyncCondition{
				{
					Type:               hivev1.ApplySuccessSyncCondition,
					Status:             corev1.ConditionTrue,
					LastTransitionTime: conditionTime,
					LastProbeTime:      conditionTime,
				},
			},
		})
	}
	return status
}

// successfulPatchStatus returns an instance status in which every patch is
// recorded as successfully applied just now.
func successfulPatchStatus(patches []hivev1.SyncObjectPatch) hivev1.SyncSetInstanceStatus {
	return successfulPatchStatusWithTime(patches, metav1.Now())
}

// successfulPatchStatusWithTime is the caller-controlled-timestamp variant;
// the hash is the md5 of the JSON-encoded patch.
func successfulPatchStatusWithTime(patches []hivev1.SyncObjectPatch, conditionTime metav1.Time) hivev1.SyncSetInstanceStatus {
	status := hivev1.SyncSetInstanceStatus{}
	for _, p := range patches {
		b, _ := json.Marshal(p)
		status.Patches = append(status.Patches, hivev1.SyncStatus{
			APIVersion: p.APIVersion,
			Kind:       p.Kind,
			Name:       p.Name,
			Namespace:  p.Namespace,
			Hash:       fmt.Sprintf("%x", md5.Sum(b)),
			Conditions: []hivev1.SyncCondition{
				{
					Type:               hivev1.ApplySuccessSyncCondition,
					Status:             corev1.ConditionTrue,
					LastTransitionTime: conditionTime,
					LastProbeTime:      conditionTime,
				},
			},
		})
	}
	return status
}
// fakeHelper stands in for the resource-apply helper. It never talks to a real
// cluster; instead, specially named objects/patches trigger simulated failures.
type fakeHelper struct {
	t *testing.T
}

// newHelper ignores the kubeconfig and returns the fakeHelper itself as the Applier.
func (f *fakeHelper) newHelper(kubeconfig []byte, logger log.FieldLogger) Applier {
	return f
}

// ApplyRuntimeObject serializes the object to JSON and defers to Apply.
func (f *fakeHelper) ApplyRuntimeObject(object runtime.Object, scheme *runtime.Scheme) (resource.ApplyResult, error) {
	data, err := json.Marshal(object)
	if err != nil {
		return "", fmt.Errorf("cannot serialize runtime object: %s", err)
	}
	return f.Apply(data)
}

// Apply simulates applying a serialized object: an object named "apply-error"
// fails; everything else succeeds with an unknown apply result.
func (f *fakeHelper) Apply(data []byte) (resource.ApplyResult, error) {
	info, err := f.Info(data)
	if err != nil {
		return "", fmt.Errorf("cannot gather resource info: %s", err)
	}
	if info.Name == "apply-error" {
		return "", fmt.Errorf("cannot apply resource")
	}
	return resource.UnknownApplyResult, nil
}

// Info decodes the serialized object into resource metadata; an object named
// "info-error" simulates a failure to determine the info.
func (f *fakeHelper) Info(data []byte) (*resource.Info, error) {
	r, obj, err := decode(f.t, data)
	if err != nil {
		return nil, fmt.Errorf("cannot decode object: %s", err)
	}
	// Special case when the object's name is info-error
	if obj.GetName() == "info-error" {
		return nil, fmt.Errorf("cannot determine info")
	}
	return &resource.Info{
		Name:       obj.GetName(),
		Namespace:  obj.GetNamespace(),
		Kind:       r.GetObjectKind().GroupVersionKind().Kind,
		APIVersion: r.GetObjectKind().GroupVersionKind().GroupVersion().String(),
		Resource:   r.GetObjectKind().GroupVersionKind().Kind,
	}, nil
}

// Patch simulates applying a patch; any patch body containing "patch-error" fails.
func (f *fakeHelper) Patch(name types.NamespacedName, kind, apiVersion string, patch []byte, patchType string) error {
	p := string(patch)
	if strings.Contains(p, "patch-error") {
		return fmt.Errorf("cannot apply patch")
	}
	return nil
}
// validateSyncSetInstanceStatus compares an actual SyncSetInstanceStatus
// against an expected one: entry counts must match, each actual entry must be
// matched by identity (name/namespace/kind/apiVersion) to an expected entry,
// and matched entries must agree on hash and conditions (via validateSyncStatus).
func validateSyncSetInstanceStatus(t *testing.T, actual, expected hivev1.SyncSetInstanceStatus) {
	if len(actual.Resources) != len(expected.Resources) {
		t.Errorf("number of resource statuses does not match, actual: %d, expected: %d", len(actual.Resources), len(expected.Resources))
		return
	}
	// NOTE(review): unlike the resource-count check above, mismatched patch or
	// secret-reference counts do not return early; the entry-level checks below
	// still run — confirm this asymmetry is intentional.
	if len(actual.Patches) != len(expected.Patches) {
		t.Errorf("number of patch statuses does not match, actual %d, expected: %d", len(actual.Patches), len(expected.Patches))
	}
	if len(actual.SecretReferences) != len(expected.SecretReferences) {
		t.Errorf("number of secret reference statuses does not match, actual %d, expected: %d", len(actual.SecretReferences), len(expected.SecretReferences))
	}
	for _, actualResource := range actual.Resources {
		found := false
		for _, expectedResource := range expected.Resources {
			if matchesResourceStatus(actualResource, expectedResource) {
				found = true
				validateSyncStatus(t, actualResource, expectedResource)
				break
			}
		}
		if !found {
			t.Errorf("got unexpected resource status: %s/%s (kind: %s, apiVersion: %s)",
				actualResource.Namespace, actualResource.Name, actualResource.Kind, actualResource.APIVersion)
		}
	}
	for _, actualPatch := range actual.Patches {
		found := false
		for _, expectedPatch := range expected.Patches {
			if matchesPatchStatus(actualPatch, expectedPatch) {
				found = true
				validateSyncStatus(t, actualPatch, expectedPatch)
				break
			}
		}
		if !found {
			t.Errorf("got unexpected syncset patch status: %s/%s (kind: %s, apiVersion: %s)",
				actualPatch.Namespace, actualPatch.Name, actualPatch.Kind, actualPatch.APIVersion)
		}
	}
	for _, actualSecretReference := range actual.SecretReferences {
		found := false
		for _, expectedSecretReference := range expected.SecretReferences {
			if matchesSecretReferenceStatus(actualSecretReference, expectedSecretReference) {
				found = true
				validateSyncStatus(t, actualSecretReference, expectedSecretReference)
				break
			}
		}
		if !found {
			t.Errorf("got unexpected secret reference status: %s/%s (kind: %s, apiVersion: %s)",
				actualSecretReference.Namespace, actualSecretReference.Name, actualSecretReference.Kind, actualSecretReference.APIVersion)
		}
	}
}
// sameSyncStatusIdentity reports whether a and b identify the same synced
// object (name, namespace, kind and apiVersion all match). Hash and
// conditions are intentionally not compared — callers pair entries by
// identity first, then compare details separately.
func sameSyncStatusIdentity(a, b hivev1.SyncStatus) bool {
	return a.Name == b.Name &&
		a.Namespace == b.Namespace &&
		a.Kind == b.Kind &&
		a.APIVersion == b.APIVersion
}

// matchesResourceStatus reports whether two resource sync statuses refer to
// the same object. (The three matches* helpers previously had identical
// bodies; they now delegate to a single shared implementation.)
func matchesResourceStatus(a, b hivev1.SyncStatus) bool {
	return sameSyncStatusIdentity(a, b)
}

// matchesPatchStatus reports whether two patch sync statuses refer to the
// same object.
func matchesPatchStatus(a, b hivev1.SyncStatus) bool {
	return sameSyncStatusIdentity(a, b)
}

// matchesSecretReferenceStatus reports whether two secret-reference sync
// statuses refer to the same object.
func matchesSecretReferenceStatus(a, b hivev1.SyncStatus) bool {
	return sameSyncStatusIdentity(a, b)
}
func validateSyncStatus(t *testing.T, actual, expected hivev1.SyncStatus) {
if len(actual.Conditions) != len(expected.Conditions) {
t.Errorf("number of conditions do not match for resource %s/%s (kind: %s, apiVersion: %s). Expected: %d, Actual: %d",
actual.Namespace, actual.Name, actual.Kind, actual.APIVersion, len(expected.Conditions), len(actual.Conditions))
return
}
if actual.Hash != expected.Hash {
t.Errorf("hashes don't match for resource %s/%s (kind: %s, apiVersion: %s). Expected: %s, Actual: %s",
actual.Namespace, actual.Name, actual.Kind, actual.APIVersion, expected.Hash, actual.Hash)
return
}
for _, actualCondition := range actual.Conditions {
found := false
for _, expectedCondition := range expected.Conditions {
if actualCondition.Type == expectedCondition.Type {
found = true
if actualCondition.Status != expectedCondition.Status {
t.Errorf("condition does not match, resource %s/%s (kind: %s, apiVersion: %s), condition %s. Expected: %s, Actual: %s",
actual.Namespace, actual.Name, actual.Kind, actual.APIVersion, actualCondition.Type, expectedCondition.Status, actualCondition.Status)
}
}
}
if !found {
t.Errorf("got unexpected condition %s in resource %s/%s (kind: %s, apiVersion: %s)",
actualCondition.Type, actual.Namespace, actual.Name, actual.Kind, actual.APIVersion)
}
}
}
// validateUnknownObjectCondition asserts that the instance status carries
// exactly one syncset-level condition: UnknownObject with status True.
func validateUnknownObjectCondition(t *testing.T, status hivev1.SyncSetInstanceStatus) {
	conditions := status.Conditions
	if len(conditions) != 1 {
		t.Errorf("did not get the expected number of syncset level conditions (1)")
		return
	}
	c := conditions[0]
	if c.Type != hivev1.UnknownObjectSyncCondition {
		t.Errorf("Unexpected type for syncset level condition: %s", c.Type)
	}
	if c.Status != corev1.ConditionTrue {
		t.Errorf("Unexpected condition status: %s", c.Status)
	}
}
// decode deserializes data (expected to be a core/v1 object) and returns both
// the runtime.Object and its metadata accessor. The *testing.T parameter is
// currently unused but kept for call-site symmetry.
func decode(t *testing.T, data []byte) (runtime.Object, metav1.Object, error) {
	decoder := scheme.Codecs.UniversalDecoder(corev1.SchemeGroupVersion)
	r, _, err := decoder.Decode(data, nil, nil)
	if err != nil {
		return nil, nil, err
	}
	obj, err := meta.Accessor(r)
	if err != nil {
		return nil, nil, err
	}
	return r, obj, nil
}
// fakeHashFunc returns a hash function for tests: decodable objects hash via
// objectHash (annotation-driven), anything else falls back to md5 of the raw bytes.
func fakeHashFunc(t *testing.T) func([]byte) string {
	return func(data []byte) string {
		if _, obj, err := decode(t, data); err == nil {
			return objectHash(obj)
		}
		return fmt.Sprintf("%x", md5.Sum(data))
	}
}

// objectHash returns the object's "hash" annotation when present; otherwise a
// deterministic "namespace/name" placeholder.
func objectHash(obj metav1.Object) string {
	annotations := obj.GetAnnotations()
	if hash, ok := annotations["hash"]; ok { // indexing a nil map is safe in Go
		return hash
	}
	return fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName())
}
// deletedItemInfo captures the identity of an object deleted through the fake
// dynamic client, so tests can assert exactly what was removed.
type deletedItemInfo struct {
	name      string
	namespace string
	resource  string
	group     string
	version   string
}

// fakeDynamicClient is a minimal dynamic-client stand-in that records deletions.
type fakeDynamicClient struct {
	deletedItems []deletedItemInfo
}

// fakeNamespaceableClient scopes the fakeDynamicClient to a resource and namespace.
type fakeNamespaceableClient struct {
	client    *fakeDynamicClient
	resource  schema.GroupVersionResource
	namespace string
}

// Resource returns a client scoped to the given group/version/resource.
func (c *fakeDynamicClient) Resource(resource schema.GroupVersionResource) dynamic.NamespaceableResourceInterface {
	return &fakeNamespaceableClient{
		client:   c,
		resource: resource,
	}
}

// Namespace returns a copy of the client additionally scoped to a namespace.
func (c *fakeNamespaceableClient) Namespace(ns string) dynamic.ResourceInterface {
	return &fakeNamespaceableClient{
		client:    c.client,
		resource:  c.resource,
		namespace: ns,
	}
}
// Create is a no-op stub (unused by these tests).
func (c *fakeNamespaceableClient) Create(obj *unstructured.Unstructured, options metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) {
	return nil, nil
}

// Update is a no-op stub (unused by these tests).
func (c *fakeNamespaceableClient) Update(obj *unstructured.Unstructured, options metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) {
	return nil, nil
}

// UpdateStatus is a no-op stub (unused by these tests).
func (c *fakeNamespaceableClient) UpdateStatus(obj *unstructured.Unstructured, options metav1.UpdateOptions) (*unstructured.Unstructured, error) {
	return nil, nil
}

// Delete records the deletion in the shared fakeDynamicClient; an object named
// "delete-error" simulates a failed deletion.
func (c *fakeNamespaceableClient) Delete(name string, options *metav1.DeleteOptions, subresources ...string) error {
	if name == "delete-error" {
		return fmt.Errorf("cannot delete resource")
	}
	c.client.deletedItems = append(c.client.deletedItems, deletedItemInfo{
		name:      name,
		namespace: c.namespace,
		resource:  c.resource.Resource,
		group:     c.resource.Group,
		version:   c.resource.Version,
	})
	return nil
}

// DeleteCollection is a no-op stub (unused by these tests).
func (c *fakeNamespaceableClient) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
	return nil
}

// Get is a no-op stub (unused by these tests).
func (c *fakeNamespaceableClient) Get(name string, options metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) {
	return nil, nil
}

// List is a no-op stub (unused by these tests).
func (c *fakeNamespaceableClient) List(opts metav1.ListOptions) (*unstructured.UnstructuredList, error) {
	return nil, nil
}

// Watch is a no-op stub (unused by these tests).
func (c *fakeNamespaceableClient) Watch(opts metav1.ListOptions) (watch.Interface, error) {
	return nil, nil
}

// Patch is a no-op stub (unused by these tests).
func (c *fakeNamespaceableClient) Patch(name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) {
	return nil, nil
}
// syncSetInstanceForSyncSet builds the SyncSetInstance expected for the given
// cluster deployment / syncset pair, including the controller owner reference
// and a hash of the syncset spec.
func syncSetInstanceForSyncSet(cd *hivev1.ClusterDeployment, syncSet *hivev1.SyncSet) *hivev1.SyncSetInstance {
	ownerRef := metav1.NewControllerRef(cd, hivev1.SchemeGroupVersion.WithKind("ClusterDeployment"))
	hash := computeHash(syncSet.Spec)
	return &hivev1.SyncSetInstance{
		ObjectMeta: metav1.ObjectMeta{
			Name:            syncSetInstanceNameForSyncSet(cd, syncSet),
			Namespace:       cd.Namespace,
			OwnerReferences: []metav1.OwnerReference{*ownerRef},
		},
		Spec: hivev1.SyncSetInstanceSpec{
			ClusterDeployment: corev1.LocalObjectReference{
				Name: cd.Name,
			},
			SyncSet: &corev1.LocalObjectReference{
				Name: syncSet.Name,
			},
			ResourceApplyMode: syncSet.Spec.ResourceApplyMode,
			SyncSetHash:       hash,
		},
	}
}

// syncSetInstanceForSelectorSyncSet is the selector-syncset analogue of
// syncSetInstanceForSyncSet.
func syncSetInstanceForSelectorSyncSet(cd *hivev1.ClusterDeployment, selectorSyncSet *hivev1.SelectorSyncSet) *hivev1.SyncSetInstance {
	ownerRef := metav1.NewControllerRef(cd, hivev1.SchemeGroupVersion.WithKind("ClusterDeployment"))
	hash := computeHash(selectorSyncSet.Spec)
	return &hivev1.SyncSetInstance{
		ObjectMeta: metav1.ObjectMeta{
			Name:            syncSetInstanceNameForSelectorSyncSet(cd, selectorSyncSet),
			Namespace:       cd.Namespace,
			OwnerReferences: []metav1.OwnerReference{*ownerRef},
		},
		Spec: hivev1.SyncSetInstanceSpec{
			ClusterDeployment: corev1.LocalObjectReference{
				Name: cd.Name,
			},
			SelectorSyncSet: &hivev1.SelectorSyncSetReference{
				Name: selectorSyncSet.Name,
			},
			ResourceApplyMode: selectorSyncSet.Spec.ResourceApplyMode,
			SyncSetHash:       hash,
		},
	}
}
// syncSetInstanceNameForSyncSet derives the deterministic SyncSetInstance name
// for a (cluster deployment, syncset) pair; the syncset part is length-limited
// via helpers.GetName so the combined name remains a valid DNS subdomain.
func syncSetInstanceNameForSyncSet(cd *hivev1.ClusterDeployment, syncSet *hivev1.SyncSet) string {
	syncSetPart := helpers.GetName(syncSet.Name, "syncset", validation.DNS1123SubdomainMaxLength-validation.DNS1123LabelMaxLength)
	return fmt.Sprintf("%s-%s", cd.Name, syncSetPart)
}

// syncSetInstanceNameForSelectorSyncSet is the selector-syncset analogue of
// syncSetInstanceNameForSyncSet.
func syncSetInstanceNameForSelectorSyncSet(cd *hivev1.ClusterDeployment, selectorSyncSet *hivev1.SelectorSyncSet) string {
	syncSetPart := helpers.GetName(selectorSyncSet.Name, "selector-syncset", validation.DNS1123SubdomainMaxLength-validation.DNS1123LabelMaxLength)
	return fmt.Sprintf("%s-%s", cd.Name, syncSetPart)
}
// computeHash returns the hex md5 of the JSON encoding of data; used as the
// SyncSetHash recorded on syncset instances. A marshal failure aborts the
// test process via log.Fatalf.
func computeHash(data interface{}) string {
	b, err := json.Marshal(data)
	if err != nil {
		log.Fatalf("error marshaling json: %v", err)
	}
	return fmt.Sprintf("%x", md5.Sum(b))
}
// TestSSIReApplyDuration verifies that ssiReApplyDuration schedules the next
// reapply relative to the *oldest* successful resource apply time: the
// returned delay must equal reapplyInterval minus the age of that apply,
// within the window in which the test itself executed.
func TestSSIReApplyDuration(t *testing.T) {
	startProbe := time.Now()
	resources := []runtime.Object{
		testCM("foo1", "bar", "baz"),
		testCM("foo2", "bar", "baz"),
		testCM("foo3", "bar", "baz"),
	}
	cd := testClusterDeployment()
	ss := testSyncSetWithResources("aaa", resources...)
	ssi := syncSetInstanceForSyncSet(cd, ss)
	ssi.Status = successfulResourceStatus(resources...)
	tenMinutesAgo := metav1.NewTime(time.Now().Add(-10 * time.Minute))
	fortyFiveMinutesAgo := metav1.NewTime(time.Now().Add(-45 * time.Minute))
	ninetyMinutesAgo := metav1.NewTime(time.Now().Add(-90 * time.Minute))
	oldestResourceApplyTimes := []time.Time{
		tenMinutesAgo.Time,
		fortyFiveMinutesAgo.Time,
		ninetyMinutesAgo.Time,
	}
	for i, oldestResourceApplyTime := range oldestResourceApplyTimes {
		// Each iteration pushes one more resource's probe time further into
		// the past, making the i-th entry the oldest apply time.
		ssi.Status.Resources[i].Conditions[0].LastProbeTime = metav1.NewTime(oldestResourceApplyTime)
		requeueAfter := ssiReApplyDuration(ssi)
		endProbe := time.Now()
		// Bound the expected delay by the interval in which the probe could
		// have been taken.
		maxRequeueAfter := reapplyInterval - startProbe.Sub(oldestResourceApplyTime)
		minRequeueAfter := reapplyInterval - endProbe.Sub(oldestResourceApplyTime)
		if maxRequeueAfter < requeueAfter || minRequeueAfter > requeueAfter {
			// Errorf (not Fatalf) so the remaining apply-time cases still run
			// and report their own failures.
			t.Errorf("requeueAfter did not fall between expected times, actual: %v, expected between %v and %v", requeueAfter, minRequeueAfter, maxRequeueAfter)
		}
	}
}
| 1 | 9,841 | This should use `t.Errorf` instead. You want the second part of the test to run even when the first part fails. | openshift-hive | go |
@@ -715,7 +715,7 @@ int main(int argc, char **argv)
#endif
}
- if (ctx.http3 != NULL) {
+ if (ctx.protocol_selector.ratio.http3 > 0) {
h2o_quic_close_all_connections(&ctx.http3->h3);
while (h2o_quic_num_connections(&ctx.http3->h3) != 0) {
#if H2O_USE_LIBUV | 1 | /*
* Copyright (c) 2014-2019 DeNA Co., Ltd., Kazuho Oku, Fastly, Frederik
* Deweerdt, Justin Zhu, Ichito Nagata, Grant Zhang,
* Baodong Chen
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <errno.h>
#ifdef LIBC_HAS_BACKTRACE
#include <execinfo.h>
#endif
#include <getopt.h>
#include <netinet/in.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "picotls.h"
#include "picotls/openssl.h"
#include "quicly.h"
#include "h2o/hostinfo.h"
#include "h2o/httpclient.h"
#include "h2o/serverutil.h"
#ifndef MIN
#define MIN(a, b) (((a) > (b)) ? (b) : (a))
#endif
#define IO_TIMEOUT 5000
/* callbacks used to capture QUIC resumption state (address-validation token and
 * TLS session ticket) */
static int save_http3_token_cb(quicly_save_resumption_token_t *self, quicly_conn_t *conn, ptls_iovec_t token);
static quicly_save_resumption_token_t save_http3_token = {save_http3_token_cb};
static int save_http3_ticket_cb(ptls_save_ticket_t *self, ptls_t *tls, ptls_iovec_t src);
static ptls_save_ticket_t save_http3_ticket = {save_http3_ticket_cb};
/* connection pool and memory pool shared by all requests issued by this process */
static h2o_httpclient_connection_pool_t *connpool;
static h2o_mem_pool_t pool;
/* request description assembled from the command line */
struct {
    const char *url;
    const char *method;
    struct {
        h2o_iovec_t name;
        h2o_iovec_t value;
    } headers[256];
    size_t num_headers;
    size_t body_size;
    h2o_url_t connect_to; /* when CONNECT method is used, req.url specifies the address of the connect proxy, and this field
                             specifies the address of the server to which a TCP connection should be established */
} req = {NULL, "GET"};
/* request count / concurrency and body-generation knobs (see usage()) */
static unsigned cnt_left = 1, concurrency = 1;
static int chunk_size = 10;
static h2o_iovec_t iov_filler; /* template chunk copied by fill_body when synthesizing request bodies */
static int io_interval = 0, req_interval = 0;
static int ssl_verify_none = 0;
/* key exchanges offered for HTTP/3 (X25519 when available, then secp256r1) */
static const ptls_key_exchange_algorithm_t *h3_key_exchanges[] = {
#if PTLS_OPENSSL_HAVE_X25519
    &ptls_openssl_x25519,
#endif
    &ptls_openssl_secp256r1, NULL};
static h2o_http3client_ctx_t h3ctx = {
    .tls = {.random_bytes = ptls_openssl_random_bytes,
            .get_time = &ptls_get_time,
            .key_exchanges = h3_key_exchanges,
            .cipher_suites = ptls_openssl_cipher_suites,
            .save_ticket = &save_http3_ticket,
            },
};
static h2o_httpclient_head_cb on_connect(h2o_httpclient_t *client, const char *errstr, h2o_iovec_t *method, h2o_url_t *url,
                                         const h2o_header_t **headers, size_t *num_headers, h2o_iovec_t *body,
                                         h2o_httpclient_proceed_req_cb *proceed_req_cb, h2o_httpclient_properties_t *props,
                                         h2o_url_t *origin);
static h2o_httpclient_body_cb on_head(h2o_httpclient_t *client, const char *errstr, h2o_httpclient_on_head_t *args);
/* cached resumption state for subsequent HTTP/3 connections */
static struct {
    ptls_iovec_t token;
    ptls_iovec_t ticket;
    quicly_transport_parameters_t tp;
} http3_session;
/* quicly callback: keeps a private copy of the address-validation token received
 * from the server, replacing any previously saved one, so that later connections
 * can present it. */
static int save_http3_token_cb(quicly_save_resumption_token_t *self, quicly_conn_t *conn, ptls_iovec_t token)
{
    /* discard the old token, then store our own copy of the new one */
    free(http3_session.token.base);
    void *copy = h2o_mem_alloc(token.len);
    memcpy(copy, token.base, token.len);
    http3_session.token = ptls_iovec_init(copy, token.len);
    return 0;
}
/* ptls save_ticket callback: caches a copy of the TLS session ticket together
 * with the server's QUIC transport parameters, replacing any previously saved
 * ticket, for use by load_http3_session when resuming. */
static int save_http3_ticket_cb(ptls_save_ticket_t *self, ptls_t *tls, ptls_iovec_t src)
{
    quicly_conn_t *conn = *ptls_get_data_ptr(tls);
    assert(quicly_get_tls(conn) == tls);
    free(http3_session.ticket.base);
    http3_session.ticket = ptls_iovec_init(h2o_mem_alloc(src.len), src.len);
    memcpy(http3_session.ticket.base, src.base, src.len);
    /* transport parameters are stored alongside the ticket so that both can be
     * supplied together on resumption */
    http3_session.tp = *quicly_get_remote_transport_parameters(conn);
    return 0;
}
/* Supplies previously saved HTTP/3 resumption state (token and/or ticket plus
 * transport parameters) to a new connection attempt; the caller takes ownership
 * of the returned copies. Always returns 1 (success). */
static int load_http3_session(h2o_httpclient_ctx_t *ctx, struct sockaddr *server_addr, const char *server_name, ptls_iovec_t *token,
                              ptls_iovec_t *ticket, quicly_transport_parameters_t *tp)
{
    /* TODO respect server_addr, server_name */
    if (http3_session.token.base != NULL) {
        *token = ptls_iovec_init(h2o_mem_alloc(http3_session.token.len), http3_session.token.len);
        memcpy(token->base, http3_session.token.base, http3_session.token.len);
    }
    if (http3_session.ticket.base != NULL) {
        *ticket = ptls_iovec_init(h2o_mem_alloc(http3_session.ticket.len), http3_session.ticket.len);
        memcpy(ticket->base, http3_session.ticket.base, http3_session.ticket.len);
        *tp = http3_session.tp;
    }
    return 1;
}
/* Heap-allocated pairing of an h2o timer with an opaque callback argument; the
 * receiving timer callback is responsible for freeing it. */
struct st_timeout {
    h2o_timer_t timeout;
    void *ptr;
};

/* Allocates an st_timeout carrying `ptr` and arms it on `loop` to invoke `cb`
 * after `delay_ticks` ms. */
static void create_timeout(h2o_loop_t *loop, uint64_t delay_ticks, h2o_timer_cb cb, void *ptr)
{
    struct st_timeout *t = h2o_mem_alloc(sizeof(*t));
    *t = (struct st_timeout){{.cb = cb}, ptr};
    h2o_timer_link(loop, delay_ticks, &t->timeout);
}

/* Deferred-exit timer callback (scheduled by on_error with zero delay);
 * terminates the process with a failure status. */
static void on_exit_deferred(h2o_timer_t *entry)
{
    exit(1);
}
/* Reports a fatal client error to stderr and schedules process exit on the next
 * loop iteration so that pending frames (e.g. GOAWAY) can still be flushed.
 * NOTE(review): vsnprintf may return a length larger than sizeof(errbuf) on
 * truncation; %.*s stops at the NUL terminator so output stays bounded. */
static void on_error(h2o_httpclient_ctx_t *ctx, const char *fmt, ...)
{
    char errbuf[2048];
    va_list args;
    va_start(args, fmt);
    int errlen = vsnprintf(errbuf, sizeof(errbuf), fmt, args);
    va_end(args);
    fprintf(stderr, "%.*s\n", errlen, errbuf);
    /* defer using zero timeout to send pending GOAWAY frame */
    create_timeout(ctx->loop, 0, on_exit_deferred, NULL);
}
/* State for bridging stdin/stdout to an established CONNECT tunnel. */
struct st_tunnel_t {
    h2o_socket_t *std_in;
    h2o_tunnel_t *tunnel;
    /**
     * buffer that stores data inflight (i.e. that being passed to `tunnel->write` for which the completion callback has not been
     * called yet)
     */
    h2o_doublebuffer_t buf;
};

/* Submits buffered stdin data to the tunnel, unless a write is already inflight
 * or there is nothing buffered to send. */
static void tunnel_write(struct st_tunnel_t *tunnel)
{
    if (tunnel->buf.inflight || tunnel->std_in->input->size == 0)
        return;
    h2o_iovec_t vec = h2o_doublebuffer_prepare(&tunnel->buf, &tunnel->std_in->input, SIZE_MAX);
    tunnel->tunnel->write_(tunnel->tunnel, vec.base, vec.len);
}
/* Tunnel write-completion callback: a reported error terminates the process;
 * otherwise the inflight buffer is released and any further pending stdin data
 * is pushed to the tunnel. */
static void tunnel_on_write_complete(h2o_tunnel_t *_tunnel, const char *err)
{
    struct st_tunnel_t *tunnel = _tunnel->data;
    assert(tunnel->tunnel == _tunnel);
    if (err != NULL) {
        fprintf(stderr, "%s\n", err);
        exit(0);
    }
    h2o_doublebuffer_consume(&tunnel->buf);
    tunnel_write(tunnel);
}
/* Read callback for the stdin socket: on error terminates the process,
 * otherwise forwards whatever was buffered to the tunnel. */
static void tunnel_on_stdin_read(h2o_socket_t *sock, const char *err)
{
    struct st_tunnel_t *self = sock->data;
    if (err != NULL)
        exit(0); /* stdin closed; treat as normal termination */
    tunnel_write(self);
}
/* Tunnel read callback: copies received bytes to stdout, reports/terminates on
 * tunnel error, and otherwise asks the tunnel for more data.
 *
 * Fix: the previous code ignored the return value of write(2); a short write or
 * EINTR would silently drop tunneled data. Retry until everything is written or
 * stdout fails permanently. */
static void tunnel_on_read(h2o_tunnel_t *_tunnel, const char *err, const void *bytes, size_t len)
{
    struct st_tunnel_t *tunnel = _tunnel->data;
    assert(tunnel->tunnel == _tunnel);

    const char *p = bytes;
    size_t left = len;
    while (left != 0) {
        ssize_t ret;
        while ((ret = write(1, p, left)) == -1 && errno == EINTR)
            ;
        if (ret == -1)
            break; /* stdout is gone; stop copying but still handle `err` below */
        p += ret;
        left -= (size_t)ret;
    }

    if (err != NULL) {
        fprintf(stderr, "%s\n", err);
        exit(0);
    }

    tunnel->tunnel->proceed_read(tunnel->tunnel);
}
/* Sets up bidirectional forwarding between stdin/stdout and an established
 * tunnel: wraps fd 0 as an h2o socket, wires up the tunnel read/write-complete
 * callbacks, and starts reading from stdin. */
static void tunnel_create(h2o_loop_t *loop, h2o_tunnel_t *_tunnel)
{
    struct st_tunnel_t *tunnel = h2o_mem_alloc(sizeof(*tunnel));
#if H2O_USE_LIBUV
    tunnel->std_in = h2o_uv__poll_create(loop, 0, (uv_close_cb)free);
#else
    tunnel->std_in = h2o_evloop_socket_create(loop, 0, 0);
#endif
    tunnel->std_in->data = tunnel;
    tunnel->tunnel = _tunnel;
    tunnel->tunnel->data = tunnel;
    tunnel->tunnel->on_read = tunnel_on_read;
    tunnel->tunnel->on_write_complete = tunnel_on_write_complete;
    h2o_doublebuffer_init(&tunnel->buf, &h2o_socket_buffer_prototype);
    h2o_socket_read_start(tunnel->std_in, tunnel_on_stdin_read);
}
/* Initiates a single request: parses `req.url`, lazily creates the shared
 * connection pool (including the TLS context loaded with the bundled CA store)
 * on first use, and dispatches via h2o_httpclient_connect().
 *
 * Fix: `crt_fullpath` was heap-allocated but never freed; release it once the
 * CA store has been loaded. */
static void start_request(h2o_httpclient_ctx_t *ctx)
{
    h2o_url_t *url_parsed;

    /* clear memory pool */
    h2o_mem_clear_pool(&pool);

    /* parse URL */
    url_parsed = h2o_mem_alloc_pool(&pool, *url_parsed, 1);
    if (h2o_url_parse(req.url, SIZE_MAX, url_parsed) != 0) {
        on_error(ctx, "unrecognized type of URL: %s", req.url);
        return;
    }

    /* initiate the request */
    if (connpool == NULL) {
        connpool = h2o_mem_alloc(sizeof(*connpool));
        h2o_socketpool_t *sockpool = h2o_mem_alloc(sizeof(*sockpool));
        h2o_socketpool_target_t *target = h2o_socketpool_create_target(url_parsed, NULL);
        h2o_socketpool_init_specific(sockpool, 10, &target, 1, NULL);
        h2o_socketpool_set_timeout(sockpool, IO_TIMEOUT);
        h2o_socketpool_register_loop(sockpool, ctx->loop);
        h2o_httpclient_connection_pool_init(connpool, sockpool);

        /* obtain root */
        char *root, *crt_fullpath;
        if ((root = getenv("H2O_ROOT")) == NULL)
            root = H2O_TO_STR(H2O_ROOT);
#define CA_PATH "/share/h2o/ca-bundle.crt"
        crt_fullpath = h2o_mem_alloc(strlen(root) + strlen(CA_PATH) + 1);
        sprintf(crt_fullpath, "%s%s", root, CA_PATH);
#undef CA_PATH

        SSL_CTX *ssl_ctx = SSL_CTX_new(SSLv23_client_method());
        SSL_CTX_load_verify_locations(ssl_ctx, crt_fullpath, NULL);
        free(crt_fullpath); /* no longer needed once the CA store has been loaded */
        if (ssl_verify_none) {
            SSL_CTX_set_verify(ssl_ctx, SSL_VERIFY_NONE, NULL);
        } else {
            SSL_CTX_set_verify(ssl_ctx, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, NULL);
        }
        h2o_socketpool_set_ssl_ctx(sockpool, ssl_ctx);
        SSL_CTX_free(ssl_ctx); /* the socket pool retains its own reference */
    }
    h2o_httpclient_connect(NULL, &pool, url_parsed, ctx, connpool, url_parsed,
                           strcmp(req.method, "CONNECT") == 0 ? h2o_httpclient_upgrade_to_connect : NULL, on_connect);
}
/* Timer callback fired `req_interval` ms after a response completes; frees the
 * timer wrapper and kicks off the next request. */
static void on_next_request(h2o_timer_t *entry)
{
    struct st_timeout *timeout = H2O_STRUCT_FROM_MEMBER(struct st_timeout, timeout, entry);
    h2o_httpclient_ctx_t *client_ctx = timeout->ptr;
    free(timeout);
    start_request(client_ctx);
}
/* Body callback: streams response bytes to stdout. On end-of-stream, decrements
 * the remaining-request counter and — while more requests than the configured
 * concurrency remain — schedules the next request after `req_interval` ms. */
static int on_body(h2o_httpclient_t *client, const char *errstr)
{
    if (errstr != NULL && errstr != h2o_httpclient_error_is_eos) {
        on_error(client->ctx, errstr);
        return -1;
    }
    fwrite((*client->buf)->bytes, 1, (*client->buf)->size, stdout);
    fflush(stdout);
    h2o_buffer_consume(&(*client->buf), (*client->buf)->size);
    if (errstr == h2o_httpclient_error_is_eos) {
        --cnt_left;
        if (cnt_left >= concurrency) {
            /* next attempt */
            h2o_mem_clear_pool(&pool);
            ftruncate(fileno(stdout), 0); /* ignore error when stdout is a tty */
            create_timeout(client->ctx->loop, req_interval, on_next_request, client->ctx);
        }
    }
    return 0;
}
/* Writes the status line ("HTTP/<major>[.<minor>] <status>[ <msg>]") to stderr;
 * version packs major in the high byte and minor in the low byte. */
static void print_status_line(int version, int status, h2o_iovec_t msg)
{
    int major = version >> 8, minor = version & 0xff;
    fprintf(stderr, "HTTP/%d", major);
    if (minor != 0)
        fprintf(stderr, ".%d", minor);
    fprintf(stderr, " %d", status);
    if (msg.len != 0) {
        fprintf(stderr, " %.*s\n", (int)msg.len, msg.base);
    } else {
        fprintf(stderr, "\n");
    }
}
/* Prints the status line and all response headers to stderr (preferring the
 * original header casing when available), terminated by a blank line. */
static void print_response_headers(int version, int status, h2o_iovec_t msg, h2o_header_t *headers, size_t num_headers)
{
    print_status_line(version, status, msg);
    for (size_t i = 0; i != num_headers; ++i) {
        const char *name = headers[i].orig_name;
        if (name == NULL)
            name = headers[i].name->base;
        fprintf(stderr, "%.*s: %.*s\n", (int)headers[i].name->len, name, (int)headers[i].value.len, headers[i].value.base);
    }
    fprintf(stderr, "\n");
    fflush(stderr);
}

/* Callback for 1xx (informational) responses; simply dumps the headers. */
static int on_informational(h2o_httpclient_t *client, int version, int status, h2o_iovec_t msg, h2o_header_t *headers,
                            size_t num_headers)
{
    print_response_headers(version, status, msg, headers, num_headers);
    return 0;
}
/* Head callback: prints the response headers; switches into tunnel mode when a
 * tunnel has been established (CONNECT), otherwise returns on_body to consume
 * the response body. Returning NULL tells the client not to read a body. */
h2o_httpclient_body_cb on_head(h2o_httpclient_t *client, const char *errstr, h2o_httpclient_on_head_t *args)
{
    if (errstr != NULL && errstr != h2o_httpclient_error_is_eos) {
        on_error(client->ctx, errstr);
        return NULL;
    }
    print_response_headers(args->version, args->status, args->msg, args->headers, args->num_headers);
    if (args->tunnel != NULL) {
        tunnel_create(client->ctx->loop, args->tunnel);
        return NULL;
    }
    if (errstr == h2o_httpclient_error_is_eos) {
        on_error(client->ctx, "no body");
        return NULL;
    }
    return on_body;
}
/* Returns a pointer to the per-client count of request-body bytes still to be
 * generated; the counter lives directly in the client's `data` slot. */
static size_t *remaining_req_bytes(h2o_httpclient_t *client)
{
    return (size_t *)&client->data;
}

/* Fills `reqbuf` with the next chunk of dummy request body (at most one
 * iov_filler's worth) and decrements the remaining-byte counter; returns
 * non-zero once the body has been fully generated. */
int fill_body(h2o_httpclient_t *client, h2o_iovec_t *reqbuf)
{
    size_t *cur_req_body_size = remaining_req_bytes(client);
    if (*cur_req_body_size > 0) {
        memcpy(reqbuf, &iov_filler, sizeof(*reqbuf));
        reqbuf->len = MIN(iov_filler.len, *cur_req_body_size);
        *cur_req_body_size -= reqbuf->len;
        return 0;
    } else {
        *reqbuf = h2o_iovec_init(NULL, 0);
        return 1;
    }
}

/* Timer callback that emits the next request-body chunk; the chunk that empties
 * the counter is flagged as end-of-stream. */
static void on_io_timeout(h2o_timer_t *entry)
{
    struct st_timeout *t = H2O_STRUCT_FROM_MEMBER(struct st_timeout, timeout, entry);
    h2o_httpclient_t *client = t->ptr;
    free(t);
    h2o_iovec_t reqbuf;
    fill_body(client, &reqbuf);
    client->write_req(client, reqbuf, *remaining_req_bytes(client) <= 0);
}

/* proceed_req callback: once previously submitted body bytes have been flushed,
 * schedules generation of the next chunk after `io_interval` ms. */
static void proceed_request(h2o_httpclient_t *client, size_t written, h2o_send_state_t send_state)
{
    if (*remaining_req_bytes(client) > 0)
        create_timeout(client->ctx->loop, io_interval, on_io_timeout, client);
}
/* Connect callback: once a connection is obtained, fills in the request —
 * method, target URL, user-supplied headers, and (optionally) a fixed-size
 * dummy body driven by timer callbacks. For CONNECT the target is the tunnel
 * destination (req.connect_to) rather than the request URL. */
h2o_httpclient_head_cb on_connect(h2o_httpclient_t *client, const char *errstr, h2o_iovec_t *_method, h2o_url_t *url,
                                  const h2o_header_t **headers, size_t *num_headers, h2o_iovec_t *body,
                                  h2o_httpclient_proceed_req_cb *proceed_req_cb, h2o_httpclient_properties_t *props,
                                  h2o_url_t *origin)
{
    h2o_headers_t headers_vec = {NULL};
    size_t i;
    if (errstr != NULL) {
        on_error(client->ctx, errstr);
        return NULL;
    }
    *_method = h2o_iovec_init(req.method, strlen(req.method));
    *url = *(strcmp(req.method, "CONNECT") == 0 ? &req.connect_to : (h2o_url_t *)client->data);
    for (i = 0; i != req.num_headers; ++i)
        h2o_add_header_by_str(&pool, &headers_vec, req.headers[i].name.base, req.headers[i].name.len, 1, NULL,
                              req.headers[i].value.base, req.headers[i].value.len);
    *body = h2o_iovec_init(NULL, 0);
    *proceed_req_cb = NULL;
    if (req.body_size > 0) {
        /* set content-length and start the timer-driven generation of the dummy body */
        *remaining_req_bytes(client) = req.body_size;
        char *clbuf = h2o_mem_alloc_pool(&pool, char, sizeof(H2O_UINT32_LONGEST_STR) - 1);
        size_t clbuf_len = sprintf(clbuf, "%zu", req.body_size);
        h2o_add_header(&pool, &headers_vec, H2O_TOKEN_CONTENT_LENGTH, NULL, clbuf, clbuf_len);
        *proceed_req_cb = proceed_request;
        create_timeout(client->ctx->loop, io_interval, on_io_timeout, client);
    }
    *headers = headers_vec.entries;
    *num_headers = headers_vec.size;
    client->informational_cb = on_informational;
    return on_head;
}
/* Prints command-line usage to stderr. Keep this in sync with the
 * getopt string handled in main(). */
static void usage(const char *progname)
{
    fprintf(stderr,
            "Usage: %s [options] <url>\n"
            "Options:\n"
            "  -2 <ratio>   HTTP/2 ratio (between 0 and 100)\n"
            "  -3           HTTP/3-only mode\n"
            "  -b <size>    size of request body (in bytes; default: 0)\n"
            "  -C <concurrency>\n"
            "               sets the number of requests run at once (default: 1)\n"
            "  -c <size>    size of body chunk (in bytes; default: 10)\n"
            "  -d <delay>   request interval (in msec; default: 0)\n"
            "  -H <name:value>\n"
            "               adds a request header\n"
            "  -i <delay>   I/O interval between sending chunks (in msec; default: 0)\n"
            "  -k           skip peer verification\n"
            "  -m <method>  request method (default: GET)\n"
            "  -o <path>    file to which the response body is written (default: stdout)\n"
            "  -t <times>   number of requests to send the request (default: 1)\n"
            "  -W <bytes>   receive window size (HTTP/3 only)\n"
            "  -x <host:port>\n"
            "               specifies the destination of the CONNECT request; implies\n"
            "               `-m CONNECT`\n"
            "  -h           prints this help\n"
            "\n",
            progname);
}
/* Fatal-signal handler: reports the signal, restores the default disposition
 * (so re-raising terminates the process normally), and dumps a backtrace
 * when the libc supports it. */
static void on_sigfatal(int signo)
{
    fprintf(stderr, "received fatal signal %d\n", signo);

    h2o_set_signal_handler(signo, SIG_DFL);

#ifdef LIBC_HAS_BACKTRACE
    void *frames[128];
    int framecnt = backtrace(frames, sizeof(frames) / sizeof(frames[0]));
    backtrace_symbols_fd(frames, framecnt, 2);
#endif
}
/* Entry point: parses command-line options, initializes TLS/QUIC state and
 * the event loop, then issues `cnt_left` requests with up to `concurrency`
 * in flight.
 * Fixes vs. previous revision (error messages only):
 *  - "failed to bind bind UDP socket" -> "failed to bind UDP socket"
 *  - "-X" in the invalid-server-address message corrected to the actual "-x" flag
 *  - "greather" typo and missing trailing newlines on two diagnostics */
int main(int argc, char **argv)
{
    /* install crash handlers early so a backtrace is printed on fatal signals */
    h2o_set_signal_handler(SIGABRT, on_sigfatal);
    h2o_set_signal_handler(SIGBUS, on_sigfatal);
    h2o_set_signal_handler(SIGFPE, on_sigfatal);
    h2o_set_signal_handler(SIGILL, on_sigfatal);
    h2o_set_signal_handler(SIGSEGV, on_sigfatal);

    h2o_multithread_queue_t *queue;
    h2o_multithread_receiver_t getaddr_receiver;
    h2o_httpclient_ctx_t ctx = {
        .getaddr_receiver = &getaddr_receiver,
        .io_timeout = IO_TIMEOUT,
        .connect_timeout = IO_TIMEOUT,
        .first_byte_timeout = IO_TIMEOUT,
        .keepalive_timeout = IO_TIMEOUT,
        .max_buffer_size = H2O_SOCKET_INITIAL_INPUT_BUFFER_SIZE * 2,
        .http3 = &h3ctx,
    };
    int opt;

    SSL_load_error_strings();
    SSL_library_init();
    OpenSSL_add_all_algorithms();

    /* set up the QUIC/HTTP3 context */
    quicly_amend_ptls_context(&h3ctx.tls);
    h3ctx.quic = quicly_spec_context;
    h3ctx.quic.transport_params.max_streams_uni = 10;
    h3ctx.quic.tls = &h3ctx.tls;
    h3ctx.quic.save_resumption_token = &save_http3_token;
    {
        /* CID encryption key is random and never leaves this process */
        uint8_t random_key[PTLS_SHA256_DIGEST_SIZE];
        h3ctx.tls.random_bytes(random_key, sizeof(random_key));
        h3ctx.quic.cid_encryptor = quicly_new_default_cid_encryptor(
            &ptls_openssl_bfecb, &ptls_openssl_aes128ecb, &ptls_openssl_sha256, ptls_iovec_init(random_key, sizeof(random_key)));
        ptls_clear_memory(random_key, sizeof(random_key));
    }
    h3ctx.quic.stream_open = &h2o_httpclient_http3_on_stream_open;
    h3ctx.load_session = load_http3_session;

#if H2O_USE_LIBUV
    ctx.loop = uv_loop_new();
#else
    ctx.loop = h2o_evloop_create();
#endif

#if H2O_USE_LIBUV
#else
    { /* initialize QUIC context (evloop backend only) */
        int fd;
        struct sockaddr_in sin;
        if ((fd = socket(PF_INET, SOCK_DGRAM, 0)) == -1) {
            perror("failed to create UDP socket");
            exit(EXIT_FAILURE);
        }
        memset(&sin, 0, sizeof(sin));
        if (bind(fd, (void *)&sin, sizeof(sin)) != 0) {
            perror("failed to bind UDP socket");
            exit(EXIT_FAILURE);
        }
        h2o_socket_t *sock = h2o_evloop_socket_create(ctx.loop, fd, H2O_SOCKET_FLAG_DONT_READ);
        h2o_quic_init_context(&h3ctx.h3, ctx.loop, sock, &h3ctx.quic, NULL, h2o_httpclient_http3_notify_connection_update);
    }
#endif

    const char *optstring = "t:m:o:b:x:C:c:d:H:i:k2:W:h3:"
#ifdef __GNUC__
                            ":" /* for backward compatibility, optarg of -3 is optional when using glibc */
#endif
        ;
    while ((opt = getopt(argc, argv, optstring)) != -1) {
        switch (opt) {
        case 't':
            if (sscanf(optarg, "%u", &cnt_left) != 1 || cnt_left < 1) {
                fprintf(stderr, "count (-t) must be a number greater than zero\n");
                exit(EXIT_FAILURE);
            }
            break;
        case 'm':
            req.method = optarg;
            break;
        case 'o':
            if (freopen(optarg, "w", stdout) == NULL) {
                fprintf(stderr, "failed to open file:%s:%s\n", optarg, strerror(errno));
                exit(EXIT_FAILURE);
            }
            break;
        case 'b':
            req.body_size = atoi(optarg);
            if (req.body_size <= 0) {
                fprintf(stderr, "body size must be greater than 0\n");
                exit(EXIT_FAILURE);
            }
            break;
        case 'x':
            /* port 65535 is the "unspecified" sentinel used by h2o_url */
            if (h2o_url_init(&req.connect_to, NULL, h2o_iovec_init(optarg, strlen(optarg)), h2o_iovec_init(NULL, 0)) != 0 ||
                req.connect_to._port == 0 || req.connect_to._port == 65535) {
                fprintf(stderr, "invalid server address specified for -x\n");
                exit(EXIT_FAILURE);
            }
            break;
        case 'C':
            if (sscanf(optarg, "%u", &concurrency) != 1 || concurrency < 1) {
                fprintf(stderr, "concurrency (-C) must be a number greater than zero\n");
                exit(EXIT_FAILURE);
            }
            break;
        case 'c':
            chunk_size = atoi(optarg);
            if (chunk_size <= 0) {
                fprintf(stderr, "chunk size must be greater than 0\n");
                exit(EXIT_FAILURE);
            }
            break;
        case 'd':
            req_interval = atoi(optarg);
            break;
        case 'H': {
            const char *colon, *value_start;
            if ((colon = index(optarg, ':')) == NULL) {
                fprintf(stderr, "no `:` found in -H\n");
                exit(EXIT_FAILURE);
            }
            if (req.num_headers >= sizeof(req.headers) / sizeof(req.headers[0])) {
                fprintf(stderr, "too many request headers\n");
                exit(EXIT_FAILURE);
            }
            /* skip optional whitespace between the colon and the value */
            for (value_start = colon + 1; *value_start == ' ' || *value_start == '\t'; ++value_start)
                ;
            req.headers[req.num_headers].name = h2o_iovec_init(optarg, colon - optarg);
            req.headers[req.num_headers].value = h2o_iovec_init(value_start, strlen(value_start));
            ++req.num_headers;
        } break;
        case 'i':
            io_interval = atoi(optarg);
            break;
        case 'k':
            ssl_verify_none = 1;
            break;
        case '2':
            if (sscanf(optarg, "%" SCNd8, &ctx.protocol_selector.ratio.http2) != 1 ||
                !(0 <= ctx.protocol_selector.ratio.http2 && ctx.protocol_selector.ratio.http2 <= 100)) {
                fprintf(stderr, "failed to parse HTTP/2 ratio (-2)\n");
                exit(EXIT_FAILURE);
            }
            break;
        case '3':
#if H2O_USE_LIBUV
            fprintf(stderr, "HTTP/3 is currently not supported by the libuv backend.\n");
            exit(EXIT_FAILURE);
#else
            if (optarg == NULL) {
                /* parse the optional argument (glibc extension; see above) */
                if (optind < argc && ('0' <= argv[optind][0] && argv[optind][0] <= '9') &&
                    sscanf(argv[optind], "%" SCNd8, &ctx.protocol_selector.ratio.http3) == 1) {
                    ++optind;
                } else {
                    ctx.protocol_selector.ratio.http3 = 100;
                }
            } else {
                if (sscanf(optarg, "%" SCNd8, &ctx.protocol_selector.ratio.http3) != 1)
                    ctx.protocol_selector.ratio.http3 = -1;
            }
            if (!(0 <= ctx.protocol_selector.ratio.http3 && ctx.protocol_selector.ratio.http3 <= 100)) {
                fprintf(stderr, "failed to parse HTTP/3 ratio (-3)\n");
                exit(EXIT_FAILURE);
            }
#endif
            break;
        case 'W': {
            uint64_t v;
            if (sscanf(optarg, "%" PRIu64, &v) != 1) {
                fprintf(stderr, "failed to parse HTTP/3 receive window size (-W)\n");
                exit(EXIT_FAILURE);
            }
            h3ctx.quic.transport_params.max_stream_data.uni = v;
            h3ctx.quic.transport_params.max_stream_data.bidi_local = v;
            h3ctx.quic.transport_params.max_stream_data.bidi_remote = v;
        } break;
        case 'h':
            usage(argv[0]);
            exit(0);
            break;
        default:
            exit(EXIT_FAILURE);
            break;
        }
    }
    argc -= optind;
    argv += optind;

    /* -x implies a CONNECT request */
    if (req.connect_to.authority.len != 0)
        req.method = "CONNECT";

    if (ctx.protocol_selector.ratio.http2 + ctx.protocol_selector.ratio.http3 > 100) {
        fprintf(stderr, "sum of the use ratio of HTTP/2 and HTTP/3 is greater than 100\n");
        exit(EXIT_FAILURE);
    }
    if (argc < 1) {
        fprintf(stderr, "no URL\n");
        exit(EXIT_FAILURE);
    }
    req.url = argv[0];

    if (req.body_size != 0) {
        /* shared filler chunk reused for every body write */
        iov_filler.base = h2o_mem_alloc(chunk_size);
        memset(iov_filler.base, 'a', chunk_size);
        iov_filler.len = chunk_size;
    }
    h2o_mem_init_pool(&pool);

    /* setup context */
    queue = h2o_multithread_create_queue(ctx.loop);
    h2o_multithread_register_receiver(queue, ctx.getaddr_receiver, h2o_hostinfo_getaddr_receiver);

    /* setup the first request(s) */
    for (unsigned i = 0; i < concurrency && i < cnt_left; ++i)
        start_request(&ctx);

    while (cnt_left != 0) {
#if H2O_USE_LIBUV
        uv_run(ctx.loop, UV_RUN_ONCE);
#else
        h2o_evloop_run(ctx.loop, INT32_MAX);
#endif
    }

    /* drain remaining QUIC connections before exiting */
    if (ctx.http3 != NULL) {
        h2o_quic_close_all_connections(&ctx.http3->h3);
        while (h2o_quic_num_connections(&ctx.http3->h3) != 0) {
#if H2O_USE_LIBUV
            uv_run(ctx.loop, UV_RUN_ONCE);
#else
            h2o_evloop_run(ctx.loop, INT32_MAX);
#endif
        }
    }

    return 0;
}
| 1 | 14,964 | Should this if block better be changed to `#if H2O_USE_LIBUV \n #else ... #endif`? The reason I wonder is because that's the way the QUIC context is being initialized at the beginning of this function. Though I would not expect practical difference between the two approaches, because ATM the only case where we create QUIC connections is when the protocol selector is set to non-zero, and because when it is set to non-zero `h2o_quic_close_all_connections` becomes a no-op. | h2o-h2o | c |
@@ -87,8 +87,11 @@ export function useReducer(reducer, initialState, init) {
init == null ? invokeOrReturn(null, initialState) : init(initialState),
action => {
- hookState._value[0] = reducer(hookState._value[0], action);
- hookState._component.setState({});
+ const nextValue = reducer(hookState._value[0], action);
+ if (!Object.is(hookState._value[0], nextValue)) {
+ hookState._value[0] = nextValue;
+ hookState._component.setState({});
+ }
}
];
} | 1 | import { options } from 'preact';
/** @type {number} */
let currentIndex;
/** @type {import('./internal').Component} */
let currentComponent;
/** @type {Array<import('./internal').Component>} */
let afterPaintEffects = [];
// Before each render: reset per-render hook bookkeeping and flush any effects
// still pending from the previous render of this component
let oldBeforeRender = options.render;
options.render = vnode => {
	if (oldBeforeRender) oldBeforeRender(vnode);
	currentComponent = vnode._component;
	currentIndex = 0;
	if (!currentComponent.__hooks) return;
	currentComponent.__hooks._pendingEffects.forEach(invokeEffect);
	currentComponent.__hooks._pendingEffects = [];
};
// After each diff: run layout effects synchronously, before the browser paints
let oldAfterDiff = options.diffed;
options.diffed = vnode => {
	if (oldAfterDiff) oldAfterDiff(vnode);
	const c = vnode._component;
	if (!c) return;
	const hooks = c.__hooks;
	if (!hooks) return;
	// TODO: Consider moving to a global queue. May need to move
	// this to the `commit` option
	hooks._pendingLayoutEffects.forEach(invokeEffect);
	hooks._pendingLayoutEffects = [];
};
// On unmount: invoke every hook's pending cleanup function
let oldBeforeUnmount = options.unmount;
options.unmount = vnode => {
	if (oldBeforeUnmount) oldBeforeUnmount(vnode);
	const c = vnode._component;
	if (!c) return;
	const hooks = c.__hooks;
	if (!hooks) return;
	hooks._list.forEach(hook => hook._cleanup && hook._cleanup());
};
/**
 * Return the state slot for the hook at `index` on the component that is
 * currently rendering, lazily creating the hooks container and the slot.
 * @param {number} index The index of the hook to get
 * @returns {import('./internal').HookState}
 */
function getHookState(index) {
	// Largely inspired by:
	// * https://github.com/michael-klein/funcy.js/blob/f6be73468e6ec46b0ff5aa3cc4c9baf72a29025a/src/hooks/core_hooks.mjs
	// * https://github.com/michael-klein/funcy.js/blob/650beaa58c43c33a74820a3c98b3c7079cf2e333/src/renderer.mjs
	// Other implementations to look at:
	// * https://codesandbox.io/s/mnox05qp8
	let hooks = currentComponent.__hooks;
	if (!hooks) {
		hooks = currentComponent.__hooks = { _list: [], _pendingEffects: [], _pendingLayoutEffects: [] };
	}
	if (index >= hooks._list.length) {
		hooks._list.push({});
	}
	return hooks._list[index];
}
/**
 * Returns a stateful value and a setter. Implemented on top of `useReducer`
 * with `invokeOrReturn`, so lazy initializers and functional updates work.
 */
export function useState(initialState) {
	return useReducer(invokeOrReturn, initialState);
}
/**
 * An alternative to `useState` for state driven by a reducer function.
 * Dispatching an action that leaves the state unchanged (`===` to the
 * current value) skips the re-render entirely.
 * @param {Function} reducer `(state, action) => nextState`
 * @param {any} initialState Initial state, or the argument passed to `init`
 * @param {Function} [init] Optional lazy initializer
 */
export function useReducer(reducer, initialState, init) {
	/** @type {import('./internal').ReducerHookState} */
	const hookState = getHookState(currentIndex++);
	if (hookState._component == null) {
		hookState._component = currentComponent;
		hookState._value = [
			init == null ? invokeOrReturn(null, initialState) : init(initialState),
			action => {
				const nextValue = reducer(hookState._value[0], action);
				// Bail out of re-rendering when the reducer returns the current state
				// (`===` rather than `Object.is` to keep ES5 compatibility)
				if (hookState._value[0] !== nextValue) {
					hookState._value[0] = nextValue;
					hookState._component.setState({});
				}
			}
		];
	}
	return hookState._value;
}
/**
 * Schedule `callback` to run after paint whenever `args` change (or on every
 * render when `args` is omitted).
 * @param {import('./internal').Effect} callback
 * @param {any[]} args
 */
export function useEffect(callback, args) {
	/** @type {import('./internal').EffectHookState} */
	const state = getHookState(currentIndex++);
	if (argsChanged(state._args, args)) {
		state._value = callback;
		state._args = args;
		// queue on the component and arrange a post-paint flush
		currentComponent.__hooks._pendingEffects.push(state);
		afterPaint(currentComponent);
	}
}
/**
 * Like `useEffect`, but the callback runs synchronously after the diff,
 * before the browser paints (flushed from the `diffed` option above).
 * @param {import('./internal').Effect} callback
 * @param {any[]} args
 */
export function useLayoutEffect(callback, args) {
	/** @type {import('./internal').EffectHookState} */
	const state = getHookState(currentIndex++);
	if (argsChanged(state._args, args)) {
		state._value = callback;
		state._args = args;
		currentComponent.__hooks._pendingLayoutEffects.push(state);
	}
}
/**
 * Returns a mutable ref object whose `.current` property is initialized to
 * `initialValue`. The identical object is returned on every render.
 */
export function useRef(initialValue) {
	const hookState = getHookState(currentIndex++);
	if (hookState._value == null) hookState._value = { current: initialValue };
	return hookState._value;
}
/**
 * Memoize the result of `callback`, recomputing only when `args` change.
 * @param {() => any} callback
 * @param {any[]} args
 */
export function useMemo(callback, args) {
	/** @type {import('./internal').MemoHookState} */
	const state = getHookState(currentIndex++);
	if (!argsChanged(state._args, args)) {
		// inputs unchanged: reuse the cached value
		return state._value;
	}
	state._args = args;
	state._callback = callback;
	state._value = callback();
	return state._value;
}
/**
 * Returns a memoized version of `callback` whose identity only changes when
 * `args` change. Sugar over `useMemo`.
 * @param {() => void} callback
 * @param {any[]} args
 */
export function useCallback(callback, args) {
	return useMemo(() => callback, args);
}
/**
 * Returns the current value of `context` and subscribes the component to the
 * nearest provider. Falls back to the context's default value when no
 * provider exists above this component.
 * @param {import('./internal').PreactContext} context
 */
export function useContext(context) {
	const provider = currentComponent.context[context._id];
	if (provider == null) return context._defaultValue;
	const state = getHookState(currentIndex++);
	// subscribe only once per component; the hook slot doubles as the flag
	if (state._value == null) {
		state._value = true;
		provider.sub(currentComponent);
	}
	return provider.props.value;
}
// Note: if someone used Component.debounce = requestAnimationFrame,
// then effects will ALWAYS run on the NEXT frame instead of the current one, incurring a ~16ms delay.
// Perhaps this is not such a big deal.
/**
 * Invoke a component's pending effects after the next frame renders.
 * No-op by default; replaced with a real scheduler below when `window` exists.
 * @type {(component: import('./internal').Component) => void}
 */
let afterPaint = () => {};
/**
 * Drain the after-paint queue: run each queued component's pending effects,
 * skipping components that lost their DOM while waiting for the frame.
 */
function flushAfterPaintEffects() {
	for (const component of afterPaintEffects) {
		component._afterPaintQueued = false;
		if (component._parentDom) {
			component.__hooks._pendingEffects.forEach(invokeEffect);
			component.__hooks._pendingEffects = [];
		}
	}
	afterPaintEffects = [];
}
/** Defer the effect flush by a macrotask so it runs after the frame paints. */
function scheduleFlushAfterPaint() {
	setTimeout(flushAfterPaintEffects, 0);
}
// In the browser, replace the no-op `afterPaint` with a scheduler that batches
// components and flushes their effects on the frame after the upcoming paint
if (typeof window !== 'undefined') {
	afterPaint = (component) => {
		// queue each component at most once; start the rAF chain only for the
		// first entry of a new batch
		if (!component._afterPaintQueued && (component._afterPaintQueued = true) && afterPaintEffects.push(component) === 1) {
			/* istanbul ignore next */
			if (options.requestAnimationFrame) {
				options.requestAnimationFrame(flushAfterPaintEffects);
			}
			else {
				requestAnimationFrame(scheduleFlushAfterPaint);
			}
		}
	};
}
/**
 * Invoke a Hook's effect: run the previous cleanup (if any), call the effect,
 * and remember a returned function as the cleanup for the next invocation.
 * @param {import('./internal').EffectHookState} hook
 */
function invokeEffect(hook) {
	if (hook._cleanup) hook._cleanup();
	const result = hook._value();
	if (typeof result === 'function') hook._cleanup = result;
}
/**
 * Report whether a hook's dependency list changed. A missing previous list
 * always counts as changed (first render, or no deps supplied).
 */
function argsChanged(oldArgs, newArgs) {
	if (oldArgs == null) return true;
	for (let i = 0; i < newArgs.length; i++) {
		if (newArgs[i] !== oldArgs[i]) return true;
	}
	return false;
}
/** Call `f` with `arg` when it is a function; otherwise return `f` as-is. */
function invokeOrReturn(arg, f) {
	if (typeof f === 'function') {
		return f(arg);
	}
	return f;
}
| 1 | 12,790 | `Object.is` is an ES6 feature of JS so I don't think we can use it here (or we have to change our browser support matrix or specify that an Object.is polyfill is pre-req of `preact/hooks`). Should we just do an `===` check in `preact/hooks` and provide a `Object.is` polyfill and version of `useReducer` in `preact/compat`? | preactjs-preact | js |
@@ -22,7 +22,11 @@ class AddUserOperation extends CommandOperation {
const options = this.options;
// Get additional values
- let roles = Array.isArray(options.roles) ? options.roles : [];
+ let roles = Array.isArray(options.roles)
+ ? options.roles
+ : typeof options.roles === 'string'
+ ? [options.roles]
+ : [];
// If not roles defined print deprecated message
// TODO: handle deprecation properly | 1 | 'use strict';
const Aspect = require('./operation').Aspect;
const CommandOperation = require('./command');
const defineAspects = require('./operation').defineAspects;
const crypto = require('crypto');
const handleCallback = require('../utils').handleCallback;
const toError = require('../utils').toError;
class AddUserOperation extends CommandOperation {
  /**
   * @param {object} db The database to add the user to.
   * @param {string} username The username of the new user.
   * @param {string} [password] Optional password for the new user.
   * @param {object} [options] Optional settings (`roles`, `customData`, `dbName`).
   */
  constructor(db, username, password, options) {
    super(db, options);

    this.username = username;
    this.password = password;
  }

  /**
   * Builds the `createUser` command document. `options.roles` may be either
   * an array of roles or a single role string (previously a string was
   * silently discarded).
   */
  _buildCommand() {
    const db = this.db;
    const username = this.username;
    const password = this.password;
    const options = this.options;

    // Normalize roles: accept an array, or a single role given as a string
    let roles = [];
    if (Array.isArray(options.roles)) {
      roles = options.roles;
    } else if (typeof options.roles === 'string') {
      roles = [options.roles];
    }

    // If not roles defined print deprecated message
    // TODO: handle deprecation properly
    if (roles.length === 0) {
      console.log('Creating a user without roles is deprecated in MongoDB >= 2.6');
    }

    // No roles supplied at all: fall back to a default based on the target db
    if (roles.length === 0) {
      roles =
        db.databaseName.toLowerCase() === 'admin' || options.dbName === 'admin'
          ? ['root']
          : ['dbOwner'];
    }

    // Wire version >= 7 (MongoDB 4.0+) digests the password server-side
    const digestPassword = db.s.topology.lastIsMaster().maxWireVersion >= 7;

    let userPassword = password;

    if (!digestPassword) {
      // Older servers expect the client-side MD5 digest of "<user>:mongo:<password>"
      const md5 = crypto.createHash('md5');
      md5.update(username + ':mongo:' + password);
      userPassword = md5.digest('hex');
    }

    // Build the command to execute
    const command = {
      createUser: username,
      customData: options.customData || {},
      roles: roles,
      digestPassword
    };

    // Only send a password field when one was provided
    if (typeof password === 'string') {
      command.pwd = userPassword;
    }

    return command;
  }

  /**
   * Runs the command, rejecting the unsupported `digestPassword` option
   * up front with a descriptive error.
   */
  execute(callback) {
    const options = this.options;

    // Error out if digestPassword set
    if (options.digestPassword != null) {
      return callback(
        toError(
          "The digestPassword option is not supported via add_user. Please use db.command('createUser', ...) instead for this option."
        )
      );
    }

    // Attempt to execute auth command
    super.execute((err, r) => handleCallback(callback, err, err ? null : r));
  }
}
defineAspects(AddUserOperation, Aspect.WRITE_OPERATION);
module.exports = AddUserOperation;
| 1 | 18,621 | This was a bug I picked up by using the TS interface as a guide, this seems like it was / is the intention, also is a bug in master (needs port) | mongodb-node-mongodb-native | js |
@@ -80,6 +80,17 @@ public interface CollectionAdminParams {
*/
String COLL_CONF = "collection.configName";
+ /**
+ * The name of the collection with which a collection is to be co-located
+ */
+ String WITH_COLLECTION = "withCollection";
+
+ /**
+ * The reverse-link to WITH_COLLECTION flag. It is stored in the cluster state of the `withCollection`
+ * and points to the collection on which the `withCollection` was specified.
+ */
+ String COLOCATED_WITH = "COLOCATED_WITH";
+
/**
* Used by cluster properties API as a wrapper key to provide defaults for collection, cluster etc.
* | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.common.params;
import java.util.Arrays;
import java.util.Collection;
public interface CollectionAdminParams {

  /** Param used by DELETESTATUS call to clear all stored responses. */
  String FLUSH = "flush";

  /** Param specifying the name of the collection a command operates on. */
  String COLLECTION = "collection";

  /** Generic count param; its meaning depends on the command that reads it. */
  String COUNT_PROP = "count";

  /** Param specifying a node role (see the ADDROLE/REMOVEROLE commands). */
  String ROLE = "role";

  /** Predefined system collection name. */
  String SYSTEM_COLL = ".system";

  /**
   * A parameter to specify list of Solr nodes to be used (e.g. for collection creation or restore operation).
   */
  String CREATE_NODE_SET_PARAM = "createNodeSet";

  /**
   * A parameter which specifies if the provided list of Solr nodes (via {@linkplain #CREATE_NODE_SET_PARAM})
   * should be shuffled before being used.
   */
  String CREATE_NODE_SET_SHUFFLE_PARAM = "createNodeSet.shuffle";

  /**
   * A parameter to specify the name of the index backup strategy to be used.
   */
  String INDEX_BACKUP_STRATEGY = "indexBackup";

  /**
   * This constant defines the index backup strategy based on copying index files to desired location.
   */
  String COPY_FILES_STRATEGY = "copy-files";

  /**
   * This constant defines the strategy to not copy index files (useful for meta-data only backup).
   */
  String NO_INDEX_BACKUP_STRATEGY = "none";

  /**
   * This constant defines a list of valid index backup strategies.
   */
  Collection<String> INDEX_BACKUP_STRATEGIES =
      Arrays.asList(COPY_FILES_STRATEGY, NO_INDEX_BACKUP_STRATEGY);

  /**
   * Name of collection property to set
   */
  String PROPERTY_NAME = "propertyName";

  /**
   * Value of collection property to set
   */
  String PROPERTY_VALUE = "propertyValue";

  /**
   * The name of the config set to be used for a collection
   */
  String COLL_CONF = "collection.configName";

  /**
   * Used by cluster properties API as a wrapper key to provide defaults for collection, cluster etc.
   *
   * e.g. {defaults:{collection:{replicationFactor:2}}}
   */
  String DEFAULTS = "defaults";

  /**
   * Cluster wide defaults can be nested under this key e.g.
   * {defaults: {cluster:{useLegacyReplicaAssignment:false}}}
   */
  String CLUSTER = "cluster";

  /**
   * When creating a collection create also a specified alias.
   */
  String ALIAS = "alias";

  /**
   * Specifies the target of RENAME operation.
   */
  String TARGET = "target";

  /**
   * Prefix for {@link org.apache.solr.common.cloud.DocRouter} properties
   */
  String ROUTER_PREFIX = "router.";

  /** Option to follow aliases when deciding the target of a collection admin command. */
  String FOLLOW_ALIASES = "followAliases";

  /**
   * When AddReplica is called with this set to true, then we do not try to find node assignments
   * for the add replica API. If set to true, a valid "node" should be specified.
   */
  String SKIP_NODE_ASSIGNMENT = "skipNodeAssignment";
}
| 1 | 39,186 | It would be helpful to explicit here what this really means and assumes. | apache-lucene-solr | java |
@@ -46,6 +46,9 @@ type AddressKey struct {
PriKey keypair.PrivateKey
}
+//ExpectedBalances records expectd balances of admins and delegates
+var ExpectedBalances map[string]*big.Int
+
// LoadAddresses loads key pairs from key pair path and construct addresses
func LoadAddresses(keypairsPath string, chainID uint32) ([]*AddressKey, error) {
// Load Senders' public/private key pairs | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package util
import (
"context"
"encoding/hex"
"io/ioutil"
"math/big"
"math/rand"
"sync"
"time"
"github.com/cenkalti/backoff"
"github.com/pkg/errors"
"go.uber.org/zap"
"gopkg.in/yaml.v2"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/address"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/keypair"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/unit"
"github.com/iotexproject/iotex-core/protogen/iotexapi"
)
// KeyPairs indicate the keypair of accounts getting transfers from Creator in genesis block
type KeyPairs struct {
	// Pairs is the list of hex-encoded key pairs loaded from YAML
	Pairs []KeyPair `yaml:"pkPairs"`
}

// KeyPair contains the public and private key of an address
type KeyPair struct {
	// PK is the hex-encoded public key
	PK string `yaml:"pubKey"`
	// SK is the hex-encoded private key
	SK string `yaml:"priKey"`
}

// AddressKey contains the encoded address and private key of an account
type AddressKey struct {
	// EncodedAddr is the string-encoded iotex address derived from the public key
	EncodedAddr string
	// PriKey signs actions sent from this address
	PriKey keypair.PrivateKey
}
// LoadAddresses loads key pairs from key pair path and construct addresses
func LoadAddresses(keypairsPath string, chainID uint32) ([]*AddressKey, error) {
	// Read the YAML file holding the senders' public/private key pairs
	raw, err := ioutil.ReadFile(keypairsPath)
	if err != nil {
		return nil, errors.Wrap(err, "failed to read key pairs file")
	}
	var pairs KeyPairs
	if err := yaml.Unmarshal(raw, &pairs); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal key pairs bytes")
	}
	// Derive an iotex address from every decoded key pair
	result := make([]*AddressKey, 0, len(pairs.Pairs))
	for _, pair := range pairs.Pairs {
		pk, err := keypair.HexStringToPublicKey(pair.PK)
		if err != nil {
			return nil, errors.Wrap(err, "failed to decode public key")
		}
		sk, err := keypair.HexStringToPrivateKey(pair.SK)
		if err != nil {
			return nil, errors.Wrap(err, "failed to decode private key")
		}
		addr, err := address.FromBytes(pk.Hash())
		if err != nil {
			return nil, err
		}
		result = append(result, &AddressKey{EncodedAddr: addr.String(), PriKey: sk})
	}
	return result, nil
}
// InitCounter initializes the map of nonce counter of each address
// by querying each account's pending nonce from the API endpoint, retrying
// with exponential backoff until the node responds.
func InitCounter(client iotexapi.APIServiceClient, addrKeys []*AddressKey) (map[string]uint64, error) {
	counter := make(map[string]uint64)
	for _, addrKey := range addrKeys {
		addr := addrKey.EncodedAddr
		err := backoff.Retry(func() error {
			acctDetails, err := client.GetAccount(context.Background(), &iotexapi.GetAccountRequest{Address: addr})
			if err != nil {
				return err
			}
			counter[addr] = acctDetails.GetAccountMeta().PendingNonce
			return nil
		}, backoff.NewExponentialBackOff())
		if err != nil {
			return nil, errors.Wrapf(err, "failed to get address details of %s", addrKey.EncodedAddr)
		}
	}
	return counter, nil
}
// InjectByAps injects Actions in APS Mode
// It fires randomly chosen actions (transfer/vote/execution) at the requested
// rate until `duration` elapses, and periodically (every `resetInterval`
// seconds) re-syncs the local nonce counters with the chain to recover from
// dropped actions.
func InjectByAps(
	wg *sync.WaitGroup,
	aps float64,
	counter map[string]uint64,
	transferGasLimit int,
	transferGasPrice int,
	transferPayload string,
	voteGasLimit int,
	voteGasPrice int,
	contract string,
	executionAmount int,
	executionGasLimit int,
	executionGasPrice int,
	executionData string,
	client iotexapi.APIServiceClient,
	admins []*AddressKey,
	delegates []*AddressKey,
	duration time.Duration,
	retryNum int,
	retryInterval int,
	resetInterval int,
) {
	timeout := time.After(duration)
	// tick period is 1/aps seconds, expressed in microseconds
	tick := time.Tick(time.Duration(1/float64(aps)*1000000) * time.Microsecond)
	reset := time.Tick(time.Duration(resetInterval) * time.Second)
	rand.Seed(time.Now().UnixNano())
loop:
	for {
		select {
		case <-timeout:
			break loop
		case <-reset:
			// re-sync the pending nonce of every admin and delegate account
			for _, admin := range admins {
				addr := admin.EncodedAddr
				err := backoff.Retry(func() error {
					acctDetails, err := client.GetAccount(context.Background(), &iotexapi.GetAccountRequest{Address: addr})
					if err != nil {
						return err
					}
					counter[addr] = acctDetails.GetAccountMeta().PendingNonce
					return nil
				}, backoff.NewExponentialBackOff())
				if err != nil {
					log.L().Fatal("Failed to inject actions by APS",
						zap.Error(err),
						zap.String("addr", admin.EncodedAddr))
				}
			}
			for _, delegate := range delegates {
				addr := delegate.EncodedAddr
				err := backoff.Retry(func() error {
					acctDetails, err := client.GetAccount(context.Background(), &iotexapi.GetAccountRequest{Address: addr})
					if err != nil {
						return err
					}
					counter[addr] = acctDetails.GetAccountMeta().PendingNonce
					return nil
				}, backoff.NewExponentialBackOff())
				if err != nil {
					log.L().Fatal("Failed to inject actions by APS",
						zap.Error(err),
						zap.String("addr", delegate.EncodedAddr))
				}
			}
		case <-tick:
			wg.Add(1)
			// pick an action type uniformly: 0=transfer, 1=vote, 2=execution
			switch rand := rand.Intn(3); rand {
			case 0:
				sender, recipient, nonce := createTransferInjection(counter, delegates)
				go injectTransfer(wg, client, sender, recipient, nonce, uint64(transferGasLimit),
					big.NewInt(int64(transferGasPrice)), transferPayload, retryNum, retryInterval)
			case 1:
				sender, recipient, nonce := createVoteInjection(counter, admins, delegates)
				go injectVote(wg, client, sender, recipient, nonce, uint64(voteGasLimit),
					big.NewInt(int64(voteGasPrice)), retryNum, retryInterval)
			case 2:
				executor, nonce := createExecutionInjection(counter, delegates)
				go injectExecInteraction(wg, client, executor, contract, nonce, big.NewInt(int64(executionAmount)),
					uint64(executionGasLimit), big.NewInt(int64(executionGasPrice)),
					executionData, retryNum, retryInterval)
			}
		}
	}
}
// InjectByInterval injects Actions in Interval Mode
// Transfers, votes and executions are injected round-robin with a fixed sleep
// between actions; once one category's count is exhausted, the remaining
// categories continue (pairwise, then singly) until all counts reach zero.
func InjectByInterval(
	transferNum int,
	transferGasLimit int,
	transferGasPrice int,
	transferPayload string,
	voteNum int,
	voteGasLimit int,
	voteGasPrice int,
	executionNum int,
	contract string,
	executionAmount int,
	executionGasLimit int,
	executionGasPrice int,
	executionData string,
	interval int,
	counter map[string]uint64,
	client iotexapi.APIServiceClient,
	admins []*AddressKey,
	delegates []*AddressKey,
	retryNum int,
	retryInterval int,
) {
	rand.Seed(time.Now().UnixNano())
	// phase 1: all three categories still have budget
	for transferNum > 0 && voteNum > 0 && executionNum > 0 {
		sender, recipient, nonce := createTransferInjection(counter, delegates)
		injectTransfer(nil, client, sender, recipient, nonce, uint64(transferGasLimit),
			big.NewInt(int64(transferGasPrice)), transferPayload, retryNum, retryInterval)
		time.Sleep(time.Second * time.Duration(interval))
		sender, recipient, nonce = createVoteInjection(counter, admins, delegates)
		injectVote(nil, client, sender, recipient, nonce, uint64(voteGasLimit),
			big.NewInt(int64(voteGasPrice)), retryNum, retryInterval)
		time.Sleep(time.Second * time.Duration(interval))
		executor, nonce := createExecutionInjection(counter, delegates)
		injectExecInteraction(nil, client, executor, contract, nonce, big.NewInt(int64(executionAmount)),
			uint64(executionGasLimit), big.NewInt(int64(executionGasPrice)), executionData, retryNum, retryInterval)
		time.Sleep(time.Second * time.Duration(interval))
		transferNum--
		voteNum--
		executionNum--
	}
	// phase 2: exactly two categories still have budget
	switch {
	case transferNum > 0 && voteNum > 0:
		for transferNum > 0 && voteNum > 0 {
			sender, recipient, nonce := createTransferInjection(counter, delegates)
			injectTransfer(nil, client, sender, recipient, nonce, uint64(transferGasLimit),
				big.NewInt(int64(transferGasPrice)), transferPayload, retryNum, retryInterval)
			time.Sleep(time.Second * time.Duration(interval))
			sender, recipient, nonce = createVoteInjection(counter, admins, delegates)
			injectVote(nil, client, sender, recipient, nonce, uint64(voteGasLimit),
				big.NewInt(int64(voteGasPrice)), retryNum, retryInterval)
			time.Sleep(time.Second * time.Duration(interval))
			transferNum--
			voteNum--
		}
	case transferNum > 0 && executionNum > 0:
		for transferNum > 0 && executionNum > 0 {
			sender, recipient, nonce := createTransferInjection(counter, delegates)
			injectTransfer(nil, client, sender, recipient, nonce, uint64(transferGasLimit),
				big.NewInt(int64(transferGasPrice)), transferPayload, retryNum, retryInterval)
			time.Sleep(time.Second * time.Duration(interval))
			executor, nonce := createExecutionInjection(counter, delegates)
			injectExecInteraction(nil, client, executor, contract, nonce, big.NewInt(int64(executionAmount)),
				uint64(executionGasLimit), big.NewInt(int64(executionGasPrice)), executionData, retryNum, retryInterval)
			time.Sleep(time.Second * time.Duration(interval))
			transferNum--
			executionNum--
		}
	case voteNum > 0 && executionNum > 0:
		for voteNum > 0 && executionNum > 0 {
			sender, recipient, nonce := createVoteInjection(counter, admins, delegates)
			injectVote(nil, client, sender, recipient, nonce, uint64(voteGasLimit),
				big.NewInt(int64(voteGasPrice)), retryNum, retryInterval)
			time.Sleep(time.Second * time.Duration(interval))
			executor, nonce := createExecutionInjection(counter, delegates)
			injectExecInteraction(nil, client, executor, contract, nonce, big.NewInt(int64(executionAmount)),
				uint64(executionGasLimit), big.NewInt(int64(executionGasPrice)), executionData, retryNum, retryInterval)
			time.Sleep(time.Second * time.Duration(interval))
			voteNum--
			executionNum--
		}
	}
	// phase 3: at most one category still has budget
	switch {
	case transferNum > 0:
		for transferNum > 0 {
			sender, recipient, nonce := createTransferInjection(counter, delegates)
			injectTransfer(nil, client, sender, recipient, nonce, uint64(transferGasLimit),
				big.NewInt(int64(transferGasPrice)), transferPayload, retryNum, retryInterval)
			time.Sleep(time.Second * time.Duration(interval))
			transferNum--
		}
	case voteNum > 0:
		for voteNum > 0 {
			sender, recipient, nonce := createVoteInjection(counter, admins, delegates)
			injectVote(nil, client, sender, recipient, nonce, uint64(voteGasLimit),
				big.NewInt(int64(voteGasPrice)), retryNum, retryInterval)
			time.Sleep(time.Second * time.Duration(interval))
			voteNum--
		}
	case executionNum > 0:
		for executionNum > 0 {
			executor, nonce := createExecutionInjection(counter, delegates)
			injectExecInteraction(nil, client, executor, contract, nonce, big.NewInt(int64(executionAmount)),
				uint64(executionGasLimit), big.NewInt(int64(executionGasPrice)), executionData, retryNum, retryInterval)
			time.Sleep(time.Second * time.Duration(interval))
			executionNum--
		}
	}
}
// DeployContract deploys a smart contract before starting action injections
func DeployContract(
	client iotexapi.APIServiceClient,
	counter map[string]uint64,
	delegates []*AddressKey,
	executionGasLimit int,
	executionGasPrice int,
	executionData string,
	retryNum int,
	retryInterval int,
) (hash.Hash256, error) {
	// Pick a random deployer with its next pending nonce, then sign a
	// deployment: an execution against the empty address with zero amount.
	deployer, nonce := createExecutionInjection(counter, delegates)
	sealed, exec, err := createSignedExecution(
		deployer,
		action.EmptyAddress,
		nonce,
		big.NewInt(0),
		uint64(executionGasLimit),
		big.NewInt(int64(executionGasPrice)),
		executionData,
	)
	if err != nil {
		return hash.ZeroHash256, errors.Wrap(err, "failed to create signed execution")
	}
	log.L().Info("Created signed execution")

	injectExecution(sealed, exec, client, retryNum, retryInterval)
	return sealed.Hash(), nil
}
// injectTransfer signs a transfer of a random 1-4 IOTX amount from sender
// to recipient and submits it to the API client, retrying the send with a
// constant backoff on failure. wg, when non-nil, is marked done afterwards.
func injectTransfer(
	wg *sync.WaitGroup,
	c iotexapi.APIServiceClient,
	sender *AddressKey,
	recipient *AddressKey,
	nonce uint64,
	gasLimit uint64,
	gasPrice *big.Int,
	payload string,
	retryNum int,
	retryInterval int,
) {
	// Uniform over {1, 2, 3, 4}: equivalent to the previous loop that
	// re-rolled rand.Intn(5) until it produced a non-zero value.
	amount := int64(rand.Intn(4) + 1)

	selp, _, err := createSignedTransfer(sender, recipient, unit.ConvertIotxToRau(amount), nonce, gasLimit,
		gasPrice, payload)
	if err != nil {
		log.L().Fatal("Failed to inject transfer", zap.Error(err))
	}
	log.L().Info("Created signed transfer")

	bo := backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Duration(retryInterval)*time.Second), uint64(retryNum))
	if err := backoff.Retry(func() error {
		_, err := c.SendAction(context.Background(), &iotexapi.SendActionRequest{Action: selp.Proto()})
		return err
	}, bo); err != nil {
		log.L().Error("Failed to inject transfer", zap.Error(err))
	}

	if wg != nil {
		wg.Done()
	}
}
// injectVote signs a vote from sender to recipient and submits it through
// the API client, retrying the send with a constant backoff on failure.
// wg, when non-nil, is marked done after the attempt.
func injectVote(
	wg *sync.WaitGroup,
	c iotexapi.APIServiceClient,
	sender *AddressKey,
	recipient *AddressKey,
	nonce uint64,
	gasLimit uint64,
	gasPrice *big.Int,
	retryNum int,
	retryInterval int,
) {
	sealed, _, err := createSignedVote(sender, recipient, nonce, gasLimit, gasPrice)
	if err != nil {
		log.L().Fatal("Failed to inject vote", zap.Error(err))
	}
	log.L().Info("Created signed vote")

	send := func() error {
		_, sendErr := c.SendAction(context.Background(), &iotexapi.SendActionRequest{Action: sealed.Proto()})
		return sendErr
	}
	policy := backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Duration(retryInterval)*time.Second), uint64(retryNum))
	if err := backoff.Retry(send, policy); err != nil {
		log.L().Error("Failed to inject vote", zap.Error(err))
	}

	if wg != nil {
		wg.Done()
	}
}
// injectExecInteraction signs a contract interaction for executor against
// contract and submits it via injectExecution. wg, when non-nil, is marked
// done after the attempt.
func injectExecInteraction(
	wg *sync.WaitGroup,
	c iotexapi.APIServiceClient,
	executor *AddressKey,
	contract string,
	nonce uint64,
	amount *big.Int,
	gasLimit uint64,
	gasPrice *big.Int,
	data string,
	retryNum int,
	retryInterval int,
) {
	sealed, exec, err := createSignedExecution(executor, contract, nonce, amount, gasLimit, gasPrice, data)
	if err != nil {
		log.L().Fatal("Failed to inject execution", zap.Error(err))
	}
	log.L().Info("Created signed execution")

	injectExecution(sealed, exec, c, retryNum, retryInterval)

	if wg != nil {
		wg.Done()
	}
}
// Helper function to get the sender, recipient, and nonce of next injected transfer
func createTransferInjection(
	counter map[string]uint64,
	addrs []*AddressKey,
) (*AddressKey, *AddressKey, uint64) {
	// Both endpoints are drawn at random (and may coincide); the nonce is
	// tracked per sender address and bumped after being handed out.
	from := addrs[rand.Intn(len(addrs))]
	to := addrs[rand.Intn(len(addrs))]
	next := counter[from.EncodedAddr]
	counter[from.EncodedAddr]++
	return from, to, next
}
// Helper function to get the sender, recipient, and nonce of next injected vote
func createVoteInjection(
	counter map[string]uint64,
	admins []*AddressKey,
	delegates []*AddressKey,
) (*AddressKey, *AddressKey, uint64) {
	// Votes always originate from an admin and target a delegate.
	voter := admins[rand.Intn(len(admins))]
	votee := delegates[rand.Intn(len(delegates))]
	next := counter[voter.EncodedAddr]
	counter[voter.EncodedAddr]++
	return voter, votee, next
}
// Helper function to get the executor and nonce of next injected execution
func createExecutionInjection(
	counter map[string]uint64,
	addrs []*AddressKey,
) (*AddressKey, uint64) {
	// Pick a random executor and consume its next pending nonce.
	exec := addrs[rand.Intn(len(addrs))]
	next := counter[exec.EncodedAddr]
	counter[exec.EncodedAddr]++
	return exec, next
}
// Helper function to create and sign a transfer
func createSignedTransfer(
	sender *AddressKey,
	recipient *AddressKey,
	amount *big.Int,
	nonce uint64,
	gasLimit uint64,
	gasPrice *big.Int,
	payload string,
) (action.SealedEnvelope, *action.Transfer, error) {
	// The payload arrives hex-encoded and is carried as raw bytes.
	payloadBytes, err := hex.DecodeString(payload)
	if err != nil {
		return action.SealedEnvelope{}, nil, errors.Wrapf(err, "failed to decode payload %s", payload)
	}
	tsf, err := action.NewTransfer(
		nonce, amount, recipient.EncodedAddr, payloadBytes, gasLimit, gasPrice)
	if err != nil {
		return action.SealedEnvelope{}, nil, errors.Wrap(err, "failed to create raw transfer")
	}
	// Wrap the raw transfer in an envelope and sign with the sender's key.
	envelope := (&action.EnvelopeBuilder{}).
		SetNonce(nonce).
		SetGasPrice(gasPrice).
		SetGasLimit(gasLimit).
		SetAction(tsf).Build()
	sealed, err := action.Sign(envelope, sender.PriKey)
	if err != nil {
		return action.SealedEnvelope{}, nil, errors.Wrapf(err, "failed to sign transfer %v", envelope)
	}
	return sealed, tsf, nil
}
// Helper function to create and sign a vote
func createSignedVote(
	voter *AddressKey,
	votee *AddressKey,
	nonce uint64,
	gasLimit uint64,
	gasPrice *big.Int,
) (action.SealedEnvelope, *action.Vote, error) {
	rawVote, err := action.NewVote(nonce, votee.EncodedAddr, gasLimit, gasPrice)
	if err != nil {
		return action.SealedEnvelope{}, nil, errors.Wrap(err, "failed to create raw vote")
	}
	// Wrap the raw vote in an envelope and sign with the voter's key.
	envelope := (&action.EnvelopeBuilder{}).
		SetNonce(nonce).
		SetGasPrice(gasPrice).
		SetGasLimit(gasLimit).
		SetAction(rawVote).Build()
	sealed, err := action.Sign(envelope, voter.PriKey)
	if err != nil {
		return action.SealedEnvelope{}, nil, errors.Wrapf(err, "failed to sign vote %v", envelope)
	}
	return sealed, rawVote, nil
}
// Helper function to create and sign an execution
func createSignedExecution(
	executor *AddressKey,
	contract string,
	nonce uint64,
	amount *big.Int,
	gasLimit uint64,
	gasPrice *big.Int,
	data string,
) (action.SealedEnvelope, *action.Execution, error) {
	// Call data arrives hex-encoded and is decoded to raw bytes first.
	dataBytes, err := hex.DecodeString(data)
	if err != nil {
		return action.SealedEnvelope{}, nil, errors.Wrapf(err, "failed to decode data %s", data)
	}
	exec, err := action.NewExecution(contract, nonce, amount, gasLimit, gasPrice, dataBytes)
	if err != nil {
		return action.SealedEnvelope{}, nil, errors.Wrap(err, "failed to create raw execution")
	}
	// Wrap the raw execution in an envelope and sign with the executor's key.
	envelope := (&action.EnvelopeBuilder{}).
		SetNonce(nonce).
		SetGasPrice(gasPrice).
		SetGasLimit(gasLimit).
		SetAction(exec).Build()
	sealed, err := action.Sign(envelope, executor.PriKey)
	if err != nil {
		return action.SealedEnvelope{}, nil, errors.Wrapf(err, "failed to sign execution %v", envelope)
	}
	return sealed, exec, nil
}
// injectExecution submits a signed execution to the API client, retrying
// with a constant backoff up to retryNum times, retryInterval seconds apart.
// The execution argument is kept for interface compatibility; only the
// sealed envelope is sent.
func injectExecution(
	selp action.SealedEnvelope,
	execution *action.Execution,
	c iotexapi.APIServiceClient,
	retryNum int,
	retryInterval int,
) {
	send := func() error {
		_, sendErr := c.SendAction(context.Background(), &iotexapi.SendActionRequest{Action: selp.Proto()})
		return sendErr
	}
	policy := backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Duration(retryInterval)*time.Second), uint64(retryNum))
	if err := backoff.Retry(send, policy); err != nil {
		log.L().Error("Failed to inject execution", zap.Error(err))
	}
}
| 1 | 16,222 | `ExpectedBalances` is a global variable (from `gochecknoglobals`) | iotexproject-iotex-core | go |
@@ -1,7 +1,7 @@
module RSpec
module Core
module Formatters
- # This class extracts code snippets by looking at the backtrace of the passed error
+ # Extracts code snippets by looking at the backtrace of the passed error and applies synax highlighting and line numbers using html.
class SnippetExtractor
class NullConverter; def convert(code, pre); code; end; end
module RSpec
  module Core
    module Formatters
      # This class extracts code snippets by looking at the backtrace of the passed error
      class SnippetExtractor
        # Fallback used when the `syntax` gem is unavailable: returns the
        # code untouched instead of HTML-highlighting it.
        class NullConverter; def convert(code, pre); code; end; end

        begin
          require 'syntax/convertors/html'
          @@converter = Syntax::Convertors::HTML.for_syntax "ruby"
        rescue LoadError
          @@converter = NullConverter.new
        end

        # Returns an HTML snippet (with line numbers and the offending line
        # marked) for the first entry of the given backtrace.
        def snippet(backtrace)
          raw_code, line = snippet_for(backtrace[0])
          highlighted = @@converter.convert(raw_code, false)
          highlighted << "\n<span class=\"comment\"># gem install syntax to get syntax highlighting</span>" if @@converter.is_a?(NullConverter)
          post_process(highlighted, line)
        end

        # Parses a "file:line" backtrace entry and returns [code, line];
        # falls back to a placeholder comment when the entry is unparsable.
        def snippet_for(error_line)
          if error_line =~ /(.*):(\d+)/
            file = $1
            line = $2.to_i
            [lines_around(file, line), line]
          else
            ["# Couldn't get snippet for #{error_line}", 1]
          end
        end

        # Returns the source lines surrounding +line+ (two before, one
        # after), joined with newlines. The previous version built and
        # discarded an unused +selected_lines+ array; that dead code is
        # removed here.
        def lines_around(file, line)
          if File.file?(file)
            lines = File.read(file).split("\n")
            min = [0, line-3].max
            max = [line+1, lines.length-1].min
            lines[min..max].join("\n")
          else
            "# Couldn't get snippet for #{file}"
          end
        rescue SecurityError
          "# Couldn't get snippet for #{file}"
        end

        # Prefixes each highlighted line with its line number and wraps the
        # offending line (always the third line of the snippet) in a marker
        # span.
        def post_process(highlighted, offending_line)
          new_lines = []
          highlighted.split("\n").each_with_index do |line, i|
            new_line = "<span class=\"linenum\">#{offending_line+i-2}</span>#{line}"
            new_line = "<span class=\"offending\">#{new_line}</span>" if i == 2
            new_lines << new_line
          end
          new_lines.join("\n")
        end
      end
    end
  end
end
| 1 | 8,377 | This is another place where I'd feel more comfortable with declaring the class private. We can always make it public in the future if someone makes a case for that, but I prefer to err on the side of privateness for things like this that 99% of RSpec users won't have a reason to use. | rspec-rspec-core | rb |
@@ -57,11 +57,14 @@ func addAccountToMemResolver(s *Server, pub, jwtclaim string) {
s.mu.Unlock()
}
-func createClient(t *testing.T, s *Server, akp nkeys.KeyPair) (*client, *bufio.Reader, string) {
+func createClient(t *testing.T, s *Server, akp nkeys.KeyPair, optIssuerAccount string) (*client, *bufio.Reader, string) {
t.Helper()
nkp, _ := nkeys.CreateUser()
pub, _ := nkp.PublicKey()
nuc := jwt.NewUserClaims(pub)
+ if optIssuerAccount != "" {
+ nuc.IssuerAccount = optIssuerAccount
+ }
ujwt, err := nuc.Encode(akp)
if err != nil {
t.Fatalf("Error generating user JWT: %v", err) | 1 | // Copyright 2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bufio"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
"time"
"github.com/nats-io/jwt"
"github.com/nats-io/nkeys"
)
var (
	// This matches ./configs/nkeys_jwts/test.seed
	oSeed = []byte("SOAFYNORQLQFJYBYNUGC5D7SH2MXMUX5BFEWWGHN3EK4VGG5TPT5DZP7QU")
	// Seed for a fixed account key pair used by tests below
	// (see TestJWTUserPermissionClaims).
	aSeed = []byte("SAANRM6JVDEYZTR6DXCWUSDDJHGOHAFITXEQBSEZSY5JENTDVRZ6WNKTTY")
)
// opTrustBasicSetup starts a raw server whose options trust the operator
// public key derived from oSeed.
func opTrustBasicSetup() *Server {
	kp, _ := nkeys.FromSeed(oSeed)
	opub, _ := kp.PublicKey()
	opts := defaultServerOptions
	opts.TrustedKeys = []string{opub}
	srv, _, _, _ := rawSetup(opts)
	return srv
}
// buildMemAccResolver installs a fresh in-memory account resolver on the
// server, replacing whatever resolver was previously configured.
func buildMemAccResolver(s *Server) {
	mr := &MemAccResolver{}
	s.mu.Lock()
	s.accResolver = mr
	s.mu.Unlock()
}
// addAccountToMemResolver stores an account JWT under its public key in the
// server's account resolver (expected to be the memory resolver installed
// by buildMemAccResolver).
func addAccountToMemResolver(s *Server, pub, jwtclaim string) {
	s.mu.Lock()
	s.accResolver.Store(pub, jwtclaim)
	s.mu.Unlock()
}
// createClient generates a fresh user key pair, encodes its user claims
// into a JWT signed by the given account key pair, and returns a raw
// client for the server, its read side, and the CONNECT+PING control line
// carrying the JWT plus a signature over the server's nonce.
func createClient(t *testing.T, s *Server, akp nkeys.KeyPair) (*client, *bufio.Reader, string) {
	t.Helper()
	nkp, _ := nkeys.CreateUser()
	pub, _ := nkp.PublicKey()
	nuc := jwt.NewUserClaims(pub)
	ujwt, err := nuc.Encode(akp)
	if err != nil {
		t.Fatalf("Error generating user JWT: %v", err)
	}
	c, cr, l := newClientForServer(s)
	// Sign Nonce: l is the server's first protocol line; l[5:] strips the
	// leading "INFO " so the remaining JSON can be parsed for the nonce.
	var info nonceInfo
	json.Unmarshal([]byte(l[5:]), &info)
	sigraw, _ := nkp.Sign([]byte(info.Nonce))
	sig := base64.RawURLEncoding.EncodeToString(sigraw)
	// Trailing PING flushes the server's +OK/-ERR response to the reader.
	cs := fmt.Sprintf("CONNECT {\"jwt\":%q,\"sig\":\"%s\"}\r\nPING\r\n", ujwt, sig)
	return c, cr, cs
}
// Helper function to generate an async parser and a quit chan. This allows us to
// parse multiple control statements in same go routine since by default these are
// not protected in the server.
func genAsyncParser(c *client) (func(string), chan bool) {
	feedCh := make(chan []byte, 16)
	feed := func(cs string) { feedCh <- []byte(cs) }
	quit := make(chan bool)
	go func() {
		for {
			select {
			case buf := <-feedCh:
				c.parse(buf)
			case <-quit:
				return
			}
		}
	}()
	return feed, quit
}
// TestJWTUser walks a client through trusted-operator authentication:
// a CONNECT without a JWT, a JWT without an account resolver, and finally
// a fully resolvable account that authenticates successfully.
func TestJWTUser(t *testing.T) {
	s := opTrustBasicSetup()
	defer s.Shutdown()
	// Check to make sure we would have an authTimer
	if !s.info.AuthRequired {
		t.Fatalf("Expect the server to require auth")
	}
	c, cr, _ := newClientForServer(s)
	// Don't send jwt field, should fail.
	go c.parse([]byte("CONNECT {\"verbose\":true,\"pedantic\":true}\r\nPING\r\n"))
	l, _ := cr.ReadString('\n')
	if !strings.HasPrefix(l, "-ERR ") {
		t.Fatalf("Expected an error")
	}
	okp, _ := nkeys.FromSeed(oSeed)
	// Create an account the server cannot yet resolve.
	akp, _ := nkeys.CreateAccount()
	apub, _ := akp.PublicKey()
	nac := jwt.NewAccountClaims(apub)
	ajwt, err := nac.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	c, cr, cs := createClient(t, s, akp)
	// PING needed to flush the +OK/-ERR to us.
	// This should fail too since no account resolver is defined.
	go c.parse([]byte(cs))
	l, _ = cr.ReadString('\n')
	if !strings.HasPrefix(l, "-ERR ") {
		t.Fatalf("Expected an error")
	}
	// Ok now let's walk through and make sure all is good.
	// We will set the account resolver by hand to a memory resolver.
	buildMemAccResolver(s)
	addAccountToMemResolver(s, apub, ajwt)
	c, cr, cs = createClient(t, s, akp)
	go c.parse([]byte(cs))
	l, _ = cr.ReadString('\n')
	if !strings.HasPrefix(l, "PONG") {
		t.Fatalf("Expected a PONG, got %q", l)
	}
}
// TestJWTUserBadTrusted verifies that a client cannot authenticate when the
// server's trusted operator keys are replaced with a bogus value, even with
// an otherwise valid account JWT in the resolver.
func TestJWTUserBadTrusted(t *testing.T) {
	s := opTrustBasicSetup()
	defer s.Shutdown()
	// Check to make sure we would have an authTimer
	if !s.info.AuthRequired {
		t.Fatalf("Expect the server to require auth")
	}
	// Now place bad trusted key
	s.mu.Lock()
	s.trustedKeys = []string{"bad"}
	s.mu.Unlock()
	buildMemAccResolver(s)
	okp, _ := nkeys.FromSeed(oSeed)
	// Create an account signed by the real operator key.
	akp, _ := nkeys.CreateAccount()
	apub, _ := akp.PublicKey()
	nac := jwt.NewAccountClaims(apub)
	ajwt, err := nac.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, apub, ajwt)
	c, cr, cs := createClient(t, s, akp)
	go c.parse([]byte(cs))
	l, _ := cr.ReadString('\n')
	if !strings.HasPrefix(l, "-ERR ") {
		t.Fatalf("Expected an error")
	}
}
// Test that if a user tries to connect with an expired user JWT we do the right thing.
func TestJWTUserExpired(t *testing.T) {
	okp, _ := nkeys.FromSeed(oSeed)
	akp, _ := nkeys.CreateAccount()
	apub, _ := akp.PublicKey()
	nac := jwt.NewAccountClaims(apub)
	ajwt, err := nac.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	// Create a new user that we will make sure has expired.
	nkp, _ := nkeys.CreateUser()
	pub, _ := nkp.PublicKey()
	nuc := jwt.NewUserClaims(pub)
	nuc.IssuedAt = time.Now().Add(-10 * time.Second).Unix()
	nuc.Expires = time.Now().Add(-2 * time.Second).Unix()
	// Named ujwt (not jwt) so the local does not shadow the imported jwt package.
	ujwt, err := nuc.Encode(akp)
	if err != nil {
		t.Fatalf("Error generating user JWT: %v", err)
	}

	s := opTrustBasicSetup()
	defer s.Shutdown()
	buildMemAccResolver(s)
	addAccountToMemResolver(s, apub, ajwt)

	c, cr, l := newClientForServer(s)

	// Sign Nonce
	var info nonceInfo
	json.Unmarshal([]byte(l[5:]), &info)
	sigraw, _ := nkp.Sign([]byte(info.Nonce))
	sig := base64.RawURLEncoding.EncodeToString(sigraw)

	// PING needed to flush the +OK/-ERR to us.
	// This should fail too since no account resolver is defined.
	cs := fmt.Sprintf("CONNECT {\"jwt\":%q,\"sig\":\"%s\",\"verbose\":true,\"pedantic\":true}\r\nPING\r\n", ujwt, sig)
	go c.parse([]byte(cs))
	l, _ = cr.ReadString('\n')
	if !strings.HasPrefix(l, "-ERR ") {
		t.Fatalf("Expected an error")
	}
}
// TestJWTUserExpiresAfterConnect verifies that a user JWT which is valid at
// CONNECT time but expires shortly afterwards causes the server to close
// the connection with an "Expired" error.
func TestJWTUserExpiresAfterConnect(t *testing.T) {
	okp, _ := nkeys.FromSeed(oSeed)
	akp, _ := nkeys.CreateAccount()
	apub, _ := akp.PublicKey()
	nac := jwt.NewAccountClaims(apub)
	ajwt, err := nac.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	// Create a new user that will expire one second from now.
	nkp, _ := nkeys.CreateUser()
	pub, _ := nkp.PublicKey()
	nuc := jwt.NewUserClaims(pub)
	nuc.IssuedAt = time.Now().Unix()
	nuc.Expires = time.Now().Add(time.Second).Unix()
	// Named ujwt (not jwt) so the local does not shadow the imported jwt package.
	ujwt, err := nuc.Encode(akp)
	if err != nil {
		t.Fatalf("Error generating user JWT: %v", err)
	}

	s := opTrustBasicSetup()
	defer s.Shutdown()
	buildMemAccResolver(s)
	addAccountToMemResolver(s, apub, ajwt)

	c, cr, l := newClientForServer(s)

	// Sign Nonce
	var info nonceInfo
	json.Unmarshal([]byte(l[5:]), &info)
	sigraw, _ := nkp.Sign([]byte(info.Nonce))
	sig := base64.RawURLEncoding.EncodeToString(sigraw)

	// PING needed to flush the +OK/-ERR to us.
	cs := fmt.Sprintf("CONNECT {\"jwt\":%q,\"sig\":\"%s\",\"verbose\":true,\"pedantic\":true}\r\nPING\r\n", ujwt, sig)
	go c.parse([]byte(cs))
	l, _ = cr.ReadString('\n')
	if !strings.HasPrefix(l, "+OK") {
		t.Fatalf("Expected an OK, got: %v", l)
	}
	l, _ = cr.ReadString('\n')
	if !strings.HasPrefix(l, "PONG") {
		t.Fatalf("Expected a PONG")
	}

	// Now we should expire after 1 second or so.
	time.Sleep(1250 * time.Millisecond)
	l, _ = cr.ReadString('\n')
	if !strings.HasPrefix(l, "-ERR ") {
		t.Fatalf("Expected an error")
	}
	if !strings.Contains(l, "Expired") {
		t.Fatalf("Expected 'Expired' to be in the error")
	}
}
// TestJWTUserPermissionClaims verifies that pub/sub allow/deny permissions
// embedded in the user JWT are transferred onto the authenticated client.
func TestJWTUserPermissionClaims(t *testing.T) {
	okp, _ := nkeys.FromSeed(oSeed)

	nkp, _ := nkeys.CreateUser()
	pub, _ := nkp.PublicKey()
	nuc := jwt.NewUserClaims(pub)

	nuc.Permissions.Pub.Allow.Add("foo")
	nuc.Permissions.Pub.Allow.Add("bar")
	nuc.Permissions.Pub.Deny.Add("baz")
	nuc.Permissions.Sub.Allow.Add("foo")
	nuc.Permissions.Sub.Allow.Add("bar")
	nuc.Permissions.Sub.Deny.Add("baz")

	akp, _ := nkeys.FromSeed(aSeed)
	apub, _ := akp.PublicKey()
	nac := jwt.NewAccountClaims(apub)
	ajwt, err := nac.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	// Named ujwt (not jwt) so the local does not shadow the imported jwt package.
	ujwt, err := nuc.Encode(akp)
	if err != nil {
		t.Fatalf("Error generating user JWT: %v", err)
	}

	s := opTrustBasicSetup()
	defer s.Shutdown()
	buildMemAccResolver(s)
	addAccountToMemResolver(s, apub, ajwt)

	c, cr, l := newClientForServer(s)

	// Sign Nonce
	var info nonceInfo
	json.Unmarshal([]byte(l[5:]), &info)
	sigraw, _ := nkp.Sign([]byte(info.Nonce))
	sig := base64.RawURLEncoding.EncodeToString(sigraw)

	// PING needed to flush the +OK/-ERR to us.
	cs := fmt.Sprintf("CONNECT {\"jwt\":%q,\"sig\":\"%s\",\"verbose\":true,\"pedantic\":true}\r\nPING\r\n", ujwt, sig)
	go c.parse([]byte(cs))
	l, _ = cr.ReadString('\n')
	if !strings.HasPrefix(l, "+OK") {
		t.Fatalf("Expected an OK, got: %v", l)
	}
	// Now check client to make sure permissions transferred.
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.perms == nil {
		t.Fatalf("Expected client permissions to be set")
	}
	if lpa := c.perms.pub.allow.Count(); lpa != 2 {
		t.Fatalf("Expected 2 publish allow subjects, got %d", lpa)
	}
	if lpd := c.perms.pub.deny.Count(); lpd != 1 {
		t.Fatalf("Expected 1 publish deny subjects, got %d", lpd)
	}
	if lsa := c.perms.sub.allow.Count(); lsa != 2 {
		t.Fatalf("Expected 2 subscribe allow subjects, got %d", lsa)
	}
	if lsd := c.perms.sub.deny.Count(); lsd != 1 {
		t.Fatalf("Expected 1 subscribe deny subjects, got %d", lsd)
	}
}
// TestJWTAccountExpired verifies that a client presenting a user JWT for an
// account whose claims have already expired is rejected at CONNECT.
func TestJWTAccountExpired(t *testing.T) {
	s := opTrustBasicSetup()
	defer s.Shutdown()
	buildMemAccResolver(s)
	okp, _ := nkeys.FromSeed(oSeed)
	// Create an account that will be expired.
	akp, _ := nkeys.CreateAccount()
	apub, _ := akp.PublicKey()
	nac := jwt.NewAccountClaims(apub)
	nac.IssuedAt = time.Now().Add(-10 * time.Second).Unix()
	nac.Expires = time.Now().Add(-2 * time.Second).Unix()
	ajwt, err := nac.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, apub, ajwt)
	// Create a new user
	c, cr, cs := createClient(t, s, akp)
	go c.parse([]byte(cs))
	l, _ := cr.ReadString('\n')
	if !strings.HasPrefix(l, "-ERR ") {
		t.Fatalf("Expected an error")
	}
}
// TestJWTAccountExpiresAfterConnect verifies that an account which is valid
// at CONNECT time but expires shortly afterwards causes connected clients
// to be disconnected with an "Expired" error, and that subsequent connects
// against the expired account fail outright.
func TestJWTAccountExpiresAfterConnect(t *testing.T) {
	s := opTrustBasicSetup()
	defer s.Shutdown()
	buildMemAccResolver(s)
	okp, _ := nkeys.FromSeed(oSeed)
	// Create an account that will expire.
	akp, _ := nkeys.CreateAccount()
	apub, _ := akp.PublicKey()
	nac := jwt.NewAccountClaims(apub)
	nac.IssuedAt = time.Now().Unix()
	nac.Expires = time.Now().Add(time.Second).Unix()
	ajwt, err := nac.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, apub, ajwt)
	// Create a new user
	c, cr, cs := createClient(t, s, akp)
	expectPong := func(cr *bufio.Reader) {
		t.Helper()
		l, _ := cr.ReadString('\n')
		if !strings.HasPrefix(l, "PONG") {
			t.Fatalf("Expected a PONG, got %q", l)
		}
	}
	go c.parse([]byte(cs))
	expectPong(cr)
	// Now we should expire after 1 second or so.
	time.Sleep(1250 * time.Millisecond)
	l, _ := cr.ReadString('\n')
	if !strings.HasPrefix(l, "-ERR ") {
		t.Fatalf("Expected an error, got %q", l)
	}
	if !strings.Contains(l, "Expired") {
		t.Fatalf("Expected 'Expired' to be in the error")
	}
	// Now make sure that accounts that have expired return an error.
	c, cr, cs = createClient(t, s, akp)
	go c.parse([]byte(cs))
	l, _ = cr.ReadString('\n')
	if !strings.HasPrefix(l, "-ERR ") {
		t.Fatalf("Expected an error")
	}
}
// TestJWTAccountRenew verifies that an expired account rejects new clients
// until its claims are renewed via updateAccountClaims, after which clients
// can connect again. Fixes the "retrive" typo in the failure message.
func TestJWTAccountRenew(t *testing.T) {
	s := opTrustBasicSetup()
	defer s.Shutdown()
	buildMemAccResolver(s)
	okp, _ := nkeys.FromSeed(oSeed)
	// Create an account that has expired.
	akp, _ := nkeys.CreateAccount()
	apub, _ := akp.PublicKey()
	nac := jwt.NewAccountClaims(apub)
	nac.IssuedAt = time.Now().Add(-10 * time.Second).Unix()
	nac.Expires = time.Now().Add(-2 * time.Second).Unix()
	ajwt, err := nac.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, apub, ajwt)
	// Create a new user
	c, cr, cs := createClient(t, s, akp)
	go c.parse([]byte(cs))
	l, _ := cr.ReadString('\n')
	if !strings.HasPrefix(l, "-ERR ") {
		t.Fatalf("Expected an error")
	}
	// Now update with new expiration
	nac.IssuedAt = time.Now().Unix()
	nac.Expires = time.Now().Add(5 * time.Second).Unix()
	ajwt, err = nac.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	// Update the account
	addAccountToMemResolver(s, apub, ajwt)
	acc, _ := s.LookupAccount(apub)
	if acc == nil {
		t.Fatalf("Expected to retrieve the account")
	}
	s.updateAccountClaims(acc, nac)
	// Now make sure we can connect.
	c, cr, cs = createClient(t, s, akp)
	go c.parse([]byte(cs))
	l, _ = cr.ReadString('\n')
	if !strings.HasPrefix(l, "PONG") {
		t.Fatalf("Expected a PONG, got: %q", l)
	}
}
// TestJWTAccountRenewFromResolver verifies that when an expired account's
// JWT is replaced in the resolver (without calling updateAccountClaims
// directly), the server picks up the renewed claims on the next client
// connect.
func TestJWTAccountRenewFromResolver(t *testing.T) {
	s := opTrustBasicSetup()
	defer s.Shutdown()
	buildMemAccResolver(s)
	okp, _ := nkeys.FromSeed(oSeed)
	// Create an account that has expired.
	akp, _ := nkeys.CreateAccount()
	apub, _ := akp.PublicKey()
	nac := jwt.NewAccountClaims(apub)
	nac.IssuedAt = time.Now().Add(-10 * time.Second).Unix()
	nac.Expires = time.Now().Add(time.Second).Unix()
	ajwt, err := nac.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, apub, ajwt)
	// Force it to be loaded by the server and start the expiration timer.
	acc, _ := s.LookupAccount(apub)
	if acc == nil {
		t.Fatalf("Could not retrieve account for %q", apub)
	}
	// Create a new user
	c, cr, cs := createClient(t, s, akp)
	// Wait for expiration.
	time.Sleep(1250 * time.Millisecond)
	go c.parse([]byte(cs))
	l, _ := cr.ReadString('\n')
	if !strings.HasPrefix(l, "-ERR ") {
		t.Fatalf("Expected an error")
	}
	// Now update with new expiration
	nac.IssuedAt = time.Now().Unix()
	nac.Expires = time.Now().Add(5 * time.Second).Unix()
	ajwt, err = nac.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	// Update the account
	addAccountToMemResolver(s, apub, ajwt)
	// Make sure the too quick update suppression does not bite us.
	acc.updated = time.Now().Add(-1 * time.Hour)
	// Do not update the account directly. The resolver should
	// happen automatically.
	// Now make sure we can connect.
	c, cr, cs = createClient(t, s, akp)
	go c.parse([]byte(cs))
	l, _ = cr.ReadString('\n')
	if !strings.HasPrefix(l, "PONG") {
		t.Fatalf("Expected a PONG, got: %q", l)
	}
}
// TestJWTAccountBasicImportExport exercises stream and service
// exports/imports between two accounts, including token-required exports,
// bad and valid activation tokens, and URL-based token retrieval.
// Fixes failure messages that said "imports services" where streams are
// checked, and a comment that contradicted the assertion beneath it.
func TestJWTAccountBasicImportExport(t *testing.T) {
	s := opTrustBasicSetup()
	defer s.Shutdown()
	buildMemAccResolver(s)
	okp, _ := nkeys.FromSeed(oSeed)
	// Create accounts and imports/exports.
	fooKP, _ := nkeys.CreateAccount()
	fooPub, _ := fooKP.PublicKey()
	fooAC := jwt.NewAccountClaims(fooPub)
	// Now create Exports.
	streamExport := &jwt.Export{Subject: "foo", Type: jwt.Stream}
	streamExport2 := &jwt.Export{Subject: "private", Type: jwt.Stream, TokenReq: true}
	serviceExport := &jwt.Export{Subject: "req.echo", Type: jwt.Service, TokenReq: true}
	serviceExport2 := &jwt.Export{Subject: "req.add", Type: jwt.Service, TokenReq: true}
	fooAC.Exports.Add(streamExport, streamExport2, serviceExport, serviceExport2)
	fooJWT, err := fooAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, fooPub, fooJWT)
	acc, _ := s.LookupAccount(fooPub)
	if acc == nil {
		t.Fatalf("Expected to retrieve the account")
	}
	// Check to make sure exports transferred over.
	if les := len(acc.exports.streams); les != 2 {
		t.Fatalf("Expected exports streams len of 2, got %d", les)
	}
	if les := len(acc.exports.services); les != 2 {
		t.Fatalf("Expected exports services len of 2, got %d", les)
	}
	_, ok := acc.exports.streams["foo"]
	if !ok {
		t.Fatalf("Expected to map a stream export")
	}
	se, ok := acc.exports.services["req.echo"]
	if !ok || se == nil {
		t.Fatalf("Expected to map a service export")
	}
	if !se.tokenReq {
		t.Fatalf("Expected the service export to require tokens")
	}
	barKP, _ := nkeys.CreateAccount()
	barPub, _ := barKP.PublicKey()
	barAC := jwt.NewAccountClaims(barPub)
	streamImport := &jwt.Import{Account: fooPub, Subject: "foo", To: "import.foo", Type: jwt.Stream}
	serviceImport := &jwt.Import{Account: fooPub, Subject: "req.echo", Type: jwt.Service}
	barAC.Imports.Add(streamImport, serviceImport)
	barJWT, err := barAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, barPub, barJWT)
	acc, _ = s.LookupAccount(barPub)
	if acc == nil {
		t.Fatalf("Expected to retrieve the account")
	}
	if les := len(acc.imports.streams); les != 1 {
		t.Fatalf("Expected imports streams len of 1, got %d", les)
	}
	// Our service import should have failed without a token.
	if les := len(acc.imports.services); les != 0 {
		t.Fatalf("Expected imports services len of 0, got %d", les)
	}
	// Now add in a bad activation token.
	barAC = jwt.NewAccountClaims(barPub)
	serviceImport = &jwt.Import{Account: fooPub, Subject: "req.echo", Token: "not a token", Type: jwt.Service}
	barAC.Imports.Add(serviceImport)
	barJWT, err = barAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, barPub, barJWT)
	s.updateAccountClaims(acc, barAC)
	// Our service import should have failed with a bad token.
	if les := len(acc.imports.services); les != 0 {
		t.Fatalf("Expected imports services len of 0, got %d", les)
	}
	// Now make a correct one.
	barAC = jwt.NewAccountClaims(barPub)
	serviceImport = &jwt.Import{Account: fooPub, Subject: "req.echo", Type: jwt.Service}
	activation := jwt.NewActivationClaims(barPub)
	activation.ImportSubject = "req.echo"
	activation.ImportType = jwt.Service
	actJWT, err := activation.Encode(fooKP)
	if err != nil {
		t.Fatalf("Error generating activation token: %v", err)
	}
	serviceImport.Token = actJWT
	barAC.Imports.Add(serviceImport)
	barJWT, err = barAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, barPub, barJWT)
	s.updateAccountClaims(acc, barAC)
	// Our service import should have succeeded.
	if les := len(acc.imports.services); les != 1 {
		t.Fatalf("Expected imports services len of 1, got %d", les)
	}
	// Now test url
	barAC = jwt.NewAccountClaims(barPub)
	serviceImport = &jwt.Import{Account: fooPub, Subject: "req.add", Type: jwt.Service}
	activation = jwt.NewActivationClaims(barPub)
	activation.ImportSubject = "req.add"
	activation.ImportType = jwt.Service
	actJWT, err = activation.Encode(fooKP)
	if err != nil {
		t.Fatalf("Error generating activation token: %v", err)
	}
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(actJWT))
	}))
	defer ts.Close()
	serviceImport.Token = ts.URL
	barAC.Imports.Add(serviceImport)
	barJWT, err = barAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, barPub, barJWT)
	s.updateAccountClaims(acc, barAC)
	// Our service import should have succeeded. Should be the only one since we reset.
	if les := len(acc.imports.services); les != 1 {
		t.Fatalf("Expected imports services len of 1, got %d", les)
	}
	// Now streams
	barAC = jwt.NewAccountClaims(barPub)
	streamImport = &jwt.Import{Account: fooPub, Subject: "private", To: "import.private", Type: jwt.Stream}
	barAC.Imports.Add(streamImport)
	barJWT, err = barAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, barPub, barJWT)
	s.updateAccountClaims(acc, barAC)
	// Our stream import should have not succeeded.
	if les := len(acc.imports.streams); les != 0 {
		t.Fatalf("Expected imports streams len of 0, got %d", les)
	}
	// Now add in activation.
	barAC = jwt.NewAccountClaims(barPub)
	streamImport = &jwt.Import{Account: fooPub, Subject: "private", To: "import.private", Type: jwt.Stream}
	activation = jwt.NewActivationClaims(barPub)
	activation.ImportSubject = "private"
	activation.ImportType = jwt.Stream
	actJWT, err = activation.Encode(fooKP)
	if err != nil {
		t.Fatalf("Error generating activation token: %v", err)
	}
	streamImport.Token = actJWT
	barAC.Imports.Add(streamImport)
	barJWT, err = barAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, barPub, barJWT)
	s.updateAccountClaims(acc, barAC)
	// Our stream import should have succeeded this time.
	if les := len(acc.imports.streams); les != 1 {
		t.Fatalf("Expected imports streams len of 1, got %d", les)
	}
}
// TestJWTAccountImportExportUpdates verifies that shadow subscriptions for
// an imported stream appear and disappear as the import/export claims are
// updated. Also removes a redundant string() conversion of fooPub, which is
// already a string (it is used unconverted in the same function).
func TestJWTAccountImportExportUpdates(t *testing.T) {
	s := opTrustBasicSetup()
	defer s.Shutdown()
	buildMemAccResolver(s)
	okp, _ := nkeys.FromSeed(oSeed)
	// Create accounts and imports/exports.
	fooKP, _ := nkeys.CreateAccount()
	fooPub, _ := fooKP.PublicKey()
	fooAC := jwt.NewAccountClaims(fooPub)
	streamExport := &jwt.Export{Subject: "foo", Type: jwt.Stream}
	fooAC.Exports.Add(streamExport)
	fooJWT, err := fooAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, fooPub, fooJWT)
	barKP, _ := nkeys.CreateAccount()
	barPub, _ := barKP.PublicKey()
	barAC := jwt.NewAccountClaims(barPub)
	streamImport := &jwt.Import{Account: fooPub, Subject: "foo", To: "import", Type: jwt.Stream}
	barAC.Imports.Add(streamImport)
	barJWT, err := barAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, barPub, barJWT)
	expectPong := func(cr *bufio.Reader) {
		t.Helper()
		l, _ := cr.ReadString('\n')
		if !strings.HasPrefix(l, "PONG") {
			t.Fatalf("Expected a PONG, got %q", l)
		}
	}
	// Create a client.
	c, cr, cs := createClient(t, s, barKP)
	parseAsync, quit := genAsyncParser(c)
	defer func() { quit <- true }()
	parseAsync(cs)
	expectPong(cr)
	parseAsync("SUB import.foo 1\r\nPING\r\n")
	expectPong(cr)
	checkShadow := func(expected int) {
		t.Helper()
		c.mu.Lock()
		defer c.mu.Unlock()
		sub := c.subs["1"]
		if ls := len(sub.shadow); ls != expected {
			t.Fatalf("Expected shadows to be %d, got %d", expected, ls)
		}
	}
	// We created a SUB on foo which should create a shadow subscription.
	checkShadow(1)
	// Now update bar and remove the import which should make the shadow go away.
	barAC = jwt.NewAccountClaims(barPub)
	barJWT, _ = barAC.Encode(okp)
	addAccountToMemResolver(s, barPub, barJWT)
	acc, _ := s.LookupAccount(barPub)
	s.updateAccountClaims(acc, barAC)
	checkShadow(0)
	// Now add it back and make sure the shadow comes back.
	streamImport = &jwt.Import{Account: fooPub, Subject: "foo", To: "import", Type: jwt.Stream}
	barAC.Imports.Add(streamImport)
	barJWT, _ = barAC.Encode(okp)
	addAccountToMemResolver(s, barPub, barJWT)
	s.updateAccountClaims(acc, barAC)
	checkShadow(1)
	// Now change export and make sure it goes away as well. So no exports anymore.
	fooAC = jwt.NewAccountClaims(fooPub)
	fooJWT, _ = fooAC.Encode(okp)
	addAccountToMemResolver(s, fooPub, fooJWT)
	acc, _ = s.LookupAccount(fooPub)
	s.updateAccountClaims(acc, fooAC)
	checkShadow(0)
	// Now add it in but with permission required.
	streamExport = &jwt.Export{Subject: "foo", Type: jwt.Stream, TokenReq: true}
	fooAC.Exports.Add(streamExport)
	fooJWT, _ = fooAC.Encode(okp)
	addAccountToMemResolver(s, fooPub, fooJWT)
	s.updateAccountClaims(acc, fooAC)
	checkShadow(0)
	// Now put it back as normal.
	fooAC = jwt.NewAccountClaims(fooPub)
	streamExport = &jwt.Export{Subject: "foo", Type: jwt.Stream}
	fooAC.Exports.Add(streamExport)
	fooJWT, _ = fooAC.Encode(okp)
	addAccountToMemResolver(s, fooPub, fooJWT)
	s.updateAccountClaims(acc, fooAC)
	checkShadow(1)
}
// TestJWTAccountImportActivationExpires verifies that a shadow subscription
// backed by a stream import with a short-lived activation token is removed
// once that token expires.
func TestJWTAccountImportActivationExpires(t *testing.T) {
	s := opTrustBasicSetup()
	defer s.Shutdown()
	buildMemAccResolver(s)
	okp, _ := nkeys.FromSeed(oSeed)
	// Create accounts and imports/exports.
	// foo exports the "foo" stream but requires an activation token.
	fooKP, _ := nkeys.CreateAccount()
	fooPub, _ := fooKP.PublicKey()
	fooAC := jwt.NewAccountClaims(fooPub)
	streamExport := &jwt.Export{Subject: "foo", Type: jwt.Stream, TokenReq: true}
	fooAC.Exports.Add(streamExport)
	fooJWT, err := fooAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, fooPub, fooJWT)
	acc, _ := s.LookupAccount(fooPub)
	if acc == nil {
		t.Fatalf("Expected to retrieve the account")
	}
	// bar imports the stream with an activation token that expires in ~1s.
	barKP, _ := nkeys.CreateAccount()
	barPub, _ := barKP.PublicKey()
	barAC := jwt.NewAccountClaims(barPub)
	streamImport := &jwt.Import{Account: fooPub, Subject: "foo", To: "import.", Type: jwt.Stream}
	activation := jwt.NewActivationClaims(barPub)
	activation.ImportSubject = "foo"
	activation.ImportType = jwt.Stream
	// Backdate issuance so the token is already valid, expiring one second out.
	activation.IssuedAt = time.Now().Add(-10 * time.Second).Unix()
	activation.Expires = time.Now().Add(time.Second).Unix()
	actJWT, err := activation.Encode(fooKP)
	if err != nil {
		t.Fatalf("Error generating activation token: %v", err)
	}
	streamImport.Token = actJWT
	barAC.Imports.Add(streamImport)
	barJWT, err := barAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, barPub, barJWT)
	// expectPong reads one protocol line and requires it to be a PONG.
	expectPong := func(cr *bufio.Reader) {
		t.Helper()
		l, _ := cr.ReadString('\n')
		if !strings.HasPrefix(l, "PONG") {
			t.Fatalf("Expected a PONG, got %q", l)
		}
	}
	// Create a client.
	c, cr, cs := createClient(t, s, barKP)
	parseAsync, quit := genAsyncParser(c)
	defer func() { quit <- true }()
	parseAsync(cs)
	expectPong(cr)
	parseAsync("SUB import.foo 1\r\nPING\r\n")
	expectPong(cr)
	// checkShadow asserts how many shadow subscriptions back client sub "1".
	checkShadow := func(expected int) {
		t.Helper()
		c.mu.Lock()
		defer c.mu.Unlock()
		sub := c.subs["1"]
		if ls := len(sub.shadow); ls != expected {
			t.Fatalf("Expected shadows to be %d, got %d", expected, ls)
		}
	}
	// We created a SUB on foo which should create a shadow subscription.
	checkShadow(1)
	// Wait past the activation token's expiration.
	time.Sleep(1250 * time.Millisecond)
	// Should have expired and been removed.
	checkShadow(0)
}
// TestJWTAccountLimitsSubs verifies that the account JWT's subscription limit
// is propagated to the account struct and the client, enforced on SUB, and
// that lowering the limit via a live claims update errors the connection that
// now exceeds it.
func TestJWTAccountLimitsSubs(t *testing.T) {
	s := opTrustBasicSetup()
	defer s.Shutdown()
	buildMemAccResolver(s)
	okp, _ := nkeys.FromSeed(oSeed)
	// Create accounts and imports/exports.
	// The account allows at most 10 subscriptions.
	fooKP, _ := nkeys.CreateAccount()
	fooPub, _ := fooKP.PublicKey()
	fooAC := jwt.NewAccountClaims(fooPub)
	fooAC.Limits.Subs = 10
	fooJWT, err := fooAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, fooPub, fooJWT)
	// expectPong reads one protocol line and requires it to be a PONG.
	expectPong := func(cr *bufio.Reader) {
		t.Helper()
		l, _ := cr.ReadString('\n')
		if !strings.HasPrefix(l, "PONG") {
			t.Fatalf("Expected a PONG, got %q", l)
		}
	}
	// Create a client.
	c, cr, cs := createClient(t, s, fooKP)
	parseAsync, quit := genAsyncParser(c)
	defer func() { quit <- true }()
	parseAsync(cs)
	expectPong(cr)
	// Check to make sure we have the limit set.
	// Account first
	fooAcc, _ := s.LookupAccount(fooPub)
	fooAcc.mu.RLock()
	if fooAcc.msubs != 10 {
		fooAcc.mu.RUnlock()
		t.Fatalf("Expected account to have msubs of 10, got %d", fooAcc.msubs)
	}
	fooAcc.mu.RUnlock()
	// Now test that the client has limits too.
	c.mu.Lock()
	if c.msubs != 10 {
		c.mu.Unlock()
		t.Fatalf("Expected client msubs to be 10, got %d", c.msubs)
	}
	c.mu.Unlock()
	// Now make sure its enforced.
	/// These should all work ok.
	for i := 0; i < 10; i++ {
		parseAsync(fmt.Sprintf("SUB foo %d\r\nPING\r\n", i))
		expectPong(cr)
	}
	// This one should fail.
	parseAsync("SUB foo 22\r\n")
	l, _ := cr.ReadString('\n')
	if !strings.HasPrefix(l, "-ERR") {
		t.Fatalf("Expected an ERR, got: %v", l)
	}
	if !strings.Contains(l, "maximum subscriptions exceeded") {
		t.Fatalf("Expected an ERR for max subscriptions exceeded, got: %v", l)
	}
	// Now update the claims and expect if max is lower to be disconnected.
	fooAC.Limits.Subs = 5
	fooJWT, err = fooAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, fooPub, fooJWT)
	s.updateAccountClaims(fooAcc, fooAC)
	// The live connection already exceeds the new limit of 5, so an error
	// should arrive without any further client input.
	l, _ = cr.ReadString('\n')
	if !strings.HasPrefix(l, "-ERR") {
		t.Fatalf("Expected an ERR, got: %v", l)
	}
	if !strings.Contains(l, "maximum subscriptions exceeded") {
		t.Fatalf("Expected an ERR for max subscriptions exceeded, got: %v", l)
	}
}
// TestJWTAccountLimitsSubsButServerOverrides verifies that a server-level
// MaxSubs setting lower than the account JWT's subscription limit takes
// precedence when SUB commands are enforced.
func TestJWTAccountLimitsSubsButServerOverrides(t *testing.T) {
	s := opTrustBasicSetup()
	defer s.Shutdown()
	buildMemAccResolver(s)
	// override with server setting of 2.
	// NOTE(review): the options are mutated after the server has started;
	// presumably getOpts returns the live options consulted at client
	// registration time -- confirm.
	opts := s.getOpts()
	opts.MaxSubs = 2
	okp, _ := nkeys.FromSeed(oSeed)
	// Create accounts and imports/exports.
	// The account itself would allow 10 subscriptions.
	fooKP, _ := nkeys.CreateAccount()
	fooPub, _ := fooKP.PublicKey()
	fooAC := jwt.NewAccountClaims(fooPub)
	fooAC.Limits.Subs = 10
	fooJWT, err := fooAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, fooPub, fooJWT)
	// The account struct still reflects the JWT limit of 10.
	fooAcc, _ := s.LookupAccount(fooPub)
	fooAcc.mu.RLock()
	if fooAcc.msubs != 10 {
		fooAcc.mu.RUnlock()
		t.Fatalf("Expected account to have msubs of 10, got %d", fooAcc.msubs)
	}
	fooAcc.mu.RUnlock()
	// expectPong reads one protocol line and requires it to be a PONG.
	expectPong := func(cr *bufio.Reader) {
		t.Helper()
		l, _ := cr.ReadString('\n')
		if !strings.HasPrefix(l, "PONG") {
			t.Fatalf("Expected a PONG, got %q", l)
		}
	}
	// Create a client.
	c, cr, cs := createClient(t, s, fooKP)
	parseAsync, quit := genAsyncParser(c)
	defer func() { quit <- true }()
	parseAsync(cs)
	expectPong(cr)
	// The third SUB exceeds the server override of 2 and must error.
	parseAsync("SUB foo 1\r\nSUB bar 2\r\nSUB baz 3\r\nPING\r\n")
	l, _ := cr.ReadString('\n')
	if !strings.HasPrefix(l, "-ERR ") {
		t.Fatalf("Expected an error")
	}
	if !strings.Contains(l, "maximum subscriptions exceeded") {
		t.Fatalf("Expected an ERR for max subscriptions exceeded, got: %v", l)
	}
	// Read last PONG so does not hold up test.
	cr.ReadString('\n')
}
// TestJWTAccountLimitsMaxPayload verifies that the account JWT's payload
// limit is propagated to the account struct and the client, and that it is
// enforced on PUB commands.
func TestJWTAccountLimitsMaxPayload(t *testing.T) {
	s := opTrustBasicSetup()
	defer s.Shutdown()
	buildMemAccResolver(s)
	okp, _ := nkeys.FromSeed(oSeed)
	// Create an account limited to 8 byte payloads.
	fooKP, _ := nkeys.CreateAccount()
	fooPub, _ := fooKP.PublicKey()
	fooAC := jwt.NewAccountClaims(fooPub)
	fooAC.Limits.Payload = 8
	fooJWT, err := fooAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, fooPub, fooJWT)
	// expectPong reads one protocol line and requires it to be a PONG.
	expectPong := func(cr *bufio.Reader) {
		t.Helper()
		l, _ := cr.ReadString('\n')
		if !strings.HasPrefix(l, "PONG") {
			t.Fatalf("Expected a PONG, got %q", l)
		}
	}
	// Create a client.
	c, cr, cs := createClient(t, s, fooKP)
	parseAsync, quit := genAsyncParser(c)
	defer func() { quit <- true }()
	parseAsync(cs)
	expectPong(cr)
	// Check to make sure we have the limit set.
	// Account first
	fooAcc, _ := s.LookupAccount(fooPub)
	fooAcc.mu.RLock()
	if fooAcc.mpay != 8 {
		fooAcc.mu.RUnlock()
		t.Fatalf("Expected account to have mpay of 8, got %d", fooAcc.mpay)
	}
	fooAcc.mu.RUnlock()
	// Now test that the client has limits too.
	c.mu.Lock()
	if c.mpay != 8 {
		c.mu.Unlock()
		// Fixed: this message previously claimed an expected value of 10,
		// but the limit under test is 8.
		t.Fatalf("Expected client to have mpay of 8, got %d", c.mpay)
	}
	c.mu.Unlock()
	// A 4 byte payload is under the limit and should be accepted.
	parseAsync("PUB foo 4\r\nXXXX\r\nPING\r\n")
	expectPong(cr)
	// A 10 byte payload exceeds the 8 byte limit and must be rejected.
	parseAsync("PUB foo 10\r\nXXXXXXXXXX\r\nPING\r\n")
	l, _ := cr.ReadString('\n')
	if !strings.HasPrefix(l, "-ERR ") {
		t.Fatalf("Expected an error")
	}
	if !strings.Contains(l, "Maximum Payload") {
		t.Fatalf("Expected an ERR for max payload violation, got: %v", l)
	}
}
// TestJWTAccountLimitsMaxPayloadButServerOverrides verifies that a
// server-level MaxPayload setting lower than the account JWT's payload limit
// takes precedence when PUB commands are enforced.
func TestJWTAccountLimitsMaxPayloadButServerOverrides(t *testing.T) {
	s := opTrustBasicSetup()
	defer s.Shutdown()
	buildMemAccResolver(s)
	// override with server setting of 4.
	// NOTE(review): the options are mutated after the server has started;
	// presumably getOpts returns the live options consulted at client
	// registration time -- confirm.
	opts := s.getOpts()
	opts.MaxPayload = 4
	okp, _ := nkeys.FromSeed(oSeed)
	// Create accounts and imports/exports.
	// The account itself would allow 8 byte payloads.
	fooKP, _ := nkeys.CreateAccount()
	fooPub, _ := fooKP.PublicKey()
	fooAC := jwt.NewAccountClaims(fooPub)
	fooAC.Limits.Payload = 8
	fooJWT, err := fooAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, fooPub, fooJWT)
	// expectPong reads one protocol line and requires it to be a PONG.
	expectPong := func(cr *bufio.Reader) {
		t.Helper()
		l, _ := cr.ReadString('\n')
		if !strings.HasPrefix(l, "PONG") {
			t.Fatalf("Expected a PONG, got %q", l)
		}
	}
	// Create a client.
	c, cr, cs := createClient(t, s, fooKP)
	parseAsync, quit := genAsyncParser(c)
	defer func() { quit <- true }()
	parseAsync(cs)
	expectPong(cr)
	// 6 bytes is under the account limit (8) but over the server override (4).
	parseAsync("PUB foo 6\r\nXXXXXX\r\nPING\r\n")
	l, _ := cr.ReadString('\n')
	if !strings.HasPrefix(l, "-ERR ") {
		t.Fatalf("Expected an error")
	}
	if !strings.Contains(l, "Maximum Payload") {
		t.Fatalf("Expected an ERR for max payload violation, got: %v", l)
	}
}
// NOTE: For now this is single server, will change to adapt for network wide.
// TODO(dlc) - Make cluster/gateway aware.
// TestJWTAccountLimitsMaxConns checks that an account's connection limit is
// enforced: the first eight clients are accepted and the ninth is rejected.
func TestJWTAccountLimitsMaxConns(t *testing.T) {
	srv := opTrustBasicSetup()
	defer srv.Shutdown()
	buildMemAccResolver(srv)

	operatorKP, _ := nkeys.FromSeed(oSeed)

	// Build an account limited to eight concurrent connections.
	acctKP, _ := nkeys.CreateAccount()
	acctPub, _ := acctKP.PublicKey()
	claims := jwt.NewAccountClaims(acctPub)
	claims.Limits.Conn = 8
	acctJWT, err := claims.Encode(operatorKP)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(srv, acctPub, acctJWT)

	// connectExpect opens a new client connection and requires the first
	// server response to start with the given prefix.
	connectExpect := func(expPre string) {
		t.Helper()
		client, reader, connectOp := createClient(t, srv, acctKP)
		go client.parse([]byte(connectOp))
		line, _ := reader.ReadString('\n')
		if !strings.HasPrefix(line, expPre) {
			t.Fatalf("Expected a response starting with %q", expPre)
		}
	}

	// Eight connections fit within the limit.
	for i := 0; i < 8; i++ {
		connectExpect("PONG")
	}
	// The ninth connection must be refused.
	connectExpect("-ERR ")
}
// This will test that we can switch from a public export to a private
// one and back with export claims to make sure the claim update mechanism
// is working properly.
func TestJWTAccountServiceImportAuthSwitch(t *testing.T) {
	s := opTrustBasicSetup()
	defer s.Shutdown()
	buildMemAccResolver(s)
	okp, _ := nkeys.FromSeed(oSeed)
	// Create accounts and imports/exports.
	// foo exports the "ngs.usage.*" service publicly.
	fooKP, _ := nkeys.CreateAccount()
	fooPub, _ := fooKP.PublicKey()
	fooAC := jwt.NewAccountClaims(fooPub)
	serviceExport := &jwt.Export{Subject: "ngs.usage.*", Type: jwt.Service}
	fooAC.Exports.Add(serviceExport)
	fooJWT, err := fooAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, fooPub, fooJWT)
	// bar imports the service, remapping "ngs.usage" to "ngs.usage.DEREK".
	barKP, _ := nkeys.CreateAccount()
	barPub, _ := barKP.PublicKey()
	barAC := jwt.NewAccountClaims(barPub)
	serviceImport := &jwt.Import{Account: fooPub, Subject: "ngs.usage", To: "ngs.usage.DEREK", Type: jwt.Service}
	barAC.Imports.Add(serviceImport)
	barJWT, err := barAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, barPub, barJWT)
	// expectPong reads one protocol line and requires it to be a PONG.
	expectPong := func(cr *bufio.Reader) {
		t.Helper()
		l, _ := cr.ReadString('\n')
		if !strings.HasPrefix(l, "PONG") {
			t.Fatalf("Expected a PONG, got %q", l)
		}
	}
	// expectMsg reads a MSG protocol line plus its payload, checks both,
	// then consumes the trailing PONG.
	expectMsg := func(cr *bufio.Reader, sub, pay string) {
		t.Helper()
		l, _ := cr.ReadString('\n')
		expected := "MSG " + sub
		if !strings.HasPrefix(l, expected) {
			t.Fatalf("Expected %q, got %q", expected, l)
		}
		l, _ = cr.ReadString('\n')
		if l != pay+"\r\n" {
			t.Fatalf("Expected %q, got %q", pay, l)
		}
		expectPong(cr)
	}
	// Create a client that will send the request
	ca, cra, csa := createClient(t, s, barKP)
	parseAsyncA, quitA := genAsyncParser(ca)
	defer func() { quitA <- true }()
	parseAsyncA(csa)
	expectPong(cra)
	// Create the client that will respond to the requests.
	cb, crb, csb := createClient(t, s, fooKP)
	parseAsyncB, quitB := genAsyncParser(cb)
	defer func() { quitB <- true }()
	parseAsyncB(csb)
	expectPong(crb)
	// Create Subscriber.
	parseAsyncB("SUB ngs.usage.* 1\r\nPING\r\n")
	expectPong(crb)
	// Send Request
	parseAsyncA("PUB ngs.usage 2\r\nhi\r\nPING\r\n")
	expectPong(cra)
	// We should receive the request mapped into our account. PING needed to flush.
	parseAsyncB("PING\r\n")
	expectMsg(crb, "ngs.usage.DEREK", "hi")
	// Now update to make the export private.
	fooACPrivate := jwt.NewAccountClaims(fooPub)
	serviceExport = &jwt.Export{Subject: "ngs.usage.*", Type: jwt.Service, TokenReq: true}
	fooACPrivate.Exports.Add(serviceExport)
	fooJWTPrivate, err := fooACPrivate.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, fooPub, fooJWTPrivate)
	acc, _ := s.LookupAccount(fooPub)
	s.updateAccountClaims(acc, fooACPrivate)
	// Send Another Request
	parseAsyncA("PUB ngs.usage 2\r\nhi\r\nPING\r\n")
	expectPong(cra)
	// We should not receive the request this time.
	parseAsyncB("PING\r\n")
	expectPong(crb)
	// Now put it back again to public and make sure it works again.
	addAccountToMemResolver(s, fooPub, fooJWT)
	s.updateAccountClaims(acc, fooAC)
	// Send Request
	parseAsyncA("PUB ngs.usage 2\r\nhi\r\nPING\r\n")
	expectPong(cra)
	// We should receive the request mapped into our account. PING needed to flush.
	parseAsyncB("PING\r\n")
	expectMsg(crb, "ngs.usage.DEREK", "hi")
}
// TestJWTAccountServiceImportExpires verifies service import behavior across
// claim updates: a public export works, making it token-required cuts the
// import off, a short-lived activation token restores it, and the token's
// expiration cuts it off again.
func TestJWTAccountServiceImportExpires(t *testing.T) {
	s := opTrustBasicSetup()
	defer s.Shutdown()
	buildMemAccResolver(s)
	okp, _ := nkeys.FromSeed(oSeed)
	// Create accounts and imports/exports.
	// foo exports the "foo" service publicly.
	fooKP, _ := nkeys.CreateAccount()
	fooPub, _ := fooKP.PublicKey()
	fooAC := jwt.NewAccountClaims(fooPub)
	serviceExport := &jwt.Export{Subject: "foo", Type: jwt.Service}
	fooAC.Exports.Add(serviceExport)
	fooJWT, err := fooAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, fooPub, fooJWT)
	// bar imports foo's service.
	barKP, _ := nkeys.CreateAccount()
	barPub, _ := barKP.PublicKey()
	barAC := jwt.NewAccountClaims(barPub)
	serviceImport := &jwt.Import{Account: fooPub, Subject: "foo", Type: jwt.Service}
	barAC.Imports.Add(serviceImport)
	barJWT, err := barAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, barPub, barJWT)
	// expectPong reads one protocol line and requires it to be a PONG.
	expectPong := func(cr *bufio.Reader) {
		t.Helper()
		l, _ := cr.ReadString('\n')
		if !strings.HasPrefix(l, "PONG") {
			t.Fatalf("Expected a PONG, got %q", l)
		}
	}
	// expectMsg reads a MSG protocol line plus its payload, checks both,
	// then consumes the trailing PONG.
	expectMsg := func(cr *bufio.Reader, sub, pay string) {
		t.Helper()
		l, _ := cr.ReadString('\n')
		expected := "MSG " + sub
		if !strings.HasPrefix(l, expected) {
			t.Fatalf("Expected %q, got %q", expected, l)
		}
		l, _ = cr.ReadString('\n')
		if l != pay+"\r\n" {
			t.Fatalf("Expected %q, got %q", pay, l)
		}
		expectPong(cr)
	}
	// Create a client that will send the request
	ca, cra, csa := createClient(t, s, barKP)
	parseAsyncA, quitA := genAsyncParser(ca)
	defer func() { quitA <- true }()
	parseAsyncA(csa)
	expectPong(cra)
	// Create the client that will respond to the requests.
	cb, crb, csb := createClient(t, s, fooKP)
	parseAsyncB, quitB := genAsyncParser(cb)
	defer func() { quitB <- true }()
	parseAsyncB(csb)
	expectPong(crb)
	// Create Subscriber.
	parseAsyncB("SUB foo 1\r\nPING\r\n")
	expectPong(crb)
	// Send Request
	parseAsyncA("PUB foo 2\r\nhi\r\nPING\r\n")
	expectPong(cra)
	// We should receive the request. PING needed to flush.
	parseAsyncB("PING\r\n")
	expectMsg(crb, "foo", "hi")
	// Now update the exported service to require auth.
	fooAC = jwt.NewAccountClaims(fooPub)
	serviceExport = &jwt.Export{Subject: "foo", Type: jwt.Service, TokenReq: true}
	fooAC.Exports.Add(serviceExport)
	fooJWT, err = fooAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, fooPub, fooJWT)
	acc, _ := s.LookupAccount(fooPub)
	s.updateAccountClaims(acc, fooAC)
	// Send Another Request
	parseAsyncA("PUB foo 2\r\nhi\r\nPING\r\n")
	expectPong(cra)
	// We should not receive the request this time.
	parseAsyncB("PING\r\n")
	expectPong(crb)
	// Now get an activation token such that it will work, but will expire.
	barAC = jwt.NewAccountClaims(barPub)
	serviceImport = &jwt.Import{Account: fooPub, Subject: "foo", Type: jwt.Service}
	activation := jwt.NewActivationClaims(barPub)
	activation.ImportSubject = "foo"
	activation.ImportType = jwt.Service
	// Backdate issuance so the token is already valid, expiring one second out.
	activation.IssuedAt = time.Now().Add(-10 * time.Second).Unix()
	activation.Expires = time.Now().Add(time.Second).Unix()
	actJWT, err := activation.Encode(fooKP)
	if err != nil {
		t.Fatalf("Error generating activation token: %v", err)
	}
	serviceImport.Token = actJWT
	barAC.Imports.Add(serviceImport)
	barJWT, err = barAC.Encode(okp)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}
	addAccountToMemResolver(s, barPub, barJWT)
	acc, _ = s.LookupAccount(barPub)
	s.updateAccountClaims(acc, barAC)
	// Now it should work again.
	// Send Another Request
	parseAsyncA("PUB foo 3\r\nhi2\r\nPING\r\n")
	expectPong(cra)
	// We should receive the request. PING needed to flush.
	parseAsyncB("PING\r\n")
	expectMsg(crb, "foo", "hi2")
	// Now wait for it to expire, then retry.
	time.Sleep(1250 * time.Millisecond)
	// Send Another Request
	parseAsyncA("PUB foo 3\r\nhi3\r\nPING\r\n")
	expectPong(cra)
	// The activation has expired, so only the PONG comes back -- no message.
	parseAsyncB("PING\r\n")
	expectPong(crb)
}
// TestAccountURLResolver verifies that an account JWT can be fetched through
// a URL resolver pointing at an HTTP endpoint.
func TestAccountURLResolver(t *testing.T) {
	operatorKP, _ := nkeys.FromSeed(oSeed)

	// Mint an account JWT signed by the operator.
	accountKP, _ := nkeys.CreateAccount()
	accountPub, _ := accountKP.PublicKey()
	claims := jwt.NewAccountClaims(accountPub)
	accountJWT, err := claims.Encode(operatorKP)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}

	// Serve the JWT over HTTP for the resolver to fetch.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.Write([]byte(accountJWT))
	}))
	defer ts.Close()

	confTemplate := `
listen: -1
resolver: URL("%s/ngs/v1/accounts/jwt/")
`
	conf := createConfFile(t, []byte(fmt.Sprintf(confTemplate, ts.URL)))
	defer os.Remove(conf)

	s, opts := RunServerWithConfig(conf)
	defer s.Shutdown()
	operatorPub, _ := operatorKP.PublicKey()
	opts.TrustedKeys = []string{operatorPub}

	// The lookup should hit the URL resolver and yield the account.
	acc, _ := s.LookupAccount(accountPub)
	if acc == nil {
		t.Fatalf("Expected to receive an account")
	}
	if acc.Name != accountPub {
		t.Fatalf("Account name did not match claim key")
	}
}
// TestAccountURLResolverTimeout verifies that an account lookup through the
// URL resolver fails when the endpoint responds slower than the resolver's
// timeout.
func TestAccountURLResolverTimeout(t *testing.T) {
	operatorKP, _ := nkeys.FromSeed(oSeed)

	// Mint an account JWT signed by the operator.
	accountKP, _ := nkeys.CreateAccount()
	accountPub, _ := accountKP.PublicKey()
	claims := jwt.NewAccountClaims(accountPub)
	accountJWT, err := claims.Encode(operatorKP)
	if err != nil {
		t.Fatalf("Error generating account JWT: %v", err)
	}

	basePath := "/ngs/v1/accounts/jwt/"
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Health/base requests answer immediately.
		if r.URL.Path == basePath {
			w.Write([]byte("ok"))
			return
		}
		// Purposely be slow on account lookup.
		time.Sleep(2*time.Second + 200*time.Millisecond)
		w.Write([]byte(accountJWT))
	}))
	defer ts.Close()

	confTemplate := `
listen: -1
resolver: URL("%s%s")
`
	conf := createConfFile(t, []byte(fmt.Sprintf(confTemplate, ts.URL, basePath)))
	defer os.Remove(conf)

	s, opts := RunServerWithConfig(conf)
	defer s.Shutdown()
	operatorPub, _ := operatorKP.PublicKey()
	opts.TrustedKeys = []string{operatorPub}

	// The slow endpoint should cause the lookup to time out and return nil.
	acc, _ := s.LookupAccount(accountPub)
	if acc != nil {
		t.Fatalf("Expected to not receive an account due to timeout")
	}
}
1 | 8,925 | This feels like it should be left as is; instead, add a new createClientWithIssuers (or something similar) to avoid passing all the empty-string arguments. | nats-io-nats-server | go
@@ -129,6 +129,9 @@ public class SPRequestHandler {
private void handleError(String error) {
SalesforceSDKLogger.e(TAG, "Error received from IDP app: " + error);
+ if (authCallback != null) {
+ authCallback.receivedErrorResponse(error);
+ }
}
private void handleSuccess(String code, String loginUrl) { | 1 | /*
* Copyright (c) 2017-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.auth.idp;
import android.app.Activity;
import android.content.Intent;
import android.net.Uri;
import android.os.AsyncTask;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.auth.HttpAccess;
import com.salesforce.androidsdk.auth.OAuth2;
import com.salesforce.androidsdk.config.BootConfig;
import com.salesforce.androidsdk.security.SalesforceKeyGenerator;
import com.salesforce.androidsdk.ui.LoginActivity;
import com.salesforce.androidsdk.util.SalesforceSDKLogger;
import java.net.URI;
/**
 * This class performs requests made by the SP app based on the IDP app's response.
 *
 * @author bhariharan
 */
public class SPRequestHandler {

    public static final int IDP_REQUEST_CODE = 375;

    private static final String TAG = "SPRequestHandler";

    // PKCE-style verifier/challenge pair securing the IDP code exchange.
    private String codeVerifier;
    private String codeChallenge;
    private SPConfig spConfig;
    private LoginActivity.SPAuthCallback authCallback;

    /**
     * Parameterized constructor.
     *
     * @param loginUrl Login URL.
     * @param authCallback Auth callback.
     */
    public SPRequestHandler(String loginUrl, LoginActivity.SPAuthCallback authCallback) {
        this(loginUrl, null, authCallback);
    }

    /**
     * Parameterized constructor.
     *
     * @param loginUrl Login URL.
     * @param userHint User hint. Must be of the format 'orgId:userId', both being 18-char IDs.
     * @param authCallback Auth callback.
     */
    public SPRequestHandler(String loginUrl, String userHint, LoginActivity.SPAuthCallback authCallback) {
        codeVerifier = SalesforceKeyGenerator.getRandom128ByteKey();
        codeChallenge = SalesforceKeyGenerator.getSHA256Hash(codeVerifier);
        spConfig = buildSPConfig(loginUrl, userHint);
        this.authCallback = authCallback;
    }

    /**
     * Launches the IDP app.
     *
     * @param context Activity context.
     */
    public void launchIDPApp(Activity context) {
        final Intent intent = new Intent(Intent.ACTION_VIEW);
        intent.addCategory(Intent.CATEGORY_DEFAULT);
        intent.setData(Uri.parse(SalesforceSDKManager.getInstance().getIDPAppURIScheme()));
        intent.putExtra(IDPCodeGeneratorActivity.SP_CONFIG_BUNDLE_KEY, spConfig.toBundle());
        context.startActivityForResult(intent, IDP_REQUEST_CODE);
    }

    /**
     * Handles the response from the IDP app.
     *
     * @param resultCode Result code.
     * @param data Data returned from the IDP app.
     */
    public void handleIDPResponse(int resultCode, Intent data) {
        if (data == null) {
            handleError("No result received from IDP app");
        } else if (resultCode == Activity.RESULT_CANCELED) {
            final String error = data.getStringExtra(IDPCodeGeneratorActivity.ERROR_KEY);
            handleError(error);
        } else if (resultCode == Activity.RESULT_OK) {
            final String code = data.getStringExtra(IDPCodeGeneratorActivity.CODE_KEY);
            final String loginUrl = data.getStringExtra(IDPCodeGeneratorActivity.LOGIN_URL_KEY);
            handleSuccess(code, loginUrl);
        }
    }

    /**
     * Returns the SP config associated with this app.
     *
     * @return SPConfig instance.
     */
    public SPConfig getSpConfig() {
        return spConfig;
    }

    private SPConfig buildSPConfig(String loginUrl, String userHint) {
        final BootConfig bootConfig = BootConfig.getBootConfig(SalesforceSDKManager.getInstance().getAppContext());
        return new SPConfig(bootConfig.getRemoteAccessConsumerKey(), bootConfig.getOauthRedirectURI(),
                codeChallenge, bootConfig.getOauthScopes(), loginUrl, userHint);
    }

    private void handleError(String error) {
        SalesforceSDKLogger.e(TAG, "Error received from IDP app: " + error);

        /*
         * Fix: propagate the failure to the auth callback so the caller can
         * surface the error to the user instead of only logging it.
         */
        if (authCallback != null) {
            authCallback.receivedErrorResponse(error);
        }
    }

    private void handleSuccess(String code, String loginUrl) {
        // Exchange the authorization code for tokens off the main thread.
        new TokenEndpointTask(code, loginUrl).execute();
    }

    /**
     * Background task that hits the OAuth token endpoint with the authorization
     * code received from the IDP app and hands the token response (or an error)
     * back through the auth callback.
     */
    private class TokenEndpointTask extends AsyncTask<Void, Void, OAuth2.TokenEndpointResponse> {

        private String code;
        private String loginUrl;

        public TokenEndpointTask(String code, String loginUrl) {
            this.code = code;
            this.loginUrl = loginUrl;
        }

        @Override
        protected OAuth2.TokenEndpointResponse doInBackground(Void... nothings) {
            OAuth2.TokenEndpointResponse tokenResponse = null;
            try {
                // The code verifier generated at construction time proves this
                // app initiated the request tied to the code challenge.
                tokenResponse = OAuth2.getSPCredentials(HttpAccess.DEFAULT,
                        URI.create(loginUrl), spConfig.getOauthClientId(), code, codeVerifier,
                        spConfig.getOauthCallbackUrl());
            } catch (Exception e) {
                SalesforceSDKLogger.e(TAG, "Exception occurred while making token request", e);
                handleError(e.toString());
            }
            return tokenResponse;
        }

        @Override
        protected void onPostExecute(OAuth2.TokenEndpointResponse tokenResponse) {
            if (authCallback != null && tokenResponse != null) {
                authCallback.receivedTokenResponse(tokenResponse);
            }
        }
    }
}
| 1 | 16,562 | Minor unrelated bug in the IDP flow where the error wasn't getting displayed as a `Toast`. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -3,6 +3,7 @@ var assert = require('assert');
var Transform = require('stream').Transform;
const MongoError = require('../../lib/core').MongoError;
var MongoNetworkError = require('../../lib/core').MongoNetworkError;
+var mongoErrorContextSymbol = require('../../lib/core').mongoErrorContextSymbol;
var setupDatabase = require('./shared').setupDatabase;
var delay = require('./shared').delay;
var co = require('co'); | 1 | 'use strict';
var assert = require('assert');
var Transform = require('stream').Transform;
const MongoError = require('../../lib/core').MongoError;
var MongoNetworkError = require('../../lib/core').MongoNetworkError;
var setupDatabase = require('./shared').setupDatabase;
var delay = require('./shared').delay;
var co = require('co');
var mock = require('mongodb-mock-server');
const chai = require('chai');
const expect = chai.expect;
const sinon = require('sinon');
chai.use(require('chai-subset'));
// Aggregation pipeline applied to the change streams under test.
var pipeline = [
  // Add a constant field to every change event.
  { $addFields: { addedField: 'This is a field added using $addFields' } },
  // Strip the documentKey field from the event.
  { $project: { documentKey: false } },
  // Record that documentKey was projected out.
  { $addFields: { comment: 'The documentKey field has been projected out of this document.' } }
];
describe('Change Streams', function() {
before(function() {
return setupDatabase(this.configuration, ['integration_tests']);
});
beforeEach(function() {
const configuration = this.configuration;
const client = configuration.newClient();
return client
.connect()
.then(() => {
const db = client.db('integration_tests');
return db.createCollection('test');
})
.then(
() => client.close(),
() => client.close()
);
});
afterEach(() => mock.cleanup());
it('Should close the listeners after the cursor is closed', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
// The actual test we wish to run
test: function(done) {
let closed = false;
const close = _err => {
if (closed) {
return;
}
closed = true;
return client.close(() => done(_err));
};
const configuration = this.configuration;
const client = configuration.newClient();
client.connect((err, client) => {
expect(err).to.not.exist;
const coll = client.db('integration_tests').collection('listenertest');
const changeStream = coll.watch();
changeStream.on('change', () => {
const internalCursor = changeStream.cursor;
expect(internalCursor.listenerCount('data')).to.equal(1);
changeStream.close(err => {
expect(internalCursor.listenerCount('data')).to.equal(0);
close(err);
});
});
setTimeout(() => coll.insertOne({ x: 1 }));
changeStream.on('error', err => close(err));
});
}
});
it('Should create a Change Stream on a collection and emit `change` events', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
// The actual test we wish to run
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect(function(err, client) {
expect(err).to.not.exist;
const collection = client.db('integration_tests').collection('docsDataEvent');
const changeStream = collection.watch(pipeline);
changeStream.cursor.once('response', () => {
// Trigger the first database event
collection.insertOne({ d: 4 }, function(err) {
assert.ifError(err);
// Trigger the second database event
collection.updateOne({ d: 4 }, { $inc: { d: 2 } }, function(err) {
assert.ifError(err);
});
});
});
let count = 0;
const cleanup = _err => {
changeStream.removeAllListeners('change');
changeStream.close(err => client.close(cerr => done(_err || err || cerr)));
};
// Attach first event listener
changeStream.on('change', function(change) {
try {
if (count === 0) {
count += 1;
expect(change).to.containSubset({
operationType: 'insert',
fullDocument: { d: 4 },
ns: {
db: 'integration_tests',
coll: 'docsDataEvent'
},
comment: 'The documentKey field has been projected out of this document.'
});
expect(change).to.not.have.property('documentKey');
return;
}
expect(change).to.containSubset({
operationType: 'update',
updateDescription: {
updatedFields: { d: 6 }
}
});
cleanup();
} catch (e) {
cleanup(e);
}
});
});
}
});
it(
'Should create a Change Stream on a collection and get change events through imperative callback form',
{
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
const client = configuration.newClient();
client.connect(function(err, client) {
assert.ifError(err);
var collection = client.db('integration_tests').collection('docsCallback');
var changeStream = collection.watch(pipeline);
// Fetch the change notification
changeStream.hasNext(function(err, hasNext) {
assert.ifError(err);
assert.equal(true, hasNext);
changeStream.next(function(err, change) {
assert.ifError(err);
assert.equal(change.operationType, 'insert');
assert.equal(change.fullDocument.e, 5);
assert.equal(change.ns.db, 'integration_tests');
assert.equal(change.ns.coll, 'docsCallback');
assert.ok(!change.documentKey);
assert.equal(
change.comment,
'The documentKey field has been projected out of this document.'
);
// Trigger the second database event
collection.update({ e: 5 }, { $inc: { e: 2 } }, function(err) {
assert.ifError(err);
changeStream.hasNext(function(err, hasNext) {
assert.ifError(err);
assert.equal(true, hasNext);
changeStream.next(function(err, change) {
assert.ifError(err);
assert.equal(change.operationType, 'update');
// Close the change stream
changeStream.close(err => client.close(cerr => done(err || cerr)));
});
});
});
});
});
// Trigger the first database event
// NOTE: this needs to be triggered after the changeStream call so
// that the cursor is run
collection.insert({ e: 5 }, function(err, result) {
assert.ifError(err);
assert.equal(result.insertedCount, 1);
});
});
}
}
);
// Verifies three change streams can be open at once — one on 'simultaneous1'
// and two on 'simultaneous2' — and that each receives the insert event with
// its own $addFields stage (changeStreamNumber) applied independently.
it('Should support creating multiple simultaneous Change Streams', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    const client = configuration.newClient();
    client.connect(function(err, client) {
      assert.ifError(err);
      var theDatabase = client.db('integration_tests');
      var theCollection1 = theDatabase.collection('simultaneous1');
      var theCollection2 = theDatabase.collection('simultaneous2');
      var thisChangeStream1, thisChangeStream2, thisChangeStream3;
      // Trigger the inserts on the next tick, after the streams below exist
      setTimeout(() => {
        theCollection1.insert({ a: 1 }).then(function() {
          return theCollection2.insert({ a: 1 });
        });
      });
      Promise.resolve()
        .then(function() {
          thisChangeStream1 = theCollection1.watch([{ $addFields: { changeStreamNumber: 1 } }]);
          thisChangeStream2 = theCollection2.watch([{ $addFields: { changeStreamNumber: 2 } }]);
          thisChangeStream3 = theCollection2.watch([{ $addFields: { changeStreamNumber: 3 } }]);
          return Promise.all([
            thisChangeStream1.hasNext(),
            thisChangeStream2.hasNext(),
            thisChangeStream3.hasNext()
          ]);
        })
        .then(function(hasNexts) {
          // Check all the Change Streams have a next item
          assert.ok(hasNexts[0]);
          assert.ok(hasNexts[1]);
          assert.ok(hasNexts[2]);
          return Promise.all([
            thisChangeStream1.next(),
            thisChangeStream2.next(),
            thisChangeStream3.next()
          ]);
        })
        .then(function(changes) {
          // Check the values of the change documents are correct
          assert.equal(changes[0].operationType, 'insert');
          assert.equal(changes[1].operationType, 'insert');
          assert.equal(changes[2].operationType, 'insert');
          assert.equal(changes[0].fullDocument.a, 1);
          assert.equal(changes[1].fullDocument.a, 1);
          assert.equal(changes[2].fullDocument.a, 1);
          assert.equal(changes[0].ns.db, 'integration_tests');
          assert.equal(changes[1].ns.db, 'integration_tests');
          assert.equal(changes[2].ns.db, 'integration_tests');
          assert.equal(changes[0].ns.coll, 'simultaneous1');
          assert.equal(changes[1].ns.coll, 'simultaneous2');
          assert.equal(changes[2].ns.coll, 'simultaneous2');
          assert.equal(changes[0].changeStreamNumber, 1);
          assert.equal(changes[1].changeStreamNumber, 2);
          assert.equal(changes[2].changeStreamNumber, 3);
          return Promise.all([
            thisChangeStream1.close(),
            thisChangeStream2.close(),
            thisChangeStream3.close()
          ]);
        })
        .then(() => client.close())
        .then(function() {
          done();
        })
        // BUGFIX: previously `.catch(err => assert.ifError(err))`, which re-threw
        // inside the catch handler — an unhandled rejection that left the test to
        // time out instead of failing. Report the error to mocha directly.
        .catch(done);
    });
  }
});
// Verifies close() tears down the underlying cursor: before closing both the
// stream and its cursor report not-closed; afterwards isClosed() is true and
// the cursor reference has been cleared.
it('Should properly close Change Stream cursor', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    const client = configuration.newClient();
    client.connect(function(err, client) {
      assert.ifError(err);
      var theDatabase = client.db('integration_tests');
      var thisChangeStream = theDatabase.collection('changeStreamCloseTest').watch(pipeline);
      assert.equal(thisChangeStream.isClosed(), false);
      assert.equal(thisChangeStream.cursor.isClosed(), false);
      thisChangeStream.close(function(err) {
        assert.ifError(err);
        // Check the cursor is closed
        assert.equal(thisChangeStream.isClosed(), true);
        assert.ok(!thisChangeStream.cursor);
        client.close(() => done());
      });
    });
  }
});
// Verifies that watching with an unknown aggregation stage name surfaces a
// server error on the first next() call. The exact server message is not
// asserted (see the commented-out check referencing SERVER-29137).
it(
  'Should error when attempting to create a Change Stream with a forbidden aggregation pipeline stage',
  {
    metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
    // The actual test we wish to run
    test: function(done) {
      var configuration = this.configuration;
      const client = configuration.newClient();
      client.connect(function(err, client) {
        assert.ifError(err);
        var theDatabase = client.db('integration_tests');
        var changeStream = theDatabase
          .collection('forbiddenStageTest')
          .watch([{ $alksdjfhlaskdfjh: 2 }]);
        changeStream.next(function(err) {
          assert.ok(err);
          assert.ok(err.message);
          // assert.ok(err.message.indexOf('SOME ERROR MESSAGE HERE ONCE SERVER-29137 IS DONE') > -1);
          changeStream.close(err => client.close(cerr => done(err || cerr)));
        });
      });
    }
  }
);
// SKIPPED: verifies (callback form) that after next() delivers a change, the
// stream's cached resumeToken equals that change's _id.
it.skip('Should cache the change stream resume token using imperative callback form', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    const client = configuration.newClient();
    client.connect(function(err, client) {
      assert.ifError(err);
      var theDatabase = client.db('integration_tests');
      var thisChangeStream = theDatabase.collection('cacheResumeTokenCallback').watch(pipeline);
      // Trigger the first database event
      setTimeout(() => {
        theDatabase
          .collection('cacheResumeTokenCallback')
          .insert({ b: 2 }, function(err, result) {
            assert.ifError(err);
            assert.equal(result.insertedCount, 1);
          });
      });
      // Fetch the change notification
      thisChangeStream.hasNext(function(err, hasNext) {
        assert.ifError(err);
        assert.equal(true, hasNext);
        thisChangeStream.next(function(err, change) {
          assert.ifError(err);
          assert.deepEqual(thisChangeStream.resumeToken, change._id);
          // Close the change stream
          thisChangeStream.close(err => client.close(cerr => done(err || cerr)));
        });
      });
    });
  }
});
// SKIPPED: promise-form variant of the resume-token caching check — after
// next() resolves, resumeToken must deep-equal the change's _id.
it.skip('Should cache the change stream resume token using promises', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  // The actual test we wish to run
  test: function() {
    var configuration = this.configuration;
    const client = configuration.newClient();
    return client.connect().then(function() {
      var theDatabase = client.db('integration_tests');
      var thisChangeStream = theDatabase.collection('cacheResumeTokenPromise').watch(pipeline);
      setTimeout(() => {
        // Trigger the first database event
        theDatabase.collection('cacheResumeTokenPromise').insert({ b: 2 }, function(err, result) {
          assert.ifError(err);
          assert.equal(result.insertedCount, 1);
          // Fetch the change notification
        });
      });
      return thisChangeStream
        .hasNext()
        .then(function(hasNext) {
          assert.equal(true, hasNext);
          return thisChangeStream.next();
        })
        .then(function(change) {
          assert.deepEqual(thisChangeStream.resumeToken, change._id);
          // Close the change stream
          return thisChangeStream.close().then(() => client.close());
        });
    });
  }
});
// SKIPPED: event-listener variant of the resume-token caching check — the
// 'change' event handler asserts resumeToken deep-equals the change's _id.
it.skip('Should cache the change stream resume token using event listeners', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    const client = configuration.newClient();
    client.connect(function(err, client) {
      assert.ifError(err);
      var theDatabase = client.db('integration_tests');
      var thisChangeStream = theDatabase.collection('cacheResumeTokenListener').watch(pipeline);
      thisChangeStream.once('change', function(change) {
        assert.deepEqual(thisChangeStream.resumeToken, change._id);
        // Close the change stream
        thisChangeStream.close().then(() => client.close(done));
      });
      setTimeout(() => {
        // Trigger the first database event
        theDatabase
          .collection('cacheResumeTokenListener')
          .insert({ b: 2 }, function(err, result) {
            assert.ifError(err);
            assert.equal(result.insertedCount, 1);
          });
      });
    });
  }
});
// Verifies (callback form) that a pipeline projecting out _id — the resume
// token — makes next() return an error, since the driver cannot resume
// without it.
it(
  'Should error if resume token projected out of change stream document using imperative callback form',
  {
    metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
    // The actual test we wish to run
    test: function(done) {
      var configuration = this.configuration;
      const client = configuration.newClient();
      client.connect(function(err, client) {
        assert.ifError(err);
        var theDatabase = client.db('integration_tests');
        var thisChangeStream = theDatabase
          .collection('resumetokenProjectedOutCallback')
          .watch([{ $project: { _id: false } }]);
        // Trigger the first database event
        setTimeout(() => {
          theDatabase
            .collection('resumetokenProjectedOutCallback')
            .insert({ b: 2 }, function(err, result) {
              expect(err).to.not.exist;
              expect(result.insertedCount).to.equal(1);
            });
        }, 250);
        // Fetch the change notification
        thisChangeStream.next(function(err) {
          expect(err).to.exist;
          // Close the change stream
          thisChangeStream.close().then(() => client.close(done));
        });
      });
    }
  }
);
// Verifies (event-listener form) the same _id-projected-out failure: no
// 'change' event may ever fire (the handler asserts false) and an 'error'
// event is expected instead.
it('Should error if resume token projected out of change stream document using event listeners', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    const client = configuration.newClient();
    client.connect(function(err, client) {
      assert.ifError(err);
      var theDatabase = client.db('integration_tests');
      var thisChangeStream = theDatabase
        .collection('resumetokenProjectedOutListener')
        .watch([{ $project: { _id: false } }]);
      // Fetch the change notification
      thisChangeStream.on('change', function() {
        assert.ok(false);
      });
      thisChangeStream.on('error', function(err) {
        expect(err).to.exist;
        thisChangeStream.close(() => client.close(done));
      });
      // Trigger the first database event
      setTimeout(() => {
        theDatabase
          .collection('resumetokenProjectedOutListener')
          .insert({ b: 2 }, function(err, result) {
            assert.ifError(err);
            assert.equal(result.insertedCount, 1);
          });
      });
    });
  }
});
// Verifies that renaming the watched collection invalidates the stream: after
// the first insert change, a rename is triggered; a second listener waits for
// an 'invalidate' change, after which the server closes the stream ('close').
it('Should invalidate change stream on collection rename using event listeners', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    const client = configuration.newClient();
    client.connect(function(err, client) {
      assert.ifError(err);
      var database = client.db('integration_tests');
      var changeStream = database
        .collection('invalidateListeners')
        .watch(pipeline, { batchSize: 1 });
      // Attach first event listener
      changeStream.once('change', function(change) {
        assert.equal(change.operationType, 'insert');
        assert.equal(change.fullDocument.a, 1);
        assert.equal(change.ns.db, 'integration_tests');
        assert.equal(change.ns.coll, 'invalidateListeners');
        assert.ok(!change.documentKey);
        assert.equal(
          change.comment,
          'The documentKey field has been projected out of this document.'
        );
        // Attach second event listener
        changeStream.on('change', function(change) {
          if (change.operationType === 'invalidate') {
            // now expect the server to close the stream
            changeStream.once('close', () => client.close(done));
          }
        });
        // Trigger the second database event
        setTimeout(() => {
          database
            .collection('invalidateListeners')
            .rename('renamedDocs', { dropTarget: true }, function(err) {
              assert.ifError(err);
            });
        }, 250);
      });
      // Trigger the first database event
      setTimeout(() => {
        database.collection('invalidateListeners').insert({ a: 1 }, function(err) {
          assert.ifError(err);
        });
      });
    });
  }
});
// Verifies that dropping the database invalidates the stream (callback form):
// after the insert change, the database is dropped and next() is polled
// recursively until an 'invalidate' change arrives; the stream must then
// report hasNext=false and isClosed=true.
it('Should invalidate change stream on database drop using imperative callback form', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    const client = configuration.newClient();
    client.connect(function(err, client) {
      assert.ifError(err);
      var database = client.db('integration_tests');
      var changeStream = database.collection('invalidateCallback').watch(pipeline);
      // Trigger the first database event
      setTimeout(() => {
        database.collection('invalidateCallback').insert({ a: 1 }, function(err) {
          assert.ifError(err);
        });
      });
      return changeStream.next(function(err, change) {
        assert.ifError(err);
        assert.equal(change.operationType, 'insert');
        database.dropDatabase(function(err) {
          assert.ifError(err);
          // Asserts the stream is fully exhausted and closed after invalidation.
          function completeStream() {
            changeStream.hasNext(function(err, hasNext) {
              assert.equal(hasNext, false);
              assert.equal(changeStream.isClosed(), true);
              client.close(done);
            });
          }
          // Polls next() until the invalidate change is observed; intervening
          // changes (e.g. drop events) are skipped.
          function checkInvalidate() {
            changeStream.next(function(err, change) {
              assert.ifError(err);
              // Check the cursor invalidation has occured
              if (change.operationType === 'invalidate') {
                return completeStream();
              }
              checkInvalidate();
            });
          }
          checkInvalidate();
        });
      });
    });
  }
});
// Verifies that dropping the watched collection invalidates the stream
// (promise form): after the insert change, the collection is dropped,
// checkInvalidate() recursively awaits the 'invalidate' change, and the
// stream must then report hasNext=false and isClosed=true.
it('Should invalidate change stream on collection drop using promises', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    const client = configuration.newClient();
    // Resolves once an 'invalidate' change is observed, skipping any
    // intervening change documents.
    function checkInvalidate(changeStream) {
      return changeStream.next().then(change => {
        if (change.operationType === 'invalidate') {
          return Promise.resolve();
        }
        return checkInvalidate(changeStream);
      });
    }
    client.connect(function(err, client) {
      assert.ifError(err);
      var database = client.db('integration_tests');
      var changeStream = database.collection('invalidateCollectionDropPromises').watch(pipeline);
      // Trigger the first database event
      setTimeout(() => {
        return database
          .collection('invalidateCollectionDropPromises')
          .insert({ a: 1 })
          .then(function() {
            return delay(200);
          });
      }, 200);
      return changeStream
        .next()
        .then(function(change) {
          assert.equal(change.operationType, 'insert');
          return database.dropCollection('invalidateCollectionDropPromises');
        })
        .then(() => checkInvalidate(changeStream))
        .then(() => changeStream.hasNext())
        .then(function(hasNext) {
          assert.equal(hasNext, false);
          assert.equal(changeStream.isClosed(), true);
          client.close(done);
        })
        // BUGFIX: previously `.catch(err => assert.ifError(err))`, which re-threw
        // inside the catch handler — an unhandled rejection that left the test to
        // time out instead of failing. Report the error to mocha directly.
        .catch(done);
    });
  }
});
// Verifies (promise form) that when the mock server destroys the connection
// for every non-ismaster command, changeStream.next() rejects with a
// MongoNetworkError (message contains 'closed') rather than resolving.
it('Should return MongoNetworkError after first retry attempt fails using promises', {
  metadata: {
    requires: {
      generators: true,
      topology: 'single',
      mongodb: '>=3.5.10'
    }
  },
  test: function(done) {
    var configuration = this.configuration;
    const ObjectId = configuration.require.ObjectId;
    // Contain mock server
    var primaryServer = null;
    // Default message fields
    var defaultFields = {
      setName: 'rs',
      setVersion: 1,
      electionId: new ObjectId(0),
      maxBsonObjectSize: 16777216,
      maxMessageSizeBytes: 48000000,
      maxWriteBatchSize: 1000,
      localTime: new Date(),
      maxWireVersion: 4,
      minWireVersion: 0,
      ok: 1,
      hosts: ['localhost:32000', 'localhost:32001', 'localhost:32002']
    };
    co(function*() {
      primaryServer = yield mock.createServer(32000, 'localhost');
      primaryServer.setMessageHandler(request => {
        var doc = request.document;
        if (doc.ismaster) {
          request.reply(
            Object.assign(
              {
                ismaster: true,
                secondary: false,
                me: 'localhost:32000',
                primary: 'localhost:32000',
                tags: { loc: 'ny' }
              },
              defaultFields
            )
          );
        } else {
          // kill the connection, simulating a network error
          request.connection.destroy();
        }
      });
    });
    const mockServerURL = 'mongodb://localhost:32000/';
    const client = configuration.newClient(mockServerURL);
    client.connect(function(err, client) {
      assert.ifError(err);
      var database = client.db('integration_tests');
      var collection = database.collection('MongoNetworkErrorTestPromises');
      var changeStream = collection.watch(pipeline);
      return changeStream
        .next()
        .then(function() {
          // We should never execute this line because calling thisChangeStream.next() should throw an error
          throw new Error(
            'ChangeStream.next() returned a change document but it should have returned a MongoNetworkError'
          );
        })
        .catch(function(err) {
          assert.ok(
            err instanceof MongoNetworkError,
            'error was not instance of MongoNetworkError'
          );
          assert.ok(err.message);
          assert.ok(err.message.indexOf('closed') > -1);
          changeStream.close(function(err) {
            assert.ifError(err);
            changeStream.close();
            // running = false;
            primaryServer.destroy();
            client.close(() => mock.cleanup(() => done()));
          });
        })
        .catch(err => done(err));
    });
  }
});
// Verifies (callback form) that when the mock server stops replying (only
// ismaster/endSessions are answered) and socketTimeoutMS is 500, next()
// yields a MongoNetworkError whose message contains 'timed out' and no
// change document.
it('Should return MongoNetworkError after first retry attempt fails using callbacks', {
  metadata: {
    requires: {
      generators: true,
      topology: 'single',
      mongodb: '>=3.5.10'
    }
  },
  test: function(done) {
    var configuration = this.configuration;
    const ObjectId = configuration.require.ObjectId;
    // Contain mock server
    var primaryServer = null;
    // Default message fields
    var defaultFields = {
      setName: 'rs',
      setVersion: 1,
      electionId: new ObjectId(0),
      maxBsonObjectSize: 16777216,
      maxMessageSizeBytes: 48000000,
      maxWriteBatchSize: 1000,
      localTime: new Date(),
      maxWireVersion: 4,
      minWireVersion: 0,
      ok: 1,
      hosts: ['localhost:32000', 'localhost:32001', 'localhost:32002']
    };
    // Die
    // NOTE(review): `die` is never set to true in this test, so the destroy
    // branch below is effectively dead here — the failure mode exercised is
    // the socket timeout from unanswered requests.
    var die = false;
    co(function*() {
      primaryServer = yield mock.createServer(32000, 'localhost');
      primaryServer.setMessageHandler(request => {
        var doc = request.document;
        if (die) {
          request.connection.destroy();
        } else if (doc.ismaster) {
          request.reply(
            Object.assign(
              {
                ismaster: true,
                secondary: false,
                me: 'localhost:32000',
                primary: 'localhost:32000',
                tags: { loc: 'ny' }
              },
              defaultFields
            )
          );
        } else if (doc.endSessions) {
          request.reply({ ok: 1 });
        }
        // Do not respond to other requests
      });
    });
    const client = configuration.newClient('mongodb://localhost:32000/', {
      socketTimeoutMS: 500,
      validateOptions: true
    });
    client.connect(function(err, client) {
      assert.ifError(err);
      var theDatabase = client.db('integration_tests');
      var theCollection = theDatabase.collection('MongoNetworkErrorTestPromises');
      var thisChangeStream = theCollection.watch(pipeline);
      thisChangeStream.next(function(err, change) {
        assert.ok(err instanceof MongoNetworkError);
        assert.ok(err.message);
        assert.ok(err.message.indexOf('timed out') > -1);
        assert.equal(
          change,
          null,
          'ChangeStream.next() returned a change document but it should have returned a MongoNetworkError'
        );
        thisChangeStream.close(function(err) {
          assert.ifError(err);
          thisChangeStream.close();
          client.close(() => mock.cleanup(() => done()));
        });
      });
    });
  }
});
// Verifies that the change stream transparently resumes after a resumable
// (network) error: the mock replies to `aggregate` with a change document
// but never answers `getMore`, so the 500ms socket timeout forces a resume.
// The second change must come from a NEW cursor (the `track` tag set on the
// first cursor is absent) and exactly one getMore must have been attempted.
it('Should resume Change Stream when a resumable error is encountered', {
  metadata: {
    requires: {
      generators: true,
      topology: 'single',
      mongodb: '>=3.5.10'
    }
  },
  test: function(done) {
    var configuration = this.configuration;
    const ObjectId = configuration.require.ObjectId;
    const Timestamp = configuration.require.Timestamp;
    const Long = configuration.require.Long;
    // Contain mock server
    var primaryServer = null;
    // Default message fields
    var defaultFields = {
      setName: 'rs',
      setVersion: 1,
      electionId: new ObjectId(0),
      maxBsonObjectSize: 16777216,
      maxMessageSizeBytes: 48000000,
      maxWriteBatchSize: 1000,
      localTime: new Date(),
      maxWireVersion: 4,
      minWireVersion: 0,
      ok: 1,
      hosts: ['localhost:32000', 'localhost:32001', 'localhost:32002']
    };
    // Counts getMore attempts so the test can assert only one was made
    var callsToGetMore = 0;
    // Boot the mock
    co(function*() {
      primaryServer = yield mock.createServer(32000, 'localhost');
      var counter = 0;
      primaryServer.setMessageHandler(request => {
        var doc = request.document;
        // Create a server that responds to the initial aggregation to connect to the server, but not to subsequent getMore requests
        if (doc.ismaster) {
          request.reply(
            Object.assign(
              {
                ismaster: true,
                secondary: false,
                me: 'localhost:32000',
                primary: 'localhost:32000',
                tags: { loc: 'ny' }
              },
              defaultFields
            )
          );
        } else if (doc.getMore) {
          callsToGetMore++;
        } else if (doc.aggregate) {
          var changeDoc = {
            _id: {
              ts: new Timestamp(4, 1501511802),
              ns: 'integration_tests.docsDataEvent',
              _id: new ObjectId('597f407a8fd4abb616feca93')
            },
            operationType: 'insert',
            ns: {
              db: 'integration_tests',
              coll: 'docsDataEvent'
            },
            fullDocument: {
              _id: new ObjectId('597f407a8fd4abb616feca93'),
              a: 1,
              counter: counter++
            }
          };
          request.reply({
            ok: 1,
            cursor: {
              id: new Long(1407, 1407),
              firstBatch: [changeDoc]
            }
          });
        } else if (doc.endSessions) {
          request.reply({ ok: 1 });
        }
      });
    });
    let finalError = undefined;
    const client = configuration.newClient('mongodb://localhost:32000/', {
      socketTimeoutMS: 500,
      validateOptions: true
    });
    client
      .connect()
      .then(client => {
        var database = client.db('integration_tests');
        var collection = database.collection('MongoNetworkErrorTestPromises');
        var changeStream = collection.watch(pipeline);
        return changeStream
          .next()
          .then(function(change) {
            assert.ok(change);
            assert.equal(change.operationType, 'insert');
            assert.equal(change.fullDocument.counter, 0);
            // Add a tag to the cursor
            changeStream.cursor.track = 1;
            return changeStream.next();
          })
          .then(function(change) {
            assert.ok(change);
            assert.equal(change.operationType, 'insert');
            assert.equal(change.fullDocument.counter, 1);
            // Check this cursor doesn't have the tag added earlier (therefore it is a new cursor)
            assert.notEqual(changeStream.cursor.track, 1);
            // Check that only one getMore call was made
            assert.equal(callsToGetMore, 1);
            // BUGFIX: previously `primaryServer.destroy` was passed uninvoked
            // (Promise.all resolved with the bare function and the server was
            // never destroyed here). Invoke it; mock.cleanup() below still
            // runs afterwards — presumably tolerant of an already-destroyed
            // server, as in the other mock tests.
            return Promise.all([changeStream.close(), primaryServer.destroy()]).then(() =>
              client.close()
            );
          });
      })
      .catch(err => (finalError = err))
      .then(() => mock.cleanup())
      .catch(err => (finalError = err))
      .then(() => done(finalError));
  }
});
// Verifies user-supplied resumeAfter: the first stream consumes three insert
// changes and saves the first change's _id; a second stream opened with
// { resumeAfter: thatToken } must replay only the second and third changes.
it('Should resume from point in time using user-provided resumeAfter', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  // The actual test we wish to run
  test: function() {
    var configuration = this.configuration;
    const client = configuration.newClient();
    return client.connect().then(client => {
      var database = client.db('integration_tests');
      var collection = database.collection('resumeAfterTest2');
      var firstChangeStream, secondChangeStream;
      var resumeToken;
      var docs = [{ a: 0 }, { a: 1 }, { a: 2 }];
      // Trigger the first database event
      firstChangeStream = collection.watch(pipeline);
      setTimeout(() => {
        return collection
          .insert(docs[0])
          .then(function(result) {
            assert.equal(result.insertedCount, 1);
            return collection.insert(docs[1]);
          })
          .then(function(result) {
            assert.equal(result.insertedCount, 1);
            return collection.insert(docs[2]);
          })
          .then(function(result) {
            assert.equal(result.insertedCount, 1);
            return delay(200);
          });
      });
      return firstChangeStream
        .hasNext()
        .then(function(hasNext) {
          assert.equal(true, hasNext);
          return firstChangeStream.next();
        })
        .then(function(change) {
          assert.equal(change.operationType, 'insert');
          assert.equal(change.fullDocument.a, docs[0].a);
          // Save the resumeToken
          resumeToken = change._id;
          return firstChangeStream.next();
        })
        .then(function(change) {
          assert.equal(change.operationType, 'insert');
          assert.equal(change.fullDocument.a, docs[1].a);
          return firstChangeStream.next();
        })
        .then(function(change) {
          assert.equal(change.operationType, 'insert');
          assert.equal(change.fullDocument.a, docs[2].a);
          return firstChangeStream.close();
        })
        .then(function() {
          secondChangeStream = collection.watch(pipeline, {
            resumeAfter: resumeToken
          });
          return delay(200);
        })
        .then(function() {
          return secondChangeStream.hasNext();
        })
        .then(function(hasNext) {
          assert.equal(true, hasNext);
          return secondChangeStream.next();
        })
        .then(function(change) {
          assert.equal(change.operationType, 'insert');
          assert.equal(change.fullDocument.a, docs[1].a);
          return secondChangeStream.next();
        })
        .then(function(change) {
          assert.equal(change.operationType, 'insert');
          assert.equal(change.fullDocument.a, docs[2].a);
          return secondChangeStream.close();
        })
        .then(() => client.close());
    });
  }
});
// Verifies { fullDocument: 'updateLookup' }: an update change carries the
// full post-image of the document (both original field f and updated field c).
it('Should support full document lookup', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  // The actual test we wish to run
  test: function() {
    var configuration = this.configuration;
    const client = configuration.newClient();
    return client.connect().then(client => {
      var database = client.db('integration_tests');
      var collection = database.collection('fullDocumentLookup');
      var changeStream = collection.watch(pipeline, {
        fullDocument: 'updateLookup'
      });
      setTimeout(() => {
        return collection.insert({ f: 128 }).then(function(result) {
          assert.equal(result.insertedCount, 1);
        });
      });
      return changeStream
        .hasNext()
        .then(function(hasNext) {
          assert.equal(true, hasNext);
          return changeStream.next();
        })
        .then(function(change) {
          assert.equal(change.operationType, 'insert');
          assert.equal(change.fullDocument.f, 128);
          assert.equal(change.ns.db, database.databaseName);
          assert.equal(change.ns.coll, collection.collectionName);
          assert.ok(!change.documentKey);
          assert.equal(
            change.comment,
            'The documentKey field has been projected out of this document.'
          );
          return collection.update({ f: 128 }, { $set: { c: 2 } });
        })
        .then(function() {
          return changeStream.next();
        })
        .then(function(change) {
          assert.equal(change.operationType, 'update');
          // Check the correct fullDocument is present
          assert.ok(change.fullDocument);
          assert.equal(change.fullDocument.f, 128);
          assert.equal(change.fullDocument.c, 2);
          return changeStream.close().then(() => client.close());
        });
    });
  }
});
// Verifies updateLookup against a deleted document: after insert + delete,
// the delete change has no looked-up document (lookedUpDocument is null).
it('Should support full document lookup with deleted documents', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  // The actual test we wish to run
  test: function() {
    var configuration = this.configuration;
    const client = configuration.newClient();
    return client.connect().then(client => {
      var database = client.db('integration_tests');
      var collection = database.collection('fullLookupTest');
      var changeStream = collection.watch(pipeline, {
        fullDocument: 'updateLookup'
      });
      // Trigger the first database event
      setTimeout(() => {
        return collection
          .insert({ i: 128 })
          .then(function(result) {
            assert.equal(result.insertedCount, 1);
            return collection.deleteOne({ i: 128 });
          })
          .then(function(result) {
            assert.equal(result.result.n, 1);
          });
      });
      return changeStream
        .hasNext()
        .then(function(hasNext) {
          assert.equal(true, hasNext);
          return changeStream.next();
        })
        .then(function(change) {
          assert.equal(change.operationType, 'insert');
          assert.equal(change.fullDocument.i, 128);
          assert.equal(change.ns.db, database.databaseName);
          assert.equal(change.ns.coll, collection.collectionName);
          assert.ok(!change.documentKey);
          assert.equal(
            change.comment,
            'The documentKey field has been projected out of this document.'
          );
          // Trigger the second database event
          // NOTE(review): this update matches nothing (the doc was already
          // deleted above), so the next change observed is the delete.
          return collection.update({ i: 128 }, { $set: { c: 2 } });
        })
        .then(function() {
          return changeStream.hasNext();
        })
        .then(function(hasNext) {
          assert.equal(true, hasNext);
          return changeStream.next();
        })
        .then(function(change) {
          assert.equal(change.operationType, 'delete');
          // Check the full lookedUpDocument is present
          assert.equal(change.lookedUpDocument, null);
          return changeStream.close();
        })
        .then(() => client.close());
    });
  }
});
// Verifies read-preference inheritance for change stream cursors: from the
// db handle, from the collection handle, and from explicit watch() options
// (options win over collection, collection over db).
it('Should create Change Streams with correct read preferences', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  // The actual test we wish to run
  test: function() {
    var configuration = this.configuration;
    var ReadPreference = configuration.require.ReadPreference;
    const client = configuration.newClient();
    return client.connect().then(client => {
      // Should get preference from database
      var database = client.db('integration_tests', {
        readPreference: ReadPreference.PRIMARY_PREFERRED
      });
      var changeStream0 = database.collection('docs0').watch(pipeline);
      assert.deepEqual(
        changeStream0.cursor.readPreference.preference,
        ReadPreference.PRIMARY_PREFERRED
      );
      // Should get preference from collection
      var collection = database.collection('docs1', {
        readPreference: ReadPreference.SECONDARY_PREFERRED
      });
      var changeStream1 = collection.watch(pipeline);
      assert.deepEqual(
        changeStream1.cursor.readPreference.preference,
        ReadPreference.SECONDARY_PREFERRED
      );
      // Should get preference from Change Stream options
      var changeStream2 = collection.watch(pipeline, {
        readPreference: ReadPreference.NEAREST
      });
      assert.deepEqual(changeStream2.cursor.readPreference.preference, ReadPreference.NEAREST);
      return Promise.all([
        changeStream0.close(),
        changeStream1.close(),
        changeStream2.close()
      ]).then(() => client.close());
    });
  }
});
// Verifies stream({ transform: JSON.stringify }) piped into a PassThrough:
// the piped data parses back to JSON and carries the inserted document.
it('Should support piping of Change Streams', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  // The actual test we wish to run
  test: function(done) {
    const configuration = this.configuration;
    const stream = require('stream');
    const client = configuration.newClient();
    client.connect(function(err, client) {
      assert.ifError(err);
      const theDatabase = client.db('integration_tests');
      const theCollection = theDatabase.collection('pipeTest');
      const thisChangeStream = theCollection.watch(pipeline);
      const outStream = new stream.PassThrough({ objectMode: true });
      // Make a stream transforming to JSON and piping to the file
      thisChangeStream.stream({ transform: JSON.stringify }).pipe(outStream);
      // Shared teardown: close the stream and client, reporting the first error.
      function close(_err) {
        thisChangeStream.close(err => client.close(cErr => done(_err || err || cErr)));
      }
      outStream
        .on('data', data => {
          try {
            const parsedEvent = JSON.parse(data);
            assert.equal(parsedEvent.fullDocument.a, 1);
            close();
          } catch (e) {
            close(e);
          }
        })
        .on('error', close);
      // Trigger the database event after the pipe is set up
      setTimeout(() => {
        theCollection.insert({ a: 1 }, function(err) {
          assert.ifError(err);
        });
      });
    });
  }
});
// SKIPPED: verifies that piping continues across a resumable error. A mock
// server answers both `aggregate` and `getMore` with incrementing change
// documents; the stream is piped as JSON into a temp file and fs.watch
// confirms data keeps arriving.
it.skip('Should resume piping of Change Streams when a resumable error is encountered', {
  metadata: {
    requires: {
      generators: true,
      topology: 'single',
      mongodb: '>=3.5.10'
    }
  },
  test: function(done) {
    var configuration = this.configuration;
    const ObjectId = configuration.require.ObjectId;
    const Timestamp = configuration.require.Timestamp;
    const Long = configuration.require.Long;
    // Contain mock server
    var primaryServer = null;
    // Default message fields
    var defaultFields = {
      setName: 'rs',
      setVersion: 1,
      electionId: new ObjectId(0),
      maxBsonObjectSize: 16777216,
      maxMessageSizeBytes: 48000000,
      maxWriteBatchSize: 1000,
      localTime: new Date(),
      maxWireVersion: 4,
      minWireVersion: 0,
      ok: 1,
      hosts: ['localhost:32000', 'localhost:32001', 'localhost:32002']
    };
    co(function*() {
      primaryServer = yield mock.createServer();
      var counter = 0;
      primaryServer.setMessageHandler(request => {
        var doc = request.document;
        // Create a server that responds to the initial aggregation to connect to the server, but not to subsequent getMore requests
        if (doc.ismaster) {
          request.reply(
            Object.assign(
              {
                ismaster: true,
                secondary: false,
                me: primaryServer.uri(),
                primary: primaryServer.uri(),
                tags: { loc: 'ny' }
              },
              defaultFields
            )
          );
        } else if (doc.getMore) {
          var changeDoc = {
            cursor: {
              id: new Long(1407, 1407),
              nextBatch: [
                {
                  _id: {
                    ts: new Timestamp(4, 1501511802),
                    ns: 'integration_tests.docsDataEvent',
                    _id: new ObjectId('597f407a8fd4abb616feca93')
                  },
                  operationType: 'insert',
                  ns: {
                    db: 'integration_tests',
                    coll: 'docsDataEvent'
                  },
                  fullDocument: {
                    _id: new ObjectId('597f407a8fd4abb616feca93'),
                    a: 1,
                    counter: counter++
                  }
                }
              ]
            },
            ok: 1
          };
          request.reply(changeDoc, {
            cursorId: new Long(1407, 1407)
          });
        } else if (doc.aggregate) {
          changeDoc = {
            _id: {
              ts: new Timestamp(4, 1501511802),
              ns: 'integration_tests.docsDataEvent',
              _id: new ObjectId('597f407a8fd4abb616feca93')
            },
            operationType: 'insert',
            ns: {
              db: 'integration_tests',
              coll: 'docsDataEvent'
            },
            fullDocument: {
              _id: new ObjectId('597f407a8fd4abb616feca93'),
              a: 1,
              counter: counter++
            }
          };
          request.reply({
            ok: 1,
            cursor: {
              id: new Long(1407, 1407),
              firstBatch: [changeDoc]
            }
          });
        } else if (doc.endSessions) {
          request.reply({ ok: 1 });
        }
      });
      const client = configuration.newClient(`mongodb://${primaryServer.uri()}/`, {
        socketTimeoutMS: 500,
        validateOptions: true
      });
      client.connect(function(err, client) {
        assert.ifError(err);
        var fs = require('fs');
        var theDatabase = client.db('integration_tests5');
        var theCollection = theDatabase.collection('MongoNetworkErrorTestPromises');
        var thisChangeStream = theCollection.watch(pipeline);
        var filename = '/tmp/_nodemongodbnative_resumepipe.txt';
        var outStream = fs.createWriteStream(filename);
        thisChangeStream.stream({ transform: JSON.stringify }).pipe(outStream);
        // Listen for changes to the file
        var watcher = fs.watch(filename, function(eventType) {
          assert.equal(eventType, 'change');
          var fileContents = fs.readFileSync(filename, 'utf8');
          var parsedFileContents = JSON.parse(fileContents);
          assert.equal(parsedFileContents.fullDocument.a, 1);
          watcher.close();
          thisChangeStream.close(function(err) {
            assert.ifError(err);
            mock.cleanup(() => done());
          });
        });
      });
    });
  }
});
// Verifies chaining the change stream through multiple pipes: JSON-stringify
// transform -> aes192 cipher -> decipher; the round-tripped bytes must parse
// back to the original insert change document.
it('Should support piping of Change Streams through multiple pipes', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var crypto = require('crypto');
    const client = configuration.newClient(configuration.url(), {
      poolSize: 1,
      autoReconnect: false
    });
    client.connect(function(err, client) {
      assert.ifError(err);
      var cipher = crypto.createCipher('aes192', 'a password');
      var decipher = crypto.createDecipher('aes192', 'a password');
      var theDatabase = client.db('integration_tests');
      var theCollection = theDatabase.collection('multiPipeTest');
      var thisChangeStream = theCollection.watch(pipeline);
      // Make a stream transforming to JSON and piping to the file
      var basicStream = thisChangeStream.pipe(
        new Transform({
          transform: (data, encoding, callback) => callback(null, JSON.stringify(data)),
          objectMode: true
        })
      );
      var pipedStream = basicStream.pipe(cipher).pipe(decipher);
      var dataEmitted = '';
      pipedStream.on('data', function(data) {
        dataEmitted += data.toString();
        // Work around poor compatibility with crypto cipher
        thisChangeStream.cursor.emit('end');
      });
      pipedStream.on('end', function() {
        var parsedData = JSON.parse(dataEmitted.toString());
        assert.equal(parsedData.operationType, 'insert');
        assert.equal(parsedData.fullDocument.a, 1407);
        basicStream.emit('close');
        thisChangeStream.close(err => client.close(cErr => done(err || cErr)));
      });
      pipedStream.on('error', function(err) {
        done(err);
      });
      // Trigger the database event after the pipe chain is set up
      setTimeout(() => {
        theCollection.insert({ a: 1407 }, function(err) {
          if (err) done(err);
        });
      });
    });
  }
});
it('Should resume after a killCursors command is issued for its child cursor', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  test: function(done) {
    const configuration = this.configuration;
    const client = configuration.newClient();
    const collectionName = 'resumeAfterKillCursor';

    let db;
    let coll;
    let changeStream;

    // Close the stream and client before signalling mocha, forwarding any error.
    function close(e) {
      changeStream.close(() => client.close(() => done(e)));
    }

    client
      .connect()
      .then(() => (db = client.db('integration_tests')))
      .then(() => (coll = db.collection(collectionName)))
      .then(() => (changeStream = coll.watch()))
      // Start a next() *before* inserting so the first change is captured.
      .then(() => ({ p: changeStream.next() }))
      .then(x => coll.insertOne({ darmok: 'jalad' }).then(() => x.p))
      // Kill the server-side cursor out from under the change stream...
      .then(() =>
        db.command({
          killCursors: collectionName,
          cursors: [changeStream.cursor.cursorState.cursorId]
        })
      )
      // ...then verify the stream transparently resumes and sees the next insert.
      .then(() => coll.insertOne({ shaka: 'walls fell' }))
      .then(() => changeStream.next())
      .then(change => {
        expect(change).to.have.property('operationType', 'insert');
        expect(change).to.have.nested.property('fullDocument.shaka', 'walls fell');
      })
      .then(
        () => close(),
        e => close(e)
      );
  }
});
it('should maintain change stream options on resume', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  test: function(done) {
    const configuration = this.configuration;
    const client = configuration.newClient();
    const collectionName = 'resumeAfterKillCursor';

    // The options the cursor must carry into any resume attempt.
    const changeStreamOptions = {
      fullDocument: 'updateLookup',
      collation: { maxVariable: 'punct' },
      maxAwaitTimeMS: 20000,
      batchSize: 200
    };

    let changeStream;

    // Close the stream and client, then report the outcome to mocha.
    function close(e) {
      changeStream.close(() => client.close(() => done(e)));
    }

    client
      .connect()
      .then(() => {
        const db = client.db('integration_tests');
        const coll = db.collection(collectionName);
        changeStream = coll.watch([], changeStreamOptions);

        // The cursor's resume command must reflect the original options.
        expect(changeStream.cursor.resumeOptions).to.containSubset(changeStreamOptions);
      })
      .then(
        () => close(),
        e => close(e)
      );
  }
});
it('Should include a startAtOperationTime field when resuming if no changes have been received', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.7.3' } },
  test: function(done) {
    const configuration = this.configuration;
    const ObjectId = configuration.require.ObjectId;
    const Timestamp = configuration.require.Timestamp;
    const Long = configuration.require.Long;

    // Fixed operation time stamped onto every mock reply; the resumed
    // aggregate is expected to echo it back as startAtOperationTime.
    const OPERATION_TIME = new Timestamp(4, 1501511802);

    const makeIsMaster = server => ({
      __nodejs_mock_server__: true,
      ismaster: true,
      secondary: false,
      me: server.uri(),
      primary: server.uri(),
      tags: { loc: 'ny' },
      setName: 'rs',
      setVersion: 1,
      electionId: new ObjectId(0),
      maxBsonObjectSize: 16777216,
      maxMessageSizeBytes: 48000000,
      maxWriteBatchSize: 1000,
      localTime: new Date(),
      maxWireVersion: 7,
      minWireVersion: 0,
      ok: 1,
      hosts: [server.uri()],
      operationTime: OPERATION_TIME,
      $clusterTime: {
        clusterTime: OPERATION_TIME
      }
    });

    const AGGREGATE_RESPONSE = {
      ok: 1,
      cursor: {
        firstBatch: [],
        id: new Long('9064341847921713401'),
        ns: 'test.test'
      },
      operationTime: OPERATION_TIME,
      $clusterTime: {
        clusterTime: OPERATION_TIME
      }
    };

    const CHANGE_DOC = {
      _id: {
        ts: OPERATION_TIME,
        ns: 'integration_tests.docsDataEvent',
        _id: new ObjectId('597f407a8fd4abb616feca93')
      },
      operationType: 'insert',
      ns: {
        db: 'integration_tests',
        coll: 'docsDataEvent'
      },
      fullDocument: {
        _id: new ObjectId('597f407a8fd4abb616feca93'),
        a: 1,
        counter: 0
      }
    };

    const GET_MORE_RESPONSE = {
      ok: 1,
      cursor: {
        nextBatch: [CHANGE_DOC],
        id: new Long('9064341847921713401'),
        ns: 'test.test'
      },
      cursorId: new Long('9064341847921713401')
    };

    const dbName = 'integration_tests';
    const collectionName = 'resumeWithStartAtOperationTime';
    const connectOptions = {
      validateOptions: true,
      monitorCommands: true
    };

    let getMoreCounter = 0;
    let changeStream;
    let server;
    let client;

    // Self-disarming finisher so only the first outcome reaches done().
    let finish = err => {
      finish = () => {};
      Promise.resolve()
        .then(() => changeStream && changeStream.close())
        .then(() => client && client.close())
        .then(() => done(err));
    };

    function primaryServerHandler(request) {
      try {
        const doc = request.document;
        if (doc.ismaster) {
          return request.reply(makeIsMaster(server));
        } else if (doc.aggregate) {
          return request.reply(AGGREGATE_RESPONSE);
        } else if (doc.getMore) {
          // Fail the first getMore to force the change stream to resume.
          if (getMoreCounter++ === 0) {
            request.reply({ ok: 0 });
            return;
          }

          request.reply(GET_MORE_RESPONSE);
        } else if (doc.endSessions) {
          request.reply({ ok: 1 });
        } else if (doc.killCursors) {
          request.reply({ ok: 1 });
        }
      } catch (e) {
        finish(e);
      }
    }

    const started = [];

    mock
      .createServer()
      .then(_server => (server = _server))
      .then(() => server.setMessageHandler(primaryServerHandler))
      .then(() => (client = configuration.newClient(`mongodb://${server.uri()}`, connectOptions)))
      .then(() => client.connect())
      .then(() => {
        client.on('commandStarted', e => {
          if (e.commandName === 'aggregate') {
            started.push(e);
          }
        });
      })
      .then(() => client.db(dbName))
      .then(db => db.collection(collectionName))
      .then(col => col.watch(pipeline))
      .then(_changeStream => (changeStream = _changeStream))
      .then(() => changeStream.next())
      .then(() => {
        // First aggregate: no resume fields at all. Second (resume) aggregate:
        // startAtOperationTime (not resumeAfter), matching OPERATION_TIME.
        const first = started[0].command;
        expect(first).to.have.nested.property('pipeline[0].$changeStream');
        const firstStage = first.pipeline[0].$changeStream;
        expect(firstStage).to.not.have.property('resumeAfter');
        expect(firstStage).to.not.have.property('startAtOperationTime');

        const second = started[1].command;
        expect(second).to.have.nested.property('pipeline[0].$changeStream');
        const secondStage = second.pipeline[0].$changeStream;
        expect(secondStage).to.not.have.property('resumeAfter');
        expect(secondStage).to.have.property('startAtOperationTime');
        expect(secondStage.startAtOperationTime.equals(OPERATION_TIME)).to.be.ok;
      })
      .then(
        () => finish(),
        err => finish(err)
      );
  }
});
it('should not resume when error includes error label NonRetryableChangeStreamError', function() {
  let server;
  let client;
  let changeStream;

  // Best-effort cleanup; re-throws the original error (if any) after closing.
  function teardown(e) {
    return Promise.resolve()
      .then(() => changeStream && changeStream.close())
      .catch(() => {})
      .then(() => client && client.close())
      .catch(() => {})
      .then(() => e && Promise.reject(e));
  }

  const db = 'foobar';
  const coll = 'foobar';
  const ns = `${db}.${coll}`;

  let aggregateCount = 0;
  let getMoreCount = 0;

  function messageHandler(request) {
    const doc = request.document;

    if (doc.ismaster) {
      request.reply(
        Object.assign({}, mock.DEFAULT_ISMASTER_36, {
          ismaster: true,
          secondary: false,
          me: server.uri(),
          primary: server.uri()
        })
      );
    } else if (doc.aggregate) {
      aggregateCount += 1;
      request.reply({
        ok: 1,
        cursor: {
          firstBatch: [],
          id: 1,
          ns
        }
      });
    } else if (doc.getMore) {
      // First getMore fails with the non-retryable label; a resume attempt
      // would issue a second aggregate, which the assertions below forbid.
      if (getMoreCount === 0) {
        getMoreCount += 1;
        request.reply({
          ok: 0,
          errorLabels: ['NonRetryableChangeStreamError']
        });
      } else {
        getMoreCount += 1;
        request.reply({
          ok: 1,
          cursor: {
            nextBatch: [
              {
                _id: {},
                operationType: 'insert',
                ns: { db, coll },
                fullDocument: { a: 1 }
              }
            ],
            id: 1,
            ns
          }
        });
      }
    } else {
      request.reply({ ok: 1 });
    }
  }

  return mock
    .createServer()
    .then(_server => (server = _server))
    .then(() => server.setMessageHandler(messageHandler))
    .then(() => (client = this.configuration.newClient(`mongodb://${server.uri()}`)))
    .then(() => client.connect())
    .then(
      () =>
        (changeStream = client
          .db(db)
          .collection(coll)
          .watch())
    )
    .then(() => changeStream.next())
    .then(
      () => Promise.reject('Expected changeStream to not resume'),
      err => {
        expect(err).to.be.an.instanceOf(MongoError);
        expect(err.hasErrorLabel('NonRetryableChangeStreamError')).to.be.true;
        // Exactly one aggregate and one getMore: no resume was attempted.
        expect(aggregateCount).to.equal(1);
        expect(getMoreCount).to.equal(1);
      }
    )
    .then(() => teardown(), teardown);
});
it('should emit close event after error event', {
  metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
  test: function(done) {
    const configuration = this.configuration;
    const client = configuration.newClient();
    const closeSpy = sinon.spy();

    client.connect(function(err, client) {
      expect(err).to.not.exist;
      const db = client.db('integration_tests');
      const coll = db.collection('event_test');

      // This will cause an error because the _id will be projected out, which causes the following error:
      // "A change stream document has been received that lacks a resume token (_id)."
      const changeStream = coll.watch([{ $project: { _id: false } }]);

      // 'change' must never fire: the first document already lacks an _id.
      changeStream.on('change', changeDoc => {
        expect(changeDoc).to.be.null;
      });

      changeStream.on('error', err => {
        expect(err).to.exist;
        // By the time close() completes, 'close' must already have fired once.
        changeStream.close(() => {
          expect(closeSpy.calledOnce).to.be.true;
          client.close(done);
        });
      });

      changeStream.on('close', closeSpy);

      // Trigger the first database event
      setTimeout(() => {
        coll.insertOne({ a: 1 }, (err, result) => {
          expect(err).to.not.exist;
          expect(result.insertedCount).to.equal(1);
        });
      });
    });
  }
});
describe('should properly handle a changeStream event being processed mid-close', function() {
  let client, coll;

  // Inserts three documents to generate change events for the tests below.
  function write() {
    return Promise.resolve()
      .then(() => coll.insertOne({ a: 1 }))
      .then(() => coll.insertOne({ b: 2 }))
      .then(() => coll.insertOne({ c: 3 }));
  }

  beforeEach(function() {
    client = this.configuration.newClient();
    return client.connect().then(_client => {
      client = _client;
      coll = client.db(this.configuration.db).collection('tester');
    });
  });

  afterEach(function() {
    coll = undefined;
    if (client) {
      return client.close().then(() => {
        client = undefined;
      });
    }
  });

  it.skip('when invoked with promises', {
    metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
    test: function() {
      function read() {
        const changeStream = coll.watch();
        return Promise.resolve()
          .then(() => changeStream.next())
          .then(() => changeStream.next())
          .then(() => {
            // Close while a third next() is still outstanding; the pending
            // promise is expected to reject with "ChangeStream is closed".
            const nextP = changeStream.next();
            return changeStream.close().then(() => nextP);
          });
      }

      return Promise.all([read(), write()]).then(
        () => Promise.reject(new Error('Expected operation to fail with error')),
        err => expect(err.message).to.equal('ChangeStream is closed')
      );
    }
  });

  it.skip('when invoked with callbacks', {
    metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
    test: function(done) {
      const changeStream = coll.watch();
      changeStream.next(() => {
        changeStream.next(() => {
          // Third next() races with close() below; it must report the
          // closed-stream error rather than hang.
          changeStream.next(err => {
            let _err = null;
            try {
              expect(err.message).to.equal('ChangeStream is closed');
            } catch (e) {
              _err = e;
            } finally {
              done(_err);
            }
          });
          changeStream.close();
        });
      });

      write().catch(() => {});
    }
  });

  it('when invoked using eventEmitter API', {
    metadata: { requires: { topology: 'replicaset', mongodb: '>=3.5.10' } },
    test: function(done) {
      // Guard so done() is invoked at most once across the racing paths.
      let closed = false;
      const close = _err => {
        if (closed) {
          return;
        }
        closed = true;
        return done(_err);
      };

      const changeStream = coll.watch();

      // Close after the second event; a third 'change' emission is a failure.
      let counter = 0;
      changeStream.on('change', () => {
        counter += 1;
        if (counter === 2) {
          changeStream.close();
          setTimeout(() => close());
        } else if (counter >= 3) {
          close(new Error('Should not have received more than 2 events'));
        }
      });
      changeStream.on('error', err => close(err));

      setTimeout(() => write().catch(() => {}));
    }
  });
});
describe('resumeToken', function() {
/**
 * Drives a mongodb-mock-server instance for the resume-token tests. The
 * `commandIterators` generators decide, per aggregate/getMore, how many
 * change documents and whether a postBatchResumeToken appears in each
 * cursor batch.
 */
class MockServerManager {
  constructor(config, commandIterators) {
    this.config = config;
    this.cmdList = new Set(['ismaster', 'endSessions', 'aggregate', 'getMore']);
    this.database = 'test_db';
    this.collection = 'test_coll';
    this.ns = `${this.database}.${this.collection}`;
    this._timestampCounter = 0;
    this.cursorId = new this.config.require.Long('9064341847921713401');

    this.commandIterators = commandIterators;

    this.promise = this.init();
  }

  /** Starts the mock server and connects a command-monitored client. */
  init() {
    return mock.createServer().then(server => {
      this.server = server;
      this.server.setMessageHandler(request => {
        const doc = request.document;

        const opname = Object.keys(doc)[0];
        let response = { ok: 0 };
        if (this.cmdList.has(opname) && this[opname]) {
          response = this[opname](doc);
        }
        request.reply(this.applyOpTime(response));
      });

      this.client = this.config.newClient(this.mongodbURI, { monitorCommands: true });
      return this.client.connect().then(() => {
        this.apm = { started: [], succeeded: [], failed: [] };

        // Record APM events, but only for the commands under test.
        [
          ['commandStarted', this.apm.started],
          ['commandSucceeded', this.apm.succeeded],
          ['commandFailed', this.apm.failed]
        ].forEach(opts => {
          const eventName = opts[0];
          const target = opts[1];

          this.client.on(eventName, e => {
            if (e.commandName === 'aggregate' || e.commandName === 'getMore') {
              target.push(e);
            }
          });
        });
      });
    });
  }

  /** Opens a change stream and records every resumeTokenChanged event. */
  makeChangeStream(options) {
    this.changeStream = this.client
      .db(this.database)
      .collection(this.collection)
      .watch(options);
    this.resumeTokenChangedEvents = [];

    this.changeStream.on('resumeTokenChanged', resumeToken => {
      this.resumeTokenChangedEvents.push({ resumeToken });
    });

    return this.changeStream;
  }

  /** Best-effort close of stream and client; re-throws `e` if provided. */
  teardown(e) {
    let promise = Promise.resolve();
    if (this.changeStream) {
      // NOTE: `.catch()` with no handler is a no-op and does not suppress
      // rejections; an empty handler is required to actually ignore close
      // errors here.
      promise = promise.then(() => this.changeStream.close()).catch(() => {});
    }
    if (this.client) {
      promise = promise.then(() => this.client.close()).catch(() => {});
    }
    return promise.then(function() {
      if (e) {
        throw e;
      }
    });
  }

  ready() {
    return this.promise;
  }

  get mongodbURI() {
    return `mongodb://${this.server.uri()}`;
  }

  // Handlers for specific commands

  ismaster() {
    const uri = this.server.uri();
    return Object.assign({}, mock.DEFAULT_ISMASTER_36, {
      ismaster: true,
      secondary: false,
      me: uri,
      primary: uri,
      setName: 'rs',
      localTime: new Date(),
      ok: 1,
      hosts: [uri]
    });
  }

  endSessions() {
    return { ok: 1 };
  }

  aggregate() {
    let cursor;
    try {
      cursor = this._buildCursor('aggregate', 'firstBatch');
    } catch (e) {
      return { ok: 0, errmsg: e.message };
    }

    return {
      ok: 1,
      cursor
    };
  }

  getMore() {
    let cursor;
    try {
      cursor = this._buildCursor('getMore', 'nextBatch');
    } catch (e) {
      return { ok: 0, errmsg: e.message };
    }

    return {
      ok: 1,
      cursor,
      cursorId: this.cursorId
    };
  }

  // Helpers

  /** Monotonically increasing BSON timestamp for operation/cluster times. */
  timestamp() {
    return new this.config.require.Timestamp(this._timestampCounter++, Date.now());
  }

  /** Stamps operationTime/$clusterTime onto a response document. */
  applyOpTime(obj) {
    const operationTime = this.timestamp();

    return Object.assign({}, obj, {
      $clusterTime: { clusterTime: operationTime },
      operationTime
    });
  }

  /**
   * Builds the cursor sub-document for the next aggregate/getMore reply,
   * consuming one step of the corresponding command iterator. Throws when
   * the iterator is exhausted, which the handlers convert into `{ ok: 0 }`.
   */
  _buildCursor(type, batchKey) {
    const config = this.commandIterators[type].next().value;
    if (!config) {
      throw new Error('no more config for ' + type);
    }

    const batch = Array.from({ length: config.numDocuments || 0 }).map(() =>
      this.changeEvent()
    );
    const cursor = {
      [batchKey]: batch,
      id: this.cursorId,
      ns: this.ns
    };
    if (config.postBatchResumeToken) {
      cursor.postBatchResumeToken = this.resumeToken();
    }
    return cursor;
  }

  /** A change event carrying a fresh resume token as its _id. */
  changeEvent(operationType, fullDocument) {
    fullDocument = fullDocument || {};
    return {
      _id: this.resumeToken(),
      operationType,
      ns: {
        db: this.database,
        coll: this.collection
      },
      fullDocument
    };
  }

  /** A unique resume-token-shaped document. */
  resumeToken() {
    return {
      ts: this.timestamp(),
      // Was `this.namespace`, which is never assigned (always undefined);
      // the constructor stores the namespace as `this.ns`.
      ns: this.ns,
      _id: new this.config.require.ObjectId()
    };
  }
}
// For a ChangeStream under these conditions:
//   - Running against a server >=4.0.7.
//   - The batch is empty or has been iterated to the last document.
// Expected result:
//   - getResumeToken must return the postBatchResumeToken from the current command response.
describe('for emptied batch on server >= 4.0.7', function() {
  it('must return the postBatchResumeToken from the current command response', function() {
    const manager = new MockServerManager(this.configuration, {
      aggregate: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: true };
      })(),
      getMore: (function*() {
        yield { numDocuments: 1, postBatchResumeToken: true };
      })()
    });

    return manager
      .ready()
      .then(() => {
        return manager.makeChangeStream().next();
      })
      .then(
        () => manager.teardown(),
        err => manager.teardown(err)
      )
      .then(() => {
        const tokens = manager.resumeTokenChangedEvents.map(e => e.resumeToken);
        // Cursor sub-document of each succeeded reply ({} if absent).
        const successes = manager.apm.succeeded.map(e => {
          try {
            return e.reply.cursor;
          } catch (e) {
            return {};
          }
        });

        expect(successes).to.have.a.lengthOf(2);
        expect(successes[0]).to.have.a.property('postBatchResumeToken');
        expect(successes[1]).to.have.a.property('postBatchResumeToken');
        expect(successes[1]).to.have.a.nested.property('nextBatch[0]._id');

        expect(tokens).to.have.a.lengthOf(2);
        expect(tokens[0]).to.deep.equal(successes[0].postBatchResumeToken);
        expect(tokens[1])
          .to.deep.equal(successes[1].postBatchResumeToken)
          .and.to.not.deep.equal(successes[1].nextBatch[0]._id);
      });
  });
});
// For a ChangeStream under these conditions:
//   - Running against a server <4.0.7.
//   - The batch is empty or has been iterated to the last document.
// Expected result:
//   - getResumeToken must return the _id of the last document returned if one exists.
//   - getResumeToken must return startAfter from the initial aggregate if the option was specified.
//   - getResumeToken must return resumeAfter from the initial aggregate if the option was specified.
//   - If neither the startAfter nor resumeAfter options were specified, the getResumeToken result must be empty.
describe('for emptied batch on server <= 4.0.7', function() {
  it('must return the _id of the last document returned if one exists', function() {
    const manager = new MockServerManager(this.configuration, {
      aggregate: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: false };
      })(),
      getMore: (function*() {
        yield { numDocuments: 1, postBatchResumeToken: false };
      })()
    });

    return manager
      .ready()
      .then(() => manager.makeChangeStream().next())
      .then(
        () => manager.teardown(),
        err => manager.teardown(err)
      )
      .then(() => {
        const tokens = manager.resumeTokenChangedEvents.map(e => e.resumeToken);
        const successes = manager.apm.succeeded.map(e => {
          try {
            return e.reply.cursor;
          } catch (e) {
            return {};
          }
        });

        expect(successes).to.have.a.lengthOf(2);
        expect(successes[1]).to.have.a.nested.property('nextBatch[0]._id');

        expect(tokens).to.have.a.lengthOf(1);
        expect(tokens[0]).to.deep.equal(successes[1].nextBatch[0]._id);
      });
  });

  it('must return startAfter from the initial aggregate if the option was specified', function() {
    const manager = new MockServerManager(this.configuration, {
      aggregate: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: false };
      })(),
      getMore: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: false };
      })()
    });
    let token;
    const startAfter = manager.resumeToken();
    const resumeAfter = manager.resumeToken();

    return manager
      .ready()
      .then(() => {
        return new Promise(resolve => {
          const changeStream = manager.makeChangeStream({ startAfter, resumeAfter });
          // Sample the token after the second server response (the getMore).
          let counter = 0;
          changeStream.cursor.on('response', () => {
            if (counter === 1) {
              token = changeStream.resumeToken;
              resolve();
            }
            counter += 1;
          });

          // Note: this is expected to fail
          changeStream.next().catch(() => {});
        });
      })
      .then(
        () => manager.teardown(),
        err => manager.teardown(err)
      )
      .then(() => {
        expect(token)
          .to.deep.equal(startAfter)
          .and.to.not.deep.equal(resumeAfter);
      });
  });

  it('must return resumeAfter from the initial aggregate if the option was specified', function() {
    const manager = new MockServerManager(this.configuration, {
      aggregate: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: false };
      })(),
      getMore: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: false };
      })()
    });
    let token;
    const resumeAfter = manager.resumeToken();

    return manager
      .ready()
      .then(() => {
        return new Promise(resolve => {
          const changeStream = manager.makeChangeStream({ resumeAfter });
          let counter = 0;
          changeStream.cursor.on('response', () => {
            if (counter === 1) {
              token = changeStream.resumeToken;
              resolve();
            }
            counter += 1;
          });

          // Note: this is expected to fail
          changeStream.next().catch(() => {});
        });
      })
      .then(
        () => manager.teardown(),
        err => manager.teardown(err)
      )
      .then(() => {
        expect(token).to.deep.equal(resumeAfter);
      });
  });

  it('must be empty if neither the startAfter nor resumeAfter options were specified', function() {
    const manager = new MockServerManager(this.configuration, {
      aggregate: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: false };
      })(),
      getMore: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: false };
      })()
    });
    let token;

    return manager
      .ready()
      .then(() => {
        return new Promise(resolve => {
          const changeStream = manager.makeChangeStream();
          let counter = 0;
          changeStream.cursor.on('response', () => {
            if (counter === 1) {
              token = changeStream.resumeToken;
              resolve();
            }
            counter += 1;
          });

          // Note: this is expected to fail
          changeStream.next().catch(() => {});
        });
      })
      .then(
        () => manager.teardown(),
        err => manager.teardown(err)
      )
      .then(() => {
        expect(token).to.not.exist;
      });
  });
});
// For a ChangeStream under these conditions:
//   - The batch is not empty.
//   - The batch has been iterated up to but not including the last element.
// Expected result:
//   - getResumeToken must return the _id of the previous document returned.
describe('for non-empty batch iterated up to but not including the last element', function() {
  it('must return the _id of the previous document returned', function() {
    const manager = new MockServerManager(this.configuration, {
      aggregate: (function*() {
        yield { numDocuments: 2, postBatchResumeToken: true };
      })(),
      getMore: (function*() {})()
    });

    return manager
      .ready()
      .then(() => {
        // next() consumes only the first of the two documents in the batch.
        return manager.makeChangeStream().next();
      })
      .then(
        () => manager.teardown(),
        err => manager.teardown(err)
      )
      .then(() => {
        const tokens = manager.resumeTokenChangedEvents.map(e => e.resumeToken);
        const successes = manager.apm.succeeded.map(e => {
          try {
            return e.reply.cursor;
          } catch (e) {
            return {};
          }
        });

        expect(successes).to.have.a.lengthOf(1);
        expect(successes[0]).to.have.a.nested.property('firstBatch[0]._id');
        expect(successes[0]).to.have.a.property('postBatchResumeToken');

        expect(tokens).to.have.a.lengthOf(1);
        expect(tokens[0])
          .to.deep.equal(successes[0].firstBatch[0]._id)
          .and.to.not.deep.equal(successes[0].postBatchResumeToken);
      });
  });
});
// For a ChangeStream under these conditions:
//   - The batch is not empty.
//   - The batch hasn't been iterated at all.
//   - Only the initial aggregate command has been executed.
// Expected result:
//   - getResumeToken must return startAfter from the initial aggregate if the option was specified.
//   - getResumeToken must return resumeAfter from the initial aggregate if the option was specified.
//   - If neither the startAfter nor resumeAfter options were specified, the getResumeToken result must be empty.
describe('for non-empty non-iterated batch where only the initial aggregate command has been executed', function() {
  it('must return startAfter from the initial aggregate if the option was specified', function() {
    const manager = new MockServerManager(this.configuration, {
      aggregate: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: false };
      })(),
      getMore: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: false };
      })()
    });
    let token;
    const startAfter = manager.resumeToken();
    const resumeAfter = manager.resumeToken();

    return manager
      .ready()
      .then(() => {
        return new Promise(resolve => {
          const changeStream = manager.makeChangeStream({ startAfter, resumeAfter });
          // Sample the token right after the aggregate's first response.
          changeStream.cursor.once('response', () => {
            token = changeStream.resumeToken;
            resolve();
          });

          // Note: this is expected to fail
          changeStream.next().catch(() => {});
        });
      })
      .then(
        () => manager.teardown(),
        err => manager.teardown(err)
      )
      .then(() => {
        expect(token)
          .to.deep.equal(startAfter)
          .and.to.not.deep.equal(resumeAfter);
      });
  });

  it('must return resumeAfter from the initial aggregate if the option was specified', function() {
    const manager = new MockServerManager(this.configuration, {
      aggregate: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: false };
      })(),
      getMore: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: false };
      })()
    });
    let token;
    const resumeAfter = manager.resumeToken();

    return manager
      .ready()
      .then(() => {
        return new Promise(resolve => {
          const changeStream = manager.makeChangeStream({ resumeAfter });
          changeStream.cursor.once('response', () => {
            token = changeStream.resumeToken;
            resolve();
          });

          // Note: this is expected to fail
          changeStream.next().catch(() => {});
        });
      })
      .then(
        () => manager.teardown(),
        err => manager.teardown(err)
      )
      .then(() => {
        expect(token).to.deep.equal(resumeAfter);
      });
  });

  it('must be empty if neither the startAfter nor resumeAfter options were specified', function() {
    const manager = new MockServerManager(this.configuration, {
      aggregate: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: false };
      })(),
      getMore: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: false };
      })()
    });
    let token;

    return manager
      .ready()
      .then(() => {
        return new Promise(resolve => {
          const changeStream = manager.makeChangeStream();
          changeStream.cursor.once('response', () => {
            token = changeStream.resumeToken;
            resolve();
          });

          // Note: this is expected to fail
          changeStream.next().catch(() => {});
        });
      })
      .then(
        () => manager.teardown(),
        err => manager.teardown(err)
      )
      .then(() => {
        expect(token).to.not.exist;
      });
  });
});
// For a ChangeStream under these conditions:
//   - Running against a server >=4.0.7.
//   - The batch is not empty.
//   - The batch hasn't been iterated at all.
//   - The stream has iterated beyond a previous batch and a getMore command has just been executed.
// Expected result:
//   - getResumeToken must return the postBatchResumeToken from the previous command response.
describe('for non-empty non-iterated batch where getMore has just been executed against server >=4.0.7', function() {
  it('must return the postBatchResumeToken from the previous command response', function() {
    const manager = new MockServerManager(this.configuration, {
      aggregate: (function*() {
        yield { numDocuments: 1, postBatchResumeToken: true };
      })(),
      getMore: (function*() {
        yield { numDocuments: 1, postBatchResumeToken: true };
      })()
    });
    let token;
    const startAfter = manager.resumeToken();
    const resumeAfter = manager.resumeToken();

    return manager
      .ready()
      .then(() => {
        // Iterate past the aggregate's single-document batch first.
        return manager.makeChangeStream({ startAfter, resumeAfter }).next();
      })
      .then(() => {
        manager.changeStream.cursor.once('response', () => {
          token = manager.changeStream.resumeToken;
        });

        // Note: this is expected to fail
        return manager.changeStream.next();
      })
      .then(
        () => manager.teardown(),
        err => manager.teardown(err)
      )
      .then(() => {
        const successes = manager.apm.succeeded.map(e => {
          try {
            return e.reply.cursor;
          } catch (e) {
            return {};
          }
        });

        expect(successes).to.have.a.lengthOf(2);
        expect(successes[0]).to.have.a.property('postBatchResumeToken');
        expect(successes[0]).to.have.a.nested.property('firstBatch[0]._id');

        expect(token)
          .to.deep.equal(successes[0].postBatchResumeToken)
          .and.to.not.deep.equal(successes[0].firstBatch[0]._id)
          .and.to.not.deep.equal(startAfter)
          .and.to.not.deep.equal(resumeAfter);
      });
  });
});
// For a ChangeStream under these conditions:
//   - Running against a server <4.0.7.
//   - The batch is not empty.
//   - The batch hasn't been iterated at all.
//   - The stream has iterated beyond a previous batch and a getMore command has just been executed.
// Expected result:
//   - getResumeToken must return the _id of the previous document returned if one exists.
//   - getResumeToken must return startAfter from the initial aggregate if the option was specified.
//   - getResumeToken must return resumeAfter from the initial aggregate if the option was specified.
//   - If neither the startAfter nor resumeAfter options were specified, the getResumeToken result must be empty.
describe('for non-empty non-iterated batch where getMore has just been executed against server < 4.0.7', function() {
  it('must return the _id of the previous document returned if one exists', function() {
    const manager = new MockServerManager(this.configuration, {
      aggregate: (function*() {
        yield { numDocuments: 1, postBatchResumeToken: false };
      })(),
      getMore: (function*() {
        yield { numDocuments: 1, postBatchResumeToken: false };
      })()
    });
    let token;
    const startAfter = manager.resumeToken();
    const resumeAfter = manager.resumeToken();

    return manager
      .ready()
      .then(() => {
        return manager.makeChangeStream({ startAfter, resumeAfter }).next();
      })
      .then(() => {
        manager.changeStream.cursor.once('response', () => {
          token = manager.changeStream.resumeToken;
        });

        // Note: this is expected to fail
        return manager.changeStream.next();
      })
      .then(
        () => manager.teardown(),
        err => manager.teardown(err)
      )
      .then(() => {
        const successes = manager.apm.succeeded.map(e => {
          try {
            return e.reply.cursor;
          } catch (e) {
            return {};
          }
        });

        expect(successes).to.have.a.lengthOf(2);
        expect(successes[0]).to.have.a.nested.property('firstBatch[0]._id');

        expect(token)
          .to.deep.equal(successes[0].firstBatch[0]._id)
          .and.to.not.deep.equal(startAfter)
          .and.to.not.deep.equal(resumeAfter);
      });
  });

  it('must return startAfter from the initial aggregate if the option was specified', function() {
    const manager = new MockServerManager(this.configuration, {
      aggregate: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: false };
      })(),
      getMore: (function*() {
        yield { numDocuments: 1, postBatchResumeToken: false };
      })()
    });
    let token;
    const startAfter = manager.resumeToken();
    const resumeAfter = manager.resumeToken();

    return manager
      .ready()
      .then(() => {
        const changeStream = manager.makeChangeStream({ startAfter, resumeAfter });
        // Sample the token after the second response (the getMore reply).
        let counter = 0;
        changeStream.cursor.on('response', () => {
          if (counter === 1) {
            token = changeStream.resumeToken;
          }
          counter += 1;
        });

        // Note: this is expected to fail
        return changeStream.next();
      })
      .then(
        () => manager.teardown(),
        err => manager.teardown(err)
      )
      .then(() => {
        expect(token)
          .to.deep.equal(startAfter)
          .and.to.not.deep.equal(resumeAfter);
      });
  });

  it('must return resumeAfter from the initial aggregate if the option was specified', function() {
    const manager = new MockServerManager(this.configuration, {
      aggregate: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: false };
      })(),
      getMore: (function*() {
        yield { numDocuments: 1, postBatchResumeToken: false };
      })()
    });
    let token;
    const resumeAfter = manager.resumeToken();

    return manager
      .ready()
      .then(() => {
        const changeStream = manager.makeChangeStream({ resumeAfter });
        let counter = 0;
        changeStream.cursor.on('response', () => {
          if (counter === 1) {
            token = changeStream.resumeToken;
          }
          counter += 1;
        });

        // Note: this is expected to fail
        return changeStream.next();
      })
      .then(
        () => manager.teardown(),
        err => manager.teardown(err)
      )
      .then(() => {
        expect(token).to.deep.equal(resumeAfter);
      });
  });

  it('must be empty if neither the startAfter nor resumeAfter options were specified', function() {
    const manager = new MockServerManager(this.configuration, {
      aggregate: (function*() {
        yield { numDocuments: 0, postBatchResumeToken: false };
      })(),
      getMore: (function*() {
        yield { numDocuments: 1, postBatchResumeToken: false };
      })()
    });
    let token;

    return manager
      .ready()
      .then(() => {
        const changeStream = manager.makeChangeStream();
        let counter = 0;
        changeStream.cursor.on('response', () => {
          if (counter === 1) {
            token = changeStream.resumeToken;
          }
          counter += 1;
        });

        // Note: this is expected to fail
        return changeStream.next();
      })
      .then(
        () => manager.teardown(),
        err => manager.teardown(err)
      )
      .then(() => {
        expect(token).to.not.exist;
      });
  });
});
});
});
| 1 | 17,439 | I thought we got rid of this thing? | mongodb-node-mongodb-native | js |
@@ -83,15 +83,8 @@ class TaintNodeData
public $column_to;
/**
- * @param string $label
- * @param string $entry_path_type
- * @param ?string $entry_path_description
* @param int $line_from
* @param int $line_to
- * @param string $file_name
- * @param string $file_path
- * @param string $snippet
- * @param string $selected_text
* @param int $from
* @param int $to
* @param int $snippet_from | 1 | <?php
namespace Psalm\Internal\Analyzer;
/**
 * Immutable location/label data for a single node in a reported taint trace.
 *
 * @psalm-immutable
 */
class TaintNodeData
{
    /**
     * @var int
     */
    public $line_from;

    /**
     * @var int
     */
    public $line_to;

    /**
     * @var string
     */
    public $label;

    /**
     * @var string
     */
    public $entry_path_type;

    /**
     * @var ?string
     */
    public $entry_path_description;

    /**
     * @var string
     */
    public $file_name;

    /**
     * @var string
     */
    public $file_path;

    /**
     * @var string
     */
    public $snippet;

    /**
     * @var string
     */
    public $selected_text;

    /**
     * @var int
     */
    public $from;

    /**
     * @var int
     */
    public $to;

    /**
     * @var int
     */
    public $snippet_from;

    /**
     * @var int
     */
    public $snippet_to;

    /**
     * @var int
     */
    public $column_from;

    /**
     * @var int
     */
    public $column_to;

    // Native parameter type declarations replace the former @param docblock;
    // they match the property @var annotations above.
    public function __construct(
        string $label,
        string $entry_path_type,
        ?string $entry_path_description,
        int $line_from,
        int $line_to,
        string $file_name,
        string $file_path,
        string $snippet,
        string $selected_text,
        int $from,
        int $to,
        int $snippet_from,
        int $snippet_to,
        int $column_from,
        int $column_to
    ) {
        $this->label = $label;
        $this->entry_path_type = $entry_path_type;
        $this->entry_path_description = $entry_path_description;
        $this->line_from = $line_from;
        $this->line_to = $line_to;
        $this->file_name = $file_name;
        $this->file_path = $file_path;
        $this->snippet = $snippet;
        $this->selected_text = $selected_text;
        $this->from = $from;
        $this->to = $to;
        $this->snippet_from = $snippet_from;
        $this->snippet_to = $snippet_to;
        $this->column_from = $column_from;
        $this->column_to = $column_to;
    }
}
| 1 | 8,976 | Please convert int params as well. | vimeo-psalm | php |
@@ -84,7 +84,7 @@ namespace Microsoft.CodeAnalysis.Sarif.Driver.Sdk
var lastComponent = components.Last();
messageLines.Add(
string.Format(
- CultureInfo.InvariantCulture, "{0}:{1} {2} {3}: {4}",
+ CultureInfo.InvariantCulture, "{0}{1}: {2} {3}: {4}",
lastComponent.Uri.AbsolutePath,
lastComponent.Region.FormatForVisualStudio(),
result.Kind.FormatForVisualStudio(), | 1 | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.using System;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Globalization;
using System.Linq;
namespace Microsoft.CodeAnalysis.Sarif.Driver.Sdk
{
public static class ExtensionMethods
{
public static string FormatForVisualStudio(this Region region)
{
if (region.StartLine < 0)
{
throw new NotImplementedException();
}
// VS supports the following formatting options:
// (startLine)
// (startLine-endLine)
// (startLine,startColumn)
// (startLine,startColumn-endColumn)
// (startLine,startColumn,endLine,endColumn)
bool multiline = region.EndLine > region.StartLine;
bool multicolumn = (multiline || region.EndColumn > region.StartColumn);
if (multiline)
{
if (multicolumn && (region.StartColumn > 1 || region.EndColumn > 1))
{
// (startLine,startColumn,endLine,endColumn)
return
"(" +
region.StartLine.ToString() + "," +
(region.StartColumn > 0 ? region.StartColumn.ToString() : "1") + "," +
region.EndLine.ToString() + "," +
(region.EndColumn > 0 ? region.EndColumn.ToString() : "1") +
")";
}
// (startLine-endLine)
return
"(" +
region.StartLine.ToString() + "-" + region.EndLine.ToString() +
")";
}
if (multicolumn)
{
// (startLine,startColumn-endColumn)
return
"(" +
region.StartLine.ToString() + "," +
region.StartColumn.ToString() + "-" +
region.EndColumn.ToString() +
")";
}
if (region.StartColumn > 1)
{
// (startLine,startColumn)
return
"(" +
region.StartLine.ToString() + "," + region.StartColumn.ToString() +
")";
}
// (startLine)
return
"(" +
region.StartLine.ToString() + "," + region.StartColumn.ToString() +
")";
}
public static string FormatForVisualStudio(this Result result, IRuleDescriptor rule)
{
var messageLines = new List<string>();
foreach (var location in result.Locations)
{
var components = location.ResultFile ?? location.AnalysisTarget;
var lastComponent = components.Last();
messageLines.Add(
string.Format(
CultureInfo.InvariantCulture, "{0}:{1} {2} {3}: {4}",
lastComponent.Uri.AbsolutePath,
lastComponent.Region.FormatForVisualStudio(),
result.Kind.FormatForVisualStudio(),
result.RuleId,
result.GetMessageText(rule)
));
}
return string.Join(Environment.NewLine, messageLines);
}
public static string FormatForVisualStudio(this ResultKind kind)
{
switch (kind)
{
case ResultKind.Error:
case ResultKind.ConfigurationError:
case ResultKind.InternalError:
return "error";
case ResultKind.Warning:
return "warning";
default:
return "info";
}
}
/// <summary>
/// Completely populate all Region property members. Missing data
/// is computed based on the values that are already present.
/// </summary>
/// <param name="region"></param>
/// <param name="newLineIndex"></param>
public static void Populate(this Region region, NewLineIndex newLineIndex)
{
// TODO: we need charOffset and byteOffset to be expressed as
// nullable types in order to differentiate between text
// and binary file regions. For text files, we need to populate
// startLine, etc. based on document offset. For now, we'll
// assume we're always looking at text files
if (region.StartLine == 0)
{
OffsetInfo offsetInfo = newLineIndex.GetOffsetInfoForOffset(region.CharOffset);
region.StartLine = offsetInfo.LineNumber;
region.StartColumn = offsetInfo.ColumnNumber;
offsetInfo = newLineIndex.GetOffsetInfoForOffset(region.CharOffset + region.Length);
region.StartLine = offsetInfo.LineNumber;
region.EndColumn = offsetInfo.ColumnNumber;
}
else
{
// Make endColumn and endLine explicit, if not expressed
if (region.EndLine == 0) { region.EndLine = region.StartLine; }
if (region.EndColumn == 0) { region.EndColumn = region.StartColumn; }
LineInfo lineInfo = newLineIndex.GetLineInfoForLine(region.StartLine);
region.CharOffset = lineInfo.StartOffset + (region.StartColumn - 1);
lineInfo = newLineIndex.GetLineInfoForLine(region.EndLine);
region.Length = lineInfo.StartOffset + (region.EndColumn - 1) - region.CharOffset;
}
}
public static string GetMessageText(this Result result, IRuleDescriptor rule, bool concise = false)
{
if (concise && !string.IsNullOrEmpty(result.ShortMessage))
{
return result.ShortMessage;
}
string text = result.FullMessage;
if (string.IsNullOrEmpty(text))
{
Debug.Assert(rule != null);
string ruleId = result.RuleId;
string formatSpecifierId = result.FormattedMessage.SpecifierId;
string formatSpecifier;
string[] arguments = new string[result.FormattedMessage.Arguments.Count];
result.FormattedMessage.Arguments.CopyTo(arguments, 0);
Debug.Assert(rule.FormatSpecifiers.ContainsKey(formatSpecifierId));
formatSpecifier = rule.FormatSpecifiers[formatSpecifierId];
#if DEBUG
int argumentsCount = result.FormattedMessage.Arguments.Count;
for (int i = 0; i < argumentsCount; i++)
{
// If this assert fires, there are too many arguments for the specifier
// or there is an argument is skipped or not consumed in the specifier
Debug.Assert(formatSpecifier.Contains("{" + i.ToString() + "}"));
}
#endif
text = string.Format(formatSpecifier, arguments);
#if DEBUG
// If this assert fires, an insufficient # of arguments might
// have been provided to String.Format.
Debug.Assert(!text.Contains("{"));
#endif
}
if (concise)
{
text = GetFirstSentence(text);
}
return text;
}
public static string GetFirstSentence(string text)
{
int length = 0;
bool withinQuotes = false;
bool withinParentheses = false;
foreach (char ch in text)
{
length++;
switch (ch)
{
case '\'':
{
// we'll ignore everything within parenthized text
if (!withinParentheses)
{
withinQuotes = !withinQuotes;
}
break;
}
case '(':
{
if (!withinQuotes)
{
withinParentheses = true;
}
break;
}
case ')':
{
if (!withinQuotes)
{
withinParentheses = false;
}
break;
}
case '\n':
case '\r':
case '.':
{
if (withinQuotes || withinParentheses) { continue; }
return text.Substring(0, length).TrimEnd('\r', '\n');
}
}
}
return text;
}
}
}
| 1 | 10,123 | Fixed bug in implementation. Now I can run the validator from the VS Tools menu, double-click on an output line, and navigate to the site of the issue. | microsoft-sarif-sdk | .cs |
@@ -230,6 +230,18 @@ namespace Datadog.Trace.Configuration
/// </summary>
public const string DiagnosticSourceEnabled = "DD_DIAGNOSTIC_SOURCE_ENABLED";
+ /// <summary>
+ /// Configuration key for the application's servers http statuses to set spans as errors by.
+ /// </summary>
+ /// <seealso cref="TracerSettings.HttpServerErrorStatuses"/>
+ public const string HttpServerErrors = "DD_HTTP_SERVER_ERROR_STATUSES";
+
+ /// <summary>
+ /// Configuration key for the application's client http statuses to set spans as errors by.
+ /// </summary>
+ /// <seealso cref="TracerSettings.HttpClientErrorStatuses"/>
+ public const string HttpClientErrors = "DD_HTTP_CLIENT_ERROR_STATUSES";
+
/// <summary>
/// String format patterns used to match integration-specific configuration keys.
/// </summary> | 1 | namespace Datadog.Trace.Configuration
{
    /// <summary>
    /// String constants for standard Datadog configuration keys.
    /// </summary>
    public static class ConfigurationKeys
    {
        /// <summary>
        /// Configuration key for the path to the configuration file.
        /// Can only be set with an environment variable
        /// or in the <c>app.config</c>/<c>web.config</c> file.
        /// </summary>
        public const string ConfigurationFileName = "DD_TRACE_CONFIG_FILE";
        /// <summary>
        /// Configuration key for the application's environment. Sets the "env" tag on every <see cref="Span"/>.
        /// </summary>
        /// <seealso cref="TracerSettings.Environment"/>
        public const string Environment = "DD_ENV";
        /// <summary>
        /// Configuration key for the application's default service name.
        /// Used as the service name for top-level spans,
        /// and used to determine service name of some child spans.
        /// </summary>
        /// <seealso cref="TracerSettings.ServiceName"/>
        public const string ServiceName = "DD_SERVICE";
        /// <summary>
        /// Configuration key for the application's version. Sets the "version" tag on every <see cref="Span"/>.
        /// </summary>
        /// <seealso cref="TracerSettings.ServiceVersion"/>
        public const string ServiceVersion = "DD_VERSION";
        /// <summary>
        /// Configuration key for enabling or disabling the Tracer.
        /// Default value is true (enabled).
        /// </summary>
        /// <seealso cref="TracerSettings.TraceEnabled"/>
        public const string TraceEnabled = "DD_TRACE_ENABLED";
        /// <summary>
        /// Configuration key for enabling or disabling the Tracer's debug mode.
        /// Default value is false (disabled).
        /// </summary>
        /// <seealso cref="TracerSettings.DebugEnabled"/>
        public const string DebugEnabled = "DD_TRACE_DEBUG";
        /// <summary>
        /// Configuration key for a list of integrations to disable. All other integrations remain enabled.
        /// Default is empty (all integrations are enabled).
        /// Supports multiple values separated with semi-colons.
        /// </summary>
        /// <seealso cref="TracerSettings.DisabledIntegrationNames"/>
        public const string DisabledIntegrations = "DD_DISABLED_INTEGRATIONS";
        /// <summary>
        /// Configuration key for the Agent host where the Tracer can send traces.
        /// Overridden by <see cref="AgentUri"/> if present.
        /// Default value is "localhost".
        /// </summary>
        /// <seealso cref="TracerSettings.AgentUri"/>
        public const string AgentHost = "DD_AGENT_HOST";
        /// <summary>
        /// Configuration key for the Agent port where the Tracer can send traces.
        /// Default value is 8126.
        /// </summary>
        /// <seealso cref="TracerSettings.AgentUri"/>
        public const string AgentPort = "DD_TRACE_AGENT_PORT";
        /// <summary>
        /// Sibling setting for <see cref="AgentPort"/>.
        /// Used to force a specific port binding for the Trace Agent.
        /// Default value is 8126.
        /// </summary>
        /// <seealso cref="TracerSettings.AgentUri"/>
        public const string TraceAgentPortKey = "DD_APM_RECEIVER_PORT";
        /// <summary>
        /// Configuration key for the Agent URL where the Tracer can send traces.
        /// Overrides values in <see cref="AgentHost"/> and <see cref="AgentPort"/> if present.
        /// Default value is "http://localhost:8126".
        /// </summary>
        /// <seealso cref="TracerSettings.AgentUri"/>
        public const string AgentUri = "DD_TRACE_AGENT_URL";
        /// <summary>
        /// Configuration key for enabling or disabling default Analytics.
        /// </summary>
        /// <seealso cref="TracerSettings.AnalyticsEnabled"/>
        public const string GlobalAnalyticsEnabled = "DD_TRACE_ANALYTICS_ENABLED";
        /// <summary>
        /// Configuration key for a list of tags to be applied globally to spans.
        /// </summary>
        /// <seealso cref="TracerSettings.GlobalTags"/>
        public const string GlobalTags = "DD_TAGS";
        /// <summary>
        /// Configuration key for a map of header keys to tag names.
        /// Automatically apply header values as tags on traces.
        /// </summary>
        /// <seealso cref="TracerSettings.HeaderTags"/>
        public const string HeaderTags = "DD_TRACE_HEADER_TAGS";
        /// <summary>
        /// Configuration key for setting the size of the trace buffer.
        /// </summary>
        public const string QueueSize = "DD_TRACE_QUEUE_SIZE";
        /// <summary>
        /// Configuration key for enabling or disabling the automatic injection
        /// of correlation identifiers into the logging context.
        /// </summary>
        /// <seealso cref="TracerSettings.LogsInjectionEnabled"/>
        public const string LogsInjectionEnabled = "DD_LOGS_INJECTION";
        /// <summary>
        /// Configuration key for setting the number of traces allowed
        /// to be submitted per second.
        /// </summary>
        /// <seealso cref="TracerSettings.MaxTracesSubmittedPerSecond"/>
        public const string MaxTracesSubmittedPerSecond = "DD_MAX_TRACES_PER_SECOND";
        /// <summary>
        /// Configuration key for enabling or disabling the diagnostic log at startup.
        /// </summary>
        /// <seealso cref="TracerSettings.StartupDiagnosticLogEnabled"/>
        public const string StartupDiagnosticLogEnabled = "DD_TRACE_STARTUP_LOGS";
        /// <summary>
        /// Configuration key for setting custom sampling rules based on regular expressions.
        /// Semi-colon separated list of sampling rules.
        /// The rule is matched in order of specification. The first match in a list is used.
        ///
        /// Per entry:
        ///   The item "sample_rate" is required in decimal format.
        ///   The item "service" is optional in regular expression format, to match on service name.
        ///   The item "name" is optional in regular expression format, to match on operation name.
        ///
        /// To give a rate of 50% to any traces in a service starting with the text "cart":
        ///   '[{"sample_rate":0.5, "service":"cart.*"}]'
        ///
        /// To give a rate of 20% to any traces which have an operation name of "http.request":
        ///   '[{"sample_rate":0.2, "name":"http.request"}]'
        ///
        /// To give a rate of 100% to any traces within a service named "background" and with an operation name of "sql.query":
        ///   '[{"sample_rate":1.0, "service":"background", "name":"sql.query"}]
        ///
        /// To give a rate of 10% to all traces
        ///   '[{"sample_rate":0.1}]'
        ///
        /// To configure multiple rules, separate by semi-colon and order from most specific to least specific:
        ///   '[{"sample_rate":0.5, "service":"cart.*"}, {"sample_rate":0.2, "name":"http.request"}, {"sample_rate":1.0, "service":"background", "name":"sql.query"}, {"sample_rate":0.1}]'
        ///
        /// If no rules are specified, or none match, default internal sampling logic will be used.
        /// </summary>
        /// <seealso cref="TracerSettings.CustomSamplingRules"/>
        public const string CustomSamplingRules = "DD_TRACE_SAMPLING_RULES";
        /// <summary>
        /// Configuration key for setting the global rate for the sampler.
        /// </summary>
        public const string GlobalSamplingRate = "DD_TRACE_SAMPLE_RATE";
        /// <summary>
        /// Configuration key for the DogStatsd port where the Tracer can send metrics.
        /// Default value is 8125.
        /// </summary>
        public const string DogStatsdPort = "DD_DOGSTATSD_PORT";
        /// <summary>
        /// Configuration key for enabling or disabling internal metrics sent to DogStatsD.
        /// Default value is <c>false</c> (disabled).
        /// </summary>
        public const string TracerMetricsEnabled = "DD_TRACE_METRICS_ENABLED";
        /// <summary>
        /// Configuration key for enabling or disabling runtime metrics sent to DogStatsD.
        /// Default value is <c>false</c> (disabled).
        /// </summary>
        public const string RuntimeMetricsEnabled = "DD_RUNTIME_METRICS_ENABLED";
        /// <summary>
        /// Configuration key for setting the approximate maximum size,
        /// in bytes, for Tracer log files.
        /// Default value is 10 MB.
        /// </summary>
        public const string MaxLogFileSize = "DD_MAX_LOGFILE_SIZE";
        /// <summary>
        /// Configuration key for setting the path to the .NET Tracer native log file.
        /// This also determines the output folder of the .NET Tracer managed log files.
        /// Overridden by <see cref="LogDirectory"/> if present.
        /// </summary>
        public const string ProfilerLogPath = "DD_TRACE_LOG_PATH";
        /// <summary>
        /// Configuration key for setting the directory of the .NET Tracer logs.
        /// Overrides the value in <see cref="ProfilerLogPath"/> if present.
        /// Default value is "%ProgramData%\Datadog .NET Tracer\logs\" on Windows
        /// or "/var/log/datadog/dotnet/" on Linux.
        /// </summary>
        public const string LogDirectory = "DD_TRACE_LOG_DIRECTORY";
        /// <summary>
        /// Configuration key for when a standalone instance of the Trace Agent needs to be started.
        /// </summary>
        public const string TraceAgentPath = "DD_TRACE_AGENT_PATH";
        /// <summary>
        /// Configuration key for arguments to pass to the Trace Agent process.
        /// </summary>
        public const string TraceAgentArgs = "DD_TRACE_AGENT_ARGS";
        /// <summary>
        /// Configuration key for when a standalone instance of DogStatsD needs to be started.
        /// </summary>
        public const string DogStatsDPath = "DD_DOGSTATSD_PATH";
        /// <summary>
        /// Configuration key for arguments to pass to the DogStatsD process.
        /// </summary>
        public const string DogStatsDArgs = "DD_DOGSTATSD_ARGS";
        /// <summary>
        /// Configuration key for enabling or disabling the use of System.Diagnostics.DiagnosticSource.
        /// Default value is <c>true</c> (enabled).
        /// </summary>
        public const string DiagnosticSourceEnabled = "DD_DIAGNOSTIC_SOURCE_ENABLED";
        /// <summary>
        /// String format patterns used to match integration-specific configuration keys.
        /// </summary>
        public static class Integrations
        {
            /// <summary>
            /// Configuration key pattern for enabling or disabling an integration.
            /// </summary>
            public const string Enabled = "DD_TRACE_{0}_ENABLED";
            /// <summary>
            /// Configuration key pattern for enabling or disabling Analytics in an integration.
            /// </summary>
            public const string AnalyticsEnabled = "DD_TRACE_{0}_ANALYTICS_ENABLED";
            /// <summary>
            /// Configuration key pattern for setting Analytics sampling rate in an integration.
            /// </summary>
            public const string AnalyticsSampleRate = "DD_TRACE_{0}_ANALYTICS_SAMPLE_RATE";
        }
        /// <summary>
        /// String constants for debug configuration keys.
        /// </summary>
        internal static class Debug
        {
            /// <summary>
            /// Configuration key for forcing the automatic instrumentation to only use the mdToken method lookup mechanism.
            /// </summary>
            public const string ForceMdTokenLookup = "DD_TRACE_DEBUG_LOOKUP_MDTOKEN";
            /// <summary>
            /// Configuration key for forcing the automatic instrumentation to only use the fallback method lookup mechanism.
            /// </summary>
            public const string ForceFallbackLookup = "DD_TRACE_DEBUG_LOOKUP_FALLBACK";
        }
    }
}
| 1 | 18,547 | Can we rename this field to `HttpServerErrorCodes` or `HttpServerErrorStatuses`? It will contain a list of status _codes_, not a list of _errors_. (Personally I prefer "codes" over "statuses", but we can't change `DD_HTTP_SERVER_ERROR_CODES`.) | DataDog-dd-trace-dotnet | .cs |
@@ -134,8 +134,17 @@ struct flb_systemd_config *flb_systemd_config_create(struct flb_input_instance *
sd_journal_add_disjunction(ctx->j);
}
- /* Always seek to head */
- sd_journal_seek_head(ctx->j);
+ /* Seek to head by default or tail if specified in configuration */
+ tmp = flb_input_get_property("read_from_tail", i_ins);
+ if (tmp && strcasecmp(tmp, "true") == 0) {
+ sd_journal_seek_tail(ctx->j);
+
+ /* Skip last entry */
+ sd_journal_next_skip(ctx->j, 1);
+ }
+ else {
+ sd_journal_seek_head(ctx->j);
+ }
/* Check if we have a cursor in our database */
if (ctx->db) { | 1 | /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* Fluent Bit
* ==========
* Copyright (C) 2015-2017 Treasure Data Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <fluent-bit/flb_info.h>
#include <fluent-bit/flb_config.h>
#include <fluent-bit/flb_input.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "systemd_db.h"
#include "systemd_config.h"
struct flb_systemd_config *flb_systemd_config_create(struct flb_input_instance *i_ins,
struct flb_config *config)
{
int ret;
char *tmp;
struct stat st;
struct mk_list *head;
struct flb_config_prop *prop;
struct flb_systemd_config *ctx;
/* Allocate space for the configuration */
ctx = flb_calloc(1, sizeof(struct flb_systemd_config));
if (!ctx) {
flb_errno();
return NULL;
}
/* Create the channel manager */
ret = pipe(ctx->ch_manager);
if (ret == -1) {
flb_errno();
flb_free(ctx);
return NULL;
}
/* Config: path */
tmp = flb_input_get_property("path", i_ins);
if (tmp) {
ret = stat(tmp, &st);
if (ret == -1) {
flb_errno();
flb_free(ctx);
flb_error("[in_systemd] given path %s is invalid", tmp);
return NULL;
}
if (!S_ISDIR(st.st_mode)) {
flb_errno();
flb_free(ctx);
flb_error("[in_systemd] given path is not a directory: %s", tmp);
return NULL;
}
ctx->path = flb_strdup(tmp);
}
else {
ctx->path = NULL;
}
/* Open the Journal */
if (ctx->path) {
ret = sd_journal_open_directory(&ctx->j, ctx->path, 0);
}
else {
ret = sd_journal_open(&ctx->j, SD_JOURNAL_LOCAL_ONLY);
}
if (ret != 0) {
flb_free(ctx);
flb_error("[in_systemd] could not open the Journal");
return NULL;
}
ctx->fd = sd_journal_get_fd(ctx->j);
ctx->i_ins = i_ins;
/* Tag settings */
tmp = strchr(i_ins->tag, '*');
if (tmp) {
ctx->dynamic_tag = FLB_TRUE;
}
else {
ctx->dynamic_tag = FLB_FALSE;
}
ctx->i_ins->flags |= FLB_INPUT_DYN_TAG;
/* Database file */
tmp = flb_input_get_property("db", i_ins);
if (tmp) {
ctx->db = flb_systemd_db_open(tmp, i_ins, config);
if (!ctx->db) {
flb_error("[in_systemd] could not open/create database");
}
}
/* Max number of entries per notification */
tmp = flb_input_get_property("max_entries", i_ins);
if (tmp) {
ctx->max_entries = atoi(tmp);
}
else {
ctx->max_entries = FLB_SYSTEND_ENTRIES;
}
/* Load Systemd filters, iterate all properties */
mk_list_foreach(head, &i_ins->properties) {
prop = mk_list_entry(head, struct flb_config_prop, _head);
if (strcasecmp(prop->key, "systemd_filter") != 0) {
continue;
}
flb_debug("[in_systemd] add filter: %s", prop->val);
/* Apply filter/match */
sd_journal_add_match(ctx->j, prop->val, 0);
sd_journal_add_disjunction(ctx->j);
}
/* Always seek to head */
sd_journal_seek_head(ctx->j);
/* Check if we have a cursor in our database */
if (ctx->db) {
tmp = flb_systemd_db_get_cursor(ctx);
if (tmp) {
ret = sd_journal_seek_cursor(ctx->j, tmp);
if (ret == 0) {
flb_info("[in_systemd] seek_cursor=%.40s... OK", tmp);
/* Skip the first entry, already processed */
sd_journal_next_skip(ctx->j, 1);
}
else {
flb_warn("[in_systemd] seek_cursor failed");
}
flb_free(tmp);
}
}
return ctx;
}
/* Release every resource owned by the plugin context. Always returns 0. */
int flb_systemd_config_destroy(struct flb_systemd_config *ctx)
{
    /* Journal handle */
    if (ctx->j) {
        sd_journal_close(ctx->j);
    }

    /* Cursor database, when one was configured */
    if (ctx->db) {
        flb_systemd_db_close(ctx->db);
    }

    /* Optional journal directory path */
    if (ctx->path) {
        flb_free(ctx->path);
    }

    /* Channel manager pipe */
    close(ctx->ch_manager[0]);
    close(ctx->ch_manager[1]);

    flb_free(ctx);
    return 0;
}
| 1 | 8,798 | would you please use: flb_utils_bool(...) here ?, that function wraps the on/off/true/false stuff. | fluent-fluent-bit | c |
@@ -178,7 +178,8 @@ namespace Microsoft.CodeAnalysis.Sarif.Driver.Sdk
analyzeOptions.Verbose,
targets,
analyzeOptions.ComputeTargetsHash,
- Prerelease)),
+ Prerelease,
+ null)),
(ex) =>
{
Errors.LogExceptionCreatingLogFile(context, filePath, ex); | 1 | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System;
using System.Collections.Generic;
using System.IO;
using Microsoft.CodeAnalysis.Sarif.Sdk;
namespace Microsoft.CodeAnalysis.Sarif.Driver.Sdk
{
public abstract class AnalyzeCommandBase<TContext, TOptions> : PlugInDriverCommand<TOptions>
where TContext : IAnalysisContext, new()
where TOptions : IAnalyzeOptions
{
        // Root analysis context created by Analyze(); kept as a field so the
        // exception handlers in Run() can log against it.
        private TContext rootContext;
        /// <summary>Exception that terminated analysis, if any; null on clean runs.</summary>
        public Exception ExecutionException { get; set; }
        /// <summary>Runtime conditions accumulated across the entire analysis run.</summary>
        public RuntimeConditions RuntimeErrors { get; set; }
        /// <summary>Test hook: when true, analysis deliberately raises an unhandled exception.</summary>
        public static bool RaiseUnhandledExceptionInDriverCode { get; set; }
        /// <summary>
        /// Driver entry point. Runs analysis, routing all output through a single
        /// aggregating logger, and returns SUCCESS only when no fatal runtime
        /// condition was recorded.
        /// </summary>
        /// <param name="analyzeOptions">Parsed command-line options.</param>
        /// <returns>SUCCESS or FAILURE exit code.</returns>
        public override int Run(TOptions analyzeOptions)
        {
            // 0. Initialize a common logger that drives all outputs. This
            // object drives logging for console, statistics, etc.
            using (AggregatingLogger logger = InitializeLogger(analyzeOptions))
            {
                try
                {
                    Analyze(analyzeOptions, logger);
                }
                catch (ExitApplicationException<ExitReason> ex)
                {
                    // These exceptions have already been logged
                    ExecutionException = ex;
                    return FAILURE;
                }
                catch (Exception ex)
                {
                    // These exceptions escaped our net and must be logged here
                    RuntimeErrors |= Errors.LogUnhandledEngineException(this.rootContext, ex);
                    ExecutionException = ex;
                    return FAILURE;
                }
                finally
                {
                    // Runs before the early returns above unwind, so the stop
                    // notification fires exactly once on every path.
                    logger.AnalysisStopped(RuntimeErrors);
                }
            }
            return ((RuntimeErrors & RuntimeConditions.Fatal) == RuntimeConditions.NoErrors) ? SUCCESS : FAILURE;
        }
private void Analyze(TOptions analyzeOptions, AggregatingLogger logger)
{
// 0. Log analysis initiation
logger.AnalysisStarted();
// 1. Create our configuration property bag, which will be
// shared with all rules during analysis
PropertyBag policy = CreateConfigurationFromOptions(analyzeOptions);
// 2. Create context object to pass to skimmers. The logger
// and configuration objects are common to all context
// instances and will be passed on again for analysis.
this.rootContext = CreateContext(analyzeOptions, logger, policy, RuntimeErrors);
// 3. Produce a comprehensive set of analysis targets
HashSet<string> targets = CreateTargetsSet(analyzeOptions);
// 4. Proactively validate that we can locate and
// access all analysis targets. Helper will return
// a list that potentially filters out files which
// did not exist, could not be accessed, etc.
targets = ValidateTargetsExist(this.rootContext, targets);
// 5. Initialize report file, if configured.
InitializeOutputFile(analyzeOptions, this.rootContext, targets);
// 6. Instantiate skimmers.
HashSet<ISkimmer<TContext>> skimmers = CreateSkimmers(this.rootContext);
// 7. Initialize skimmers. Initialize occurs a single time only.
skimmers = InitializeSkimmers(skimmers, this.rootContext);
// 8. Run all analysis
AnalyzeTargets(analyzeOptions, skimmers, this.rootContext, targets);
// 9. For test purposes, raise an unhandled exception if indicated
if (RaiseUnhandledExceptionInDriverCode)
{
throw new InvalidOperationException(this.GetType().Name);
}
}
internal AggregatingLogger InitializeLogger(IAnalyzeOptions analyzeOptions)
{
var logger = new AggregatingLogger();
logger.Loggers.Add(new ConsoleLogger(analyzeOptions.Verbose));
if (analyzeOptions.Statistics)
{
logger.Loggers.Add(new StatisticsLogger());
}
return logger;
}
private static HashSet<string> CreateTargetsSet(TOptions analyzeOptions)
{
HashSet<string> targets = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
foreach (string specifier in analyzeOptions.TargetFileSpecifiers)
{
string normalizedSpecifier = specifier;
Uri uri;
if (Uri.TryCreate(specifier, UriKind.RelativeOrAbsolute, out uri))
{
if (uri.IsAbsoluteUri && (uri.IsFile || uri.IsUnc))
{
normalizedSpecifier = uri.LocalPath;
}
}
// Currently, we do not filter on any extensions.
var fileSpecifier = new FileSpecifier(normalizedSpecifier, recurse: analyzeOptions.Recurse, filter: "*");
foreach (string file in fileSpecifier.Files) { targets.Add(file); }
}
return targets;
}
private HashSet<string> ValidateTargetsExist(TContext context, HashSet<string> targets)
{
if (targets.Count == 0)
{
Errors.LogNoValidAnalysisTargets(context);
ThrowExitApplicationException(context, ExitReason.NoValidAnalysisTargets);
}
return targets;
}
protected virtual TContext CreateContext(
TOptions options,
IAnalysisLogger logger,
PropertyBag policy,
RuntimeConditions runtimeErrors,
string filePath = null)
{
var context = new TContext();
context.Logger = logger;
context.Policy = policy;
context.RuntimeErrors = runtimeErrors;
if (filePath != null)
{
context.TargetUri = new Uri(filePath);
}
return context;
}
private void InitializeOutputFile(TOptions analyzeOptions, TContext context, HashSet<string> targets)
{
string filePath = analyzeOptions.OutputFilePath;
AggregatingLogger aggregatingLogger = (AggregatingLogger)context.Logger;
if (!string.IsNullOrEmpty(filePath))
{
InvokeCatchingRelevantIOExceptions
(
() => aggregatingLogger.Loggers.Add(
new SarifLogger(
analyzeOptions.OutputFilePath,
analyzeOptions.Verbose,
targets,
analyzeOptions.ComputeTargetsHash,
Prerelease)),
(ex) =>
{
Errors.LogExceptionCreatingLogFile(context, filePath, ex);
ThrowExitApplicationException(context, ExitReason.ExceptionCreatingLogFile, ex);
}
);
}
}
public void InvokeCatchingRelevantIOExceptions(Action action, Action<Exception> exceptionHandler)
{
try
{
action();
}
catch (UnauthorizedAccessException ex)
{
exceptionHandler(ex);
}
catch (IOException ex)
{
exceptionHandler(ex);
}
}
private HashSet<ISkimmer<TContext>> CreateSkimmers(TContext context)
{
IEnumerable<ISkimmer<TContext>> skimmers;
HashSet<ISkimmer<TContext>> result = new HashSet<ISkimmer<TContext>>();
try
{
skimmers = DriverUtilities.GetExports<ISkimmer<TContext>>(DefaultPlugInAssemblies);
foreach (ISkimmer<TContext> skimmer in skimmers)
{
result.Add(skimmer);
}
}
catch (Exception ex)
{
Errors.LogExceptionInstantiatingSkimmers(context, DefaultPlugInAssemblies, ex);
ThrowExitApplicationException(context, ExitReason.UnhandledExceptionInstantiatingSkimmers, ex);
}
if (result.Count == 0)
{
Errors.LogNoRulesLoaded(context);
ThrowExitApplicationException(context, ExitReason.NoRulesLoaded);
}
return result;
}
protected virtual void AnalyzeTargets(
TOptions options,
IEnumerable<ISkimmer<TContext>> skimmers,
TContext rootContext,
IEnumerable<string> targets)
{
HashSet<string> disabledSkimmers = new HashSet<string>();
foreach (string target in targets)
{
using (TContext context = DetermineApplicabilityAndAnalyze(options, skimmers, rootContext, target, disabledSkimmers))
{
RuntimeErrors |= context.RuntimeErrors;
}
}
}
        /// <summary>
        /// Creates a per-target context, screens out unloadable or invalid
        /// targets, and runs every applicable skimmer against the target.
        /// The returned context is owned by the caller and must be disposed.
        /// </summary>
        protected virtual TContext DetermineApplicabilityAndAnalyze(
            TOptions options,
            IEnumerable<ISkimmer<TContext>> skimmers,
            TContext rootContext,
            string target,
            HashSet<string> disabledSkimmers)
        {
            var context = CreateContext(options, rootContext.Logger, rootContext.Policy, rootContext.RuntimeErrors, target);
            // The target could not even be loaded: report and bail out early.
            if (context.TargetLoadException != null)
            {
                Errors.LogExceptionLoadingTarget(context);
                context.Dispose();
                return context;
            }
            // Target loaded but is not analyzable; logged as a note, not an error.
            else if (!context.IsValidAnalysisTarget)
            {
                Notes.LogExceptionInvalidTarget(context);
                context.Dispose();
                return context;
            }
            // Emits the "Analyzing '{0}'..." message for this target.
            context.Logger.AnalyzingTarget(context);
            IEnumerable<ISkimmer<TContext>> applicableSkimmers = DetermineApplicabilityForTarget(skimmers, context, disabledSkimmers);
            AnalyzeTarget(applicableSkimmers, context, disabledSkimmers);
            return context;
        }
protected virtual void AnalyzeTarget(IEnumerable<ISkimmer<TContext>> skimmers, TContext context, HashSet<string> disabledSkimmers)
{
foreach (ISkimmer<TContext> skimmer in skimmers)
{
if (disabledSkimmers.Contains(skimmer.Id)) { continue; }
context.Rule = skimmer;
try
{
skimmer.Analyze(context);
}
catch (Exception ex)
{
RuntimeErrors |= Errors.LogUnhandledRuleExceptionAnalyzingTarget(disabledSkimmers, context, ex);
}
}
}
        /// <summary>
        /// Asks each enabled skimmer whether it can analyze the current target and
        /// returns the applicable subset. Misconfigured rules are disabled for the
        /// remainder of the run; rules that crash during the applicability check
        /// are logged and skipped for this target.
        /// </summary>
        protected virtual IEnumerable<ISkimmer<TContext>> DetermineApplicabilityForTarget(
            IEnumerable<ISkimmer<TContext>> skimmers,
            TContext context,
            HashSet<string> disabledSkimmers)
        {
            var candidateSkimmers = new List<ISkimmer<TContext>>();
            foreach (ISkimmer<TContext> skimmer in skimmers)
            {
                if (disabledSkimmers.Contains(skimmer.Id)) { continue; }
                string reasonForNotAnalyzing = null;
                context.Rule = skimmer;
                AnalysisApplicability applicability = AnalysisApplicability.Unknown;
                try
                {
                    applicability = skimmer.CanAnalyze(context, out reasonForNotAnalyzing);
                }
                catch (Exception ex)
                {
                    // A crashing applicability check skips this rule for the target.
                    Errors.LogUnhandledRuleExceptionAssessingTargetApplicability (disabledSkimmers, context, ex);
                    continue;
                }
                finally
                {
                    // Runs on both the success and the continue paths above, so any
                    // conditions recorded by CanAnalyze are always accumulated.
                    RuntimeErrors |= context.RuntimeErrors;
                }
                switch (applicability)
                {
                    // Logged as a note only; the rule stays enabled for other targets.
                    case AnalysisApplicability.NotApplicableToSpecifiedTarget:
                    {
                        Notes.LogNotApplicableToSpecifiedTarget(context, reasonForNotAnalyzing);
                        break;
                    }
                    // Configuration problems disable the rule for the rest of the run.
                    case AnalysisApplicability.NotApplicableDueToMissingConfiguration:
                    {
                        Errors.LogMissingRuleConfiguration(context, reasonForNotAnalyzing);
                        disabledSkimmers.Add(skimmer.Id);
                        break;
                    }
                    case AnalysisApplicability.ApplicableToSpecifiedTarget:
                    {
                        candidateSkimmers.Add(skimmer);
                        break;
                    }
                }
            }
            return candidateSkimmers;
        }
/// <summary>
/// Records the context's runtime conditions and aborts the run by throwing
/// an <see cref="ExitApplicationException{T}"/> carrying the exit reason.
/// </summary>
private void ThrowExitApplicationException(TContext context, ExitReason exitReason, Exception innerException = null)
{
    // Preserve any runtime conditions recorded so far before aborting.
    RuntimeErrors |= context.RuntimeErrors;

    var exitException = new ExitApplicationException<ExitReason>(SdkResources.MSG_UnexpectedApplicationExit, innerException);
    exitException.ExitReason = exitReason;
    throw exitException;
}
/// <summary>
/// Performs one-time initialization of every skimmer, removing from the set
/// any rule whose Initialize call throws. Returns the (mutated) input set.
/// </summary>
protected virtual HashSet<ISkimmer<TContext>> InitializeSkimmers(HashSet<ISkimmer<TContext>> skimmers, TContext context)
{
    // ONE-TIME initialization of skimmers. Do not call
    // Initialize more than once per skimmer instantiation.
    var failedSkimmers = new List<ISkimmer<TContext>>();

    foreach (ISkimmer<TContext> skimmer in skimmers)
    {
        try
        {
            context.Rule = skimmer;
            skimmer.Initialize(context);
        }
        catch (Exception ex)
        {
            RuntimeErrors |= RuntimeConditions.ExceptionInSkimmerInitialize;
            Errors.LogUnhandledExceptionInitializingRule(context, ex);
            failedSkimmers.Add(skimmer);
        }
    }

    // Rules that failed to initialize never participate in analysis.
    foreach (ISkimmer<TContext> failedSkimmer in failedSkimmers)
    {
        skimmers.Remove(failedSkimmer);
    }

    return skimmers;
}
/// <summary>
/// Builds the run configuration from command-line options. Returns null when
/// no configuration file path was supplied; returns an empty bag for the
/// literal value "default"; otherwise loads the bag from the given file.
/// </summary>
public virtual PropertyBag CreateConfigurationFromOptions(TOptions analyzeOptions)
{
    string configurationFilePath = analyzeOptions.ConfigurationFilePath;

    // No path supplied: the caller receives a null configuration.
    if (string.IsNullOrEmpty(configurationFilePath))
    {
        return null;
    }

    var configuration = new PropertyBag();

    // "default" (case-insensitive) selects the built-in defaults, i.e. an
    // empty bag; any other value is treated as a file path to load.
    if (!configurationFilePath.Equals("default", StringComparison.OrdinalIgnoreCase))
    {
        configuration.LoadFrom(configurationFilePath);
    }

    return configuration;
}
}
} | 1 | 10,209 | `invocationInfoTokensToRedact: null` (I really like the convention of using a named parameter whenever the value doesn't communicate the meaning, such as for literal values.) | microsoft-sarif-sdk | .cs |
@@ -61,6 +61,10 @@ class BaseDetector(nn.Module):
"""
pass
+ @abstractmethod
+ async def async_test(self, img, img_meta, **kwargs):
+ pass
+
@abstractmethod
def simple_test(self, img, img_meta, **kwargs):
pass | 1 | import logging
from abc import ABCMeta, abstractmethod
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch.nn as nn
from mmdet.core import auto_fp16, get_classes, tensor2imgs
class BaseDetector(nn.Module):
    """Base class for detectors.

    Subclasses must implement :meth:`extract_feat`, :meth:`forward_train`,
    :meth:`simple_test` and :meth:`aug_test`.
    """

    # Python 2 style metaclass declaration; inert on Python 3 but kept for
    # backward compatibility with the original code base.
    __metaclass__ = ABCMeta

    def __init__(self):
        super(BaseDetector, self).__init__()
        # Toggled externally when mixed-precision (fp16) support is enabled.
        self.fp16_enabled = False

    @property
    def with_neck(self):
        # True if the detector defines a (non-None) neck module.
        return hasattr(self, 'neck') and self.neck is not None

    @property
    def with_shared_head(self):
        # True if the detector defines a (non-None) shared head.
        return hasattr(self, 'shared_head') and self.shared_head is not None

    @property
    def with_bbox(self):
        # True if the detector defines a (non-None) bbox head.
        return hasattr(self, 'bbox_head') and self.bbox_head is not None

    @property
    def with_mask(self):
        # True if the detector defines a (non-None) mask head.
        return hasattr(self, 'mask_head') and self.mask_head is not None

    @abstractmethod
    def extract_feat(self, imgs):
        """Extract features from a batch of images."""
        pass

    def extract_feats(self, imgs):
        """Lazily extract features for each image batch in ``imgs``.

        Args:
            imgs (list[Tensor]): one tensor per test-time augmentation.

        Yields:
            The feature representation of each batch.
        """
        assert isinstance(imgs, list)
        for img in imgs:
            yield self.extract_feat(img)

    @abstractmethod
    def forward_train(self, imgs, img_metas, **kwargs):
        """
        Args:
            img (list[Tensor]): list of tensors of shape (1, C, H, W).
                Typically these should be mean centered and std scaled.

            img_metas (list[dict]): list of image info dict where each dict
                has:
                'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.

             **kwargs: specific to concrete implementation
        """
        pass

    @abstractmethod
    def simple_test(self, img, img_meta, **kwargs):
        """Test without test-time augmentation."""
        pass

    @abstractmethod
    def aug_test(self, imgs, img_metas, **kwargs):
        """Test with test-time augmentations."""
        pass

    def init_weights(self, pretrained=None):
        # Only logs the checkpoint path; subclasses perform the actual
        # weight loading.
        if pretrained is not None:
            logger = logging.getLogger()
            logger.info('load model from: {}'.format(pretrained))

    def forward_test(self, imgs, img_metas, **kwargs):
        """
        Args:
            imgs (List[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains all images in the batch.
            img_meta (List[List[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch
        """
        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError('{} must be a list, but got {}'.format(
                    name, type(var)))

        num_augs = len(imgs)
        if num_augs != len(img_metas):
            raise ValueError(
                'num of augmentations ({}) != num of image meta ({})'.format(
                    len(imgs), len(img_metas)))
        # TODO: remove the restriction of imgs_per_gpu == 1 when prepared
        imgs_per_gpu = imgs[0].size(0)
        assert imgs_per_gpu == 1

        if num_augs == 1:
            return self.simple_test(imgs[0], img_metas[0], **kwargs)
        else:
            return self.aug_test(imgs, img_metas, **kwargs)

    @auto_fp16(apply_to=('img', ))
    def forward(self, img, img_meta, return_loss=True, **kwargs):
        """
        Calls either forward_train or forward_test depending on whether
        return_loss=True. Note this setting will change the expected inputs.
        When `return_loss=False`, img and img_meta are single-nested (i.e.
        Tensor and List[dict]), and when `return_loss=True`, img and img_meta
        should be double nested (i.e. List[Tensor], List[List[dict]]), with
        the outer list indicating test time augmentations.
        """
        if return_loss:
            return self.forward_train(img, img_meta, **kwargs)
        else:
            return self.forward_test(img, img_meta, **kwargs)

    def show_result(self, data, result, dataset=None, score_thr=0.3):
        """Draw detection (and optional segmentation) results on the images.

        Args:
            data (dict): input batch containing 'img' and 'img_meta'.
            result (tuple | list): either ``(bbox_result, segm_result)`` or
                just ``bbox_result`` for models without a mask head.
            dataset (str | list | tuple | None): class names, a dataset alias
                understood by ``get_classes``, or None to use ``self.CLASSES``.
            score_thr (float): minimum score for a detection to be drawn.
        """
        if isinstance(result, tuple):
            bbox_result, segm_result = result
        else:
            bbox_result, segm_result = result, None

        img_tensor = data['img'][0]
        img_metas = data['img_meta'][0].data[0]
        imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
        assert len(imgs) == len(img_metas)

        if dataset is None:
            class_names = self.CLASSES
        elif isinstance(dataset, str):
            class_names = get_classes(dataset)
        elif isinstance(dataset, (list, tuple)):
            class_names = dataset
        else:
            raise TypeError(
                'dataset must be a valid dataset name or a sequence'
                ' of class names, not {}'.format(type(dataset)))

        for img, img_meta in zip(imgs, img_metas):
            h, w, _ = img_meta['img_shape']
            img_show = img[:h, :w, :]

            bboxes = np.vstack(bbox_result)
            # draw segmentation masks
            if segm_result is not None:
                segms = mmcv.concat_list(segm_result)
                inds = np.where(bboxes[:, -1] > score_thr)[0]
                for i in inds:
                    color_mask = np.random.randint(
                        0, 256, (1, 3), dtype=np.uint8)
                    # Use the builtin ``bool``: the ``np.bool`` alias is
                    # deprecated since NumPy 1.20 and removed in 1.24.
                    mask = maskUtils.decode(segms[i]).astype(bool)
                    img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5
            # draw bounding boxes
            labels = [
                np.full(bbox.shape[0], i, dtype=np.int32)
                for i, bbox in enumerate(bbox_result)
            ]
            labels = np.concatenate(labels)
            mmcv.imshow_det_bboxes(
                img_show,
                bboxes,
                labels,
                class_names=class_names,
                score_thr=score_thr)
| 1 | 18,259 | maybe renamed to `async_simple_test` if we consider supporting aug test later on? | open-mmlab-mmdetection | py |
@@ -113,7 +113,7 @@ module Mongoid #:nodoc:
# @return [ Document ] A new document.
def initialize(attrs = nil)
@new_record = true
- @attributes = default_attributes
+ @attributes = apply_default_attributes
process(attrs) do |document|
yield self if block_given?
identify | 1 | # encoding: utf-8
module Mongoid #:nodoc:
# This is the base module for all domain objects that need to be persisted to
# the database as documents.
module Document
extend ActiveSupport::Concern
include Mongoid::Components
include Mongoid::MultiDatabase
included do
attr_reader :new_record
end
# Default comparison is via the string version of the id.
#
# @example Compare two documents.
# person <=> other_person
#
# @param [ Document ] other The document to compare with.
#
# @return [ Integer ] -1, 0, 1.
def <=>(other)
id.to_s <=> other.id.to_s
end
# Performs equality checking on the document ids. For more robust
# equality checking please override this method.
#
# @example Compare for equality.
# document == other
#
# @param [ Document, Object ] other The other object to compare with.
#
# @return [ true, false ] True if the ids are equal, false if not.
def ==(other)
return false unless other.is_a?(Document)
id == other.id || equal?(other)
end
# Performs class equality checking.
#
# @example Compare the classes.
# document === other
#
# @param [ Document, Object ] other The other object to compare with.
#
# @return [ true, false ] True if the classes are equal, false if not.
def ===(other)
self.class == other.class
end
# Delegates to ==. Used when needing checks in hashes.
#
# @example Perform equality checking.
# document.eql?(other)
#
# @param [ Document, Object ] other The object to check against.
#
# @return [ true, false ] True if equal, false if not.
def eql?(other)
self == (other)
end
# Delegates to id in order to allow two records of the same type and id to
# work with something like:
#
# [ Person.find(1), Person.find(2), Person.find(3) ] &
# [ Person.find(1), Person.find(4) ] # => [ Person.find(1) ]
#
# @example Get the hash.
# document.hash
#
# @return [ Integer ] The hash of the document's id.
def hash
id.hash
end
# Return the attributes hash with indifferent access. Used mostly for
# convenience - use +Document#raw_attributes+ where you dont care if the
# keys are all strings.
#
# @example Get the attributes.
# person.attributes
#
# @return [ HashWithIndifferentAccess ] The attributes.
def attributes
@attributes.with_indifferent_access
end
# Generate an id for this +Document+.
#
# @example Create the id.
# person.identify
#
# @return [ BSON::ObjectId, String ] A newly created id.
def identify
Identity.new(self).create
end
# Instantiate a new +Document+, setting the Document's attributes if
# given. If no attributes are provided, they will be initialized with
# an empty +Hash+.
#
# If a primary key is defined, the document's id will be set to that key,
# otherwise it will be set to a fresh +BSON::ObjectId+ string.
#
# @example Create a new document.
# Person.new(:title => "Sir")
#
# @param [ Hash ] attrs The attributes to set up the document with.
#
# @return [ Document ] A new document.
def initialize(attrs = nil)
@new_record = true
@attributes = default_attributes
process(attrs) do |document|
yield self if block_given?
identify
end
run_callbacks(:initialize) { self }
end
# Return the attributes hash.
#
# @example Get the untouched attributes.
# person.raw_attributes
#
# @return [ Hash ] This document's attributes.
def raw_attributes
@attributes
end
# Reloads the +Document+ attributes from the database. If the document has
# not been saved then an error will get raised if the configuration option
# was set.
#
# @example Reload the document.
# person.reload
#
# @raise [ Errors::DocumentNotFound ] If the document was deleted.
#
# @return [ Document ] The document, reloaded.
def reload
reloaded = collection.find_one(:_id => id)
if Mongoid.raise_not_found_error
raise Errors::DocumentNotFound.new(self.class, id) if reloaded.nil?
end
@attributes = {}.merge(reloaded || {})
reset_modifications
tap do
relations.keys.each do |name|
if instance_variable_defined?("@#{name}")
remove_instance_variable("@#{name}")
end
end
end
end
# Remove a child document from this parent. If an embeds one then set to
# nil, otherwise remove from the embeds many.
#
# This is called from the +RemoveEmbedded+ persistence command.
#
# @example Remove the child.
# document.remove_child(child)
#
# @param [ Document ] child The child (embedded) document to remove.
def remove_child(child)
name = child.metadata.name
if child.embedded_one?
remove_instance_variable("@#{name}") if instance_variable_defined?("@#{name}")
else
send(name).delete(child)
end
end
# Return an array with this +Document+ only in it.
#
# @example Return the document in an array.
# document.to_a
#
# @return [ Array<Document> ] An array with the document as its only item.
def to_a
[ self ]
end
# Return a hash of the entire document hierarchy from this document and
# below. Used when the attributes are needed for everything and not just
# the current document.
#
# @example Get the full hierarchy.
# person.as_document
#
# @return [ Hash ] A hash of all attributes in the hierarchy.
def as_document
attributes = @attributes
attributes.tap do |attrs|
relations.select { |name, meta| meta.embedded? }.each do |name, meta|
relation = send(name, false, :continue => false)
attrs[name] = relation.as_document unless relation.blank?
end
end
end
module ClassMethods #:nodoc:
# Performs class equality checking.
#
# @example Compare the classes.
# document === other
#
# @param [ Document, Object ] other The other object to compare with.
#
# @return [ true, false ] True if the classes are equal, false if not.
#
# @since 2.0.0.rc.4
def ===(other)
self == (other.is_a?(Class) ? other : other.class)
end
# Instantiate a new object, only when loaded from the database or when
# the attributes have already been typecast.
#
# @example Create the document.
# Person.instantiate(:title => "Sir", :age => 30)
#
# @param [ Hash ] attrs The hash of attributes to instantiate with.
#
# @return [ Document ] A new document.
def instantiate(attrs = nil)
attributes = attrs || {}
if attributes["_id"]
allocate.tap do |doc|
doc.instance_variable_set(:@attributes, attributes)
doc.setup_modifications
end
else
new(attrs)
end
end
# Returns all types to query for when using this class as the base.
#
# @example Get the types.
# document._types
#
# @return [ Array<Class> ] All subclasses of the current document.
def _types
@_type ||= [descendants + [self]].flatten.uniq.map(&:to_s)
end
# Set the i18n scope to overwrite ActiveModel.
def i18n_scope
:mongoid
end
end
end
end
| 1 | 8,696 | So where is default_attributes now? Is it used anywhere else still? If not, can it be removed along with any tests of it? | mongodb-mongoid | rb |
@@ -42,9 +42,9 @@ namespace OpenTelemetry.Metrics.Aggregators
{
return new DoubleSumData
{
- StartTimestamp = new DateTime(this.GetLastStartTimestamp().Ticks),
+ StartTimestamp = new DateTime(this.GetLastStartTimestamp().Ticks, DateTimeKind.Utc),
Sum = this.checkPoint,
- Timestamp = new DateTime(this.GetLastEndTimestamp().Ticks),
+ Timestamp = new DateTime(this.GetLastEndTimestamp().Ticks, DateTimeKind.Utc),
};
}
| 1 | // <copyright file="DoubleCounterSumAggregator.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Threading;
using OpenTelemetry.Metrics.Export;
namespace OpenTelemetry.Metrics.Aggregators
{
/// <summary>
/// Basic aggregator which calculates a Sum from individual measurements.
/// </summary>
[Obsolete("Metrics API/SDK is not recommended for production. See https://github.com/open-telemetry/opentelemetry-dotnet/issues/1501 for more information on metrics support.")]
public class DoubleCounterSumAggregator : Aggregator<double>
{
    // Running total since the last checkpoint.
    private double sum;

    // Value captured at the most recent checkpoint; exported by ToMetricData.
    private double checkPoint;

    /// <inheritdoc/>
    public override void Checkpoint()
    {
        // Checkpoints the current running sum and atomically resets it so
        // counting starts again for the next interval.
        base.Checkpoint();
        this.checkPoint = Interlocked.Exchange(ref this.sum, 0.0);
    }

    /// <inheritdoc/>
    public override MetricData ToMetricData()
    {
        return new DoubleSumData
        {
            // Construct the DateTime with an explicit Utc kind; the bare
            // DateTime(long) constructor yields DateTimeKind.Unspecified,
            // which exporters may re-interpret as local time.
            StartTimestamp = new DateTime(this.GetLastStartTimestamp().Ticks, DateTimeKind.Utc),
            Sum = this.checkPoint,
            Timestamp = new DateTime(this.GetLastEndTimestamp().Ticks, DateTimeKind.Utc),
        };
    }

    /// <inheritdoc/>
    public override AggregationType GetAggregationType()
    {
        return AggregationType.DoubleSum;
    }

    /// <inheritdoc/>
    public override void Update(double value)
    {
        // Adds value to the running total in a thread safe manner via a
        // compare-and-swap retry loop (doubles have no atomic add).
        double initialTotal, computedTotal;
        do
        {
            initialTotal = this.sum;
            computedTotal = initialTotal + value;
        }
        while (initialTotal != Interlocked.CompareExchange(ref this.sum, computedTotal, initialTotal));
    }
}
}
| 1 | 18,743 | We could move this to the base class and remove all these changes. What do you think? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -13,6 +13,7 @@ feature "subscriber requests access to beta trail" do
click_on "Request Access"
+ expect(page).to have_content(I18n.t("beta.replies.flashes.success"))
expect(page).not_to have_content("Exciting Beta Trail")
end
end | 1 | require "rails_helper"
feature "subscriber requests access to beta trail" do
  scenario "gets added to database" do
    user = create(:subscriber)
    trail = create(:trail)
    create(:status, user: user, completeable: trail, state: Status::COMPLETE)
    create(:beta_offer, name: "Exciting Beta Trail")

    visit practice_path(as: user)

    expect(page).to have_content("Exciting Beta Trail")

    click_on "Request Access"

    # Wait for the success flash before the negative expectation below;
    # otherwise have_no_content can pass vacuously against the stale page
    # before the request finishes.
    expect(page).to have_content(I18n.t("beta.replies.flashes.success"))
    expect(page).not_to have_content("Exciting Beta Trail")
  end
end
| 1 | 16,255 | While we're doing some translation work, could you extract this button text and the other static text from the view? | thoughtbot-upcase | rb |
@@ -181,6 +181,12 @@ public class JobCallbackUtil {
String headersKeyPerSequence =
headersKey.replace(SEQUENCE_TOKEN, sequenceStr);
String headersValue = props.get(headersKeyPerSequence);
+
+ // replace all the tokens in the header
+ if(headersValue != null) {
+ headersValue = replaceTokens(headersValue, contextInfo, false);
+ }
+
privateLogger.info("headers: " + headersValue);
Header[] headers = parseHttpHeaders(headersValue);
if (headers != null) { | 1 | package azkaban.execapp.event;
import static azkaban.jobcallback.JobCallbackConstants.CONTEXT_EXECUTION_ID_TOKEN;
import static azkaban.jobcallback.JobCallbackConstants.FIRST_JOB_CALLBACK_URL_TEMPLATE;
import static azkaban.jobcallback.JobCallbackConstants.CONTEXT_FLOW_TOKEN;
import static azkaban.jobcallback.JobCallbackConstants.HEADER_ELEMENT_DELIMITER;
import static azkaban.jobcallback.JobCallbackConstants.HEADER_NAME_VALUE_DELIMITER;
import static azkaban.jobcallback.JobCallbackConstants.HTTP_GET;
import static azkaban.jobcallback.JobCallbackConstants.HTTP_POST;
import static azkaban.jobcallback.JobCallbackConstants.JOB_CALLBACK_BODY_TEMPLATE;
import static azkaban.jobcallback.JobCallbackConstants.JOB_CALLBACK_REQUEST_HEADERS_TEMPLATE;
import static azkaban.jobcallback.JobCallbackConstants.JOB_CALLBACK_REQUEST_METHOD_TEMPLATE;
import static azkaban.jobcallback.JobCallbackConstants.JOB_CALLBACK_URL_TEMPLATE;
import static azkaban.jobcallback.JobCallbackConstants.CONTEXT_JOB_STATUS_TOKEN;
import static azkaban.jobcallback.JobCallbackConstants.CONTEXT_JOB_TOKEN;
import static azkaban.jobcallback.JobCallbackConstants.CONTEXT_PROJECT_TOKEN;
import static azkaban.jobcallback.JobCallbackConstants.SEQUENCE_TOKEN;
import static azkaban.jobcallback.JobCallbackConstants.CONTEXT_SERVER_TOKEN;
import static azkaban.jobcallback.JobCallbackConstants.STATUS_TOKEN;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import org.apache.http.Header;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
import org.apache.log4j.Logger;
import azkaban.event.Event;
import azkaban.execapp.JobRunner;
import azkaban.executor.ExecutableNode;
import azkaban.jobcallback.JobCallbackStatusEnum;
import azkaban.utils.Props;
public class JobCallbackUtil {
  private static final Logger logger = Logger.getLogger(JobCallbackUtil.class);

  // Pre-computed first-sequence callback URL property name for each job
  // status; used for the quick existence check in isThereJobCallbackProperty.
  private static Map<JobCallbackStatusEnum, String> firstJobcallbackPropertyMap =
      new HashMap<JobCallbackStatusEnum, String>(
          JobCallbackStatusEnum.values().length);

  static {
    for (JobCallbackStatusEnum statusEnum : JobCallbackStatusEnum.values()) {
      firstJobcallbackPropertyMap.put(statusEnum,
          replaceStatusToken(FIRST_JOB_CALLBACK_URL_TEMPLATE, statusEnum));
    }
  }

  /**
   * Use to quickly determine if there is a job callback related property in the
   * Props.
   *
   * @param props the job properties
   * @param status the job status of interest
   * @return true if there is job callback related property
   */
  public static boolean isThereJobCallbackProperty(Props props,
      JobCallbackStatusEnum status) {
    if (props == null || status == null) {
      throw new NullPointerException("One of the argument is null");
    }

    String jobCallBackUrl = firstJobcallbackPropertyMap.get(status);
    return props.containsKey(jobCallBackUrl);
  }

  /**
   * Returns true if a job callback property exists for any of the supplied
   * statuses.
   *
   * @param props the job properties
   * @param jobStatuses the statuses to check
   * @return true if any status has a callback property configured
   */
  public static boolean isThereJobCallbackProperty(Props props,
      JobCallbackStatusEnum... jobStatuses) {
    if (props == null || jobStatuses == null) {
      throw new NullPointerException("One of the argument is null");
    }

    for (JobCallbackStatusEnum jobStatus : jobStatuses) {
      if (JobCallbackUtil.isThereJobCallbackProperty(props, jobStatus)) {
        return true;
      }
    }
    return false;
  }

  /**
   * Convenience overload that logs with the class-level logger.
   */
  public static List<HttpRequestBase> parseJobCallbackProperties(Props props,
      JobCallbackStatusEnum status, Map<String, String> contextInfo,
      int maxNumCallback) {
    return parseJobCallbackProperties(props, status, contextInfo,
        maxNumCallback, logger);
  }

  /**
   * This method is responsible for parsing job callback URL properties and
   * converting them into a list of HttpRequestBase, which callers can use to
   * execute.
   *
   * In addition to parsing, it will also replace the tokens with actual
   * values, both in the URL/body and in the configured request headers.
   * Malformed callbacks (unsupported method, or POST without a body) are
   * logged and skipped rather than producing null entries.
   *
   * @param props the job properties
   * @param status the job status the callbacks are registered for
   * @param contextInfo token-to-value map, see buildJobContextInfoMap
   * @param maxNumCallback maximum callback sequence number to scan
   * @param privateLogger logger to report parsing progress/problems
   * @return List<HttpRequestBase> - empty if no job callback related properties
   */
  public static List<HttpRequestBase> parseJobCallbackProperties(Props props,
      JobCallbackStatusEnum status, Map<String, String> contextInfo,
      int maxNumCallback, Logger privateLogger) {

    if (!isThereJobCallbackProperty(props, status)) {
      // short circuit
      return Collections.emptyList();
    }

    List<HttpRequestBase> result = new ArrayList<HttpRequestBase>();

    // replace property templates with status
    String jobCallBackUrlKey =
        replaceStatusToken(JOB_CALLBACK_URL_TEMPLATE, status);
    String requestMethod =
        replaceStatusToken(JOB_CALLBACK_REQUEST_METHOD_TEMPLATE, status);
    String httpBodyKey = replaceStatusToken(JOB_CALLBACK_BODY_TEMPLATE, status);
    String headersKey =
        replaceStatusToken(JOB_CALLBACK_REQUEST_HEADERS_TEMPLATE, status);

    for (int sequence = 1; sequence <= maxNumCallback; sequence++) {
      HttpRequestBase httpRequest = null;
      String sequenceStr = Integer.toString(sequence);

      // callback url
      String callbackUrlKey =
          jobCallBackUrlKey.replace(SEQUENCE_TOKEN, sequenceStr);
      String callbackUrl = props.get(callbackUrlKey);
      if (callbackUrl == null || callbackUrl.length() == 0) {
        // Sequences are expected to be contiguous; the first missing URL
        // terminates the scan.
        break;
      }

      String callbackUrlWithTokenReplaced =
          replaceTokens(callbackUrl, contextInfo, true);

      String requestMethodKey =
          requestMethod.replace(SEQUENCE_TOKEN, sequenceStr);
      String method = props.getString(requestMethodKey, HTTP_GET);

      if (HTTP_POST.equals(method)) {
        String postBodyKey = httpBodyKey.replace(SEQUENCE_TOKEN, sequenceStr);
        String httpBodyValue = props.get(postBodyKey);
        if (httpBodyValue == null) {
          // missing body for POST, not good
          // update the wiki about skipping callback url if body is missing
          privateLogger.warn("Missing value for key: " + postBodyKey
              + " skipping job callback '" + callbackUrl + " for job "
              + contextInfo.get(CONTEXT_JOB_TOKEN));
        } else {
          // put together an URL
          HttpPost httpPost = new HttpPost(callbackUrlWithTokenReplaced);
          String postActualBody =
              replaceTokens(httpBodyValue, contextInfo, false);
          privateLogger.info("postActualBody: " + postActualBody);
          httpPost.setEntity(createStringEntity(postActualBody));
          httpRequest = httpPost;
        }
      } else if (HTTP_GET.equals(method)) {
        // GET
        httpRequest = new HttpGet(callbackUrlWithTokenReplaced);
      } else {
        privateLogger.warn("Unsupported request method: " + method
            + ". Only POST and GET are supported");
      }

      if (httpRequest == null) {
        // The callback was malformed (unsupported method or missing POST
        // body). Previously a null request could be dereferenced below or
        // added to the result; skip it instead.
        continue;
      }

      String headersKeyPerSequence =
          headersKey.replace(SEQUENCE_TOKEN, sequenceStr);
      String headersValue = props.get(headersKeyPerSequence);

      // Replace context tokens in the header values as well. No URL encoding
      // is applied because these are raw HTTP header values, not query params.
      if (headersValue != null) {
        headersValue = replaceTokens(headersValue, contextInfo, false);
      }

      privateLogger.info("headers: " + headersValue);
      Header[] headers = parseHttpHeaders(headersValue);
      if (headers != null) {
        httpRequest.setHeaders(headers);
        privateLogger.info("# of headers found: " + headers.length);
      }
      result.add(httpRequest);
    }
    return result;
  }

  /**
   * Parse headers of the form "name:value" separated by the configured
   * element delimiter. Entries without a name/value delimiter are ignored.
   *
   * @param headers the raw header specification string
   * @return null if headers is null or empty
   */
  public static Header[] parseHttpHeaders(String headers) {
    if (headers == null || headers.length() == 0) {
      return null;
    }

    String[] headerArray = headers.split(HEADER_ELEMENT_DELIMITER);
    List<Header> headerList = new ArrayList<Header>(headerArray.length);
    for (int i = 0; i < headerArray.length; i++) {
      String headerPair = headerArray[i];
      // Split only on the first delimiter so header values may themselves
      // contain the delimiter character.
      int index = headerPair.indexOf(HEADER_NAME_VALUE_DELIMITER);
      if (index != -1) {
        headerList.add(new BasicHeader(headerPair.substring(0, index),
            headerPair.substring(index + 1)));
      }
    }
    return headerList.toArray(new BasicHeader[0]);
  }

  /** Substitutes the status token in a property-name template. */
  private static String replaceStatusToken(String template,
      JobCallbackStatusEnum status) {
    return template.replaceFirst(STATUS_TOKEN, status.name().toLowerCase());
  }

  /** Wraps StringEntity creation, converting the checked encoding exception. */
  private static StringEntity createStringEntity(String str) {
    try {
      return new StringEntity(str);
    } catch (UnsupportedEncodingException e) {
      throw new RuntimeException("Encoding not supported", e);
    }
  }

  /**
   * This method takes the job context info. and puts the values into a map
   * with keys as the tokens.
   *
   * @param event a job-level event whose runner is a JobRunner
   * @param server the executor server host value for the server token
   * @return Map<String,String> token-to-value map
   * @throws IllegalArgumentException if the event is not a job event
   */
  public static Map<String, String> buildJobContextInfoMap(Event event,
      String server) {

    if (event.getRunner() instanceof JobRunner) {
      JobRunner jobRunner = (JobRunner) event.getRunner();
      ExecutableNode node = jobRunner.getNode();
      String projectName = node.getParentFlow().getProjectName();
      String flowName = node.getParentFlow().getFlowId();
      String executionId =
          String.valueOf(node.getParentFlow().getExecutionId());
      String jobId = node.getId();

      Map<String, String> result = new HashMap<String, String>();
      result.put(CONTEXT_SERVER_TOKEN, server);
      result.put(CONTEXT_PROJECT_TOKEN, projectName);
      result.put(CONTEXT_FLOW_TOKEN, flowName);
      result.put(CONTEXT_EXECUTION_ID_TOKEN, executionId);
      result.put(CONTEXT_JOB_TOKEN, jobId);
      result.put(CONTEXT_JOB_STATUS_TOKEN, node.getStatus().name().toLowerCase());

      return result;
    } else {
      throw new IllegalArgumentException("Provided event is not a job event");
    }
  }

  /**
   * Replace the supported tokens in the URL with values in the contextInfo.
   * This will also make sure the values are HTTP encoded.
   *
   * @param value the raw string containing tokens
   * @param contextInfo token-to-value map
   * @param withEncoding - whether the token values will be HTTP encoded
   * @return String - value with tokens replaced with values
   */
  public static String replaceTokens(String value,
      Map<String, String> contextInfo, boolean withEncoding) {

    String result = value;
    String tokenValue =
        encodeQueryParam(contextInfo.get(CONTEXT_SERVER_TOKEN), withEncoding);
    result = result.replaceFirst(Pattern.quote(CONTEXT_SERVER_TOKEN), tokenValue);

    tokenValue = encodeQueryParam(contextInfo.get(CONTEXT_PROJECT_TOKEN), withEncoding);
    result = result.replaceFirst(Pattern.quote(CONTEXT_PROJECT_TOKEN), tokenValue);

    tokenValue = encodeQueryParam(contextInfo.get(CONTEXT_FLOW_TOKEN), withEncoding);
    result = result.replaceFirst(Pattern.quote(CONTEXT_FLOW_TOKEN), tokenValue);

    tokenValue = encodeQueryParam(contextInfo.get(CONTEXT_JOB_TOKEN), withEncoding);
    result = result.replaceFirst(Pattern.quote(CONTEXT_JOB_TOKEN), tokenValue);

    tokenValue =
        encodeQueryParam(contextInfo.get(CONTEXT_EXECUTION_ID_TOKEN), withEncoding);
    result = result.replaceFirst(Pattern.quote(CONTEXT_EXECUTION_ID_TOKEN), tokenValue);

    tokenValue =
        encodeQueryParam(contextInfo.get(CONTEXT_JOB_STATUS_TOKEN), withEncoding);
    result = result.replaceFirst(Pattern.quote(CONTEXT_JOB_STATUS_TOKEN), tokenValue);

    return result;
  }

  /**
   * URL-encodes the given string as UTF-8 when withEncoding is true,
   * otherwise returns it unchanged. Assumes str is non-null when encoding
   * is requested (contextInfo is fully populated by buildJobContextInfoMap).
   */
  private static String encodeQueryParam(String str, boolean withEncoding) {
    if (!withEncoding) {
      return str;
    }
    try {
      return URLEncoder.encode(str, "UTF-8");
    } catch (UnsupportedEncodingException e) {
      throw new IllegalArgumentException(
          "Encountered problem during encoding:", e);
    }
  }
}
| 1 | 10,904 | Nit: Please fix indentation (note: Azkaban uses 2 spaces and not tabs). | azkaban-azkaban | java |
@@ -60,7 +60,7 @@ module.exports = {
TestCase.assertEqual(defaultRealm.schemaVersion, 0);
TestCase.assertThrows(function() {
- new Realm({schemaVersion: 1});
+ new Realm({schemaVersion: 1, schema: []});
}, "Realm already opened at a different schema version");
TestCase.assertEqual(new Realm().schemaVersion, 0); | 1 | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 2016 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
'use strict';
var Realm = require('realm');
var TestCase = require('./asserts');
var schemas = require('./schemas');
module.exports = {
testRealmConstructor: function() {
  // A Realm should be constructible from a config object, and the
  // constructor itself should behave as an ordinary function.
  var realm = new Realm({schema: []});
  TestCase.assertTrue(realm instanceof Realm);

  TestCase.assertEqual(typeof Realm, 'function');
  TestCase.assertTrue(Realm instanceof Function);
},
testRealmConstructorPath: function() {
  // Invalid constructor arguments must throw.
  TestCase.assertThrows(function() {
    new Realm('');
  }, 'Realm cannot be created with an invalid path');
  TestCase.assertThrows(function() {
    new Realm('test1.realm', 'invalidArgument');
  }, 'Realm constructor can only have 0 or 1 argument(s)');

  // Both an explicit empty-schema config and no config resolve to the
  // default path.
  var defaultRealm = new Realm({schema: []});
  TestCase.assertEqual(defaultRealm.path, Realm.defaultPath);

  var defaultRealm2 = new Realm();
  TestCase.assertEqual(defaultRealm2.path, Realm.defaultPath);

  // Relative paths are resolved against the default Realm directory.
  var defaultDir = Realm.defaultPath.substring(0, Realm.defaultPath.lastIndexOf("/") + 1)
  var testPath = 'test1.realm';
  var realm = new Realm({schema: [], path: testPath});
  TestCase.assertEqual(realm.path, defaultDir + testPath);

  var testPath2 = 'test2.realm';
  var realm2 = new Realm({schema: [], path: testPath2});
  TestCase.assertEqual(realm2.path, defaultDir + testPath2);
},
// Verify schemaVersion handling in the constructor: reopening a cached
// Realm at a different version throws, matching/omitted versions are
// accepted, and bumping the version permits schema changes.
testRealmConstructorSchemaVersion: function() {
    var defaultRealm = new Realm({schema: []});
    TestCase.assertEqual(defaultRealm.schemaVersion, 0);

    // The default Realm is already open at version 0, so any other
    // version must be rejected while it stays cached.
    TestCase.assertThrows(function() {
        new Realm({schemaVersion: 1});
    }, "Realm already opened at a different schema version");

    TestCase.assertEqual(new Realm().schemaVersion, 0);
    TestCase.assertEqual(new Realm({schemaVersion: 0}).schemaVersion, 0);

    var realm = new Realm({path: 'test1.realm', schema: [], schemaVersion: 1});
    TestCase.assertEqual(realm.schemaVersion, 1);
    TestCase.assertEqual(realm.schema.length, 0);
    realm.close();

    // FIXME - enable once realm initialization supports schema comparison
    // TestCase.assertThrows(function() {
    //     realm = new Realm({path: testPath, schema: [schemas.TestObject], schemaVersion: 1});
    // }, "schema changes require updating the schema version");

    // Reopening at a higher version allows adding the TestObject schema.
    realm = new Realm({path: 'test1.realm', schema: [schemas.TestObject], schemaVersion: 2});
    realm.write(function() {
        realm.create('TestObject', {doubleCol: 1});
    });
    TestCase.assertEqual(realm.objects('TestObject')[0].doubleCol, 1);
    TestCase.assertEqual(realm.schemaVersion, 2);
    TestCase.assertEqual(realm.schema.length, 1);
},
testRealmConstructorDynamicSchema: function() {
var realm = new Realm({schema: [schemas.TestObject]});
realm.write(function() {
realm.create('TestObject', [1])
});
realm.close();
realm = new Realm();
var objects = realm.objects('TestObject');
TestCase.assertEqual(objects.length, 1);
TestCase.assertEqual(objects[0].doubleCol, 1.0);
},
testRealmConstructorSchemaValidation: function() {
TestCase.assertThrows(function() {
new Realm({schema: schemas.AllTypes});
}, 'The schema should be an array');
TestCase.assertThrows(function() {
new Realm({schema: ['SomeType']});
}, 'The schema should be an array of objects');
TestCase.assertThrows(function() {
new Realm({schema: [{}]});
}, 'The schema should be an array of ObjectSchema objects');
TestCase.assertThrows(function() {
new Realm({schema: [{name: 'SomeObject'}]});
}, 'The schema should be an array of ObjectSchema objects');
TestCase.assertThrows(function() {
new Realm({schema: [{properties: {intCol: 'int'}}]});
}, 'The schema should be an array of ObjectSchema objects');
},
testRealmConstructorReadOnly: function() {
var realm = new Realm({schema: [schemas.TestObject]});
realm.write(function() {
realm.create('TestObject', [1])
});
TestCase.assertEqual(realm.readOnly, false);
realm.close();
realm = new Realm({readOnly: true, schema: [schemas.TestObject]});
var objects = realm.objects('TestObject');
TestCase.assertEqual(objects.length, 1);
TestCase.assertEqual(objects[0].doubleCol, 1.0);
TestCase.assertEqual(realm.readOnly, true);
TestCase.assertThrows(function() {
realm.write(function() {});
});
realm.close();
realm = new Realm({readOnly: true});
TestCase.assertEqual(realm.schema.length, 1);
TestCase.assertEqual(realm.readOnly, true);
},
testDefaultPath: function() {
var defaultPath = Realm.defaultPath;
var defaultRealm = new Realm({schema: []});
TestCase.assertEqual(defaultRealm.path, Realm.defaultPath);
try {
var newPath = Realm.defaultPath.substring(0, defaultPath.lastIndexOf('/') + 1) + 'default2.realm';
Realm.defaultPath = newPath;
defaultRealm = new Realm({schema: []});
TestCase.assertEqual(defaultRealm.path, newPath, "should use updated default realm path");
TestCase.assertEqual(Realm.defaultPath, newPath, "defaultPath should have been updated");
} finally {
Realm.defaultPath = defaultPath;
}
},
// Realm.schemaVersion() reads the version straight from a file (-1 when
// the file does not exist) and accepts an encryption key for encrypted
// files; malformed or extra arguments throw.
testRealmSchemaVersion: function() {
    // No file yet: the reported version is -1.
    TestCase.assertEqual(Realm.schemaVersion(Realm.defaultPath), -1);

    var realm = new Realm({schema: []});
    TestCase.assertEqual(realm.schemaVersion, 0);
    TestCase.assertEqual(Realm.schemaVersion(Realm.defaultPath), 0);

    realm = new Realm({schema: [], schemaVersion: 2, path: 'another.realm'});
    TestCase.assertEqual(realm.schemaVersion, 2);
    TestCase.assertEqual(Realm.schemaVersion('another.realm'), 2);

    // Encrypted files need the matching key to read the version.
    var encryptionKey = new Int8Array(64);
    realm = new Realm({schema: [], schemaVersion: 3, path: 'encrypted.realm', encryptionKey: encryptionKey});
    TestCase.assertEqual(realm.schemaVersion, 3);
    TestCase.assertEqual(Realm.schemaVersion('encrypted.realm', encryptionKey), 3);

    // Extra or malformed arguments are rejected.
    TestCase.assertThrows(function() {
        Realm.schemaVersion('encrypted.realm', encryptionKey, 'extra');
    });
    TestCase.assertThrows(function() {
        Realm.schemaVersion('encrypted.realm', 'asdf');
    });
},
// realm.write runs the callback inside a transaction: exceptions propagate
// to the caller, the Realm stays usable afterwards, and nesting is illegal.
testRealmWrite: function() {
    var realm = new Realm({schema: [schemas.IntPrimary, schemas.AllTypes, schemas.TestObject]});

    // exceptions should be propagated
    TestCase.assertThrows(function() {
        realm.write(function() {
            realm.invalid();
        });
    });

    // writes should be possible after caught exception
    realm.write(function() {
        realm.create('TestObject', {doubleCol: 1});
    });
    TestCase.assertEqual(1, realm.objects('TestObject').length);

    realm.write(function() {
        // nested transactions not supported
        TestCase.assertThrows(function() {
            realm.write(function() {});
        });
    });
},
testRealmCreate: function() {
var realm = new Realm({schema: [schemas.TestObject]});
TestCase.assertThrows(function() {
realm.create('TestObject', {doubleCol: 1});
}, 'can only create inside a write transaction');
realm.write(function() {
realm.create('TestObject', {doubleCol: 1});
realm.create('TestObject', {doubleCol: 2});
});
var objects = realm.objects('TestObject');
TestCase.assertEqual(objects.length, 2, 'wrong object count');
TestCase.assertEqual(objects[0].doubleCol, 1, 'wrong object property value');
TestCase.assertEqual(objects[1].doubleCol, 2, 'wrong object property value');
},
// Primary-key objects: duplicate keys throw unless update=true is passed,
// in which case the existing object is updated in place with no new row.
testRealmCreatePrimaryKey: function() {
    var realm = new Realm({schema: [schemas.IntPrimary]});
    realm.write(function() {
        var obj0 = realm.create('IntPrimaryObject', {
            primaryCol: 0,
            valueCol: 'val0',
        });

        // Re-creating with the same key (and no update flag) must throw.
        TestCase.assertThrows(function() {
            realm.create('IntPrimaryObject', {
                primaryCol: 0,
                valueCol: 'val0',
            });
        }, 'cannot create object with conflicting primary key');

        // update=true with a new key simply inserts.
        realm.create('IntPrimaryObject', {
            primaryCol: 1,
            valueCol: 'val1',
        }, true);

        var objects = realm.objects('IntPrimaryObject');
        TestCase.assertEqual(objects.length, 2);

        // update=true with an existing key updates in place - no new row.
        realm.create('IntPrimaryObject', {
            primaryCol: 0,
            valueCol: 'newVal0',
        }, true);
        TestCase.assertEqual(obj0.valueCol, 'newVal0');
        TestCase.assertEqual(objects.length, 2);

        // Omitted properties are left untouched during an update.
        realm.create('IntPrimaryObject', {primaryCol: 0}, true);
        TestCase.assertEqual(obj0.valueCol, 'newVal0');
    });
},
testRealmCreateOptionals: function() {
var realm = new Realm({schema: [schemas.NullableBasicTypes, schemas.LinkTypes, schemas.TestObject]});
var basic, links;
realm.write(function() {
basic = realm.create('NullableBasicTypesObject', {});
links = realm.create('LinkTypesObject', {});
});
for (var name in schemas.NullableBasicTypes.properties) {
TestCase.assertEqual(basic[name], null);
}
TestCase.assertEqual(links.objectCol, null);
TestCase.assertEqual(links.arrayCol.length, 0);
},
// Upsert semantics (create with update=true) across all property types:
// provided properties overwrite, omitted ones are preserved, and explicit
// null/undefined clears object links.
testRealmCreateUpsert: function() {
    var realm = new Realm({schema: [schemas.IntPrimary, schemas.StringPrimary, schemas.AllTypes, schemas.TestObject]});
    realm.write(function() {
        var values = {
            primaryCol: '0',
            boolCol: true,
            intCol: 1,
            floatCol: 1.1,
            doubleCol: 1.11,
            stringCol: '1',
            dateCol: new Date(1),
            dataCol: new ArrayBuffer(1),
            objectCol: {doubleCol: 1},
            arrayCol: [],
        };

        var obj0 = realm.create('AllTypesObject', values);

        // Same key without update=true must throw.
        TestCase.assertThrows(function() {
            realm.create('AllTypesObject', values);
        }, 'cannot create object with conflicting primary key');

        // New key with update=true inserts a second object.
        var obj1 = realm.create('AllTypesObject', {
            primaryCol: '1',
            boolCol: false,
            intCol: 2,
            floatCol: 2.2,
            doubleCol: 2.22,
            stringCol: '2',
            dateCol: new Date(2),
            dataCol: new ArrayBuffer(2),
            objectCol: {doubleCol: 0},
            arrayCol: [{doubleCol: 2}],
        }, true);

        var objects = realm.objects('AllTypesObject');
        TestCase.assertEqual(objects.length, 2);

        // Existing key with update=true overwrites every supplied property,
        // including clearing the object link with an explicit null.
        realm.create('AllTypesObject', {
            primaryCol: '0',
            boolCol: false,
            intCol: 2,
            floatCol: 2.2,
            doubleCol: 2.22,
            stringCol: '2',
            dateCol: new Date(2),
            dataCol: new ArrayBuffer(2),
            objectCol: null,
            arrayCol: [{doubleCol: 2}],
        }, true);

        TestCase.assertEqual(objects.length, 2);
        TestCase.assertEqual(obj0.stringCol, '2');
        TestCase.assertEqual(obj0.boolCol, false);
        TestCase.assertEqual(obj0.intCol, 2);
        TestCase.assertEqualWithTolerance(obj0.floatCol, 2.2, 0.000001);
        TestCase.assertEqualWithTolerance(obj0.doubleCol, 2.22, 0.000001);
        TestCase.assertEqual(obj0.dateCol.getTime(), 2);
        TestCase.assertEqual(obj0.dataCol.byteLength, 2);
        TestCase.assertEqual(obj0.objectCol, null);
        TestCase.assertEqual(obj0.arrayCol.length, 1);

        // Updating with only the primary key leaves other properties alone.
        realm.create('AllTypesObject', {primaryCol: '0'}, true);
        realm.create('AllTypesObject', {primaryCol: '1'}, true);
        TestCase.assertEqual(obj0.stringCol, '2');
        TestCase.assertEqual(obj0.objectCol, null);
        TestCase.assertEqual(obj1.objectCol.doubleCol, 0);

        // A partial update changes only the supplied properties.
        realm.create('AllTypesObject', {
            primaryCol: '0',
            stringCol: '3',
            objectCol: {doubleCol: 0},
        }, true);
        TestCase.assertEqual(obj0.stringCol, '3');
        TestCase.assertEqual(obj0.boolCol, false);
        TestCase.assertEqual(obj0.intCol, 2);
        TestCase.assertEqualWithTolerance(obj0.floatCol, 2.2, 0.000001);
        TestCase.assertEqualWithTolerance(obj0.doubleCol, 2.22, 0.000001);
        TestCase.assertEqual(obj0.dateCol.getTime(), 2);
        TestCase.assertEqual(obj0.dataCol.byteLength, 2);
        TestCase.assertEqual(obj0.objectCol.doubleCol, 0);
        TestCase.assertEqual(obj0.arrayCol.length, 1);

        // Both undefined and null clear an object link during an update.
        realm.create('AllTypesObject', {primaryCol: '0', objectCol: undefined}, true);
        realm.create('AllTypesObject', {primaryCol: '1', objectCol: null}, true);
        TestCase.assertEqual(obj0.objectCol, null);
        TestCase.assertEqual(obj1.objectCol, null);

        // test with string primaries
        var obj = realm.create('StringPrimaryObject', {
            primaryCol: '0',
            valueCol: 0
        });
        TestCase.assertEqual(obj.valueCol, 0);

        realm.create('StringPrimaryObject', {
            primaryCol: '0',
            valueCol: 1
        }, true);
        TestCase.assertEqual(obj.valueCol, 1);
    });
},
// Property indexing: bool/int/string/date may be indexed; float, double
// and data may not; an indexed bool may also serve as primary key.
testRealmWithIndexedProperties: function() {
    var realm = new Realm({schema: [schemas.IndexedTypes]});
    realm.write(function() {
        realm.create('IndexedTypesObject', {boolCol: true, intCol: 1, stringCol: '1', dateCol: new Date(1)});
    });

    // indexed: false is always acceptable.
    var NotIndexed = {
        name: 'NotIndexedObject',
        properties: {
            floatCol: {type: 'float', indexed: false}
        }
    };
    new Realm({schema: [NotIndexed], path: '1.realm'});

    // The same schema object is mutated between attempts; each unindexable
    // type must be rejected at Realm construction.
    var IndexedSchema = {
        name: 'IndexedSchema',
    };
    TestCase.assertThrows(function() {
        IndexedSchema.properties = { floatCol: {type: 'float', indexed: true} };
        new Realm({schema: [IndexedSchema], path: '2.realm'});
    });
    TestCase.assertThrows(function() {
        IndexedSchema.properties = { doubleCol: {type: 'double', indexed: true} }
        new Realm({schema: [IndexedSchema], path: '3.realm'});
    });
    TestCase.assertThrows(function() {
        IndexedSchema.properties = { dataCol: {type: 'data', indexed: true} }
        new Realm({schema: [IndexedSchema], path: '4.realm'});
    });

    // primary key
    IndexedSchema.properties = { boolCol: {type: 'bool', indexed: true} };
    IndexedSchema.primaryKey = 'boolCol';

    // Test this doesn't throw
    new Realm({schema: [IndexedSchema], path: '5.realm'});
},
// Default property values from the schema are applied when an object is
// created from an empty spec, and survive opening a second Realm instance.
testRealmCreateWithDefaults: function() {
    var realm = new Realm({schema: [schemas.DefaultValues, schemas.TestObject]});

    // Creates one DefaultValuesObject and checks every property against
    // the defaults declared in the schema.
    var createAndTestObject = function() {
        var obj = realm.create('DefaultValuesObject', {});
        var properties = schemas.DefaultValues.properties;

        TestCase.assertEqual(obj.boolCol, properties.boolCol.default);
        TestCase.assertEqual(obj.intCol, properties.intCol.default);
        TestCase.assertEqualWithTolerance(obj.floatCol, properties.floatCol.default, 0.000001);
        TestCase.assertEqualWithTolerance(obj.doubleCol, properties.doubleCol.default, 0.000001);
        TestCase.assertEqual(obj.stringCol, properties.stringCol.default);
        TestCase.assertEqual(obj.dateCol.getTime(), properties.dateCol.default.getTime());
        TestCase.assertEqual(obj.dataCol.byteLength, properties.dataCol.default.byteLength);
        TestCase.assertEqual(obj.objectCol.doubleCol, properties.objectCol.default.doubleCol);
        TestCase.assertEqual(obj.nullObjectCol, null);
        TestCase.assertEqual(obj.arrayCol.length, properties.arrayCol.default.length);
        TestCase.assertEqual(obj.arrayCol[0].doubleCol, properties.arrayCol.default[0].doubleCol);
    };

    realm.write(createAndTestObject);

    // Defaults should still work when creating another Realm instance.
    realm = new Realm();
    realm.write(createAndTestObject);
},
testRealmCreateWithChangingDefaults: function() {
var objectSchema = {
name: 'IntObject',
properties: {
intCol: {type: 'int', default: 1},
}
};
var realm = new Realm({schema: [objectSchema]});
var createAndTestObject = function() {
var object = realm.create('IntObject', {});
TestCase.assertEqual(object.intCol, objectSchema.properties.intCol.default);
};
realm.write(createAndTestObject);
objectSchema.properties.intCol.default++;
realm = new Realm({schema: [objectSchema]});
realm.write(createAndTestObject);
},
// Custom constructors attached to a schema: created objects use the
// constructor's prototype, the constructor body actually runs, and only
// the registered constructor may be passed to create()/objects().
testRealmCreateWithConstructor: function() {
    var customCreated = 0;

    function CustomObject() {
        customCreated++;
    }
    CustomObject.schema = {
        name: 'CustomObject',
        properties: {
            intCol: 'int'
        }
    }

    // InvalidObject has no .schema attached at this point, so Realm
    // construction with it must throw.
    function InvalidObject() {
        return {};
    }
    TestCase.assertThrows(function() {
        new Realm({schema: [InvalidObject]});
    });
    InvalidObject.schema = {
        name: 'InvalidObject',
        properties: {
            intCol: 'int'
        }
    };

    var realm = new Realm({schema: [CustomObject, InvalidObject]});

    realm.write(function() {
        var object = realm.create('CustomObject', {intCol: 1});
        TestCase.assertTrue(object instanceof CustomObject);
        TestCase.assertTrue(Object.getPrototypeOf(object) == CustomObject.prototype);
        TestCase.assertEqual(customCreated, 1);

        // Should be able to create object by passing in constructor.
        object = realm.create(CustomObject, {intCol: 2});
        TestCase.assertTrue(object instanceof CustomObject);
        TestCase.assertTrue(Object.getPrototypeOf(object) == CustomObject.prototype);
        TestCase.assertEqual(customCreated, 2);
    });

    // InvalidObject's constructor returns a plain object, which is rejected
    // at create() time.
    TestCase.assertThrows(function() {
        realm.write(function() {
            realm.create('InvalidObject', {intCol: 1});
        });
    });

    // Only the original constructor should be valid.
    function InvalidCustomObject() {}
    InvalidCustomObject.schema = CustomObject.schema;

    TestCase.assertThrows(function() {
        realm.write(function() {
            realm.create(InvalidCustomObject, {intCol: 1});
        });
    });

    // The constructor should still work when creating another Realm instance.
    realm = new Realm();
    TestCase.assertTrue(realm.objects('CustomObject')[0] instanceof CustomObject);
    TestCase.assertTrue(realm.objects(CustomObject).length > 0);
},
testRealmCreateWithChangingConstructor: function() {
function CustomObject() {}
CustomObject.schema = {
name: 'CustomObject',
properties: {
intCol: 'int'
}
};
var realm = new Realm({schema: [CustomObject]});
realm.write(function() {
var object = realm.create('CustomObject', {intCol: 1});
TestCase.assertTrue(object instanceof CustomObject);
});
function NewCustomObject() {}
NewCustomObject.schema = CustomObject.schema;
realm = new Realm({schema: [NewCustomObject]});
realm.write(function() {
var object = realm.create('CustomObject', {intCol: 1});
TestCase.assertTrue(object instanceof NewCustomObject);
});
},
// realm.delete: requires a write transaction, accepts a single object, an
// array of objects, or a Results collection, and deleting an already
// deleted object throws.
testRealmDelete: function() {
    var realm = new Realm({schema: [schemas.TestObject]});

    realm.write(function() {
        for (var i = 0; i < 10; i++) {
            realm.create('TestObject', {doubleCol: i});
        }
    });

    var objects = realm.objects('TestObject');
    TestCase.assertThrows(function() {
        realm.delete(objects[0]);
    }, 'can only delete in a write transaction');

    realm.write(function() {
        TestCase.assertThrows(function() {
            realm.delete();
        });

        // Delete a single object; the live Results shrinks and the last
        // row moves into the vacated slot (index 0 now holds doubleCol 9).
        realm.delete(objects[0]);
        TestCase.assertEqual(objects.length, 9, 'wrong object count');
        TestCase.assertEqual(objects[0].doubleCol, 9, "wrong property value");
        TestCase.assertEqual(objects[1].doubleCol, 1, "wrong property value");

        // Delete an array of objects.
        realm.delete([objects[0], objects[1]]);
        TestCase.assertEqual(objects.length, 7, 'wrong object count');
        TestCase.assertEqual(objects[0].doubleCol, 7, "wrong property value");
        TestCase.assertEqual(objects[1].doubleCol, 8, "wrong property value");

        // Delete a filtered Results collection; it empties afterwards.
        var threeObjects = realm.objects('TestObject').filtered("doubleCol < 5");
        TestCase.assertEqual(threeObjects.length, 3, "wrong results count");
        realm.delete(threeObjects);
        TestCase.assertEqual(objects.length, 4, 'wrong object count');
        TestCase.assertEqual(threeObjects.length, 0, 'threeObject should have been deleted');

        // Double-deletion of the same object is an error.
        var o = objects[0];
        realm.delete(o);
        TestCase.assertThrows(function() {
            realm.delete(o);
        });
    });
},
testDeleteAll: function() {
var realm = new Realm({schema: [schemas.TestObject, schemas.IntPrimary]});
realm.write(function() {
realm.create('TestObject', {doubleCol: 1});
realm.create('TestObject', {doubleCol: 2});
realm.create('IntPrimaryObject', {primaryCol: 2, valueCol: 'value'});
});
TestCase.assertEqual(realm.objects('TestObject').length, 2);
TestCase.assertEqual(realm.objects('IntPrimaryObject').length, 1);
TestCase.assertThrows(function() {
realm.deleteAll();
}, 'can only deleteAll in a write transaction');
realm.write(function() {
realm.deleteAll();
});
TestCase.assertEqual(realm.objects('TestObject').length, 0);
TestCase.assertEqual(realm.objects('IntPrimaryObject').length, 0);
},
// realm.objects: accepts a type name or registered constructor, rejects
// bad arguments, and every Realm/object operation throws once the Realm
// has been closed.
testRealmObjects: function() {
    var realm = new Realm({schema: [schemas.PersonObject, schemas.DefaultValues, schemas.TestObject]});
    realm.write(function() {
        realm.create('PersonObject', {name: 'Ari', age: 10});
        realm.create('PersonObject', {name: 'Tim', age: 11});
        realm.create('PersonObject', {name: 'Bjarne', age: 12});
        realm.create('PersonObject', {name: 'Alex', age: 12, married: true});
    });

    // Should be able to pass constructor for getting objects.
    var objects = realm.objects(schemas.PersonObject);
    TestCase.assertTrue(objects[0] instanceof schemas.PersonObject);

    // A different constructor carrying the same schema is not accepted.
    function InvalidPerson() {}
    InvalidPerson.schema = schemas.PersonObject.schema;

    TestCase.assertThrows(function() {
        realm.objects();
    });
    TestCase.assertThrows(function() {
        realm.objects([]);
    });
    TestCase.assertThrows(function() {
        realm.objects('InvalidClass');
    });
    TestCase.assertThrows(function() {
        realm.objects('PersonObject', 'truepredicate');
    });
    TestCase.assertThrows(function() {
        realm.objects(InvalidPerson);
    });

    var person = realm.objects('PersonObject')[0];
    var listenerCallback = () => {};
    realm.addListener('change', listenerCallback);

    // The tests below assert that everything throws when
    // operating on a closed realm
    realm.close();

    // Accessing a property of an object from the closed Realm throws.
    TestCase.assertThrows(function() {
        console.log("Name: ", person.name);
    });

    TestCase.assertThrows(function() {
        realm.objects('PersonObject');
    });

    TestCase.assertThrows(function() {
        realm.addListener('change', () => {});
    });

    TestCase.assertThrows(function() {
        realm.create('PersonObject', {name: 'Ari', age: 10});
    });

    TestCase.assertThrows(function() {
        realm.delete(person);
    });

    TestCase.assertThrows(function() {
        realm.deleteAll();
    });

    TestCase.assertThrows(function() {
        realm.write(() => {});
    });

    TestCase.assertThrows(function() {
        realm.removeListener('change', listenerCallback);
    });

    TestCase.assertThrows(function() {
        realm.removeAllListeners();
    });
},
// objectForPrimaryKey: looks up by int or string key, returns undefined
// for a missing key, and throws for types without a primary key or for
// bad arguments.
testRealmObjectForPrimaryKey: function() {
    var realm = new Realm({schema: [schemas.IntPrimary, schemas.StringPrimary, schemas.TestObject]});
    realm.write(function() {
        realm.create('IntPrimaryObject', {primaryCol: 0, valueCol: 'val0'});
        realm.create('IntPrimaryObject', {primaryCol: 1, valueCol: 'val1'});
        realm.create('StringPrimaryObject', {primaryCol: '', valueCol: -1});
        realm.create('StringPrimaryObject', {primaryCol: 'val0', valueCol: 0});
        realm.create('StringPrimaryObject', {primaryCol: 'val1', valueCol: 1});
        realm.create('TestObject', {doubleCol: 0});
    });

    // Integer primary keys; a missing key yields undefined.
    TestCase.assertEqual(realm.objectForPrimaryKey('IntPrimaryObject', -1), undefined);
    TestCase.assertEqual(realm.objectForPrimaryKey('IntPrimaryObject', 0).valueCol, 'val0');
    TestCase.assertEqual(realm.objectForPrimaryKey('IntPrimaryObject', 1).valueCol, 'val1');

    // String primary keys, including the empty string.
    TestCase.assertEqual(realm.objectForPrimaryKey('StringPrimaryObject', 'invalid'), undefined);
    TestCase.assertEqual(realm.objectForPrimaryKey('StringPrimaryObject', '').valueCol, -1);
    TestCase.assertEqual(realm.objectForPrimaryKey('StringPrimaryObject', 'val0').valueCol, 0);
    TestCase.assertEqual(realm.objectForPrimaryKey('StringPrimaryObject', 'val1').valueCol, 1);

    // TestObject has no primary key; other malformed calls also throw.
    TestCase.assertThrows(function() {
        realm.objectForPrimaryKey('TestObject', 0);
    });
    TestCase.assertThrows(function() {
        realm.objectForPrimaryKey();
    });
    TestCase.assertThrows(function() {
        realm.objectForPrimaryKey('IntPrimary');
    });
    TestCase.assertThrows(function() {
        realm.objectForPrimaryKey('InvalidClass', 0);
    });
},
// 'change' listeners: fired once per write, deduplicated on repeat
// registration, removable individually or all at once, and listener
// exceptions surface from realm.write.
testNotifications: function() {
    var realm = new Realm({schema: []});
    var notificationCount = 0;
    var notificationName;

    realm.addListener('change', function(realm, name) {
        notificationCount++;
        notificationName = name;
    });

    TestCase.assertEqual(notificationCount, 0);
    realm.write(function() {});
    TestCase.assertEqual(notificationCount, 1);
    TestCase.assertEqual(notificationName, 'change');

    var secondNotificationCount = 0;
    function secondNotification() {
        secondNotificationCount++;
    }

    // The listener should only be added once.
    realm.addListener('change', secondNotification);
    realm.addListener('change', secondNotification);

    realm.write(function() {});
    TestCase.assertEqual(notificationCount, 2);
    TestCase.assertEqual(secondNotificationCount, 1);

    // Removing one listener leaves the other active.
    realm.removeListener('change', secondNotification);
    realm.write(function() {});
    TestCase.assertEqual(notificationCount, 3);
    TestCase.assertEqual(secondNotificationCount, 1);

    // removeAllListeners silences every listener.
    realm.removeAllListeners();
    realm.write(function() {});
    TestCase.assertEqual(notificationCount, 3);
    TestCase.assertEqual(secondNotificationCount, 1);

    // Only the 'change' event name is supported.
    TestCase.assertThrows(function() {
        realm.addListener('invalid', function() {});
    });

    // An exception thrown from a listener propagates out of write().
    realm.addListener('change', function() {
        throw new Error('error');
    });
    TestCase.assertThrows(function() {
        realm.write(function() {});
    });
},
// realm.schema must faithfully reflect the schema the Realm was opened
// with, normalizing string-shorthand property specs into full descriptors.
testSchema: function() {
    var originalSchema = [schemas.TestObject, schemas.BasicTypes, schemas.NullableBasicTypes, schemas.IndexedTypes, schemas.IntPrimary,
        schemas.PersonObject, schemas.LinkTypes];

    // Map schema name -> original spec (PersonObject is a constructor
    // carrying its spec on .schema).
    var schemaMap = {};
    originalSchema.forEach(function(objectSchema) {
        if (objectSchema.schema) { // for PersonObject
            schemaMap[objectSchema.schema.name] = objectSchema;
        } else {
            schemaMap[objectSchema.name] = objectSchema;
        }
    });

    var realm = new Realm({schema: originalSchema});

    var schema = realm.schema;
    TestCase.assertEqual(schema.length, originalSchema.length);

    function isString(val) {
        return typeof val === 'string' || val instanceof String;
    }

    // Compare one returned ObjectSchema against its original spec, taking
    // into account that object links are implicitly optional and that a
    // property may have been declared as a plain type-name string.
    function verifyObjectSchema(returned) {
        var original = schemaMap[returned.name];
        if (original.schema) {
            original = original.schema;
        }

        TestCase.assertEqual(returned.primaryKey, original.primaryKey);
        for (var propName in returned.properties) {
            var prop1 = returned.properties[propName];
            var prop2 = original.properties[propName];
            if (prop1.type == 'object') {
                TestCase.assertEqual(prop1.objectType, isString(prop2) ? prop2 : prop2.objectType);
                TestCase.assertEqual(prop1.optional, true);
            }
            else if (prop1.type == 'list') {
                TestCase.assertEqual(prop1.objectType, prop2.objectType);
                TestCase.assertEqual(prop1.optional, undefined);
            }
            else {
                TestCase.assertEqual(prop1.type, isString(prop2) ? prop2 : prop2.type);
                TestCase.assertEqual(prop1.optional, prop2.optional || undefined);
            }

            TestCase.assertEqual(prop1.indexed, prop2.indexed || undefined);
        }
    }

    for (var i = 0; i < originalSchema.length; i++) {
        verifyObjectSchema(schema[i]);
    }
},
testCopyBundledRealmFiles: function() {
Realm.copyBundledRealmFiles();
var realm = new Realm({path: 'dates-v5.realm', schema: [schemas.DateObject]});
TestCase.assertEqual(realm.objects('Date').length, 2);
TestCase.assertEqual(realm.objects('Date')[0].currentDate.getTime(), 1462500087955);
var newDate = new Date(1);
realm.write(function() {
realm.objects('Date')[0].currentDate = newDate;
});
realm.close();
// copy should not overwrite existing files
Realm.copyBundledRealmFiles();
realm = new Realm({path: 'dates-v5.realm', schema: [schemas.DateObject]});
TestCase.assertEqual(realm.objects('Date')[0].currentDate.getTime(), 1);
},
};
| 1 | 15,838 | This change is necessary because the schema version is ignored unless a schema is specified. I think that has always been the intent. | realm-realm-js | js |
@@ -2712,6 +2712,7 @@ dr_get_os_version(dr_os_version_info_t *info)
get_os_version_ex(&ver, &sp_major, &sp_minor);
if (info->size > offsetof(dr_os_version_info_t, version)) {
switch (ver) {
+ case WINDOWS_VERSION_10_1803: info->version = DR_WINDOWS_VERSION_10_1803; break;
case WINDOWS_VERSION_10_1709: info->version = DR_WINDOWS_VERSION_10_1709; break;
case WINDOWS_VERSION_10_1703: info->version = DR_WINDOWS_VERSION_10_1703; break;
case WINDOWS_VERSION_10_1607: info->version = DR_WINDOWS_VERSION_10_1607; break; | 1 | /* ******************************************************************************
* Copyright (c) 2010-2018 Google, Inc. All rights reserved.
* Copyright (c) 2010-2011 Massachusetts Institute of Technology All rights reserved.
* Copyright (c) 2002-2010 VMware, Inc. All rights reserved.
* ******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2002-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2002 Hewlett-Packard Company */
/*
* instrument.c - interface for instrumentation
*/
#include "../globals.h" /* just to disable warning C4206 about an empty file */
#include "instrument.h"
#include "arch.h"
#include "instr.h"
#include "instr_create.h"
#include "instrlist.h"
#include "decode.h"
#include "disassemble.h"
#include "../fragment.h"
#include "../fcache.h"
#include "../emit.h"
#include "../link.h"
#include "../monitor.h" /* for mark_trace_head */
#include <string.h> /* for strstr */
#include <stdarg.h> /* for varargs */
#include "../nudge.h" /* for nudge_internal() */
#include "../synch.h"
#include "../annotations.h"
#include "../translate.h"
#ifdef UNIX
# include <sys/time.h> /* ITIMER_* */
# include "../unix/module.h" /* redirect_* functions */
#endif
#ifdef CLIENT_INTERFACE
/* in utils.c, not exported to everyone */
extern ssize_t do_file_write(file_t f, const char *fmt, va_list ap);
#ifdef DEBUG
/* case 10450: give messages to clients */
/* we can't undef ASSERT b/c of DYNAMO_OPTION */
# undef ASSERT_TRUNCATE
# undef ASSERT_BITFIELD_TRUNCATE
# undef ASSERT_NOT_REACHED
# define ASSERT_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_BITFIELD_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_NOT_REACHED DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
#endif
/* PR 200065: User passes us the shared library, we look up "dr_init"
* or "dr_client_main" and call it. From there, the client can register which events it
* wishes to receive.
*/
#define INSTRUMENT_INIT_NAME_LEGACY "dr_init"
#define INSTRUMENT_INIT_NAME "dr_client_main"
/* PR 250952: version check
* If changing this, don't forget to update:
* - lib/dr_defines.h _USES_DR_VERSION_
* - api/docs/footer.html
*/
#define USES_DR_VERSION_NAME "_USES_DR_VERSION_"
/* Should we expose this for use in samples/tracedump.c?
* Also, if we change this, need to change the symlink generation
* in core/CMakeLists.txt: at that point should share single define.
*/
/* OLDEST_COMPATIBLE_VERSION now comes from configure.h */
/* The 3rd version number, the bugfix/patch number, should not affect
* compatibility, so our version check number simply uses:
* major*100 + minor
* Which gives us room for 100 minor versions per major.
*/
#define NEWEST_COMPATIBLE_VERSION CURRENT_API_VERSION
/* Store the unique not-part-of-version build number (the version
* BUILD_NUMBER is limited to 64K and is not guaranteed to be unique)
* somewhere accessible at a customer site. We could alternatively
* pull it out of our DYNAMORIO_DEFINES string.
*/
DR_API const char *unique_build_number = STRINGIFY(UNIQUE_BUILD_NUMBER);
/* Acquire when registering or unregistering event callbacks
* Also held when invoking events, which happens much more often
* than registration changes, so we use rwlock
*/
DECLARE_CXTSWPROT_VAR(static read_write_lock_t callback_registration_lock,
INIT_READWRITE_LOCK(callback_registration_lock));
/* Structures for maintaining lists of event callbacks */
typedef void (*callback_t)(void);
typedef struct _callback_list_t {
callback_t *callbacks; /* array of callback functions */
size_t num; /* number of callbacks registered */
size_t size; /* allocated space (may be larger than num) */
} callback_list_t;
/* This is a little convoluted. The following is a macro to iterate
* over a list of callbacks and call each function. We use a macro
* instead of a function so we can pass the function type and perform
* a typecast. We need to copy the callback list before iterating to
* support the possibility of one callback unregistering another and
* messing up the list while we're iterating. We'll optimize the case
* for 5 or fewer registered callbacks and stack-allocate the temp
* list. Otherwise, we'll heap-allocate the temp.
*
* We allow the args to use the var "idx" to access the client index.
*
* We consider the first registered callback to have the highest
* priority and call it last. If we gave the last registered callback
* the highest priority, a client could re-register a routine to
* increase its priority. That seems a little weird.
*/
/*
*/
#define FAST_COPY_SIZE 5
#define call_all_ret(ret, retop, postop, vec, type, ...) \
do { \
size_t idx, num; \
/* we will be called even if no callbacks (i.e., (vec).num == 0) */ \
/* we guarantee we're in DR state at all callbacks and clean calls */ \
/* XXX: add CLIENT_ASSERT here */ \
read_lock(&callback_registration_lock); \
num = (vec).num; \
if (num == 0) { \
read_unlock(&callback_registration_lock); \
} \
else if (num <= FAST_COPY_SIZE) { \
callback_t tmp[FAST_COPY_SIZE]; \
memcpy(tmp, (vec).callbacks, num * sizeof(callback_t)); \
read_unlock(&callback_registration_lock); \
for (idx=0; idx<num; idx++) { \
ret retop (((type)tmp[num-idx-1])(__VA_ARGS__)) postop; \
} \
} \
else { \
callback_t *tmp = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, callback_t, \
num, ACCT_OTHER, UNPROTECTED); \
memcpy(tmp, (vec).callbacks, num * sizeof(callback_t)); \
read_unlock(&callback_registration_lock); \
for (idx=0; idx<num; idx++) { \
ret retop (((type)tmp[num-idx-1])(__VA_ARGS__)) postop; \
} \
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, tmp, callback_t, num, \
ACCT_OTHER, UNPROTECTED); \
} \
} while (0)
/* It's less error-prone if we just have one call_all macro. We'll
* reuse call_all_ret above for callbacks that don't have a return
* value by assigning to a dummy var. Note that this means we'll
* have to pass an int-returning type to call_all()
*/
#define call_all(vec, type, ...) \
do { \
int dummy; \
call_all_ret(dummy, =, , vec, type, __VA_ARGS__); \
} while (0)
/* Lists of callbacks for each event type. Note that init and nudge
* callback lists are kept in the client_lib_t data structure below.
* We could store all lists on a per-client basis, but we can iterate
* over these lists slightly more efficiently if we store all
* callbacks for a specific event in a single list.
*/
static callback_list_t exit_callbacks = {0,};
static callback_list_t thread_init_callbacks = {0,};
static callback_list_t thread_exit_callbacks = {0,};
#ifdef UNIX
static callback_list_t fork_init_callbacks = {0,};
#endif
static callback_list_t bb_callbacks = {0,};
static callback_list_t trace_callbacks = {0,};
#ifdef CUSTOM_TRACES
static callback_list_t end_trace_callbacks = {0,};
#endif
static callback_list_t fragdel_callbacks = {0,};
static callback_list_t restore_state_callbacks = {0,};
static callback_list_t restore_state_ex_callbacks = {0,};
static callback_list_t module_load_callbacks = {0,};
static callback_list_t module_unload_callbacks = {0,};
static callback_list_t filter_syscall_callbacks = {0,};
static callback_list_t pre_syscall_callbacks = {0,};
static callback_list_t post_syscall_callbacks = {0,};
static callback_list_t kernel_xfer_callbacks = {0,};
#ifdef WINDOWS
static callback_list_t exception_callbacks = {0,};
#else
static callback_list_t signal_callbacks = {0,};
#endif
#ifdef PROGRAM_SHEPHERDING
static callback_list_t security_violation_callbacks = {0,};
#endif
static callback_list_t persist_ro_size_callbacks = {0,};
static callback_list_t persist_ro_callbacks = {0,};
static callback_list_t resurrect_ro_callbacks = {0,};
static callback_list_t persist_rx_size_callbacks = {0,};
static callback_list_t persist_rx_callbacks = {0,};
static callback_list_t resurrect_rx_callbacks = {0,};
static callback_list_t persist_rw_size_callbacks = {0,};
static callback_list_t persist_rw_callbacks = {0,};
static callback_list_t resurrect_rw_callbacks = {0,};
static callback_list_t persist_patch_callbacks = {0,};
/* An array of client libraries. We use a static array instead of a
* heap-allocated list so we can load the client libs before
* initializing DR's heap.
*/
/* Per-client bookkeeping: one entry per loaded client library. */
typedef struct _client_lib_t {
    /* Client-supplied ID, used to route nudges and option queries. */
    client_id_t id;
    /* Filesystem path the library was loaded from. */
    char path[MAXIMUM_PATH];
    /* PR 366195: dlopen() handle truly is opaque: != start */
    shlib_handle_t lib;
    /* Load bounds of the client image, filled by shared_library_bounds(). */
    app_pc start;
    app_pc end;
    /* The raw option string, which after i#1736 contains token-delimiting quotes */
    char options[MAX_OPTION_LENGTH];
    /* The option string with token-delimiting quotes removed for backward compat */
    char legacy_options[MAX_OPTION_LENGTH];
    /* The parsed options: */
    int argc;
    const char **argv;
    /* We need to associate nudge events with a specific client so we
     * store that list here in the client_lib_t instead of using a
     * single global list.
     */
    callback_list_t nudge_callbacks;
} client_lib_t;
/* these should only be modified prior to instrument_init(), since no
* readers of the client_libs array (event handlers, etc.) use synch
*/
static client_lib_t client_libs[MAX_CLIENT_LIBS] = {{0,}};
static size_t num_client_libs = 0;
static void *persist_user_data[MAX_CLIENT_LIBS];
#ifdef WINDOWS
/* private kernel32 lib, used to print to console */
static bool print_to_console;
static shlib_handle_t priv_kernel32;
typedef BOOL (WINAPI *kernel32_WriteFile_t)
(HANDLE, LPCVOID, DWORD, LPDWORD, LPOVERLAPPED);
static kernel32_WriteFile_t kernel32_WriteFile;
static ssize_t dr_write_to_console_varg(bool to_stdout, const char *fmt, ...);
#endif
bool client_requested_exit;
#ifdef WINDOWS
/* used for nudge support */
static bool block_client_nudge_threads = false;
DECLARE_CXTSWPROT_VAR(static int num_client_nudge_threads, 0);
#endif
#ifdef CLIENT_SIDELINE
/* # of sideline threads */
DECLARE_CXTSWPROT_VAR(static int num_client_sideline_threads, 0);
#endif
#if defined(WINDOWS) || defined(CLIENT_SIDELINE)
/* protects block_client_nudge_threads and incrementing num_client_nudge_threads */
DECLARE_CXTSWPROT_VAR(static mutex_t client_thread_count_lock,
INIT_LOCK_FREE(client_thread_count_lock));
#endif
static vm_area_vector_t *client_aux_libs;
static bool track_where_am_i;
#ifdef WINDOWS
DECLARE_CXTSWPROT_VAR(static mutex_t client_aux_lib64_lock,
INIT_LOCK_FREE(client_aux_lib64_lock));
#endif
/****************************************************************************/
/* INTERNAL ROUTINES */
/* Returns true for the characters that delimit option tokens:
 * double-quote, single-quote, and backtick.
 */
static bool
char_is_quote(char c)
{
    switch (c) {
    case '"':
    case '\'':
    case '`':
        return true;
    default:
        return false;
    }
}
/* Tokenizes the raw option string into an argv-style array whose slot 0
 * is the client library path (mirroring a C program's argv).  The caller
 * releases the result with free_option_array().
 */
static void
parse_option_array(client_id_t client_id, const char *opstr,
                   int *argc OUT, const char ***argv OUT,
                   size_t max_token_size)
{
    char *scratch = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, char, max_token_size,
                                     ACCT_CLIENT, UNPROTECTED);
    const char **arr;
    const char *cursor;
    int count = 1; /* reserve slot 0 for the client path */
    /* First pass: count the tokens so we know how much to allocate. */
    cursor = dr_get_token(opstr, scratch, max_token_size);
    while (cursor != NULL) {
        count++;
        cursor = dr_get_token(cursor, scratch, max_token_size);
    }
    arr = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, const char *, count,
                           ACCT_CLIENT, UNPROTECTED);
    /* Second pass: duplicate the client path and then each token. */
    arr[0] = dr_strdup(dr_get_client_path(client_id) HEAPACCT(ACCT_CLIENT));
    count = 1;
    cursor = dr_get_token(opstr, scratch, max_token_size);
    while (cursor != NULL) {
        arr[count] = dr_strdup(scratch HEAPACCT(ACCT_CLIENT));
        count++;
        cursor = dr_get_token(cursor, scratch, max_token_size);
    }
    HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, scratch, char, max_token_size,
                    ACCT_CLIENT, UNPROTECTED);
    *argc = count;
    *argv = arr;
}
/* Frees the strings and the array allocated by parse_option_array().
 * Always returns true.
 */
static bool
free_option_array(int argc, const char **argv)
{
    int idx;
    for (idx = 0; idx < argc; idx++)
        dr_strfree(argv[idx] HEAPACCT(ACCT_CLIENT));
    HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, argv, char *, argc, ACCT_CLIENT, UNPROTECTED);
    return true;
}
/* Appends func to the given event list, growing the storage two slots at
 * a time under the registration write lock.  When 'unprotect' is set the
 * target list lives in DR's self-protected .data section and must be
 * unprotected around the update.
 */
static void
add_callback(callback_list_t *vec, void (*func)(void), bool unprotect)
{
    if (func == NULL) {
        CLIENT_ASSERT(false, "trying to register a NULL callback");
        return;
    }
    if (standalone_library) {
        CLIENT_ASSERT(false, "events not supported in standalone library mode");
        return;
    }
    write_lock(&callback_registration_lock);
    /* Although we're receiving a pointer to a callback_list_t, we're
     * usually modifying a static var.
     */
    if (unprotect) {
        SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
    }
    /* We may already have an open slot since we allocate in twos and
     * because we don't bother to free the storage when we remove the
     * callback.  Check and only allocate if necessary.
     */
    if (vec->num == vec->size) {
        callback_t *tmp = HEAP_ARRAY_ALLOC
            (GLOBAL_DCONTEXT, callback_t, vec->size + 2, /* Let's allocate 2 */
             ACCT_OTHER, UNPROTECTED);
        if (tmp == NULL) {
            CLIENT_ASSERT(false, "out of memory: can't register callback");
            /* NOTE(review): this early return skips SELF_PROTECT_DATASEC when
             * 'unprotect' was set, leaving .data unprotected -- presumably
             * unreachable if HEAP_ARRAY_ALLOC cannot return NULL; confirm.
             */
            write_unlock(&callback_registration_lock);
            return;
        }
        if (vec->callbacks != NULL) {
            memcpy(tmp, vec->callbacks, vec->num * sizeof(callback_t));
            HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, vec->callbacks, callback_t, vec->size,
                            ACCT_OTHER, UNPROTECTED);
        }
        vec->callbacks = tmp;
        vec->size += 2;
    }
    vec->callbacks[vec->num] = func;
    vec->num++;
    if (unprotect) {
        SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
    }
    write_unlock(&callback_registration_lock);
}
/* Removes the first occurrence of func from the given event list.
 * Returns whether it was found.  The storage is deliberately not shrunk
 * (add_callback() reuses the open slot).
 */
static bool
remove_callback(callback_list_t *vec, void (*func)(void), bool unprotect)
{
    size_t idx;
    bool removed = false;
    if (func == NULL) {
        CLIENT_ASSERT(false, "trying to unregister a NULL callback");
        return false;
    }
    write_lock(&callback_registration_lock);
    /* The target is usually a static list in the self-protected .data
     * section, so it may need to be unprotected around the update.
     */
    if (unprotect) {
        SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
    }
    for (idx = 0; idx < vec->num; idx++) {
        if (vec->callbacks[idx] == func) {
            size_t tail;
            /* Close the gap by sliding the tail entries down one slot. */
            for (tail = idx + 1; tail < vec->num; tail++)
                vec->callbacks[tail - 1] = vec->callbacks[tail];
            vec->num--;
            removed = true;
            break;
        }
    }
    if (unprotect) {
        SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
    }
    write_unlock(&callback_registration_lock);
    return removed;
}
/* This should only be called prior to instrument_init(),
 * since no readers of the client_libs array use synch
 * and since this routine assumes .data is writable.
 */
/* Loads one client library, version-checks it, and records it in the
 * static client_libs array.  Load/version failures are reported via
 * SYSLOG and (in debug builds) asserts but are non-fatal.
 */
static void
add_client_lib(const char *path, const char *id_str, const char *options)
{
    client_id_t id;
    shlib_handle_t client_lib;
    DEBUG_DECLARE(size_t i);
    ASSERT(!dynamo_initialized);
    /* if ID not specified, we'll default to 0 */
    id = (id_str == NULL) ? 0 : strtoul(id_str, NULL, 16);
#ifdef DEBUG
    /* Check for conflicting IDs */
    for (i=0; i<num_client_libs; i++) {
        CLIENT_ASSERT(client_libs[i].id != id, "Clients have the same ID");
    }
#endif
    if (num_client_libs == MAX_CLIENT_LIBS) {
        CLIENT_ASSERT(false, "Max number of clients reached");
        return;
    }
    LOG(GLOBAL, LOG_INTERP, 4, "about to load client library %s\n", path);
    client_lib = load_shared_library(path, IF_X64_ELSE(DYNAMO_OPTION(reachable_client),
                                                       true));
    if (client_lib == NULL) {
        char msg[MAXIMUM_PATH*4];
        char err[MAXIMUM_PATH*2];
        shared_library_error(err, BUFFER_SIZE_ELEMENTS(err));
        snprintf(msg, BUFFER_SIZE_ELEMENTS(msg),
                 ".\n\tError opening instrumentation library %s:\n\t%s",
                 path, err);
        NULL_TERMINATE_BUFFER(msg);
        /* PR 232490 - malformed library names or incorrect
         * permissions shouldn't blow up an app in release builds as
         * they may happen at customer sites with a third party
         * client.
         */
        /* PR 408318: 32-vs-64 errors should NOT be fatal to continue
         * in debug build across execve chains.  Xref i#147.
         * XXX: w/ -private_loader, err always equals "error in private loader"
         * and so we never match here!
         */
        IF_UNIX(if (strstr(err, "wrong ELF class") == NULL))
            CLIENT_ASSERT(false, msg);
        SYSLOG(SYSLOG_ERROR, CLIENT_LIBRARY_UNLOADABLE, 4,
               get_application_name(), get_application_pid(), path, msg);
    }
    else {
        /* PR 250952: version check */
        int *uses_dr_version = (int *)
            lookup_library_routine(client_lib, USES_DR_VERSION_NAME);
        if (uses_dr_version == NULL ||
            *uses_dr_version < OLDEST_COMPATIBLE_VERSION ||
            *uses_dr_version > NEWEST_COMPATIBLE_VERSION) {
            /* not a fatal usage error since we want release build to continue */
            CLIENT_ASSERT(false,
                          "client library is incompatible with this version of DR");
            SYSLOG(SYSLOG_ERROR, CLIENT_VERSION_INCOMPATIBLE, 2,
                   get_application_name(), get_application_pid());
        }
        else {
            size_t idx = num_client_libs++;
            DEBUG_DECLARE(bool ok;)
            client_libs[idx].id = id;
            client_libs[idx].lib = client_lib;
            DEBUG_DECLARE(ok =)
                shared_library_bounds(client_lib, (byte *) uses_dr_version, NULL,
                                      &client_libs[idx].start, &client_libs[idx].end);
            ASSERT(ok);
            LOG(GLOBAL, LOG_INTERP, 1, "loaded %s at "PFX"-"PFX"\n",
                path, client_libs[idx].start, client_libs[idx].end);
#ifdef X64
            /* Now that we map the client within the constraints, this request
             * should always succeed.
             */
            if (DYNAMO_OPTION(reachable_client)) {
                request_region_be_heap_reachable(client_libs[idx].start,
                                                 client_libs[idx].end -
                                                 client_libs[idx].start);
            }
#endif
            strncpy(client_libs[idx].path, path,
                    BUFFER_SIZE_ELEMENTS(client_libs[idx].path));
            NULL_TERMINATE_BUFFER(client_libs[idx].path);
            if (options != NULL) {
                strncpy(client_libs[idx].options, options,
                        BUFFER_SIZE_ELEMENTS(client_libs[idx].options));
                NULL_TERMINATE_BUFFER(client_libs[idx].options);
            }
            /* We'll look up dr_client_main and call it in instrument_init */
        }
    }
}
/* Parses the -client_lib option string ("path;ID;options" triples
 * separated by ';') and loads each listed client library.  Runs before
 * DR's heap is initialized, which is why client_libs is a static array.
 */
void
instrument_load_client_libs(void)
{
    if (CLIENTS_EXIST()) {
        char buf[MAX_LIST_OPTION_LENGTH];
        char *path;
        string_option_read_lock();
        strncpy(buf, INTERNAL_OPTION(client_lib), BUFFER_SIZE_ELEMENTS(buf));
        string_option_read_unlock();
        NULL_TERMINATE_BUFFER(buf);
        /* We're expecting path;ID;options triples */
        path = buf;
        do {
            char *id = NULL;
            char *options = NULL;
            char *next_path = NULL;
            /* Split the triple in place by NUL-terminating at each ';'. */
            id = strstr(path, ";");
            if (id != NULL) {
                id[0] = '\0';
                id++;
                options = strstr(id, ";");
                if (options != NULL) {
                    options[0] = '\0';
                    options++;
                    next_path = strstr(options, ";");
                    if (next_path != NULL) {
                        next_path[0] = '\0';
                        next_path++;
                    }
                }
            }
#ifdef STATIC_LIBRARY
            /* We ignore client library paths and allow client code anywhere in the app.
             * We have a check in load_shared_library() to avoid loading
             * a 2nd copy of the app.
             * We do support passing client ID and options via the first -client_lib.
             */
            add_client_lib(get_application_name(), id == NULL ? "0" : id,
                           options == NULL ? "" : options);
            break;
#endif
            add_client_lib(path, id, options);
            path = next_path;
        } while (path != NULL);
    }
}
/* Idempotent: allocates the shared auxiliary-library vector on first call. */
static void
init_client_aux_libs(void)
{
    if (client_aux_libs != NULL)
        return;
    VMVECTOR_ALLOC_VECTOR(client_aux_libs, GLOBAL_DCONTEXT,
                          VECTOR_SHARED, client_aux_libs);
}
/* Calls each client's dr_client_main (or legacy dr_init), then raises the
 * deferred thread-init event for the initial thread and replays module-load
 * events for modules that are already loaded.
 */
void
instrument_init(void)
{
    size_t i;
    init_client_aux_libs();
    if (num_client_libs > 0) {
        /* We no longer distinguish in-DR vs in-client crashes, as many crashes in
         * the DR lib are really client bugs.
         * We expect most end-user tools to call dr_set_client_name() so we
         * have generic defaults here:
         */
        set_exception_strings("Tool", "your tool's issue tracker");
    }
    /* Iterate over the client libs and call each init routine */
    for (i=0; i<num_client_libs; i++) {
        void (*init)(client_id_t, int, const char **) =
            (void (*)(client_id_t, int, const char **))
            (lookup_library_routine(client_libs[i].lib, INSTRUMENT_INIT_NAME));
        void (*legacy)(client_id_t) = (void (*)(client_id_t))
            (lookup_library_routine(client_libs[i].lib, INSTRUMENT_INIT_NAME_LEGACY));
        /* we can't do this in instrument_load_client_libs() b/c vmheap
         * is not set up at that point
         */
        all_memory_areas_lock();
        update_all_memory_areas(client_libs[i].start, client_libs[i].end,
                                /* FIXME: need to walk the sections: but may be
                                 * better to obfuscate from clients anyway.
                                 * We can't set as MEMPROT_NONE as that leads to
                                 * bugs if the app wants to interpret part of
                                 * its code section (xref PR 504629).
                                 */
                                MEMPROT_READ, DR_MEMTYPE_IMAGE);
        all_memory_areas_unlock();
        /* i#1736: parse the options up front */
        parse_option_array(client_libs[i].id, client_libs[i].options,
                           &client_libs[i].argc, &client_libs[i].argv,
                           MAX_OPTION_LENGTH);
#ifdef STATIC_LIBRARY
        /* We support the app having client code anywhere, so there does not
         * have to be an init routine that we call.  This means the app
         * may have to iterate modules on its own.
         */
#else
        /* Since the user has to register all other events, it
         * doesn't make sense to provide the -client_lib
         * option for a module that doesn't export an init routine.
         */
        CLIENT_ASSERT(init != NULL || legacy != NULL,
                      "client does not export a dr_client_main or dr_init routine");
#endif
        if (init != NULL)
            (*init)(client_libs[i].id, client_libs[i].argc, client_libs[i].argv);
        else if (legacy != NULL)
            (*legacy)(client_libs[i].id);
    }
    /* We now initialize the 1st thread before coming here, so we can
     * hand the client a dcontext; so we need to specially generate
     * the thread init event now.  An alternative is to have
     * dr_get_global_drcontext(), but that's extra complexity for no
     * real reason.
     * We raise the thread init event prior to the module load events
     * so the client can access a dcontext in module load events (i#1339).
     */
    if (thread_init_callbacks.num > 0) {
        instrument_thread_init(get_thread_private_dcontext(), false, false);
    }
    /* If the client just registered the module-load event, let's
     * assume it wants to be informed of *all* modules and tell it
     * which modules are already loaded.  If the client registers the
     * event later, it will need to use the module iterator routines
     * to retrieve currently loaded modules.  We use the dr_module_iterator
     * exposed to the client to avoid locking issues.
     */
    if (module_load_callbacks.num > 0) {
        dr_module_iterator_t *mi = dr_module_iterator_start();
        while (dr_module_iterator_hasnext(mi)) {
            module_data_t *data = dr_module_iterator_next(mi);
            instrument_module_load(data, true /*already loaded*/);
            /* XXX; more efficient to set this flag during dr_module_iterator_start */
            os_module_set_flag(data->start, MODULE_LOAD_EVENT);
            dr_free_module_data(data);
        }
        dr_module_iterator_stop(mi);
    }
}
/* Releases a callback list's storage (if any) and resets it to empty. */
static void
free_callback_list(callback_list_t *vec)
{
    callback_t *cbs = vec->callbacks;
    if (cbs != NULL) {
        HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, cbs, callback_t, vec->size,
                        ACCT_OTHER, UNPROTECTED);
        vec->callbacks = NULL;
    }
    vec->num = 0;
    vec->size = 0;
}
/* Frees every global event-callback list; called from instrument_exit()
 * when unloading clients (per-client nudge lists are freed separately).
 */
static void
free_all_callback_lists()
{
    free_callback_list(&exit_callbacks);
    free_callback_list(&thread_init_callbacks);
    free_callback_list(&thread_exit_callbacks);
#ifdef UNIX
    free_callback_list(&fork_init_callbacks);
#endif
    free_callback_list(&bb_callbacks);
    free_callback_list(&trace_callbacks);
#ifdef CUSTOM_TRACES
    free_callback_list(&end_trace_callbacks);
#endif
    free_callback_list(&fragdel_callbacks);
    free_callback_list(&restore_state_callbacks);
    free_callback_list(&restore_state_ex_callbacks);
    free_callback_list(&module_load_callbacks);
    free_callback_list(&module_unload_callbacks);
    free_callback_list(&filter_syscall_callbacks);
    free_callback_list(&pre_syscall_callbacks);
    free_callback_list(&post_syscall_callbacks);
    free_callback_list(&kernel_xfer_callbacks);
#ifdef WINDOWS
    free_callback_list(&exception_callbacks);
#else
    free_callback_list(&signal_callbacks);
#endif
#ifdef PROGRAM_SHEPHERDING
    free_callback_list(&security_violation_callbacks);
#endif
    free_callback_list(&persist_ro_size_callbacks);
    free_callback_list(&persist_ro_callbacks);
    free_callback_list(&resurrect_ro_callbacks);
    free_callback_list(&persist_rx_size_callbacks);
    free_callback_list(&persist_rx_callbacks);
    free_callback_list(&resurrect_rx_callbacks);
    free_callback_list(&persist_rw_size_callbacks);
    free_callback_list(&persist_rw_callbacks);
    free_callback_list(&resurrect_rw_callbacks);
    free_callback_list(&persist_patch_callbacks);
}
/* Second-stage exit: deletes the thread-count lock that nudge/sideline
 * thread teardown may still touch during instrument_exit().
 */
void
instrument_exit_post_sideline(void)
{
#if defined(WINDOWS) || defined(CLIENT_SIDELINE)
    DELETE_LOCK(client_thread_count_lock);
#endif
}
/* Raises the client process-exit event, then (in debug builds, or on a
 * full detach) unloads client libraries and frees registration storage.
 */
void
instrument_exit(void)
{
    /* Note - currently own initexit lock when this is called (see PR 227619). */
    /* support dr_get_mcontext() from the exit event */
    if (!standalone_library)
        get_thread_private_dcontext()->client_data->mcontext_in_dcontext = true;
    call_all(exit_callbacks, int (*)(),
             /* It seems the compiler is confused if we pass no var args
              * to the call_all macro.  Bogus NULL arg */
             NULL);
    if (IF_DEBUG_ELSE(true, doing_detach)) {
        /* Unload all client libs and free any allocated storage */
        size_t i;
        for (i=0; i<num_client_libs; i++) {
            free_callback_list(&client_libs[i].nudge_callbacks);
            unload_shared_library(client_libs[i].lib);
            if (client_libs[i].argv != NULL)
                free_option_array(client_libs[i].argc, client_libs[i].argv);
        }
        free_all_callback_lists();
    }
    vmvector_delete_vector(GLOBAL_DCONTEXT, client_aux_libs);
    client_aux_libs = NULL;
    num_client_libs = 0;
#ifdef WINDOWS
    DELETE_LOCK(client_aux_lib64_lock);
#endif
    DELETE_READWRITE_LOCK(callback_registration_lock);
}
/* Reports whether addr falls inside any loaded client library or any
 * auxiliary library loaded via the client aux-lib API.
 */
bool
is_in_client_lib(app_pc addr)
{
    /* NOTE: we use this routine for detecting exceptions in
     * clients.  If we add a callback on that event we'll have to be
     * sure to deliver it only to the right client.
     */
    size_t idx;
    for (idx = 0; idx < num_client_libs; idx++) {
        if (addr >= (app_pc)client_libs[idx].start &&
            addr < client_libs[idx].end)
            return true;
    }
    return client_aux_libs != NULL &&
        vmvector_overlap(client_aux_libs, addr, addr+1);
}
/* Writes the load bounds of the client with index client_id into the
 * non-NULL out-params.  Returns false for an out-of-range id.
 */
bool
get_client_bounds(client_id_t client_id,
                  app_pc *start/*OUT*/, app_pc *end/*OUT*/)
{
    const client_lib_t *lib;
    if (client_id >= num_client_libs)
        return false;
    lib = &client_libs[client_id];
    if (start != NULL)
        *start = (app_pc) lib->start;
    if (end != NULL)
        *end = (app_pc) lib->end;
    return true;
}
/* Returns the library path of the client whose image contains addr, or
 * the empty string when no client library covers the address.
 */
const char *
get_client_path_from_addr(app_pc addr)
{
    size_t idx = 0;
    while (idx < num_client_libs) {
        if (addr >= (app_pc)client_libs[idx].start &&
            addr < client_libs[idx].end)
            return client_libs[idx].path;
        idx++;
    }
    return "";
}
/* True iff some loaded client registered with this id. */
bool
is_valid_client_id(client_id_t id)
{
    size_t idx;
    for (idx = 0; idx < num_client_libs; idx++) {
        if (id == client_libs[idx].id)
            return true;
    }
    return false;
}
/* Event (un)registration API: each dr_register_* appends the client's
 * callback to the matching static list; each dr_unregister_* removes it
 * and reports whether it was found.  Code-transformation events (bb,
 * trace, ...) additionally require the -code_api option.
 */
void
dr_register_exit_event(void (*func)(void))
{
    add_callback(&exit_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_exit_event(void (*func)(void))
{
    return remove_callback(&exit_callbacks, (void (*)(void))func, true);
}
void
dr_register_bb_event(dr_emit_flags_t (*func)
                     (void *drcontext, void *tag, instrlist_t *bb,
                      bool for_trace, bool translating))
{
    if (!INTERNAL_OPTION(code_api)) {
        CLIENT_ASSERT(false, "asking for bb event when code_api is disabled");
        return;
    }
    add_callback(&bb_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_bb_event(dr_emit_flags_t (*func)
                       (void *drcontext, void *tag, instrlist_t *bb,
                        bool for_trace, bool translating))
{
    return remove_callback(&bb_callbacks, (void (*)(void))func, true);
}
void
dr_register_trace_event(dr_emit_flags_t (*func)
                        (void *drcontext, void *tag, instrlist_t *trace,
                         bool translating))
{
    if (!INTERNAL_OPTION(code_api)) {
        CLIENT_ASSERT(false, "asking for trace event when code_api is disabled");
        return;
    }
    add_callback(&trace_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_trace_event(dr_emit_flags_t (*func)
                          (void *drcontext, void *tag, instrlist_t *trace,
                           bool translating))
{
    return remove_callback(&trace_callbacks, (void (*)(void))func, true);
}
#ifdef CUSTOM_TRACES
/* End-trace event: lets a client decide when a custom trace stops growing. */
void
dr_register_end_trace_event(dr_custom_trace_action_t (*func)
                            (void *drcontext, void *tag, void *next_tag))
{
    if (!INTERNAL_OPTION(code_api)) {
        CLIENT_ASSERT(false, "asking for end-trace event when code_api is disabled");
        return;
    }
    add_callback(&end_trace_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_end_trace_event(dr_custom_trace_action_t
                              (*func)(void *drcontext, void *tag, void *next_tag))
{
    return remove_callback(&end_trace_callbacks, (void (*)(void))func, true);
}
#endif
/* Fragment-deletion and machine-state-restoration events (code_api only).
 * The "_ex" restore variant receives extra info and can report failure.
 */
void
dr_register_delete_event(void (*func)(void *drcontext, void *tag))
{
    if (!INTERNAL_OPTION(code_api)) {
        CLIENT_ASSERT(false, "asking for delete event when code_api is disabled");
        return;
    }
    add_callback(&fragdel_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_delete_event(void (*func)(void *drcontext, void *tag))
{
    return remove_callback(&fragdel_callbacks, (void (*)(void))func, true);
}
void
dr_register_restore_state_event(void (*func)
                                (void *drcontext, void *tag, dr_mcontext_t *mcontext,
                                 bool restore_memory, bool app_code_consistent))
{
    if (!INTERNAL_OPTION(code_api)) {
        CLIENT_ASSERT(false, "asking for restore state event when code_api is disabled");
        return;
    }
    add_callback(&restore_state_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_restore_state_event(void (*func)
                                  (void *drcontext, void *tag, dr_mcontext_t *mcontext,
                                   bool restore_memory, bool app_code_consistent))
{
    return remove_callback(&restore_state_callbacks, (void (*)(void))func, true);
}
void
dr_register_restore_state_ex_event(bool (*func) (void *drcontext, bool restore_memory,
                                                 dr_restore_state_info_t *info))
{
    if (!INTERNAL_OPTION(code_api)) {
        CLIENT_ASSERT(false, "asking for restore_state_ex event when code_api disabled");
        return;
    }
    add_callback(&restore_state_ex_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_restore_state_ex_event(bool (*func) (void *drcontext, bool restore_memory,
                                                   dr_restore_state_info_t *info))
{
    return remove_callback(&restore_state_ex_callbacks, (void (*)(void))func, true);
}
/* Thread lifetime events; fork-init is UNIX-only and fires in the child. */
void
dr_register_thread_init_event(void (*func)(void *drcontext))
{
    add_callback(&thread_init_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_thread_init_event(void (*func)(void *drcontext))
{
    return remove_callback(&thread_init_callbacks, (void (*)(void))func, true);
}
void
dr_register_thread_exit_event(void (*func)(void *drcontext))
{
    add_callback(&thread_exit_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_thread_exit_event(void (*func)(void *drcontext))
{
    return remove_callback(&thread_exit_callbacks, (void (*)(void))func, true);
}
#ifdef UNIX
void
dr_register_fork_init_event(void (*func)(void *drcontext))
{
    add_callback(&fork_init_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_fork_init_event(void (*func)(void *drcontext))
{
    return remove_callback(&fork_init_callbacks, (void (*)(void))func, true);
}
#endif
/* Module and fault events.  Exceptions are Windows-only; signals are the
 * UNIX counterpart.
 */
void
dr_register_module_load_event(void (*func)(void *drcontext, const module_data_t *info,
                                           bool loaded))
{
    add_callback(&module_load_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_module_load_event(void (*func)(void *drcontext, const module_data_t *info,
                                             bool loaded))
{
    return remove_callback(&module_load_callbacks, (void (*)(void))func, true);
}
void
dr_register_module_unload_event(void (*func)(void *drcontext,
                                             const module_data_t *info))
{
    add_callback(&module_unload_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_module_unload_event(void (*func)(void *drcontext,
                                               const module_data_t *info))
{
    return remove_callback(&module_unload_callbacks, (void (*)(void))func, true);
}
#ifdef WINDOWS
void
dr_register_exception_event(bool (*func)(void *drcontext, dr_exception_t *excpt))
{
    add_callback(&exception_callbacks, (bool (*)(void))func, true);
}
bool
dr_unregister_exception_event(bool (*func)(void *drcontext, dr_exception_t *excpt))
{
    return remove_callback(&exception_callbacks, (bool (*)(void))func, true);
}
#else
void
dr_register_signal_event(dr_signal_action_t (*func)
                         (void *drcontext, dr_siginfo_t *siginfo))
{
    add_callback(&signal_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_signal_event(dr_signal_action_t (*func)
                           (void *drcontext, dr_siginfo_t *siginfo))
{
    return remove_callback(&signal_callbacks, (void (*)(void))func, true);
}
#endif /* WINDOWS */
/* System-call events: the filter event picks which syscalls raise the
 * pre/post events; kernel-xfer reports kernel-mediated control transfers.
 */
void
dr_register_filter_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
    add_callback(&filter_syscall_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_filter_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
    return remove_callback(&filter_syscall_callbacks, (void (*)(void))func, true);
}
void
dr_register_pre_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
    add_callback(&pre_syscall_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_pre_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
    return remove_callback(&pre_syscall_callbacks, (void (*)(void))func, true);
}
void
dr_register_post_syscall_event(void (*func)(void *drcontext, int sysnum))
{
    add_callback(&post_syscall_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_post_syscall_event(void (*func)(void *drcontext, int sysnum))
{
    return remove_callback(&post_syscall_callbacks, (void (*)(void))func, true);
}
void
dr_register_kernel_xfer_event(void (*func)(void *drcontext,
                                           const dr_kernel_xfer_info_t *info))
{
    add_callback(&kernel_xfer_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_kernel_xfer_event(void (*func)(void *drcontext,
                                             const dr_kernel_xfer_info_t *info))
{
    return remove_callback(&kernel_xfer_callbacks, (void (*)(void))func, true);
}
#ifdef PROGRAM_SHEPHERDING
/* Security-violation event (program-shepherding builds only): lets a
 * client observe a violation and override the action taken.
 */
void
dr_register_security_event(void (*func)(void *drcontext, void *source_tag,
                                        app_pc source_pc, app_pc target_pc,
                                        dr_security_violation_type_t violation,
                                        dr_mcontext_t *mcontext,
                                        dr_security_violation_action_t *action))
{
    add_callback(&security_violation_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_security_event(void (*func)(void *drcontext, void *source_tag,
                                          app_pc source_pc, app_pc target_pc,
                                          dr_security_violation_type_t violation,
                                          dr_mcontext_t *mcontext,
                                          dr_security_violation_action_t *action))
{
    return remove_callback(&security_violation_callbacks, (void (*)(void))func, true);
}
#endif
/* Nudge handlers are per-client: locate the owning client_lib_t and
 * append to its heap-resident callback list.
 */
void
dr_register_nudge_event(void (*func)(void *drcontext, uint64 argument), client_id_t id)
{
    size_t idx;
    for (idx = 0; idx < num_client_libs; idx++) {
        if (client_libs[idx].id != id)
            continue;
        add_callback(&client_libs[idx].nudge_callbacks, (void (*)(void))func,
                     /* the nudge callback list is stored on the heap, so
                      * we don't need to unprotect the .data section when
                      * we update the list */
                     false);
        return;
    }
    CLIENT_ASSERT(false, "dr_register_nudge_event: invalid client ID");
}
/* Removes a per-client nudge handler; returns whether it was registered. */
bool
dr_unregister_nudge_event(void (*func)(void *drcontext, uint64 argument), client_id_t id)
{
    size_t idx;
    for (idx = 0; idx < num_client_libs; idx++) {
        if (client_libs[idx].id != id)
            continue;
        return remove_callback(&client_libs[idx].nudge_callbacks, (void (*)(void))func,
                               /* the nudge callback list is stored on the heap, so
                                * we don't need to unprotect the .data section when
                                * we update the list */
                               false);
    }
    CLIENT_ASSERT(false, "dr_unregister_nudge_event: invalid client ID");
    return false;
}
/* Sends a nudge carrying 'argument' to the client with 'client_id' in the
 * given process.  For the current process we verify up front that the
 * target client exists and has a nudge handler registered; remote
 * processes are nudged without those checks.
 */
dr_config_status_t
dr_nudge_client_ex(process_id_t process_id, client_id_t client_id,
                   uint64 argument, uint timeout_ms)
{
    if (process_id == get_process_id()) {
        size_t i;
#ifdef WINDOWS
        pre_second_thread();
#endif
        for (i=0; i<num_client_libs; i++) {
            if (client_libs[i].id == client_id) {
                if (client_libs[i].nudge_callbacks.num == 0) {
                    CLIENT_ASSERT(false, "dr_nudge_client: no nudge handler registered");
                    /* Fix: this used to "return false", but in the
                     * dr_config_status_t enum false == 0 == DR_SUCCESS, so
                     * the failure was silently reported as success.
                     */
                    return DR_FAILURE;
                }
                return nudge_internal(process_id, NUDGE_GENERIC(client), argument,
                                      client_id, timeout_ms);
            }
        }
        /* Unknown client id: report an explicit failure rather than a
         * boolean that aliases DR_SUCCESS.
         */
        return DR_FAILURE;
    } else {
        return nudge_internal(process_id, NUDGE_GENERIC(client), argument,
                              client_id, timeout_ms);
    }
}
/* Convenience wrapper: nudges the current process with no timeout. */
bool
dr_nudge_client(client_id_t client_id, uint64 argument)
{
    dr_config_status_t res =
        dr_nudge_client_ex(get_process_id(), client_id, argument, 0);
    return res == DR_SUCCESS;
}
#ifdef WINDOWS
DR_API
/* A thread currently delivering a nudge has a non-NULL nudge_target. */
bool
dr_is_nudge_thread(void *drcontext)
{
    CLIENT_ASSERT(drcontext != NULL, "invalid parameter to dr_is_nudge_thread");
    return ((dcontext_t *) drcontext)->nudge_target != NULL;
}
#endif
/* Allocates and zeroes dcontext->client_data on first use; for
 * client-spawned (sideline) threads also marks the thread as not under
 * DR control and bumps the sideline-thread count.
 */
void
instrument_client_thread_init(dcontext_t *dcontext, bool client_thread)
{
    if (dcontext->client_data == NULL) {
        dcontext->client_data = HEAP_TYPE_ALLOC(dcontext, client_data_t,
                                                ACCT_OTHER, UNPROTECTED);
        memset(dcontext->client_data, 0x0, sizeof(client_data_t));
#ifdef CLIENT_SIDELINE
        ASSIGN_INIT_LOCK_FREE(dcontext->client_data->sideline_mutex, sideline_mutex);
#endif
        CLIENT_ASSERT(dynamo_initialized || thread_init_callbacks.num == 0 ||
                      client_thread,
                      "1st call to instrument_thread_init should have no cbs");
    }
#ifdef CLIENT_SIDELINE
    if (client_thread) {
        ATOMIC_INC(int, num_client_sideline_threads);
        /* We don't call dynamo_thread_not_under_dynamo() b/c we want itimers. */
        dcontext->thread_record->under_dynamo_control = false;
        dcontext->client_data->is_client_thread = true;
        dcontext->client_data->suspendable = true;
    }
#endif /* CLIENT_SIDELINE */
}
/* Raises the client thread-init event (skipped for client threads).
 * 'valid_mc' enables dr_get_mcontext() from within the event.
 */
void
instrument_thread_init(dcontext_t *dcontext, bool client_thread, bool valid_mc)
{
    /* Note that we're called twice for the initial thread: once prior
     * to instrument_init() (PR 216936) to set up the dcontext client
     * field (at which point there should be no callbacks since client
     * has not had a chance to register any) (now split out, but both
     * routines are called prior to instrument_init()), and once after
     * instrument_init() to call the client event.
     */
#if defined(CLIENT_INTERFACE) && defined(WINDOWS)
    bool swap_peb = false;
#endif
    if (client_thread) {
        /* no init event */
        return;
    }
#if defined(CLIENT_INTERFACE) && defined(WINDOWS)
    /* i#996: we might be in app's state.
     * It is simpler to check and swap here than earlier on thread init paths.
     */
    if (dr_using_app_state(dcontext)) {
        swap_peb_pointer(dcontext, true/*to priv*/);
        swap_peb = true;
    }
#endif
    /* i#117/PR 395156: support dr_get_mcontext() from the thread init event */
    if (valid_mc)
        dcontext->client_data->mcontext_in_dcontext = true;
    call_all(thread_init_callbacks, int (*)(void *), (void *)dcontext);
    if (valid_mc)
        dcontext->client_data->mcontext_in_dcontext = false;
#if defined(CLIENT_INTERFACE) && defined(WINDOWS)
    if (swap_peb)
        swap_peb_pointer(dcontext, false/*to app*/);
#endif
}
#ifdef UNIX
/* Raises the client fork-init event in the child after a fork. */
void
instrument_fork_init(dcontext_t *dcontext)
{
    call_all(fork_init_callbacks, int (*)(void *), (void *)dcontext);
}
#endif
/* PR 536058: split the exit event from thread cleanup, to provide a
 * dcontext in the process exit event
 */
/* Raises the client thread-exit event; client (sideline) threads get no
 * event and only decrement the sideline-thread count.
 */
void
instrument_thread_exit_event(dcontext_t *dcontext)
{
#ifdef CLIENT_SIDELINE
    if (IS_CLIENT_THREAD(dcontext)
        /* if nudge thread calls dr_exit_process() it will be marked as a client
         * thread: rule it out here so we properly clean it up
         */
        IF_WINDOWS(&& dcontext->nudge_target == NULL)) {
        ATOMIC_DEC(int, num_client_sideline_threads);
        /* no exit event */
        return;
    }
#endif
    /* i#1394: best-effort to try to avoid crashing thread exit events
     * where thread init was never called.
     */
    if (!dynamo_initialized)
        return;
    /* support dr_get_mcontext() from the exit event */
    dcontext->client_data->mcontext_in_dcontext = true;
    /* Note - currently own initexit lock when this is called (see PR 227619). */
    call_all(thread_exit_callbacks, int (*)(void *), (void *)dcontext);
}
/* Frees per-thread client state (todo list, flush list, client_data).
 * Freeing happens only in debug builds: PR 470957 -- release builds skip
 * it to avoid racy crashes.
 */
void
instrument_thread_exit(dcontext_t *dcontext)
{
#ifdef DEBUG
    client_todo_list_t *todo;
    client_flush_req_t *flush;
#endif
#ifdef DEBUG
    /* PR 470957: avoid racy crashes by not freeing in release build */
# ifdef CLIENT_SIDELINE
    DELETE_LOCK(dcontext->client_data->sideline_mutex);
# endif
    /* could be heap space allocated for the todo list */
    todo = dcontext->client_data->to_do;
    while (todo != NULL) {
        client_todo_list_t *next_todo = todo->next;
        if (todo->ilist != NULL) {
            instrlist_clear_and_destroy(dcontext, todo->ilist);
        }
        HEAP_TYPE_FREE(dcontext, todo, client_todo_list_t, ACCT_CLIENT, UNPROTECTED);
        todo = next_todo;
    }
    /* could be heap space allocated for the flush list */
    flush = dcontext->client_data->flush_list;
    while (flush != NULL) {
        client_flush_req_t *next_flush = flush->next;
        HEAP_TYPE_FREE(dcontext, flush, client_flush_req_t, ACCT_CLIENT, UNPROTECTED);
        flush = next_flush;
    }
    HEAP_TYPE_FREE(dcontext, dcontext->client_data, client_data_t,
                   ACCT_OTHER, UNPROTECTED);
    dcontext->client_data = NULL; /* for mutex_wait_contended_lock() */
    dcontext->is_client_thread_exiting = true; /* for is_using_app_peb() */
#endif /* DEBUG */
}
/* Reports whether any client registered a basic-block event. */
bool
dr_bb_hook_exists(void)
{
    return bb_callbacks.num > 0;
}
/* Reports whether any client registered a trace event. */
bool
dr_trace_hook_exists(void)
{
    return trace_callbacks.num > 0;
}
/* Reports whether any client registered a fragment-deleted event. */
bool
dr_fragment_deleted_hook_exists(void)
{
    return fragdel_callbacks.num > 0;
}
/* Reports whether any client registered an end-trace event. */
bool
dr_end_trace_hook_exists(void)
{
    return end_trace_callbacks.num > 0;
}
/* Reports whether any client registered a thread-exit event. */
bool
dr_thread_exit_hook_exists(void)
{
    return thread_exit_callbacks.num > 0;
}
/* Reports whether any client registered a process-exit event. */
bool
dr_exit_hook_exists(void)
{
    return exit_callbacks.num > 0;
}
/* Reports whether any client registered a state-restore event,
 * counting both the legacy and the extended (_ex) variants.
 */
bool
dr_xl8_hook_exists(void)
{
    return restore_state_callbacks.num > 0 ||
        restore_state_ex_callbacks.num > 0;
}
#endif /* CLIENT_INTERFACE */
/* needed outside of CLIENT_INTERFACE for simpler USE_BB_BUILDING_LOCK_STEADY_STATE() */
/* Returns whether any client registered a module-load event; always
 * false in non-CLIENT_INTERFACE builds.
 */
bool
dr_modload_hook_exists(void)
{
    /* We do not support (as documented in the module event doxygen)
     * the client changing this during bb building, as that will mess
     * up USE_BB_BUILDING_LOCK_STEADY_STATE().
     */
    return IF_CLIENT_INTERFACE_ELSE(module_load_callbacks.num > 0, false);
}
#ifdef CLIENT_INTERFACE
/* Returns true when this bb tag belongs to DR's own Windows hook
 * plumbing (interception buffer, landing pads, syscall trampolines)
 * and should therefore be hidden from client bb/fragment events.
 * Always returns false on non-Windows.
 */
bool
hide_tag_from_client(app_pc tag)
{
#ifdef WINDOWS
    /* Case 10009: Basic blocks that consist of a single jump into the
     * interception buffer should be obscured from clients.  Clients
     * will see the displaced code, so we'll provide the address of this
     * block if the client asks for the address of the displaced code.
     *
     * Note that we assume the jump is the first instruction in the
     * BB for any blocks that jump to the interception buffer.
     */
    if (is_intercepted_app_pc(tag, NULL) ||
        /* Displaced app code is now in the landing pad, so skip the
         * jump from the interception buffer to the landing pad
         */
        is_in_interception_buffer(tag) ||
        /* Landing pads that exist between hook points and the trampolines
         * shouldn't be seen by the client too.  PR 250294.
         */
        is_on_interception_initial_route(tag) ||
        /* PR 219351: if we lose control on a callback and get it back on
         * one of our syscall trampolines, we'll appear at the jmp out of
         * the interception buffer to the int/sysenter instruction.  The
         * problem is that our syscall trampolines, unlike our other
         * intercepted code, are hooked earlier than the real action point
         * and we have displaced app code at the start of the interception
         * buffer: we hook at the wrapper entrance and return w/ a jmp to
         * the sysenter/int instr.  When creating bbs at the start we hack
         * it to make it look like there is no hook.  But on retaking control
         * we end up w/ this jmp out that won't be solved w/ our normal
         * mechanism for other hook jmp-outs: so we just suppress and the
         * client next sees the post-syscall bb.  It already saw a gap.
         */
        is_syscall_trampoline(tag, NULL))
        return true;
#endif
    return false;
}
#ifdef DEBUG
/* PR 214962: client must set translation fields */
/* Debug-only sanity check run after client bb/trace events: every app
 * instruction must carry a translation, and a meta instruction may only
 * carry one if it is DR's own mangling or a restore-state hook exists.
 */
static void
check_ilist_translations(instrlist_t *ilist)
{
    /* Ensure client set the translation field for all non-meta
     * instrs, even if it didn't return DR_EMIT_STORE_TRANSLATIONS
     * (since we may decide ourselves to store)
     */
    instr_t *in;
    for (in = instrlist_first(ilist); in != NULL; in = instr_get_next(in)) {
        if (!instr_opcode_valid(in)) {
            CLIENT_ASSERT(INTERNAL_OPTION(fast_client_decode), "level 0 instr found");
        } else if (instr_is_app(in)) {
            DOLOG(LOG_INTERP, 1, {
                if (instr_get_translation(in) == NULL)
                    loginst(get_thread_private_dcontext(), 1, in, "translation is NULL");
            });
            CLIENT_ASSERT(instr_get_translation(in) != NULL,
                          "translation field must be set for every app instruction");
        } else {
            /* The meta instr could indeed not affect app state, but
             * better I think to assert and make them put in an
             * empty restore event callback in that case. */
            DOLOG(LOG_INTERP, 1, {
                if (instr_get_translation(in) != NULL &&
                    !instr_is_our_mangling(in) &&
                    !dr_xl8_hook_exists())
                    loginst(get_thread_private_dcontext(), 1, in, "translation != NULL");
            });
            CLIENT_ASSERT(instr_get_translation(in) == NULL ||
                          instr_is_our_mangling(in) ||
                          dr_xl8_hook_exists(),
                          /* FIXME: if multiple clients, we need to check that this
                           * particular client has the callback: but we have
                           * no way to do that other than looking at library
                           * bounds...punting for now */
                          "a meta instr should not have its translation field "
                          "set without also having a restore_state callback");
        }
    }
}
#endif
/* Returns true if the bb hook is called */
/* Runs every registered client bb event on 'bb' (for block 'tag'),
 * OR-ing together the clients' DR_EMIT_* return values into *emitflags
 * (when non-NULL).  Skips internal Windows hook blocks entirely.
 */
bool
instrument_basic_block(dcontext_t *dcontext, app_pc tag, instrlist_t *bb,
                       bool for_trace, bool translating, dr_emit_flags_t *emitflags)
{
    dr_emit_flags_t ret = DR_EMIT_DEFAULT;
    /* return false if no BB hooks are registered */
    if (bb_callbacks.num == 0)
        return false;
    if (hide_tag_from_client(tag)) {
        LOG(THREAD, LOG_INTERP, 3, "hiding tag "PFX" from client\n", tag);
        return false;
    }
    /* do not expand or up-decode the instrlist, client gets to choose
     * whether and how to do that
     */
#ifdef DEBUG
    LOG(THREAD, LOG_INTERP, 3, "\ninstrument_basic_block ******************\n");
    LOG(THREAD, LOG_INTERP, 3, "\nbefore instrumentation:\n");
    if (stats->loglevel >= 3 && (stats->logmask & LOG_INTERP) != 0)
        instrlist_disassemble(dcontext, tag, bb, THREAD);
#endif
    /* i#117/PR 395156: allow dr_[gs]et_mcontext where accurate */
    if (!translating && !for_trace)
        dcontext->client_data->mcontext_in_dcontext = true;
    /* Note - currently we are couldbelinking and hold the
     * bb_building lock when this is called (see PR 227619).
     */
    /* We or together the return values */
    call_all_ret(ret, |=, , bb_callbacks,
                 int (*) (void *, void *, instrlist_t *, bool, bool),
                 (void *)dcontext, (void *)tag, bb, for_trace, translating);
    if (emitflags != NULL)
        *emitflags = ret;
    DOCHECK(1, { check_ilist_translations(bb); });
    dcontext->client_data->mcontext_in_dcontext = false;
    /* Debug-only: return/fall-through targets are a bb-only feature. */
    if (IF_DEBUG_ELSE(for_trace, false)) {
        CLIENT_ASSERT(instrlist_get_return_target(bb) == NULL &&
                      instrlist_get_fall_through_target(bb) == NULL,
                      "instrlist_set_return/fall_through_target"
                      " cannot be used on traces");
    }
#ifdef DEBUG
    LOG(THREAD, LOG_INTERP, 3, "\nafter instrumentation:\n");
    if (stats->loglevel >= 3 && (stats->logmask & LOG_INTERP) != 0)
        instrlist_disassemble(dcontext, tag, bb, THREAD);
#endif
    return true;
}
/* Give the user the completely mangled and optimized trace just prior
 * to emitting into code cache, user gets final crack at it
 */
/* Returns the OR of all client trace-event DR_EMIT_* return values,
 * or DR_EMIT_DEFAULT when no trace hook is registered.
 */
dr_emit_flags_t
instrument_trace(dcontext_t *dcontext, app_pc tag, instrlist_t *trace,
                 bool translating)
{
    dr_emit_flags_t ret = DR_EMIT_DEFAULT;
#ifdef UNSUPPORTED_API
    instr_t *instr;
#endif
    if (trace_callbacks.num == 0)
        return DR_EMIT_DEFAULT;
    /* do not expand or up-decode the instrlist, client gets to choose
     * whether and how to do that
     */
#ifdef DEBUG
    LOG(THREAD, LOG_INTERP, 3, "\ninstrument_trace ******************\n");
    LOG(THREAD, LOG_INTERP, 3, "\nbefore instrumentation:\n");
    if (stats->loglevel >= 3 && (stats->logmask & LOG_INTERP) != 0)
        instrlist_disassemble(dcontext, tag, trace, THREAD);
#endif
    /* We always pass Level 3 instrs to the client, since we no longer
     * expose the expansion routines.
     */
#ifdef UNSUPPORTED_API
    for (instr = instrlist_first_expanded(dcontext, trace);
         instr != NULL;
         instr = instr_get_next_expanded(dcontext, trace, instr)) {
        instr_decode(dcontext, instr);
    }
    /* ASSUMPTION: all ctis are already at Level 3, so we don't have
     * to do a separate pass to fix up intra-list targets like
     * instrlist_decode_cti() does
     */
#endif
    /* i#117/PR 395156: allow dr_[gs]et_mcontext where accurate */
    if (!translating)
        dcontext->client_data->mcontext_in_dcontext = true;
    /* We or together the return values */
    call_all_ret(ret, |=, , trace_callbacks,
                 int (*)(void *, void *, instrlist_t *, bool),
                 (void *)dcontext, (void *)tag, trace, translating);
    DOCHECK(1, { check_ilist_translations(trace); });
    CLIENT_ASSERT(instrlist_get_return_target(trace) == NULL &&
                  instrlist_get_fall_through_target(trace) == NULL,
                  "instrlist_set_return/fall_through_target"
                  " cannot be used on traces");
    dcontext->client_data->mcontext_in_dcontext = false;
#ifdef DEBUG
    LOG(THREAD, LOG_INTERP, 3, "\nafter instrumentation:\n");
    if (stats->loglevel >= 3 && (stats->logmask & LOG_INTERP) != 0)
        instrlist_disassemble(dcontext, tag, trace, THREAD);
#endif
    return ret;
}
/* Notify user when a fragment is deleted from the cache
 * FIXME PR 242544: how does user know whether this is a shadowed copy or the
 * real thing?  The user might free memory that shouldn't be freed!
 */
/* 'flags' are the fragment flags; FRAG_IS_TRACE distinguishes traces
 * (which are never hidden) from plain bbs.
 */
void
instrument_fragment_deleted(dcontext_t *dcontext, app_pc tag, uint flags)
{
    if (fragdel_callbacks.num == 0)
        return;
#ifdef WINDOWS
    /* Case 10009: We don't call the basic block hook for blocks that
     * are jumps to the interception buffer, so we'll hide them here
     * as well.
     */
    if (!TEST(FRAG_IS_TRACE, flags) && hide_tag_from_client(tag))
        return;
#endif
    /* PR 243008: we don't expose GLOBAL_DCONTEXT, so change to NULL.
     * Our comments warn the user about this.
     */
    if (dcontext == GLOBAL_DCONTEXT)
        dcontext = NULL;
    call_all(fragdel_callbacks, int (*)(void *, void *),
             (void *)dcontext, (void *)tag);
}
/* Runs the client state-restoration events (legacy then extended).
 * Returns false iff some extended callback failed the translation,
 * which is only permitted when restore_memory is false (see assert).
 */
bool
instrument_restore_state(dcontext_t *dcontext, bool restore_memory,
                         dr_restore_state_info_t *info)
{
    bool res = true;
    /* Support both legacy and extended handlers */
    if (restore_state_callbacks.num > 0) {
        call_all(restore_state_callbacks,
                 int (*)(void *, void *, dr_mcontext_t *, bool, bool),
                 (void *)dcontext, info->fragment_info.tag, info->mcontext,
                 restore_memory, info->fragment_info.app_code_consistent);
    }
    if (restore_state_ex_callbacks.num > 0) {
        /* i#220/PR 480565: client has option of failing the translation.
         * We fail it if any client wants to, short-circuiting in that case.
         * This does violate the "priority order" of events where the
         * last one is supposed to have final say b/c it won't even
         * see the event (xref i#424).
         */
        call_all_ret(res, = res &&, , restore_state_ex_callbacks,
                     int (*)(void *, bool, dr_restore_state_info_t *),
                     (void *)dcontext, restore_memory, info);
    }
    CLIENT_ASSERT(!restore_memory || res,
                  "translation should not fail for restore_memory=true");
    return res;
}
#ifdef CUSTOM_TRACES
/* Ask whether to end trace prior to adding next_tag fragment.
 * Return values:
 *   CUSTOM_TRACE_DR_DECIDES = use standard termination criteria
 *   CUSTOM_TRACE_END_NOW    = end trace
 *   CUSTOM_TRACE_CONTINUE   = do not end trace
 */
dr_custom_trace_action_t
instrument_end_trace(dcontext_t *dcontext, app_pc trace_tag, app_pc next_tag)
{
    dr_custom_trace_action_t ret = CUSTOM_TRACE_DR_DECIDES;
    if (end_trace_callbacks.num == 0)
        return ret;
    /* Highest priority callback decides how to end the trace (see
     * call_all_ret implementation)
     */
    call_all_ret(ret, =, , end_trace_callbacks, int (*)(void *, void *, void *),
                 (void *)dcontext, (void *)trace_tag, (void *)next_tag);
    return ret;
}
#endif
/* Allocates and populates a module_data_t for handing to clients.
 * All name/path strings are duplicated; on UNIX the segment array is
 * deep-copied from exactly one of os_segments / segments (the other
 * must be NULL).  The result must be freed with dr_free_module_data().
 */
static module_data_t *
create_and_initialize_module_data(app_pc start, app_pc end, app_pc entry_point,
                                  uint flags, const module_names_t *names,
                                  const char *full_path
#ifdef WINDOWS
                                  , version_number_t file_version,
                                  version_number_t product_version,
                                  uint checksum, uint timestamp,
                                  size_t mod_size
#else
                                  , bool contiguous,
                                  uint num_segments,
                                  module_segment_t *os_segments,
                                  module_segment_data_t *segments,
                                  uint timestamp
# ifdef MACOS
                                  , uint current_version,
                                  uint compatibility_version,
                                  const byte uuid[16]
# endif
#endif
                                  )
{
#ifndef WINDOWS
    uint i;
#endif
    module_data_t *copy = (module_data_t *)
        HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, module_data_t, ACCT_CLIENT, UNPROTECTED);
    memset(copy, 0, sizeof(module_data_t));
    copy->start = start;
    copy->end = end;
    copy->entry_point = entry_point;
    copy->flags = flags;
    /* duplicate all strings so the copy owns them independently */
    if (full_path != NULL)
        copy->full_path = dr_strdup(full_path HEAPACCT(ACCT_CLIENT));
    if (names->module_name != NULL)
        copy->names.module_name = dr_strdup(names->module_name HEAPACCT(ACCT_CLIENT));
    if (names->file_name != NULL)
        copy->names.file_name = dr_strdup(names->file_name HEAPACCT(ACCT_CLIENT));
#ifdef WINDOWS
    if (names->exe_name != NULL)
        copy->names.exe_name = dr_strdup(names->exe_name HEAPACCT(ACCT_CLIENT));
    if (names->rsrc_name != NULL)
        copy->names.rsrc_name = dr_strdup(names->rsrc_name HEAPACCT(ACCT_CLIENT));
    copy->file_version = file_version;
    copy->product_version = product_version;
    copy->checksum = checksum;
    copy->timestamp = timestamp;
    copy->module_internal_size = mod_size;
#else
    copy->contiguous = contiguous;
    copy->num_segments = num_segments;
    copy->segments = (module_segment_data_t *)
        HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, module_segment_data_t,
                         num_segments, ACCT_VMAREAS, PROTECTED);
    if (os_segments != NULL) {
        ASSERT(segments == NULL);
        /* field-by-field copy: the two segment structs differ in layout */
        for (i = 0; i < num_segments; i++) {
            copy->segments[i].start = os_segments[i].start;
            copy->segments[i].end = os_segments[i].end;
            copy->segments[i].prot = os_segments[i].prot;
            copy->segments[i].offset = os_segments[i].offset;
        }
    } else {
        ASSERT(segments != NULL);
        if (segments != NULL)
            memcpy(copy->segments, segments, num_segments*sizeof(module_segment_data_t));
    }
    copy->timestamp = timestamp;
# ifdef MACOS
    copy->current_version = current_version;
    copy->compatibility_version = compatibility_version;
    memcpy(copy->uuid, uuid, sizeof(copy->uuid));
# endif
#endif
    return copy;
}
/* Converts an internal module_area_t into a client-facing module_data_t
 * copy (or NULL for NULL input).  Caller frees via dr_free_module_data().
 */
module_data_t *
copy_module_area_to_module_data(const module_area_t *area)
{
    if (area == NULL)
        return NULL;
    return create_and_initialize_module_data(area->start, area->end, area->entry_point,
                                             0, &area->names, area->full_path
#ifdef WINDOWS
                                             , area->os_data.file_version,
                                             area->os_data.product_version,
                                             area->os_data.checksum,
                                             area->os_data.timestamp,
                                             area->os_data.module_internal_size
#else
                                             , area->os_data.contiguous,
                                             area->os_data.num_segments,
                                             area->os_data.segments,
                                             NULL,
                                             area->os_data.timestamp
# ifdef MACOS
                                             , area->os_data.current_version,
                                             area->os_data.compatibility_version,
                                             area->os_data.uuid
# endif
#endif
                                             );
}
DR_API
/* Makes a copy of a module_data_t for returning to the client.  We return a copy so
 * we don't have to hold the module areas list lock while in the client (xref PR 225020).
 * Note - dr_data is allowed to be NULL. */
module_data_t *
dr_copy_module_data(const module_data_t *data)
{
    if (data == NULL)
        return NULL;
    /* Note: segments (UNIX) come from data->segments here, unlike
     * copy_module_area_to_module_data() which passes os_segments.
     */
    return create_and_initialize_module_data(data->start, data->end, data->entry_point,
                                             0, &data->names, data->full_path
#ifdef WINDOWS
                                             , data->file_version,
                                             data->product_version,
                                             data->checksum, data->timestamp,
                                             data->module_internal_size
#else
                                             , data->contiguous,
                                             data->num_segments,
                                             NULL,
                                             data->segments,
                                             data->timestamp
# ifdef MACOS
                                             , data->current_version,
                                             data->compatibility_version,
                                             data->uuid
# endif
#endif
                                             );
}
DR_API
/* Used to free a module_data_t created by dr_copy_module_data() */
/* Refuses (with a client assert) to free the module data that DR itself
 * passed into a module load/unload event (tracked via no_delete_mod_data).
 */
void
dr_free_module_data(module_data_t *data)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    if (data == NULL)
        return;
    if (dcontext != NULL && data == dcontext->client_data->no_delete_mod_data) {
        CLIENT_ASSERT(false, "dr_free_module_data: don\'t free module_data passed to "
                      "the image load or image unload event callbacks.");
        return;
    }
#ifdef UNIX
    HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, data->segments, module_segment_data_t,
                    data->num_segments, ACCT_VMAREAS, PROTECTED);
#endif
    if (data->full_path != NULL)
        dr_strfree(data->full_path HEAPACCT(ACCT_CLIENT));
    free_module_names(&data->names HEAPACCT(ACCT_CLIENT));
    HEAP_TYPE_FREE(GLOBAL_DCONTEXT, data, module_data_t, ACCT_CLIENT, UNPROTECTED);
}
DR_API
/* Returns whether addr lies within the module: simple range check on
 * Windows/contiguous modules, else a per-segment scan (UNIX).
 */
bool
dr_module_contains_addr(const module_data_t *data, app_pc addr)
{
    /* XXX: this duplicates module_contains_addr(), but we have two different
     * data structures (module_area_t and module_data_t) so it's hard to share.
     */
#ifdef WINDOWS
    return (addr >= data->start && addr < data->end);
#else
    if (data->contiguous)
        return (addr >= data->start && addr < data->end);
    else {
        uint i;
        for (i = 0; i < data->num_segments; i++) {
            if (addr >= data->segments[i].start && addr < data->segments[i].end)
                return true;
        }
    }
    return false;
#endif
}
/* Looks up module containing pc (assumed to be fully loaded).
 * If it exists and its client module load event has not been called, calls it.
 * The MODULE_LOAD_EVENT flag ensures the event fires at most once per module.
 */
void
instrument_module_load_trigger(app_pc pc)
{
    if (CLIENTS_EXIST()) {
        module_area_t *ma;
        module_data_t *client_data = NULL;
        os_get_module_info_lock();
        ma = module_pc_lookup(pc);
        if (ma != NULL && !TEST(MODULE_LOAD_EVENT, ma->flags)) {
            /* switch to write lock */
            os_get_module_info_unlock();
            os_get_module_info_write_lock();
            /* re-lookup and re-test: another thread may have delivered the
             * event during the unlocked window of the read->write upgrade
             */
            ma = module_pc_lookup(pc);
            if (ma != NULL && !TEST(MODULE_LOAD_EVENT, ma->flags)) {
                ma->flags |= MODULE_LOAD_EVENT;
                client_data = copy_module_area_to_module_data(ma);
                os_get_module_info_write_unlock();
                instrument_module_load(client_data, true/*i#884: already loaded*/);
                dr_free_module_data(client_data);
            } else
                os_get_module_info_write_unlock();
        } else
            os_get_module_info_unlock();
    }
}
/* Notify user when a module is loaded */
/* 'data' is protected from dr_free_module_data() for the duration of the
 * callbacks via no_delete_mod_data.
 */
void
instrument_module_load(module_data_t *data, bool previously_loaded)
{
    /* Note - during DR initialization this routine is called before we've set up a
     * dcontext for the main thread and before we've called instrument_init.  It's okay
     * since there's no way a callback will be registered and we'll return immediately. */
    dcontext_t *dcontext;
    if (module_load_callbacks.num == 0)
        return;
    dcontext = get_thread_private_dcontext();
    /* client shouldn't delete this */
    dcontext->client_data->no_delete_mod_data = data;
    call_all(module_load_callbacks, int (*)(void *, module_data_t *, bool),
             (void *)dcontext, data, previously_loaded);
    dcontext->client_data->no_delete_mod_data = NULL;
}
/* Notify user when a module is unloaded */
/* Mirrors instrument_module_load(): 'data' is protected from
 * dr_free_module_data() during the callbacks.
 */
void
instrument_module_unload(module_data_t *data)
{
    dcontext_t *dcontext;
    if (module_unload_callbacks.num == 0)
        return;
    dcontext = get_thread_private_dcontext();
    /* client shouldn't delete this */
    dcontext->client_data->no_delete_mod_data = data;
    call_all(module_unload_callbacks, int (*)(void *, module_data_t *),
             (void *)dcontext, data);
    dcontext->client_data->no_delete_mod_data = NULL;
}
/* returns whether this sysnum should be intercepted */
/* The result is the logical OR across all registered filter callbacks:
 * one interested client is enough to intercept.
 */
bool
instrument_filter_syscall(dcontext_t *dcontext, int sysnum)
{
    bool ret = false;
    /* if client does not filter then we don't intercept anything */
    if (filter_syscall_callbacks.num == 0)
        return ret;
    /* if any client wants to intercept, then we intercept */
    call_all_ret(ret, =, || ret, filter_syscall_callbacks, bool (*)(void *, int),
                 (void *)dcontext, sysnum);
    return ret;
}
/* returns whether this syscall should execute */
/* The result is the logical AND across all pre-syscall callbacks:
 * any single client may veto execution, but every client still sees
 * the event (no short-circuit).
 */
bool
instrument_pre_syscall(dcontext_t *dcontext, int sysnum)
{
    bool exec = true;
    dcontext->client_data->in_pre_syscall = true;
    /* clear flag from dr_syscall_invoke_another() */
    dcontext->client_data->invoke_another_syscall = false;
    if (pre_syscall_callbacks.num > 0) {
        dr_where_am_i_t old_whereami = dcontext->whereami;
        dcontext->whereami = DR_WHERE_SYSCALL_HANDLER;
        DODEBUG({
            /* Avoid the common mistake of forgetting a filter event. */
            CLIENT_ASSERT(filter_syscall_callbacks.num > 0, "A filter event must be "
                          "provided when using pre- and post-syscall events");
        });
        /* Skip syscall if any client wants to skip it, but don't short-circuit,
         * as skipping syscalls is usually done when the effect of the syscall
         * will be emulated in some other way.  The app is typically meant to
         * think that the syscall succeeded.  Thus, other tool components
         * should see the syscall as well (xref i#424).
         */
        call_all_ret(exec, =, && exec, pre_syscall_callbacks,
                     bool (*)(void *, int), (void *)dcontext, sysnum);
        dcontext->whereami = old_whereami;
    }
    dcontext->client_data->in_pre_syscall = false;
    return exec;
}
/* Delivers the post-syscall event to all registered clients, with
 * whereami and in_post_syscall set for the duration of the callbacks.
 */
void
instrument_post_syscall(dcontext_t *dcontext, int sysnum)
{
    dr_where_am_i_t old_whereami = dcontext->whereami;
    if (post_syscall_callbacks.num == 0)
        return;
    DODEBUG({
        /* Avoid the common mistake of forgetting a filter event. */
        CLIENT_ASSERT(filter_syscall_callbacks.num > 0, "A filter event must be "
                      "provided when using pre- and post-syscall events");
    });
    dcontext->whereami = DR_WHERE_SYSCALL_HANDLER;
    dcontext->client_data->in_post_syscall = true;
    call_all(post_syscall_callbacks, int (*)(void *, int),
             (void *)dcontext, sysnum);
    dcontext->client_data->in_post_syscall = false;
    dcontext->whereami = old_whereami;
}
/* Reports whether a client requested another syscall via
 * dr_syscall_invoke_another() (flag set on client_data).
 */
bool
instrument_invoke_another_syscall(dcontext_t *dcontext)
{
    return dcontext->client_data->invoke_another_syscall;
}
/* Delivers the kernel-transfer event to all registered clients.
 * Returns false (and does nothing) when no client registered the event.
 * The source context may come from any one of source_dmc, source_mc, or
 * source_os_cxt (checked in that order); the target context is exposed
 * lazily through dr_get_mcontext() via client_data->os_cxt / cur_mc.
 */
bool
instrument_kernel_xfer(dcontext_t *dcontext, dr_kernel_xfer_type_t type,
                       os_cxt_ptr_t source_os_cxt, dr_mcontext_t *source_dmc,
                       priv_mcontext_t *source_mc,
                       app_pc target_pc, reg_t target_xsp,
                       os_cxt_ptr_t target_os_cxt, priv_mcontext_t *target_mc,
                       int sig)
{
    if (kernel_xfer_callbacks.num == 0) {
        return false;
    }
    dr_kernel_xfer_info_t info;
    info.type = type;
    info.source_mcontext = NULL;
    info.target_pc = target_pc;
    info.target_xsp = target_xsp;
    info.sig = sig;
    dr_mcontext_t dr_mcontext;
    dr_mcontext.size = sizeof(dr_mcontext);
    dr_mcontext.flags = DR_MC_CONTROL | DR_MC_INTEGER;
    if (source_dmc != NULL)
        info.source_mcontext = source_dmc;
    else if (source_mc != NULL) {
        if (priv_mcontext_to_dr_mcontext(&dr_mcontext, source_mc))
            info.source_mcontext = &dr_mcontext;
    } else if (!is_os_cxt_ptr_null(source_os_cxt)) {
        if (os_context_to_mcontext(&dr_mcontext, NULL, source_os_cxt))
            info.source_mcontext = &dr_mcontext;
    }
    /* Our compromise to reduce context copying is to provide the PC and XSP inline,
     * and only get more if the user calls dr_get_mcontext(), which we support again
     * without any copying if not used by taking in a raw os_context_t.
     */
    dcontext->client_data->os_cxt = target_os_cxt;
    dcontext->client_data->cur_mc = target_mc;
    call_all(kernel_xfer_callbacks, int (*)(void *, const dr_kernel_xfer_info_t *),
             (void *)dcontext, &info);
    set_os_cxt_ptr_null(&dcontext->client_data->os_cxt);
    dcontext->client_data->cur_mc = NULL;
    return true;
}
#ifdef WINDOWS
/* Notify user of exceptions.  Note: not called for RaiseException */
/* Returns false when some client chose to "own" the fault (short-circuits
 * the remaining clients in that case); true to pass the fault on.
 */
bool
instrument_exception(dcontext_t *dcontext, dr_exception_t *exception)
{
    bool res = true;
    /* Ensure that dr_get_mcontext() called from instrument_kernel_xfer() from
     * dr_redirect_execution() will get the source context.
     * cur_mc will later be clobbered by instrument_kernel_xfer() which is ok:
     * the redirect ends the callback calling.
     */
    dcontext->client_data->cur_mc = dr_mcontext_as_priv_mcontext(exception->mcontext);
    /* We short-circuit if any client wants to "own" the fault and not pass on.
     * This does violate the "priority order" of events where the last one is
     * supposed to have final say b/c it won't even see the event: but only one
     * registrant should own it (xref i#424).
     */
    call_all_ret(res, = res &&, , exception_callbacks,
                 bool (*)(void *, dr_exception_t *),
                 (void *)dcontext, exception);
    dcontext->client_data->cur_mc = NULL;
    return res;
}
#else
/* Delivers the signal event to clients.  The first client returning
 * something other than DR_SIGNAL_DELIVER decides the action; later
 * clients are not consulted (see short-circuit note below).
 */
dr_signal_action_t
instrument_signal(dcontext_t *dcontext, dr_siginfo_t *siginfo)
{
    dr_signal_action_t ret = DR_SIGNAL_DELIVER;
    /* We short-circuit if any client wants to do other than deliver to the app.
     * This does violate the "priority order" of events where the last one is
     * supposed to have final say b/c it won't even see the event: but only one
     * registrant should own the signal (xref i#424).
     */
    call_all_ret(ret, = ret == DR_SIGNAL_DELIVER ? , : ret, signal_callbacks,
                 dr_signal_action_t (*)(void *, dr_siginfo_t *),
                 (void *)dcontext, siginfo);
    return ret;
}
/* Reports whether any client registered a signal event. */
bool
dr_signal_hook_exists(void)
{
    return (signal_callbacks.num > 0);
}
#endif /* WINDOWS */
#ifdef PROGRAM_SHEPHERDING
/* Notify user when a security violation is detected */
/* Translates the internal violation/action enums to the DR_* client
 * enums, invokes the client callbacks (the last registered callback has
 * the final say on the action), and translates any changed action back
 * into *action.
 */
void
instrument_security_violation(dcontext_t *dcontext, app_pc target_pc,
                              security_violation_t violation, action_type_t *action)
{
    dr_security_violation_type_t dr_violation;
    dr_security_violation_action_t dr_action, dr_action_original;
    app_pc source_pc = NULL;
    fragment_t *last;
    dr_mcontext_t dr_mcontext;
    dr_mcontext_init(&dr_mcontext);
    if (security_violation_callbacks.num == 0)
        return;
    if (!priv_mcontext_to_dr_mcontext(&dr_mcontext, get_mcontext(dcontext)))
        return;
    /* FIXME - the source_tag, source_pc, and context can all be incorrect if the
     * violation ends up occurring in the middle of a bb we're building.  See case
     * 7380 which we should fix in interp.c.
     */
    /* Obtain the source addr to pass to the client.  xref case 285 --
     * we're using the more heavy-weight solution 2) here, but that
     * should be okay since we already have the overhead of calling
     * into the client. */
    last = dcontext->last_fragment;
    if (!TEST(FRAG_FAKE, last->flags)) {
        cache_pc pc = EXIT_CTI_PC(last, dcontext->last_exit);
        source_pc = recreate_app_pc(dcontext, pc, last);
    }
    /* FIXME - set pc field of dr_mcontext_t.  We'll probably want it
     * for thread start and possibly apc/callback events as well.
     */
    /* map the internal violation type to the client-visible enum */
    switch (violation) {
      case STACK_EXECUTION_VIOLATION:
          dr_violation = DR_RCO_STACK_VIOLATION;
          break;
      case HEAP_EXECUTION_VIOLATION:
          dr_violation = DR_RCO_HEAP_VIOLATION;
          break;
      case RETURN_TARGET_VIOLATION:
          dr_violation = DR_RCT_RETURN_VIOLATION;
          break;
      case RETURN_DIRECT_RCT_VIOLATION:
          ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */
          dr_violation = DR_UNKNOWN_VIOLATION;
          break;
      case INDIRECT_CALL_RCT_VIOLATION:
          dr_violation = DR_RCT_INDIRECT_CALL_VIOLATION;
          break;
      case INDIRECT_JUMP_RCT_VIOLATION:
          dr_violation = DR_RCT_INDIRECT_JUMP_VIOLATION;
          break;
      default:
          ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */
          dr_violation = DR_UNKNOWN_VIOLATION;
          break;
    }
    /* map the internal action to the client-visible enum */
    switch (*action) {
      case ACTION_TERMINATE_PROCESS:
          dr_action = DR_VIOLATION_ACTION_KILL_PROCESS;
          break;
      case ACTION_CONTINUE:
          dr_action = DR_VIOLATION_ACTION_CONTINUE;
          break;
      case ACTION_TERMINATE_THREAD:
          dr_action = DR_VIOLATION_ACTION_KILL_THREAD;
          break;
      case ACTION_THROW_EXCEPTION:
          dr_action = DR_VIOLATION_ACTION_THROW_EXCEPTION;
          break;
      default:
          ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */
          dr_action = DR_VIOLATION_ACTION_CONTINUE;
          break;
    }
    dr_action_original = dr_action;
    /* NOTE - last->tag should be valid here (even if the frag is fake since the
     * coarse wrappers set the tag).  FIXME - for traces we really want the bb tag not
     * the trace tag, should get that.  Of course the only real reason we pass source
     * tag is because we can't always give a valid source_pc. */
    /* Note that the last registered function gets the final crack at
     * changing the action.
     */
    call_all(security_violation_callbacks,
             int (*)(void *, void *, app_pc, app_pc, dr_security_violation_type_t,
                     dr_mcontext_t *, dr_security_violation_action_t *),
             (void *)dcontext, last->tag, source_pc, target_pc,
             dr_violation, &dr_mcontext, &dr_action);
    /* translate a client-changed action back to the internal enum */
    if (dr_action != dr_action_original) {
        switch (dr_action) {
        case DR_VIOLATION_ACTION_KILL_PROCESS:
            *action = ACTION_TERMINATE_PROCESS;
            break;
        case DR_VIOLATION_ACTION_KILL_THREAD:
            *action = ACTION_TERMINATE_THREAD;
            break;
        case DR_VIOLATION_ACTION_THROW_EXCEPTION:
            *action = ACTION_THROW_EXCEPTION;
            break;
        case DR_VIOLATION_ACTION_CONTINUE_CHANGED_CONTEXT:
            /* FIXME - not safe to implement till case 7380 is fixed. */
            CLIENT_ASSERT(false, "action DR_VIOLATION_ACTION_CONTINUE_CHANGED_CONTEXT "
                          "not yet supported.");
            /* note - no break, fall through */
        case DR_VIOLATION_ACTION_CONTINUE:
            *action = ACTION_CONTINUE;
            break;
        default:
            CLIENT_ASSERT(false, "Security violation event callback returned invalid "
                          "action value.");
        }
    }
}
#endif
/* Notify the client of a nudge. */
/* Delivers a nudge with argument 'arg' to the client identified by 'id'.
 * On Windows, the nudge runs on a thread tracked as a client thread so
 * exit can wait for it; on UNIX, dr_get_mcontext() is supported from the
 * callback by exposing the current mcontext with pc set to next_tag.
 */
void
instrument_nudge(dcontext_t *dcontext, client_id_t id, uint64 arg)
{
    size_t i;
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    ASSERT(dcontext != NULL && dcontext != GLOBAL_DCONTEXT &&
           dcontext == get_thread_private_dcontext());
    /* synch_with_all_threads and flush API assume that client nudge threads
     * hold no dr locks and are !couldbelinking while in client lib code */
    ASSERT_OWN_NO_LOCKS();
    ASSERT(!is_couldbelinking(dcontext));
    /* find the client the nudge is intended for */
    for (i=0; i<num_client_libs; i++) {
        /* until we have nudge-arg support (PR 477454), nudges target the 1st client */
        if (IF_VMX86_ELSE(true, client_libs[i].id == id)) {
            break;
        }
    }
    /* silently drop the nudge if the client is unknown or has no handler */
    if (i == num_client_libs || client_libs[i].nudge_callbacks.num == 0)
        return;
#ifdef WINDOWS
    /* count the number of nudge events so we can make sure they're
     * all finished before exiting
     */
    mutex_lock(&client_thread_count_lock);
    if (block_client_nudge_threads) {
        /* FIXME - would be nice if there was a way to let the external agent know that
         * the nudge event wasn't delivered (but this only happens when the process
         * is detaching or exiting). */
        mutex_unlock(&client_thread_count_lock);
        return;
    }
    /* atomic to avoid locking around the dec */
    ATOMIC_INC(int, num_client_nudge_threads);
    mutex_unlock(&client_thread_count_lock);
    /* We need to mark this as a client controlled thread for synch_with_all_threads
     * and otherwise treat it as native.  Xref PR 230836 on what to do if this
     * thread hits native_exec_syscalls hooks.
     * XXX: this requires extra checks for "not a nudge thread" after IS_CLIENT_THREAD
     * in get_stack_bounds() and instrument_thread_exit_event(): maybe better
     * to have synchall checks do extra checks and have IS_CLIENT_THREAD be
     * false for nudge threads at exit time?
     */
    dcontext->client_data->is_client_thread = true;
    dcontext->thread_record->under_dynamo_control = false;
#else
    /* support calling dr_get_mcontext() on this thread.  the app
     * context should be intact in the current mcontext except
     * pc which we set from next_tag.
     */
    CLIENT_ASSERT(!dcontext->client_data->mcontext_in_dcontext,
                  "internal inconsistency in where mcontext is");
    dcontext->client_data->mcontext_in_dcontext = true;
    /* officially get_mcontext() doesn't always set pc: we do anyway */
    get_mcontext(dcontext)->pc = dcontext->next_tag;
#endif
    call_all(client_libs[i].nudge_callbacks, int (*)(void *, uint64),
             (void *)dcontext, arg);
#ifdef UNIX
    dcontext->client_data->mcontext_in_dcontext = false;
#else
    /* undo the client-thread marking and drop the outstanding-nudge count */
    dcontext->thread_record->under_dynamo_control = true;
    dcontext->client_data->is_client_thread = false;
    ATOMIC_DEC(int, num_client_nudge_threads);
#endif
}
/* Returns the current count of client-owned threads: Windows nudge
 * threads (0 elsewhere) plus sideline threads when CLIENT_SIDELINE.
 */
int
get_num_client_threads(void)
{
    int num = IF_WINDOWS_ELSE(num_client_nudge_threads, 0);
# ifdef CLIENT_SIDELINE
    num += num_client_sideline_threads;
# endif
    return num;
}
#ifdef WINDOWS
/* wait for all nudges to finish */
/* Also blocks any new nudge threads from starting.  Skips the wait
 * entirely if the client itself requested exit (possibly from a nudge).
 */
void
wait_for_outstanding_nudges()
{
    /* block any new nudge threads from starting */
    mutex_lock(&client_thread_count_lock);
    SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
    block_client_nudge_threads = true;
    SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
    DOLOG(1, LOG_TOP, {
        if (num_client_nudge_threads > 0) {
            LOG(GLOBAL, LOG_TOP, 1,
                "Waiting for %d nudges to finish - app is about to kill all threads "
                "except the current one.\n", num_client_nudge_threads);
        }
    });
    /* don't wait if the client requested exit: after all the client might
     * have done so from a nudge, and if the client does want to exit it's
     * its own problem if it misses nudges (and external nudgers should use
     * a finite timeout)
     */
    if (client_requested_exit) {
        mutex_unlock(&client_thread_count_lock);
        return;
    }
    while (num_client_nudge_threads > 0) {
        /* yield with lock released to allow nudges to finish */
        mutex_unlock(&client_thread_count_lock);
        dr_thread_yield();
        mutex_lock(&client_thread_count_lock);
    }
    mutex_unlock(&client_thread_count_lock);
}
#endif /* WINDOWS */
/****************************************************************************/
/* EXPORTED ROUTINES */

DR_API
/* Creates a DR context that can be used in a standalone program.
 * WARNING: this context cannot be used as the drcontext for a thread
 * running under DR control!  It is only for standalone programs that
 * wish to use DR as a library of disassembly, etc. routines.
 */
void *
dr_standalone_init(void)
{
    /* Hand the standalone dcontext back to the caller as an opaque pointer. */
    return (void *) standalone_init();
}
DR_API
/* Aborts the process immediately */
void
dr_abort(void)
{
    /* Optionally dump core first if the dumpcore mask requests it for
     * dr_abort; then terminate without any cleanup (NULL dcontext).
     */
    if (TEST(DUMPCORE_DR_ABORT, dynamo_options.dumpcore_mask))
        os_dump_core("dr_abort");
    os_terminate(NULL, TERMINATE_PROCESS);
}
DR_API
/* Cleanly terminates the process from client code with the given exit code,
 * performing DR cleanup first.  Never returns.
 */
void
dr_exit_process(int exit_code)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
    /* Prevent cleanup from waiting for nudges as this may be called
     * from a nudge!
     * Also suppress leak asserts, as it's hard to clean up from
     * some situations (such as DrMem -crash_at_error).
     */
    client_requested_exit = true;
    SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
#ifdef WINDOWS
    if (dcontext != NULL && dcontext->nudge_target != NULL) {
        /* we need to free the nudge thread stack which may involved
         * switching stacks so we have the nudge thread invoke
         * os_terminate for us
         */
        nudge_thread_cleanup(dcontext, true/*kill process*/, exit_code);
        CLIENT_ASSERT(false, "shouldn't get here");
    }
#endif
    if (!is_currently_on_dstack(dcontext)
        IF_UNIX(&& !is_currently_on_sigaltstack(dcontext))) {
        /* if on app stack or sigaltstack, avoid incorrect leak assert at exit */
        SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
        dr_api_exit = true;
        SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); /* to keep properly nested */
    }
    os_terminate_with_code(dcontext, /* dcontext is required */
                           TERMINATE_CLEANUP|TERMINATE_PROCESS, exit_code);
    CLIENT_ASSERT(false, "shouldn't get here");
}
DR_API
/* Requests a memory dump per the spec.  Returns false if the spec's size
 * field does not match (version/ABI check) or if the requested dump kind is
 * unsupported on this platform (only Windows ldmp is implemented here).
 */
bool
dr_create_memory_dump(dr_memory_dump_spec_t *spec)
{
    if (spec->size != sizeof(dr_memory_dump_spec_t))
        return false;
#ifdef WINDOWS
    if (TEST(DR_MEMORY_DUMP_LDMP, spec->flags))
        return os_dump_core_live(spec->label, spec->ldmp_path, spec->ldmp_path_size);
#endif
    return false;
}
DR_API
/* Returns true if all DynamoRIO caches are thread private. */
bool
dr_using_all_private_caches(void)
{
    /* Private iff no shared-fragment option is enabled. */
    return !SHARED_FRAGMENTS_ENABLED();
}
DR_API
/* Deprecated no-op kept for API compatibility; logs a one-time warning
 * pointing callers at dr_set_process_exit_behavior().
 */
void
dr_request_synchronized_exit(void)
{
    SYSLOG_INTERNAL_WARNING_ONCE("dr_request_synchronized_exit deprecated: "
                                 "use dr_set_process_exit_behavior instead");
}
DR_API
/* Updates the runtime exit-behavior options to match the requested flags.
 * Each option is written only when its current value differs from the
 * requested one, and the normally read-only option struct is unprotected
 * just for the duration of each write.
 */
void
dr_set_process_exit_behavior(dr_exit_flags_t flags)
{
    bool want_multi = TEST(DR_EXIT_MULTI_THREAD, flags);
    bool want_skip = TEST(DR_EXIT_SKIP_THREAD_EXIT, flags);
    if (DYNAMO_OPTION(multi_thread_exit) != want_multi) {
        options_make_writable();
        dynamo_options.multi_thread_exit = want_multi;
        options_restore_readonly();
    }
    if (DYNAMO_OPTION(skip_thread_exit_at_exit) != want_skip) {
        options_make_writable();
        dynamo_options.skip_thread_exit_at_exit = want_skip;
        options_restore_readonly();
    }
}
/* Forwards the client's opt-in for unsafe static-DR behavior to the loader. */
void
dr_allow_unsafe_static_behavior(void)
{
    loader_allow_unsafe_static_behavior();
}
DR_API
/* Returns the option string passed along with a client path via DR's
 * -client_lib option.
 */
/* i#1736: we now token-delimit with quotes, but for backward compat we need to
 * pass a version w/o quotes for dr_get_options().
 */
const char *
dr_get_options(client_id_t id)
{
    size_t i;
    for (i=0; i<num_client_libs; i++) {
        if (client_libs[i].id == id) {
            /* If we already converted, pass the result */
            if (client_libs[i].legacy_options[0] != '\0' ||
                client_libs[i].options[0] == '\0')
                return client_libs[i].legacy_options;
            /* For backward compatibility, we need to remove the token-delimiting
             * quotes.  We tokenize, and then re-assemble the flat string.
             * i#1755: however, for legacy custom frontends that are not re-quoting
             * like drrun now is, we need to avoid removing any quotes from the
             * original strings.  We try to detect this by assuming a frontend will
             * either re-quote everything or nothing.  Ideally we would check all
             * args, but that would require plumbing info from getword() or
             * duplicating its functionality: so instead our heuristic is just checking
             * the first and last chars.
             */
            if (!char_is_quote(client_libs[i].options[0]) ||
                /* Empty string already detected above */
                !char_is_quote(client_libs[i].options[strlen(client_libs[i].
                                                             options)-1])) {
                /* At least one arg is not quoted => better use original */
                snprintf(client_libs[i].legacy_options,
                         BUFFER_SIZE_ELEMENTS(client_libs[i].legacy_options),
                         "%s", client_libs[i].options);
            } else {
                /* All args appear quoted: strip quotes by re-joining the
                 * already-tokenized argv with single spaces.
                 */
                int j;
                size_t sofar = 0;
                for (j = 1/*skip client lib*/; j < client_libs[i].argc; j++) {
                    if (!print_to_buffer(client_libs[i].legacy_options,
                                         BUFFER_SIZE_ELEMENTS(client_libs[i].
                                                              legacy_options),
                                         &sofar, "%s%s", (j == 1) ? "" : " ",
                                         client_libs[i].argv[j]))
                        break;
                }
            }
            NULL_TERMINATE_BUFFER(client_libs[i].legacy_options);
            return client_libs[i].legacy_options;
        }
    }
    CLIENT_ASSERT(false, "dr_get_options(): invalid client id");
    return NULL;
}
DR_API
/* Looks up the client with the given id and hands back pointers to its
 * tokenized option array (no copy is made).  Returns false (and asserts in
 * debug build) if the id is unknown.
 */
bool
dr_get_option_array(client_id_t id, int *argc OUT, const char ***argv OUT)
{
    size_t idx = 0;
    while (idx < num_client_libs) {
        if (client_libs[idx].id == id) {
            *argc = client_libs[idx].argc;
            *argv = client_libs[idx].argv;
            return true;
        }
        idx++;
    }
    CLIENT_ASSERT(false, "dr_get_option_array(): invalid client id");
    return false;
}
DR_API
/* Returns the path to the client library.  Client must pass its ID */
const char *
dr_get_client_path(client_id_t id)
{
    size_t idx;
    /* Linear scan over the (small) registered-client table. */
    for (idx = 0; idx < num_client_libs; idx++) {
        if (client_libs[idx].id != id)
            continue;
        return client_libs[idx].path;
    }
    CLIENT_ASSERT(false, "dr_get_client_path(): invalid client id");
    return NULL;
}
DR_API
/* Returns the base (load address) of the client library with the given id,
 * or NULL (asserting in debug build) if the id is unknown.
 */
byte *
dr_get_client_base(client_id_t id)
{
    size_t idx;
    for (idx = 0; idx < num_client_libs; idx++) {
        if (client_libs[idx].id != id)
            continue;
        return client_libs[idx].start;
    }
    CLIENT_ASSERT(false, "dr_get_client_base(): invalid client id");
    return NULL;
}
DR_API
/* Installs the client's name and bug-report URL into DR's exception/report
 * messages.  Returns false without side effects if either argument is NULL.
 */
bool
dr_set_client_name(const char *name, const char *report_URL)
{
    /* Although set_exception_strings() accepts NULL, clients should pass real vals. */
    if (name != NULL && report_URL != NULL) {
        set_exception_strings(name, report_URL);
        return true;
    }
    return false;
}
/* Records the client-supplied version string for display/diagnostics.
 * Returns false without side effects when passed NULL.
 */
bool
dr_set_client_version_string(const char *version)
{
    if (version != NULL) {
        set_display_version(version);
        return true;
    }
    return false;
}
DR_API const char *
/* Returns the short name of the application being instrumented; on Windows
 * the unqualified (no extension) form is used.
 */
dr_get_application_name(void)
{
#ifdef UNIX
    return get_application_short_name();
#else
    return get_application_short_unqualified_name();
#endif
}
DR_API process_id_t
/* Returns the current process id. */
dr_get_process_id(void)
{
    return (process_id_t) get_process_id();
}
#ifdef UNIX
DR_API
/* Returns the parent process id (UNIX only). */
process_id_t
dr_get_parent_id(void)
{
    return get_parent_id();
}
#endif
#ifdef WINDOWS
DR_API
/* Converts a Windows process handle into a process id; INVALID_PROCESS_ID
 * (== POINTER_MAX, verified by the assert) indicates failure.
 */
process_id_t
dr_convert_handle_to_pid(HANDLE process_handle)
{
    ASSERT(POINTER_MAX == INVALID_PROCESS_ID);
    return process_id_from_handle(process_handle);
}
DR_API
/* Opens a handle to the process with the given pid (Windows only). */
HANDLE
dr_convert_pid_to_handle(process_id_t pid)
{
    return process_handle_from_id(pid);
}
DR_API
/**
 * Returns information about the version of the operating system.
 * Returns whether successful.
 *
 * The caller's info->size acts as a versioning field: each output member is
 * only written when the struct is large enough to contain it.
 */
bool
dr_get_os_version(dr_os_version_info_t *info)
{
    int ver;
    uint sp_major, sp_minor;
    get_os_version_ex(&ver, &sp_major, &sp_minor);
    if (info->size > offsetof(dr_os_version_info_t, version)) {
        switch (ver) {
        case WINDOWS_VERSION_10_1709: info->version = DR_WINDOWS_VERSION_10_1709; break;
        case WINDOWS_VERSION_10_1703: info->version = DR_WINDOWS_VERSION_10_1703; break;
        case WINDOWS_VERSION_10_1607: info->version = DR_WINDOWS_VERSION_10_1607; break;
        case WINDOWS_VERSION_10_1511: info->version = DR_WINDOWS_VERSION_10_1511; break;
        case WINDOWS_VERSION_10:   info->version = DR_WINDOWS_VERSION_10;   break;
        case WINDOWS_VERSION_8_1:  info->version = DR_WINDOWS_VERSION_8_1;  break;
        case WINDOWS_VERSION_8:    info->version = DR_WINDOWS_VERSION_8;    break;
        case WINDOWS_VERSION_7:    info->version = DR_WINDOWS_VERSION_7;    break;
        case WINDOWS_VERSION_VISTA: info->version = DR_WINDOWS_VERSION_VISTA; break;
        case WINDOWS_VERSION_2003: info->version = DR_WINDOWS_VERSION_2003; break;
        case WINDOWS_VERSION_XP:   info->version = DR_WINDOWS_VERSION_XP;   break;
        case WINDOWS_VERSION_2000: info->version = DR_WINDOWS_VERSION_2000; break;
        case WINDOWS_VERSION_NT:   info->version = DR_WINDOWS_VERSION_NT;   break;
        default:
            CLIENT_ASSERT(false, "unsupported windows version");
            /* Previously a release build fell through here and returned true
             * with info->version left uninitialized; fail explicitly instead
             * so callers never consume garbage.
             */
            return false;
        };
    } else
        return false; /* struct too small for any info */
    if (info->size > offsetof(dr_os_version_info_t, service_pack_major)) {
        info->service_pack_major = sp_major;
        if (info->size > offsetof(dr_os_version_info_t, service_pack_minor)) {
            info->service_pack_minor = sp_minor;
        }
    }
    return true;
}
DR_API
/* Returns whether the current (Windows) process is running under WOW64. */
bool
dr_is_wow64(void)
{
    return is_wow64_process(NT_CURRENT_PROCESS);
}
DR_API
/* Returns a pointer to the application's PEB (Windows only). */
void *
dr_get_app_PEB(void)
{
    return get_own_peb();
}
#endif
DR_API
/* Retrieves the current time */
void
dr_get_time(dr_time_t *time)
{
    /* Query milliseconds-since-epoch and expand into the broken-down form. */
    convert_millis_to_date(query_time_millis(), time);
}
DR_API
/* Returns the current time in milliseconds. */
uint64
dr_get_milliseconds(void)
{
    return query_time_millis();
}
DR_API
/* Returns the current time in microseconds. */
uint64
dr_get_microseconds(void)
{
    return query_time_micros();
}
DR_API
/* Returns a pseudo-random value from DR's internal generator; the range is
 * bounded by max (see get_random_offset() for the exact contract).
 */
uint
dr_get_random_value(uint max)
{
    return (uint) get_random_offset(max);
}
DR_API
/* Seeds DR's internal pseudo-random generator. */
void
dr_set_random_seed(uint seed)
{
    set_random_seed(seed);
}
DR_API
/* Returns the seed currently in use by DR's pseudo-random generator. */
uint
dr_get_random_seed(void)
{
    return get_random_seed();
}
/***************************************************************************
* MEMORY ALLOCATION
*
* XXX i#774: once we split vmheap from vmcode, we need to make
* dr_thread_alloc(), dr_global_alloc(), and dr_nonheap_alloc()
* all allocate vmcode-reachable memory. Library-redirected
* allocations do not need to be reachable.
*/
DR_API
/* Allocates memory from DR's memory pool specific to the
 * thread associated with drcontext.
 */
void *
dr_thread_alloc(void *drcontext, size_t size)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    /* Validate the context up front, for consistency with dr_thread_free():
     * a NULL or GLOBAL_DCONTEXT here would misroute the allocation.
     */
    CLIENT_ASSERT(drcontext != NULL, "dr_thread_alloc: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_thread_alloc: drcontext is invalid");
    return heap_alloc(dcontext, size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* Frees thread-specific memory allocated by dr_thread_alloc.
 * size must be the same size passed to dr_thread_alloc.
 */
void
dr_thread_free(void *drcontext, void *mem, size_t size)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL, "dr_thread_free: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_thread_free: drcontext is invalid");
    heap_free(dcontext, mem, size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* Allocates memory from DR's global memory pool.
 */
void *
dr_global_alloc(size_t size)
{
    /* Allocation is charged to the client heap-accounting bucket. */
    return global_heap_alloc(size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* Frees memory allocated by dr_global_alloc.
 * size must be the same size passed to dr_global_alloc.
 */
void
dr_global_free(void *mem, size_t size)
{
    global_heap_free(mem, size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* PR 352427: API routine to allocate executable memory */
void *
dr_nonheap_alloc(size_t size, uint prot)
{
    /* Direct mmap (no heap headers, no guard pages) with caller-chosen prot,
     * tagged VMM_SPECIAL_MMAP; pair with dr_nonheap_free().
     */
    return heap_mmap_ex(size, size, prot, false/*no guard pages*/, VMM_SPECIAL_MMAP);
}
DR_API
/* Frees memory allocated by dr_nonheap_alloc; size must match the
 * original allocation.
 */
void
dr_nonheap_free(void *mem, size_t size)
{
    heap_munmap_ex(mem, size, false/*no guard pages*/, VMM_SPECIAL_MMAP);
}
/* Shared worker for dr_raw_mem_alloc() / dr_custom_alloc(): page-level OS
 * allocation at an optional fixed addr, honoring DR_ALLOC_* flags.  Updates
 * either the all-memory-areas list (DR_ALLOC_NON_DR) or DR's own vm-area
 * list (otherwise).  Returns NULL on failure.
 */
static void *
raw_mem_alloc(size_t size, uint prot, void *addr, dr_alloc_flags_t flags)
{
    byte *p;
    heap_error_code_t error_code;
    CLIENT_ASSERT(ALIGNED(addr, PAGE_SIZE), "addr is not page size aligned");
    if (!TEST(DR_ALLOC_NON_DR, flags)) {
        /* memory alloc/dealloc and updating DR list must be atomic */
        dynamo_vm_areas_lock(); /* if already hold lock this is a nop */
    }
    addr = (void *)ALIGN_BACKWARD(addr, PAGE_SIZE);
    size = ALIGN_FORWARD(size, PAGE_SIZE);
#ifdef WINDOWS
    if (TEST(DR_ALLOC_LOW_2GB, flags)) {
        CLIENT_ASSERT(!TEST(DR_ALLOC_COMMIT_ONLY, flags),
                      "cannot combine commit-only and low-2GB");
        p = os_heap_reserve_in_region(NULL, (byte *)(ptr_uint_t)0x80000000, size,
                                      &error_code, TEST(DR_MEMPROT_EXEC, flags));
        if (p != NULL && !TEST(DR_ALLOC_RESERVE_ONLY, flags)) {
            /* Reservation succeeded but commit failed: roll back fully. */
            if (!os_heap_commit(p, size, prot, &error_code)) {
                os_heap_free(p, size, &error_code);
                p = NULL;
            }
        }
    } else
#endif
        {
            /* We specify that DR_ALLOC_LOW_2GB only applies to x64, so it's
             * ok that the Linux kernel will ignore MAP_32BIT for 32-bit.
             */
#ifdef UNIX
            uint os_flags = TEST(DR_ALLOC_LOW_2GB, flags) ? RAW_ALLOC_32BIT : 0;
#else
            uint os_flags = TEST(DR_ALLOC_RESERVE_ONLY, flags) ? RAW_ALLOC_RESERVE_ONLY :
                (TEST(DR_ALLOC_COMMIT_ONLY, flags) ? RAW_ALLOC_COMMIT_ONLY : 0);
#endif
            if (IF_WINDOWS(TEST(DR_ALLOC_COMMIT_ONLY, flags) &&)
                addr != NULL &&
                !app_memory_pre_alloc(get_thread_private_dcontext(), addr, size, prot,
                                      false))
                p = NULL;
            else
                p = os_raw_mem_alloc(addr, size, prot, os_flags, &error_code);
        }
    if (p != NULL) {
        if (TEST(DR_ALLOC_NON_DR, flags)) {
            all_memory_areas_lock();
            update_all_memory_areas(p, p+size, prot, DR_MEMTYPE_DATA);
            all_memory_areas_unlock();
        } else {
            /* this routine updates allmem for us: */
            add_dynamo_vm_area((app_pc)p, ((app_pc)p)+size, prot,
                               true _IF_DEBUG("fls cb in private lib"));
        }
        RSTATS_ADD_PEAK(client_raw_mmap_size, size);
    }
    if (!TEST(DR_ALLOC_NON_DR, flags))
        dynamo_vm_areas_unlock();
    return p;
}
/* Shared worker for dr_raw_mem_free() / dr_custom_free(): releases memory
 * obtained via raw_mem_alloc() and removes it from the matching bookkeeping
 * list, choosing the lock per the DR_ALLOC_NON_DR flag.  Returns whether the
 * OS free succeeded.
 */
static bool
raw_mem_free(void *addr, size_t size, dr_alloc_flags_t flags)
{
    bool res;
    heap_error_code_t error_code;
    byte *p = addr;
#ifdef UNIX
    uint os_flags = TEST(DR_ALLOC_LOW_2GB, flags) ? RAW_ALLOC_32BIT : 0;
#else
    uint os_flags = TEST(DR_ALLOC_RESERVE_ONLY, flags) ? RAW_ALLOC_RESERVE_ONLY :
        (TEST(DR_ALLOC_COMMIT_ONLY, flags) ? RAW_ALLOC_COMMIT_ONLY : 0);
#endif
    size = ALIGN_FORWARD(size, PAGE_SIZE);
    if (TEST(DR_ALLOC_NON_DR, flags)) {
        /* use lock to avoid racy update on parallel memory allocation,
         * e.g. allocation from another thread at p happens after os_heap_free
         * but before remove_from_all_memory_areas
         */
        all_memory_areas_lock();
    } else {
        /* memory alloc/dealloc and updating DR list must be atomic */
        dynamo_vm_areas_lock(); /* if already hold lock this is a nop */
    }
    res = os_raw_mem_free(p, size, os_flags, &error_code);
    if (TEST(DR_ALLOC_NON_DR, flags)) {
        remove_from_all_memory_areas(p, p + size);
        all_memory_areas_unlock();
    } else {
        /* this routine updates allmem for us: */
        remove_dynamo_vm_area((app_pc)addr, ((app_pc)addr)+size);
    }
    if (!TEST(DR_ALLOC_NON_DR, flags))
        dynamo_vm_areas_unlock();
    if (res)
        RSTATS_SUB(client_raw_mmap_size, size);
    return res;
}
DR_API
/* Allocates page-aligned OS memory outside of DR's own vm areas
 * (DR_ALLOC_NON_DR); see raw_mem_alloc() for details.
 */
void *
dr_raw_mem_alloc(size_t size, uint prot, void *addr)
{
    return raw_mem_alloc(size, prot, addr, DR_ALLOC_NON_DR);
}
DR_API
/* Frees memory allocated by dr_raw_mem_alloc(); size must match. */
bool
dr_raw_mem_free(void *addr, size_t size)
{
    return raw_mem_free(addr, size, DR_ALLOC_NON_DR);
}
/* Single dispatcher behind dr_custom_alloc()/dr_custom_free(): validates the
 * DR_ALLOC_* flag combination and routes to the raw-mmap, nonheap, or heap
 * allocators.  When alloc is true, returns the new allocation (or NULL);
 * when false, frees addr and reports success through *free_res.
 */
static void *
custom_memory_shared(bool alloc, void *drcontext, dr_alloc_flags_t flags, size_t size,
                     uint prot, void *addr, bool *free_res)
{
    CLIENT_ASSERT(alloc || free_res != NULL, "must ask for free_res on free");
    CLIENT_ASSERT(alloc || addr != NULL, "cannot free NULL");
    CLIENT_ASSERT(!TESTALL(DR_ALLOC_NON_DR|DR_ALLOC_CACHE_REACHABLE, flags),
                  "dr_custom_alloc: cannot combine non-DR and cache-reachable");
    CLIENT_ASSERT(!alloc || TEST(DR_ALLOC_FIXED_LOCATION, flags) || addr == NULL,
                  "dr_custom_alloc: address only honored for fixed location");
#ifdef WINDOWS
    CLIENT_ASSERT(!TESTANY(DR_ALLOC_RESERVE_ONLY | DR_ALLOC_COMMIT_ONLY, flags) ||
                  TESTALL(DR_ALLOC_NON_HEAP|DR_ALLOC_NON_DR, flags),
                  "dr_custom_alloc: reserve/commit-only are only for non-DR non-heap");
    CLIENT_ASSERT(!TEST(DR_ALLOC_RESERVE_ONLY, flags) ||
                  !TEST(DR_ALLOC_COMMIT_ONLY, flags),
                  "dr_custom_alloc: cannot combine reserve-only + commit-only");
#endif
    if (TEST(DR_ALLOC_NON_HEAP, flags)) {
        /* Non-heap path: raw OS pages or the nonheap (executable) allocator. */
        CLIENT_ASSERT(drcontext == NULL,
                      "dr_custom_alloc: drcontext must be NULL for non-heap");
        CLIENT_ASSERT(!TEST(DR_ALLOC_THREAD_PRIVATE, flags),
                      "dr_custom_alloc: non-heap cannot be thread-private");
        CLIENT_ASSERT(!TESTALL(DR_ALLOC_CACHE_REACHABLE|DR_ALLOC_LOW_2GB, flags),
                      "dr_custom_alloc: cannot combine low-2GB and cache-reachable");
#ifdef WINDOWS
        CLIENT_ASSERT(addr != NULL || !TEST(DR_ALLOC_COMMIT_ONLY, flags),
                      "dr_custom_alloc: commit-only requires non-NULL addr");
#endif
        if (TEST(DR_ALLOC_LOW_2GB, flags)) {
#ifdef WINDOWS
            CLIENT_ASSERT(!TEST(DR_ALLOC_COMMIT_ONLY, flags),
                          "dr_custom_alloc: cannot combine commit-only and low-2GB");
#endif
            CLIENT_ASSERT(!alloc || addr == NULL,
                          "dr_custom_alloc: cannot pass an addr with low-2GB");
            /* Even if not non-DR, easier to allocate via raw */
            if (alloc)
                return raw_mem_alloc(size, prot, addr, flags);
            else
                *free_res = raw_mem_free(addr, size, flags);
        } else if (TEST(DR_ALLOC_NON_DR, flags)) {
            /* ok for addr to be NULL */
            if (alloc)
                return raw_mem_alloc(size, prot, addr, flags);
            else
                *free_res = raw_mem_free(addr, size, flags);
        } else { /* including DR_ALLOC_CACHE_REACHABLE */
            CLIENT_ASSERT(!alloc || !TEST(DR_ALLOC_CACHE_REACHABLE, flags) ||
                          addr == NULL,
                          "dr_custom_alloc: cannot ask for addr and cache-reachable");
            /* This flag is here solely so we know which version of free to call */
            if (TEST(DR_ALLOC_FIXED_LOCATION, flags)) {
                CLIENT_ASSERT(addr != NULL,
                              "dr_custom_alloc: fixed location requires an address");
                if (alloc)
                    return raw_mem_alloc(size, prot, addr, 0);
                else
                    *free_res = raw_mem_free(addr, size, 0);
            } else {
                if (alloc)
                    return dr_nonheap_alloc(size, prot);
                else {
                    *free_res = true;
                    dr_nonheap_free(addr, size);
                }
            }
        }
    } else {
        /* Heap path: thread-private or global DR heap. */
        if (!alloc)
            *free_res = true;
        CLIENT_ASSERT(!alloc || addr == NULL,
                      "dr_custom_alloc: cannot pass an addr for heap memory");
        CLIENT_ASSERT(drcontext == NULL || TEST(DR_ALLOC_THREAD_PRIVATE, flags),
                      "dr_custom_alloc: drcontext must be NULL for global heap");
        CLIENT_ASSERT(!TEST(DR_ALLOC_LOW_2GB, flags),
                      "dr_custom_alloc: cannot ask for heap in low 2GB");
        CLIENT_ASSERT(!TEST(DR_ALLOC_NON_DR, flags),
                      "dr_custom_alloc: cannot ask for non-DR heap memory");
        /* for now it's all cache-reachable so we ignore DR_ALLOC_CACHE_REACHABLE */
        if (TEST(DR_ALLOC_THREAD_PRIVATE, flags)) {
            if (alloc)
                return dr_thread_alloc(drcontext, size);
            else
                dr_thread_free(drcontext, addr, size);
        } else {
            if (alloc)
                return dr_global_alloc(size);
            else
                dr_global_free(addr, size);
        }
    }
    return NULL;
}
DR_API
/* Flag-driven allocator front end; see custom_memory_shared() for routing. */
void *
dr_custom_alloc(void *drcontext, dr_alloc_flags_t flags, size_t size,
                uint prot, void *addr)
{
    return custom_memory_shared(true, drcontext, flags, size, prot, addr, NULL);
}
DR_API
/* Frees memory obtained via dr_custom_alloc(); the same flags must be passed
 * so the matching deallocator is chosen.
 */
bool
dr_custom_free(void *drcontext, dr_alloc_flags_t flags, void *addr, size_t size)
{
    bool res;
    custom_memory_shared(false, drcontext, flags, size, 0, addr, &res);
    return res;
}
#ifdef UNIX
DR_API
/* With ld's -wrap option, we can supply a replacement for malloc.
 * This routine allocates memory from DR's global memory pool.  Unlike
 * dr_global_alloc(), however, we store the size of the allocation in
 * the first few bytes so __wrap_free() can retrieve it.
 */
void *
__wrap_malloc(size_t size)
{
    return redirect_malloc(size);
}
DR_API
/* With ld's -wrap option, we can supply a replacement for realloc.
 * This routine allocates memory from DR's global memory pool.  Unlike
 * dr_global_alloc(), however, we store the size of the allocation in
 * the first few bytes so __wrap_free() can retrieve it.
 */
void *
__wrap_realloc(void *mem, size_t size)
{
    return redirect_realloc(mem, size);
}
DR_API
/* With ld's -wrap option, we can supply a replacement for calloc.
 * This routine allocates memory from DR's global memory pool.  Unlike
 * dr_global_alloc(), however, we store the size of the allocation in
 * the first few bytes so __wrap_free() can retrieve it.
 */
void *
__wrap_calloc(size_t nmemb, size_t size)
{
    return redirect_calloc(nmemb, size);
}
DR_API
/* With ld's -wrap option, we can supply a replacement for free.  This
 * routine frees memory allocated by __wrap_alloc and expects the
 * allocation size to be available in the few bytes before 'mem'.
 */
void
__wrap_free(void *mem)
{
    redirect_free(mem);
}
#endif
DR_API
/* Changes page protection on [base, base+size).  For app (non-DR) memory the
 * request is first vetted by app_memory_protection_change(); DR-internal
 * regions skip that check and are protected directly.  Returns whether the
 * protection change was performed.
 */
bool
dr_memory_protect(void *base, size_t size, uint new_prot)
{
    /* We do allow the client to modify DR memory, for allocating a
     * region and later making it unwritable.  We should probably
     * allow modifying ntdll, since our general model is to trust the
     * client and let it shoot itself in the foot, but that would require
     * passing in extra args to app_memory_protection_change() to ignore
     * the patch_proof_list: and maybe it is safer to disallow client
     * from putting hooks in ntdll.
     */
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    if (!dynamo_vm_area_overlap(base, ((byte *)base) + size)) {
        uint mod_prot = new_prot;
        uint res = app_memory_protection_change(get_thread_private_dcontext(),
                                                base, size, new_prot, &mod_prot, NULL);
        if (res != DO_APP_MEM_PROT_CHANGE) {
            if (res == FAIL_APP_MEM_PROT_CHANGE ||
                res == PRETEND_APP_MEM_PROT_CHANGE) {
                return false;
            } else {
                /* SUBSET_APP_MEM_PROT_CHANGE should only happen for
                 * PROGRAM_SHEPHERDING.  FIXME: not sure how common
                 * this will be: for now we just fail.
                 */
                return false;
            }
        }
        CLIENT_ASSERT(mod_prot == new_prot, "internal error on dr_memory_protect()");
    }
    return set_protection(base, size, new_prot);
}
DR_API
/* Returns the OS page size in bytes. */
size_t
dr_page_size(void)
{
    return os_page_size();
}
DR_API
/* checks to see that all bytes with addresses from pc to pc+size-1
 * are readable and that reading from there won't generate an exception.
 */
bool
dr_memory_is_readable(const byte *pc, size_t size)
{
    return is_readable_without_exception(pc, size);
}
DR_API
/* OS neutral memory query for clients, just wrapper around our get_memory_info(). */
bool
dr_query_memory(const byte *pc, byte **base_pc, size_t *size, uint *prot)
{
    uint real_prot;
    bool res;
#if defined(UNIX) && defined(HAVE_MEMINFO)
    /* xref PR 246897 - the cached all memory list can have problems when
     * out-of-process entities change the mapings.  For now we use the from
     * os version instead (even though it's slower, and only if we have
     * HAVE_MEMINFO_MAPS support).  FIXME
     * XXX i#853: We could decide allmem vs os with the use_all_memory_areas
     * option.
     */
    res = get_memory_info_from_os(pc, base_pc, size, &real_prot);
#else
    res = get_memory_info(pc, base_pc, size, &real_prot);
#endif
    if (prot != NULL) {
        if (is_pretend_or_executable_writable((app_pc)pc)) {
            /* We can't assert there's no DR_MEMPROT_WRITE b/c we mark selfmod
             * as executable-but-writable and we'll come here.
             */
            real_prot |= DR_MEMPROT_WRITE | DR_MEMPROT_PRETEND_WRITE;
        }
        /* prot output is optional: callers may pass NULL. */
        *prot = real_prot;
    }
    return res;
}
DR_API
/* Extended memory query filling in a dr_mem_info_t; prot is adjusted for
 * regions DR pretends are writable (e.g. selfmod).
 */
bool
dr_query_memory_ex(const byte *pc, OUT dr_mem_info_t *info)
{
    bool res;
#if defined(UNIX) && defined(HAVE_MEMINFO)
    /* PR 246897: all_memory_areas not ready for prime time */
    res = query_memory_ex_from_os(pc, info);
#else
    res = query_memory_ex(pc, info);
#endif
    if (is_pretend_or_executable_writable((app_pc)pc)) {
        /* We can't assert there's no DR_MEMPROT_WRITE b/c we mark selfmod
         * as executable-but-writable and we'll come here.
         */
        info->prot |= DR_MEMPROT_WRITE | DR_MEMPROT_PRETEND_WRITE;
    }
    return res;
}
DR_API
/* Wrapper around our safe_read.  Xref P4 198875, placeholder till we have try/except */
bool
dr_safe_read(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
    return safe_read_ex(base, size, out_buf, bytes_read);
}
DR_API
/* Wrapper around our safe_write.  Xref P4 198875, placeholder till we have try/except */
bool
dr_safe_write(void *base, size_t size, const void *in_buf, size_t *bytes_written)
{
    return safe_write_ex(base, size, in_buf, bytes_written);
}
DR_API
/* First half of the client try/except API: allocates a try-state record on
 * the heap, returns it via *try_cxt, and pushes it onto this thread's
 * try-except stack.  Must be paired with dr_try_stop().
 */
void
dr_try_setup(void *drcontext, void **try_cxt)
{
    /* Yes we're duplicating the code from the TRY() macro but this
     * provides better abstraction and lets us change our impl later
     * vs exposing that macro
     */
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    try_except_context_t *try_state;
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    ASSERT(dcontext != NULL && dcontext == get_thread_private_dcontext());
    ASSERT(try_cxt != NULL);
    /* We allocate on the heap to avoid having to expose the try_except_context_t
     * and dr_jmp_buf_t structs and be tied to their exact layouts.
     * The client is likely to allocate memory inside the try anyway
     * if doing a decode or something.
     */
    try_state = (try_except_context_t *)
        HEAP_TYPE_ALLOC(dcontext, try_except_context_t, ACCT_CLIENT, PROTECTED);
    *try_cxt = try_state;
    try_state->prev_context = dcontext->try_except.try_except_state;
    dcontext->try_except.try_except_state = try_state;
}
/* dr_try_start() is in x86.asm since we can't have an extra frame that's
* going to be torn down between the longjmp and the restore point
*/
DR_API
/* Second half of the client try/except API: pops the try-state pushed by
 * dr_try_setup() off this thread's try-except stack and frees it.
 */
void
dr_try_stop(void *drcontext, void *try_cxt)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    try_except_context_t *try_state = (try_except_context_t *) try_cxt;
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    ASSERT(dcontext != NULL && dcontext == get_thread_private_dcontext());
    ASSERT(try_state != NULL);
    POP_TRY_BLOCK(&dcontext->try_except, *try_state);
    HEAP_TYPE_FREE(dcontext, try_state, try_except_context_t, ACCT_CLIENT, PROTECTED);
}
DR_API
/* Returns whether pc lies within DR's own memory. */
bool
dr_memory_is_dr_internal(const byte *pc)
{
    return is_dynamo_address((app_pc)pc);
}
DR_API
/* Returns whether pc lies within a loaded client library (or extension). */
bool
dr_memory_is_in_client(const byte *pc)
{
    return is_in_client_lib((app_pc)pc);
}
/* Records a newly loaded client/extension library's bounds in the
 * client_aux_libs vmvector (with a NULL payload, distinguishing it from a
 * true auxlib loaded via dr_load_aux_library()).
 */
void
instrument_client_lib_loaded(byte *start, byte *end)
{
    /* i#852: include Extensions as they are really part of the clients and
     * aren't like other private libs.
     * XXX: we only avoid having the client libs on here b/c they're specified via
     * full path and don't go through the loaders' locate routines.
     * Not a big deal if they do end up on here: if they always did we could
     * remove the linear walk in is_in_client_lib().
     */
    /* called prior to instrument_init() */
    init_client_aux_libs();
    vmvector_add(client_aux_libs, start, end, NULL/*not an auxlib*/);
}
/* Removes a client/extension library's bounds from client_aux_libs; tolerant
 * of the vector already being freed during shutdown.
 */
void
instrument_client_lib_unloaded(byte *start, byte *end)
{
    /* called after instrument_exit() */
    if (client_aux_libs != NULL)
        vmvector_remove(client_aux_libs, start, end);
}
/**************************************************
* CLIENT AUXILIARY LIBRARIES
*/
DR_API
/* Loads an auxiliary library for the client, records its bounds in
 * client_aux_libs and the all-memory-areas list, and optionally returns the
 * bounds to the caller.  On bounds-lookup failure the library is unloaded
 * and NULL is returned.
 */
dr_auxlib_handle_t
dr_load_aux_library(const char *name,
                    byte **lib_start /*OPTIONAL OUT*/,
                    byte **lib_end /*OPTIONAL OUT*/)
{
    byte *start, *end;
    dr_auxlib_handle_t lib = load_shared_library(name, true/*reachable*/);
    if (shared_library_bounds(lib, NULL, name, &start, &end)) {
        /* be sure to replace b/c i#852 now adds during load w/ empty data */
        vmvector_add_replace(client_aux_libs, start, end, (void*)lib);
        if (lib_start != NULL)
            *lib_start = start;
        if (lib_end != NULL)
            *lib_end = end;
        all_memory_areas_lock();
        update_all_memory_areas(start, end,
                                /* XXX: see comment in instrument_init()
                                 * on walking the sections and what prot to use
                                 */
                                MEMPROT_READ, DR_MEMTYPE_IMAGE);
        all_memory_areas_unlock();
    } else {
        unload_shared_library(lib);
        lib = NULL;
    }
    return lib;
}
DR_API
/* Looks up an exported routine in an auxiliary library previously loaded via
 * dr_load_aux_library(); returns NULL for a NULL handle.
 */
dr_auxlib_routine_ptr_t
dr_lookup_aux_library_routine(dr_auxlib_handle_t lib, const char *name)
{
    if (lib == NULL)
        return NULL;
    return lookup_library_routine(lib, name);
}
DR_API
/* Unloads an auxiliary library loaded via dr_load_aux_library() and clears
 * its bookkeeping.  The bounds are recovered by iterating client_aux_libs,
 * since with dlopen we cannot query them from the handle alone.
 */
bool
dr_unload_aux_library(dr_auxlib_handle_t lib)
{
    byte *start = NULL, *end = NULL;
    /* unfortunately on linux w/ dlopen we cannot find the bounds w/o
     * either the path or an address so we iterate.
     * once we have our private loader we shouldn't need this:
     * XXX i#157
     */
    vmvector_iterator_t vmvi;
    dr_auxlib_handle_t found = NULL;
    if (lib == NULL)
        return false;
    vmvector_iterator_start(client_aux_libs, &vmvi);
    while (vmvector_iterator_hasnext(&vmvi)) {
        found = (dr_auxlib_handle_t) vmvector_iterator_next(&vmvi, &start, &end);
        if (found == lib)
            break;
    }
    vmvector_iterator_stop(&vmvi);
    if (found == lib) {
        CLIENT_ASSERT(start != NULL && start < end, "logic error");
        vmvector_remove(client_aux_libs, start, end);
        unload_shared_library(lib);
        all_memory_areas_lock();
        update_all_memory_areas(start, end, MEMPROT_NONE, DR_MEMTYPE_FREE);
        all_memory_areas_unlock();
        return true;
    } else {
        CLIENT_ASSERT(false, "invalid aux lib");
        return false;
    }
}
#if defined(WINDOWS) && !defined(X64)
/* XXX i#1633: these routines all have 64-bit handle and routine types for
* handling win8's high ntdll64 in the future. For now the implementation
* treats them as 32-bit types and we do not support win8+.
*/
DR_API
/* Loads a 64-bit auxiliary library from a 32-bit process using the x64
 * system loader, serialized by client_aux_lib64_lock (load_library_64()
 * is racy).
 */
dr_auxlib64_handle_t
dr_load_aux_x64_library(const char *name)
{
    HANDLE h;
    /* We use the x64 system loader.  We assume that x64 state is fine being
     * interrupted at arbitrary points during x86 execution, and that there
     * is little risk of transparency violations.
     */
    /* load_library_64() is racy.  We don't expect anyone else to load
     * x64 libs, but another thread in this client could, so we
     * serialize here.
     */
    mutex_lock(&client_aux_lib64_lock);
    /* XXX: if we switch to our private loader we'll need to add custom
     * search support to look in 64-bit system dir
     */
    /* XXX: I'd add to the client_aux_libs vector, but w/ the system loader
     * loading this I don't know all the dependent libs it might load.
     * Not bothering for now.
     */
    h = load_library_64(name);
    mutex_unlock(&client_aux_lib64_lock);
    return (dr_auxlib64_handle_t) h;
}
DR_API
/* Looks up an export in a 64-bit auxiliary library loaded via
 * dr_load_aux_x64_library().
 */
dr_auxlib64_routine_ptr_t
dr_lookup_aux_x64_library_routine(dr_auxlib64_handle_t lib, const char *name)
{
    uint64 res = get_proc_address_64((uint64)lib, name);
    return (dr_auxlib64_routine_ptr_t) res;
}
DR_API
/* Unloads a 64-bit auxiliary library, serialized by the same lock used for
 * loading.  Returns whether the OS free succeeded.
 */
bool
dr_unload_aux_x64_library(dr_auxlib64_handle_t lib)
{
    bool res;
    mutex_lock(&client_aux_lib64_lock);
    res = free_library_64((HANDLE)(uint)lib); /* uint cast to avoid cl warning */
    mutex_unlock(&client_aux_lib64_lock);
    return res;
}
#endif
/***************************************************************************
* LOCKS
*/
DR_API
/* Initializes a mutex
 */
void *
dr_mutex_create(void)
{
    /* Heap-allocated so the mutex_t layout stays opaque to clients. */
    void *mutex = (void *)HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, mutex_t,
                                          ACCT_CLIENT, UNPROTECTED);
    ASSIGN_INIT_LOCK_FREE(*((mutex_t *) mutex), dr_client_mutex);
    return mutex;
}
DR_API
/* Deletes mutex
 */
void
dr_mutex_destroy(void *mutex)
{
    /* Delete mutex so locks_not_closed()==0 test in dynamo.c passes */
    DELETE_LOCK(*((mutex_t *) mutex));
    HEAP_TYPE_FREE(GLOBAL_DCONTEXT, (mutex_t *)mutex, mutex_t, ACCT_CLIENT, UNPROTECTED);
}
DR_API
/* Locks mutex
 */
void
dr_mutex_lock(void *mutex)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    /* set client_grab_mutex so that we know to set client_thread_safe_for_synch
     * around the actual wait for the lock */
    if (IS_CLIENT_THREAD(dcontext)) {
        dcontext->client_data->client_grab_mutex = mutex;
        /* We do this on the outside so that we're conservative wrt races
         * in the direction of not killing the thread while it has a lock
         */
        dcontext->client_data->mutex_count++;
    }
    mutex_lock((mutex_t *) mutex);
    if (IS_CLIENT_THREAD(dcontext))
        dcontext->client_data->client_grab_mutex = NULL;
}
DR_API
/* Unlocks mutex
 */
void
dr_mutex_unlock(void *mutex)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    mutex_unlock((mutex_t *) mutex);
    /* We do this on the outside so that we're conservative wrt races
     * in the direction of not killing the thread while it has a lock
     */
    if (IS_CLIENT_THREAD(dcontext)) {
        CLIENT_ASSERT(dcontext->client_data->mutex_count > 0,
                      "internal client mutex nesting error");
        dcontext->client_data->mutex_count--;
    }
}
DR_API
/* Tries once to grab the lock, returns whether or not successful
 */
bool
dr_mutex_trylock(void *mutex)
{
    bool success = false;
    dcontext_t *dcontext = get_thread_private_dcontext();
    /* set client_grab_mutex so that we know to set client_thread_safe_for_synch
     * around the actual wait for the lock */
    if (IS_CLIENT_THREAD(dcontext)) {
        dcontext->client_data->client_grab_mutex = mutex;
        /* We do this on the outside so that we're conservative wrt races
         * in the direction of not killing the thread while it has a lock
         */
        dcontext->client_data->mutex_count++;
    }
    success = mutex_trylock((mutex_t *) mutex);
    if (IS_CLIENT_THREAD(dcontext)) {
        /* Undo the optimistic count bump if we did not get the lock. */
        if (!success)
            dcontext->client_data->mutex_count--;
        dcontext->client_data->client_grab_mutex = NULL;
    }
    return success;
}
DR_API
/* Returns whether the calling thread owns the mutex; ownership is only
 * tracked in debug builds, so release builds always report true.
 */
bool
dr_mutex_self_owns(void *mutex)
{
    return IF_DEBUG_ELSE(OWN_MUTEX((mutex_t *)mutex), true);
}
DR_API
/* Marks the mutex as an app-level lock for DR's deadlock/rank checking. */
bool
dr_mutex_mark_as_app(void *mutex)
{
    mutex_t *lock = (mutex_t *) mutex;
    mutex_mark_as_app(lock);
    return true;
}
DR_API
/* Creates and initializes a read-write lock on the global heap. */
void *
dr_rwlock_create(void)
{
    void *rwlock = (void *) HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, read_write_lock_t,
                                            ACCT_CLIENT, UNPROTECTED);
    ASSIGN_INIT_READWRITE_LOCK_FREE(*((read_write_lock_t *)rwlock), dr_client_mutex);
    return rwlock;
}
DR_API
void
dr_rwlock_destroy(void *rwlock)
{
DELETE_READWRITE_LOCK(*((read_write_lock_t *) rwlock));
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, (read_write_lock_t *)rwlock, read_write_lock_t,
ACCT_CLIENT, UNPROTECTED);
}
DR_API
void
dr_rwlock_read_lock(void *rwlock)
{
read_lock((read_write_lock_t *)rwlock);
}
DR_API
void
dr_rwlock_read_unlock(void *rwlock)
{
read_unlock((read_write_lock_t *)rwlock);
}
DR_API
void
dr_rwlock_write_lock(void *rwlock)
{
write_lock((read_write_lock_t *)rwlock);
}
DR_API
void
dr_rwlock_write_unlock(void *rwlock)
{
write_unlock((read_write_lock_t *)rwlock);
}
DR_API
bool
dr_rwlock_write_trylock(void *rwlock)
{
return write_trylock((read_write_lock_t *)rwlock);
}
DR_API
bool
dr_rwlock_self_owns_write_lock(void *rwlock)
{
return self_owns_write_lock((read_write_lock_t *)rwlock);
}
DR_API
bool
dr_rwlock_mark_as_app(void *rwlock)
{
read_write_lock_t *lock = (read_write_lock_t *) rwlock;
mutex_mark_as_app(&lock->lock);
return true;
}
DR_API
/* Allocates and initializes a recursive lock from unprotected global
 * client heap.  Free with dr_recurlock_destroy().
 */
void *
dr_recurlock_create(void)
{
    void *reclock = (void *) HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, recursive_lock_t,
                                             ACCT_CLIENT, UNPROTECTED);
    ASSIGN_INIT_RECURSIVE_LOCK_FREE(*((recursive_lock_t *)reclock), dr_client_mutex);
    return reclock;
}
DR_API
/* Deletes and frees a lock created by dr_recurlock_create(). */
void
dr_recurlock_destroy(void *reclock)
{
    DELETE_RECURSIVE_LOCK(*((recursive_lock_t *) reclock));
    HEAP_TYPE_FREE(GLOBAL_DCONTEXT, (recursive_lock_t *)reclock, recursive_lock_t,
                   ACCT_CLIENT, UNPROTECTED);
}
DR_API
/* Acquires the recursive lock; nested acquisitions by the owner succeed. */
void
dr_recurlock_lock(void *reclock)
{
    acquire_recursive_lock((recursive_lock_t *)reclock);
}
DR_API
/* Acquires the recursive lock on behalf of the app, using the app's full
 * machine context (mc must carry DR_MC_ALL).
 */
void
dr_app_recurlock_lock(void *reclock, dr_mcontext_t *mc)
{
    CLIENT_ASSERT(mc->flags == DR_MC_ALL,
                  "mcontext must be for DR_MC_ALL");
    acquire_recursive_app_lock((recursive_lock_t *)reclock,
                               dr_mcontext_as_priv_mcontext(mc));
}
DR_API
/* Releases one level of the recursive lock. */
void
dr_recurlock_unlock(void *reclock)
{
    release_recursive_lock((recursive_lock_t *)reclock);
}
DR_API
/* Tries once to acquire the recursive lock; returns whether successful. */
bool
dr_recurlock_trylock(void *reclock)
{
    return try_recursive_lock((recursive_lock_t *)reclock);
}
DR_API
/* Returns whether the calling thread owns the recursive lock. */
bool
dr_recurlock_self_owns(void *reclock)
{
    return self_owns_recursive_lock((recursive_lock_t *)reclock);
}
DR_API
/* Marks the recursive lock's underlying mutex as an app lock for
 * deadlock-rank purposes.  Always returns true.
 */
bool
dr_recurlock_mark_as_app(void *reclock)
{
    recursive_lock_t *lock = (recursive_lock_t *) reclock;
    mutex_mark_as_app(&lock->lock);
    return true;
}
DR_API
/* Creates an event object usable with the dr_event_* routines below. */
void *
dr_event_create(void)
{
    return (void *)create_event();
}
DR_API
/* Destroys an event created by dr_event_create().  Always returns true. */
bool
dr_event_destroy(void *event)
{
    destroy_event((event_t)event);
    return true;
}
DR_API
/* Blocks until the event is signaled.  Always returns true.
 * For client threads we mark the thread safe-for-synch around the wait so
 * DR can suspend/synch with it while it blocks.
 */
bool
dr_event_wait(void *event)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    if (IS_CLIENT_THREAD(dcontext))
        dcontext->client_data->client_thread_safe_for_synch = true;
    wait_for_event((event_t)event, 0);
    if (IS_CLIENT_THREAD(dcontext))
        dcontext->client_data->client_thread_safe_for_synch = false;
    return true;
}
DR_API
/* Signals the event, waking waiters.  Always returns true. */
bool
dr_event_signal(void *event)
{
    signal_event((event_t)event);
    return true;
}
DR_API
/* Resets the event to unsignaled.  Always returns true. */
bool
dr_event_reset(void *event)
{
    reset_event((event_t)event);
    return true;
}
DR_API
/* Marks the current thread as safe (enter=true) or unsafe (enter=false)
 * to suspend at its current point.  The caller must hold no locks.
 * Always returns true.
 */
bool
dr_mark_safe_to_suspend(void *drcontext, bool enter)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    ASSERT_OWN_NO_LOCKS();
    /* We need to return so we can't call check_wait_at_safe_spot().
     * We don't set mcontext b/c noone should examine it.
     */
    if (enter)
        set_synch_state(dcontext, THREAD_SYNCH_NO_LOCKS_NO_XFER);
    else
        set_synch_state(dcontext, THREAD_SYNCH_NONE);
    return true;
}
DR_API
/* Atomically adds val to *x and returns the resulting sum. */
int
dr_atomic_add32_return_sum(volatile int *x, int val)
{
    return atomic_add_exchange_int(x, val);
}
/***************************************************************************
* MODULES
*/
DR_API
/* Looks up the module data containing pc. Returns NULL if not found.
 * Returned module_data_t must be freed with dr_free_module_data().
 * The copy is made under the module-info lock so it is a consistent
 * snapshot.
 */
module_data_t *
dr_lookup_module(byte *pc)
{
    module_area_t *area;
    module_data_t *client_data;
    os_get_module_info_lock();
    area = module_pc_lookup(pc);
    client_data = copy_module_area_to_module_data(area);
    os_get_module_info_unlock();
    return client_data;
}
DR_API
/* Returns the module containing the app's image entry point, i.e., the
 * main executable.  Free with dr_free_module_data().
 */
module_data_t *
dr_get_main_module(void)
{
    return dr_lookup_module(get_image_entry());
}
DR_API
/* Case-insensitive lookup of a loaded module by name.  Returns a copy of
 * the module data (free with dr_free_module_data()), or NULL if no loaded
 * module matches.
 */
module_data_t *
dr_lookup_module_by_name(const char *name)
{
    /* The module list is indexed by pc, and get_module_handle() is unsafe
     * to call at arbitrary times, so we simply walk the full list here.
     */
    module_data_t *found = NULL;
    module_iterator_t *mi = module_iterator_start();
    CLIENT_ASSERT((name != NULL), "dr_lookup_module_info_by_name: null name");
    while (found == NULL && module_iterator_hasnext(mi)) {
        module_area_t *area = module_iterator_next(mi);
        const char *modname = GET_MODULE_NAME(&area->names);
        if (modname != NULL && strcasecmp(modname, name) == 0)
            found = copy_module_area_to_module_data(area);
    }
    module_iterator_stop(mi);
    return found;
}
/* Singly-linked snapshot node holding one loaded module's copied data. */
typedef struct _client_mod_iterator_list_t {
    module_data_t *info;
    struct _client_mod_iterator_list_t *next;
} client_mod_iterator_list_t;
/* Client-visible module iterator: walks a snapshot taken at start time,
 * so it is immune to module load/unload during iteration.
 */
typedef struct {
    client_mod_iterator_list_t *current;
    client_mod_iterator_list_t *full_list;
} client_mod_iterator_t;
DR_API
/* Initialize a new client module iterator.  Copies the entire module list
 * up front under the internal iterator; free with dr_module_iterator_stop().
 */
dr_module_iterator_t *
dr_module_iterator_start(void)
{
    client_mod_iterator_t *client_iterator = (client_mod_iterator_t *)
        HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, client_mod_iterator_t, ACCT_CLIENT, UNPROTECTED);
    module_iterator_t *dr_iterator = module_iterator_start();
    memset(client_iterator, 0, sizeof(*client_iterator));
    while (module_iterator_hasnext(dr_iterator)) {
        module_area_t *area = module_iterator_next(dr_iterator);
        client_mod_iterator_list_t *list = (client_mod_iterator_list_t *)
            HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, client_mod_iterator_list_t, ACCT_CLIENT,
                            UNPROTECTED);
        ASSERT(area != NULL);
        list->info = copy_module_area_to_module_data(area);
        list->next = NULL;
        /* Append to the tail; "current" doubles as the tail pointer while
         * building, then is reset to the head below.
         */
        if (client_iterator->current == NULL) {
            client_iterator->current = list;
            client_iterator->full_list = client_iterator->current;
        } else {
            client_iterator->current->next = list;
            client_iterator->current = client_iterator->current->next;
        }
    }
    module_iterator_stop(dr_iterator);
    client_iterator->current = client_iterator->full_list;
    return (dr_module_iterator_t)client_iterator;
}
DR_API
/* Returns whether the iterator has another loaded module to return. */
bool
dr_module_iterator_hasnext(dr_module_iterator_t *mi)
{
    client_mod_iterator_t *ci = (client_mod_iterator_t *) mi;
    CLIENT_ASSERT((mi != NULL), "dr_module_iterator_hasnext: null iterator");
    return ci->current != NULL;
}
DR_API
/* Returns the module_data_t for the next loaded module and advances the
 * iterator.  Returns NULL (after asserting) if the iterator is exhausted.
 */
module_data_t *
dr_module_iterator_next(dr_module_iterator_t *mi)
{
    client_mod_iterator_t *ci = (client_mod_iterator_t *) mi;
    client_mod_iterator_list_t *node;
    CLIENT_ASSERT((mi != NULL), "dr_module_iterator_next: null iterator");
    CLIENT_ASSERT((ci->current != NULL), "dr_module_iterator_next: has no next, use "
                  "dr_module_iterator_hasnext() first");
    if (ci->current == NULL)
        return NULL;
    node = ci->current;
    ci->current = node->next;
    return node->info;
}
DR_API
/* Free the module iterator.  First frees any module_data_t copies the
 * client never consumed, then frees every list node, then the iterator.
 */
void
dr_module_iterator_stop(dr_module_iterator_t *mi)
{
    client_mod_iterator_t *ci = (client_mod_iterator_t *)mi;
    CLIENT_ASSERT((mi != NULL), "dr_module_iterator_stop: null iterator");
    /* free module_data_t's we didn't give to the client */
    while (ci->current != NULL) {
        dr_free_module_data(ci->current->info);
        ci->current = ci->current->next;
    }
    /* second pass: free the list nodes themselves */
    ci->current = ci->full_list;
    while (ci->current != NULL) {
        client_mod_iterator_list_t *next = ci->current->next;
        HEAP_TYPE_FREE(GLOBAL_DCONTEXT, ci->current, client_mod_iterator_list_t,
                       ACCT_CLIENT, UNPROTECTED);
        ci->current = next;
    }
    HEAP_TYPE_FREE(GLOBAL_DCONTEXT, ci, client_mod_iterator_t, ACCT_CLIENT, UNPROTECTED);
}
DR_API
/* Get the name dr uses for this module.  Returns NULL for a NULL arg or
 * an unnamed module.
 */
const char *
dr_module_preferred_name(const module_data_t *data)
{
    if (data == NULL)
        return NULL;
    return GET_MODULE_NAME(&data->names);
}
#ifdef WINDOWS
DR_API
/* If pc is within a section of module lib returns true and (optionally) a copy of
 * the IMAGE_SECTION_HEADER in section_out. If pc is not within a section of the
 * module mod return false. */
bool
dr_lookup_module_section(module_handle_t lib, byte *pc, IMAGE_SECTION_HEADER *section_out)
{
    CLIENT_ASSERT((lib != NULL), "dr_lookup_module_section: null module_handle_t");
    return module_pc_section_lookup((app_pc)lib, pc, section_out);
}
#endif
/* i#805: Instead of exposing multiple instruction levels, we expose a way for
* clients to turn off instrumentation. Then DR can avoid a full decode and we
* can save some time on modules that are not interesting.
* XXX: This breaks other clients and extensions, in particular drwrap, which
* can miss call and return sites in the uninstrumented module.
*/
DR_API
/* Enables or disables instrumentation for the given module.  Only legal
 * from within that module's own load event, before any of its code has
 * executed.  Returns whether the module was found.
 */
bool
dr_module_set_should_instrument(module_handle_t handle, bool should_instrument)
{
    module_area_t *ma;
    DEBUG_DECLARE(dcontext_t *dcontext = get_thread_private_dcontext());
    IF_DEBUG(executable_areas_lock());
    os_get_module_info_write_lock();
    ma = module_pc_lookup((byte*)handle);
    if (ma != NULL) {
        /* This kind of obviates the need for handle, but it makes the API more
         * explicit.
         */
        CLIENT_ASSERT(dcontext->client_data->no_delete_mod_data->handle == handle,
                      "Do not call dr_module_set_should_instrument() outside "
                      "of the module's own load event");
        ASSERT(!executable_vm_area_executed_from(ma->start, ma->end));
        if (should_instrument) {
            ma->flags &= ~MODULE_NULL_INSTRUMENT;
        } else {
            ma->flags |= MODULE_NULL_INSTRUMENT;
        }
    }
    os_get_module_info_write_unlock();
    IF_DEBUG(executable_areas_unlock());
    return (ma != NULL);
}
DR_API
/* Returns whether instrumentation is enabled for the given module.
 * Defaults to true, including (after asserting) for an invalid handle.
 */
bool
dr_module_should_instrument(module_handle_t handle)
{
    bool should_instrument = true;
    module_area_t *ma;
    os_get_module_info_lock();
    ma = module_pc_lookup((byte*)handle);
    CLIENT_ASSERT(ma != NULL, "invalid module handle");
    if (ma != NULL) {
        should_instrument = !TEST(MODULE_NULL_INSTRUMENT, ma->flags);
    }
    os_get_module_info_unlock();
    return should_instrument;
}
DR_API
/* Returns the entry point of the function with the given name in the module
 * with the given handle.
 * We're not taking in module_data_t to make it simpler for the client
 * to iterate or lookup the module_data_t, store the single-field
 * handle, and then free the data right away: besides, module_data_t
 * is not an opaque type.
 */
generic_func_t
dr_get_proc_address(module_handle_t lib, const char *name)
{
#ifdef WINDOWS
    /* resolve export forwarders to the final target */
    return get_proc_address_resolve_forward(lib, name);
#else
    return get_proc_address(lib, name);
#endif
}
DR_API
/* Extended lookup that also reports (on UNIX) whether the export is an
 * indirect-code entry (e.g., an ifunc).  Returns whether found.
 * info_len guards against struct-size skew across versions.
 */
bool
dr_get_proc_address_ex(module_handle_t lib, const char *name,
                       dr_export_info_t *info OUT, size_t info_len)
{
    /* If we add new fields we'll check various values of info_len */
    if (info == NULL || info_len < sizeof(*info))
        return false;
#ifdef WINDOWS
    info->address = get_proc_address_resolve_forward(lib, name);
    info->is_indirect_code = false;
#else
    info->address = get_proc_address_ex(lib, name, &info->is_indirect_code);
#endif
    return (info->address != NULL);
}
/* Maps an executable file image for inspection (not execution).
 * Returns the base, and the mapped size in *size, or NULL on failure.
 * Not yet implemented on Mac (i#1285).
 */
byte *
dr_map_executable_file(const char *filename, dr_map_executable_flags_t flags,
                       size_t *size OUT)
{
#ifdef MACOS
    /* XXX i#1285: implement private loader on Mac */
    return NULL;
#else
    modload_flags_t mflags = MODLOAD_NOT_PRIVLIB;
    if (TEST(DR_MAPEXE_SKIP_WRITABLE, flags))
        mflags |= MODLOAD_SKIP_WRITABLE;
    if (filename == NULL)
        return NULL;
    return privload_map_and_relocate(filename, size, mflags);
#endif
}
/* Unmaps a file mapped by dr_map_executable_file(). */
bool
dr_unmap_executable_file(byte *base, size_t size)
{
    return unmap_file(base, size);
}
DR_API
/* Creates a new directory. Fails if the directory already exists
 * or if it can't be created.
 */
bool
dr_create_dir(const char *fname)
{
    return os_create_dir(fname, CREATE_DIR_REQUIRE_NEW);
}
DR_API
/* Deletes the given directory.  Returns whether successful. */
bool
dr_delete_dir(const char *fname)
{
    return os_delete_dir(fname);
}
DR_API
/* Writes the current working directory into buf (bufsz bytes).
 * Returns whether successful.
 */
bool
dr_get_current_directory(char *buf, size_t bufsz)
{
    return os_get_current_dir(buf, bufsz);
}
DR_API
/* Checks existence of a directory. */
bool
dr_directory_exists(const char *fname)
{
    /* second arg: true == require a directory */
    return os_file_exists(fname, true);
}
DR_API
/* Checks for the existence of a file. */
bool
dr_file_exists(const char *fname)
{
    /* second arg: false == a regular file, not a directory */
    return os_file_exists(fname, false);
}
DR_API
/* Opens a file in the mode specified by mode_flags.
 * Returns INVALID_FILE if unsuccessful.
 * Exactly one write mode (REQUIRE_NEW, APPEND, OVERWRITE, or WRITE_ONLY)
 * may be combined with DR_FILE_READ; multiple write modes assert.
 */
file_t
dr_open_file(const char *fname, uint mode_flags)
{
    uint flags = 0;
    if (TEST(DR_FILE_WRITE_REQUIRE_NEW, mode_flags)) {
        flags |= OS_OPEN_WRITE | OS_OPEN_REQUIRE_NEW;
    }
    if (TEST(DR_FILE_WRITE_APPEND, mode_flags)) {
        CLIENT_ASSERT((flags == 0), "dr_open_file: multiple write modes selected");
        flags |= OS_OPEN_WRITE | OS_OPEN_APPEND;
    }
    if (TEST(DR_FILE_WRITE_OVERWRITE, mode_flags)) {
        CLIENT_ASSERT((flags == 0), "dr_open_file: multiple write modes selected");
        flags |= OS_OPEN_WRITE;
    }
    if (TEST(DR_FILE_WRITE_ONLY, mode_flags)) {
        CLIENT_ASSERT((flags == 0), "dr_open_file: multiple write modes selected");
        flags |= OS_OPEN_WRITE_ONLY;
    }
    if (TEST(DR_FILE_READ, mode_flags))
        flags |= OS_OPEN_READ;
    CLIENT_ASSERT((flags != 0), "dr_open_file: no mode selected");
    if (TEST(DR_FILE_ALLOW_LARGE, mode_flags))
        flags |= OS_OPEN_ALLOW_LARGE;
    if (TEST(DR_FILE_CLOSE_ON_FORK, mode_flags))
        flags |= OS_OPEN_CLOSE_ON_FORK;
    /* all client-opened files are protected */
    return os_open_protected(fname, flags);
}
DR_API
/* Closes file f previously opened with dr_open_file().
 */
void
dr_close_file(file_t f)
{
    /* all client-opened files are protected */
    os_close_protected(f);
}
DR_API
/* Renames the file src to dst.  If replace is false the rename fails when
 * dst already exists. */
bool
dr_rename_file(const char *src, const char *dst, bool replace)
{
    return os_rename_file(src, dst, replace);
}
DR_API
/* Deletes a file. */
bool
dr_delete_file(const char *filename)
{
    /* os_delete_mapped_file should be a superset of os_delete_file, so we use
     * it.
     */
    return os_delete_mapped_file(filename);
}
DR_API
/* Flushes any buffers for file f
 */
void
dr_flush_file(file_t f)
{
    os_flush(f);
}
DR_API
/* Writes count bytes from buf to f.
 * Returns the actual number written.
 * On Windows, writes to STDOUT/STDERR are redirected through the console
 * path when console printing has been enabled.
 */
ssize_t
dr_write_file(file_t f, const void *buf, size_t count)
{
#ifdef WINDOWS
    if ((f == STDOUT || f == STDERR) && print_to_console)
        return dr_write_to_console_varg(f == STDOUT, "%.*s", count, buf);
    else
#endif
        return os_write(f, buf, count);
}
DR_API
/* Reads up to count bytes from f into buf.
 * Returns the actual number read.
 */
ssize_t
dr_read_file(file_t f, void *buf, size_t count)
{
    return os_read(f, buf, count);
}
DR_API
/* sets the current file position for file f to offset bytes from the specified origin
 * returns true if successful */
bool
dr_file_seek(file_t f, int64 offset, int origin)
{
    CLIENT_ASSERT(origin == DR_SEEK_SET || origin == DR_SEEK_CUR || origin == DR_SEEK_END,
                  "dr_file_seek: invalid origin value");
    return os_seek(f, offset, origin);
}
DR_API
/* gets the current file position for file f in bytes from start of file */
int64
dr_file_tell(file_t f)
{
    return os_tell(f);
}
DR_API
/* Duplicates the file handle f.  Returns INVALID_FILE on failure. */
file_t
dr_dup_file_handle(file_t f)
{
#ifdef UNIX
    /* returns -1 on failure == INVALID_FILE */
    return dup_syscall(f);
#else
    HANDLE ht = INVALID_HANDLE_VALUE;
    NTSTATUS res = duplicate_handle(NT_CURRENT_PROCESS, f, NT_CURRENT_PROCESS,
                                    &ht, SYNCHRONIZE, 0,
                                    DUPLICATE_SAME_ACCESS|DUPLICATE_SAME_ATTRIBUTES);
    if (!NT_SUCCESS(res))
        return INVALID_FILE;
    else
        return ht;
#endif
}
DR_API
/* Stores the size of the file fd in *size.  Returns whether successful. */
bool
dr_file_size(file_t fd, OUT uint64 *size)
{
    return os_get_file_size_by_handle(fd, size);
}
DR_API
/* Maps (a portion of) file f into memory at the requested address with the
 * requested protection, translating DR_MAP_* flags to internal MAP_FILE_*
 * flags.  Returns the mapped base, or NULL on failure.
 */
void *
dr_map_file(file_t f, size_t *size INOUT, uint64 offs, app_pc addr, uint prot,
            uint flags)
{
    return (void *)
        map_file(f, size, offs, addr, prot,
                 (TEST(DR_MAP_PRIVATE, flags) ? MAP_FILE_COPY_ON_WRITE : 0) |
                 IF_WINDOWS((TEST(DR_MAP_IMAGE, flags) ? MAP_FILE_IMAGE : 0) |)
                 IF_UNIX((TEST(DR_MAP_FIXED, flags) ? MAP_FILE_FIXED : 0) |)
                 (TEST(DR_MAP_CACHE_REACHABLE, flags) ? MAP_FILE_REACHABLE : 0));
}
DR_API
/* Unmaps a region mapped by dr_map_file().  map must be page-aligned and
 * must point into a live mapping; returns whether successful.
 */
bool
dr_unmap_file(void *map, size_t size)
{
    dr_mem_info_t info;
    CLIENT_ASSERT(ALIGNED(map, PAGE_SIZE),
                  "dr_unmap_file: map is not page aligned");
    if (!dr_query_memory_ex(map, &info) /* fail to query */ ||
        info.type == DR_MEMTYPE_FREE /* not mapped file */) {
        CLIENT_ASSERT(false, "dr_unmap_file: incorrect file map");
        return false;
    }
#ifdef WINDOWS
    /* On Windows, the whole file will be unmapped instead, so we adjust
     * the bound to make sure vm_areas are updated correctly.
     */
    map = info.base_pc;
    if (info.type == DR_MEMTYPE_IMAGE) {
        /* image mappings span the full allocation, not just this region */
        size = get_allocation_size(map, NULL);
    } else {
        size = info.size;
    }
#endif
    return unmap_file((byte *) map, size);
}
DR_API
/* Writes a printf-style message to the thread's log file (or the main log
 * file when drcontext is NULL), filtered by the -logmask/-loglevel options.
 * No-op in release builds.
 */
void
dr_log(void *drcontext, uint mask, uint level, const char *fmt, ...)
{
#ifdef DEBUG
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    va_list ap;
    if (stats != NULL &&
        ((stats->logmask & mask) == 0 ||
         stats->loglevel < level))
        return;
    va_start(ap, fmt);
    if (dcontext != NULL)
        do_file_write(dcontext->logfile, fmt, ap);
    else
        do_file_write(main_logfile, fmt, ap);
    va_end(ap);
#else
    return; /* no logging if not debug */
#endif
}
DR_API
/* Returns the log file for the drcontext thread.
 * If drcontext is NULL, returns the main log file.
 * Returns INVALID_FILE in release builds.
 */
file_t
dr_get_logfile(void *drcontext)
{
#ifdef DEBUG
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    if (dcontext != NULL)
        return dcontext->logfile;
    else
        return main_logfile;
#else
    return INVALID_FILE;
#endif
}
DR_API
/* Returns true iff the -stderr_mask runtime option is non-zero, indicating
 * that the user wants notification messages printed to stderr.
 */
bool
dr_is_notify_on(void)
{
    return (dynamo_options.stderr_mask != 0);
}
#ifdef WINDOWS
/* Accessors for the app's standard handles (from the PEB). */
DR_API file_t
dr_get_stdout_file(void)
{
    return get_stdout_handle();
}
DR_API file_t
dr_get_stderr_file(void)
{
    return get_stderr_handle();
}
DR_API file_t
dr_get_stdin_file(void)
{
    return get_stdin_handle();
}
#endif
#ifdef PROGRAM_SHEPHERDING
DR_API void
/* Writes a forensics report for the given security violation/action pair to
 * file.  Maps the client-visible enums to internal ones; unsupported or
 * invalid values assert and return without writing.
 */
dr_write_forensics_report(void *dcontext, file_t file,
                          dr_security_violation_type_t violation,
                          dr_security_violation_action_t action,
                          const char *violation_name)
{
    security_violation_t sec_violation;
    action_type_t sec_action;
    switch (violation) {
    case DR_RCO_STACK_VIOLATION:
        sec_violation = STACK_EXECUTION_VIOLATION;
        break;
    case DR_RCO_HEAP_VIOLATION:
        sec_violation = HEAP_EXECUTION_VIOLATION;
        break;
    case DR_RCT_RETURN_VIOLATION:
        sec_violation = RETURN_TARGET_VIOLATION;
        break;
    case DR_RCT_INDIRECT_CALL_VIOLATION:
        sec_violation = INDIRECT_CALL_RCT_VIOLATION;
        break;
    case DR_RCT_INDIRECT_JUMP_VIOLATION:
        sec_violation = INDIRECT_JUMP_RCT_VIOLATION;
        break;
    default:
        CLIENT_ASSERT(false, "dr_write_forensics_report does not support "
                      "DR_UNKNOWN_VIOLATION or invalid violation types");
        return;
    }
    switch (action) {
    case DR_VIOLATION_ACTION_KILL_PROCESS:
        sec_action = ACTION_TERMINATE_PROCESS;
        break;
    case DR_VIOLATION_ACTION_CONTINUE:
    case DR_VIOLATION_ACTION_CONTINUE_CHANGED_CONTEXT:
        sec_action = ACTION_CONTINUE;
        break;
    case DR_VIOLATION_ACTION_KILL_THREAD:
        sec_action = ACTION_TERMINATE_THREAD;
        break;
    case DR_VIOLATION_ACTION_THROW_EXCEPTION:
        sec_action = ACTION_THROW_EXCEPTION;
        break;
    default:
        CLIENT_ASSERT(false, "dr_write_forensics_report invalid action selection");
        return;
    }
    /* FIXME - could use a better message. */
    append_diagnostics(file, action_message[sec_action], violation_name, sec_violation);
}
#endif /* PROGRAM_SHEPHERDING */
#ifdef WINDOWS
DR_API void
/* Pops up a modal message box with a printf-style message (truncated to
 * MAX_LOG_LENGTH).  Blocks until dismissed; for client threads the thread
 * is marked safe-for-synch around the wait.
 */
dr_messagebox(const char *fmt, ...)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    char msg[MAX_LOG_LENGTH];
    wchar_t wmsg[MAX_LOG_LENGTH];
    va_list ap;
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    va_start(ap, fmt);
    vsnprintf(msg, BUFFER_SIZE_ELEMENTS(msg), fmt, ap);
    NULL_TERMINATE_BUFFER(msg);
    /* widen for the native NT message box */
    snwprintf(wmsg, BUFFER_SIZE_ELEMENTS(wmsg), L"%S", msg);
    NULL_TERMINATE_BUFFER(wmsg);
    if (IS_CLIENT_THREAD(dcontext))
        dcontext->client_data->client_thread_safe_for_synch = true;
    nt_messagebox(wmsg, debugbox_get_title());
    if (IS_CLIENT_THREAD(dcontext))
        dcontext->client_data->client_thread_safe_for_synch = false;
    va_end(ap);
}
/* Formats and writes a message to the attached console via the private
 * kernel32!WriteFile (which routes to WriteConsole).  Returns the number of
 * bytes written, or 0 on failure or truncation.
 */
static ssize_t
dr_write_to_console(bool to_stdout, const char *fmt, va_list ap)
{
    bool res = true;
    char msg[MAX_LOG_LENGTH];
    uint written = 0;
    int len;
    HANDLE std;
    CLIENT_ASSERT(dr_using_console(), "internal logic error");
    ASSERT(priv_kernel32 != NULL &&
           kernel32_WriteFile != NULL);
    /* kernel32!GetStdHandle(STD_OUTPUT_HANDLE) == our PEB-based get_stdout_handle */
    std = (to_stdout ? get_stdout_handle() : get_stderr_handle());
    if (std == INVALID_HANDLE_VALUE)
        return 0; /* return type is ssize_t: 0 bytes written, not "false" */
    len = vsnprintf(msg, BUFFER_SIZE_ELEMENTS(msg), fmt, ap);
    /* Let user know if message was truncated */
    if (len < 0 || len == BUFFER_SIZE_ELEMENTS(msg))
        res = false;
    NULL_TERMINATE_BUFFER(msg);
    /* Make this routine work in all kinds of windows by going through
     * kernel32!WriteFile, which will call WriteConsole for us.
     */
    res = res &&
        kernel32_WriteFile(std, msg, (DWORD) strlen(msg), (LPDWORD) &written, NULL);
    return (res ? written : 0);
}
/* Varargs convenience wrapper around dr_write_to_console(). */
static ssize_t
dr_write_to_console_varg(bool to_stdout, const char *fmt, ...)
{
    va_list ap;
    ssize_t res;
    va_start(ap, fmt);
    res = dr_write_to_console(to_stdout, fmt, ap);
    va_end(ap);
    return res;
}
DR_API
/* Returns whether the app is attached to a console (cmd window).
 * On Win8+ we query the stderr handle's device type, attaching to the
 * parent's console first if the handle is invalid (gui app + console
 * parent).  Pre-Win8 we use the legacy handle-bit heuristic that
 * kernel32!WriteFile itself uses.
 */
bool
dr_using_console(void)
{
    bool res;
    if (get_os_version() >= WINDOWS_VERSION_8) {
        FILE_FS_DEVICE_INFORMATION device_info;
        HANDLE herr = get_stderr_handle();
        /* The handle is invalid iff it's a gui app and the parent is a console */
        if (herr == INVALID_HANDLE_VALUE) {
            module_data_t *app_kernel32 = dr_lookup_module_by_name("kernel32.dll");
            /* The lookup can fail (returns NULL); don't dereference it then. */
            if (app_kernel32 == NULL)
                return false;
            if (privload_attach_parent_console(app_kernel32->start) == false) {
                dr_free_module_data(app_kernel32);
                return false;
            }
            dr_free_module_data(app_kernel32);
            herr = get_stderr_handle();
        }
        if (nt_query_volume_info(herr, &device_info, sizeof(device_info),
                                 FileFsDeviceInformation) == STATUS_SUCCESS) {
            if (device_info.DeviceType == FILE_DEVICE_CONSOLE)
                return true;
        }
        return false;
    }
    /* We detect cmd window using what kernel32!WriteFile uses: a handle
     * having certain bits set.
     */
    res = (((ptr_int_t)get_stderr_handle() & 0x10000003) == 0x3);
    CLIENT_ASSERT(!res || get_os_version() < WINDOWS_VERSION_8,
                  "Please report this: Windows 8 does have old-style consoles!");
    return res;
}
DR_API
/* Enables dr_printf()/dr_fprintf() output to a legacy console by loading a
 * private kernel32 and sharing the app's console state with it.  Must be
 * called during client init (before the private loader finalizes context
 * switch code).  Returns whether console printing is available.
 */
bool
dr_enable_console_printing(void)
{
    bool success = false;
    /* b/c private loader sets cxt sw code up front based on whether have windows
     * priv libs or not, this can only be called during client init()
     */
    if (dynamo_initialized) {
        CLIENT_ASSERT(false, "dr_enable_console_printing() must be called during init");
        return false;
    }
    /* Direct writes to std handles work on win8+ (xref i#911) but we don't need
     * a separate check as the handle is detected as a non-console handle.
     */
    if (!dr_using_console())
        return true;
    if (!INTERNAL_OPTION(private_loader))
        return false;
    if (!print_to_console) {
        if (priv_kernel32 == NULL) {
            /* Not using load_shared_library() b/c it won't search paths
             * for us.  XXX: should add os-shared interface for
             * locate-and-load.
             */
            priv_kernel32 = (shlib_handle_t)
                locate_and_load_private_library("kernel32.dll", false/*!reachable*/);
        }
        if (priv_kernel32 != NULL && kernel32_WriteFile == NULL) {
            module_data_t *app_kernel32 = dr_lookup_module_by_name("kernel32.dll");
            kernel32_WriteFile = (kernel32_WriteFile_t)
                lookup_library_routine(priv_kernel32, "WriteFile");
            /* There is some problem in loading 32 bit kernel32.dll
             * when 64 bit kernel32.dll is already loaded. If kernel32 is
             * not loaded we can't call privload_console_share because it
             * assumes kernel32 is loaded
             */
            if (app_kernel32 == NULL) {
                success = false;
            } else {
                success = privload_console_share(priv_kernel32, app_kernel32->start);
                dr_free_module_data(app_kernel32);
            }
        }
        /* We go ahead and cache whether dr_using_console().  If app really
         * changes its console, client could call this routine again
         * as a workaround.  Seems unlikely: better to have better perf.
         */
        print_to_console = (priv_kernel32 != NULL &&
                            kernel32_WriteFile != NULL && success);
    }
    return print_to_console;
}
#endif /* WINDOWS */
#endif /* WINDOWS */
DR_API void
/* printf-style output to stdout (or the console on Windows when console
 * printing is enabled).
 */
dr_printf(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
#ifdef WINDOWS
    if (print_to_console)
        dr_write_to_console(true/*stdout*/, fmt, ap);
    else
#endif
        do_file_write(STDOUT, fmt, ap);
    va_end(ap);
}
DR_API ssize_t
/* vprintf-style output to file f.  Returns bytes written, or -1 on a
 * failed console write.
 */
dr_vfprintf(file_t f, const char *fmt, va_list ap)
{
    ssize_t written;
#ifdef WINDOWS
    if ((f == STDOUT || f == STDERR) && print_to_console) {
        written = dr_write_to_console(f == STDOUT, fmt, ap);
        if (written <= 0)
            written = -1;
    } else
#endif
        written = do_file_write(f, fmt, ap);
    return written;
}
DR_API ssize_t
/* printf-style output to file f; varargs wrapper over dr_vfprintf(). */
dr_fprintf(file_t f, const char *fmt, ...)
{
    va_list ap;
    ssize_t res;
    va_start(ap, fmt);
    res = dr_vfprintf(f, fmt, ap);
    va_end(ap);
    return res;
}
DR_API int
/* snprintf replacement backed by DR's own formatter (supports floating
 * point, returns -1 when output exceeds max, matching Windows semantics).
 */
dr_snprintf(char *buf, size_t max, const char *fmt, ...)
{
    int res;
    va_list ap;
    va_start(ap, fmt);
    /* PR 219380: we use our_vsnprintf instead of ntdll._vsnprintf b/c the
     * latter does not support floating point.
     * Plus, our_vsnprintf returns -1 for > max chars (matching Windows
     * behavior, but which Linux libc version does not do).
     */
    res = our_vsnprintf(buf, max, fmt, ap);
    va_end(ap);
    return res;
}
DR_API int
/* va_list form of dr_snprintf(). */
dr_vsnprintf(char *buf, size_t max, const char *fmt, va_list ap)
{
    return our_vsnprintf(buf, max, fmt, ap);
}
DR_API int
/* Wide-character variant of dr_snprintf(). */
dr_snwprintf(wchar_t *buf, size_t max, const wchar_t *fmt, ...)
{
    int res;
    va_list ap;
    va_start(ap, fmt);
    res = our_vsnprintf_wide(buf, max, fmt, ap);
    va_end(ap);
    return res;
}
DR_API int
/* va_list form of dr_snwprintf(). */
dr_vsnwprintf(wchar_t *buf, size_t max, const wchar_t *fmt, va_list ap)
{
    return our_vsnprintf_wide(buf, max, fmt, ap);
}
DR_API int
/* sscanf replacement backed by DR's own scanner. */
dr_sscanf(const char *str, const char *fmt, ...)
{
    int res;
    va_list ap;
    va_start(ap, fmt);
    res = our_vsscanf(str, fmt, ap);
    va_end(ap);
    return res;
}
DR_API const char *
/* Copies the next whitespace-delimited token of str into buf and returns a
 * pointer just past it, or NULL when no token remains.
 */
dr_get_token(const char *str, char *buf, size_t buflen)
{
    /* We don't indicate whether any truncation happened.  The caller is
     * expected to know the maximum size of any one token in the string
     * being parsed ahead of time.
     */
    const char *pos = str;
    CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(buflen), "buflen too large");
    return (parse_word(str, &pos, buf, (uint)buflen) == NULL) ? NULL : pos;
}
DR_API void
/* Prints msg, the instruction's translation pc, and its disassembly to f. */
dr_print_instr(void *drcontext, file_t f, instr_t *instr, const char *msg)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL, "dr_print_instr: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT || standalone_library,
                  "dr_print_instr: drcontext is invalid");
    dr_fprintf(f, "%s "PFX" ", msg, instr_get_translation(instr));
    instr_disassemble(dcontext, instr, f);
    dr_fprintf(f, "\n");
}
DR_API void
/* Prints msg followed by the operand's disassembly to f. */
dr_print_opnd(void *drcontext, file_t f, opnd_t opnd, const char *msg)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL, "dr_print_opnd: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT || standalone_library,
                  "dr_print_opnd: drcontext is invalid");
    dr_fprintf(f, "%s ", msg);
    opnd_disassemble(dcontext, opnd, f);
    dr_fprintf(f, "\n");
}
/***************************************************************************
 * Thread support
 */
DR_API
/* Returns the DR context of the current thread */
void *
dr_get_current_drcontext(void)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    return (void *) dcontext;
}
DR_API thread_id_t
/* Returns the OS thread id of the thread owning drcontext. */
dr_get_thread_id(void *drcontext)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL, "dr_get_thread_id: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_get_thread_id: drcontext is invalid");
    return dcontext->owning_thread;
}
#ifdef WINDOWS
/* Added for DrMem i#1254 */
DR_API HANDLE
/* Returns DR's handle for the thread owning drcontext. */
dr_get_dr_thread_handle(void *drcontext)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    /* Fix copy-pasted assert messages that named dr_get_thread_id. */
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_get_dr_thread_handle: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_get_dr_thread_handle: drcontext is invalid");
    return dcontext->thread_record->handle;
}
#endif
DR_API void *
/* Returns the client's per-thread user field for drcontext. */
dr_get_tls_field(void *drcontext)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL, "dr_get_tls_field: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_get_tls_field: drcontext is invalid");
    return dcontext->client_data->user_field;
}
DR_API void
/* Sets the client's per-thread user field for drcontext. */
dr_set_tls_field(void *drcontext, void *value)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL, "dr_set_tls_field: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_set_tls_field: drcontext is invalid");
    dcontext->client_data->user_field = value;
}
DR_API void *
/* Returns the base of DR's TLS for the given segment register (x86) or the
 * stolen register (ARM/AArch64); NULL for other registers on ARM.
 */
dr_get_dr_segment_base(IN reg_id_t seg)
{
#ifdef AARCHXX
    if (seg == dr_reg_stolen)
        return os_get_dr_tls_base(get_thread_private_dcontext());
    else
        return NULL;
#else
    return get_segment_base(seg);
#endif
}
DR_API
/* Allocates num_slots contiguous raw TLS slots with the given alignment.
 * Writes the register used to address them to *tls_register and the base
 * offset to *offset.  Returns whether successful (trivially true for 0
 * slots).
 */
bool
dr_raw_tls_calloc(OUT reg_id_t *tls_register,
                  OUT uint *offset,
                  IN uint num_slots,
                  IN uint alignment)
{
    CLIENT_ASSERT(tls_register != NULL,
                  "dr_raw_tls_calloc: tls_register cannot be NULL");
    CLIENT_ASSERT(offset != NULL,
                  "dr_raw_tls_calloc: offset cannot be NULL");
    *tls_register = IF_X86_ELSE(SEG_TLS, dr_reg_stolen);
    if (num_slots == 0)
        return true;
    return os_tls_calloc(offset, num_slots, alignment);
}
DR_API
/* Frees slots allocated by dr_raw_tls_calloc().  Returns whether
 * successful.
 */
bool
dr_raw_tls_cfree(uint offset, uint num_slots)
{
    if (num_slots == 0)
        return true;
    return os_tls_cfree(offset, num_slots);
}
DR_API
/* Builds a memory operand referencing the raw TLS slot at tls_offs off the
 * TLS register: a far base-disp on x86, a plain base-disp elsewhere.
 */
opnd_t
dr_raw_tls_opnd(void *drcontext, reg_id_t tls_register, uint tls_offs)
{
    CLIENT_ASSERT(drcontext != NULL, "dr_raw_tls_opnd: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_raw_tls_opnd: drcontext is invalid");
    IF_X86_ELSE({
        return opnd_create_far_base_disp_ex(tls_register, DR_REG_NULL, DR_REG_NULL,
                                            0, tls_offs, OPSZ_PTR,
                                            /* modern processors don't want addr16
                                             * prefixes
                                             */
                                            false, true, false);
    }, {
        return OPND_CREATE_MEMPTR(tls_register, tls_offs);
    });
}
DR_API
/* Inserts, before "where" in ilist, a load of the raw TLS slot at tls_offs
 * into reg (which must be a pointer-sized GPR).
 */
void
dr_insert_read_raw_tls(void *drcontext, instrlist_t *ilist, instr_t *where,
                       reg_id_t tls_register, uint tls_offs, reg_id_t reg)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_insert_read_raw_tls: drcontext cannot be NULL");
    CLIENT_ASSERT(reg_is_pointer_sized(reg),
                  "must use a pointer-sized general-purpose register");
    IF_X86_ELSE({
        MINSERT(ilist, where, INSTR_CREATE_mov_ld
                (dcontext, opnd_create_reg(reg),
                 dr_raw_tls_opnd(drcontext, tls_register, tls_offs)));
    }, {
        MINSERT(ilist, where, XINST_CREATE_load
                (dcontext, opnd_create_reg(reg),
                 dr_raw_tls_opnd(drcontext, tls_register, tls_offs)));
    });
}
DR_API
/* Inserts, before "where" in ilist, a store of reg (a pointer-sized GPR)
 * into the raw TLS slot at tls_offs.
 */
void
dr_insert_write_raw_tls(void *drcontext, instrlist_t *ilist, instr_t *where,
                        reg_id_t tls_register, uint tls_offs, reg_id_t reg)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_insert_write_raw_tls: drcontext cannot be NULL");
    CLIENT_ASSERT(reg_is_pointer_sized(reg),
                  "must use a pointer-sized general-purpose register");
    IF_X86_ELSE({
        MINSERT(ilist, where, INSTR_CREATE_mov_st
                (dcontext,
                 dr_raw_tls_opnd(drcontext, tls_register, tls_offs),
                 opnd_create_reg(reg)));
    }, {
        MINSERT(ilist, where, XINST_CREATE_store
                (dcontext, dr_raw_tls_opnd(drcontext, tls_register, tls_offs),
                 opnd_create_reg(reg)));
    });
}
DR_API
/* Current thread gives up its time quantum.  The safe-for-synch (client
 * thread) or safe-to-terminate (app thread) flag is set only around the
 * actual yield so DR can synch with the blocked thread.
 */
void
dr_thread_yield(void)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    if (IS_CLIENT_THREAD(dcontext))
        dcontext->client_data->client_thread_safe_for_synch = true;
    else
        dcontext->client_data->at_safe_to_terminate_syscall = true;
    os_thread_yield();
    if (IS_CLIENT_THREAD(dcontext))
        dcontext->client_data->client_thread_safe_for_synch = false;
    else
        dcontext->client_data->at_safe_to_terminate_syscall = false;
}
DR_API
/* Current thread sleeps for time_ms milliseconds.  Same synch-flag
 * discipline as dr_thread_yield().
 */
void
dr_sleep(int time_ms)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    if (IS_CLIENT_THREAD(dcontext))
        dcontext->client_data->client_thread_safe_for_synch = true;
    else
        dcontext->client_data->at_safe_to_terminate_syscall = true;
    os_thread_sleep(time_ms);
    if (IS_CLIENT_THREAD(dcontext))
        dcontext->client_data->client_thread_safe_for_synch = false;
    else
        dcontext->client_data->at_safe_to_terminate_syscall = false;
}
#ifdef CLIENT_SIDELINE
DR_API
/* Sets whether this client thread may be suspended by thread-synch
 * operations.  Returns false (and does nothing) when called from a
 * non-client thread.
 */
bool
dr_client_thread_set_suspendable(bool suspendable)
{
    /* see notes in synch_with_all_threads() */
    dcontext_t *dcontext = get_thread_private_dcontext();
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    if (!IS_CLIENT_THREAD(dcontext))
        return false;
    dcontext->client_data->suspendable = suspendable;
    return true;
}
#endif
DR_API
/* Suspends all other threads at safe points and returns their contexts in
 * *drcontexts (count in *num_suspended); threads that could not be suspended
 * are counted in *num_unsuspended.  Must later be paired with
 * dr_resume_all_other_threads(), which frees the returned array.
 */
bool
dr_suspend_all_other_threads_ex(OUT void ***drcontexts,
                                OUT uint *num_suspended,
                                OUT uint *num_unsuspended,
                                dr_suspend_flags_t flags)
{
    uint out_suspended = 0, out_unsuspended = 0;
    thread_record_t **threads;
    int num_threads;
    dcontext_t *my_dcontext = get_thread_private_dcontext();
    int i;
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    CLIENT_ASSERT(OWN_NO_LOCKS(my_dcontext),
                  "dr_suspend_all_other_threads cannot be called while holding a lock");
    CLIENT_ASSERT(drcontexts != NULL && num_suspended != NULL,
                  "dr_suspend_all_other_threads invalid params");
    LOG(GLOBAL, LOG_FRAGMENT, 2,
        "\ndr_suspend_all_other_threads: thread "TIDFMT" suspending all threads\n",
        get_thread_id());
    /* suspend all DR-controlled threads at safe locations */
    if (!synch_with_all_threads(THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT_OR_NO_XFER,
                                &threads, &num_threads, THREAD_SYNCH_NO_LOCKS_NO_XFER,
                                /* if we fail to suspend a thread (e.g., for
                                 * privilege reasons), ignore and continue
                                 */
                                THREAD_SYNCH_SUSPEND_FAILURE_IGNORE)) {
        LOG(GLOBAL, LOG_FRAGMENT, 2,
            "\ndr_suspend_all_other_threads: failed to suspend every thread\n");
        /* some threads may have been successfully suspended so we must return
         * their info so they'll be resumed.  I believe there is thus no
         * scenario under which we return false.
         */
    }
    /* now we own the thread_initexit_lock */
    CLIENT_ASSERT(OWN_MUTEX(&all_threads_synch_lock) && OWN_MUTEX(&thread_initexit_lock),
                  "internal locking error");
    /* To avoid two passes we allocate the array now.  It may be larger than
     * necessary if we had suspend failures but that's ok.
     * We hide the threads num and array in extra slots.
     */
    *drcontexts = (void **)
        global_heap_alloc((num_threads+2)*sizeof(dcontext_t*) HEAPACCT(ACCT_THREAD_MGT));
    for (i = 0; i < num_threads; i++) {
        dcontext_t *dcontext = threads[i]->dcontext;
        if (dcontext != NULL) { /* include my_dcontext here */
            if (dcontext != my_dcontext) {
                /* must translate BEFORE freeing any memory! */
                if (!thread_synch_successful(threads[i])) {
                    out_unsuspended++;
                } else if (is_thread_currently_native(threads[i]) &&
                           !TEST(DR_SUSPEND_NATIVE, flags)) {
                    /* Native threads are skipped unless the caller opted in. */
                    out_unsuspended++;
                } else if (thread_synch_state_no_xfer(dcontext)) {
                    /* FIXME: for all other synchall callers, the app
                     * context should be sitting in their mcontext, even
                     * though we can't safely get their native context and
                     * translate it.
                     */
                    (*drcontexts)[out_suspended] = (void *) dcontext;
                    out_suspended++;
                    CLIENT_ASSERT(!dcontext->client_data->mcontext_in_dcontext,
                                  "internal inconsistency in where mcontext is");
                    /* officially get_mcontext() doesn't always set pc: we do anyway */
                    get_mcontext(dcontext)->pc = dcontext->next_tag;
                    dcontext->client_data->mcontext_in_dcontext = true;
                } else {
                    (*drcontexts)[out_suspended] = (void *) dcontext;
                    out_suspended++;
                    /* It's not safe to clobber the thread's mcontext with
                     * its own translation b/c for shared_syscall we store
                     * the continuation pc in the esi slot.
                     * We could translate here into heap-allocated memory,
                     * but some clients may just want to stop
                     * the world but not examine the threads, so we lazily
                     * translate in dr_get_mcontext().
                     */
                    CLIENT_ASSERT(!dcontext->client_data->suspended,
                                  "inconsistent usage of dr_suspend_all_other_threads");
                    CLIENT_ASSERT(dcontext->client_data->cur_mc == NULL,
                                  "inconsistent usage of dr_suspend_all_other_threads");
                    dcontext->client_data->suspended = true;
                }
            }
        }
    }
    /* Hide the two extra vars we need the client to pass back to us */
    (*drcontexts)[out_suspended] = (void *) threads;
    (*drcontexts)[out_suspended+1] = (void *)(ptr_uint_t) num_threads;
    *num_suspended = out_suspended;
    if (num_unsuspended != NULL)
        *num_unsuspended = out_unsuspended;
    return true;
}
DR_API
/* Convenience wrapper around dr_suspend_all_other_threads_ex() with an
 * empty flag set (so currently-native threads are not suspended).
 */
bool
dr_suspend_all_other_threads(OUT void ***drcontexts,
                             OUT uint *num_suspended,
                             OUT uint *num_unsuspended)
{
    return dr_suspend_all_other_threads_ex(drcontexts, num_suspended, num_unsuspended,
                                           0);
}
/* Resumes all threads previously suspended by dr_suspend_all_other_threads[_ex](),
 * freeing the per-thread cached mcontexts and the hidden bookkeeping array.
 * Fix: the parameter-validation assert message previously named
 * dr_suspend_all_other_threads (copy-paste), which misattributed failures.
 */
bool
dr_resume_all_other_threads(IN void **drcontexts,
                            IN uint num_suspended)
{
    thread_record_t **threads;
    int num_threads;
    uint i;
    CLIENT_ASSERT(drcontexts != NULL,
                  "dr_resume_all_other_threads invalid params");
    LOG(GLOBAL, LOG_FRAGMENT, 2,
        "dr_resume_all_other_threads\n");
    /* Recover the thread array and count hidden in the two extra slots by
     * dr_suspend_all_other_threads_ex().
     */
    threads = (thread_record_t **) drcontexts[num_suspended];
    num_threads = (int)(ptr_int_t) drcontexts[num_suspended+1];
    for (i = 0; i < num_suspended; i++) {
        dcontext_t *dcontext = (dcontext_t *) drcontexts[i];
        if (dcontext->client_data->cur_mc != NULL) {
            /* clear any cached mc from dr_get_mcontext_priv() */
            heap_free(dcontext, dcontext->client_data->cur_mc,
                      sizeof(*dcontext->client_data->cur_mc) HEAPACCT(ACCT_CLIENT));
            dcontext->client_data->cur_mc = NULL;
        }
        dcontext->client_data->suspended = false;
    }
    global_heap_free(drcontexts, (num_threads+2)*sizeof(dcontext_t*)
                     HEAPACCT(ACCT_THREAD_MGT));
    end_synch_with_all_threads(threads, num_threads, true/*resume*/);
    return true;
}
DR_API
/* Returns whether the thread owning "drcontext" is currently running
 * natively (i.e., outside of DR's control).
 */
bool
dr_is_thread_native(void *drcontext)
{
    CLIENT_ASSERT(drcontext != NULL, "invalid param");
    return is_thread_currently_native(((dcontext_t *) drcontext)->thread_record);
}
DR_API
/* Re-takes-over a thread that was suspended while running natively, placing
 * it back under DR control.  Returns whether the takeover succeeded.
 */
bool
dr_retakeover_suspended_native_thread(void *drcontext)
{
    bool res;
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL, "invalid param");
    /* XXX: I don't quite see why I need to pop these 2 when I'm doing
     * what a regular retakeover would do
     */
    KSTOP_NOT_MATCHING_DC(dcontext, fcache_default);
    KSTOP_NOT_MATCHING_DC(dcontext, dispatch_num_exits);
    res = os_thread_take_over_suspended_native(dcontext);
    return res;
}
# ifdef UNIX
DR_API
/* Installs (or, with millisec == 0, removes) an interval-timer callback for
 * timer "which" firing every "millisec" ms.  Returns false on invalid
 * arguments (a non-zero interval requires a non-NULL callback).
 */
bool
dr_set_itimer(int which, uint millisec,
              void (*func)(void *drcontext, dr_mcontext_t *mcontext))
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    if (func == NULL && millisec != 0)
        return false;
    return set_itimer_callback(dcontext, which, millisec, NULL,
                               (void (*)(dcontext_t *, dr_mcontext_t *))func);
}
DR_API
/* Returns the current frequency (in ms) of interval timer "which". */
uint
dr_get_itimer(int which)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    return get_itimer_frequency(dcontext, which);
}
# endif /* UNIX */
DR_API
/* Enables whereami tracking for all threads; once enabled it is never
 * turned back off (write-once global flag).
 */
void
dr_track_where_am_i(void)
{
    track_where_am_i = true;
}
/* Internal query: whereami tracking is on if explicitly requested via
 * dr_track_where_am_i() or implied by the -profile_pcs option.
 */
bool
should_track_where_am_i(void)
{
    return track_where_am_i || DYNAMO_OPTION(profile_pcs);
}
DR_API
/* Public wrapper around should_track_where_am_i(). */
bool
dr_is_tracking_where_am_i(void)
{
    return should_track_where_am_i();
}
DR_API
/* Returns where the given thread currently is (DR_WHERE_*), refining a
 * code-cache location by the fragment containing "pc".  If tag_out is
 * non-NULL it receives the fragment tag (or NULL if not in a fragment).
 */
dr_where_am_i_t
dr_where_am_i(void *drcontext, app_pc pc, OUT void **tag_out)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL, "invalid param");
    void *tag = NULL;
    dr_where_am_i_t whereami = dcontext->whereami;
    /* Further refine if pc is in the cache. */
    if (whereami == DR_WHERE_FCACHE) {
        fragment_t *fragment;
        whereami = fcache_refine_whereami(dcontext, whereami, pc, &fragment);
        if (fragment != NULL)
            tag = fragment->tag;
    }
    if (tag_out != NULL)
        *tag_out = tag;
    return whereami;
}
#endif /* CLIENT_INTERFACE */
DR_API
/* Inserts inst as a non-application (meta) instruction into ilist prior to
 * "where"; meta instructions are not treated as application code.
 */
void
instrlist_meta_preinsert(instrlist_t *ilist, instr_t *where, instr_t *inst)
{
    instr_set_meta(inst);
    instrlist_preinsert(ilist, where, inst);
}
DR_API
/* Inserts inst as a non-application (meta) instruction into ilist after
 * "where".
 */
void
instrlist_meta_postinsert(instrlist_t *ilist, instr_t *where, instr_t *inst)
{
    instr_set_meta(inst);
    instrlist_postinsert(ilist, where, inst);
}
DR_API
/* Appends inst as a non-application (meta) instruction onto the end of
 * ilist.
 */
void
instrlist_meta_append(instrlist_t *ilist, instr_t *inst)
{
    instr_set_meta(inst);
    instrlist_append(ilist, inst);
}
DR_API
/* Like instrlist_meta_preinsert() but additionally marks inst as possibly
 * faulting (so fault handling treats it accordingly).
 */
void
instrlist_meta_fault_preinsert(instrlist_t *ilist, instr_t *where, instr_t *inst)
{
    instr_set_meta_may_fault(inst, true);
    instrlist_preinsert(ilist, where, inst);
}
DR_API
/* Like instrlist_meta_postinsert() but additionally marks inst as possibly
 * faulting.
 */
void
instrlist_meta_fault_postinsert(instrlist_t *ilist, instr_t *where, instr_t *inst)
{
    instr_set_meta_may_fault(inst, true);
    instrlist_postinsert(ilist, where, inst);
}
DR_API
/* Like instrlist_meta_append() but additionally marks inst as possibly
 * faulting.
 */
void
instrlist_meta_fault_append(instrlist_t *ilist, instr_t *inst)
{
    instr_set_meta_may_fault(inst, true);
    instrlist_append(ilist, inst);
}
/* Copies num_args operands from the va_list into a freshly heap-allocated
 * array (*args); caller must free with free_va_opnd_list().  Requires
 * num_args > 0.
 */
static void
convert_va_list_to_opnd(dcontext_t *dcontext, opnd_t **args, uint num_args, va_list ap)
{
    uint i;
    ASSERT(num_args > 0);
    /* allocate at least one argument opnd */
    /* we don't check for GLOBAL_DCONTEXT since DR internally calls this */
    *args = HEAP_ARRAY_ALLOC(dcontext, opnd_t, num_args,
                             ACCT_CLEANCALL, UNPROTECTED);
    for (i = 0; i < num_args; i++) {
        (*args)[i] = va_arg(ap, opnd_t);
        CLIENT_ASSERT(opnd_is_valid((*args)[i]),
                      "Call argument: bad operand. Did you create a valid opnd_t?");
    }
}
/* Frees an operand array previously allocated by convert_va_list_to_opnd();
 * a zero-length list is a no-op.
 */
static void
free_va_opnd_list(dcontext_t *dcontext, uint num_args, opnd_t *args)
{
    if (num_args == 0)
        return;
    HEAP_ARRAY_FREE(dcontext, args, opnd_t, num_args,
                    ACCT_CLEANCALL, UNPROTECTED);
}
/* dr_insert_* are used by general DR */
/* Inserts a complete call to callee with the passed-in arguments.
 * Auto-predication is temporarily disabled; on ARM a conditional branch
 * around the call preserves the predicate semantics instead.
 */
void
dr_insert_call(void *drcontext, instrlist_t *ilist, instr_t *where,
               void *callee, uint num_args, ...)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    opnd_t *args = NULL;
    instr_t *label = INSTR_CREATE_label(drcontext);
    dr_pred_type_t auto_pred = instrlist_get_auto_predicate(ilist);
    va_list ap;
    CLIENT_ASSERT(drcontext != NULL, "dr_insert_call: drcontext cannot be NULL");
    instrlist_set_auto_predicate(ilist, DR_PRED_NONE);
#ifdef ARM
    if (instr_predicate_is_cond(auto_pred)) {
        /* auto_predicate is set, though we handle the clean call with a cbr
         * because we require inserting instrumentation which modifies cpsr.
         */
        MINSERT(ilist, where, XINST_CREATE_jump_cond
                (drcontext,
                 instr_invert_predicate(auto_pred),
                 opnd_create_instr(label)));
    }
#endif
    if (num_args != 0) {
        va_start(ap, num_args);
        convert_va_list_to_opnd(dcontext, &args, num_args, ap);
        va_end(ap);
    }
    insert_meta_call_vargs(dcontext, ilist, where, META_CALL_RETURNS,
                           vmcode_get_start(), callee, num_args, args);
    if (num_args != 0)
        free_va_opnd_list(dcontext, num_args, args);
    MINSERT(ilist, where, label);
    instrlist_set_auto_predicate(ilist, auto_pred);
}
/* Like dr_insert_call() but lets the caller supply the expected encode
 * location ("encode_pc") for reachability decisions.  Returns whether a
 * direct call sequence was used.
 * Fixes: the NULL-drcontext assert message previously named dr_insert_call
 * instead of dr_insert_call_ex, and convert_va_list_to_opnd() was passed the
 * raw void* drcontext rather than the typed dcontext used by the sibling
 * routines (behaviorally identical in C, but inconsistent).
 */
bool
dr_insert_call_ex(void *drcontext, instrlist_t *ilist, instr_t *where,
                  byte *encode_pc, void *callee, uint num_args, ...)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    opnd_t *args = NULL;
    bool direct;
    va_list ap;
    CLIENT_ASSERT(drcontext != NULL, "dr_insert_call_ex: drcontext cannot be NULL");
    if (num_args != 0) {
        va_start(ap, num_args);
        convert_va_list_to_opnd(dcontext, &args, num_args, ap);
        va_end(ap);
    }
    direct = insert_meta_call_vargs(dcontext, ilist, where, META_CALL_RETURNS, encode_pc,
                                    callee, num_args, args);
    if (num_args != 0)
        free_va_opnd_list(dcontext, num_args, args);
    return direct;
}
/* Not exported.  Currently used for ARM to avoid storing to %lr.
 * Inserts a call to callee without the META_CALL_RETURNS flag; does not
 * support auto-predication.
 */
void
dr_insert_call_noreturn(void *drcontext, instrlist_t *ilist, instr_t *where,
                        void *callee, uint num_args, ...)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    opnd_t *args = NULL;
    va_list ap;
    CLIENT_ASSERT(drcontext != NULL, "dr_insert_call_noreturn: drcontext cannot be NULL");
    CLIENT_ASSERT(instrlist_get_auto_predicate(ilist) == DR_PRED_NONE,
                  "Does not support auto-predication");
    if (num_args != 0) {
        va_start(ap, num_args);
        convert_va_list_to_opnd(dcontext, &args, num_args, ap);
        va_end(ap);
    }
    insert_meta_call_vargs(dcontext, ilist, where, 0, vmcode_get_start(), callee,
                           num_args, args);
    if (num_args != 0)
        free_va_opnd_list(dcontext, num_args, args);
}
/* Internal utility routine for inserting context save for a clean call.
 * Returns the size of the data stored on the DR stack
 * (in case the caller needs to align the stack pointer).
 * XSP and XAX are modified by this call.
 */
static uint
prepare_for_call_ex(dcontext_t *dcontext, clean_call_info_t *cci,
                    instrlist_t *ilist, instr_t *where, byte *encode_pc)
{
    instr_t *in;
    uint dstack_offs;
    /* Remember the insertion boundary so we can mark only the newly added
     * instructions as meta below.
     */
    in = (where == NULL) ? instrlist_last(ilist) : instr_get_prev(where);
    dstack_offs = prepare_for_clean_call(dcontext, cci, ilist, where, encode_pc);
    /* now go through and mark inserted instrs as meta */
    if (in == NULL)
        in = instrlist_first(ilist);
    else
        in = instr_get_next(in);
    while (in != where) {
        instr_set_meta(in);
        in = instr_get_next(in);
    }
    return dstack_offs;
}
/* Internal utility routine for inserting context restore for a clean call.
 * Optionally pops a caller-allocated parameter area of sizeof_param_area
 * bytes (must fit in a signed 8-bit immediate) before restoring.
 */
static void
cleanup_after_call_ex(dcontext_t *dcontext, clean_call_info_t *cci,
                      instrlist_t *ilist, instr_t *where, uint sizeof_param_area,
                      byte *encode_pc)
{
    instr_t *in;
    /* Remember the insertion boundary so we can mark only the newly added
     * instructions as meta below.
     */
    in = (where == NULL) ? instrlist_last(ilist) : instr_get_prev(where);
    if (sizeof_param_area > 0) {
        /* clean up the parameter area */
        CLIENT_ASSERT(sizeof_param_area <= 127,
                      "cleanup_after_call_ex: sizeof_param_area must be <= 127");
        /* mark it meta down below */
        instrlist_preinsert(ilist, where,
                            XINST_CREATE_add(dcontext, opnd_create_reg(REG_XSP),
                                             OPND_CREATE_INT8(sizeof_param_area)));
    }
    cleanup_after_clean_call(dcontext, cci, ilist, where, encode_pc);
    /* now go through and mark inserted instrs as meta */
    if (in == NULL)
        in = instrlist_first(ilist);
    else
        in = instr_get_next(in);
    while (in != where) {
        instr_set_meta(in);
        in = instr_get_next(in);
    }
}
/* Inserts a complete call to callee with the passed-in arguments, wrapped
 * by an app save and restore.
 *
 * If "save_flags" includes DR_CLEANCALL_SAVE_FLOAT, saves the fp/mmx/sse state.
 *
 * NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
 * dr_prepare_for_call(). We guarantee to clients that all other slots
 * (except the XAX mcontext slot) will remain untouched.
 *
 * NOTE : dr_insert_cbr_instrumentation has assumption about the clean call
 * instrumentation layout, changes to the clean call instrumentation may break
 * dr_insert_cbr_instrumentation.
 */
void
dr_insert_clean_call_ex_varg(void *drcontext, instrlist_t *ilist, instr_t *where,
                             void *callee, dr_cleancall_save_t save_flags,
                             uint num_args, opnd_t *args)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    uint dstack_offs, pad = 0;
    size_t buf_sz = 0;
    clean_call_info_t cci; /* information for clean call insertion. */
    bool save_fpstate = TEST(DR_CLEANCALL_SAVE_FLOAT, save_flags);
    meta_call_flags_t call_flags = META_CALL_CLEAN | META_CALL_RETURNS;
    byte *encode_pc;
    instr_t *label = INSTR_CREATE_label(drcontext);
    dr_pred_type_t auto_pred = instrlist_get_auto_predicate(ilist);
    CLIENT_ASSERT(drcontext != NULL, "dr_insert_clean_call: drcontext cannot be NULL");
    STATS_INC(cleancall_inserted);
    LOG(THREAD, LOG_CLEANCALL, 2, "CLEANCALL: insert clean call to "PFX"\n", callee);
    /* Auto-predication is disabled for the whole sequence and restored at
     * the end; on ARM a branch over the call emulates the predicate.
     */
    instrlist_set_auto_predicate(ilist, DR_PRED_NONE);
#ifdef ARM
    if (instr_predicate_is_cond(auto_pred)) {
        /* auto_predicate is set, though we handle the clean call with a cbr
         * because we require inserting instrumentation which modifies cpsr.
         */
        MINSERT(ilist, where, XINST_CREATE_jump_cond
                (drcontext,
                 instr_invert_predicate(auto_pred),
                 opnd_create_instr(label)));
    }
#endif
    /* analyze the clean call, return true if clean call can be inlined. */
    if (analyze_clean_call(dcontext, &cci, where, callee, save_fpstate,
                           TEST(DR_CLEANCALL_ALWAYS_OUT_OF_LINE, save_flags),
                           num_args, args) &&
        !TEST(DR_CLEANCALL_ALWAYS_OUT_OF_LINE, save_flags)) {
#ifdef CLIENT_INTERFACE
        /* we can perform the inline optimization and return. */
        STATS_INC(cleancall_inlined);
        LOG(THREAD, LOG_CLEANCALL, 2, "CLEANCALL: inlined callee "PFX"\n", callee);
        insert_inline_clean_call(dcontext, &cci, ilist, where, args);
        MINSERT(ilist, where, label);
        instrlist_set_auto_predicate(ilist, auto_pred);
        return;
#else /* CLIENT_INTERFACE */
        ASSERT_NOT_REACHED();
#endif /* CLIENT_INTERFACE */
    }
    /* honor requests from caller */
    if (TEST(DR_CLEANCALL_NOSAVE_FLAGS, save_flags)) {
        /* even if we remove flag saves we want to keep mcontext shape */
        cci.preserve_mcontext = true;
        cci.skip_save_flags = true;
        /* we assume this implies DF should be 0 already */
        cci.skip_clear_flags = true;
        /* XXX: should also provide DR_CLEANCALL_NOSAVE_NONAFLAGS to
         * preserve just arith flags on return from a call
         */
    }
    if (TESTANY(DR_CLEANCALL_NOSAVE_XMM |
                DR_CLEANCALL_NOSAVE_XMM_NONPARAM |
                DR_CLEANCALL_NOSAVE_XMM_NONRET, save_flags)) {
        uint i;
        /* even if we remove xmm saves we want to keep mcontext shape */
        cci.preserve_mcontext = true;
        /* start w/ all */
#if defined(X64) && defined(WINDOWS)
        cci.num_simd_skip = 6;
#else
        /* all 8 (or 16) are scratch */
        cci.num_simd_skip = NUM_SIMD_REGS;
#endif
        for (i=0; i<cci.num_simd_skip; i++)
            cci.simd_skip[i] = true;
        /* now remove those used for param/retval */
#ifdef X64
        if (TEST(DR_CLEANCALL_NOSAVE_XMM_NONPARAM, save_flags)) {
            /* xmm0-3 (-7 for linux) are used for params */
            for (i=0; i<IF_UNIX_ELSE(7,3); i++)
                cci.simd_skip[i] = false;
            cci.num_simd_skip -= i;
        }
        if (TEST(DR_CLEANCALL_NOSAVE_XMM_NONRET, save_flags)) {
            /* xmm0 (and xmm1 for linux) are used for retvals */
            cci.simd_skip[0] = false;
            cci.num_simd_skip--;
# ifdef UNIX
            cci.simd_skip[1] = false;
            cci.num_simd_skip--;
# endif
        }
#endif
    }
    if (TEST(DR_CLEANCALL_INDIRECT, save_flags))
        encode_pc = vmcode_unreachable_pc();
    else
        encode_pc = vmcode_get_start();
    dstack_offs = prepare_for_call_ex(dcontext, &cci, ilist, where, encode_pc);
#ifdef X64
    /* PR 218790: we assume that dr_prepare_for_call() leaves stack 16-byte
     * aligned, which is what insert_meta_call_vargs requires. */
    if (cci.should_align) {
        CLIENT_ASSERT(ALIGNED(dstack_offs, 16),
                      "internal error: bad stack alignment");
    }
#endif
    if (save_fpstate) {
        /* save on the stack: xref PR 202669 on clients using more stack */
        buf_sz = proc_fpstate_save_size();
        /* we need 16-byte-alignment */
        pad = ALIGN_FORWARD_UINT(dstack_offs, 16) - dstack_offs;
        IF_X64(CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_int(buf_sz + pad),
                             "dr_insert_clean_call: internal truncation error"));
        MINSERT(ilist, where, XINST_CREATE_sub(dcontext, opnd_create_reg(REG_XSP),
                                               OPND_CREATE_INT32((int)(buf_sz + pad))));
        dr_insert_save_fpstate(drcontext, ilist, where,
                               opnd_create_base_disp(REG_XSP, REG_NULL, 0, 0,
                                                     OPSZ_512));
    }
    /* PR 302951: restore state if clean call args reference app memory.
     * We use a hack here: this is the only instance where we mark as our-mangling
     * but do not have a translation target set, which indicates to the restore
     * routines that this is a clean call. If the client adds instrs in the middle
     * translation will fail; if the client modifies any instr, the our-mangling
     * flag will disappear and translation will fail.
     */
    instrlist_set_our_mangling(ilist, true);
    if (TEST(DR_CLEANCALL_RETURNS_TO_NATIVE, save_flags))
        call_flags |= META_CALL_RETURNS_TO_NATIVE;
    insert_meta_call_vargs(dcontext, ilist, where, call_flags,
                           encode_pc, callee, num_args, args);
    instrlist_set_our_mangling(ilist, false);
    if (save_fpstate) {
        /* Undo the fpstate save in reverse order. */
        dr_insert_restore_fpstate(drcontext, ilist, where,
                                  opnd_create_base_disp(REG_XSP, REG_NULL, 0, 0,
                                                        OPSZ_512));
        MINSERT(ilist, where, XINST_CREATE_add(dcontext, opnd_create_reg(REG_XSP),
                                               OPND_CREATE_INT32(buf_sz + pad)));
    }
    cleanup_after_call_ex(dcontext, &cci, ilist, where, 0, encode_pc);
    MINSERT(ilist, where, label);
    instrlist_set_auto_predicate(ilist, auto_pred);
}
/* Varargs front-end for dr_insert_clean_call_ex_varg(): converts the
 * variadic argument list into an opnd_t array and forwards.
 */
void
dr_insert_clean_call_ex(void *drcontext, instrlist_t *ilist, instr_t *where,
                        void *callee, dr_cleancall_save_t save_flags, uint num_args, ...)
{
    opnd_t *args = NULL;
    if (num_args != 0) {
        va_list ap;
        va_start(ap, num_args);
        convert_va_list_to_opnd(drcontext, &args, num_args, ap);
        va_end(ap);
    }
    dr_insert_clean_call_ex_varg(drcontext, ilist, where, callee, save_flags,
                                 num_args, args);
    if (num_args != 0)
        free_va_opnd_list(drcontext, num_args, args);
}
DR_API
/* Simplest clean-call entry point: only exposes the save-fpstate choice,
 * mapping it onto DR_CLEANCALL_SAVE_FLOAT and forwarding to the _ex_varg
 * routine.
 */
void
dr_insert_clean_call(void *drcontext, instrlist_t *ilist, instr_t *where,
                     void *callee, bool save_fpstate, uint num_args, ...)
{
    dr_cleancall_save_t flags = (save_fpstate ? DR_CLEANCALL_SAVE_FLOAT : 0);
    opnd_t *args = NULL;
    if (num_args != 0) {
        va_list ap;
        va_start(ap, num_args);
        convert_va_list_to_opnd(drcontext, &args, num_args, ap);
        va_end(ap);
    }
    dr_insert_clean_call_ex_varg(drcontext, ilist, where, callee, flags, num_args, args);
    if (num_args != 0)
        free_va_opnd_list(drcontext, num_args, args);
}
/* Utility routine for inserting a clean call to an instrumentation routine
 * Returns the size of the data stored on the DR stack (in case the caller
 * needs to align the stack pointer). XSP and XAX are modified by this call.
 *
 * NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
 * prepare_for_clean_call(). We guarantee to clients that all other slots
 * (except the XAX mcontext slot) will remain untouched.
 */
DR_API uint
dr_prepare_for_call(void *drcontext, instrlist_t *ilist, instr_t *where)
{
    CLIENT_ASSERT(drcontext != NULL, "dr_prepare_for_call: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_prepare_for_call: drcontext is invalid");
    /* NULL cci: full default context save; encode at the regular vmcode area. */
    return prepare_for_call_ex((dcontext_t *)drcontext, NULL, ilist, where,
                               vmcode_get_start());
}
DR_API void
/* Public counterpart to dr_prepare_for_call(): restores the app context and
 * pops a caller-specified parameter area of sizeof_param_area bytes.
 */
dr_cleanup_after_call(void *drcontext, instrlist_t *ilist, instr_t *where,
                      uint sizeof_param_area)
{
    CLIENT_ASSERT(drcontext != NULL, "dr_cleanup_after_call: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_cleanup_after_call: drcontext is invalid");
    cleanup_after_call_ex((dcontext_t *)drcontext, NULL, ilist, where,
                          sizeof_param_area, vmcode_get_start());
}
#ifdef CLIENT_INTERFACE
DR_API void
/* Inserts instructions that save the app's XSP into the mcontext and switch
 * XSP to DR's per-thread dstack.  Pair with dr_restore_app_stack().
 */
dr_swap_to_clean_stack(void *drcontext, instrlist_t *ilist, instr_t *where)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL, "dr_swap_to_clean_stack: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_swap_to_clean_stack: drcontext is invalid");
    /* PR 219620: For thread-shared, we need to get the dcontext
     * dynamically rather than use the constant passed in here.
     */
    if (SCRATCH_ALWAYS_TLS()) {
        /* Borrow SCRATCH_REG0 (spilled to TLS) to reach the dcontext. */
        MINSERT(ilist, where, instr_create_save_to_tls
                (dcontext, SCRATCH_REG0, TLS_REG0_SLOT));
        insert_get_mcontext_base(dcontext, ilist, where, SCRATCH_REG0);
        /* save app xsp, and then bring in dstack to xsp */
        MINSERT(ilist, where, instr_create_save_to_dc_via_reg
                (dcontext, SCRATCH_REG0, REG_XSP, XSP_OFFSET));
        /* DSTACK_OFFSET isn't within the upcontext so if it's separate this won't
         * work right. FIXME - the dcontext accessing routines are a mess of shared
         * vs. no shared support, separate context vs. no separate context support etc. */
        ASSERT_NOT_IMPLEMENTED(!TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask));
        MINSERT(ilist, where, instr_create_restore_from_dc_via_reg
                (dcontext, SCRATCH_REG0, REG_XSP, DSTACK_OFFSET));
        MINSERT(ilist, where, instr_create_restore_from_tls
                (dcontext, SCRATCH_REG0, TLS_REG0_SLOT));
    }
    else {
        MINSERT(ilist, where, instr_create_save_to_dcontext
                (dcontext, REG_XSP, XSP_OFFSET));
        MINSERT(ilist, where, instr_create_restore_dynamo_stack(dcontext));
    }
}
DR_API void
/* Inserts instructions that restore the app's XSP from the mcontext slot
 * saved by dr_swap_to_clean_stack().
 */
dr_restore_app_stack(void *drcontext, instrlist_t *ilist, instr_t *where)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL, "dr_restore_app_stack: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_restore_app_stack: drcontext is invalid");
    /* restore stack */
    if (SCRATCH_ALWAYS_TLS()) {
        /* use the register we're about to clobber as scratch space */
        insert_get_mcontext_base(dcontext, ilist, where, REG_XSP);
        MINSERT(ilist, where, instr_create_restore_from_dc_via_reg
                (dcontext, REG_XSP, REG_XSP, XSP_OFFSET));
    } else {
        MINSERT(ilist, where, instr_create_restore_from_dcontext
                (dcontext, REG_XSP, XSP_OFFSET));
    }
}
/* Spill-slot layout: the first NUM_TLS_SPILL_SLOTS client slots live in raw
 * TLS; the remainder map onto GPR slots in the mcontext.
 */
#define SPILL_SLOT_TLS_MAX 2
#define NUM_TLS_SPILL_SLOTS (SPILL_SLOT_TLS_MAX + 1)
#define NUM_SPILL_SLOTS (SPILL_SLOT_MAX + 1)
/* The three tls slots we make available to clients. We reserve TLS_REG0_SLOT for our
 * own use in dr convenience routines. Note the +1 is because the max is an array index
 * (so zero based) while array size is number of slots. We don't need to +1 in
 * SPILL_SLOT_MC_REG because subtracting SPILL_SLOT_TLS_MAX already accounts for it. */
static const ushort SPILL_SLOT_TLS_OFFS[NUM_TLS_SPILL_SLOTS] =
    { TLS_REG3_SLOT, TLS_REG2_SLOT, TLS_REG1_SLOT };
static const reg_id_t SPILL_SLOT_MC_REG[NUM_SPILL_SLOTS - NUM_TLS_SPILL_SLOTS] = {
#ifdef X86
    /* The dcontext reg slots we make available to clients. We reserve XAX and XSP for
     * our own use in dr convenience routines. */
# ifdef X64
    REG_R15, REG_R14, REG_R13, REG_R12, REG_R11, REG_R10, REG_R9, REG_R8,
# endif
    REG_XDI, REG_XSI, REG_XBP, REG_XDX, REG_XCX, REG_XBX
#elif defined(AARCHXX)
    /* DR_REG_R0 is not used here. See prepare_for_clean_call. */
    DR_REG_R6, DR_REG_R5, DR_REG_R4, DR_REG_R3, DR_REG_R2, DR_REG_R1
#endif /* X86/ARM */
};
DR_API void
/* Inserts instructions that spill pointer-sized register "reg" into client
 * spill slot "slot": TLS slots get a direct store; mcontext slots go through
 * the dynamically-obtained dcontext.
 */
dr_save_reg(void *drcontext, instrlist_t *ilist, instr_t *where, reg_id_t reg,
            dr_spill_slot_t slot)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL, "dr_save_reg: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_save_reg: drcontext is invalid");
    CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
                  "dr_save_reg: invalid spill slot selection");
    CLIENT_ASSERT(reg_is_pointer_sized(reg),
                  "dr_save_reg requires pointer-sized gpr");
    if (slot <= SPILL_SLOT_TLS_MAX) {
        ushort offs = os_tls_offset(SPILL_SLOT_TLS_OFFS[slot]);
        MINSERT(ilist, where,
                XINST_CREATE_store(dcontext, opnd_create_tls_slot(offs),
                                   opnd_create_reg(reg)));
    } else {
        reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS];
        int offs = opnd_get_reg_dcontext_offs(reg_slot);
        if (SCRATCH_ALWAYS_TLS()) {
            /* PR 219620: For thread-shared, we need to get the dcontext
             * dynamically rather than use the constant passed in here.
             */
            /* Pick a scratch register distinct from the one being saved. */
            reg_id_t tmp = (reg == SCRATCH_REG0) ? SCRATCH_REG1 : SCRATCH_REG0;
            MINSERT(ilist, where, instr_create_save_to_tls
                    (dcontext, tmp, TLS_REG0_SLOT));
            insert_get_mcontext_base(dcontext, ilist, where, tmp);
            MINSERT(ilist, where, instr_create_save_to_dc_via_reg
                    (dcontext, tmp, reg, offs));
            MINSERT(ilist, where, instr_create_restore_from_tls
                    (dcontext, tmp, TLS_REG0_SLOT));
        } else {
            MINSERT(ilist, where, instr_create_save_to_dcontext(dcontext, reg, offs));
        }
    }
}
/* if want to save 8 or 16-bit reg, must pass in containing ptr-sized reg! */
DR_API void
/* Inserts instructions that reload pointer-sized register "reg" from client
 * spill slot "slot" (counterpart of dr_save_reg()).
 */
dr_restore_reg(void *drcontext, instrlist_t *ilist, instr_t *where, reg_id_t reg,
               dr_spill_slot_t slot)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL, "dr_restore_reg: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_restore_reg: drcontext is invalid");
    CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
                  "dr_restore_reg: invalid spill slot selection");
    CLIENT_ASSERT(reg_is_pointer_sized(reg),
                  "dr_restore_reg requires a pointer-sized gpr");
    if (slot <= SPILL_SLOT_TLS_MAX) {
        ushort offs = os_tls_offset(SPILL_SLOT_TLS_OFFS[slot]);
        MINSERT(ilist, where,
                XINST_CREATE_load(dcontext, opnd_create_reg(reg),
                                  opnd_create_tls_slot(offs)));
    } else {
        reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS];
        int offs = opnd_get_reg_dcontext_offs(reg_slot);
        if (SCRATCH_ALWAYS_TLS()) {
            /* PR 219620: For thread-shared, we need to get the dcontext
             * dynamically rather than use the constant passed in here.
             */
            /* use the register we're about to clobber as scratch space */
            insert_get_mcontext_base(dcontext, ilist, where, reg);
            MINSERT(ilist, where, instr_create_restore_from_dc_via_reg
                    (dcontext, reg, reg, offs));
        } else {
            MINSERT(ilist, where,
                    instr_create_restore_from_dcontext(dcontext, reg, offs));
        }
    }
}
DR_API dr_spill_slot_t
/* Returns the highest spill slot reachable via an opnd_t: only the TLS
 * slots when scratch space is always TLS, otherwise all slots.
 */
dr_max_opnd_accessible_spill_slot()
{
    return SCRATCH_ALWAYS_TLS() ? SPILL_SLOT_TLS_MAX : SPILL_SLOT_MAX;
}
/* creates an opnd to access spill slot slot, slot must be <=
 * dr_max_opnd_accessible_spill_slot() */
opnd_t
reg_spill_slot_opnd(dcontext_t *dcontext, dr_spill_slot_t slot)
{
    if (slot <= SPILL_SLOT_TLS_MAX) {
        /* TLS slot: addressable directly as a tls_slot operand. */
        ushort offs = os_tls_offset(SPILL_SLOT_TLS_OFFS[slot]);
        return opnd_create_tls_slot(offs);
    } else {
        /* Mcontext slot: only addressable when the dcontext is at a fixed
         * address (non-TLS scratch), hence the assert below.
         */
        reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS];
        int offs = opnd_get_reg_dcontext_offs(reg_slot);
        ASSERT(!SCRATCH_ALWAYS_TLS()); /* client assert above should catch */
        return opnd_create_dcontext_field(dcontext, offs);
    }
}
DR_API
/* Public wrapper around reg_spill_slot_opnd() with parameter validation. */
opnd_t
dr_reg_spill_slot_opnd(void *drcontext, dr_spill_slot_t slot)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL, "dr_reg_spill_slot_opnd: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_reg_spill_slot_opnd: drcontext is invalid");
    CLIENT_ASSERT(slot <= dr_max_opnd_accessible_spill_slot(),
                  "dr_reg_spill_slot_opnd: slot must be less than "
                  "dr_max_opnd_accessible_spill_slot()");
    return reg_spill_slot_opnd(dcontext, slot);
}
DR_API
/* used to read a saved register spill slot from a clean call or a restore_state_event */
reg_t
dr_read_saved_reg(void *drcontext, dr_spill_slot_t slot)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    CLIENT_ASSERT(drcontext != NULL, "dr_read_saved_reg: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_read_saved_reg: drcontext is invalid");
    CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
                  "dr_read_saved_reg: invalid spill slot selection");
    /* We do allow drcontext to not belong to the current thread, for state restoration
     * during synchall and other scenarios.
     */
    if (slot <= SPILL_SLOT_TLS_MAX) {
        /* TLS slot: read straight from the thread's spill space. */
        ushort offs = SPILL_SLOT_TLS_OFFS[slot];
        return *(reg_t *)(((byte *)&dcontext->local_state->spill_space) + offs);
    } else {
        /* Mcontext slot: read the corresponding GPR slot from the mcontext. */
        reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS];
        return reg_get_value_priv(reg_slot, get_mcontext(dcontext));
    }
}
DR_API
/* used to write a saved register spill slot from a clean call */
void
dr_write_saved_reg(void *drcontext, dr_spill_slot_t slot, reg_t value)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    CLIENT_ASSERT(drcontext != NULL, "dr_write_saved_reg: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_write_saved_reg: drcontext is invalid");
    CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
                  "dr_write_saved_reg: invalid spill slot selection");
    /* We do allow drcontext to not belong to the current thread, for state restoration
     * during synchall and other scenarios.
     */
    if (slot <= SPILL_SLOT_TLS_MAX) {
        /* TLS slot: write straight into the thread's spill space. */
        ushort offs = SPILL_SLOT_TLS_OFFS[slot];
        *(reg_t *)(((byte *)&dcontext->local_state->spill_space) + offs) = value;
    } else {
        /* Mcontext slot: write the corresponding GPR slot in the mcontext. */
        reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS];
        reg_set_value_priv(reg_slot, get_mcontext(dcontext), value);
    }
}
DR_API
/**
 * Inserts into ilist prior to "where" instruction(s) to read into the
 * general-purpose full-size register reg from the user-controlled drcontext
 * field for this thread.
 */
void
dr_insert_read_tls_field(void *drcontext, instrlist_t *ilist, instr_t *where,
                         reg_id_t reg)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_insert_read_tls_field: drcontext cannot be NULL");
    CLIENT_ASSERT(reg_is_pointer_sized(reg),
                  "must use a pointer-sized general-purpose register");
    if (SCRATCH_ALWAYS_TLS()) {
        /* For thread-shared, since reg must be general-purpose we can
         * use it as a base pointer (repeatedly). Plus it's already dead.
         */
        MINSERT(ilist, where, instr_create_restore_from_tls
                (dcontext, reg, TLS_DCONTEXT_SLOT));
        MINSERT(ilist, where, instr_create_restore_from_dc_via_reg
                (dcontext, reg, reg, CLIENT_DATA_OFFSET));
        MINSERT(ilist, where, XINST_CREATE_load
                (dcontext, opnd_create_reg(reg),
                 OPND_CREATE_MEMPTR(reg, offsetof(client_data_t, user_field))));
    } else {
        /* Thread-private: the field's address is a compile-time constant. */
        MINSERT(ilist, where, XINST_CREATE_load
                (dcontext, opnd_create_reg(reg),
                 OPND_CREATE_ABSMEM(&dcontext->client_data->user_field, OPSZ_PTR)));
    }
}
DR_API
/**
 * Inserts into ilist prior to "where" instruction(s) to write the
 * general-purpose full-size register reg to the user-controlled drcontext field
 * for this thread.
 */
void
dr_insert_write_tls_field(void *drcontext, instrlist_t *ilist, instr_t *where,
                          reg_id_t reg)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_insert_write_tls_field: drcontext cannot be NULL");
    CLIENT_ASSERT(reg_is_pointer_sized(reg),
                  "must use a pointer-sized general-purpose register");
    if (SCRATCH_ALWAYS_TLS()) {
        /* Need a scratch base register distinct from the value register. */
        reg_id_t spill = SCRATCH_REG0;
        if (reg == spill) /* don't need sub-reg test b/c we know it's pointer-sized */
            spill = SCRATCH_REG1;
        MINSERT(ilist, where, instr_create_save_to_tls(dcontext, spill, TLS_REG0_SLOT));
        MINSERT(ilist, where, instr_create_restore_from_tls
                (dcontext, spill, TLS_DCONTEXT_SLOT));
        MINSERT(ilist, where, instr_create_restore_from_dc_via_reg
                (dcontext, spill, spill, CLIENT_DATA_OFFSET));
        MINSERT(ilist, where, XINST_CREATE_store
                (dcontext, OPND_CREATE_MEMPTR(spill,
                                              offsetof(client_data_t, user_field)),
                 opnd_create_reg(reg)));
        MINSERT(ilist, where,
                instr_create_restore_from_tls(dcontext, spill, TLS_REG0_SLOT));
    } else {
        /* Thread-private: the field's address is a compile-time constant. */
        MINSERT(ilist, where, XINST_CREATE_store
                (dcontext, OPND_CREATE_ABSMEM
                 (&dcontext->client_data->user_field, OPSZ_PTR),
                 opnd_create_reg(reg)));
    }
}
DR_API void
/* Saves the arithmetic flags by first spilling xax to "slot" and then
 * capturing the flags into xax (x86 only).
 */
dr_save_arith_flags(void *drcontext, instrlist_t *ilist, instr_t *where,
                    dr_spill_slot_t slot)
{
    reg_id_t reg = IF_X86_ELSE(DR_REG_XAX, DR_REG_R0);
    CLIENT_ASSERT(IF_X86_ELSE(true, false), "X86-only");
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_save_arith_flags: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_save_arith_flags: drcontext is invalid");
    CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
                  "dr_save_arith_flags: invalid spill slot selection");
    dr_save_reg(drcontext, ilist, where, reg, slot);
    dr_save_arith_flags_to_reg(drcontext, ilist, where, reg);
}
DR_API void
/* Restores the arithmetic flags from xax and then reloads xax from "slot"
 * (reverse order of dr_save_arith_flags(); x86 only).
 */
dr_restore_arith_flags(void *drcontext, instrlist_t *ilist, instr_t *where,
                       dr_spill_slot_t slot)
{
    reg_id_t reg = IF_X86_ELSE(DR_REG_XAX, DR_REG_R0);
    CLIENT_ASSERT(IF_X86_ELSE(true, false), "X86-only");
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_restore_arith_flags: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_restore_arith_flags: drcontext is invalid");
    CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
                  "dr_restore_arith_flags: invalid spill slot selection");
    dr_restore_arith_flags_from_reg(drcontext, ilist, where, reg);
    dr_restore_reg(drcontext, ilist, where, reg, slot);
}
DR_API void
/* Deprecated-style x86-only variant: saves arith flags into xax without a
 * spill slot.
 */
dr_save_arith_flags_to_xax(void *drcontext, instrlist_t *ilist, instr_t *where)
{
    reg_id_t reg = IF_X86_ELSE(DR_REG_XAX, DR_REG_R0);
    CLIENT_ASSERT(IF_X86_ELSE(true, false), "X86-only");
    dr_save_arith_flags_to_reg(drcontext, ilist, where, reg);
}
DR_API void
/* Deprecated-style x86-only variant: restores arith flags from xax. */
dr_restore_arith_flags_from_xax(void *drcontext, instrlist_t *ilist,
                                instr_t *where)
{
    reg_id_t reg = IF_X86_ELSE(DR_REG_XAX, DR_REG_R0);
    CLIENT_ASSERT(IF_X86_ELSE(true, false), "X86-only");
    dr_restore_arith_flags_from_reg(drcontext, ilist, where, reg);
}
DR_API void
/* Inserts instructions that capture the arithmetic flags into "reg":
 * lahf+seto on x86 (reg must be xax), mrs on ARM/AArch64.
 */
dr_save_arith_flags_to_reg(void *drcontext, instrlist_t *ilist,
                           instr_t *where, reg_id_t reg)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_save_arith_flags_to_reg: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_save_arith_flags_to_reg: drcontext is invalid");
#ifdef X86
    CLIENT_ASSERT(reg == DR_REG_XAX,
                  "only xax should be used for save arith flags in X86");
    /* flag saving code:
     *   lahf
     *   seto al
     */
    MINSERT(ilist, where, INSTR_CREATE_lahf(dcontext));
    MINSERT(ilist, where,
            INSTR_CREATE_setcc(dcontext, OP_seto, opnd_create_reg(REG_AL)));
#elif defined(ARM)
    /* flag saving code: mrs reg, cpsr */
    MINSERT(ilist, where,
            INSTR_CREATE_mrs(dcontext,
                             opnd_create_reg(reg),
                             opnd_create_reg(DR_REG_CPSR)));
#elif defined(AARCH64)
    /* flag saving code: mrs reg, nzcv */
    MINSERT(ilist, where,
            INSTR_CREATE_mrs(dcontext,
                             opnd_create_reg(reg),
                             opnd_create_reg(DR_REG_NZCV)));
#endif /* X86/ARM/AARCH64 */
}
/* Emits meta-code at "where" that restores the arith flags from "reg"
 * (mirror of dr_save_arith_flags_to_reg()).  On X86 "reg" must be xax and
 * the lahf/seto encoding is undone via add + sahf; on ARM/AArch64 the value
 * is written back to the status register.
 */
DR_API void
dr_restore_arith_flags_from_reg(void *drcontext, instrlist_t *ilist,
                                instr_t *where, reg_id_t reg)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_restore_arith_flags_from_reg: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_restore_arith_flags_from_reg: drcontext is invalid");
#ifdef X86
    CLIENT_ASSERT(reg == DR_REG_XAX,
                  "only xax should be used for save arith flags in X86");
    /* flag restoring code:
     * add 0x7f,%al
     * sahf
     */
    /* do an add such that OF will be set only if seto set
     * the MSB of saveto to 1
     */
    MINSERT(ilist, where,
            INSTR_CREATE_add(dcontext, opnd_create_reg(REG_AL), OPND_CREATE_INT8(0x7f)));
    MINSERT(ilist, where, INSTR_CREATE_sahf(dcontext));
#elif defined(ARM)
    /* flag restoring code: msr apsr_nzcvqg, reg */
    MINSERT(ilist, where,
            INSTR_CREATE_msr(dcontext, opnd_create_reg(DR_REG_CPSR),
                             OPND_CREATE_INT_MSR_NZCVQG(),
                             opnd_create_reg(reg)));
#elif defined(AARCH64)
    /* flag restoring code: msr nzcv, reg */
    MINSERT(ilist, where,
            INSTR_CREATE_msr(dcontext,
                             opnd_create_reg(DR_REG_NZCV),
                             opnd_create_reg(reg)));
#endif /* X86/ARM/AARCH64 */
}
/* providing functionality of old -instr_calls and -instr_branches flags
 *
 * NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
 * dr_insert_clean_call(). We guarantee to clients that all other slots
 * (except the XAX mcontext slot) will remain untouched.
 */
/* Inserts a clean call to "callee" before the call/ubr "instr", passing two
 * args: the app address of the branch and its statically-known target.
 * The target operand must be a near pc or a near instr; far targets are
 * asserted against (debug build) and handled best-effort in release build.
 */
DR_API void
dr_insert_call_instrumentation(void *drcontext, instrlist_t *ilist, instr_t *instr,
                               void *callee)
{
    ptr_uint_t target, address;
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_insert_call_instrumentation: drcontext cannot be NULL");
    address = (ptr_uint_t) instr_get_translation(instr);
    /* dr_insert_ubr_instrumentation() uses this function */
    CLIENT_ASSERT(instr_is_call(instr) || instr_is_ubr(instr),
                  "dr_insert_{ubr,call}_instrumentation must be applied to a ubr");
    CLIENT_ASSERT(address != 0,
                  "dr_insert_{ubr,call}_instrumentation: can't determine app address");
    if (opnd_is_pc(instr_get_target(instr))) {
        if (opnd_is_far_pc(instr_get_target(instr))) {
            /* FIXME: handle far pc */
            CLIENT_ASSERT(false,
                          "dr_insert_{ubr,call}_instrumentation: far pc not supported");
        }
        /* In release build for far pc keep going assuming 0 base */
        target = (ptr_uint_t) opnd_get_pc(instr_get_target(instr));
    }
    else if (opnd_is_instr(instr_get_target(instr))) {
        /* target is another instr in this ilist: use its app translation */
        instr_t *tgt = opnd_get_instr(instr_get_target(instr));
        target = (ptr_uint_t) instr_get_translation(tgt);
        CLIENT_ASSERT(target != 0,
                      "dr_insert_{ubr,call}_instrumentation: unknown target");
        if (opnd_is_far_instr(instr_get_target(instr))) {
            /* FIXME: handle far instr */
            CLIENT_ASSERT(false, "dr_insert_{ubr,call}_instrumentation: far instr "
                          "not supported");
        }
    } else {
        CLIENT_ASSERT(false, "dr_insert_{ubr,call}_instrumentation: unknown target");
        target = 0;
    }
    dr_insert_clean_call(drcontext, ilist, instr, callee, false/*no fpstate*/, 2,
                         /* address of call is 1st parameter */
                         OPND_CREATE_INTPTR(address),
                         /* call target is 2nd parameter */
                         OPND_CREATE_INTPTR(target));
}
/* NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
 * dr_insert_clean_call(). We guarantee to clients that all other slots
 * (except the XAX mcontext slot) will remain untouched. Since we need another
 * tls spill slot in this routine we require the caller to give us one. */
/* Inserts a clean call to "callee" before the mbr "instr", passing the app
 * address of the mbr plus its dynamically-computed indirect target.  The
 * target is loaded into an unused scratch register and exchanged through
 * the caller-supplied, opnd-accessible TLS slot.  X86-only: NYI on ARM.
 */
DR_API void
dr_insert_mbr_instrumentation(void *drcontext, instrlist_t *ilist, instr_t *instr,
                              void *callee, dr_spill_slot_t scratch_slot)
{
#ifdef X86
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    ptr_uint_t address = (ptr_uint_t) instr_get_translation(instr);
    opnd_t tls_opnd;
    instr_t *newinst;
    reg_id_t reg_target;
    /* PR 214051: dr_insert_mbr_instrumentation() broken with -indcall2direct */
    CLIENT_ASSERT(!DYNAMO_OPTION(indcall2direct),
                  "dr_insert_mbr_instrumentation not supported with -opt_speed");
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_insert_mbr_instrumentation: drcontext cannot be NULL");
    CLIENT_ASSERT(address != 0,
                  "dr_insert_mbr_instrumentation: can't determine app address");
    CLIENT_ASSERT(instr_is_mbr(instr),
                  "dr_insert_mbr_instrumentation must be applied to an mbr");
    /* We need a TLS spill slot to use. We can use any tls slot that is opnd
     * accessible. */
    CLIENT_ASSERT(scratch_slot <= dr_max_opnd_accessible_spill_slot(),
                  "dr_insert_mbr_instrumentation: scratch_slot must be less than "
                  "dr_max_opnd_accessible_spill_slot()");
    /* It is possible for mbr instruction to use XCX register, so we have
     * to use an unused register: pick the first of xax/xcx/xdx/xbx that the
     * mbr does not reference.
     */
    for (reg_target = REG_XAX; reg_target <= REG_XBX; reg_target++) {
        if (!instr_uses_reg(instr, reg_target))
            break;
    }
    /* PR 240265: we disallow clients to add post-mbr instrumentation, so we
     * avoid doing that here even though it's a little less efficient since
     * our mbr mangling will re-grab the target.
     * We could keep it post-mbr and mark it w/ a special flag so we allow
     * our own but not clients' instrumentation post-mbr: but then we
     * hit post-syscall issues for wow64 where post-mbr equals post-syscall
     * (PR 240258: though we might solve that some other way).
     */
    /* Note that since we're using a client exposed slot we know it will be
     * preserved across the clean call. */
    tls_opnd = dr_reg_spill_slot_opnd(drcontext, scratch_slot);
    newinst = XINST_CREATE_store(dcontext, tls_opnd, opnd_create_reg(reg_target));
    /* PR 214962: ensure we'll properly translate the de-ref of app
     * memory by marking the spill and de-ref as INSTR_OUR_MANGLING.
     */
    instr_set_our_mangling(newinst, true);
    MINSERT(ilist, instr, newinst);
    if (instr_is_return(instr)) {
        /* the retaddr operand is always the final source for all OP_ret* instrs */
        opnd_t retaddr = instr_get_src(instr, instr_num_srcs(instr) - 1);
        opnd_size_t sz = opnd_get_size(retaddr);
        /* Even for far ret and iret, retaddr is at TOS
         * but operand size needs to be set to stack size
         * since iret pops more than return address.
         */
        opnd_set_size(&retaddr, OPSZ_STACK);
        newinst = instr_create_1dst_1src(dcontext, sz == OPSZ_2 ? OP_movzx : OP_mov_ld,
                                         opnd_create_reg(reg_target), retaddr);
    } else {
        /* call* or jmp* */
        opnd_t src = instr_get_src(instr, 0);
        opnd_size_t sz = opnd_get_size(src);
        /* if a far cti, we can't fit it into a register: asserted above.
         * in release build we'll get just the address here.
         */
        if (instr_is_far_cti(instr)) {
            if (sz == OPSZ_10) {
                sz = OPSZ_8;
            } else if (sz == OPSZ_6) {
                sz = OPSZ_4;
# ifdef X64
                reg_target = reg_64_to_32(reg_target);
# endif
            } else /* target has OPSZ_4 */ {
                sz = OPSZ_2;
            }
            opnd_set_size(&src, sz);
        }
# ifdef UNIX
        /* xref i#1834 the problem with fs and gs segment is a general problem
         * on linux, this fix is specific for mbr_instrumentation, but a general
         * solution is needed.
         */
        if (INTERNAL_OPTION(mangle_app_seg) && opnd_is_far_base_disp(src)) {
            src = mangle_seg_ref_opnd(dcontext, ilist, instr, src, reg_target);
        }
# endif
        newinst = instr_create_1dst_1src(dcontext,
                                         sz == OPSZ_2 ? OP_movzx : OP_mov_ld,
                                         opnd_create_reg(reg_target), src);
    }
    instr_set_our_mangling(newinst, true);
    MINSERT(ilist, instr, newinst);
    /* Now we want the true app state saved, for dr_get_mcontext().
     * We specially recognize our OP_xchg as a restore in
     * instr_is_reg_spill_or_restore().
     */
    MINSERT(ilist, instr,
            INSTR_CREATE_xchg(dcontext, tls_opnd, opnd_create_reg(reg_target)));
    dr_insert_clean_call(drcontext, ilist, instr, callee, false/*no fpstate*/, 2,
                         /* address of mbr is 1st param */
                         OPND_CREATE_INTPTR(address),
                         /* indirect target (in tls, xchg-d from ecx) is 2nd param */
                         tls_opnd);
#elif defined (ARM)
    /* i#1551: NYI on ARM.
     * Also, we may want to split these out into arch/{x86,arm}/ files
     */
    ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
}
/* NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
 * dr_insert_clean_call(). We guarantee to clients that all other slots
 * (except the XAX mcontext slot) will remain untouched.
 *
 * NOTE : this routine has assumption about the layout of the clean call,
 * so any change to clean call instrumentation layout may break this routine.
 */
/* Shared worker for dr_insert_cbr_instrumentation{,_ex}(): inserts a clean
 * call to "callee" before the cbr "instr", passing the app address, the
 * taken target, optionally the fall-through address and user_data, and the
 * dynamically-computed branch direction (materialized into ebx before the
 * clean call clobbers eflags).  X86-only: NYI on ARM.
 */
static void
dr_insert_cbr_instrumentation_help(void *drcontext, instrlist_t *ilist, instr_t *instr,
                                   void *callee, bool has_fallthrough, opnd_t user_data)
{
#ifdef X86
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    ptr_uint_t address, target;
    int opc;
    instr_t *app_flags_ok;
    bool out_of_line_switch = false; /* removed stray 2nd semicolon */
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_insert_cbr_instrumentation: drcontext cannot be NULL");
    address = (ptr_uint_t) instr_get_translation(instr);
    CLIENT_ASSERT(address != 0,
                  "dr_insert_cbr_instrumentation: can't determine app address");
    CLIENT_ASSERT(instr_is_cbr(instr),
                  "dr_insert_cbr_instrumentation must be applied to a cbr");
    CLIENT_ASSERT(opnd_is_near_pc(instr_get_target(instr)) ||
                  opnd_is_near_instr(instr_get_target(instr)),
                  "dr_insert_cbr_instrumentation: target opnd must be a near pc or "
                  "near instr");
    if (opnd_is_near_pc(instr_get_target(instr)))
        target = (ptr_uint_t) opnd_get_pc(instr_get_target(instr));
    else if (opnd_is_near_instr(instr_get_target(instr))) {
        instr_t *tgt = opnd_get_instr(instr_get_target(instr));
        target = (ptr_uint_t) instr_get_translation(tgt);
        CLIENT_ASSERT(target != 0, "dr_insert_cbr_instrumentation: unknown target");
    } else {
        CLIENT_ASSERT(false, "dr_insert_cbr_instrumentation: unknown target");
        target = 0;
    }
    app_flags_ok = instr_get_prev(instr);
    if (has_fallthrough) {
        ptr_uint_t fallthrough = address + instr_length(drcontext, instr);
        CLIENT_ASSERT(!opnd_uses_reg(user_data, DR_REG_XBX),
                      "register ebx should not be used");
        CLIENT_ASSERT(fallthrough > address, "wrong fallthrough address");
        dr_insert_clean_call(drcontext, ilist, instr, callee, false/*no fpstate*/, 5,
                             /* push address of mbr onto stack as 1st parameter */
                             OPND_CREATE_INTPTR(address),
                             /* target is 2nd parameter */
                             OPND_CREATE_INTPTR(target),
                             /* fall-through is 3rd parameter */
                             OPND_CREATE_INTPTR(fallthrough),
                             /* branch direction (put in ebx below) is 4th parameter */
                             opnd_create_reg(REG_XBX),
                             /* user defined data is 5th parameter */
                             opnd_is_null(user_data) ? OPND_CREATE_INT32(0) : user_data);
    } else {
        dr_insert_clean_call(drcontext, ilist, instr, callee, false/*no fpstate*/, 3,
                             /* push address of mbr onto stack as 1st parameter */
                             OPND_CREATE_INTPTR(address),
                             /* target is 2nd parameter */
                             OPND_CREATE_INTPTR(target),
                             /* branch direction (put in ebx below) is 3rd parameter */
                             opnd_create_reg(REG_XBX));
    }
    /* calculate whether branch taken or not
     * since the clean call mechanism clobbers eflags, we
     * must insert our checks prior to that clobbering.
     * since we do it AFTER the pusha, we don't have to save; but, we
     * can't use a param that's part of any calling convention b/c w/
     * PR 250976 our clean call will get it from the pusha.
     * ebx is a good choice.
     */
    /* We expect:
       mov    0x400e5e34 -> %esp
       pusha  %esp %eax %ebx %ecx %edx %ebp %esi %edi -> %esp (%esp)
       pushf  %esp -> %esp (%esp)
       push   $0x00000000 %esp -> %esp (%esp)
       popf   %esp (%esp) -> %esp
       mov    0x400e5e40 -> %eax
       push   %eax %esp -> %esp (%esp)
     * We also assume all clean call instrs are expanded.
     */
    /* Because the clean call might be optimized, we cannot assume the sequence.
     * We assume that the clean call will not be inlined for having more than one
     * arguments, so we scan to find either a call instr or a popf.
     * if a popf, do as before.
     * if a call, move back to right before push xbx or mov rbx => r3.
     */
    if (app_flags_ok == NULL)
        app_flags_ok = instrlist_first(ilist);
    /* r2065 added out-of-line clean call context switch, so we need to check
     * how the context switch code is inserted.
     */
    while (!instr_opcode_valid(app_flags_ok) ||
           instr_get_opcode(app_flags_ok) != OP_call) {
        app_flags_ok = instr_get_next(app_flags_ok);
        CLIENT_ASSERT(app_flags_ok != NULL,
                      "dr_insert_cbr_instrumentation: cannot find call instr");
        if (instr_get_opcode(app_flags_ok) == OP_popf)
            break;
    }
    if (instr_get_opcode(app_flags_ok) == OP_call) {
        if (opnd_get_pc(instr_get_target(app_flags_ok)) == (app_pc)callee) {
            /* call to clean callee
             * move a few instrs back till right before push xbx, or mov rbx => r3
             */
            while (app_flags_ok != NULL) {
                if (instr_reg_in_src(app_flags_ok, DR_REG_XBX))
                    break;
                app_flags_ok = instr_get_prev(app_flags_ok);
            }
        } else {
            /* call to clean call context save */
            ASSERT(opnd_get_pc(instr_get_target(app_flags_ok)) ==
                   get_clean_call_save(dcontext _IF_X64(GENCODE_X64)));
            out_of_line_switch = true;
        }
        ASSERT(app_flags_ok != NULL);
    }
    /* i#1155: for out-of-line context switch
     * we insert two parts of code to setup "taken" arg for clean call:
     * - compute "taken" and put it onto the stack right before call to context
     *   save, where DR already swapped stack and adjusted xsp to point beyond
     *   mcontext plus temp stack size.
     *   It is 2 slots away b/c 1st is retaddr.
     * - move the "taken" from stack to ebx to compatible with existing code
     *   right after context save returns and before arg setup, where xsp
     *   points beyond mcontext (xref emit_clean_call_save).
     *   It is 2 slots + temp stack size away.
     * XXX: we could optimize the code by computing "taken" after clean call
     * save if the eflags are not cleared.
     */
    /* put our code before the popf or use of xbx */
    opc = instr_get_opcode(instr);
    if (opc == OP_jecxz || opc == OP_loop || opc == OP_loope || opc == OP_loopne) {
        /* for 8-bit cbrs w/ multiple conditions and state, simpler to
         * simply execute them -- they're rare so shouldn't be a perf hit.
         * after all, ecx is saved, can clobber it.
         * we do:
         *               loop/jecxz taken
         *    not_taken: mov 0, ebx
         *               jmp done
         *    taken:     mov 1, ebx
         *    done:
         */
        opnd_t opnd_taken = out_of_line_switch ?
            /* 2 slots away from xsp, xref comment above for i#1155 */
            OPND_CREATE_MEM32(REG_XSP, -2*(int)XSP_SZ /* ret+taken */) :
            opnd_create_reg(REG_EBX);
        instr_t *branch = instr_clone(dcontext, instr);
        instr_t *not_taken =
            INSTR_CREATE_mov_imm(dcontext, opnd_taken,
                                 OPND_CREATE_INT32(0));
        instr_t *taken =
            INSTR_CREATE_mov_imm(dcontext, opnd_taken,
                                 OPND_CREATE_INT32(1));
        instr_t *done = INSTR_CREATE_label(dcontext);
        instr_set_target(branch, opnd_create_instr(taken));
        /* client-added meta instrs should not have translation set */
        instr_set_translation(branch, NULL);
        MINSERT(ilist, app_flags_ok, branch);
        MINSERT(ilist, app_flags_ok, not_taken);
        MINSERT(ilist, app_flags_ok,
                INSTR_CREATE_jmp_short(dcontext, opnd_create_instr(done)));
        MINSERT(ilist, app_flags_ok, taken);
        MINSERT(ilist, app_flags_ok, done);
        if (out_of_line_switch) {
            if (opc == OP_loop || opc == OP_loope || opc == OP_loopne) {
                /* We executed OP_loop* before we saved xcx, so we must restore
                 * it. We should be able to use OP_lea b/c OP_loop* uses
                 * addr prefix to shrink pointer-sized xcx, not data prefix.
                 */
                reg_id_t xcx = opnd_get_reg(instr_get_dst(instr, 0));
                MINSERT(ilist, app_flags_ok, INSTR_CREATE_lea
                        (dcontext, opnd_create_reg(xcx),
                         opnd_create_base_disp(xcx, DR_REG_NULL, 0, 1, OPSZ_lea)));
            }
            ASSERT(instr_get_opcode(app_flags_ok) == OP_call);
            /* 2 slots + temp_stack_size away from xsp,
             * xref comment above for i#1155
             */
            opnd_taken = OPND_CREATE_MEM32
                (REG_XSP, -2*(int)XSP_SZ-get_clean_call_temp_stack_size());
            MINSERT(ilist, instr_get_next(app_flags_ok),
                    XINST_CREATE_load(dcontext,
                                      opnd_create_reg(REG_EBX),
                                      opnd_taken));
        }
    } else {
        /* build a setcc equivalent of instr's jcc operation
         * WARNING: this relies on order of OP_ enum!
         */
        opnd_t opnd_taken = out_of_line_switch ?
            /* 2 slots away from xsp, xref comment above for i#1155 */
            OPND_CREATE_MEM8(REG_XSP, -2*(int)XSP_SZ /* ret+taken */) :
            opnd_create_reg(REG_BL);
        opc = instr_get_opcode(instr);
        if (opc <= OP_jnle_short)
            opc += (OP_jo - OP_jo_short);
        CLIENT_ASSERT(opc >= OP_jo && opc <= OP_jnle,
                      "dr_insert_cbr_instrumentation: unknown opcode");
        opc = opc - OP_jo + OP_seto;
        MINSERT(ilist, app_flags_ok,
                INSTR_CREATE_setcc(dcontext, opc, opnd_taken));
        if (out_of_line_switch) {
            app_flags_ok = instr_get_next(app_flags_ok);
            /* 2 slots + temp_stack_size away from xsp,
             * xref comment above for i#1155
             */
            opnd_taken = OPND_CREATE_MEM8
                (REG_XSP, -2*(int)XSP_SZ-get_clean_call_temp_stack_size());
        }
        /* movzx ebx <- bl */
        MINSERT(ilist, app_flags_ok,
                INSTR_CREATE_movzx(dcontext, opnd_create_reg(REG_EBX),
                                   opnd_taken));
    }
    /* now branch dir is in ebx and will be passed to clean call */
#elif defined (ARM)
    /* i#1551: NYI on ARM */
    ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
}
/* Public entry: cbr instrumentation without fall-through address or user
 * data (3-arg callee: address, target, taken).
 */
DR_API void
dr_insert_cbr_instrumentation(void *drcontext, instrlist_t *ilist, instr_t *instr,
                              void *callee)
{
    dr_insert_cbr_instrumentation_help(drcontext, ilist, instr, callee,
                                       false /* no fallthrough */, opnd_create_null());
}
/* Public entry: extended cbr instrumentation passing the fall-through
 * address and caller-supplied user_data as well (5-arg callee).
 */
DR_API void
dr_insert_cbr_instrumentation_ex(void *drcontext, instrlist_t *ilist, instr_t *instr,
                                 void *callee, opnd_t user_data)
{
    dr_insert_cbr_instrumentation_help(drcontext, ilist, instr, callee,
                                       true /* has fallthrough */, user_data);
}
/* Public entry: ubr instrumentation; semantics are identical to call
 * instrumentation (the shared worker accepts either call or ubr).
 */
DR_API void
dr_insert_ubr_instrumentation(void *drcontext, instrlist_t *ilist, instr_t *instr,
                              void *callee)
{
    /* same as call */
    dr_insert_call_instrumentation(drcontext, ilist, instr, callee);
}
/* This may seem like a pretty targeted API function, but there's no
 * clean way for a client to do this on its own due to DR's
 * restrictions on bb instrumentation (i#782).
 */
/* Requests that, during mangling, the app return address be overwritten with
 * "value" once the return instr "instr" has read it.  Returns true on
 * success; returns false (and does nothing) if "instr" is not a return.
 */
DR_API
bool
dr_clobber_retaddr_after_read(void *drcontext, instrlist_t *ilist, instr_t *instr,
                              ptr_uint_t value)
{
    /* the client could be using note fields so we use a label and xfer to
     * a note field during the mangling pass
     */
    if (instr_is_return(instr)) {
        instr_t *label = INSTR_CREATE_label(drcontext);
        dr_instr_label_data_t *data = instr_get_label_data_area(label);
        /* we could coordinate w/ drmgr and use some reserved note label value
         * but only if we run out of instr flags. so we set to 0 to not
         * overlap w/ any client uses (DRMGR_NOTE_NONE == 0).
         */
        label->note = 0;
        /* these values are read back in mangle() */
        data->data[0] = (ptr_uint_t) instr;
        data->data[1] = value;
        label->flags |= INSTR_CLOBBER_RETADDR;
        instr->flags |= INSTR_CLOBBER_RETADDR;
        instrlist_meta_preinsert(ilist, instr, label);
        return true;
    }
    return false;
}
/* Tells the client whether the xmm fields of dr_mcontext_t are filled in,
 * i.e. whether DR preserves caller-saved xmm registers.
 */
DR_API bool
dr_mcontext_xmm_fields_valid(void)
{
    return preserve_xmm_caller_saved();
}
#endif /* CLIENT_INTERFACE */
/* dr_get_mcontext() needed for translating clean call arg errors */
/* Fills in whichever of dmc or mc is non-NULL.  Exactly one must be NULL
 * (asserted below): internal callers pass a priv_mcontext_t while the
 * client API passes a dr_mcontext_t whose flags select the fields copied.
 */
bool
dr_get_mcontext_priv(dcontext_t *dcontext, dr_mcontext_t *dmc, priv_mcontext_t *mc)
{
    priv_mcontext_t *state;
    CLIENT_ASSERT(!TEST(SELFPROT_DCONTEXT, DYNAMO_OPTION(protect_mask)),
                  "DR context protection NYI");
    if (mc == NULL) {
        CLIENT_ASSERT(dmc != NULL, "invalid context");
        /* catch uses that forget to set size: perhaps in a few releases,
         * when most old clients have been converted, remove this (we'll
         * still return false)
         */
        CLIENT_ASSERT(dmc->size == sizeof(dr_mcontext_t),
                      "dr_mcontext_t.size field not set properly");
        CLIENT_ASSERT(dmc->flags != 0 && (dmc->flags & ~(DR_MC_ALL)) == 0,
                      "dr_mcontext_t.flags field not set properly");
    } else
        CLIENT_ASSERT(dmc == NULL, "invalid internal params");
#ifdef CLIENT_INTERFACE
    /* i#117/PR 395156: support getting mcontext from events where mcontext is
     * stable.  It would be nice to support it from init and 1st thread init,
     * but the mcontext is not available at those points.
     *
     * Since DR calls this routine when recreating state and wants the
     * clean call version, can't distinguish by whereami=DR_WHERE_FCACHE,
     * so we set a flag in the supported events. If client routine
     * crashes and we recreate then we want clean call version anyway
     * so should be ok.  Note that we want in_pre_syscall for other
     * reasons (dr_syscall_set_param() for Windows) so we keep it a
     * separate flag.
     */
    /* no support for init or initial thread init */
    if (!dynamo_initialized)
        return false;
    if (dcontext->client_data->cur_mc != NULL) {
        if (mc != NULL)
            *mc = *dcontext->client_data->cur_mc;
        else if (!priv_mcontext_to_dr_mcontext(dmc, dcontext->client_data->cur_mc))
            return false;
        return true;
    }
    if (!is_os_cxt_ptr_null(dcontext->client_data->os_cxt)) {
        return os_context_to_mcontext(dmc, mc, dcontext->client_data->os_cxt);
    }
    if (dcontext->client_data->suspended) {
        /* A thread suspended by dr_suspend_all_other_threads() has its
         * context translated lazily here.
         * We cache the result in cur_mc to avoid a translation cost next time.
         */
        bool res;
        priv_mcontext_t *mc_xl8;
        if (mc != NULL)
            mc_xl8 = mc;
        else {
            dcontext->client_data->cur_mc = (priv_mcontext_t *)
                heap_alloc(dcontext, sizeof(*dcontext->client_data->cur_mc)
                           HEAPACCT(ACCT_CLIENT));
            /* We'll clear this cache in dr_resume_all_other_threads() */
            mc_xl8 = dcontext->client_data->cur_mc;
        }
        res = thread_get_mcontext(dcontext->thread_record, mc_xl8);
        CLIENT_ASSERT(res, "failed to get mcontext of suspended thread");
        res = translate_mcontext(dcontext->thread_record, mc_xl8,
                                 false/*do not restore memory*/, NULL);
        CLIENT_ASSERT(res, "failed to xl8 mcontext of suspended thread");
        if (mc == NULL && !priv_mcontext_to_dr_mcontext(dmc, mc_xl8))
            return false;
        return true;
    }
    /* PR 207947: support mcontext access from syscall events */
    if (dcontext->client_data->mcontext_in_dcontext ||
        dcontext->client_data->in_pre_syscall ||
        dcontext->client_data->in_post_syscall) {
        if (mc != NULL)
            *mc = *get_mcontext(dcontext);
        else if (!priv_mcontext_to_dr_mcontext(dmc, get_mcontext(dcontext)))
            return false;
        return true;
    }
#endif
    /* dr_prepare_for_call() puts the machine context on the dstack
     * with pusha and pushf, but only fills in xmm values for
     * preserve_xmm_caller_saved(): however, we tell the client that the xmm
     * fields are not valid otherwise.  so, we just have to copy the
     * state from the dstack.
     */
    state = get_priv_mcontext_from_dstack(dcontext);
    if (mc != NULL)
        *mc = *state;
    else if (!priv_mcontext_to_dr_mcontext(dmc, state))
        return false;
    /* esp is a dstack value -- get the app stack's esp from the dcontext */
    if (mc != NULL)
        mc->xsp = get_mcontext(dcontext)->xsp;
    else if (TEST(DR_MC_CONTROL, dmc->flags))
        dmc->xsp = get_mcontext(dcontext)->xsp;
#ifdef ARM
    /* Fix: dmc is NULL whenever mc was passed (asserted above), so we must
     * check mc first to avoid dereferencing a NULL dmc for its flags.
     */
    if (mc != NULL || TEST(DR_MC_INTEGER, dmc->flags)) {
        /* get the stolen register's app value */
        if (mc != NULL)
            set_stolen_reg_val(mc, (reg_t) get_tls(os_tls_offset(TLS_REG_STOLEN_SLOT)));
        else {
            set_stolen_reg_val(dr_mcontext_as_priv_mcontext(dmc),
                               (reg_t) get_tls(os_tls_offset(TLS_REG_STOLEN_SLOT)));
        }
    }
#endif
    /* XXX: should we set the pc field?
     * If we do we'll have to adopt a different solution for i#1685 in our Windows
     * hooks where today we use the pc slot for temp storage.
     */
    return true;
}
/* Client API entry: copies the current machine context into *dmc (the dmc
 * flags select which fields).  Thin wrapper over dr_get_mcontext_priv()
 * with no internal priv_mcontext_t output.
 */
DR_API bool
dr_get_mcontext(void *drcontext, dr_mcontext_t *dmc)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    return dr_get_mcontext_priv(dcontext, dmc, NULL);
}
#ifdef CLIENT_INTERFACE
/* Client API entry: writes *context back into the app machine state, at the
 * same points where dr_get_mcontext() is legal (clean calls, syscall events,
 * events that cache cur_mc or an OS context).  Returns false on failure.
 */
DR_API bool
dr_set_mcontext(void *drcontext, dr_mcontext_t *context)
{
    priv_mcontext_t *state;
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    IF_ARM(reg_t reg_val = 0 /* silence the compiler warning */;)
    CLIENT_ASSERT(!TEST(SELFPROT_DCONTEXT, DYNAMO_OPTION(protect_mask)),
                  "DR context protection NYI");
    CLIENT_ASSERT(context != NULL, "invalid context");
    CLIENT_ASSERT(context->size == sizeof(dr_mcontext_t),
                  "dr_mcontext_t.size field not set properly");
    CLIENT_ASSERT(context->flags != 0 && (context->flags & ~(DR_MC_ALL)) == 0,
                  "dr_mcontext_t.flags field not set properly");
    /* i#117/PR 395156: allow dr_[gs]et_mcontext where accurate */
    /* PR 207947: support mcontext access from syscall events */
    if (dcontext->client_data->mcontext_in_dcontext ||
        dcontext->client_data->in_pre_syscall ||
        dcontext->client_data->in_post_syscall) {
        if (!dr_mcontext_to_priv_mcontext(get_mcontext(dcontext), context))
            return false;
        return true;
    }
    if (dcontext->client_data->cur_mc != NULL) {
        return dr_mcontext_to_priv_mcontext(dcontext->client_data->cur_mc, context);
    }
    if (!is_os_cxt_ptr_null(dcontext->client_data->os_cxt)) {
        /* It would be nice to fail for #DR_XFER_CALLBACK_RETURN but we'd need to
         * store yet more state to do so.  The pc will be ignored, and xsi
         * changes will likely cause crashes.
         */
        return mcontext_to_os_context(dcontext->client_data->os_cxt, context, NULL);
    }
    /* copy the machine context to the dstack area created with
     * dr_prepare_for_call().  note that xmm0-5 copied there
     * will override any save_fpstate xmm values, as desired.
     */
    state = get_priv_mcontext_from_dstack(dcontext);
#ifdef ARM
    if (TEST(DR_MC_INTEGER, context->flags)) {
        /* Set the stolen register's app value in TLS, not on stack (we rely
         * on our stolen reg retaining its value on the stack)
         */
        priv_mcontext_t *mc = dr_mcontext_as_priv_mcontext(context);
        set_tls(os_tls_offset(TLS_REG_STOLEN_SLOT), (void *) get_stolen_reg_val(mc));
        /* save the reg val on the stack to be clobbered by the copy below */
        reg_val = get_stolen_reg_val(state);
    }
#endif
    if (!dr_mcontext_to_priv_mcontext(state, context))
        return false;
#ifdef ARM
    if (TEST(DR_MC_INTEGER, context->flags)) {
        /* restore the reg val on the stack clobbered by the copy above */
        set_stolen_reg_val(state, reg_val);
    }
#endif
    if (TEST(DR_MC_CONTROL, context->flags)) {
        /* esp will be restored from a field in the dcontext */
        get_mcontext(dcontext)->xsp = context->xsp;
    }
    /* XXX: should we support setting the pc field? */
    return true;
}
/* Abandons the current fragment and transfers control to mcontext->pc with
 * the given full machine state (flags must be DR_MC_ALL).  Squashes any
 * trace in progress, fires registered kernel-xfer callbacks, and does not
 * return on success.
 */
DR_API
bool
dr_redirect_execution(dr_mcontext_t *mcontext)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    ASSERT(dcontext != NULL);
    CLIENT_ASSERT(mcontext->size == sizeof(dr_mcontext_t),
                  "dr_mcontext_t.size field not set properly");
    CLIENT_ASSERT(mcontext->flags == DR_MC_ALL,
                  "dr_mcontext_t.flags must be DR_MC_ALL");
    /* PR 352429: squash current trace.
     * FIXME: will clients use this so much that this will be a perf issue?
     * samples/cbr doesn't hit this even at -trace_threshold 1
     */
    if (is_building_trace(dcontext)) {
        LOG(THREAD, LOG_INTERP, 1, "squashing trace-in-progress\n");
        trace_abort(dcontext);
    }
    dcontext->next_tag = canonicalize_pc_target(dcontext, mcontext->pc);
    dcontext->whereami = DR_WHERE_FCACHE;
    set_last_exit(dcontext, (linkstub_t *)get_client_linkstub());
#ifdef CLIENT_INTERFACE
    if (kernel_xfer_callbacks.num > 0) {
        /* This can only be called from a clean call or an exception event.
         * For both of those we can get the current mcontext via dr_get_mcontext()
         * (the latter b/c we explicitly store to cur_mc just for this use case).
         */
        dr_mcontext_t src_dmc;
        src_dmc.size = sizeof(src_dmc);
        src_dmc.flags = DR_MC_CONTROL | DR_MC_INTEGER;
        dr_get_mcontext(dcontext, &src_dmc);
        /* a callback may itself change the target pc, so re-canonicalize */
        if (instrument_kernel_xfer(dcontext, DR_XFER_CLIENT_REDIRECT,
                                   osc_empty, &src_dmc, NULL,
                                   dcontext->next_tag, mcontext->xsp, osc_empty,
                                   dr_mcontext_as_priv_mcontext(mcontext), 0))
            dcontext->next_tag = canonicalize_pc_target(dcontext, mcontext->pc);
    }
#endif
    transfer_to_dispatch(dcontext, dr_mcontext_as_priv_mcontext(mcontext),
                         true/*full_DR_state*/);
    /* on success we won't get here */
    return false;
}
/* Returns the branch target a client should use to transfer control
 * natively via the client ibl xfer entry, or NULL when program shepherding
 * is enabled (feature unavailable there).
 */
DR_API
byte *
dr_redirect_native_target(void *drcontext)
{
#ifdef PROGRAM_SHEPHERDING
    /* This feature is unavail for prog shep b/c of the cross-ib-type pollution,
     * as well as the lack of source tag info when exiting the ibl (i#1150).
     */
    return NULL;
#else
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_redirect_native_target(): drcontext cannot be NULL");
    /* The client has no way to know the mode of our gencode so we set LSB here */
    return PC_AS_JMP_TGT(DEFAULT_ISA_MODE, get_client_ibl_xfer_entry(dcontext));
#endif
}
/***************************************************************************
* ADAPTIVE OPTIMIZATION SUPPORT
* *Note for non owning thread support (i.e. sideline) all methods assume
 * the dcontext valid, the client will have to ensure this with a lock
* on thread_exit!!
*
* *need way for side thread to get a dcontext to use for logging and mem
* alloc, before do that should think more about mem alloc in/for adaptive
* routines
*
* *made local mem alloc by side thread safe (see heap.c)
*
 * *logging not safe if not owning thread?
*/
DR_API
/* Schedules the fragment to be deleted.  Once this call is completed,
 * an existing executing fragment is allowed to complete, but control
 * will not enter the fragment again before it is deleted.
 *
 * NOTE: this comment used to say, "after deletion, control may still
 * reach the fragment by indirect branch.".  We believe this is now only
 * true for shared fragments, which are not currently supported.
 */
bool
dr_delete_fragment(void *drcontext, void *tag)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    fragment_t *f;
    bool deletable = false, waslinking;
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    CLIENT_ASSERT(!SHARED_FRAGMENTS_ENABLED(),
                  "dr_delete_fragment() only valid with -thread_private");
    CLIENT_ASSERT(drcontext != NULL, "dr_delete_fragment(): drcontext cannot be NULL");
    /* i#1989: there's no easy way to get a translation without a proper dcontext */
    CLIENT_ASSERT(!fragment_thread_exited(dcontext),
                  "dr_delete_fragment not supported from the thread exit event");
    if (fragment_thread_exited(dcontext))
        return false;
    waslinking = is_couldbelinking(dcontext);
    if (!waslinking)
        enter_couldbelinking(dcontext, NULL, false);
#ifdef CLIENT_SIDELINE
    /* sideline threads may call this concurrently: serialize on the
     * per-dcontext sideline mutex plus the fragment-delete mutex
     */
    mutex_lock(&(dcontext->client_data->sideline_mutex));
    fragment_get_fragment_delete_mutex(dcontext);
#else
    CLIENT_ASSERT(drcontext == get_thread_private_dcontext(),
                  "dr_delete_fragment(): drcontext does not belong to current thread");
#endif
    f = fragment_lookup(dcontext, tag);
    if (f != NULL && (f->flags & FRAG_CANNOT_DELETE) == 0) {
        /* append a deletion request (NULL ilist) to the thread's to_do list,
         * which is processed in dispatch
         */
        client_todo_list_t * todo = HEAP_TYPE_ALLOC(dcontext, client_todo_list_t,
                                                    ACCT_CLIENT, UNPROTECTED);
        client_todo_list_t * iter = dcontext->client_data->to_do;
        todo->next = NULL;
        todo->ilist = NULL;
        todo->tag = tag;
        if (iter == NULL)
            dcontext->client_data->to_do = todo;
        else {
            while (iter->next != NULL)
                iter = iter->next;
            iter->next = todo;
        }
        deletable = true;
        /* unlink fragment so will return to dynamo and delete.
         * Do not remove the fragment from the hashtable --
         * we need to be able to look up the fragment when
         * inspecting the to_do list in dispatch.
         */
        if ((f->flags & FRAG_LINKED_INCOMING) != 0)
            unlink_fragment_incoming(dcontext, f);
        fragment_remove_from_ibt_tables(dcontext, f, false);
    }
#ifdef CLIENT_SIDELINE
    fragment_release_fragment_delete_mutex(dcontext);
    mutex_unlock(&(dcontext->client_data->sideline_mutex));
#endif
    if (!waslinking)
        enter_nolinking(dcontext, NULL, false);
    return deletable;
}
DR_API
/* Schedules the fragment at 'tag' for replacement.  Once this call is
 * completed, an existing executing fragment is allowed to complete,
 * but control will not enter the fragment again before it is replaced.
 *
 * NOTE: this comment used to say, "after replacement, control may still
 * reach the fragment by indirect branch.".  We believe this is now only
 * true for shared fragments, which are not currently supported.
 *
 * Takes control of the ilist and all responsibility for deleting it and the
 * instrs inside of it.  The client should not keep, use, reference, etc. the
 * instrlist or any of the instrs it contains after they are passed in.
 */
bool
dr_replace_fragment(void *drcontext, void *tag, instrlist_t *ilist)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    bool frag_found, waslinking;
    fragment_t * f;
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    CLIENT_ASSERT(!SHARED_FRAGMENTS_ENABLED(),
                  "dr_replace_fragment() only valid with -thread_private");
    CLIENT_ASSERT(drcontext != NULL, "dr_replace_fragment(): drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_replace_fragment: drcontext is invalid");
    /* i#1989: there's no easy way to get a translation without a proper dcontext */
    CLIENT_ASSERT(!fragment_thread_exited(dcontext),
                  "dr_replace_fragment not supported from the thread exit event");
    if (fragment_thread_exited(dcontext))
        return false;
    waslinking = is_couldbelinking(dcontext);
    if (!waslinking)
        enter_couldbelinking(dcontext, NULL, false);
#ifdef CLIENT_SIDELINE
    /* same locking discipline as dr_delete_fragment() */
    mutex_lock(&(dcontext->client_data->sideline_mutex));
    fragment_get_fragment_delete_mutex(dcontext);
#else
    CLIENT_ASSERT(drcontext == get_thread_private_dcontext(),
                  "dr_replace_fragment(): drcontext does not belong to current thread");
#endif
    f = fragment_lookup(dcontext, tag);
    frag_found = (f != NULL);
    if (frag_found) {
        /* append a replacement request (non-NULL ilist) to the thread's
         * to_do list, which is processed in dispatch
         */
        client_todo_list_t * iter = dcontext->client_data->to_do;
        client_todo_list_t * todo = HEAP_TYPE_ALLOC(dcontext, client_todo_list_t,
                                                    ACCT_CLIENT, UNPROTECTED);
        todo->next = NULL;
        todo->ilist = ilist;
        todo->tag = tag;
        if (iter == NULL)
            dcontext->client_data->to_do = todo;
        else {
            while (iter->next != NULL)
                iter = iter->next;
            iter->next = todo;
        }
        /* unlink fragment so will return to dynamo and replace for next time
         * its executed
         */
        if ((f->flags & FRAG_LINKED_INCOMING) != 0)
            unlink_fragment_incoming(dcontext, f);
        fragment_remove_from_ibt_tables(dcontext, f, false);
    }
#ifdef CLIENT_SIDELINE
    fragment_release_fragment_delete_mutex(dcontext);
    mutex_unlock(&(dcontext->client_data->sideline_mutex));
#endif
    if (!waslinking)
        enter_nolinking(dcontext, NULL, false);
    return frag_found;
}
#ifdef UNSUPPORTED_API
/* FIXME - doesn't work with shared fragments. Consider removing since dr_flush_region
 * and dr_delay_flush_region give us most of this functionality. */
DR_API
/* Flushes all fragments containing 'flush_tag', or the entire code
 * cache if flush_tag is NULL. 'curr_tag' must specify the tag of the
 * currently-executing fragment. If curr_tag is NULL, flushing can be
 * delayed indefinitely. Note that flushing is performed across all
 * threads, but other threads may continue to execute fragments
 * containing 'curr_tag' until those fragments finish.
 */
void dr_flush_fragments(void *drcontext, void *curr_tag, void *flush_tag)
{
    client_flush_req_t *iter, *flush;
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    /* We want to unlink the currently executing fragment so we'll
     * force a context switch to DR. That way, we'll perform the
     * flush as soon as possible. Unfortunately, the client may not
     * know the tag of the current trace. Therefore, we unlink all
     * fragments in the region.
     *
     * Note that we aren't unlinking or ibl-invalidating (i.e., making
     * unreachable) any fragments in other threads containing curr_tag
     * until the delayed flush happens in enter_nolinking().
     */
    if (curr_tag != NULL)
        vm_area_unlink_incoming(dcontext, (app_pc)curr_tag);
    flush = HEAP_TYPE_ALLOC(dcontext, client_flush_req_t, ACCT_CLIENT, UNPROTECTED);
    flush->flush_callback = NULL;
    if (flush_tag == NULL) {
        /* NULL flush_tag means flush everything: cover the universal region. */
        flush->start = UNIVERSAL_REGION_BASE;
        flush->size = UNIVERSAL_REGION_SIZE;
    } else {
        flush->start = (app_pc)flush_tag;
        flush->size = 1;
    }
    flush->next = NULL;
    /* Append to the end of this thread's pending flush list (FIFO order);
     * requests are serviced later via the delayed flush in enter_nolinking().
     */
    iter = dcontext->client_data->flush_list;
    if (iter == NULL) {
        dcontext->client_data->flush_list = flush;
    }
    else {
        while (iter->next != NULL)
            iter = iter->next;
        iter->next = flush;
    }
}
#endif /* UNSUPPORTED_API */
DR_API
/* Flush all fragments that contain code from the region [start, start+size).
 * Uses a synchall flush to guarantee that no execution occurs out of the fragments
 * flushed once this returns. Requires caller to be holding no locks (dr or client) and
 * to be !couldbelinking (xref PR 199115, 227619). Caller must use
 * dr_redirect_execution() to return to the cache. */
bool
dr_flush_region(app_pc start, size_t size)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    ASSERT(dcontext != NULL);
    LOG(THREAD, LOG_FRAGMENT, 2, "%s: "PFX"-"PFX"\n", __FUNCTION__, start, start+size);
    /* Flush requires !couldbelinking. FIXME - not all event callbacks to the client are
     * !couldbelinking (see PR 227619) restricting where this routine can be used. */
    CLIENT_ASSERT(!is_couldbelinking(dcontext), "dr_flush_region: called from an event "
                  "callback that doesn't support calling this routine; see header file "
                  "for restrictions.");
    /* Flush requires caller to hold no locks that might block a couldbelinking thread
     * (which includes almost all dr locks). FIXME - some event callbacks are holding
     * dr locks (see PR 227619) so can't call this routine. Since we are going to use
     * a synchall flush, holding client locks is disallowed too (could block a thread
     * at an unsafe spot for synch). */
    CLIENT_ASSERT(OWN_NO_LOCKS(dcontext), "dr_flush_region: caller owns a client "
                  "lock or was called from an event callback that doesn't support "
                  "calling this routine; see header file for restrictions.");
    CLIENT_ASSERT(size != 0, "dr_flush_region: 0 is invalid size for flush");
    /* release build check of requirements, as many as possible at least */
    if (size == 0 || is_couldbelinking(dcontext))
        return false;
    /* Nothing was ever executed from this region: no fragments to flush. */
    if (!executable_vm_area_executed_from(start, start + size))
        return true;
    flush_fragments_from_region(dcontext, start, size, true/*force synchall*/);
    return true;
}
DR_API
/* Flush all fragments that contain code from the region [start, start+size).
 * Uses an unlink flush which guarantees that no thread will enter a fragment that was
 * flushed once this returns (threads already in a flushed fragment will continue).
 * Requires caller to be holding no locks (dr or client) and to be !couldbelinking
 * (xref PR 199115, 227619). */
bool
dr_unlink_flush_region(app_pc start, size_t size)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    ASSERT(dcontext != NULL);
    LOG(THREAD, LOG_FRAGMENT, 2, "%s: "PFX"-"PFX"\n", __FUNCTION__, start, start+size);
    /* This routine won't work with coarse_units */
    CLIENT_ASSERT(!DYNAMO_OPTION(coarse_units),
                  /* as of now, coarse_units are always disabled with -thread_private. */
                  "dr_unlink_flush_region is not supported with -opt_memory unless "
                  "-thread_private or -enable_full_api is also specified");
    /* Flush requires !couldbelinking. FIXME - not all event callbacks to the client are
     * !couldbelinking (see PR 227619) restricting where this routine can be used. */
    CLIENT_ASSERT(!is_couldbelinking(dcontext), "dr_flush_region: called from an event "
                  "callback that doesn't support calling this routine, see header file "
                  "for restrictions.");
    /* Flush requires caller to hold no locks that might block a couldbelinking thread
     * (which includes almost all dr locks). FIXME - some event callbacks are holding
     * dr locks (see PR 227619) so can't call this routine. FIXME - some event callbacks
     * are couldbelinking (see PR 227619) so can't allow the caller to hold any client
     * locks that could block threads in one of those events (otherwise we don't need
     * to care about client locks) */
    CLIENT_ASSERT(OWN_NO_LOCKS(dcontext), "dr_flush_region: caller owns a client "
                  "lock or was called from an event callback that doesn't support "
                  "calling this routine, see header file for restrictions.");
    CLIENT_ASSERT(size != 0, "dr_unlink_flush_region: 0 is invalid size for flush");
    /* release build check of requirements, as many as possible at least */
    if (size == 0 || is_couldbelinking(dcontext))
        return false;
    /* Nothing was ever executed from this region: no fragments to flush. */
    if (!executable_vm_area_executed_from(start, start + size))
        return true;
    flush_fragments_from_region(dcontext, start, size, false/*don't force synchall*/);
    return true;
}
DR_API
/* Flush all fragments that contain code from the region [start, start+size) at the next
 * convenient time. Unlike dr_flush_region() this routine has no restrictions on lock
 * or couldbelinking status; the downside is that the delay till the flush actually
 * occurs is unbounded (FIXME - we could do something safely here to try to speed it
 * up like unlinking shared_syscall etc.), but should occur before any new code is
 * executed or any nudges are processed. */
bool
dr_delay_flush_region(app_pc start, size_t size, uint flush_id,
                      void (*flush_completion_callback) (int flush_id))
{
    client_flush_req_t *flush;
    LOG(THREAD_GET, LOG_FRAGMENT, 2, "%s: "PFX"-"PFX"\n",
        __FUNCTION__, start, start+size);
    if (size == 0) {
        CLIENT_ASSERT(false, "dr_delay_flush_region: 0 is invalid size for flush");
        return false;
    }
    /* With the new module load event at 1st execution (i#884), we get a lot of
     * flush requests during creation of a bb from things like drwrap_replace().
     * To avoid them flushing from a new module we check overlap up front here.
     */
    if (!executable_vm_area_executed_from(start, start+size)) {
        return true;
    }
    /* FIXME - would be nice if we could check the requirements and call
     * dr_unlink_flush_region() here if it's safe. Is difficult to detect non-dr locks
     * that could block a couldbelinking thread though. */
    flush = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, client_flush_req_t, ACCT_CLIENT,
                            UNPROTECTED);
    memset(flush, 0x0, sizeof(client_flush_req_t));
    flush->start = (app_pc)start;
    flush->size = size;
    flush->flush_id = flush_id;
    flush->flush_callback = flush_completion_callback;
    /* Push the request onto the global pending list (LIFO) under its lock. */
    mutex_lock(&client_flush_request_lock);
    flush->next = client_flush_requests;
    client_flush_requests = flush;
    mutex_unlock(&client_flush_request_lock);
    return true;
}
DR_API
/* Reports whether the code cache for drcontext currently holds a fragment
 * whose tag is 'tag'.
 */
bool
dr_fragment_exists_at(void *drcontext, void *tag)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    bool found;
#ifdef CLIENT_SIDELINE
    /* Hold the delete mutex so the lookup result cannot be freed under us. */
    fragment_get_fragment_delete_mutex(dcontext);
#endif
    found = fragment_lookup(dcontext, tag) != NULL;
#ifdef CLIENT_SIDELINE
    fragment_release_fragment_delete_mutex(dcontext);
#endif
    return found;
}
DR_API
/* Reports whether a basic-block fragment (i.e., a non-trace fragment)
 * exists at 'tag' in drcontext's code cache.
 */
bool
dr_bb_exists_at(void *drcontext, void *tag)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    fragment_t *frag = fragment_lookup(dcontext, tag);
    return frag != NULL && !TEST(FRAG_IS_TRACE, frag->flags);
}
DR_API
/* Looks up the fragment associated with the application pc tag.
 * If not found, returns 0.
 * If found, returns the total size occupied in the cache by the fragment.
 */
uint
dr_fragment_size(void *drcontext, void *tag)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    fragment_t *f;
    int size;
    CLIENT_ASSERT(drcontext != NULL, "dr_fragment_size: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_fragment_size: drcontext is invalid");
#ifdef CLIENT_SIDELINE
    /* We could skip the lock when the calling thread owns the dcontext, but
     * checking ownership costs more than just taking the lock.
     */
    fragment_get_fragment_delete_mutex(dcontext);
#endif
    f = fragment_lookup(dcontext, tag);
    size = (f == NULL) ? 0 : f->size;
#ifdef CLIENT_SIDELINE
    fragment_release_fragment_delete_mutex(dcontext);
#endif
    return size;
}
DR_API
/* Retrieves the application PC of a fragment */
app_pc
dr_fragment_app_pc(void *tag)
{
#ifdef WINDOWS
    /* A tag inside an interception buffer is mapped back to the original
     * application pc.
     */
    tag = get_app_pc_from_intercept_pc_if_necessary((app_pc)tag);
    CLIENT_ASSERT(tag != NULL, "dr_fragment_app_pc shouldn't be NULL");
    DODEBUG({
        /* Without -hide our DllMain routine ends up in the cache (xref PR 223120).
         * On Linux fini() ends up in the cache.
         */
        if (DYNAMO_OPTION(hide) && is_dynamo_address(tag) &&
            /* support client interpreting code out of its library */
            !is_in_client_lib(tag)) {
            /* downgraded from assert for client interpreting its own generated code */
            SYSLOG_INTERNAL_WARNING_ONCE("dr_fragment_app_pc is a DR/client pc");
        }
    });
#elif defined(LINUX) && defined(X86_32)
    /* Point back at our hook, undoing the bb shift for SA_RESTART (i#2659). */
    if ((app_pc)tag == vsyscall_sysenter_displaced_pc)
        tag = vsyscall_sysenter_return_pc;
#endif
    return tag;
}
DR_API
/* i#268: opposite of dr_fragment_app_pc() */
app_pc
dr_app_pc_for_decoding(app_pc pc)
{
    app_pc result = pc;
#ifdef WINDOWS
    /* A hooked app pc is decoded at its displaced copy instead. */
    app_pc displaced;
    if (is_intercepted_app_pc(pc, &displaced))
        result = displaced;
#endif
    return result;
}
DR_API
/* Translates a code-cache pc back to the corresponding application pc via
 * recreate_app_pc(); returns NULL on failure or if the thread has exited.
 */
app_pc
dr_app_pc_from_cache_pc(byte *cache_pc)
{
    app_pc res = NULL;
    dcontext_t *dcontext = get_thread_private_dcontext();
    bool waslinking;
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    ASSERT(dcontext != NULL);
    /* i#1989: there's no easy way to get a translation without a proper dcontext */
    CLIENT_ASSERT(!fragment_thread_exited(dcontext),
                  "dr_app_pc_from_cache_pc not supported from the thread exit event");
    if (fragment_thread_exited(dcontext))
        return NULL;
    /* Translation requires couldbelinking state; restore the caller's prior
     * linking state before returning.
     */
    waslinking = is_couldbelinking(dcontext);
    if (!waslinking)
        enter_couldbelinking(dcontext, NULL, false);
    /* suppress asserts about faults in meta instrs */
    DODEBUG({ dcontext->client_data->is_translating = true; });
    res = recreate_app_pc(dcontext, cache_pc, NULL);
    DODEBUG({ dcontext->client_data->is_translating = false; });
    if (!waslinking)
        enter_nolinking(dcontext, NULL, false);
    return res;
}
DR_API
/* Returns whether the thread identified by drcontext is currently using the
 * application's machine state (as opposed to DR's own).
 */
bool
dr_using_app_state(void *drcontext)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    return os_using_app_state(dcontext);
}
DR_API
/* Convenience wrapper: swaps all state components (DR_STATE_ALL) to the
 * application's values.
 */
void
dr_switch_to_app_state(void *drcontext)
{
    dr_switch_to_app_state_ex(drcontext, DR_STATE_ALL);
}
DR_API
/* Swaps the state components selected by 'flags' to the application's values. */
void
dr_switch_to_app_state_ex(void *drcontext, dr_state_flags_t flags)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    os_swap_context(dcontext, true/*to app*/, flags);
}
DR_API
/* Convenience wrapper: swaps all state components (DR_STATE_ALL) back to
 * DR's values.
 */
void
dr_switch_to_dr_state(void *drcontext)
{
    dr_switch_to_dr_state_ex(drcontext, DR_STATE_ALL);
}
DR_API
/* Swaps the state components selected by 'flags' back to DR's values. */
void
dr_switch_to_dr_state_ex(void *drcontext, dr_state_flags_t flags)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    os_swap_context(dcontext, false/*to dr*/, flags);
}
/***************************************************************************
 * CUSTOM TRACES SUPPORT
 * XXX: we could use a method to unmark a trace head; it would also be nice
 * if DR notified the client when it marked a trace head and gave the client
 * a chance to override that decision.
 */
DR_API
/* Marks the fragment associated with the application pc tag as
 * a trace head. The fragment need not exist yet -- once it is
 * created it will be marked as a trace head.
 *
 * DR associates a counter with a trace head and once it
 * passes the -hot_threshold parameter, DR begins building
 * a trace. Before each fragment is added to the trace, DR
 * calls the client routine dr_end_trace to determine whether
 * to end the trace. (dr_end_trace will be called both for
 * standard DR traces and for client-defined traces.)
 *
 * Note, some fragments are unsuitable for trace heads. DR will
 * ignore attempts to mark such fragments as trace heads and will return
 * false. If the client marks a fragment that doesn't exist yet as a trace
 * head and DR later determines that the fragment is unsuitable for
 * a trace head it will unmark the fragment as a trace head without
 * notifying the client.
 *
 * Returns true if the target fragment is marked as a trace head.
 *
 * If coarse, headness depends on path: currently this will only have
 * links from tag's coarse unit unlinked.
 */
bool /* FIXME: dynamorio_app_init returns an int! */
dr_mark_trace_head(void *drcontext, void *tag)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    fragment_t *f;
    fragment_t coarse_f;
    bool success = true;
    CLIENT_ASSERT(drcontext != NULL, "dr_mark_trace_head: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_mark_trace_head: drcontext is invalid");
    /* Required to make the future-fragment lookup and add atomic and for
     * mark_trace_head. We have to grab before fragment_delete_mutex so
     * we pay the cost of acquiring up front even when f->flags doesn't
     * require it.
     */
    SHARED_FLAGS_RECURSIVE_LOCK(FRAG_SHARED, acquire, change_linking_lock);
#ifdef CLIENT_SIDELINE
    /* used to check to see if owning thread, if so don't need lock */
    /* but the check for owning thread more expensive then just getting lock */
    /* to check if owner get_thread_id() == dcontext->owning_thread */
    fragment_get_fragment_delete_mutex(dcontext);
#endif
    f = fragment_lookup_fine_and_coarse(dcontext, tag, &coarse_f, NULL);
    if (f == NULL) {
        /* Fragment not built yet: record headness on a future fragment so
         * the flag is applied when the fragment is created.
         */
        future_fragment_t *fut;
        fut = fragment_lookup_future(dcontext, tag);
        if (fut == NULL) {
            /* need to create a future fragment */
            fut = fragment_create_and_add_future(dcontext, tag, FRAG_IS_TRACE_HEAD);
        } else {
            /* don't call mark_trace_head, it will try to do some linking */
            fut->flags |= FRAG_IS_TRACE_HEAD;
        }
#ifndef CLIENT_SIDELINE
        LOG(THREAD, LOG_MONITOR, 2,
            "Client mark trace head : will mark fragment as trace head when built "
            ": address "PFX"\n", tag);
#endif
    } else {
        /* check precluding conditions */
        if (TEST(FRAG_IS_TRACE, f->flags)) {
#ifndef CLIENT_SIDELINE
            LOG(THREAD, LOG_MONITOR, 2,
                "Client mark trace head : not marking as trace head, is already "
                "a trace : address "PFX"\n", tag);
#endif
            success = false;
        } else if (TEST(FRAG_CANNOT_BE_TRACE, f->flags)) {
#ifndef CLIENT_SIDELINE
            LOG(THREAD, LOG_MONITOR, 2,
                "Client mark trace head : not marking as trace head, particular "
                "fragment cannot be trace head : address "PFX"\n", tag);
#endif
            success = false;
        } else if (TEST(FRAG_IS_TRACE_HEAD, f->flags)) {
#ifndef CLIENT_SIDELINE
            LOG(THREAD, LOG_MONITOR, 2,
                "Client mark trace head : fragment already marked as trace head : "
                "address "PFX"\n", tag);
#endif
            success = true;
        } else {
            mark_trace_head(dcontext, f, NULL, NULL);
#ifndef CLIENT_SIDELINE
            LOG(THREAD, LOG_MONITOR, 3,
                "Client mark trace head : just marked as trace head : address "PFX"\n",
                tag);
#endif
        }
    }
#ifdef CLIENT_SIDELINE
    fragment_release_fragment_delete_mutex(dcontext);
#endif
    SHARED_FLAGS_RECURSIVE_LOCK(FRAG_SHARED, release, change_linking_lock);
    return success;
}
DR_API
/* Checks to see if the fragment (or future fragment) in the drcontext
 * fcache at tag is marked as a trace head
 */
bool
dr_trace_head_at(void *drcontext, void *tag)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    fragment_t *f;
    bool is_head;
#ifdef CLIENT_SIDELINE
    fragment_get_fragment_delete_mutex(dcontext);
#endif
    f = fragment_lookup(dcontext, tag);
    if (f != NULL) {
        is_head = TEST(FRAG_IS_TRACE_HEAD, f->flags);
    } else {
        /* Not built yet: a future fragment may already carry the flag. */
        future_fragment_t *fut = fragment_lookup_future(dcontext, tag);
        is_head = fut != NULL && TEST(FRAG_IS_TRACE_HEAD, fut->flags);
    }
#ifdef CLIENT_SIDELINE
    fragment_release_fragment_delete_mutex(dcontext);
#endif
    return is_head;
}
DR_API
/* checks to see that if there is a trace in the drcontext fcache at tag
 */
bool
dr_trace_exists_at(void *drcontext, void *tag)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    fragment_t *f;
    bool is_trace;
#ifdef CLIENT_SIDELINE
    fragment_get_fragment_delete_mutex(dcontext);
#endif
    f = fragment_lookup(dcontext, tag);
    is_trace = f != NULL && TEST(FRAG_IS_TRACE, f->flags);
#ifdef CLIENT_SIDELINE
    fragment_release_fragment_delete_mutex(dcontext);
#endif
    return is_trace;
}
#ifdef UNSUPPORTED_API
DR_API
/* All basic blocks created after this routine is called will have a prefix
 * that restores the ecx register. Exit ctis can be made to target this prefix
 * instead of the normal entry point by using the instr_branch_set_prefix_target()
 * routine.
 * WARNING: this routine should almost always be called during client
 * initialization, since having a mixture of prefixed and non-prefixed basic
 * blocks can lead to trouble.
 */
void
dr_add_prefixes_to_basic_blocks(void)
{
    if (DYNAMO_OPTION(coarse_units)) {
        /* coarse_units doesn't support prefixes in general.
         * the variation by addr prefix according to processor type
         * is also not stored in pcaches.
         */
        CLIENT_ASSERT(false,
                      "dr_add_prefixes_to_basic_blocks() not supported with -opt_memory");
    }
    /* Options live on protected pages: unprotect, flip the flag, re-protect. */
    options_make_writable();
    dynamo_options.bb_prefixes = true;
    options_restore_readonly();
}
#endif /* UNSUPPORTED_API */
DR_API
/* Insert code to get the segment base address pointed at by seg into
 * register reg. In Linux, it is only supported with -mangle_app_seg option.
 * In Windows, it only supports getting base address of the TLS segment.
 */
bool
dr_insert_get_seg_base(void *drcontext, instrlist_t *ilist, instr_t *instr,
                       reg_id_t seg, reg_id_t reg)
{
    CLIENT_ASSERT(reg_is_pointer_sized(reg),
                  "dr_insert_get_seg_base: reg has wrong size\n");
#ifdef X86
    CLIENT_ASSERT(reg_is_segment(seg),
                  "dr_insert_get_seg_base: seg is not a segment register");
# ifdef UNIX
    CLIENT_ASSERT(INTERNAL_OPTION(mangle_app_seg),
                  "dr_insert_get_seg_base is supported"
                  "with -mangle_app_seg only");
    /* FIXME: we should remove the constraint below by always mangling SEG_TLS,
     * 1. Getting TLS base could be a common request by clients.
     * 2. The TLS descriptor setup and selector setup can be separated,
     * so we must intercept all descriptor setup. It will not be large
     * runtime overhead for keeping track of the app's TLS segment base.
     */
    CLIENT_ASSERT(INTERNAL_OPTION(private_loader) || seg != SEG_TLS,
                  "dr_insert_get_seg_base supports TLS seg"
                  "only with -private_loader");
    if (!INTERNAL_OPTION(mangle_app_seg) ||
        !(INTERNAL_OPTION(private_loader) || seg != SEG_TLS))
        return false;
    if (seg == SEG_FS || seg == SEG_GS) {
        /* Load the app's fs/gs base out of the TLS slot DR maintains for it. */
        instrlist_meta_preinsert
            (ilist, instr,
             instr_create_restore_from_tls(drcontext, reg,
                                           os_get_app_tls_base_offset(seg)));
    } else {
        /* Other segments: materialize a zero base (flat address space). */
        instrlist_meta_preinsert
            (ilist, instr,
             INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(reg),
                                  OPND_CREATE_INTPTR(0)));
    }
# else /* Windows */
    if (seg == SEG_TLS) {
        /* Load the TIB self pointer through the fs/gs segment. */
        instrlist_meta_preinsert
            (ilist, instr,
             XINST_CREATE_load(drcontext,
                               opnd_create_reg(reg),
                               opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL,
                                                         0, SELF_TIB_OFFSET, OPSZ_PTR)));
    } else if (seg == SEG_CS || seg == SEG_DS || seg == SEG_ES || seg == SEG_SS) {
        /* XXX: we assume flat address space */
        instrlist_meta_preinsert
            (ilist, instr,
             INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(reg),
                                  OPND_CREATE_INTPTR(0)));
    } else
        return false;
# endif /* UNIX/Windows */
#elif defined (ARM)
    /* i#1551: NYI on ARM */
    ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
    return true;
}
DR_API
/* Returns the register DR steals from the application for its own use on
 * architectures that steal one, or REG_NULL on x86 where no register is
 * stolen.
 * Fix: declare the parameter list as (void) -- in C an empty list `()`
 * declares a function with *unspecified* parameters, not a prototype.
 */
reg_id_t
dr_get_stolen_reg(void)
{
    return IF_X86_ELSE(REG_NULL, dr_reg_stolen);
}
DR_API
/* Inserts meta-code before 'instr' that loads the application's value of the
 * stolen register (kept in DR's TLS slot) into 'reg'. No-op code-wise on x86,
 * which has no stolen register.
 */
bool
dr_insert_get_stolen_reg_value(void *drcontext, instrlist_t *ilist,
                               instr_t *instr, reg_id_t reg)
{
    IF_X86(CLIENT_ASSERT(false, "dr_insert_get_stolen_reg: should not be reached\n"));
    CLIENT_ASSERT(reg_is_pointer_sized(reg),
                  "dr_insert_get_stolen_reg: reg has wrong size\n");
    CLIENT_ASSERT(!reg_is_stolen(reg),
                  "dr_insert_get_stolen_reg: reg is used by DynamoRIO\n");
#ifdef AARCHXX
    instrlist_meta_preinsert
        (ilist, instr,
         instr_create_restore_from_tls(drcontext, reg, TLS_REG_STOLEN_SLOT));
#endif
    return true;
}
DR_API
/* Inserts meta-code before 'instr' that stores 'reg' into the TLS slot
 * holding the application's value of the stolen register. No-op code-wise
 * on x86, which has no stolen register.
 */
bool
dr_insert_set_stolen_reg_value(void *drcontext, instrlist_t *ilist,
                               instr_t *instr, reg_id_t reg)
{
    IF_X86(CLIENT_ASSERT(false, "dr_insert_set_stolen_reg: should not be reached\n"));
    CLIENT_ASSERT(reg_is_pointer_sized(reg),
                  "dr_insert_set_stolen_reg: reg has wrong size\n");
    CLIENT_ASSERT(!reg_is_stolen(reg),
                  "dr_insert_set_stolen_reg: reg is used by DynamoRIO\n");
#ifdef AARCHXX
    instrlist_meta_preinsert
        (ilist, instr,
         instr_create_save_to_tls(drcontext, reg, TLS_REG_STOLEN_SLOT));
#endif
    return true;
}
DR_API
/* Removes every OP_it instruction from ilist, destroying each one, and
 * returns the number removed (always 0 on non-ARM builds).
 */
int
dr_remove_it_instrs(void *drcontext, instrlist_t *ilist)
{
#if !defined(ARM)
    return 0;
#else
    int removed = 0;
    instr_t *cur = instrlist_first(ilist);
    while (cur != NULL) {
        /* Capture the successor before any removal invalidates 'cur'. */
        instr_t *succ = instr_get_next(cur);
        if (instr_get_opcode(cur) == OP_it) {
            instrlist_remove(ilist, cur);
            instr_destroy(drcontext, cur);
            removed++;
        }
        cur = succ;
    }
    return removed;
#endif
}
DR_API
/* Re-inserts IT blocks for a Thumb-mode ilist via reinstate_it_blocks();
 * returns the count it reports, or 0 for an empty/non-Thumb list (and
 * always 0 on non-ARM builds).
 */
int
dr_insert_it_instrs(void *drcontext, instrlist_t *ilist)
{
#if !defined(ARM)
    return 0;
#else
    instr_t *head = instrlist_first(ilist);
    if (head == NULL)
        return 0;
    if (instr_get_isa_mode(head) != DR_ISA_ARM_THUMB)
        return 0;
    return reinstate_it_blocks((dcontext_t*)drcontext, ilist, head, NULL);
#endif
}
DR_API
/* Builds a basic-block fragment for each tag in 'tags' that does not already
 * have one, warming the code cache before execution begins. Returns false if
 * this thread has no dcontext.
 */
bool
dr_prepopulate_cache(app_pc *tags, size_t tags_count)
{
    /* We expect get_thread_private_dcontext() to return NULL b/c we're between
     * dr_app_setup() and dr_app_start() and are considered a "native" thread
     * with disabled TLS. We do set up TLS as too many routines fail (e.g.,
     * clean call analysis) with NULL from TLS, but we do not set up signal
     * handling: the caller has to handle decode faults, as we do not
     * want to enable our signal handlers, which might disrupt the app running
     * natively in parallel with us.
     */
    thread_record_t *tr = thread_lookup(get_thread_id());
    /* NOTE(review): tr is dereferenced without a NULL check -- this assumes
     * the caller already ran dr_app_setup() so the thread is registered;
     * confirm against the documented calling convention.
     */
    dcontext_t *dcontext = tr->dcontext;
    uint i;
    if (dcontext == NULL)
        return false;
    SHARED_BB_LOCK();
    /* NOTE(review): "%d" with a size_t argument -- relies on the SYSLOG
     * macro's format handling; verify on LP64.
     */
    SYSLOG_INTERNAL_INFO("pre-building code cache from %d tags", tags_count);
#ifdef UNIX
    os_swap_context(dcontext, false/*to dr*/, DR_STATE_GO_NATIVE);
#endif
    for (i = 0; i < tags_count; i++) {
        /* There could be duplicates if sthg was deleted and re-added during profiling */
        fragment_t coarse_f;
        fragment_t *f;
#ifdef UNIX
        /* We silently skip DR-segment-reading addresses to help out a caller
         * who sampled and couldn't avoid self-sampling for decoding.
         */
        if (is_DR_segment_reader_entry(tags[i]))
            continue;
#endif
        f = fragment_lookup_fine_and_coarse(dcontext, tags[i], &coarse_f, NULL);
        if (f == NULL) {
            /* For coarse-grain we won't link as that's done during execution,
             * but for fine-grained this should produce a fully warmed cache.
             */
            f = build_basic_block_fragment(dcontext, tags[i],
                                           0, true/*link*/, true/*visible*/
                                           _IF_CLIENT(false/*!for_trace*/)
                                           _IF_CLIENT(NULL));
        }
        ASSERT(f != NULL);
        /* We're ok making a thread-private fragment: might be a waste if this
         * thread never runs it, but simpler than trying to skip them or sthg.
         */
    }
#ifdef UNIX
    os_swap_context(dcontext, true/*to app*/, DR_STATE_GO_NATIVE);
#endif
    SHARED_BB_UNLOCK();
    return true;
}
DR_API
/* Pre-populates the indirect-branch-lookup table selected by 'branch_type'
 * with the given target tags. Returns false if this thread has no dcontext
 * or branch_type is unrecognized.
 */
bool
dr_prepopulate_indirect_targets(dr_indirect_branch_type_t branch_type,
                                app_pc *tags, size_t tags_count)
{
    /* We do the same setup as for dr_prepopulate_cache(). */
    thread_record_t *tr = thread_lookup(get_thread_id());
    dcontext_t *dcontext = tr->dcontext;
    ibl_branch_type_t ibl_type;
    uint i;
    if (dcontext == NULL)
        return false;
    /* Initially I took in an opcode and used extract_branchtype(instr_branch_type())
     * but every use case had to make a fake instr to get the opcode and had no
     * good cross-platform method so I switched to an enum. We're unlikely to
     * change our ibt split and we can add new enums in any case.
     */
    /* Fix: validate branch_type *before* swapping to DR state; previously the
     * default-case early return left the thread stuck in DR's context on UNIX.
     */
    switch (branch_type) {
    case DR_INDIRECT_RETURN: ibl_type = IBL_RETURN; break;
    case DR_INDIRECT_CALL: ibl_type = IBL_INDCALL; break;
    case DR_INDIRECT_JUMP: ibl_type = IBL_INDJMP; break;
    default: return false;
    }
#ifdef UNIX
    os_swap_context(dcontext, false/*to dr*/, DR_STATE_GO_NATIVE);
#endif
    SYSLOG_INTERNAL_INFO("pre-populating ibt[%d] table for %d tags",
                         ibl_type, tags_count);
    for (i = 0; i < tags_count; i++) {
        fragment_add_ibl_target(dcontext, tags[i], ibl_type);
    }
#ifdef UNIX
    os_swap_context(dcontext, true/*to app*/, DR_STATE_GO_NATIVE);
#endif
    return true;
}
DR_API
/* Fills *drstats with a snapshot of DR's statistics via stats_get_snapshot();
 * returns whether the snapshot succeeded.
 */
bool
dr_get_stats(dr_stats_t *drstats)
{
    return stats_get_snapshot(drstats);
}
/***************************************************************************
* PERSISTENCE
*/
/* Up to caller to synchronize.
 * Returns the number of bytes the read-only persisted section will occupy:
 * the client-library path list plus each client's own declared size.
 */
uint
instrument_persist_ro_size(dcontext_t *dcontext, void *perscxt, size_t file_offs)
{
    size_t sz = 0;
    size_t i;
    /* Store the set of clients in use as we require the same set in order
     * to validate the pcache on use. Note that we can't just have -client_lib
     * be OP_PCACHE_GLOBAL b/c it contains client options too.
     * We have no unique guids for clients so we store the full path.
     * We ignore ids. We do care about priority order: clients must
     * be in the same order in addition to having the same path.
     *
     * XXX: we could go further and store client library checksum, etc. hashes,
     * but that precludes clients from doing their own proper versioning.
     *
     * XXX: we could also put the set of clients into the pcache namespace to allow
     * simultaneous use of pcaches with different sets of clients (empty set
     * vs under tool, in particular): but doesn't really seem useful enough
     * for the trouble
     */
    for (i=0; i<num_client_libs; i++) {
        sz += strlen(client_libs[i].path) + 1/*NULL*/;
    }
    sz++; /* double NULL ends it */
    /* Now for clients' own data.
     * For user_data, we assume each sequence of <size, patch, persist> is
     * atomic: caller holds a mutex across the sequence. Thus, we can use
     * global storage.
     */
    if (persist_ro_size_callbacks.num > 0) {
        /* 'idx' below is declared by the call_all_ret macro expansion. */
        call_all_ret(sz, +=, , persist_ro_size_callbacks,
                     size_t (*)(void *, void *, size_t, void **),
                     (void *)dcontext, perscxt, file_offs + sz,
                     &persist_user_data[idx]);
    }
    /* using size_t for API w/ clients in case we want to widen in future */
    CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(sz), "persisted cache size too large");
    return (uint) sz;
}
/* Up to caller to synchronize.
 * Writes the read-only persisted section: the client-library path list
 * (validated on resurrect) followed by each client's own data.
 * Returns true iff all writes succeeded.
 */
bool
instrument_persist_ro(dcontext_t *dcontext, void *perscxt, file_t fd)
{
    bool res = true;
    size_t i;
    char nul = '\0';
    ASSERT(fd != INVALID_FILE);
    for (i=0; i<num_client_libs; i++) {
        size_t sz = strlen(client_libs[i].path) + 1/*NULL*/;
        if (os_write(fd, client_libs[i].path, sz) != (ssize_t)sz)
            return false;
    }
    /* double NULL ends it */
    if (os_write(fd, &nul, sizeof(nul)) != (ssize_t)sizeof(nul))
        return false;
    /* Now for clients' own data */
    /* Fix: guard on the callback list we actually invoke (was
     * persist_ro_size_callbacks, which can diverge from persist_ro_callbacks
     * after a partial dr_unregister_persist_ro()); this also matches the
     * rx/rw variants below.
     */
    if (persist_ro_callbacks.num > 0) {
        call_all_ret(res, = res &&, , persist_ro_callbacks,
                     bool (*)(void *, void *, file_t, void *),
                     (void *)dcontext, perscxt, fd, persist_user_data[idx]);
    }
    return res;
}
/* Returns true if successfully validated and de-serialized */
bool
instrument_resurrect_ro(dcontext_t *dcontext, void *perscxt, byte *map)
{
    bool res = true;
    size_t i;
    const char *c;
    ASSERT(map != NULL);
    /* Ensure we have the same set of tools (see comments above) */
    i = 0;
    c = (const char *) map;
    /* Walk the persisted NULL-separated path list; it must match the loaded
     * clients exactly, in the same priority order.
     */
    while (*c != '\0') {
        if (i >= num_client_libs)
            return false; /* too many clients */
        if (strcmp(client_libs[i].path, c) != 0)
            return false; /* client path mismatch */
        c += strlen(c) + 1;
        i++;
    }
    if (i < num_client_libs)
        return false; /* too few clients */
    c++; /* skip the double-NULL terminator of the path list */
    /* Now for clients' own data */
    if (resurrect_ro_callbacks.num > 0) {
        call_all_ret(res, = res &&, , resurrect_ro_callbacks,
                     bool (*)(void *, void *, byte **),
                     (void *)dcontext, perscxt, (byte **) &c);
    }
    return res;
}
/* Up to caller to synchronize.
 * Returns the total size the clients declare for the rx persisted section.
 */
uint
instrument_persist_rx_size(dcontext_t *dcontext, void *perscxt, size_t file_offs)
{
    size_t sz = 0;
    if (persist_rx_size_callbacks.num == 0)
        return 0;
    /* 'idx' below is declared by the call_all_ret macro expansion. */
    call_all_ret(sz, +=, , persist_rx_size_callbacks,
                 size_t (*)(void *, void *, size_t, void **),
                 (void *)dcontext, perscxt, file_offs + sz,
                 &persist_user_data[idx]);
    /* using size_t for API w/ clients in case we want to widen in future */
    CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(sz), "persisted cache size too large");
    return (uint) sz;
}
/* Up to caller to synchronize.
 * Returns true iff all writes succeeded.
 */
bool
instrument_persist_rx(dcontext_t *dcontext, void *perscxt, file_t fd)
{
    bool res = true;
    ASSERT(fd != INVALID_FILE);
    if (persist_rx_callbacks.num == 0)
        return true;
    /* Accumulate across all registered callbacks: false if any one fails. */
    call_all_ret(res, = res &&, , persist_rx_callbacks,
                 bool (*)(void *, void *, file_t, void *),
                 (void *)dcontext, perscxt, fd, persist_user_data[idx]);
    return res;
}
/* Returns true if successfully validated and de-serialized */
bool
instrument_resurrect_rx(dcontext_t *dcontext, void *perscxt, byte *map)
{
    bool res = true;
    ASSERT(map != NULL);
    if (resurrect_rx_callbacks.num == 0)
        return true;
    /* Each callback advances 'map' past the data it consumed. */
    call_all_ret(res, = res &&, , resurrect_rx_callbacks,
                 bool (*)(void *, void *, byte **),
                 (void *)dcontext, perscxt, &map);
    return res;
}
/* Up to caller to synchronize.
 * Returns the total size the clients declare for the rw persisted section.
 */
uint
instrument_persist_rw_size(dcontext_t *dcontext, void *perscxt, size_t file_offs)
{
    size_t sz = 0;
    if (persist_rw_size_callbacks.num == 0)
        return 0;
    /* 'idx' below is declared by the call_all_ret macro expansion. */
    call_all_ret(sz, +=, , persist_rw_size_callbacks,
                 size_t (*)(void *, void *, size_t, void **),
                 (void *)dcontext, perscxt, file_offs + sz,
                 &persist_user_data[idx]);
    /* using size_t for API w/ clients in case we want to widen in future */
    CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(sz), "persisted cache size too large");
    return (uint) sz;
}
/* Up to caller to synchronize.
 * Returns true iff all writes succeeded.
 */
bool
instrument_persist_rw(dcontext_t *dcontext, void *perscxt, file_t fd)
{
    bool res = true;
    ASSERT(fd != INVALID_FILE);
    if (persist_rw_callbacks.num == 0)
        return true;
    /* Accumulate across all registered callbacks: false if any one fails. */
    call_all_ret(res, = res &&, , persist_rw_callbacks,
                 bool (*)(void *, void *, file_t, void *),
                 (void *)dcontext, perscxt, fd, persist_user_data[idx]);
    return res;
}
/* Returns true if successfully validated and de-serialized */
bool
instrument_resurrect_rw(dcontext_t *dcontext, void *perscxt, byte *map)
{
    bool res = true;
    ASSERT(map != NULL);
    if (resurrect_rw_callbacks.num == 0)
        return true;
    /* Fix copy-paste bug: invoke the rw resurrect callbacks (was
     * resurrect_rx_callbacks), matching the guard above and the
     * registrations made for the rw section.
     */
    call_all_ret(res, = res &&, , resurrect_rw_callbacks,
                 bool (*)(void *, void *, byte **),
                 (void *)dcontext, perscxt, &map);
    return res;
}
bool
instrument_persist_patch(dcontext_t *dcontext, void *perscxt,
byte *bb_start, size_t bb_size)
{
bool res = true;
if (persist_patch_callbacks.num == 0)
return true;
call_all_ret(res, = res &&, , persist_patch_callbacks,
bool (*)(void *, void *, byte *, size_t, void *),
(void *)dcontext, perscxt, bb_start, bb_size,
persist_user_data[idx]);
return res;
}
DR_API
bool
dr_register_persist_ro(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
if (func_size == NULL || func_persist == NULL || func_resurrect == NULL)
return false;
add_callback(&persist_ro_size_callbacks, (void (*)(void))func_size, true);
add_callback(&persist_ro_callbacks, (void (*)(void))func_persist, true);
add_callback(&resurrect_ro_callbacks, (void (*)(void))func_resurrect, true);
return true;
}
DR_API
bool
dr_unregister_persist_ro(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
bool res = true;
if (func_size != NULL) {
res = remove_callback(&persist_ro_size_callbacks, (void (*)(void))func_size, true)
&& res;
} else
res = false;
if (func_persist != NULL) {
res = remove_callback(&persist_ro_callbacks, (void (*)(void))func_persist, true)
&& res;
} else
res = false;
if (func_resurrect != NULL) {
res = remove_callback(&resurrect_ro_callbacks, (void (*)(void))func_resurrect,
true) && res;
} else
res = false;
return res;
}
DR_API
bool
dr_register_persist_rx(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
if (func_size == NULL || func_persist == NULL || func_resurrect == NULL)
return false;
add_callback(&persist_rx_size_callbacks, (void (*)(void))func_size, true);
add_callback(&persist_rx_callbacks, (void (*)(void))func_persist, true);
add_callback(&resurrect_rx_callbacks, (void (*)(void))func_resurrect, true);
return true;
}
DR_API
bool
dr_unregister_persist_rx(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
bool res = true;
if (func_size != NULL) {
res = remove_callback(&persist_rx_size_callbacks, (void (*)(void))func_size, true)
&& res;
} else
res = false;
if (func_persist != NULL) {
res = remove_callback(&persist_rx_callbacks, (void (*)(void))func_persist, true)
&& res;
} else
res = false;
if (func_resurrect != NULL) {
res = remove_callback(&resurrect_rx_callbacks, (void (*)(void))func_resurrect,
true) && res;
} else
res = false;
return res;
}
DR_API
bool
dr_register_persist_rw(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
if (func_size == NULL || func_persist == NULL || func_resurrect == NULL)
return false;
add_callback(&persist_rw_size_callbacks, (void (*)(void))func_size, true);
add_callback(&persist_rw_callbacks, (void (*)(void))func_persist, true);
add_callback(&resurrect_rw_callbacks, (void (*)(void))func_resurrect, true);
return true;
}
DR_API
bool
dr_unregister_persist_rw(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
bool res = true;
if (func_size != NULL) {
res = remove_callback(&persist_rw_size_callbacks, (void (*)(void))func_size, true)
&& res;
} else
res = false;
if (func_persist != NULL) {
res = remove_callback(&persist_rw_callbacks, (void (*)(void))func_persist, true)
&& res;
} else
res = false;
if (func_resurrect != NULL) {
res = remove_callback(&resurrect_rw_callbacks, (void (*)(void))func_resurrect,
true) && res;
} else
res = false;
return res;
}
DR_API
bool
dr_register_persist_patch(bool (*func_patch)(void *drcontext, void *perscxt,
byte *bb_start, size_t bb_size,
void *user_data))
{
if (func_patch == NULL)
return false;
add_callback(&persist_patch_callbacks, (void (*)(void))func_patch, true);
return true;
}
DR_API
bool
dr_unregister_persist_patch(bool (*func_patch)(void *drcontext, void *perscxt,
byte *bb_start, size_t bb_size,
void *user_data))
{
return remove_callback(&persist_patch_callbacks, (void (*)(void))func_patch, true);
}
DR_API
/* Create instructions for storing pointer-size integer val to dst,
* and then insert them into ilist prior to where.
* The "first" and "last" created instructions are returned.
*/
void
instrlist_insert_mov_immed_ptrsz(void *drcontext, ptr_int_t val, opnd_t dst,
instrlist_t *ilist, instr_t *where,
OUT instr_t **first, OUT instr_t **last)
{
CLIENT_ASSERT(opnd_get_size(dst) == OPSZ_PTR, "wrong dst size");
insert_mov_immed_ptrsz((dcontext_t *)drcontext, val, dst,
ilist, where, first, last);
}
DR_API
/* Create instructions for pushing pointer-size integer val on the stack,
* and then insert them into ilist prior to where.
* The "first" and "last" created instructions are returned.
*/
void
instrlist_insert_push_immed_ptrsz(void *drcontext, ptr_int_t val,
instrlist_t *ilist, instr_t *where,
OUT instr_t **first, OUT instr_t **last)
{
insert_push_immed_ptrsz((dcontext_t *)drcontext, val, ilist, where,
first, last);
}
DR_API
void
instrlist_insert_mov_instr_addr(void *drcontext, instr_t *src_inst, byte *encode_pc,
opnd_t dst, instrlist_t *ilist, instr_t *where,
OUT instr_t **first, OUT instr_t **last)
{
CLIENT_ASSERT(opnd_get_size(dst) == OPSZ_PTR, "wrong dst size");
if (encode_pc == NULL) {
/* Pass highest code cache address.
* XXX: unless we're beyond the reservation! Would still be reachable
* from rest of vmcode, but might be higher than vmcode_get_end()!
*/
encode_pc = vmcode_get_end();
}
insert_mov_instr_addr((dcontext_t *)drcontext, src_inst, encode_pc, dst,
ilist, where, first, last);
}
DR_API
void
instrlist_insert_push_instr_addr(void *drcontext, instr_t *src_inst, byte *encode_pc,
instrlist_t *ilist, instr_t *where,
OUT instr_t **first, OUT instr_t **last)
{
if (encode_pc == NULL) {
/* Pass highest code cache address.
* XXX: unless we're beyond the reservation! Would still be reachable
* from rest of vmcode, but might be higher than vmcode_get_end()!
*/
encode_pc = vmcode_get_end();
}
insert_push_instr_addr((dcontext_t *)drcontext, src_inst, encode_pc,
ilist, where, first, last);
}
#endif /* CLIENT_INTERFACE */
| 1 | 13,853 | The api/docs/release.dox changelog message is missing: maybe you planned to add it once NtAllocateVirtualMemoryEx and NtMapViewOfSectionEx support is in? I would say, add it here in the same diff that raises max_supported_os_version. | DynamoRIO-dynamorio | c |
@@ -0,0 +1,6 @@
+var aria = 'aria-hidden';
+if (node && node.hasAttribute(aria)) {
+ return false;
+}
+
+return true; | 1 | 1 | 11,054 | Just do: `return node.hasAttribute('aria-hidden')` | dequelabs-axe-core | js |
|
@@ -10,6 +10,18 @@ describe "Saved Searches" do
click_link 'Saved Searches'
expect(page).to have_content 'You have no saved searches'
end
+
+ it 'can be saved and forgotten from a search result' do
+ visit catalog_index_path(q: 'book')
+ within '.search-widgets' do
+ click_button 'save'
+ end
+ expect(page).to have_content 'Successfully saved your search.'
+ within '.search-widgets' do
+ click_button 'forget'
+ end
+ expect(page).to have_content 'Successfully removed that saved search.'
+ end
describe "with a saved search 'book'" do
before do | 1 | require 'spec_helper'
describe "Saved Searches" do
before do
sign_in 'user1'
visit root_path
end
it "should be empty" do
click_link 'Saved Searches'
expect(page).to have_content 'You have no saved searches'
end
describe "with a saved search 'book'" do
before do
fill_in "q", with: 'book'
click_button 'search'
click_link "History"
click_button "save"
click_link 'Saved Searches'
end
it "should show saved searches" do
expect(page).to have_content 'Your saved searches'
expect(page).to have_content 'book'
end
it "should delete saved searches" do
click_button 'delete'
expect(page).to have_content 'Successfully removed that saved search.'
end
describe "and a saved search 'dang'" do
before do
visit root_path
fill_in "q", with: 'dang'
click_button 'search'
click_link "History"
click_button "save"
click_link 'Saved Searches'
end
it "should clear the searhes" do
click_link "Clear Saved Searches"
expect(page).to have_content 'Cleared your saved searches.'
expect(page).to have_content 'You have no saved searches'
end
end
end
end
| 1 | 6,015 | Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping. | projectblacklight-blacklight | rb |
@@ -9,7 +9,10 @@ namespace Datadog.Trace.Util
internal static class DomainMetadata
{
private const string UnknownName = "unknown";
- private static Process _currentProcess;
+ private static bool _initialized;
+ private static string _currentProcessName;
+ private static string _currentProcessMachineName;
+ private static int _currentProcessId;
private static bool _processDataPoisoned;
private static bool _domainDataPoisoned;
private static bool? _isAppInsightsAppDomain; | 1 | using System;
using System.Diagnostics;
namespace Datadog.Trace.Util
{
/// <summary>
/// Dedicated helper class for consistently referencing Process and AppDomain information.
/// </summary>
internal static class DomainMetadata
{
private const string UnknownName = "unknown";
private static Process _currentProcess;
private static bool _processDataPoisoned;
private static bool _domainDataPoisoned;
private static bool? _isAppInsightsAppDomain;
static DomainMetadata()
{
TrySetProcess();
}
public static string ProcessName
{
get
{
try
{
return !_processDataPoisoned ? _currentProcess.ProcessName : UnknownName;
}
catch
{
_processDataPoisoned = true;
return UnknownName;
}
}
}
public static string MachineName
{
get
{
try
{
return !_processDataPoisoned ? _currentProcess.MachineName : UnknownName;
}
catch
{
_processDataPoisoned = true;
return UnknownName;
}
}
}
public static int ProcessId
{
get
{
try
{
return !_processDataPoisoned ? _currentProcess.Id : -1;
}
catch
{
_processDataPoisoned = true;
return -1;
}
}
}
public static string AppDomainName
{
get
{
try
{
return !_domainDataPoisoned ? AppDomain.CurrentDomain.FriendlyName : UnknownName;
}
catch
{
_domainDataPoisoned = true;
return UnknownName;
}
}
}
public static int AppDomainId
{
get
{
try
{
return !_domainDataPoisoned ? AppDomain.CurrentDomain.Id : -1;
}
catch
{
_domainDataPoisoned = true;
return -1;
}
}
}
public static bool ShouldAvoidAppDomain()
{
if (_isAppInsightsAppDomain == null)
{
_isAppInsightsAppDomain = AppDomainName.IndexOf("ApplicationInsights", StringComparison.OrdinalIgnoreCase) >= 0;
}
return _isAppInsightsAppDomain.Value;
}
private static void TrySetProcess()
{
try
{
if (!_processDataPoisoned && _currentProcess == null)
{
_currentProcess = Process.GetCurrentProcess();
}
}
catch
{
_processDataPoisoned = true;
}
}
}
}
| 1 | 18,050 | Do we need `_processDataPoisoned`? Can the name or the id of the current process ever change? Or the machine name? | DataDog-dd-trace-dotnet | .cs |
@@ -114,10 +114,17 @@ func run(ctx context.Context, cfg cmds.Agent, proxy proxy.Proxy) error {
return err
}
}
+
+ notifySocket := os.Getenv("NOTIFY_SOCKET")
+ os.Unsetenv("NOTIFY_SOCKET")
+
if err := setupTunnelAndRunAgent(ctx, nodeConfig, cfg, proxy); err != nil {
return err
}
+ os.Setenv("NOTIFY_SOCKET", notifySocket)
+ systemd.SdNotify(true, "READY=1\n")
+
coreClient, err := coreClient(nodeConfig.AgentConfig.KubeConfigKubelet)
if err != nil {
return err | 1 | package agent
import (
"context"
"fmt"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"strings"
"time"
systemd "github.com/coreos/go-systemd/daemon"
"github.com/pkg/errors"
"github.com/rancher/k3s/pkg/agent/config"
"github.com/rancher/k3s/pkg/agent/containerd"
"github.com/rancher/k3s/pkg/agent/flannel"
"github.com/rancher/k3s/pkg/agent/netpol"
"github.com/rancher/k3s/pkg/agent/proxy"
"github.com/rancher/k3s/pkg/agent/syssetup"
"github.com/rancher/k3s/pkg/agent/tunnel"
"github.com/rancher/k3s/pkg/cgroups"
"github.com/rancher/k3s/pkg/cli/cmds"
"github.com/rancher/k3s/pkg/clientaccess"
cp "github.com/rancher/k3s/pkg/cloudprovider"
"github.com/rancher/k3s/pkg/daemons/agent"
daemonconfig "github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/daemons/executor"
"github.com/rancher/k3s/pkg/nodeconfig"
"github.com/rancher/k3s/pkg/rootless"
"github.com/rancher/k3s/pkg/util"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/controller-manager/app"
app2 "k8s.io/kubernetes/cmd/kube-proxy/app"
kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config"
utilsnet "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer"
)
const (
dockershimSock = "unix:///var/run/dockershim.sock"
containerdSock = "unix:///run/k3s/containerd/containerd.sock"
)
// setupCriCtlConfig creates the crictl config file and populates it
// with the given data from config.
func setupCriCtlConfig(cfg cmds.Agent, nodeConfig *daemonconfig.Node) error {
cre := nodeConfig.ContainerRuntimeEndpoint
if cre == "" {
switch {
case cfg.Docker:
cre = dockershimSock
default:
cre = containerdSock
}
}
agentConfDir := filepath.Join(cfg.DataDir, "agent", "etc")
if _, err := os.Stat(agentConfDir); os.IsNotExist(err) {
if err := os.MkdirAll(agentConfDir, 0700); err != nil {
return err
}
}
crp := "runtime-endpoint: " + cre + "\n"
return ioutil.WriteFile(agentConfDir+"/crictl.yaml", []byte(crp), 0600)
}
func run(ctx context.Context, cfg cmds.Agent, proxy proxy.Proxy) error {
nodeConfig := config.Get(ctx, cfg, proxy)
dualCluster, err := utilsnet.IsDualStackCIDRs(nodeConfig.AgentConfig.ClusterCIDRs)
if err != nil {
return errors.Wrap(err, "failed to validate cluster-cidr")
}
dualService, err := utilsnet.IsDualStackCIDRs(nodeConfig.AgentConfig.ServiceCIDRs)
if err != nil {
return errors.Wrap(err, "failed to validate service-cidr")
}
dualNode, err := utilsnet.IsDualStackIPs(nodeConfig.AgentConfig.NodeIPs)
if err != nil {
return errors.Wrap(err, "failed to validate node-ip")
}
enableIPv6 := dualCluster || dualService || dualNode
conntrackConfig, err := getConntrackConfig(nodeConfig)
if err != nil {
return errors.Wrap(err, "failed to validate kube-proxy conntrack configuration")
}
syssetup.Configure(enableIPv6, conntrackConfig)
if err := setupCriCtlConfig(cfg, nodeConfig); err != nil {
return err
}
if err := executor.Bootstrap(ctx, nodeConfig, cfg); err != nil {
return err
}
if !nodeConfig.NoFlannel {
if err := flannel.Prepare(ctx, nodeConfig); err != nil {
return err
}
}
if !nodeConfig.Docker && nodeConfig.ContainerRuntimeEndpoint == "" {
if err := containerd.Run(ctx, nodeConfig); err != nil {
return err
}
}
if err := setupTunnelAndRunAgent(ctx, nodeConfig, cfg, proxy); err != nil {
return err
}
coreClient, err := coreClient(nodeConfig.AgentConfig.KubeConfigKubelet)
if err != nil {
return err
}
app.WaitForAPIServer(coreClient, 30*time.Second)
if !nodeConfig.NoFlannel {
if err := flannel.Run(ctx, nodeConfig, coreClient.CoreV1().Nodes()); err != nil {
return err
}
}
if err := configureNode(ctx, &nodeConfig.AgentConfig, coreClient.CoreV1().Nodes()); err != nil {
return err
}
if !nodeConfig.AgentConfig.DisableNPC {
if err := netpol.Run(ctx, nodeConfig); err != nil {
return err
}
}
<-ctx.Done()
return ctx.Err()
}
// getConntrackConfig uses the kube-proxy code to parse the user-provided kube-proxy-arg values, and
// extract the conntrack settings so that K3s can set them itself. This allows us to soft-fail when
// running K3s in Docker, where kube-proxy is no longer allowed to set conntrack sysctls on newer kernels.
// When running rootless, we do not attempt to set conntrack sysctls - this behavior is copied from kubeadm.
func getConntrackConfig(nodeConfig *daemonconfig.Node) (*kubeproxyconfig.KubeProxyConntrackConfiguration, error) {
ctConfig := &kubeproxyconfig.KubeProxyConntrackConfiguration{
MaxPerCore: utilpointer.Int32Ptr(0),
Min: utilpointer.Int32Ptr(0),
TCPEstablishedTimeout: &metav1.Duration{},
TCPCloseWaitTimeout: &metav1.Duration{},
}
if nodeConfig.AgentConfig.Rootless {
return ctConfig, nil
}
cmd := app2.NewProxyCommand()
if err := cmd.ParseFlags(daemonconfig.GetArgsList(map[string]string{}, nodeConfig.AgentConfig.ExtraKubeProxyArgs)); err != nil {
return nil, err
}
maxPerCore, err := cmd.Flags().GetInt32("conntrack-max-per-core")
if err != nil {
return nil, err
}
ctConfig.MaxPerCore = &maxPerCore
min, err := cmd.Flags().GetInt32("conntrack-min")
if err != nil {
return nil, err
}
ctConfig.Min = &min
establishedTimeout, err := cmd.Flags().GetDuration("conntrack-tcp-timeout-established")
if err != nil {
return nil, err
}
ctConfig.TCPEstablishedTimeout.Duration = establishedTimeout
closeWaitTimeout, err := cmd.Flags().GetDuration("conntrack-tcp-timeout-close-wait")
if err != nil {
return nil, err
}
ctConfig.TCPCloseWaitTimeout.Duration = closeWaitTimeout
return ctConfig, nil
}
func coreClient(cfg string) (kubernetes.Interface, error) {
restConfig, err := clientcmd.BuildConfigFromFlags("", cfg)
if err != nil {
return nil, err
}
return kubernetes.NewForConfig(restConfig)
}
func Run(ctx context.Context, cfg cmds.Agent) error {
if err := cgroups.Validate(); err != nil {
return err
}
if cfg.Rootless && !cfg.RootlessAlreadyUnshared {
if err := rootless.Rootless(cfg.DataDir); err != nil {
return err
}
}
agentDir := filepath.Join(cfg.DataDir, "agent")
if err := os.MkdirAll(agentDir, 0700); err != nil {
return err
}
proxy, err := proxy.NewSupervisorProxy(ctx, !cfg.DisableLoadBalancer, agentDir, cfg.ServerURL, cfg.LBServerPort)
if err != nil {
return err
}
for {
newToken, err := clientaccess.ParseAndValidateTokenForUser(proxy.SupervisorURL(), cfg.Token, "node")
if err != nil {
logrus.Error(err)
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(2 * time.Second):
}
continue
}
cfg.Token = newToken.String()
break
}
systemd.SdNotify(true, "READY=1\n")
return run(ctx, cfg, proxy)
}
func configureNode(ctx context.Context, agentConfig *daemonconfig.Agent, nodes v1.NodeInterface) error {
count := 0
for {
node, err := nodes.Get(ctx, agentConfig.NodeName, metav1.GetOptions{})
if err != nil {
if count%30 == 0 {
logrus.Infof("Waiting for kubelet to be ready on node %s: %v", agentConfig.NodeName, err)
}
count++
time.Sleep(1 * time.Second)
continue
}
updateNode := false
if labels, changed := updateMutableLabels(agentConfig, node.Labels); changed {
node.Labels = labels
updateNode = true
}
if !agentConfig.DisableCCM {
if annotations, changed := updateAddressAnnotations(agentConfig, node.Annotations); changed {
node.Annotations = annotations
updateNode = true
}
if labels, changed := updateLegacyAddressLabels(agentConfig, node.Labels); changed {
node.Labels = labels
updateNode = true
}
}
// inject node config
if changed, err := nodeconfig.SetNodeConfigAnnotations(node); err != nil {
return err
} else if changed {
updateNode = true
}
if updateNode {
if _, err := nodes.Update(ctx, node, metav1.UpdateOptions{}); err != nil {
logrus.Infof("Failed to update node %s: %v", agentConfig.NodeName, err)
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(time.Second):
continue
}
}
logrus.Infof("labels have been set successfully on node: %s", agentConfig.NodeName)
} else {
logrus.Infof("labels have already set on node: %s", agentConfig.NodeName)
}
break
}
return nil
}
func updateMutableLabels(agentConfig *daemonconfig.Agent, nodeLabels map[string]string) (map[string]string, bool) {
result := map[string]string{}
for _, m := range agentConfig.NodeLabels {
var (
v string
p = strings.SplitN(m, `=`, 2)
k = p[0]
)
if len(p) > 1 {
v = p[1]
}
result[k] = v
}
result = labels.Merge(nodeLabels, result)
return result, !equality.Semantic.DeepEqual(nodeLabels, result)
}
func updateLegacyAddressLabels(agentConfig *daemonconfig.Agent, nodeLabels map[string]string) (map[string]string, bool) {
ls := labels.Set(nodeLabels)
if ls.Has(cp.InternalIPKey) || ls.Has(cp.HostnameKey) {
result := map[string]string{
cp.InternalIPKey: agentConfig.NodeIP,
cp.HostnameKey: agentConfig.NodeName,
}
if agentConfig.NodeExternalIP != "" {
result[cp.ExternalIPKey] = agentConfig.NodeExternalIP
}
result = labels.Merge(nodeLabels, result)
return result, !equality.Semantic.DeepEqual(nodeLabels, result)
}
return nil, false
}
func updateAddressAnnotations(agentConfig *daemonconfig.Agent, nodeAnnotations map[string]string) (map[string]string, bool) {
result := map[string]string{
cp.InternalIPKey: util.JoinIPs(agentConfig.NodeIPs),
cp.HostnameKey: agentConfig.NodeName,
}
if agentConfig.NodeExternalIP != "" {
result[cp.ExternalIPKey] = util.JoinIPs(agentConfig.NodeExternalIPs)
}
result = labels.Merge(nodeAnnotations, result)
return result, !equality.Semantic.DeepEqual(nodeAnnotations, result)
}
// setupTunnelAndRunAgent should start the setup tunnel before starting kubelet and kubeproxy
// there are special case for etcd agents, it will wait until it can find the apiaddress from
// the address channel and update the proxy with the servers addresses, if in rke2 we need to
// start the agent before the tunnel is setup to allow kubelet to start first and start the pods
func setupTunnelAndRunAgent(ctx context.Context, nodeConfig *daemonconfig.Node, cfg cmds.Agent, proxy proxy.Proxy) error {
var agentRan bool
if cfg.ETCDAgent {
// only in rke2 run the agent before the tunnel setup and check for that later in the function
if proxy.IsAPIServerLBEnabled() {
if err := agent.Agent(&nodeConfig.AgentConfig); err != nil {
return err
}
agentRan = true
}
select {
case address := <-cfg.APIAddressCh:
cfg.ServerURL = address
u, err := url.Parse(cfg.ServerURL)
if err != nil {
logrus.Warn(err)
}
proxy.Update([]string{fmt.Sprintf("%s:%d", u.Hostname(), nodeConfig.ServerHTTPSPort)})
case <-ctx.Done():
return ctx.Err()
}
} else if cfg.ClusterReset && proxy.IsAPIServerLBEnabled() {
if err := agent.Agent(&nodeConfig.AgentConfig); err != nil {
return err
}
agentRan = true
}
if err := tunnel.Setup(ctx, nodeConfig, proxy); err != nil {
return err
}
if !agentRan {
return agent.Agent(&nodeConfig.AgentConfig)
}
return nil
}
| 1 | 9,689 | Did it not work out to wait until after containerd and kubelet are started? | k3s-io-k3s | go |
@@ -159,6 +159,10 @@ const std::unordered_map<std::string, ItemParseAttributes_t> ItemParseAttributes
{"elementenergy", ITEM_PARSE_ELEMENTENERGY},
{"elementdeath", ITEM_PARSE_ELEMENTDEATH},
{"elementholy", ITEM_PARSE_ELEMENTHOLY},
+ {"cooldownreduction", ITEM_PARSE_COOLDOWNREDUCTION},
+ {"increasedamage", ITEM_PARSE_INCREASEDAMAGE},
+ {"increasehealing", ITEM_PARSE_INCREASEHEALING},
+ {"increasemanaget", ITEM_PARSE_INCREASEMANAGAIN},
{"walkstack", ITEM_PARSE_WALKSTACK},
{"blocking", ITEM_PARSE_BLOCKING},
{"allowdistread", ITEM_PARSE_ALLOWDISTREAD}, | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "items.h"
#include "spells.h"
#include "movement.h"
#include "weapons.h"
#include "pugicast.h"
extern MoveEvents* g_moveEvents;
extern Weapons* g_weapons;
const std::unordered_map<std::string, ItemParseAttributes_t> ItemParseAttributesMap = {
{"type", ITEM_PARSE_TYPE},
{"description", ITEM_PARSE_DESCRIPTION},
{"runespellname", ITEM_PARSE_RUNESPELLNAME},
{"weight", ITEM_PARSE_WEIGHT},
{"showcount", ITEM_PARSE_SHOWCOUNT},
{"armor", ITEM_PARSE_ARMOR},
{"defense", ITEM_PARSE_DEFENSE},
{"extradef", ITEM_PARSE_EXTRADEF},
{"attack", ITEM_PARSE_ATTACK},
{"attackspeed", ITEM_PARSE_ATTACK_SPEED},
{"rotateto", ITEM_PARSE_ROTATETO},
{"moveable", ITEM_PARSE_MOVEABLE},
{"movable", ITEM_PARSE_MOVEABLE},
{"blockprojectile", ITEM_PARSE_BLOCKPROJECTILE},
{"allowpickupable", ITEM_PARSE_PICKUPABLE},
{"pickupable", ITEM_PARSE_PICKUPABLE},
{"forceserialize", ITEM_PARSE_FORCESERIALIZE},
{"forcesave", ITEM_PARSE_FORCESERIALIZE},
{"floorchange", ITEM_PARSE_FLOORCHANGE},
{"corpsetype", ITEM_PARSE_CORPSETYPE},
{"containersize", ITEM_PARSE_CONTAINERSIZE},
{"fluidsource", ITEM_PARSE_FLUIDSOURCE},
{"readable", ITEM_PARSE_READABLE},
{"writeable", ITEM_PARSE_WRITEABLE},
{"maxtextlen", ITEM_PARSE_MAXTEXTLEN},
{"writeonceitemid", ITEM_PARSE_WRITEONCEITEMID},
{"weapontype", ITEM_PARSE_WEAPONTYPE},
{"slottype", ITEM_PARSE_SLOTTYPE},
{"ammotype", ITEM_PARSE_AMMOTYPE},
{"shoottype", ITEM_PARSE_SHOOTTYPE},
{"effect", ITEM_PARSE_EFFECT},
{"range", ITEM_PARSE_RANGE},
{"stopduration", ITEM_PARSE_STOPDURATION},
{"decayto", ITEM_PARSE_DECAYTO},
{"transformequipto", ITEM_PARSE_TRANSFORMEQUIPTO},
{"transformdeequipto", ITEM_PARSE_TRANSFORMDEEQUIPTO},
{"duration", ITEM_PARSE_DURATION},
{"showduration", ITEM_PARSE_SHOWDURATION},
{"charges", ITEM_PARSE_CHARGES},
{"showcharges", ITEM_PARSE_SHOWCHARGES},
{"showattributes", ITEM_PARSE_SHOWATTRIBUTES},
{"hitchance", ITEM_PARSE_HITCHANCE},
{"maxhitchance", ITEM_PARSE_MAXHITCHANCE},
{"invisible", ITEM_PARSE_INVISIBLE},
{"speed", ITEM_PARSE_SPEED},
{"healthgain", ITEM_PARSE_HEALTHGAIN},
{"healthticks", ITEM_PARSE_HEALTHTICKS},
{"managain", ITEM_PARSE_MANAGAIN},
{"manaticks", ITEM_PARSE_MANATICKS},
{"manashield", ITEM_PARSE_MANASHIELD},
{"skillsword", ITEM_PARSE_SKILLSWORD},
{"skillaxe", ITEM_PARSE_SKILLAXE},
{"skillclub", ITEM_PARSE_SKILLCLUB},
{"skilldist", ITEM_PARSE_SKILLDIST},
{"skillfish", ITEM_PARSE_SKILLFISH},
{"skillshield", ITEM_PARSE_SKILLSHIELD},
{"skillfist", ITEM_PARSE_SKILLFIST},
{"maxhitpoints", ITEM_PARSE_MAXHITPOINTS},
{"maxhitpointspercent", ITEM_PARSE_MAXHITPOINTSPERCENT},
{"maxmanapoints", ITEM_PARSE_MAXMANAPOINTS},
{"maxmanapointspercent", ITEM_PARSE_MAXMANAPOINTSPERCENT},
{"magicpoints", ITEM_PARSE_MAGICPOINTS},
{"magiclevelpoints", ITEM_PARSE_MAGICPOINTS},
{"magicpointspercent", ITEM_PARSE_MAGICPOINTSPERCENT},
{"criticalhitchance", ITEM_PARSE_CRITICALHITCHANCE},
{"criticalhitamount", ITEM_PARSE_CRITICALHITAMOUNT},
{"lifeleechchance", ITEM_PARSE_LIFELEECHCHANCE},
{"lifeleechamount", ITEM_PARSE_LIFELEECHAMOUNT},
{"manaleechchance", ITEM_PARSE_MANALEECHCHANCE},
{"manaleechamount", ITEM_PARSE_MANALEECHAMOUNT},
{"fieldabsorbpercentenergy", ITEM_PARSE_FIELDABSORBPERCENTENERGY},
{"fieldabsorbpercentfire", ITEM_PARSE_FIELDABSORBPERCENTFIRE},
{"fieldabsorbpercentpoison", ITEM_PARSE_FIELDABSORBPERCENTPOISON},
{"fieldabsorbpercentearth", ITEM_PARSE_FIELDABSORBPERCENTPOISON},
{"absorbpercentall", ITEM_PARSE_ABSORBPERCENTALL},
{"absorbpercentallelements", ITEM_PARSE_ABSORBPERCENTALL},
{"absorbpercentelements", ITEM_PARSE_ABSORBPERCENTELEMENTS},
{"absorbpercentmagic", ITEM_PARSE_ABSORBPERCENTMAGIC},
{"absorbpercentenergy", ITEM_PARSE_ABSORBPERCENTENERGY},
{"absorbpercentfire", ITEM_PARSE_ABSORBPERCENTFIRE},
{"absorbpercentpoison", ITEM_PARSE_ABSORBPERCENTPOISON},
{"absorbpercentearth", ITEM_PARSE_ABSORBPERCENTPOISON},
{"absorbpercentice", ITEM_PARSE_ABSORBPERCENTICE},
{"absorbpercentholy", ITEM_PARSE_ABSORBPERCENTHOLY},
{"absorbpercentdeath", ITEM_PARSE_ABSORBPERCENTDEATH},
{"absorbpercentlifedrain", ITEM_PARSE_ABSORBPERCENTLIFEDRAIN},
{"absorbpercentmanadrain", ITEM_PARSE_ABSORBPERCENTMANADRAIN},
{"absorbpercentdrown", ITEM_PARSE_ABSORBPERCENTDROWN},
{"absorbpercentphysical", ITEM_PARSE_ABSORBPERCENTPHYSICAL},
{"absorbpercenthealing", ITEM_PARSE_ABSORBPERCENTHEALING},
{"absorbpercentundefined", ITEM_PARSE_ABSORBPERCENTUNDEFINED},
{"magiclevelenergy", ITEM_PARSE_MAGICLEVELENERGY},
{"magiclevelfire", ITEM_PARSE_MAGICLEVELFIRE},
{"magiclevelpoison", ITEM_PARSE_MAGICLEVELPOISON},
{"magiclevelearth", ITEM_PARSE_MAGICLEVELPOISON},
{"magiclevelice", ITEM_PARSE_MAGICLEVELICE},
{"magiclevelholy", ITEM_PARSE_MAGICLEVELHOLY},
{"magicleveldeath", ITEM_PARSE_MAGICLEVELDEATH},
{"magiclevellifedrain", ITEM_PARSE_MAGICLEVELLIFEDRAIN},
{"magiclevelmanadrain", ITEM_PARSE_MAGICLEVELMANADRAIN},
{"magicleveldrown", ITEM_PARSE_MAGICLEVELDROWN},
{"magiclevelphysical", ITEM_PARSE_MAGICLEVELPHYSICAL},
{"magiclevelhealing", ITEM_PARSE_MAGICLEVELHEALING},
{"magiclevelundefined", ITEM_PARSE_MAGICLEVELUNDEFINED},
{"suppressdrunk", ITEM_PARSE_SUPPRESSDRUNK},
{"suppressenergy", ITEM_PARSE_SUPPRESSENERGY},
{"suppressfire", ITEM_PARSE_SUPPRESSFIRE},
{"suppresspoison", ITEM_PARSE_SUPPRESSPOISON},
{"suppressdrown", ITEM_PARSE_SUPPRESSDROWN},
{"suppressphysical", ITEM_PARSE_SUPPRESSPHYSICAL},
{"suppressfreeze", ITEM_PARSE_SUPPRESSFREEZE},
{"suppressdazzle", ITEM_PARSE_SUPPRESSDAZZLE},
{"suppresscurse", ITEM_PARSE_SUPPRESSCURSE},
{"field", ITEM_PARSE_FIELD},
{"replaceable", ITEM_PARSE_REPLACEABLE},
{"partnerdirection", ITEM_PARSE_PARTNERDIRECTION},
{"leveldoor", ITEM_PARSE_LEVELDOOR},
{"maletransformto", ITEM_PARSE_MALETRANSFORMTO},
{"malesleeper", ITEM_PARSE_MALETRANSFORMTO},
{"femaletransformto", ITEM_PARSE_FEMALETRANSFORMTO},
{"femalesleeper", ITEM_PARSE_FEMALETRANSFORMTO},
{"transformto", ITEM_PARSE_TRANSFORMTO},
{"destroyto", ITEM_PARSE_DESTROYTO},
{"elementice", ITEM_PARSE_ELEMENTICE},
{"elementearth", ITEM_PARSE_ELEMENTEARTH},
{"elementfire", ITEM_PARSE_ELEMENTFIRE},
{"elementenergy", ITEM_PARSE_ELEMENTENERGY},
{"elementdeath", ITEM_PARSE_ELEMENTDEATH},
{"elementholy", ITEM_PARSE_ELEMENTHOLY},
{"walkstack", ITEM_PARSE_WALKSTACK},
{"blocking", ITEM_PARSE_BLOCKING},
{"allowdistread", ITEM_PARSE_ALLOWDISTREAD},
{"storeitem", ITEM_PARSE_STOREITEM},
{"worth", ITEM_PARSE_WORTH},
};
const std::unordered_map<std::string, ItemTypes_t> ItemTypesMap = {
{"key", ITEM_TYPE_KEY},
{"magicfield", ITEM_TYPE_MAGICFIELD},
{"container", ITEM_TYPE_CONTAINER},
{"depot", ITEM_TYPE_DEPOT},
{"mailbox", ITEM_TYPE_MAILBOX},
{"trashholder", ITEM_TYPE_TRASHHOLDER},
{"teleport", ITEM_TYPE_TELEPORT},
{"door", ITEM_TYPE_DOOR},
{"bed", ITEM_TYPE_BED},
{"rune", ITEM_TYPE_RUNE},
};
const std::unordered_map<std::string, tileflags_t> TileStatesMap = {
{"down", TILESTATE_FLOORCHANGE_DOWN},
{"north", TILESTATE_FLOORCHANGE_NORTH},
{"south", TILESTATE_FLOORCHANGE_SOUTH},
{"southalt", TILESTATE_FLOORCHANGE_SOUTH_ALT},
{"west", TILESTATE_FLOORCHANGE_WEST},
{"east", TILESTATE_FLOORCHANGE_EAST},
{"eastalt", TILESTATE_FLOORCHANGE_EAST_ALT},
};
const std::unordered_map<std::string, RaceType_t> RaceTypesMap = {
{"venom", RACE_VENOM},
{"blood", RACE_BLOOD},
{"undead", RACE_UNDEAD},
{"fire", RACE_FIRE},
{"energy", RACE_ENERGY},
{"ink", RACE_INK},
};
const std::unordered_map<std::string, WeaponType_t> WeaponTypesMap = {
{"sword", WEAPON_SWORD},
{"club", WEAPON_CLUB},
{"axe", WEAPON_AXE},
{"shield", WEAPON_SHIELD},
{"distance", WEAPON_DISTANCE},
{"wand", WEAPON_WAND},
{"ammunition", WEAPON_AMMO},
};
// Maps the items.xml "fluidsource" attribute value to the fluid constant
// stored in ItemType::fluidSource (ITEM_PARSE_FLUIDSOURCE).
const std::unordered_map<std::string, FluidTypes_t> FluidTypesMap = {
	{"water", FLUID_WATER},
	{"blood", FLUID_BLOOD},
	{"beer", FLUID_BEER},
	{"slime", FLUID_SLIME},
	{"lemonade", FLUID_LEMONADE},
	{"milk", FLUID_MILK},
	{"mana", FLUID_MANA},
	{"life", FLUID_LIFE},
	{"oil", FLUID_OIL},
	{"urine", FLUID_URINE},
	{"coconut", FLUID_COCONUTMILK},
	{"wine", FLUID_WINE},
	{"mud", FLUID_MUD},
	{"fruitjuice", FLUID_FRUITJUICE},
	{"lava", FLUID_LAVA},
	{"rum", FLUID_RUM},
	{"swamp", FLUID_SWAMP},
	{"tea", FLUID_TEA},
	{"mead", FLUID_MEAD},
	{"ink", FLUID_INK},
};
Items::Items()
{
	// Pre-allocate for the approximate number of item ids so that loading
	// the item database does not trigger repeated reallocations.
	items.reserve(45000);
	nameToItems.reserve(45000);
}
// Drops all loaded item definitions and lookup tables so the database can be
// re-populated from scratch (used by reload()).
void Items::clear()
{
	items.clear();
	clientIdToServerIdMap.clear();
	nameToItems.clear();
	currencyItems.clear();
	inventory.clear();
}
// Re-reads the item database from disk (items.otb + items.xml) and refreshes
// the subsystems that cache item-derived data (move events and weapons).
// Returns false when either data file fails to load.
bool Items::reload()
{
	clear();
	// NOTE(review): the result of loadFromOtb() used to be discarded, so a
	// corrupt or version-mismatched items.otb silently left the (already
	// cleared) item table empty. Propagate the failure instead.
	if (!loadFromOtb("data/items/items.otb")) {
		return false;
	}
	if (!loadFromXml()) {
		return false;
	}
	g_moveEvents->reload();
	g_weapons->reload();
	g_weapons->loadDefaults();
	return true;
}
// Magic identifier expected in the header of the items.otb file.
constexpr auto OTBI = OTB::Identifier{{'O','T', 'B', 'I'}};
// Parses the binary item database "items.otb": reads the root version header,
// then one node per item type carrying a flag bitmask and a sequence of
// attribute records (server/client id, speed, light, top order, ware id,
// classification). Populates the items vector (indexed by server id) and the
// client-id -> server-id map. Returns false on any malformed record or on a
// version mismatch.
bool Items::loadFromOtb(const std::string& file)
{
	OTB::Loader loader{file, OTBI};
	auto& root = loader.parseTree();
	PropStream props;
	if (loader.getProps(root, props)) {
		//4 byte flags
		//attributes
		//0x01 = version data
		uint32_t flags;
		if (!props.read<uint32_t>(flags)) {
			return false;
		}
		uint8_t attr;
		if (!props.read<uint8_t>(attr)) {
			return false;
		}
		if (attr == ROOT_ATTR_VERSION) {
			uint16_t datalen;
			if (!props.read<uint16_t>(datalen)) {
				return false;
			}
			if (datalen != sizeof(VERSIONINFO)) {
				return false;
			}
			VERSIONINFO vi;
			if (!props.read(vi)) {
				return false;
			}
			majorVersion = vi.dwMajorVersion; //items otb format file version
			minorVersion = vi.dwMinorVersion; //client version
			buildNumber = vi.dwBuildNumber; //revision
		}
	}
	// Reject databases built for a different otb format (major) or for an
	// older client (minor) than this server supports.
	if (majorVersion == 0xFFFFFFFF) {
		std::cout << "[Warning - Items::loadFromOtb] items.otb using generic client version." << std::endl;
	} else if (majorVersion != 3) {
		std::cout << "Old version detected, a newer version of items.otb is required." << std::endl;
		return false;
	} else if (minorVersion < CLIENT_VERSION_LAST) {
		std::cout << "A newer version of items.otb is required." << std::endl;
		return false;
	}
	for (auto& itemNode : root.children) {
		PropStream stream;
		if (!loader.getProps(itemNode, stream)) {
			return false;
		}
		uint32_t flags;
		if (!stream.read<uint32_t>(flags)) {
			return false;
		}
		uint16_t serverId = 0;
		uint16_t clientId = 0;
		uint16_t speed = 0;
		uint16_t wareId = 0;
		uint8_t lightLevel = 0;
		uint8_t lightColor = 0;
		uint8_t alwaysOnTopOrder = 0;
		uint8_t classification = 0;
		// Each attribute record is [1-byte id][2-byte length][payload];
		// unknown attribute ids are skipped by their declared length.
		uint8_t attrib;
		while (stream.read<uint8_t>(attrib)) {
			uint16_t datalen;
			if (!stream.read<uint16_t>(datalen)) {
				return false;
			}
			switch (attrib) {
				case ITEM_ATTR_SERVERID: {
					if (datalen != sizeof(uint16_t)) {
						return false;
					}
					if (!stream.read<uint16_t>(serverId)) {
						return false;
					}
					break;
				}
				case ITEM_ATTR_CLIENTID: {
					if (datalen != sizeof(uint16_t)) {
						return false;
					}
					if (!stream.read<uint16_t>(clientId)) {
						return false;
					}
					break;
				}
				case ITEM_ATTR_SPEED: {
					if (datalen != sizeof(uint16_t)) {
						return false;
					}
					if (!stream.read<uint16_t>(speed)) {
						return false;
					}
					break;
				}
				case ITEM_ATTR_LIGHT2: {
					if (datalen != sizeof(lightBlock2)) {
						return false;
					}
					lightBlock2 lb2;
					if (!stream.read(lb2)) {
						return false;
					}
					lightLevel = static_cast<uint8_t>(lb2.lightLevel);
					lightColor = static_cast<uint8_t>(lb2.lightColor);
					break;
				}
				case ITEM_ATTR_TOPORDER: {
					if (datalen != sizeof(uint8_t)) {
						return false;
					}
					if (!stream.read<uint8_t>(alwaysOnTopOrder)) {
						return false;
					}
					break;
				}
				case ITEM_ATTR_WAREID: {
					if (datalen != sizeof(uint16_t)) {
						return false;
					}
					if (!stream.read<uint16_t>(wareId)) {
						return false;
					}
					break;
				}
				case ITEM_ATTR_CLASS: {
					if (datalen != sizeof(uint8_t)) {
						return false;
					}
					if (!stream.read<uint8_t>(classification)) {
						return false;
					}
					break;
				}
				default: {
					//skip unknown attributes
					if (!stream.skip(datalen)) {
						return false;
					}
					break;
				}
			}
		}
		clientIdToServerIdMap.emplace(clientId, serverId);
		// store the found item
		if (serverId >= items.size()) {
			items.resize(serverId + 1);
		}
		ItemType& iType = items[serverId];
		iType.group = static_cast<itemgroup_t>(itemNode.type);
		switch (itemNode.type) {
			case ITEM_GROUP_CONTAINER:
				iType.type = ITEM_TYPE_CONTAINER;
				break;
			case ITEM_GROUP_DOOR:
				//not used
				iType.type = ITEM_TYPE_DOOR;
				break;
			case ITEM_GROUP_MAGICFIELD:
				//not used
				iType.type = ITEM_TYPE_MAGICFIELD;
				break;
			case ITEM_GROUP_TELEPORT:
				//not used
				iType.type = ITEM_TYPE_TELEPORT;
				break;
			case ITEM_GROUP_NONE:
			case ITEM_GROUP_GROUND:
			case ITEM_GROUP_SPLASH:
			case ITEM_GROUP_FLUID:
			case ITEM_GROUP_CHARGES:
			case ITEM_GROUP_DEPRECATED:
			case ITEM_GROUP_PODIUM:
				break;
			default:
				return false;
		}
		// Translate the otb flag bitmask into individual ItemType booleans.
		iType.blockSolid = hasBitSet(FLAG_BLOCK_SOLID, flags);
		iType.blockProjectile = hasBitSet(FLAG_BLOCK_PROJECTILE, flags);
		iType.blockPathFind = hasBitSet(FLAG_BLOCK_PATHFIND, flags);
		iType.hasHeight = hasBitSet(FLAG_HAS_HEIGHT, flags);
		iType.useable = hasBitSet(FLAG_USEABLE, flags);
		iType.pickupable = hasBitSet(FLAG_PICKUPABLE, flags);
		iType.moveable = hasBitSet(FLAG_MOVEABLE, flags);
		iType.stackable = hasBitSet(FLAG_STACKABLE, flags);
		iType.alwaysOnTop = hasBitSet(FLAG_ALWAYSONTOP, flags);
		iType.isVertical = hasBitSet(FLAG_VERTICAL, flags);
		iType.isHorizontal = hasBitSet(FLAG_HORIZONTAL, flags);
		iType.isHangable = hasBitSet(FLAG_HANGABLE, flags);
		iType.allowDistRead = hasBitSet(FLAG_ALLOWDISTREAD, flags);
		iType.rotatable = hasBitSet(FLAG_ROTATABLE, flags);
		iType.canReadText = hasBitSet(FLAG_READABLE, flags);
		iType.lookThrough = hasBitSet(FLAG_LOOKTHROUGH, flags);
		iType.isAnimation = hasBitSet(FLAG_ANIMATION, flags);
		// iType.walkStack = !hasBitSet(FLAG_FULLTILE, flags);
		iType.forceUse = hasBitSet(FLAG_FORCEUSE, flags);
		iType.id = serverId;
		iType.clientId = clientId;
		iType.speed = speed;
		iType.lightLevel = lightLevel;
		iType.lightColor = lightColor;
		iType.wareId = wareId;
		iType.classification = classification;
		iType.alwaysOnTopOrder = alwaysOnTopOrder;
	}
	items.shrink_to_fit();
	return true;
}
// Loads item metadata from data/items/items.xml. Each <item> node carries
// either a single "id" or an inclusive "fromid"/"toid" range; every id in a
// range is parsed with the same node. Returns false only when the XML file
// itself cannot be parsed; malformed nodes are skipped with a warning.
bool Items::loadFromXml()
{
	pugi::xml_document doc;
	pugi::xml_parse_result result = doc.load_file("data/items/items.xml");
	if (!result) {
		printXMLError("Error - Items::loadFromXml", "data/items/items.xml", result);
		return false;
	}
	for (auto itemNode : doc.child("items").children()) {
		pugi::xml_attribute idAttribute = itemNode.attribute("id");
		if (idAttribute) {
			parseItemNode(itemNode, pugi::cast<uint16_t>(idAttribute.value()));
			continue;
		}
		pugi::xml_attribute fromIdAttribute = itemNode.attribute("fromid");
		if (!fromIdAttribute) {
			std::cout << "[Warning - Items::loadFromXml] No item id found" << std::endl;
			continue;
		}
		pugi::xml_attribute toIdAttribute = itemNode.attribute("toid");
		if (!toIdAttribute) {
			std::cout << "[Warning - Items::loadFromXml] fromid (" << fromIdAttribute.value() << ") without toid" << std::endl;
			continue;
		}
		// Iterate with a 32-bit counter: with a uint16_t counter, a range
		// ending at 65535 would wrap around on "id++" and loop forever.
		uint32_t id = pugi::cast<uint16_t>(fromIdAttribute.value());
		uint32_t toId = pugi::cast<uint16_t>(toIdAttribute.value());
		while (id <= toId) {
			parseItemNode(itemNode, static_cast<uint16_t>(id++));
		}
	}
	return true;
}
// Applies the attributes of one items.xml <item> node to the ItemType with
// the given server id. Each child <attribute key="..." value="..."/> element
// is dispatched through ItemParseAttributesMap to the matching case below.
// Unknown keys/values only produce warnings; nodes whose id is unknown or
// already named are skipped.
void Items::parseItemNode(const pugi::xml_node& itemNode, uint16_t id)
{
	// Ids below 100 are reserved and not present in items.otb; register them
	// on the fly so they can still carry xml-defined attributes.
	if (id > 0 && id < 100) {
		ItemType& iType = items[id];
		iType.id = id;
	}
	ItemType& it = getItemType(id);
	if (it.id == 0) {
		return;
	}
	// A non-empty name means this id was already configured by an earlier node.
	if (!it.name.empty()) {
		std::cout << "[Warning - Items::parseItemNode] Duplicate item with id: " << id << std::endl;
		return;
	}
	it.name = itemNode.attribute("name").as_string();
	if (!it.name.empty()) {
		// Only the first item claiming a name is reachable by name lookup.
		std::string lowerCaseName = asLowerCaseString(it.name);
		if (nameToItems.find(lowerCaseName) == nameToItems.end()) {
			nameToItems.emplace(std::move(lowerCaseName), id);
		}
	}
	pugi::xml_attribute articleAttribute = itemNode.attribute("article");
	if (articleAttribute) {
		it.article = articleAttribute.as_string();
	}
	pugi::xml_attribute pluralAttribute = itemNode.attribute("plural");
	if (pluralAttribute) {
		it.pluralName = pluralAttribute.as_string();
	}
	Abilities& abilities = it.getAbilities();
	for (auto attributeNode : itemNode.children()) {
		pugi::xml_attribute keyAttribute = attributeNode.attribute("key");
		if (!keyAttribute) {
			continue;
		}
		pugi::xml_attribute valueAttribute = attributeNode.attribute("value");
		if (!valueAttribute) {
			continue;
		}
		std::string tmpStrValue = asLowerCaseString(keyAttribute.as_string());
		auto parseAttribute = ItemParseAttributesMap.find(tmpStrValue);
		if (parseAttribute != ItemParseAttributesMap.end()) {
			ItemParseAttributes_t parseType = parseAttribute->second;
			switch (parseType) {
				case ITEM_PARSE_TYPE: {
					tmpStrValue = asLowerCaseString(valueAttribute.as_string());
					auto it2 = ItemTypesMap.find(tmpStrValue);
					if (it2 != ItemTypesMap.end()) {
						it.type = it2->second;
						if (it.type == ITEM_TYPE_CONTAINER) {
							it.group = ITEM_GROUP_CONTAINER;
						}
					} else {
						std::cout << "[Warning - Items::parseItemNode] Unknown type: " << valueAttribute.as_string() << std::endl;
					}
					break;
				}
				case ITEM_PARSE_DESCRIPTION: {
					it.description = valueAttribute.as_string();
					break;
				}
				case ITEM_PARSE_RUNESPELLNAME: {
					it.runeSpellName = valueAttribute.as_string();
					break;
				}
				case ITEM_PARSE_WEIGHT: {
					it.weight = pugi::cast<uint32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_SHOWCOUNT: {
					it.showCount = valueAttribute.as_bool();
					break;
				}
				case ITEM_PARSE_ARMOR: {
					it.armor = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_DEFENSE: {
					it.defense = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_EXTRADEF: {
					it.extraDefense = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_ATTACK: {
					it.attack = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_ATTACK_SPEED: {
					// Clamp to a 100ms floor to avoid degenerate attack rates.
					it.attackSpeed = pugi::cast<uint32_t>(valueAttribute.value());
					if (it.attackSpeed > 0 && it.attackSpeed < 100) {
						std::cout << "[Warning - Items::parseItemNode] AttackSpeed lower than 100 for item: " << it.id << std::endl;
						it.attackSpeed = 100;
					}
					break;
				}
				case ITEM_PARSE_ROTATETO: {
					it.rotateTo = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MOVEABLE: {
					it.moveable = valueAttribute.as_bool();
					break;
				}
				case ITEM_PARSE_BLOCKPROJECTILE: {
					it.blockProjectile = valueAttribute.as_bool();
					break;
				}
				case ITEM_PARSE_PICKUPABLE: {
					it.allowPickupable = valueAttribute.as_bool();
					break;
				}
				case ITEM_PARSE_FORCESERIALIZE: {
					it.forceSerialize = valueAttribute.as_bool();
					break;
				}
				case ITEM_PARSE_FLOORCHANGE: {
					tmpStrValue = asLowerCaseString(valueAttribute.as_string());
					auto it2 = TileStatesMap.find(tmpStrValue);
					if (it2 != TileStatesMap.end()) {
						it.floorChange |= it2->second;
					} else {
						std::cout << "[Warning - Items::parseItemNode] Unknown floorChange: " << valueAttribute.as_string() << std::endl;
					}
					break;
				}
				case ITEM_PARSE_CORPSETYPE: {
					tmpStrValue = asLowerCaseString(valueAttribute.as_string());
					auto it2 = RaceTypesMap.find(tmpStrValue);
					if (it2 != RaceTypesMap.end()) {
						it.corpseType = it2->second;
					} else {
						std::cout << "[Warning - Items::parseItemNode] Unknown corpseType: " << valueAttribute.as_string() << std::endl;
					}
					break;
				}
				case ITEM_PARSE_CONTAINERSIZE: {
					it.maxItems = pugi::cast<uint16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_FLUIDSOURCE: {
					tmpStrValue = asLowerCaseString(valueAttribute.as_string());
					auto it2 = FluidTypesMap.find(tmpStrValue);
					if (it2 != FluidTypesMap.end()) {
						it.fluidSource = it2->second;
					} else {
						std::cout << "[Warning - Items::parseItemNode] Unknown fluidSource: " << valueAttribute.as_string() << std::endl;
					}
					break;
				}
				case ITEM_PARSE_READABLE: {
					it.canReadText = valueAttribute.as_bool();
					break;
				}
				case ITEM_PARSE_WRITEABLE: {
					// Writeable implies readable.
					it.canWriteText = valueAttribute.as_bool();
					it.canReadText = it.canWriteText;
					break;
				}
				case ITEM_PARSE_MAXTEXTLEN: {
					it.maxTextLen = pugi::cast<uint16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_WRITEONCEITEMID: {
					it.writeOnceItemId = pugi::cast<uint16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_WEAPONTYPE: {
					tmpStrValue = asLowerCaseString(valueAttribute.as_string());
					auto it2 = WeaponTypesMap.find(tmpStrValue);
					if (it2 != WeaponTypesMap.end()) {
						it.weaponType = it2->second;
					} else {
						std::cout << "[Warning - Items::parseItemNode] Unknown weaponType: " << valueAttribute.as_string() << std::endl;
					}
					break;
				}
				case ITEM_PARSE_SLOTTYPE: {
					// Note: "right-hand"/"left-hand" clear the opposite slot
					// bit rather than setting one.
					tmpStrValue = asLowerCaseString(valueAttribute.as_string());
					if (tmpStrValue == "head") {
						it.slotPosition |= SLOTP_HEAD;
					} else if (tmpStrValue == "body") {
						it.slotPosition |= SLOTP_ARMOR;
					} else if (tmpStrValue == "legs") {
						it.slotPosition |= SLOTP_LEGS;
					} else if (tmpStrValue == "feet") {
						it.slotPosition |= SLOTP_FEET;
					} else if (tmpStrValue == "backpack") {
						it.slotPosition |= SLOTP_BACKPACK;
					} else if (tmpStrValue == "two-handed") {
						it.slotPosition |= SLOTP_TWO_HAND;
					} else if (tmpStrValue == "right-hand") {
						it.slotPosition &= ~SLOTP_LEFT;
					} else if (tmpStrValue == "left-hand") {
						it.slotPosition &= ~SLOTP_RIGHT;
					} else if (tmpStrValue == "necklace") {
						it.slotPosition |= SLOTP_NECKLACE;
					} else if (tmpStrValue == "ring") {
						it.slotPosition |= SLOTP_RING;
					} else if (tmpStrValue == "ammo") {
						it.slotPosition |= SLOTP_AMMO;
					} else if (tmpStrValue == "hand") {
						it.slotPosition |= SLOTP_HAND;
					} else {
						std::cout << "[Warning - Items::parseItemNode] Unknown slotType: " << valueAttribute.as_string() << std::endl;
					}
					break;
				}
				case ITEM_PARSE_AMMOTYPE: {
					it.ammoType = getAmmoType(asLowerCaseString(valueAttribute.as_string()));
					if (it.ammoType == AMMO_NONE) {
						std::cout << "[Warning - Items::parseItemNode] Unknown ammoType: " << valueAttribute.as_string() << std::endl;
					}
					break;
				}
				case ITEM_PARSE_SHOOTTYPE: {
					ShootType_t shoot = getShootType(asLowerCaseString(valueAttribute.as_string()));
					if (shoot != CONST_ANI_NONE) {
						it.shootType = shoot;
					} else {
						std::cout << "[Warning - Items::parseItemNode] Unknown shootType: " << valueAttribute.as_string() << std::endl;
					}
					break;
				}
				case ITEM_PARSE_EFFECT: {
					MagicEffectClasses effect = getMagicEffect(asLowerCaseString(valueAttribute.as_string()));
					if (effect != CONST_ME_NONE) {
						it.magicEffect = effect;
					} else {
						std::cout << "[Warning - Items::parseItemNode] Unknown effect: " << valueAttribute.as_string() << std::endl;
					}
					break;
				}
				case ITEM_PARSE_RANGE: {
					it.shootRange = pugi::cast<uint16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_STOPDURATION: {
					it.stopTime = valueAttribute.as_bool();
					break;
				}
				case ITEM_PARSE_DECAYTO: {
					it.decayTo = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_TRANSFORMEQUIPTO: {
					it.transformEquipTo = pugi::cast<uint16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_TRANSFORMDEEQUIPTO: {
					it.transformDeEquipTo = pugi::cast<uint16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_DURATION: {
					it.decayTime = pugi::cast<uint32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_SHOWDURATION: {
					it.showDuration = valueAttribute.as_bool();
					break;
				}
				case ITEM_PARSE_CHARGES: {
					it.charges = pugi::cast<uint32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_SHOWCHARGES: {
					it.showCharges = valueAttribute.as_bool();
					break;
				}
				case ITEM_PARSE_SHOWATTRIBUTES: {
					it.showAttributes = valueAttribute.as_bool();
					break;
				}
				case ITEM_PARSE_HITCHANCE: {
					// Clamped to [-100, 100].
					it.hitChance = std::min<int8_t>(100, std::max<int8_t>(-100, pugi::cast<int16_t>(valueAttribute.value())));
					break;
				}
				case ITEM_PARSE_MAXHITCHANCE: {
					it.maxHitChance = std::min<uint32_t>(100, pugi::cast<uint32_t>(valueAttribute.value()));
					break;
				}
				case ITEM_PARSE_INVISIBLE: {
					abilities.invisible = valueAttribute.as_bool();
					break;
				}
				case ITEM_PARSE_SPEED: {
					abilities.speed = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_HEALTHGAIN: {
					abilities.regeneration = true;
					abilities.healthGain = pugi::cast<uint32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_HEALTHTICKS: {
					abilities.regeneration = true;
					abilities.healthTicks = pugi::cast<uint32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MANAGAIN: {
					abilities.regeneration = true;
					abilities.manaGain = pugi::cast<uint32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MANATICKS: {
					abilities.regeneration = true;
					abilities.manaTicks = pugi::cast<uint32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MANASHIELD: {
					abilities.manaShield = valueAttribute.as_bool();
					break;
				}
				case ITEM_PARSE_SKILLSWORD: {
					abilities.skills[SKILL_SWORD] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_SKILLAXE: {
					abilities.skills[SKILL_AXE] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_SKILLCLUB: {
					abilities.skills[SKILL_CLUB] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_SKILLDIST: {
					abilities.skills[SKILL_DISTANCE] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_SKILLFISH: {
					abilities.skills[SKILL_FISHING] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_SKILLSHIELD: {
					abilities.skills[SKILL_SHIELD] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_SKILLFIST: {
					abilities.skills[SKILL_FIST] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_CRITICALHITAMOUNT: {
					abilities.specialSkills[SPECIALSKILL_CRITICALHITAMOUNT] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_CRITICALHITCHANCE: {
					abilities.specialSkills[SPECIALSKILL_CRITICALHITCHANCE] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MANALEECHAMOUNT: {
					abilities.specialSkills[SPECIALSKILL_MANALEECHAMOUNT] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MANALEECHCHANCE: {
					abilities.specialSkills[SPECIALSKILL_MANALEECHCHANCE] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_LIFELEECHAMOUNT: {
					abilities.specialSkills[SPECIALSKILL_LIFELEECHAMOUNT] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_LIFELEECHCHANCE: {
					abilities.specialSkills[SPECIALSKILL_LIFELEECHCHANCE] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAXHITPOINTS: {
					abilities.stats[STAT_MAXHITPOINTS] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAXHITPOINTSPERCENT: {
					abilities.statsPercent[STAT_MAXHITPOINTS] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAXMANAPOINTS: {
					abilities.stats[STAT_MAXMANAPOINTS] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAXMANAPOINTSPERCENT: {
					abilities.statsPercent[STAT_MAXMANAPOINTS] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAGICPOINTS: {
					abilities.stats[STAT_MAGICPOINTS] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAGICPOINTSPERCENT: {
					abilities.statsPercent[STAT_MAGICPOINTS] = pugi::cast<int32_t>(valueAttribute.value());
					break;
				}
				// Absorb/field-absorb values accumulate (+=) so repeated
				// attributes stack rather than overwrite.
				case ITEM_PARSE_FIELDABSORBPERCENTENERGY: {
					abilities.fieldAbsorbPercent[combatTypeToIndex(COMBAT_ENERGYDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_FIELDABSORBPERCENTFIRE: {
					abilities.fieldAbsorbPercent[combatTypeToIndex(COMBAT_FIREDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_FIELDABSORBPERCENTPOISON: {
					abilities.fieldAbsorbPercent[combatTypeToIndex(COMBAT_EARTHDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_ABSORBPERCENTALL: {
					int16_t value = pugi::cast<int16_t>(valueAttribute.value());
					for (auto& i : abilities.absorbPercent) {
						i += value;
					}
					break;
				}
				case ITEM_PARSE_ABSORBPERCENTELEMENTS: {
					int16_t value = pugi::cast<int16_t>(valueAttribute.value());
					abilities.absorbPercent[combatTypeToIndex(COMBAT_ENERGYDAMAGE)] += value;
					abilities.absorbPercent[combatTypeToIndex(COMBAT_FIREDAMAGE)] += value;
					abilities.absorbPercent[combatTypeToIndex(COMBAT_EARTHDAMAGE)] += value;
					abilities.absorbPercent[combatTypeToIndex(COMBAT_ICEDAMAGE)] += value;
					break;
				}
				case ITEM_PARSE_ABSORBPERCENTMAGIC: {
					int16_t value = pugi::cast<int16_t>(valueAttribute.value());
					abilities.absorbPercent[combatTypeToIndex(COMBAT_ENERGYDAMAGE)] += value;
					abilities.absorbPercent[combatTypeToIndex(COMBAT_FIREDAMAGE)] += value;
					abilities.absorbPercent[combatTypeToIndex(COMBAT_EARTHDAMAGE)] += value;
					abilities.absorbPercent[combatTypeToIndex(COMBAT_ICEDAMAGE)] += value;
					abilities.absorbPercent[combatTypeToIndex(COMBAT_HOLYDAMAGE)] += value;
					abilities.absorbPercent[combatTypeToIndex(COMBAT_DEATHDAMAGE)] += value;
					break;
				}
				case ITEM_PARSE_ABSORBPERCENTENERGY: {
					abilities.absorbPercent[combatTypeToIndex(COMBAT_ENERGYDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_ABSORBPERCENTFIRE: {
					abilities.absorbPercent[combatTypeToIndex(COMBAT_FIREDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_ABSORBPERCENTPOISON: {
					abilities.absorbPercent[combatTypeToIndex(COMBAT_EARTHDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_ABSORBPERCENTICE: {
					abilities.absorbPercent[combatTypeToIndex(COMBAT_ICEDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_ABSORBPERCENTHOLY: {
					abilities.absorbPercent[combatTypeToIndex(COMBAT_HOLYDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_ABSORBPERCENTDEATH: {
					abilities.absorbPercent[combatTypeToIndex(COMBAT_DEATHDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_ABSORBPERCENTLIFEDRAIN: {
					abilities.absorbPercent[combatTypeToIndex(COMBAT_LIFEDRAIN)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_ABSORBPERCENTMANADRAIN: {
					abilities.absorbPercent[combatTypeToIndex(COMBAT_MANADRAIN)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_ABSORBPERCENTDROWN: {
					abilities.absorbPercent[combatTypeToIndex(COMBAT_DROWNDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_ABSORBPERCENTPHYSICAL: {
					abilities.absorbPercent[combatTypeToIndex(COMBAT_PHYSICALDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_ABSORBPERCENTHEALING: {
					abilities.absorbPercent[combatTypeToIndex(COMBAT_HEALING)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_ABSORBPERCENTUNDEFINED: {
					abilities.absorbPercent[combatTypeToIndex(COMBAT_UNDEFINEDDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAGICLEVELENERGY: {
					abilities.specialMagicLevelSkill[combatTypeToIndex(COMBAT_ENERGYDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAGICLEVELFIRE: {
					abilities.specialMagicLevelSkill[combatTypeToIndex(COMBAT_FIREDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAGICLEVELPOISON: {
					abilities.specialMagicLevelSkill[combatTypeToIndex(COMBAT_EARTHDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAGICLEVELICE: {
					abilities.specialMagicLevelSkill[combatTypeToIndex(COMBAT_ICEDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAGICLEVELHOLY: {
					abilities.specialMagicLevelSkill[combatTypeToIndex(COMBAT_HOLYDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAGICLEVELDEATH: {
					abilities.specialMagicLevelSkill[combatTypeToIndex(COMBAT_DEATHDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAGICLEVELLIFEDRAIN: {
					abilities.specialMagicLevelSkill[combatTypeToIndex(COMBAT_LIFEDRAIN)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAGICLEVELMANADRAIN: {
					abilities.specialMagicLevelSkill[combatTypeToIndex(COMBAT_MANADRAIN)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAGICLEVELDROWN: {
					abilities.specialMagicLevelSkill[combatTypeToIndex(COMBAT_DROWNDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAGICLEVELPHYSICAL: {
					abilities.specialMagicLevelSkill[combatTypeToIndex(COMBAT_PHYSICALDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAGICLEVELHEALING: {
					abilities.specialMagicLevelSkill[combatTypeToIndex(COMBAT_HEALING)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_MAGICLEVELUNDEFINED: {
					abilities.specialMagicLevelSkill[combatTypeToIndex(COMBAT_UNDEFINEDDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_SUPPRESSDRUNK: {
					if (valueAttribute.as_bool()) {
						abilities.conditionSuppressions |= CONDITION_DRUNK;
					}
					break;
				}
				case ITEM_PARSE_SUPPRESSENERGY: {
					if (valueAttribute.as_bool()) {
						abilities.conditionSuppressions |= CONDITION_ENERGY;
					}
					break;
				}
				case ITEM_PARSE_SUPPRESSFIRE: {
					if (valueAttribute.as_bool()) {
						abilities.conditionSuppressions |= CONDITION_FIRE;
					}
					break;
				}
				case ITEM_PARSE_SUPPRESSPOISON: {
					if (valueAttribute.as_bool()) {
						abilities.conditionSuppressions |= CONDITION_POISON;
					}
					break;
				}
				case ITEM_PARSE_SUPPRESSDROWN: {
					if (valueAttribute.as_bool()) {
						abilities.conditionSuppressions |= CONDITION_DROWN;
					}
					break;
				}
				case ITEM_PARSE_SUPPRESSPHYSICAL: {
					if (valueAttribute.as_bool()) {
						abilities.conditionSuppressions |= CONDITION_BLEEDING;
					}
					break;
				}
				case ITEM_PARSE_SUPPRESSFREEZE: {
					if (valueAttribute.as_bool()) {
						abilities.conditionSuppressions |= CONDITION_FREEZING;
					}
					break;
				}
				case ITEM_PARSE_SUPPRESSDAZZLE: {
					if (valueAttribute.as_bool()) {
						abilities.conditionSuppressions |= CONDITION_DAZZLED;
					}
					break;
				}
				case ITEM_PARSE_SUPPRESSCURSE: {
					if (valueAttribute.as_bool()) {
						abilities.conditionSuppressions |= CONDITION_CURSED;
					}
					break;
				}
				// Turns the item into a damaging magic field. The condition
				// (ownership transferred to it.conditionDamage) is built from
				// the nested sub-attributes: ticks, count, start, damage,
				// initdamage.
				case ITEM_PARSE_FIELD: {
					it.group = ITEM_GROUP_MAGICFIELD;
					it.type = ITEM_TYPE_MAGICFIELD;
					CombatType_t combatType = COMBAT_NONE;
					ConditionDamage* conditionDamage = nullptr;
					tmpStrValue = asLowerCaseString(valueAttribute.as_string());
					if (tmpStrValue == "fire") {
						conditionDamage = new ConditionDamage(CONDITIONID_COMBAT, CONDITION_FIRE);
						combatType = COMBAT_FIREDAMAGE;
					} else if (tmpStrValue == "energy") {
						conditionDamage = new ConditionDamage(CONDITIONID_COMBAT, CONDITION_ENERGY);
						combatType = COMBAT_ENERGYDAMAGE;
					} else if (tmpStrValue == "poison") {
						conditionDamage = new ConditionDamage(CONDITIONID_COMBAT, CONDITION_POISON);
						combatType = COMBAT_EARTHDAMAGE;
					} else if (tmpStrValue == "drown") {
						conditionDamage = new ConditionDamage(CONDITIONID_COMBAT, CONDITION_DROWN);
						combatType = COMBAT_DROWNDAMAGE;
					} else if (tmpStrValue == "physical") {
						conditionDamage = new ConditionDamage(CONDITIONID_COMBAT, CONDITION_BLEEDING);
						combatType = COMBAT_PHYSICALDAMAGE;
					} else {
						std::cout << "[Warning - Items::parseItemNode] Unknown field value: " << valueAttribute.as_string() << std::endl;
					}
					if (combatType != COMBAT_NONE) {
						it.combatType = combatType;
						it.conditionDamage.reset(conditionDamage);
						uint32_t ticks = 0;
						int32_t start = 0;
						int32_t count = 1;
						int32_t initDamage = -1;
						int32_t damage = 0;
						for (auto subAttributeNode : attributeNode.children()) {
							pugi::xml_attribute subKeyAttribute = subAttributeNode.attribute("key");
							if (!subKeyAttribute) {
								continue;
							}
							pugi::xml_attribute subValueAttribute = subAttributeNode.attribute("value");
							if (!subValueAttribute) {
								continue;
							}
							tmpStrValue = asLowerCaseString(subKeyAttribute.as_string());
							if (tmpStrValue == "initdamage") {
								initDamage = pugi::cast<int32_t>(subValueAttribute.value());
							} else if (tmpStrValue == "ticks") {
								ticks = pugi::cast<uint32_t>(subValueAttribute.value());
							} else if (tmpStrValue == "count") {
								count = std::max<int32_t>(1, pugi::cast<int32_t>(subValueAttribute.value()));
							} else if (tmpStrValue == "start") {
								start = std::max<int32_t>(0, pugi::cast<int32_t>(subValueAttribute.value()));
							} else if (tmpStrValue == "damage") {
								damage = -pugi::cast<int32_t>(subValueAttribute.value());
								if (start > 0) {
									std::list<int32_t> damageList;
									ConditionDamage::generateDamageList(damage, start, damageList);
									for (int32_t damageValue : damageList) {
										conditionDamage->addDamage(1, ticks, -damageValue);
									}
									start = 0;
								} else {
									conditionDamage->addDamage(count, ticks, damage);
								}
							}
						}
						// datapack compatibility, presume damage to be initialdamage if initialdamage is not declared.
						// initDamage = 0 (don't override initDamage with damage, don't set any initDamage)
						// initDamage = -1 (undefined, override initDamage with damage)
						if (initDamage > 0 || initDamage < -1) {
							conditionDamage->setInitDamage(-initDamage);
						} else if (initDamage == -1 && start != 0) {
							conditionDamage->setInitDamage(start);
						}
						conditionDamage->setParam(CONDITION_PARAM_FIELD, 1);
						if (conditionDamage->getTotalDamage() > 0) {
							conditionDamage->setParam(CONDITION_PARAM_FORCEUPDATE, 1);
						}
					}
					break;
				}
				case ITEM_PARSE_REPLACEABLE: {
					it.replaceable = valueAttribute.as_bool();
					break;
				}
				case ITEM_PARSE_PARTNERDIRECTION: {
					it.bedPartnerDir = getDirection(valueAttribute.as_string());
					break;
				}
				case ITEM_PARSE_LEVELDOOR: {
					it.levelDoor = pugi::cast<uint32_t>(valueAttribute.value());
					break;
				}
				// Bed transforms: each sex-specific target also gets a
				// back-link (transformToFree) and the missing sex defaults to
				// the same target.
				case ITEM_PARSE_MALETRANSFORMTO: {
					uint16_t value = pugi::cast<uint16_t>(valueAttribute.value());
					it.transformToOnUse[PLAYERSEX_MALE] = value;
					ItemType& other = getItemType(value);
					if (other.transformToFree == 0) {
						other.transformToFree = it.id;
					}
					if (it.transformToOnUse[PLAYERSEX_FEMALE] == 0) {
						it.transformToOnUse[PLAYERSEX_FEMALE] = value;
					}
					break;
				}
				case ITEM_PARSE_FEMALETRANSFORMTO: {
					uint16_t value = pugi::cast<uint16_t>(valueAttribute.value());
					it.transformToOnUse[PLAYERSEX_FEMALE] = value;
					ItemType& other = getItemType(value);
					if (other.transformToFree == 0) {
						other.transformToFree = it.id;
					}
					if (it.transformToOnUse[PLAYERSEX_MALE] == 0) {
						it.transformToOnUse[PLAYERSEX_MALE] = value;
					}
					break;
				}
				case ITEM_PARSE_TRANSFORMTO: {
					it.transformToFree = pugi::cast<uint16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_DESTROYTO: {
					it.destroyTo = pugi::cast<uint16_t>(valueAttribute.value());
					break;
				}
				case ITEM_PARSE_ELEMENTICE: {
					abilities.elementDamage = pugi::cast<uint16_t>(valueAttribute.value());
					abilities.elementType = COMBAT_ICEDAMAGE;
					break;
				}
				case ITEM_PARSE_ELEMENTEARTH: {
					abilities.elementDamage = pugi::cast<uint16_t>(valueAttribute.value());
					abilities.elementType = COMBAT_EARTHDAMAGE;
					break;
				}
				case ITEM_PARSE_ELEMENTFIRE: {
					abilities.elementDamage = pugi::cast<uint16_t>(valueAttribute.value());
					abilities.elementType = COMBAT_FIREDAMAGE;
					break;
				}
				case ITEM_PARSE_ELEMENTENERGY: {
					abilities.elementDamage = pugi::cast<uint16_t>(valueAttribute.value());
					abilities.elementType = COMBAT_ENERGYDAMAGE;
					break;
				}
				case ITEM_PARSE_ELEMENTDEATH: {
					abilities.elementDamage = pugi::cast<uint16_t>(valueAttribute.value());
					abilities.elementType = COMBAT_DEATHDAMAGE;
					break;
				}
				case ITEM_PARSE_ELEMENTHOLY: {
					abilities.elementDamage = pugi::cast<uint16_t>(valueAttribute.value());
					abilities.elementType = COMBAT_HOLYDAMAGE;
					break;
				}
				case ITEM_PARSE_WALKSTACK: {
					it.walkStack = valueAttribute.as_bool();
					break;
				}
				case ITEM_PARSE_BLOCKING: {
					it.blockSolid = valueAttribute.as_bool();
					break;
				}
				case ITEM_PARSE_ALLOWDISTREAD: {
					it.allowDistRead = booleanString(valueAttribute.as_string());
					break;
				}
				case ITEM_PARSE_STOREITEM: {
					it.storeItem = booleanString(valueAttribute.as_string());
					break;
				}
				case ITEM_PARSE_WORTH: {
					// Each currency worth may only map to one item id.
					uint64_t worth = pugi::cast<uint64_t>(valueAttribute.value());
					if (currencyItems.find(worth) != currencyItems.end()) {
						std::cout << "[Warning - Items::parseItemNode] Duplicated currency worth. Item " << id << " redefines worth " << worth << std::endl;
					} else {
						currencyItems.insert(CurrencyMap::value_type(worth, id));
						it.worth = worth;
					}
					break;
				}
				default: {
					// It should not ever get to here, only if you add a new key to the map and don't configure a case for it.
					std::cout << "[Warning - Items::parseItemNode] Not configured key value: " << keyAttribute.as_string() << std::endl;
					break;
				}
			}
		} else {
			std::cout << "[Warning - Items::parseItemNode] Unknown key value: " << keyAttribute.as_string() << std::endl;
		}
	}
	//check bed items
	if ((it.transformToFree != 0 || it.transformToOnUse[PLAYERSEX_FEMALE] != 0 || it.transformToOnUse[PLAYERSEX_MALE] != 0) && it.type != ITEM_TYPE_BED) {
		std::cout << "[Warning - Items::parseItemNode] Item " << it.id << " is not set as a bed-type" << std::endl;
	}
}
// Returns the mutable ItemType for the given server id; out-of-range ids
// resolve to the sentinel entry at index 0.
ItemType& Items::getItemType(size_t id)
{
	return id < items.size() ? items[id] : items.front();
}
// Read-only overload: same lookup and out-of-range fallback as the mutable
// getItemType above.
const ItemType& Items::getItemType(size_t id) const
{
	return id < items.size() ? items[id] : items.front();
}
// Resolves a client sprite id to the corresponding server-side ItemType.
// Sprite ids below 100 are never valid item sprites; those, and sprite ids
// with no server-id mapping, yield the first (dummy) entry.
const ItemType& Items::getItemIdByClientId(uint16_t spriteId) const
{
	if (spriteId >= 100) {
		// getServerId returns 0 when the sprite id is unmapped
		if (uint16_t serverId = clientIdToServerIdMap.getServerId(spriteId)) {
			return getItemType(serverId);
		}
	}
	return items.front();
}
// Looks up a server item id by item name (case-insensitive).
// Returns 0 when the name is empty or unknown.
uint16_t Items::getItemIdByName(const std::string& name)
{
	if (name.empty()) {
		return 0;
	}

	auto it = nameToItems.find(asLowerCaseString(name));
	return it != nameToItems.end() ? it->second : 0;
}
| 1 | 19,876 | variable names suggestions from me: - damageboost, ITEM_PARSE_DAMAGEBOOST, "damage boost +x%" - healingboost, ITEM_PARSE_HEALINGBOOST, "healing power +y%" - managainboost, ITEM_PARSE_MANAGAINBOOST, "mana restoration +z%" alternatively the other names can stay, because "increase" convention isn't that bad, just change mana values to: - "increasemanarestored", ITEM_PARSE_INCREASEMANARESTORED, "mana restoration +z%" | otland-forgottenserver | cpp |
@@ -24,6 +24,7 @@ CREATE_TABLE = """
`complete_time` datetime DEFAULT NULL,
`status` enum('SUCCESS','RUNNING','FAILURE',
'PARTIAL_SUCCESS','TIMEOUT') DEFAULT NULL,
+ `has_all_data` bool DEFAULT NULL,
`schema_version` varchar(255) DEFAULT NULL,
`cycle_timestamp` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`), | 1 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""SQL queries for Snapshot Cycles tables."""
RESOURCE_NAME = 'snapshot_cycles'
CREATE_TABLE = """
CREATE TABLE `snapshot_cycles` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`start_time` datetime DEFAULT NULL,
`complete_time` datetime DEFAULT NULL,
`status` enum('SUCCESS','RUNNING','FAILURE',
'PARTIAL_SUCCESS','TIMEOUT') DEFAULT NULL,
`schema_version` varchar(255) DEFAULT NULL,
`cycle_timestamp` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `cycle_timestamp_UNIQUE` (`cycle_timestamp`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
SELECT_SNAPSHOT_CYCLES_TABLE = """
SELECT TABLE_NAME from information_schema.tables
WHERE TABLE_NAME = 'snapshot_cycles'
AND TABLE_SCHEMA in (
SELECT DATABASE()
);"""
INSERT_CYCLE = """
INSERT INTO snapshot_cycles
(cycle_timestamp, start_time, status, schema_version)
VALUES (%s, %s, %s, %s);
"""
UPDATE_CYCLE = """
UPDATE snapshot_cycles
SET status=%s, complete_time=%s
WHERE cycle_timestamp=%s;
"""
| 1 | 27,360 | Why can't this be determined by 'PARTIAL_SUCCESS' in the `status` field? | forseti-security-forseti-security | py |
@@ -35,11 +35,15 @@ final class IdGenerator implements GeneratorInterface
public function generatePath(MediaInterface $media): string
{
- $mediaId = (int) $media->getId();
+ $id = $media->getId();
- $rep_first_level = (int) ($mediaId / $this->firstLevel);
- $rep_second_level = (int) (($mediaId - ($rep_first_level * $this->firstLevel)) / $this->secondLevel);
+ if (!is_numeric($id)) {
+ return '';
+ }
- return sprintf('%s/%04s/%02s', $media->getContext(), $rep_first_level + 1, $rep_second_level + 1);
+ $repFirstLevel = (int) ($id / $this->firstLevel);
+ $repSecondLevel = (int) (($id - ($repFirstLevel * $this->firstLevel)) / $this->secondLevel);
+
+ return sprintf('%s/%04s/%02s', $media->getContext() ?? '', $repFirstLevel + 1, $repSecondLevel + 1);
}
} | 1 | <?php
declare(strict_types=1);
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Generator;
use Sonata\MediaBundle\Model\MediaInterface;
final class IdGenerator implements GeneratorInterface
{
    /**
     * Number of media ids grouped under one first-level directory.
     *
     * @var int
     */
    private $firstLevel;

    /**
     * Number of media ids grouped under one second-level directory.
     *
     * @var int
     */
    private $secondLevel;

    public function __construct(int $firstLevel = 100000, int $secondLevel = 1000)
    {
        $this->firstLevel = $firstLevel;
        $this->secondLevel = $secondLevel;
    }

    /**
     * Builds a two-level storage path from the media id,
     * e.g. "context/0001/01" with the default bucket sizes.
     */
    public function generatePath(MediaInterface $media): string
    {
        $mediaId = (int) $media->getId();

        // Bucket the id so at most $firstLevel ids share a first-level
        // directory and at most $secondLevel ids share a second-level one.
        // Locals renamed to camelCase for consistency with the rest of the
        // bundle (was $rep_first_level / $rep_second_level).
        $repFirstLevel = (int) ($mediaId / $this->firstLevel);
        $repSecondLevel = (int) (($mediaId - ($repFirstLevel * $this->firstLevel)) / $this->secondLevel);

        return sprintf('%s/%04s/%02s', $media->getContext(), $repFirstLevel + 1, $repSecondLevel + 1);
    }
}
| 1 | 12,401 | Does it make sense to generate a Exception here? Maybe yes, because it wouldn't make sense to generate a path without the id of the media right? | sonata-project-SonataMediaBundle | php |
@@ -48,8 +48,16 @@ import javax.net.ssl.SSLSocketFactory;
*/
public class SalesforceTLSSocketFactory extends SSLSocketFactory {
+ private static SalesforceTLSSocketFactory INSTANCE;
private SSLSocketFactory ssLSocketFactory;
+ public static SalesforceTLSSocketFactory getInstance() throws KeyManagementException, NoSuchAlgorithmException {
+ if (INSTANCE == null) {
+ INSTANCE = new SalesforceTLSSocketFactory();
+ }
+ return INSTANCE;
+ }
+
public SalesforceTLSSocketFactory() throws KeyManagementException, NoSuchAlgorithmException {
final SSLContext context = SSLContext.getInstance("TLS");
context.init(null, null, null); | 1 | /*
* Copyright (c) 2015, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.auth;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocket;
import javax.net.ssl.SSLSocketFactory;
/**
* Custom SSLSocketFactory implementation to disable the default usage of TLS 1.0, and use
* only TLS 1.1 or 1.2, as per Salesforce server's new requirements. This implementation is
* only required for Android 4.4 (KitKat), since TLS 1.1 or 1.2 are the default in Android
* Lollipop (5.0) and above.
*
* FIXME: Remove this class once minApi >= Lollipop.
*
* @author bhariharan
*/
/**
 * Custom SSLSocketFactory implementation to disable the default usage of TLS 1.0, and use
 * only TLS 1.1 or 1.2, as per Salesforce server's new requirements. This implementation is
 * only required for Android 4.4 (KitKat), since TLS 1.1 or 1.2 are the default in Android
 * Lollipop (5.0) and above.
 *
 * FIXME: Remove this class once minApi >= Lollipop.
 *
 * @author bhariharan
 */
public class SalesforceTLSSocketFactory extends SSLSocketFactory {

    // Delegate that performs the actual socket creation; this class only
    // restricts the enabled protocols on the sockets it returns.
    // (Renamed from 'ssLSocketFactory' and made final.)
    private final SSLSocketFactory sslSocketFactory;

    /**
     * Builds the factory around a default-initialized TLS context.
     *
     * @throws KeyManagementException if the context cannot be initialized
     * @throws NoSuchAlgorithmException if no TLS provider is available
     */
    public SalesforceTLSSocketFactory() throws KeyManagementException, NoSuchAlgorithmException {
        final SSLContext context = SSLContext.getInstance("TLS");
        context.init(null, null, null);
        sslSocketFactory = context.getSocketFactory();
    }

    @Override
    public String[] getDefaultCipherSuites() {
        return sslSocketFactory.getDefaultCipherSuites();
    }

    @Override
    public String[] getSupportedCipherSuites() {
        return sslSocketFactory.getSupportedCipherSuites();
    }

    @Override
    public Socket createSocket(Socket s, String host, int port, boolean autoClose) throws IOException {
        return disableTLS1Dot0(sslSocketFactory.createSocket(s, host, port, autoClose));
    }

    @Override
    public Socket createSocket(String host, int port) throws IOException {
        return disableTLS1Dot0(sslSocketFactory.createSocket(host, port));
    }

    @Override
    public Socket createSocket(String host, int port, InetAddress localHost, int localPort) throws IOException {
        return disableTLS1Dot0(sslSocketFactory.createSocket(host, port, localHost, localPort));
    }

    @Override
    public Socket createSocket(InetAddress host, int port) throws IOException {
        return disableTLS1Dot0(sslSocketFactory.createSocket(host, port));
    }

    @Override
    public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort) throws IOException {
        return disableTLS1Dot0(sslSocketFactory.createSocket(address, port, localAddress, localPort));
    }

    /**
     * Restricts the socket to TLS 1.1/1.2. Non-SSL sockets (and null) are
     * returned unchanged. Note: 'instanceof' is false for null, so no
     * separate null check is needed (the original 'socket != null &&'
     * was redundant).
     */
    private Socket disableTLS1Dot0(Socket socket) {
        if (socket instanceof SSLSocket) {
            ((SSLSocket) socket).setEnabledProtocols(new String[] {
                    "TLSv1.1",
                    "TLSv1.2"
            });
        }
        return socket;
    }
}
| 1 | 14,946 | This should be made private. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -267,6 +267,7 @@ func (a *Agent) bootstrap() error {
BaseSVID: a.BaseSVID,
BaseSVIDKey: a.baseSVIDKey,
BaseRegEntries: regEntries,
+ BaseSVIDPath: a.getBaseSVIDPath(),
Logger: a.config.Log,
}
| 1 | package agent
import (
"context"
"crypto/ecdsa"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"io/ioutil"
"net"
"net/url"
"os"
"path"
"syscall"
"time"
"github.com/sirupsen/logrus"
"github.com/spiffe/go-spiffe/uri"
"github.com/spiffe/spire/pkg/agent/auth"
"github.com/spiffe/spire/pkg/agent/cache"
"github.com/spiffe/spire/pkg/agent/catalog"
common_catalog "github.com/spiffe/spire/pkg/common/catalog"
"github.com/spiffe/spire/proto/agent/keymanager"
"github.com/spiffe/spire/proto/agent/nodeattestor"
"github.com/spiffe/spire/proto/api/node"
"github.com/spiffe/spire/proto/api/workload"
"github.com/spiffe/spire/proto/common"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"
spiffe_tls "github.com/spiffe/go-spiffe/tls"
)
type Config struct {
// Address to bind the workload api to
BindAddress *net.UnixAddr
// Distinguished Name to use for all CSRs
CertDN *pkix.Name
// Directory to store runtime data
DataDir string
// Directory for plugin configs
PluginDir string
Log logrus.FieldLogger
// Address of SPIRE server
ServerAddress *net.TCPAddr
// A channel for receiving errors from agent goroutines
ErrorCh chan error
// Trust domain and associated CA bundle
TrustDomain url.URL
TrustBundle *x509.CertPool
// Join token to use for attestation, if needed
JoinToken string
// Umask value to use
Umask int
}
type Agent struct {
BaseSVID []byte
baseSVIDKey *ecdsa.PrivateKey
BaseSVIDTTL int32
config *Config
grpcServer *grpc.Server
CacheMgr cache.Manager
Catalog catalog.Catalog
serverCerts []*x509.Certificate
ctx context.Context
cancel context.CancelFunc
}
// New creates an Agent from the given config, wiring up the plugin
// catalog and deriving a cancellable context from ctx (cancelled via
// Shutdown).
func New(ctx context.Context, c *Config) *Agent {
	config := &catalog.Config{
		ConfigDir: c.PluginDir,
		Log:       c.Log.WithField("subsystem_name", "catalog"),
	}

	ctx, cancel := context.WithCancel(ctx)
	return &Agent{
		config:  c,
		Catalog: catalog.New(config),
		ctx:     ctx,
		cancel:  cancel,
	}
}
// Run the agent
// This method initializes the agent, including its plugins,
// and then blocks on the main event loop. It returns the first fatal
// error reported by an agent goroutine, or the result of Shutdown when
// the context is cancelled.
func (a *Agent) Run() error {
	a.prepareUmask()

	// Plugins must be up before bootstrap (key manager, node attestor)
	// and before the workload API endpoints.
	err := a.initPlugins()
	if err != nil {
		return err
	}

	err = a.bootstrap()
	if err != nil {
		return err
	}

	err = a.initEndpoints()
	if err != nil {
		return err
	}

	// Main event loop
	a.config.Log.Info("SPIRE Agent is now running")
	for {
		select {
		case err = <-a.config.ErrorCh:
			// A goroutine reported a fatal error; shut down and return it.
			e := a.Shutdown()
			if e != nil {
				a.config.Log.Debug(e)
			}
			return err
		case <-a.ctx.Done():
			return a.Shutdown()
		}
	}
}
// prepareUmask applies the configured umask to the process so that files
// created by the agent (e.g. the workload API unix socket) get the
// desired permissions.
func (a *Agent) prepareUmask() {
	a.config.Log.Debug("Setting umask to ", a.config.Umask)
	syscall.Umask(a.config.Umask)
}
// Shutdown stops the agent: cancels the shared context (via the deferred
// cancel), stops the plugin catalog and the workload API gRPC server,
// then drains any buffered goroutine errors, returning the last one.
func (a *Agent) Shutdown() error {
	defer a.cancel()
	if a.Catalog != nil {
		a.Catalog.Stop()
	}

	a.grpcServer.GracefulStop()

	// Drain error channel, last one wins
	// (non-blocking reads until the channel is empty)
	var err error
Drain:
	for {
		select {
		case e := <-a.config.ErrorCh:
			err = e
		default:
			break Drain
		}
	}

	return err
}
// initPlugins starts the plugin catalog, loading and configuring the
// plugins described in the agent's plugin config directory. Returns any
// error from the catalog directly (the former err-check-then-return-nil
// boilerplate was redundant).
func (a *Agent) initPlugins() error {
	return a.Catalog.Run()
}
// initEndpoints starts the workload API gRPC server on the configured
// bind address (typically a unix domain socket) and serves it in the
// background; a fatal Serve error is reported on ErrorCh and handled by
// the event loop in Run().
func (a *Agent) initEndpoints() error {
	a.config.Log.Info("Starting the workload API")

	// Workload SVIDs are capped at half the base SVID's TTL.
	maxWorkloadTTL := time.Duration(a.BaseSVIDTTL/2) * time.Second

	log := a.config.Log.WithField("subsystem_name", "workload")
	ws := &workloadServer{
		bundle:  a.serverCerts[1].Raw, // TODO: Fix handling of serverCerts
		cache:   a.CacheMgr.Cache(),
		catalog: a.Catalog,
		l:       log,
		maxTTL:  maxWorkloadTTL,
	}

	// Create a gRPC server with our custom "credential" resolver
	a.grpcServer = grpc.NewServer(grpc.Creds(auth.NewCredentials()))
	workload.RegisterWorkloadServer(a.grpcServer, ws)

	addr := a.config.BindAddress
	if addr.Network() == "unix" {
		// Remove any stale socket file left over from a previous run.
		_ = os.Remove(addr.String())
	}

	listener, err := net.Listen(addr.Network(), addr.String())
	if err != nil {
		return fmt.Errorf("Error creating GRPC listener: %s", err)
	}

	if addr.Network() == "unix" {
		// Any process should be able to use this unix socket
		// NOTE(review): Chmod error is ignored here — if it fails, workloads
		// may be unable to connect; consider surfacing the error.
		os.Chmod(addr.String(), os.ModePerm)
	}

	go func() {
		a.config.ErrorCh <- a.grpcServer.Serve(listener)
	}()

	return nil
}
// bootstrap loads or generates the agent's private key and base SVID.
// If the key manager already holds a key, the cached SVID is loaded from
// disk; otherwise a new key pair is generated, the agent (re-)attests
// against the SPIRE server, and the cache manager is started.
//
// Fixes vs. previous version: "%i" is not a valid Go format verb (now
// "%d"), and the error returned by cache.NewManager was assigned but
// never checked (now returned).
func (a *Agent) bootstrap() error {
	a.config.Log.Info("Bootstrapping SPIRE agent")

	plugins := a.Catalog.KeyManagers()
	if len(plugins) != 1 {
		return fmt.Errorf("Expected only one key manager plugin, found %d", len(plugins))
	}
	keyManager := plugins[0]

	// Fetch or generate private key
	res, err := keyManager.FetchPrivateKey(&keymanager.FetchPrivateKeyRequest{})
	if err != nil {
		return err
	}
	if len(res.PrivateKey) > 0 {
		// A key already exists: reuse it and load the matching SVID from disk.
		key, err := x509.ParseECPrivateKey(res.PrivateKey)
		if err != nil {
			return err
		}

		err = a.loadBaseSVID()
		if err != nil {
			return err
		}
		a.baseSVIDKey = key
	} else {
		if a.BaseSVID != nil {
			a.config.Log.Info("Certificate configured but no private key found!")
		}

		a.config.Log.Info("Generating private key for new base SVID")
		res, err := keyManager.GenerateKeyPair(&keymanager.GenerateKeyPairRequest{})
		if err != nil {
			return fmt.Errorf("Failed to generate private key: %s", err)
		}
		key, err := x509.ParseECPrivateKey(res.PrivateKey)
		if err != nil {
			return err
		}
		a.baseSVIDKey = key

		// If we're here, we need to attest/Re-attest
		regEntries, err := a.attest()
		if err != nil {
			return err
		}

		serverId := url.URL{
			Scheme: "spiffe",
			Host:   a.config.TrustDomain.Host,
			Path:   path.Join("spiffe", "cp"),
		}

		cmgrConfig := &cache.MgrConfig{
			ServerCerts:    a.serverCerts,
			ServerSPIFFEID: serverId.String(),
			ServerAddr:     a.config.ServerAddress.String(),
			BaseSVID:       a.BaseSVID,
			BaseSVIDKey:    a.baseSVIDKey,
			BaseRegEntries: regEntries,
			Logger:         a.config.Log,
		}
		a.CacheMgr, err = cache.NewManager(a.ctx, cmgrConfig)
		if err != nil {
			return err
		}

		a.CacheMgr.Init()
		// Surface cache-manager termination in the log; its error (if any)
		// is not fatal to the agent itself.
		go func() {
			<-a.CacheMgr.Done()
			a.config.Log.Info("Cache Update Stopped")
			if a.CacheMgr.Err() != nil {
				a.config.Log.Warning(a.CacheMgr.Err())
			}
		}()
	}

	a.config.Log.Info("Bootstrapping done")
	return nil
}
// Attest the agent, obtain a new Base SVID. Returns a spiffeid->registration entries map
// which is used to generate CSRs for non-base SVIDs and update the agent cache entries.
// Side effects: sets a.serverCerts, a.BaseSVID and a.BaseSVIDTTL, and
// stores the new SVID on disk.
//
// Fix vs. previous version: "%i" is not a valid Go format verb (now "%d").
//
// TODO: Refactor me for length, testability
func (a *Agent) attest() ([]*common.RegistrationEntry, error) {
	var err error
	a.config.Log.Info("Preparing to attest against ", a.config.ServerAddress.String())

	// Handle the join token seperately, if defined
	pluginResponse := &nodeattestor.FetchAttestationDataResponse{}
	if a.config.JoinToken != "" {
		a.config.Log.Info("Preparing to attest this node against ",
			a.config.ServerAddress.String(), " using strategy 'join-token'")
		data := &common.AttestedData{
			Type: "join_token",
			Data: []byte(a.config.JoinToken),
		}
		id := &url.URL{
			Scheme: "spiffe",
			Host:   a.config.TrustDomain.Host,
			Path:   path.Join("spire", "agent", "join_token", a.config.JoinToken),
		}

		pluginResponse.AttestedData = data
		pluginResponse.SpiffeId = id.String()
	} else {
		plugins := a.Catalog.NodeAttestors()
		if len(plugins) != 1 {
			return nil, fmt.Errorf("Expected only one node attestor plugin, found %d", len(plugins))
		}
		attestor := plugins[0]

		attestorInfo := a.Catalog.Find(attestor.(common_catalog.Plugin))
		a.config.Log.Info("Preparing to attest this node against ", a.config.ServerAddress.String(),
			" using strategy '", attestorInfo.Config.PluginName, "'")
		pluginResponse, err = attestor.FetchAttestationData(&nodeattestor.FetchAttestationDataRequest{})
		if err != nil {
			return nil, fmt.Errorf("Failed to get attestation data from plugin: %s", err)
		}
	}

	// Parse the SPIFFE ID, form a CSR with it
	id, err := url.Parse(pluginResponse.SpiffeId)
	if err != nil {
		return nil, fmt.Errorf("Failed to form SPIFFE ID: %s", err)
	}
	csr, err := a.generateCSR(id, a.baseSVIDKey)
	if err != nil {
		return nil, fmt.Errorf("Failed to generate CSR for attestation: %s", err)
	}

	// Since we are bootstrapping, this is explicitly _not_ mTLS
	conn, err := a.getNodeAPIClientConn(false, a.BaseSVID, a.baseSVIDKey)
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	nodeClient := node.NewNodeClient(conn)

	// Perform attestation
	req := &node.FetchBaseSVIDRequest{
		AttestedData: pluginResponse.AttestedData,
		Csr:          csr,
	}

	calloptPeer := new(peer.Peer)

	serverResponse, err := nodeClient.FetchBaseSVID(context.Background(), req, grpc.Peer(calloptPeer))
	if err != nil {
		return nil, fmt.Errorf("Failed attestation against spire server: %s", err)
	}

	// Capture the server's certificate chain for later mTLS connections.
	if tlsInfo, ok := calloptPeer.AuthInfo.(credentials.TLSInfo); ok {
		a.serverCerts = tlsInfo.State.PeerCertificates
	}

	// Pull base SVID out of the response
	svids := serverResponse.SvidUpdate.Svids
	if len(svids) > 1 {
		a.config.Log.Info("More than one SVID received during attestation!")
	}
	svid, ok := svids[id.String()]
	if !ok {
		return nil, fmt.Errorf("Base SVID not found in attestation response")
	}

	a.BaseSVID = svid.SvidCert
	a.BaseSVIDTTL = svid.Ttl
	a.storeBaseSVID()
	a.config.Log.Info("Attestation complete")
	return serverResponse.SvidUpdate.RegistrationEntries, nil
}
// Generate a CSR for the given SPIFFE ID
// The SPIFFE ID is carried in a critical URI SAN extension; the CSR is
// signed with the supplied ECDSA key using ECDSA-with-SHA256 and returned
// in DER form.
func (a *Agent) generateCSR(spiffeID *url.URL, key *ecdsa.PrivateKey) ([]byte, error) {
	a.config.Log.Info("Generating a CSR for ", spiffeID.String())

	uriSANs, err := uri.MarshalUriSANs([]string{spiffeID.String()})
	if err != nil {
		return []byte{}, err
	}
	uriSANExtension := []pkix.Extension{{
		Id:       uri.OidExtensionSubjectAltName,
		Value:    uriSANs,
		Critical: true,
	}}

	csrData := &x509.CertificateRequest{
		Subject:            *a.config.CertDN,
		SignatureAlgorithm: x509.ECDSAWithSHA256,
		ExtraExtensions:    uriSANExtension,
	}

	csr, err := x509.CreateCertificateRequest(rand.Reader, csrData, key)
	if err != nil {
		return nil, err
	}

	return csr, nil
}
// loadBaseSVID reads the cached base SVID certificate from the data
// directory into a.BaseSVID. A missing file is not an error: a new SVID
// will be obtained through attestation instead.
func (a *Agent) loadBaseSVID() error {
	a.config.Log.Info("Loading base SVID from disk")

	certPath := path.Join(a.config.DataDir, "base_svid.crt")
	if _, err := os.Stat(certPath); os.IsNotExist(err) {
		a.config.Log.Info("A base SVID could not be found. A new one will be generated")
		return nil
	}

	data, err := ioutil.ReadFile(certPath)
	if err != nil {
		return fmt.Errorf("Could not read Base SVID at path %s: %s", certPath, err)
	}

	// Sanity check that the file actually contains a DER certificate.
	if _, err = x509.ParseCertificate(data); err != nil {
		return fmt.Errorf("Certificate at %s could not be understood: %s", certPath, err)
	}

	a.BaseSVID = data
	return nil
}
// Write base SVID to storage dir
// Failures are logged but non-fatal: the SVID can be re-attested later.
// Fixes vs. previous version: the old code deferred f.Close() before
// checking the os.Create error and silently ignored Write/Sync errors;
// ioutil.WriteFile handles create/write/close with a single error path.
// Mode 0666 matches os.Create's default (further restricted by umask).
func (a *Agent) storeBaseSVID() {
	certPath := path.Join(a.config.DataDir, "base_svid.crt")
	err := ioutil.WriteFile(certPath, a.BaseSVID, 0666)
	if err != nil {
		a.config.Log.Info("Unable to store Base SVID at path ", certPath)
	}
}
// getNodeAPIClientConn dials the SPIRE server's node API.
// With mtls=false (bootstrap attestation) the connection is one-way TLS
// verified against the configured trust bundle; with mtls=true the given
// SVID/key are presented as the client certificate and the previously
// captured server certs are used as trust roots. In both cases the server
// is expected to present the SPIFFE ID <trust domain>/spiffe/cp.
func (a *Agent) getNodeAPIClientConn(mtls bool, svid []byte, key *ecdsa.PrivateKey) (conn *grpc.ClientConn, err error) {

	serverID := a.config.TrustDomain
	serverID.Path = "spiffe/cp"

	var spiffePeer *spiffe_tls.TLSPeer
	var tlsCert []tls.Certificate
	var tlsConfig *tls.Config

	if !mtls {
		// One-way TLS: no client cert; verify server against the bootstrap
		// trust bundle.
		spiffePeer = &spiffe_tls.TLSPeer{
			SpiffeIDs:  []string{serverID.String()},
			TrustRoots: a.config.TrustBundle,
		}
		tlsConfig = spiffePeer.NewTLSConfig(tlsCert)
	} else {
		// mTLS: trust the certs captured during attestation and present
		// our SVID as the client certificate.
		certPool := x509.NewCertPool()
		for _, cert := range a.serverCerts {
			certPool.AddCert(cert)
		}
		spiffePeer = &spiffe_tls.TLSPeer{
			SpiffeIDs:  []string{serverID.String()},
			TrustRoots: certPool,
		}
		tlsCert = append(tlsCert, tls.Certificate{Certificate: [][]byte{svid}, PrivateKey: key})
		tlsConfig = spiffePeer.NewTLSConfig(tlsCert)
	}

	dialCreds := grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))

	conn, err = grpc.DialContext(a.ctx, a.config.ServerAddress.String(), dialCreds)
	if err != nil {
		return
	}

	return
}
| 1 | 8,863 | perhaps this is better modeled as a pkg-level var? | spiffe-spire | go |
@@ -86,6 +86,10 @@
#include "CmpMain.h"
#define MAX_NODE_NAME 9
+#define MAX_PRECISION_ALLOWED 18
+#define HIVE_MAX_PRECISION_ALLOWED 38
+#define MAX_SCALE_ALLOWED 6
+#define MAX_NUM_LEN 16
#include "SqlParserGlobals.h"
| 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
**************************************************************************
*
* File: NATable.C
* Description: A Non-Alcoholic table
* Created: 4/27/94
* Language: C++
*
*
**************************************************************************
*/
#define SQLPARSERGLOBALS_FLAGS // must precede all #include's
#undef _DP2NT_
#define _DP2NT_
// #define NA_ARKFS
#define __ROSETTA
#undef _DP2NT_
// #undef NA_ARKFS
#undef __ROSETTA
#include "NATable.h"
#include "Sqlcomp.h"
#include "Const.h"
#include "desc.h"
#include "dfs2rec.h"
#include "hs_read.h"
#include "parser.h"
#include "BindWA.h"
#include "ComAnsiNamePart.h"
#include "ItemColRef.h"
#include "ItemFunc.h"
#include "ItemOther.h"
#include "PartFunc.h"
#include "EncodedValue.h"
#include "SchemaDB.h"
#include "NAClusterInfo.h"
#include "MVInfo.h"
#include "ComMPLoc.h"
#include "NATable.h"
#include "opt.h"
#include "CmpStatement.h"
#include "ControlDB.h"
#include "ComCextdecs.h"
#include "ComSysUtils.h"
#include "ComObjectName.h"
#include "SequenceGeneratorAttributes.h"
#include "security/uid.h"
#include "HDFSHook.h"
#include "ExpLOBexternal.h"
#include "ComCextdecs.h"
#include "ExpHbaseInterface.h"
#include "CmpSeabaseDDL.h"
#include "RelScan.h"
#include "exp_clause_derived.h"
#include "PrivMgrCommands.h"
#include "ComDistribution.h"
#include "ExExeUtilCli.h"
#include "CmpDescribe.h"
#include "Globals.h"
#include "ComUser.h"
#include "ComSmallDefs.h"
#include "CmpMain.h"
#define MAX_NODE_NAME 9
#include "SqlParserGlobals.h"
//#define __ROSETTA
//#include "rosetta_ddl_include.h"
#include "SqlParserGlobals.h"
extern desc_struct *generateSpecialDesc(const CorrName& corrName);
#include "CmpMemoryMonitor.h"
#include "OptimizerSimulator.h"
#include "SQLCLIdev.h"
#include "sql_id.h"
SQLMODULE_ID __SQL_mod_natable = {
/* version */ SQLCLI_CURRENT_VERSION,
/* module name */ "HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.READDEF_N29_000",
/* time stamp */ 866668761818000LL,
/* char set */ "ISO88591",
/* name length */ 47
};
// -----------------------------------------------------------------------
// skipLeadingBlanks()
// Examines the given string keyValueBuffer from 'startIndex' for
// 'length' bytes and skips any blanks that appear as a prefix of the
// first non-blank character.
// -----------------------------------------------------------------------
// Returns the last-known update statistics time, which is tracked on the
// current compiler context (shared across cache entries).
Int64 HistogramsCacheEntry::getLastUpdateStatsTime()
{
  return cmpCurrentContext->getLastUpdateStatsTime();
}
// Records updateTime as the last update statistics time on the current
// compiler context (shared across cache entries).
void HistogramsCacheEntry::setUpdateStatsTime(Int64 updateTime)
{
  cmpCurrentContext->setLastUpdateStatsTime(updateTime);
}
// Returns the current wall-clock time in seconds since the epoch,
// or 0 if the time could not be obtained.
static Int64 getCurrentTime()
{
  // GETTIMEOFDAY returns -1, in case of an error
  Int64 currentTime;
  TimeVal currTime;
  if (GETTIMEOFDAY(&currTime, 0) != -1)
    currentTime = currTime.tv_sec;
  else
    currentTime = 0;
  return currentTime;
}
void HistogramsCacheEntry::updateRefreshTime()
{
Int64 currentTime = getCurrentTime();
this->setRefreshTime(currentTime);
}
// skipLeadingBlanks(): examines keyValueBuffer from 'startIndex' for
// 'length' bytes and returns the index of the first non-blank character
// (or startIndex + length when the whole range is blank).
static Lng32 skipLeadingBlanks(const char * keyValueBuffer,
                               const Lng32 startIndex,
                               const Lng32 length)
{
  Lng32 newIndex = startIndex;
  Lng32 stopIndex = newIndex + length;
  // Localize the search for blanks between the startIndex and stopIndex.
  // Use '<' (not '<=') so exactly 'length' bytes are examined; the old
  // '<=' read one byte past the stated range (buffer[stopIndex]).
  while ((newIndex < stopIndex) AND (keyValueBuffer[newIndex] == ' '))
    newIndex++;
  return newIndex;
} // static skipLeadingBlanks()
// -----------------------------------------------------------------------
// skipTrailingBlanks()
// Scans keyValueBuffer backwards from startIndex and returns the index
// of the last non-blank character, or -1 if positions 0..startIndex are
// all blanks.
// -----------------------------------------------------------------------
static Lng32 skipTrailingBlanks(const char * keyValueBuffer,
                                const Lng32 startIndex)
{
  Lng32 idx = startIndex;
  for (; (idx >= 0) AND (keyValueBuffer[idx] == ' '); idx--)
    ;
  return idx;
} // static skipTrailingBlanks
//----------------------------------------------------------------------
// qualNameHashFunc()
// Hash function for QualifiedName keys: sums the character codes of the
// object name. The HashDictionary reduces this value modulo its table
// size.
//----------------------------------------------------------------------
ULng32 qualNameHashFunc(const QualifiedName& qualName)
{
  const NAString& name = qualName.getObjectName();
  ULng32 hashValue = 0;
  for (UInt32 i = 0; i < name.length(); i++)
    hashValue += (ULng32) name[i];
  return hashValue;
}
//-------------------------------------------------------------------------
// constructor() for HistogramCache
// Creates the backing hash dictionary mapping a table's QualifiedName to
// its cached histograms. memoryLimit_ starts at 32MB (33554432 bytes);
// hit/lookup counters start at zero.
//-------------------------------------------------------------------------
HistogramCache::HistogramCache(NAMemory * heap,Lng32 initSize)
: heap_(heap),
  hits_(0),
  lookups_(0),
  memoryLimit_(33554432),
  lruQ_(heap), tfd_(NULL), mfd_(NULL), size_(0)
{
  //create the actual cache
  HashFunctionPtr hashFunc = (HashFunctionPtr)(&qualNameHashFunc);
  histogramsCache_ = new (heap_)
    NAHashDictionary<QualifiedName,HistogramsCacheEntry>
    (hashFunc,initSize,TRUE,heap_);
}
//reset all entries to not accessedInCurrentStatement
// Walks the LRU queue from the tail; only entries touched by the current
// statement need resetting.
void HistogramCache::resetAfterStatement()
{
  for (CollIndex x=lruQ_.entries(); x>0; x--)
  {
    // x-1 because CollIndex is unsigned; looping down to index 0 safely
    if (lruQ_[x-1]->accessedInCurrentStatement())
      lruQ_[x-1]->resetAfterStatement();
  }
}
//-------------------------------------------------------------------------
//invalidate what is in the cache
// deCache removes each entry from the LRU queue, so repeatedly evicting
// the front entry empties the queue; the hash dictionary is then cleared
// and both structures end up empty.
//-------------------------------------------------------------------------
void HistogramCache::invalidateCache()
{
  while (lruQ_.entries())
  {
    HistogramsCacheEntry* entry = lruQ_[0];
    deCache(&entry);
  }
  histogramsCache_->clearAndDestroy();
  lruQ_.clear();
}
//--------------------------------------------------------------------------
// HistogramCache::getCachedHistogram()
// Looks for the histogram in the cache if it is there then makes a deep copy
// of it on the statementHeap() and returns it. If the histogram is not in
// the cache then it fetches the histogram and makes a deep copy of it on the
// context heap to store it in the hash table.
//--------------------------------------------------------------------------
#pragma nowarn(770) // warning elimination
void HistogramCache::getHistograms(NATable& table)
{
const QualifiedName& qualifiedName = table.getFullyQualifiedGuardianName();
ExtendedQualName::SpecialTableType type = table.getTableType();
const NAColumnArray& colArray = table.getNAColumnArray();
StatsList& colStatsList = *(table.getColStats());
const Int64& redefTime = table.getRedefTime();
Int64& statsTime = const_cast<Int64&>(table.getStatsTime());
//1//
//This 'flag' is set to NULL if FetchHistogram has to be called to
//get the statistics in case
//1. If a table's histograms are not in the cache
//2. If some kind of timestamp mismatch occurs and therefore the
// cached histogram has to be refreshed from disk.
//Pointer to cache entry for histograms on this table
HistogramsCacheEntry * cachedHistograms = NULL;
// skip reading the histograms if they have not been changed in last
// CACHE_HISTOGRAMS_REFRESH_INTERVAL hours
NABoolean skipRead = FALSE;
//Do we need to use the cache
//Depends on :
//1. If histogram caching is ON
//2. If the table is a normal table
if(CURRSTMT_OPTDEFAULTS->cacheHistograms() &&
type == ExtendedQualName::NORMAL_TABLE)
{ //2//
// Do we have cached histograms for this table
// look up the cache and get a reference to statistics for this table
cachedHistograms = lookUp(table);
// first thing to check is, if the table to which the histograms are cached
// has been updated
if (cachedHistograms && (cachedHistograms->getRedefTime() != redefTime))
{
deCache(&cachedHistograms);
}
// If the histograms exist in the cache, then we want to avoid reading
// timestamps, if the histograms have not been updated in last default
// refresh time (CACHE_HISTOGRAMS_REFRESH_INTERVAL) or if the histograms in the cache
// are less than CACHE_HISTOGRAMS_REFRESH_INTERVAL old.
Int64 lastRefTimeDef, lastFakeRefTimeDef, currentTime;
if (cachedHistograms)
{
lastRefTimeDef = uint32ToInt64(CURRSTMT_OPTDEFAULTS->defRefTime());
lastFakeRefTimeDef = uint32ToInt64(CURRSTMT_OPTDEFAULTS->defFakeRefTime());
currentTime = getCurrentTime();
Int64 histLastRefreshedTime = cachedHistograms->getRefreshTime();
if (currentTime && cachedHistograms->isAllStatsFake())
{
// Check if it has been more than 'lastFakeRefTimeDef' secs
// (equal to CQD HIST_NO_STATS_REFRESH_INTERVAL) since histograms have
// been checked OR if update statistics automation is ON and it has
// been more than 'lastFakeRefTimeDef'/360 (should = 10 by default).
Int64 timeSinceLastHistRefresh = currentTime - histLastRefreshedTime;
if(!CURRSTMT_OPTDEFAULTS->ustatAutomation() && timeSinceLastHistRefresh > lastFakeRefTimeDef ||
CURRSTMT_OPTDEFAULTS->ustatAutomation() && timeSinceLastHistRefresh > lastFakeRefTimeDef/360)
{
//the histograms are in the cache but we need to re-read them because
//their default values might have been re-estimated
deCache(&cachedHistograms);
}
}
// Histograms are not fake. Check to see if we need to do anymore timestamp checks
if (currentTime && cachedHistograms && lastRefTimeDef > 0)
{
Int64 lastUpdateStatsTime = HistogramsCacheEntry::getLastUpdateStatsTime();
if ((lastUpdateStatsTime != -1) &&
((currentTime - lastUpdateStatsTime) < lastRefTimeDef))
{
// Last known update stats time for this table occurred less than
// CACHE_HISTOGRAMS_REFRESH_INTERVAL secs ago.
if (lastUpdateStatsTime < histLastRefreshedTime)
{
// Last time the histograms cache was refreshed for this table is newer
// than last known update stats time. Skip read of hists.
skipRead = TRUE;
}
}
else
// No update stats time recorded OR last known update stats time occurred
// more than CACHE_HISTOGRAMS_REFRESH_INTERVAL secs ago.
if ((currentTime - histLastRefreshedTime) < lastRefTimeDef)
// Histograms were refreshed less than CACHE_REFRESH_HISTOGRAMS_INTERVAL
// secs ago. Skip read of hists.
skipRead = TRUE;
}
}
//assumption:
//if tempHist is not NULL then it should have a pointer to full Histograms
//check if histogram preFetching is on
if(CURRSTMT_OPTDEFAULTS->preFetchHistograms() && cachedHistograms)
{ //3//
//we do need to preFetch histograms
if(!cachedHistograms->preFetched())
{ //4//
//preFetching is on but these histograms
//were not preFetched so delete them and
//re-Read them
deCache(&cachedHistograms);
} //4//
} //3//
//Check if there is a timestamp mis-match
if(cachedHistograms AND cachedHistograms->getRedefTime() != redefTime)
{ //5//
//the histograms are in the cache but we need to re-read them because of
//a time stamp mismatch
deCache(&cachedHistograms);
} //5//
else if (!skipRead)
{ //6//
//Do some more timestamp calculations and set re-Read flag if
//there is a mis-match
if(cachedHistograms)
{ //9 //
// Check when the histogram table was last modified. If this time doesn't equal
// the modification time of the cached histograms, OR this time is more than
// lastRefTimeDef secs old, call FetchStatsTime to read STATS_TIME field of
// the actual histogram. The last condition here is used to force a call of
// FetchStatsTime() after awhile. This is for update stats automation:
// FetchStatsTime() will update the READ_TIME field of the histogram.
Int64 modifTime;
Int64 currentJulianTime = NA_JulianTimestamp();
GetHSModifyTime(qualifiedName, type, modifTime, FALSE);
Int64 readCntInterval = (Int64)CmpCommon::getDefaultLong(USTAT_AUTO_READTIME_UPDATE_INTERVAL);
if (modifTime != 0)
// If the HISTOGRAMS table was modified since the last time FetchStatsTime()
// called and the time is not the same as the cached histograms OR
// if it was modified more than READTIME_UPDATE_INTERVAL secs ago and
// ustat automation is ON:
if (cachedHistograms->getModifTime() != modifTime ||
(currentJulianTime - modifTime > readCntInterval*1000000 &&
CmpCommon::getDefaultLong(USTAT_AUTOMATION_INTERVAL) > 0))
{ //10//
FetchStatsTime(qualifiedName,type,colArray,statsTime,FALSE);
cachedHistograms->updateRefreshTime();
// If ustat automation is on, FetchStatsTime will modify the HISTOGRAMS table.
// So, the new modification time of the HISTOGRAMS table must be saved to the
// cached histograms when automation is on, so that only changes to HISTOGRAMS
// by update stats cause the above 'if' to be TRUE.
if (CmpCommon::getDefaultLong(USTAT_AUTOMATION_INTERVAL) > 0)
{
GetHSModifyTime(qualifiedName, type, modifTime, FALSE);
cachedHistograms->setModifTime(modifTime);
}
if (cachedHistograms->getStatsTime() != statsTime)
{ //11//
deCache(&cachedHistograms);
} //11//
} //10//
} //9//
} //6//
} //2//
if( cachedHistograms )
{
hits_++;
}
else
{
lookups_++;
}
//retrieve the statistics for the table in colStatsList
createColStatsList(table, cachedHistograms);
//if not using histogram cache, then invalidate cache
if(!CURRSTMT_OPTDEFAULTS->cacheHistograms())
invalidateCache();
} //1//
#pragma warn(770) // warning elimination
//----------------------------------------------------------------------------
// HistogramCache::createColStatsList()
// This method actually puts the statistics for columns that require statistics
// into colStatsList.
// 1. If reRead is false meaning that the table's statistics exist in the cache,
// then this method gets statistics from the cache and copies them into
// colStatsList. If statistics for some columns are not found in the cache, then
// this method calls FetchHistograms to get statistics for these columns. It
// then puts these missing statistics into the cache, then copies the statistics
// from the cache into colStatsList
// 2. If reRead is true meaning that we need to get statistics from disk via
// FetchHistograms. reRead can be true for any of the following cases:
// a. Histogram Caching is on but we updated statistics since we last read them
// so we have deleted the old statistics and we need to read the tables
// statistics again from disk.
// 3. If histograms are being Fetched on demand meaning that histogram caching is off,
// then this method will fetch statistics into colStatsList using FetchHistograms.
//
// Now that we also have the option of reducing the number of intervals in histograms
// this method also factors that in.
//
// Each entry of the colArray contains information about a column that tells
// us what kind of histogram is required by that column. The decision on what
// kind of histogram is required for a column is based on the following factors
//
// 1. A column that is not referenced and is not an index/primary key does
// not need a histogram
//
// 2. Column that is a index/primary key or is referenced in the query but not part
// of a predicate or groupby or orderby clause requires compressed histogram.
// A full histogram can be altered to make it seem like a compressed histogram.
//
// 3. Columns that are part of a predicate or are in orderby or groupby clause requires
// full histogram are referencedForHistogram. A full histogram can only satisfy
// the requirement for a full histogram.
//
// Just for the sake of reiterating the main point:
// Columns that are referencedForHistogram need a full histogram.
// Columns that are just referenced or are an index/primary key only require
// a compressed histogram.
//----------------------------------------------------------------------------
// Populate 'table's colStatsList with histograms for the columns that need
// them: first from the histograms cache (cachedHistograms) when caching is
// on, then via FetchHistograms for anything not covered, merging freshly
// fetched histograms back into the cache. See the block comment above for
// the full algorithm.
void HistogramCache::createColStatsList
(NATable& table, HistogramsCacheEntry* cachedHistograms)
{
  StatsList& colStatsList = *(table.getColStats());
  NAColumnArray& colArray = const_cast<NAColumnArray&>
    (table.getNAColumnArray());
  const QualifiedName& qualifiedName = table.getFullyQualifiedGuardianName();
  ExtendedQualName::SpecialTableType type = table.getTableType();
  const Int64& redefTime = table.getRedefTime();
  Int64& statsTime = const_cast<Int64&>(table.getStatsTime());

  // The singleColsFound is used to prevent stats from being inserted
  // more than once in the output list.
  ColumnSet singleColsFound(STMTHEAP);

  // "lean" cachedHistograms are in the context heap.
  // colStatsList is in the statement heap.
  // The context heap persists for the life of this mxcmp.
  // The statement heap is deleted at end of a compilation.
  // getStatsListFromCache will expand "lean" cachedHistograms
  // into "fat" colStatsList.

  // this points to the stats list
  // that is used to fetch statistics
  // that are not in the cache
  StatsList * statsListForFetch=NULL;

  // Used to count the number of columns
  // whose histograms are in the cache.
  UInt32 coveredList = 0;

  // Do we need to use the cache?
  // Depends on:
  // 1. If histogram caching is ON
  // 2. If the table is a normal table
  if(cachedHistograms && (CURRSTMT_OPTDEFAULTS->cacheHistograms() &&
      type == ExtendedQualName::NORMAL_TABLE))
  {
    // getStatsListFromCache will unmark columns that have statistics
    // in cachedHistograms. All columns whose statistics are not in
    // cachedHistogram are still marked as needing histograms.
    // This is then passed into FetchHistograms, which will
    // return statistics for columns marked as needing histograms.
    // colArray tells getStatsListFromCache what columns need
    // histograms. getStatsListFromCache uses colArray to tell
    // us what columns were not found in cachedHistograms.

    // get statistics from cachedHistograms into list.
    // colArray has the columns whose histograms we need.
    coveredList = getStatsListFromCache
      (colStatsList, colArray, cachedHistograms, singleColsFound);
  }

  Int64 modifTime = 0;

  // set to TRUE if all columns in the table have default statistics
  NABoolean allFakeStats = TRUE;

  // if some of the needed statistics were not found in the cache
  // then call FetchHistograms to get those statistics
  if (colArray.entries() > coveredList)
  {
    // this is the stats list into which statistics will be fetched
    statsListForFetch = &colStatsList;

    if(CURRSTMT_OPTDEFAULTS->cacheHistograms() &&
       type == ExtendedQualName::NORMAL_TABLE)
    {
      // if histogram caching is on and not all histograms were found in the
      // cache then create a new stats list object to get histograms that
      // were missing (fetched into a separate list so the missing ones can
      // be merged into the cache entry before being copied to the output)
      statsListForFetch = new(CmpCommon::statementHeap())
        StatsList(CmpCommon::statementHeap(),2*colArray.entries());
    }

    // set pre-fetching to false by default
    NABoolean preFetch = FALSE;

    // turn prefetching on if caching is on and
    // we want to prefetch histograms
    if(CURRSTMT_OPTDEFAULTS->cacheHistograms() &&
       CURRSTMT_OPTDEFAULTS->preFetchHistograms() &&
       (type == ExtendedQualName::NORMAL_TABLE))
      preFetch = TRUE;

    // flag the unique columns so the uec can be set correctly
    // especially in the case of columns with fake stats
    for (CollIndex j = 0; j < colArray.entries(); j++)
    {
      NAList<NAString> keyColList(STMTHEAP, 1);
      NAColumn *col = colArray[j];
      if (!col->isUnique())
      {
        const NAString &colName = col->getColName();
        keyColList.insert(colName);

        // is there a unique index on this column?
        if (col->needHistogram () &&
            table.getCorrespondingIndex(keyColList,  // input columns
                                        TRUE,        // look for explicit index
                                        TRUE,        // look for unique index
                                        FALSE,       // look for primary key
                                        FALSE,       // look for any index or primary key
                                        FALSE,       // sequence of cols doesn't matter
                                        FALSE,       // don't exclude computed cols
                                        NULL         // index name
                                        ))
          col->setIsUnique();
      }
    }

    FetchHistograms(qualifiedName,
                    type,
                    (colArray),
                    (*statsListForFetch),
                    FALSE,
                    CmpCommon::statementHeap(),
                    modifTime,
                    statsTime,
                    allFakeStats,  // set to TRUE if all columns have default stats
                    preFetch,
                    (Int64) CURRSTMT_OPTDEFAULTS->histDefaultSampleSize()
                    );
  }

  // check if we are using the cache
  if(CURRSTMT_OPTDEFAULTS->cacheHistograms() &&
     type == ExtendedQualName::NORMAL_TABLE)
  {
    // we are using the cache but did we already
    // have the statistics in cache?
    if(cachedHistograms)
    {
      // yes, some of the statistics were already in cache.
      // Did we find statistics in the cache for all the columns
      // whose statistics we needed?
      if (colArray.entries() > coveredList)
      {
        // not all the required statistics were in the cache,
        // some statistics were missing from the cache entry.
        // therefore must have done a FetchHistograms to get
        // the missing histograms. Now update the cache entry
        // by adding the missing histograms that were just fetched.
        // Entry size is tracked via heap growth around the merge.
        ULng32 histCacheHeapSize = heap_->getAllocSize();
        cachedHistograms->addToCachedEntry(colArray,(*statsListForFetch));
        ULng32 entrySizeGrowth = (heap_->getAllocSize() - histCacheHeapSize);
        ULng32 entrySize = cachedHistograms->getSize() + entrySizeGrowth;
        cachedHistograms->setSize(entrySize);
        size_ += entrySizeGrowth;

        // get statistics from the cache that were missing from the
        // cache earlier and have since been added to the cache
        coveredList = getStatsListFromCache
          (colStatsList, colArray, cachedHistograms, singleColsFound);
      }
    }
    else
    {
      CMPASSERT(statsListForFetch);

      // used the cache but had to re-read
      // all the table's histograms from disk

      // put the re-read histograms into cache
      putStatsListIntoCache((*statsListForFetch), colArray, qualifiedName,
                            modifTime, statsTime, redefTime, allFakeStats);

      // look up the cache and get a reference to statistics for this table
      cachedHistograms = lookUp(table);

      // get statistics from the cache
      coveredList = getStatsListFromCache
        (colStatsList, colArray, cachedHistograms, singleColsFound);
    }
  }

  if(CURRSTMT_OPTDEFAULTS->reduceBaseHistograms())
    colStatsList.reduceNumHistIntsAfterFetch(table);

  // clean up: the temporary fetch list is only allocated in the caching
  // path; when it aliases colStatsList it must not be deleted
  if(statsListForFetch != &colStatsList)
    delete statsListForFetch;

  // try to decache any old entries if we're over the memory limit
  if(CURRSTMT_OPTDEFAULTS->cacheHistograms())
  {
    enforceMemorySpaceConstraints();
  }

  traceTable(table);
}
//------------------------------------------------------------------------
//HistogramCache::getStatsListFromCache()
//gets the StatsList into list from cachedHistograms and
//returns the number of columns whose statistics were
//found in the cache. The columns whose statistics are required
//are passed in through colArray.
//------------------------------------------------------------------------
#pragma nowarn(1506)   // warning elimination
// Copy statistics for the columns named in colArray from cachedHistograms
// into 'list'. Returns the number of columns that are "covered": either
// found in the cache or needing no histogram at all. Uncovered columns
// remain marked as needing histograms so the caller can FetchHistograms
// them. singleColsFound records column positions already emitted, so
// repeated calls do not insert duplicate stats into 'list'.
Int32 HistogramCache::getStatsListFromCache
( StatsList& list,                        //In \ Out
  NAColumnArray& colArray,                //In
  HistogramsCacheEntry* cachedHistograms, // In
  ColumnSet& singleColsFound)             //In \ Out
{
  // cachedHistograms points to the memory-efficient contextheap
  // representation of table's histograms.
  // list points to statementheap list container that caller is
  // expecting us to fill-in with ColStats required by colArray.

  // counts columns whose histograms are in cache or not needed
  UInt32 columnsCovered = 0;

  // Collect the mc stats with this temporary list. If the
  // mc stats objects are stored in the middle of the output 'list',
  // IndexDescHistograms::appendHistogramForColumnPosition() will
  // abort, because "There must be a ColStatDesc for every key column!".
  StatsList mcStatsList(CmpCommon::statementHeap());

  // iterate over all the columns in the colArray
  for(UInt32 i=0;i<colArray.entries();i++)
  {
    // get a reference to the column
    NAColumn * column = colArray[i];

    // get the position of the column in the table
    CollIndex colPos = column->getPosition();

    // singleColsFound is used to prevent stats from
    // being inserted more than once in the output list.
    if (singleColsFound.contains(colPos))
    {
      columnsCovered++;
      continue;
    }

    NABoolean columnNeedsHist = column->needHistogram();
    NABoolean columnNeedsFullHist = column->needFullHistogram();

    // Did histograms for this column get added?
    NABoolean colAdded = FALSE;

    if (NOT columnNeedsHist)
    {
      // if the column was marked as not needing any histogram
      // then increment columnsCovered & skip to next column, as neither
      // single interval nor full histograms are required for this column.
      columnsCovered++;
    }
    else if (cachedHistograms->contains(colPos) AND columnNeedsHist)
    {
      // we have full histograms for this column
      columnsCovered++;
      colAdded = TRUE;

      // set flag in column not to fetch histogram:
      // the histogram is already in cache
      column->setDontNeedHistogram();

      NABoolean copyIntervals=TRUE;
      ColStatsSharedPtr const singleColStats =
        cachedHistograms->getHistForCol(*column);
      if (NOT columnNeedsFullHist)
      {
        // full histograms are not required. get single interval histogram
        // from the full histogram and insert it into the user's statslist
        copyIntervals=FALSE;
      }

      // since we've tested containment, we are guaranteed to get a
      // non-null histogram for column
      list.insertAt
        (list.entries(),
         ColStats::deepCopySingleColHistFromCache
         (*singleColStats, *column, list.heap(), copyIntervals));
    }

    // Assumption: a multi-column histogram is retrieved when
    // histograms for any of its columns are retrieved.
    if (columnNeedsHist)
    {
      // insert all multicolumns referencing column;
      // use singleColsFound to avoid duplicates
      cachedHistograms->getMCStatsForColFromCacheIntoList
        (mcStatsList, *column, singleColsFound);
    }

    // if column was added, then add it to the duplist
    if (colAdded) singleColsFound += colPos;
  }

  // append the mc stats at the end of the output list
  // (see the comment on mcStatsList above for why they must come last)
  for (Lng32 i=0; i<mcStatsList.entries(); i++ ) {
    list.insertAt(list.entries(), mcStatsList[i]);
  }

  return columnsCovered;
}
#pragma warn(1506)  // warning elimination
// Build a memory-efficient (contextheap) representation of colStatsList
// and insert it into the histograms cache, keyed by the table's qualified
// name. Used for stats lists that had to be re-read or were never cached.
// Also updates the cache's size accounting (size_) and the LRU queue.
void HistogramCache::putStatsListIntoCache(StatsList & colStatsList,
                                           const NAColumnArray& colArray,
                                           const QualifiedName & qualifiedName,
                                           Int64 modifTime,
                                           Int64 statsTime,
                                           const Int64 & redefTime,
                                           NABoolean allFakeStats)
{
  ULng32 histCacheHeapSize = heap_->getAllocSize();

  // create memory efficient representation of colStatsList;
  // entry size is measured as the heap growth caused by construction
  HistogramsCacheEntry * histogramsForCache = new (heap_)
    HistogramsCacheEntry(colStatsList, qualifiedName,
                         modifTime, statsTime, redefTime, heap_);
  ULng32 cacheEntrySize = heap_->getAllocSize() - histCacheHeapSize;

  if(CmpCommon::getDefault(CACHE_HISTOGRAMS_CHECK_FOR_LEAKS) == DF_ON)
  {
    // leak check: destroying the entry must return the heap to its prior
    // size; then construct the entry again for real
    delete histogramsForCache;
    ULng32 histCacheHeapSize2 = heap_->getAllocSize();
    CMPASSERT( histCacheHeapSize == histCacheHeapSize2);
    histogramsForCache = new (heap_)
      HistogramsCacheEntry(colStatsList, qualifiedName,
                           modifTime, statsTime, redefTime, heap_);
    cacheEntrySize = heap_->getAllocSize() - histCacheHeapSize2;
  }
  histogramsForCache->setSize(cacheEntrySize);

  // (an unreachable "if(FALSE)" debug block that rebuilt the entry a third
  // time was removed here -- it could never execute)

  // add it to the cache
  QualifiedName* key = const_cast<QualifiedName*>
    (histogramsForCache->getName());
  QualifiedName *name = histogramsCache_->insert(key, histogramsForCache);
  if (name)
  {
    // append it to least recently used queue
    lruQ_.insertAt(lruQ_.entries(), histogramsForCache);
  }
  // NOTE(review): if the insert fails (name == NULL) the entry is neither
  // queued nor deleted, yet its size is still charged to size_ below --
  // presumably insert cannot fail here; verify against NAHashDictionary.
  size_ += cacheEntrySize;
}
// Evict least-recently-used entries until the cache fits within
// memoryLimit_. Returns TRUE when the cache is within the limit,
// FALSE when eviction had to stop early: either the oldest remaining
// entry is still in use by the current statement, or the queue ran
// dry while the cache is still over the limit.
NABoolean HistogramCache::enforceMemorySpaceConstraints()
{
  while (size_ > memoryLimit_)
  {
    if (lruQ_.entries() == 0)
      return FALSE; // nothing left to evict

    HistogramsCacheEntry* victim = lruQ_[0];
    if (victim->accessedInCurrentStatement())
      return FALSE; // cannot evict an entry the current statement uses

    deCache(&victim);
  }
  return TRUE;
}
// Look up the given table's histograms in the cache.
// On a hit, the entry is moved to the most-recently-used end of the
// LRU queue and returned; on a miss (or if the cache dictionary was
// never allocated) NULL is returned.
HistogramsCacheEntry* HistogramCache::lookUp(NATable& table)
{
  if (!histogramsCache_)
    return NULL;

  const QualifiedName& key = table.getFullyQualifiedGuardianName();
  HistogramsCacheEntry* found = histogramsCache_->getFirstValue(&key);
  if (found)
  {
    // refresh LRU position: move entry to the tail (MRU end) of the queue
    lruQ_.remove(found);
    lruQ_.insertAt(lruQ_.entries(), found);
  }
  return found;
}
// Remove one entry from the cache: unhook it from the hash dictionary
// and the LRU queue, delete it, and subtract its recorded size from the
// cache's size accounting. *entry is nulled out on return. A NULL
// pointer (or pointer to NULL) is a no-op.
void HistogramCache::deCache(HistogramsCacheEntry** entry)
{
  if (!entry || !(*entry))
    return;

  HistogramsCacheEntry* victim = *entry;
  ULng32 entrySize = victim->getSize();

  histogramsCache_->remove(const_cast<QualifiedName*>(victim->getName()));
  lruQ_.remove(victim);

  // measure heap before/after deletion so the leak check below can
  // verify at least entrySize bytes were actually reclaimed
  ULng32 heapSizeBeforeDelete = heap_->getAllocSize();
  delete victim;
  ULng32 memReclaimed = heapSizeBeforeDelete - heap_->getAllocSize();

  if (CmpCommon::getDefault(CACHE_HISTOGRAMS_CHECK_FOR_LEAKS) == DF_ON)
    CMPASSERT( memReclaimed >= entrySize );

  *entry = NULL;
  size_ -= entrySize;
}
// Change the cache's memory budget and immediately evict LRU entries
// if the new limit is already exceeded.
void HistogramCache::resizeCache(size_t limit)
{
  memoryLimit_ = limit;
  enforceMemorySpaceConstraints();
}
// Number of tables currently cached; 0 when the hash dictionary has
// not been allocated.
ULng32 HistogramCache::entries() const
{
  if (histogramsCache_)
    return histogramsCache_->entries();
  return 0;
}
// Debug convenience: dump the whole cache via print() with defaults.
void HistogramCache::display() const
{
  HistogramCache::print();
}
// Debug dump (debug builds only): cache summary followed by each entry,
// from most- to least-recently used.
void
HistogramCache::print(FILE *ofd, const char* indent, const char* title) const
{
#ifndef NDEBUG
  BUMP_INDENT(indent);
  fprintf(ofd,"%s%s\n",NEW_INDENT,title);
  fprintf(ofd,"entries: %d \n", entries());
  fprintf(ofd,"size: %d bytes\n", size_);
  // walk the LRU queue from the tail (MRU) down to the head (LRU)
  CollIndex x = lruQ_.entries();
  while (x > 0)
  {
    x--;
    lruQ_[x]->print(ofd, indent, "HistogramCacheEntry");
  }
#endif
}
// Append one table's name and column stats to the trace file.
// No-op when tracing has not been enabled via openTraceFile().
void HistogramCache::traceTable(NATable& table) const
{
  if (!tfd_)
    return; // tracing not enabled

  NAString tableName(table.getTableName().getQualifiedNameAsString());
  fprintf(tfd_,"table:%s\n",tableName.data());
  table.getColStats()->trace(tfd_, &table);
  fflush(tfd_);
}
// Append the cache's size summary to the trace file at end of tracing.
// No-op when tracing has not been enabled via openTraceFile().
void HistogramCache::traceTablesFinalize() const
{
  if (!tfd_)
    return; // tracing not enabled

  fprintf(tfd_,"cache_size:%d\n", size_);
  fprintf(tfd_,"cache_heap_size:" PFSZ "\n", heap_->getAllocSize());
  fflush(tfd_);
}
// Close the histogram trace file, if open, and reset the handle.
void HistogramCache::closeTraceFile()
{
  if (tfd_) fclose(tfd_);
  tfd_ = NULL;
}
// Open (create/truncate) the histogram trace file.
// NOTE(review): an already-open tfd_ is not closed first (leaks the old
// handle), and a failed fopen leaves tfd_ NULL so tracing is silently
// disabled -- presumably acceptable for this debug facility; verify.
void HistogramCache::openTraceFile(const char *filename)
{
  tfd_ = fopen(filename, "w+");
}
// Close the histogram monitor file, if open, and reset the handle.
void HistogramCache::closeMonitorFile()
{
  if (mfd_) fclose(mfd_);
  mfd_ = NULL;
}
// Open (create/truncate) the histogram monitor file.
// NOTE(review): as with openTraceFile, an already-open mfd_ is not
// closed first and a failed fopen silently disables monitoring.
void HistogramCache::openMonitorFile(const char *filename)
{
  mfd_ = fopen(filename, "w+");
}
// Write monitoring output for every cached entry to the monitor file,
// most-recently used first, plus overall memory figures when the
// MONITOR_MEM_DETAIL CQD is ON. No-op if monitoring is not enabled.
void HistogramCache::monitor() const
{
  // if histogram caching is off, there's nothing to monitor
  if(!OptDefaults::cacheHistograms()) return;

  if (mfd_)
  {
    // walk the LRU queue from the tail (MRU) down to the head (LRU)
    for (CollIndex x=lruQ_.entries(); x>0; x--)
    {
      lruQ_[x-1]->monitor(mfd_);
    }
    if (CmpCommon::getDefault(CACHE_HISTOGRAMS_MONITOR_MEM_DETAIL) == DF_ON)
    {
      fprintf(mfd_,"cache_size:%d\n", size_);
      fprintf(mfd_,"cache_heap_size:" PFSZ "\n", heap_->getAllocSize());
    }
    fflush(mfd_);
  }
}
// Constructor: build the memory-efficient (contextheap) representation
// of colStats, which may hold both single-column & multi-column
// histograms. Deep-copies the key, all single-column histograms (fake
// ones included) and all multi-column histograms onto 'heap'.
HistogramsCacheEntry::HistogramsCacheEntry
(const StatsList & colStats,
 const QualifiedName & qualifiedName,
 const Int64 & modifTime,
 const Int64 & statsTime,
 const Int64 & redefTime,
 NAMemory * heap)
  : full_(NULL), multiColumn_(NULL), name_(NULL), heap_(heap)
  , refreshTime_(0), singleColumnPositions_(heap)
  , accessedInCurrentStatement_(TRUE)
  , size_(0)
{
  modifTime_ = modifTime;
  statsTime_ = statsTime;
  updateRefreshTime();
  redefTime_ = redefTime;
  preFetched_ = CURRSTMT_OPTDEFAULTS->preFetchHistograms();
  allFakeStats_ = colStats.allFakeStats();

  // make a deep copy of the key.
  // qualifiedName is short-lived (from stmtheap).
  // name_ is longer-lived (from contextheap).
  name_ = new(heap_) QualifiedName(qualifiedName, heap_);

  // create pointers to full single-column histograms (include fake)
  UInt32 singleColumnCount = colStats.getSingleColumnCount();
  if (singleColumnCount > 0)
  {
    full_ = new(heap_) NAList<ColStatsSharedPtr>(heap_, singleColumnCount);

    // fill-in pointers to deep copy of single-column histograms
    for(UInt32 i=0; i<colStats.entries();i++)
    {
      const NAColumnArray& colArray = colStats[i]->getStatColumns();
      if (colArray.entries() == 1)
      {
        // keep pointer to deep copy of single-column histogram
        full_->insertAt(full_->entries(),
                        ColStats::deepCopyHistIntoCache(*(colStats[i]),heap_));

        // update singleColumnPositions so contains() checks are cheap
        singleColumnPositions_ +=
          (Lng32)colArray.getColumn(Lng32(0))->getPosition();
      }
    }
  }

  // create pointers to multi-column histograms
  multiColumn_ = new(heap_) MultiColumnHistogramList(heap_);

  // add deep copy of multi-column histograms (but, avoid duplicates)
  multiColumn_->addMultiColumnHistograms(colStats);
}
// addToCachedEntry merges freshly fetched histograms ('list') of the
// same types already held here (single-column and/or multi-column)
// into this cache entry, deep-copying them onto heap_ and skipping
// anything this entry already contains.
void
HistogramsCacheEntry::addToCachedEntry
(NAColumnArray & columns, StatsList & list)
{
  // update allFakeStats_
  if (allFakeStats_)
    allFakeStats_ = list.allFakeStats();

  // iterate over all the colstats in the stats list passed in
  ColumnSet singleColHistAdded(heap_);
  for(UInt32 j=0;j<list.entries();j++)
  {
    // get the columns for the current colstats
    NAColumnArray colList = list[j]->getStatColumns();

    // get the first column for the columns represented by
    // the current colstats
    NAColumn * column = colList.getColumn(Lng32(0));

    // column position of first column
    Lng32 currentColPosition = column->getPosition();

    // check if current column requires full histograms
    NABoolean requiresHistogram = column->needHistogram();

    // check if current colstats is a single-column histogram
    NABoolean singleColHist = (colList.entries()==1? TRUE: FALSE);
    // NOTE(review): mcForHbasePart is currently unused; it belongs to the
    // commented-out alternative condition below -- confirm before removing.
    NABoolean mcForHbasePart = list[j]->isMCforHbasePartitioning ();

    //only fullHistograms are inserted in full_.
    //We also add fake histograms to the cache.
    //This will help us not to call FetchHistograms
    //for a column that has fake statistics.
    //Previously we did not cache statistics for
    //columns that did not have statistics in the histograms tables
    //(FetchHistogram faked statistics for such column).
    //Since statistics for such columns were not found in the
    //cache we had to repeatedly call FetchHistogram
    //to get statistics for these columns
    //instead of just getting the fake statistics from the cache.
    //FetchHistograms always return fake statistics for such columns
    //so why not just cache them and not call FetchHistograms.
    //When statistics are added for these columns then the timestamp
    //matching code will realize that and
    //re-read the statistics for the table again.
    if((requiresHistogram || NOT singleColHist)|| list[j]->isFakeHistogram())
    {
      //if single column Histograms
      //if((singleColHist || mcForHbasePart) && (!singleColumnPositions_.contains(currentColPosition)))
      if((singleColHist) && (!singleColumnPositions_.contains(currentColPosition)))
      {
        //Current colstats represent a single column histogram
        //Insert the colstats from the stats list passed in, at the end of
        //this objects stats list (represented by colStats_).
        // NOTE(review): full_ is only allocated by the constructor when at
        // least one single-column histogram was present -- presumably that
        // always holds by the time we get here; verify (NULL deref risk).
        full_->insertAt(full_->entries(),
                        ColStats::deepCopyHistIntoCache(*(list[j]),heap_));
        singleColHistAdded += currentColPosition;
      }
      else if (NOT singleColHist)
      {
        //Assumption: a multi-column histogram is retrieved when
        //histograms for any of its columns are retrieved.
        //e.g. Table T1(a int, b int, c int)
        //histograms: {a},{b},{c},{a,b},{a,c},{b,c},{a,b,c}
        //If histograms for column a are fetched we will get
        //histograms: {a}, {a,b}, {a,c}, {a,b,c}
        //If histograms for column b are fetched we will get
        //histograms: {b}, {a,b}, {b,c}, {a,b,c}
        //Therefore to avoid duplicated multicolumn stats being inserted
        //we pass down the list of single columns for which we have stats

        //Current colstats represent a multicolumn histogram
        addMultiColumnHistogram(*(list[j]), &singleColumnPositions_);
      }
    }
  }
  // commit the positions added this call (deferred so that duplicate
  // single-column entries within 'list' itself are still filtered by
  // singleColumnPositions_ during the loop)
  singleColumnPositions_ += singleColHistAdded;
}
// Add one multi-column histogram to this cache entry, lazily creating
// the multi-column list. singleColPositions names the single columns
// whose stats are already present; the list uses it to avoid inserting
// duplicate multi-column histograms.
void
HistogramsCacheEntry::addMultiColumnHistogram
(const ColStats& mcStat, ColumnSet* singleColPositions)
{
  if (!multiColumn_)
    multiColumn_ = new(heap_) MultiColumnHistogramList(heap_);

  multiColumn_->addMultiColumnHistogram(mcStat, singleColPositions);
}
// Return the cache key for this entry: the table's qualified name.
// The returned object is owned by this entry (allocated on heap_).
const QualifiedName*
HistogramsCacheEntry::getName() const
{
  return name_;
}
// Return the x'th cached single-column histogram, or NULL when there
// are none or x is out of range.
const ColStatsSharedPtr
HistogramsCacheEntry::getStatsAt(CollIndex x) const
{
  // Use >= : valid indices are 0 .. full_->entries()-1. The previous
  // check (x > entries()) let x == entries() through, indexing one
  // past the end of the list.
  if (!full_ OR x >= full_->entries())
    return NULL;
  else
    return full_->at(x);
}
// Return the x'th cached multi-column histogram, or NULL when there
// are none or x is out of range.
const MultiColumnHistogram*
HistogramsCacheEntry::getMultiColumnAt(CollIndex x) const
{
  // Use >= : valid indices are 0 .. multiColumn_->entries()-1. The
  // previous check (x > entries()) let x == entries() through,
  // indexing one past the end of the list.
  if (!multiColumn_ OR x >= multiColumn_->entries())
    return NULL;
  else
    return multiColumn_->at(x);
}
// Return the cached full single-column histogram for 'col', or NULL
// if this entry has no single-column histogram covering exactly that
// column position.
ColStatsSharedPtr const
HistogramsCacheEntry::getHistForCol (NAColumn& col) const
{
  // no single-column histograms cached at all
  if (!full_) return NULL;

  // linear scan: the first stats object covering exactly this one
  // column position is the one we want
  for (UInt32 i = 0; i < full_->entries(); i++)
  {
    if ((*full_)[i]->getStatColumnPositions().entries() != 1)
      continue; // not a single-column histogram; skip

    if ((*full_)[i]->getStatColumnPositions().contains(col.getPosition()))
      return (*full_)[i];
  }
  return NULL;
}
// Insert into 'list' a "fat" (statement-heap) ColStats object for every
// cached multi-column histogram that references 'col'. singleColsFound
// names columns whose single-column stats were already processed by the
// caller; any multi-column histogram touching one of them is skipped to
// avoid duplicates.
// Assumption: a multi-column histogram is retrieved when
// histograms for any of its columns are retrieved.
void
HistogramsCacheEntry::getMCStatsForColFromCacheIntoList
(StatsList& list,            // out: "fat" rep of multi-column stats for col
 NAColumn& col,              // in: column whose multi-column stats we want
 ColumnSet& singleColsFound) // in: columns whose single-column
                             // stats have already been processed by caller
{
  CollIndex multiColCount = multiColumnCount();
  if (multiColCount <= 0) return; // entry has no multicolumn stats

  // search entry's multicolumn stats for col
  NAMemory* heap = list.heap();
  for(UInt32 i=0; i<multiColCount; i++)
  {
    const MultiColumnHistogram* mcHist = getMultiColumnAt(i);
    if (mcHist)
    {
      ColumnSet mcCols(mcHist->cols(), STMTHEAP);
      if (!mcCols.contains(col.getPosition()))
        continue; // this mc histogram doesn't cover col

      // NOTE(review): intersectSet presumably modifies mcCols in place;
      // mcCols is not used again after this test, so that is harmless.
      if ((mcCols.intersectSet(singleColsFound)).entries())
        continue; // avoid dup

      // create "fat" representation of multi-column histogram
      ColStatsSharedPtr mcStat;
      if (col.getNATable()->isHbaseTable() && col.isPrimaryKey()) {
        // For mcStats covering a key column of a HBASE table,
        // create a colStat object with multi-intervals, which will
        // be useful in allowing better stats-based split.
        mcStat = new (STMTHEAP) ColStats(*(mcHist->getColStatsPtr()),
                                         STMTHEAP, TRUE);
      } else {
        // reconstitute a single-interval ColStats from the compact
        // (id, uec, rowcount) form kept in the cache
        ComUID id(mcHist->id());
        CostScalar uec(mcHist->uec());
        CostScalar rows(mcHist->rows());
        mcStat = new (STMTHEAP) ColStats
          (id, uec, rows, rows, FALSE, FALSE, NULL, FALSE,
           1.0, 1.0, 0, STMTHEAP, FALSE);

        // populate its NAColumnArray with mcCols
        (*mcStat).populateColumnArray(mcHist->cols(), col.getNATable());

        // set up its histogram interval: a single [lo, hi] pair holding
        // the whole rowcount and uec
        HistogramSharedPtr histogram = new(STMTHEAP) Histogram(heap);
        HistInt loInt;
        NABoolean boundaryInclusive = TRUE;
        HistInt hiInt(1, NULL, (*mcStat).statColumns(),
                      rows, uec, boundaryInclusive, 0);
        histogram->insert(loInt);
        histogram->insert(hiInt);
        mcStat->setHistogram(histogram);

        MCSkewedValueList * mcSkewedValueList = new (STMTHEAP) MCSkewedValueList (*(mcHist->getMCSkewedValueList()), STMTHEAP);
        mcStat->setMCSkewedValueList(*mcSkewedValueList);
      }

      // append the mcStat to the output list
      list.insertAt(list.entries(), mcStat);
    }
  }
}
// Destructor: tear down all deep-copied state owned by this entry --
// single-column histograms, the multi-column list and the key name.
HistogramsCacheEntry::~HistogramsCacheEntry()
{
  if(full_)
  {
    ColStatsSharedPtr colStat = NULL;
    while(full_->getFirst(colStat))
    {
      // release the histogram data the ColStats points at
      colStat->deepDeleteFromHistogramCache();

      //colStats is a shared pointer
      //and will not be deleted till
      //ref count goes to zero
      //Therefore to avoid leaks and
      //ensure colStats is deleted we
      //do the following
      ColStats * colStatPtr = colStat.get();
      colStat.reset();
      delete colStatPtr;
    }
    delete full_;
  }
  if(multiColumn_)
    delete multiColumn_;
  if(name_)
    delete name_;
  singleColumnPositions_.clear();
}
// Debug convenience: dump this entry via print() with defaults.
void HistogramsCacheEntry::display() const
{
  HistogramsCacheEntry::print();
}
// Write this entry's monitoring record to mfd (the already-open monitor
// file). Histogram detail and memory detail are each gated by their own
// CQD.
void HistogramsCacheEntry::monitor(FILE* mfd) const
{
  NAString tableName(name_->getQualifiedNameAsString());
  fprintf(mfd,"table:%s\n",tableName.data());
  if (CmpCommon::getDefault(CACHE_HISTOGRAMS_MONITOR_HIST_DETAIL) == DF_ON)
  {
    if (full_)
    {
      // trace each cached single-column histogram
      for (CollIndex x=0; x<full_->entries(); x++)
      {
        full_->at(x)->trace(mfd, NULL);
      }
    }
    if (multiColumn_)
    {
      multiColumn_->print(mfd, NULL);
    }
  }
  if (CmpCommon::getDefault(CACHE_HISTOGRAMS_MONITOR_MEM_DETAIL) == DF_ON)
    fprintf(mfd,"table_size:%d\n",size_);
  fflush(mfd);
}
// Debug dump (debug builds only): entry flags, timestamps, and the
// single- and multi-column histograms held by this entry.
void HistogramsCacheEntry::print
(FILE *ofd, const char* indent, const char* title) const
{
#ifndef NDEBUG
  BUMP_INDENT(indent);
  fprintf(ofd,"%s%s\n",NEW_INDENT,title);
  name_->print(ofd);
  fprintf(ofd,"accessedInCurrentStatement_:%d ", accessedInCurrentStatement_);
  fprintf(ofd,"allFakeStats_:%d ", allFakeStats_);
  fprintf(ofd,"preFetched_:%d \n", preFetched_);
  // timestamps are converted to readable form one at a time,
  // reusing the same buffer
  char time[30];
  convertInt64ToAscii(modifTime_, time);
  fprintf(ofd,"modifTime_:%s ", time);
  convertInt64ToAscii(redefTime_, time);
  fprintf(ofd,"redefTime_:%s ", time);
  convertInt64ToAscii(refreshTime_, time);
  fprintf(ofd,"refreshTime_:%s ", time);
  convertInt64ToAscii(statsTime_, time);
  fprintf(ofd,"statsTime_:%s ", time);
  convertInt64ToAscii(getLastUpdateStatsTime(), time);
  fprintf(ofd,"lastUpdateStatsTime:%s \n", time);
  fprintf(ofd,"single-column histograms:%d ", singleColumnCount());
  singleColumnPositions_.printColsFromTable(ofd,NULL);
  if (full_)
  {
    for (CollIndex x=0; x<full_->entries(); x++)
    {
      full_->at(x)->print(ofd);
    }
  }
  fprintf(ofd,"multi-column histograms:%d ", multiColumnCount());
  if (multiColumn_)
  {
    multiColumn_->print(ofd);
  }
#endif
}
// -----------------------------------------------------------------------
// getRangePartitionBoundaryValues()
// This method receives a string within which the partitioning key values
// appear in a comma-separated sequence. It returns an ItemExprList that
// contains ConstValue expressions for representing each partitioning
// key value as shown below:
//
// ------ ------ ------
// "<value1>, <value2>, <value3>" => | | ---> | | ---> | |
// ------ ------ ------
// | | |
// v v v
// ConstValue ConstValue ConstValue
// (<value1>) (<value2>) (<value3>)
//
// -----------------------------------------------------------------------
// Parse a comma-separated list of partitioning key values (optionally
// wrapped in parentheses) into an ItemExpr tree of ConstValue nodes.
// Returns NULL when the text cannot be parsed (e.g. unsupported MP
// datetime literal syntax). See the block comment above for a diagram.
ItemExpr * getRangePartitionBoundaryValues
(const char * keyValueBuffer,
 const Lng32 keyValueBufferSize,
 NAMemory* heap,
 CharInfo::CharSet strCharSet = CharInfo::UTF8
 )
{
  char * keyValue;          // the string for the key value
  ItemExpr * partKeyValue;  // -> dynamically allocated expression
  Lng32 length;             // index to the next key value and its length
  Lng32 startIndex = 0;
  Lng32 stopIndex = keyValueBufferSize-1;

  startIndex = skipLeadingBlanks(keyValueBuffer, startIndex, stopIndex);

  // Skip leading '('
  NABoolean leadingParen = FALSE;
  if (keyValueBuffer[startIndex] == '(')
  {
    leadingParen = TRUE;
    startIndex++;
  }

  stopIndex = skipTrailingBlanks(&keyValueBuffer[startIndex], stopIndex);

  // Skip trailing ')' only if there was a leading paren. This
  // is the case where the value comes in as (<value>)
  if ((keyValueBuffer[stopIndex] == ')') &&
      (leadingParen == TRUE))
    stopIndex--;

  length = stopIndex - startIndex + 1;
  NAString keyValueString( &keyValueBuffer[startIndex], (size_t) length );

  // ---------------------------------------------------------------------
  // Copy the string from the keyValueBuffer into a string that
  // is terminated by a semicolon and a null.
  // ---------------------------------------------------------------------
  keyValue = new (heap) char[length + 1 /* for semicolon */ + 1 /* for eol */ ];

  // strncpy( keyValue, keyValueString.data(), (size_t) length );
  //soln:10-031112-1256
  // strncpy replaced with memcpy to handle the case where a column of the
  // partition's first key value is a NUL character within double-quotes,
  // e.g. '\0', i.e. ( "( "6666673" , "\0" , 8060928 )").
  memcpy(keyValue, (char *)( keyValueString.data() ), (size_t) length );
  keyValue[length] = ';';
  keyValue[length+1] = '\0';

  // ---------------------------------------------------------------------
  // Create a new ItemExprList using the parse tree generated from the
  // string of comma-separated literals.
  // ---------------------------------------------------------------------
  Parser parser(CmpCommon::context());

  //partKeyValue = parser.getItemExprTree(keyValue);
  partKeyValue = parser.getItemExprTree(keyValue,length+1,strCharSet);
  // Check to see if the key values parsed successfully.  An error
  // could occur if the table is an MP Table and the first key values
  // contain MP syntax that is not supported by MX.  For instance
  // Datetime literals which do not have the max number of digits in
  // each field. (e.g. DATETIME '1999-2-4' YEAR TO DAY)
  //
  if(partKeyValue == NULL) {
    return NULL;
  }

  // return a copy allocated on the caller's heap (the parse tree itself
  // lives on the statement heap)
  return partKeyValue->copyTree(heap);
} // static getRangePartitionBoundaryValues()
// -----------------------------------------------------------------------
// getRangePartitionBoundaryValuesFromEncodedKeys()
//
// In some cases we don't have a text representation of the start keys,
// only the encoded keys (e.g. from HBase region start keys). In this
// case, un-encode these binary values and form ConstValues from them,
// returning an ItemList with one ConstValue per partitioning key column.
//
// partColArray  : the partitioning key columns, in key order
// encodedKey    : the (possibly truncated) encoded region start key
// encodedKeyLen : number of bytes actually provided in encodedKey
// heap          : heap used for all allocations and the result tree
//
// Returns NULL if a key value cannot be decoded (the caller reports
// the error) or if the partial-column CQD is set to raise an error.
// -----------------------------------------------------------------------
static ItemExpr * getRangePartitionBoundaryValuesFromEncodedKeys(
     const NAColumnArray & partColArray,
     const char * encodedKey,
     const Lng32 encodedKeyLen,
     NAMemory* heap)
{
  Lng32 keyColOffset = 0;
  ItemExpr *result = NULL;
  char *actEncodedKey = (char *) encodedKey; // original key or a copy
  const char* encodedKeyP = NULL;
  char* varCharstr = NULL;
  Lng32 totalKeyLength = 0;
  Lng32 numProvidedCols = 0;
  Lng32 lenOfFullyProvidedCols = 0;

  // Compute the full encoded key length and determine how many leading
  // columns the provided key covers completely.
  // in newer HBase versions, the region start key may be shorter than an actual key
  for (CollIndex i = 0; i < partColArray.entries(); i++)
    {
      const NAType *pkType = partColArray[i]->getType();
      Lng32 colEncodedLength = pkType->getSQLnullHdrSize() + pkType->getNominalSize();

      totalKeyLength += colEncodedLength;
      if (totalKeyLength <= encodedKeyLen)
        {
          // this column is fully provided in the region start key
          numProvidedCols++;
          lenOfFullyProvidedCols = totalKeyLength;
        }
    }

  if (encodedKeyLen < totalKeyLength)
    {
      // the provided key does not cover all the key columns
      // need to extend the partial buffer, allocate a copy
      actEncodedKey = new(heap) char[totalKeyLength];
      memcpy(actEncodedKey, encodedKey, encodedKeyLen);

      // extend the remainder with zeroes, assuming that this is what
      // HBase does when deciding which region a row belongs to
      memset(&actEncodedKey[encodedKeyLen], 0, totalKeyLength-encodedKeyLen);
      Lng32 currOffset = lenOfFullyProvidedCols;

      // go through the partially or completely missing columns and make something up
      // so that we can treat the buffer as fully encoded in the final loop below
      for (CollIndex j = numProvidedCols; j < partColArray.entries(); j++)
        {
          const NAType *pkType = partColArray[j]->getType();
          Lng32 nullHdrSize = pkType->getSQLnullHdrSize();
          int valOffset = currOffset + nullHdrSize;
          int valEncodedLength = pkType->getNominalSize();
          Lng32 colEncodedLength = nullHdrSize + valEncodedLength;
          NABoolean isDescending = (partColArray[j]->getClusteringKeyOrdering() == DESCENDING);
          NABoolean nullHdrAlreadySet = FALSE;
          NABoolean columnIsPartiallyProvided = (currOffset < encodedKeyLen);

          if (columnIsPartiallyProvided)
            {
              // This column is partially provided, try to make sure that it has a valid
              // value. Note that the buffer has a prefix of some bytes with actual key
              // values, followed by bytes that are zeroed out.

              // the number of bytes actually provided in the key (not filled in)
              int numBytesInProvidedVal = encodedKeyLen-valOffset;

              if (nullHdrSize && numBytesInProvidedVal <= 0)
                {
                  // only the null-header or a part thereof was provided
                  CMPASSERT(nullHdrSize == sizeof(short));

                  // get the partial indicator values into a short
                  short indicatorVal = *reinterpret_cast<short *>(&actEncodedKey[currOffset]);

                  // make it either 0 or -1
                  if (indicatorVal)
                    indicatorVal = -1;

                  // put it back and let the code below know that we set it already
                  // (this is handled otherwise as a non-provided column)
                  memcpy(&actEncodedKey[currOffset], &indicatorVal, sizeof(indicatorVal));
                  nullHdrAlreadySet = TRUE;
                  columnIsPartiallyProvided = FALSE;
                }

              // Next, decide by data type whether it's ok for the type to have
              // a suffix of the buffer zeroed out (descending columns will
              // see 0xFF values, once the encoded value gets inverted). If the
              // type can't take it or we are not quite sure, we'll just discard
              // all the partial information. Note that this could potentially
              // lead to two partition boundaries with the same key, and also
              // to partition boundaries that don't reflect the actual region
              // boundaries.

              if (columnIsPartiallyProvided)
                switch (pkType->getTypeQualifier())
                  {
                  case NA_NUMERIC_TYPE:
                    {
                      NumericType *nt = (NumericType *) pkType;

                      if (!nt->isExact() || nt->isDecimal() || nt->isBigNum() ||
                          (isDescending && nt->decimalPrecision()))
                        // we may be able to improve this in the future
                        columnIsPartiallyProvided = FALSE;
                    }
                    break;

                  case NA_DATETIME_TYPE:
                  case NA_INTERVAL_TYPE:
                    // those types should tolerate zeroing out trailing bytes, but
                    // not filling with 0xFF
                    if (isDescending)
                      columnIsPartiallyProvided = FALSE;
                    break;

                  case NA_CHARACTER_TYPE:
                    // generally, character types should also tolerate zeroing out
                    // trailing bytes, but we might need to clean up characters
                    // that got split in the middle
                    {
                      CharInfo::CharSet cs = pkType->getCharSet();

                      switch (cs)
                        {
                        case CharInfo::UCS2:
                          // For now just accept partial characters, it's probably ok
                          // since they are just used as a key. May look funny in EXPLAIN.
                          break;

                        case CharInfo::UTF8:
                          {
                            // temporarily invert the provided key so it is actual UTF8
                            if (isDescending)
                              for (int i=0; i<numBytesInProvidedVal; i++)
                                actEncodedKey[valOffset+i] = ~actEncodedKey[valOffset+i];

                            CMPASSERT(numBytesInProvidedVal > 0);

                            // remove a trailing partial character, if needed
                            int validLen = lightValidateUTF8Str(&actEncodedKey[valOffset],
                                                                numBytesInProvidedVal);

                            // replace the remainder of the buffer with UTF8 min/max chars
                            fillWithMinMaxUTF8Chars(&actEncodedKey[valOffset+validLen],
                                                    valEncodedLength - validLen,
                                                    0,
                                                    isDescending);

                            // limit to the max # of UTF-8 characters, if needed
                            if (pkType->getPrecisionOrMaxNumChars() > 0)
                              {
                                // this time validate the # of chars (likely to be more,
                                // since we filled to the end with non-blanks)
                                validLen = lightValidateUTF8Str(&actEncodedKey[valOffset],
                                                                valEncodedLength,
                                                                pkType->getPrecisionOrMaxNumChars());
                                if (validLen > 0)
                                  // space after valid #chars is filled with blanks
                                  memset(&actEncodedKey[valOffset+validLen], ' ', valEncodedLength-validLen);
                                else
                                  columnIsPartiallyProvided = FALSE;
                              }

                            // undo the inversion, if needed, now for the whole key
                            if (isDescending)
                              for (int k=0; k<valEncodedLength; k++)
                                actEncodedKey[valOffset+k] = ~actEncodedKey[valOffset+k];
                          }
                          break;

                        case CharInfo::ISO88591:
                          // filling with 0x00 or 0xFF should both be ok
                          break;

                        default:
                          // don't accept partial keys for other charsets
                          columnIsPartiallyProvided = FALSE;
                          break;
                        }
                    }
                    break;

                  default:
                    // don't accept partial keys for any other data types
                    columnIsPartiallyProvided = FALSE;
                    break;
                  }

              if (columnIsPartiallyProvided)
                {
                  // a CQD can suppress, give errors, warnings or enable partially provided cols
                  DefaultToken tok = CmpCommon::getDefault(HBASE_RANGE_PARTITIONING_PARTIAL_COLS);

                  switch (tok)
                    {
                    case DF_OFF:
                      // disable use of partial columns
                      // (use this as a workaround if they cause problems)
                      columnIsPartiallyProvided = FALSE;
                      break;

                    case DF_MINIMUM:
                      // give an error (again, this is probably mostly used as a
                      // workaround or to detect past problems)
                      *CmpCommon::diags() << DgSqlCode(-1212) << DgInt0(j);
                      break;

                    case DF_MEDIUM:
                      // give a warning, could be used for searching or testing
                      *CmpCommon::diags() << DgSqlCode(+1212) << DgInt0(j);
                      break;

                    case DF_ON:
                    case DF_MAXIMUM:
                    default:
                      // allow it, no warning or error
                      break;
                    }
                }

              if (columnIsPartiallyProvided)
                // from now on, treat it as if it were fully provided
                numProvidedCols++;
            }

          if (!columnIsPartiallyProvided)
            {
              // This column is not at all provided in the region start key
              // or we decided to erase the partial value.
              // Generate the min/max value for ASC/DESC key columns.
              // NOTE: This is generating un-encoded values, unlike
              // the values we get from HBase. The next loop below
              // will skip decoding for any values generated here.
              Lng32 remainingBufLen = valEncodedLength;

              if (nullHdrSize && !nullHdrAlreadySet)
                {
                  // generate a NULL indicator
                  // NULL (-1) for descending columns, this is the max value
                  // non-NULL (0) for ascending columns, min value is non-null
                  short indicatorVal = (isDescending ? -1 : 0);

                  CMPASSERT(nullHdrSize == sizeof(short));
                  memcpy(&actEncodedKey[currOffset], &indicatorVal, sizeof(indicatorVal));
                }

              pkType->minMaxRepresentableValue(&actEncodedKey[valOffset],
                                               &remainingBufLen,
                                               isDescending,
                                               NULL,
                                               heap);
            }

          currOffset += colEncodedLength;

        } // loop through columns not entirely provided

    } // provided encoded key length < total key length

  // Decode each column of the (now complete) encoded key into a
  // ConstValue and chain the values into an ItemList.
  for (CollIndex c = 0; c < partColArray.entries(); c++)
    {
      const NAType *pkType = partColArray[c]->getType();
      Lng32 decodedValueLen =
        pkType->getNominalSize() + pkType->getSQLnullHdrSize();
      ItemExpr *keyColVal = NULL;

      // does this column need encoding (only if it actually came
      // from an HBase split key, if we made up the value it's
      // already in the decoded format)
      if (pkType->isEncodingNeeded() && c < numProvidedCols)
        {
          encodedKeyP = &actEncodedKey[keyColOffset];

          // for varchar the decoding logic expects the length to be in the first
          // pkType->getVarLenHdrSize() chars, so add it.
          // Please see bug LP 1444134 on how to improve this in the long term.
          // Note that this is less than ideal:
          // - A VARCHAR is really encoded as a fixed char in the key, as
          //   the full length without a length field
          // - Given that an encoded key is not aligned, we should really
          //   consider it a byte string, e.g. a character type with charset
          //   ISO88591, which tolerates any bit patterns. Considering the
          //   enoded key as the same data type as the column causes all kinds
          //   of problems.
          // - The key decode function in the expressions code expects the varchar
          //   length field, even though it is not present in an actual key. So,
          //   we add it here in a separate buffer.
          // - When we generate a ConstValue to represent the decoded key, we also
          //   need to include the length field, with length = max. length
          if (pkType->getTypeName() == "VARCHAR")
            {
              Int32 varLenSize = pkType->getVarLenHdrSize() ;
              Int32 nullHdrSize = pkType->getSQLnullHdrSize();
              // Format of encodedKeyP :| null hdr | varchar data|
              // Format of VarcharStr : | null hdr | var len hdr | varchar data|
              varCharstr = new (heap) char[decodedValueLen + varLenSize];

              if (nullHdrSize > 0)
                str_cpy_all(varCharstr, encodedKeyP, nullHdrSize);

              // careful, this works on little-endian systems only!!
              str_cpy_all(varCharstr+nullHdrSize, (char*) &decodedValueLen,
                          varLenSize);

              str_cpy_all(varCharstr+nullHdrSize+varLenSize,
                          encodedKeyP+nullHdrSize,
                          decodedValueLen-nullHdrSize);

              decodedValueLen += pkType->getVarLenHdrSize();
              encodedKeyP = varCharstr;
            }

          // un-encode the key value by using an expression
          NAString encConstLiteral("encoded_val");
          ConstValue *keyColEncVal =
            new (heap) ConstValue(pkType,
                                  (void *) encodedKeyP,
                                  decodedValueLen,
                                  &encConstLiteral,
                                  heap);
          CMPASSERT(keyColEncVal);

          if (keyColEncVal->isNull())
            {
              // do not call the expression evaluator if the value
              // to be decoded is NULL.
              keyColVal = keyColEncVal ;
            }
          else
            {
              keyColVal =
                new(heap) CompDecode(keyColEncVal,
                                     pkType,
                                     !partColArray.isAscending(c),
                                     decodedValueLen,
                                     CollationInfo::Sort,
                                     TRUE,
                                     heap);

              keyColVal->synthTypeAndValueId();

              keyColVal = keyColVal->evaluate(heap);

              if ( !keyColVal )
                return NULL;
            }
        } // encoded
      else
        {
          // simply use the provided value as the binary value of a constant
          keyColVal =
            new (heap) ConstValue(pkType,
                                  (void *) &actEncodedKey[keyColOffset],
                                  decodedValueLen,
                                  NULL,
                                  heap);
        }

      // this and the above assumes that encoded and unencoded values
      // have the same length
      keyColOffset += decodedValueLen;

      if (pkType->getTypeName() == "VARCHAR")
        {
          keyColOffset -= pkType->getVarLenHdrSize();
          NADELETEBASIC (varCharstr, heap);
          varCharstr = NULL;
        }

      if (result)
        result = new(heap) ItemList(result, keyColVal);
      else
        result = keyColVal;
    }

  // make sure we consumed the entire key but no more than that
  CMPASSERT(keyColOffset == totalKeyLength);

  if (actEncodedKey != encodedKey)
    NADELETEBASIC(actEncodedKey, heap);

  return result;
} // static getRangePartitionBoundaryValuesFromEncodedKeys()
// -----------------------------------------------------------------------
// checkColumnTypeForSupportability()
// Verify that every column in partColArray has a type that is supported
// as a partitioning key; if any is not, issue a diagnostic (using 'key',
// the first-key text, in the message) and return FALSE.
// -----------------------------------------------------------------------
// Returns TRUE when every partitioning key column has a supported type;
// otherwise issues a diagnostic and returns FALSE. 'key' is the
// first-key text included in the 1123 error message.
// (Removed the local 'floatWarningIssued', which was never used.)
NABoolean checkColumnTypeForSupportability(const NAColumnArray & partColArray, const char* key)
{
  for (CollIndex c = 0; c < partColArray.entries(); c++) {
    const NAType *pkType = partColArray[c]->getType();

    // For the EAP release, the unsupported types are the non-standard
    // SQL/MP Datetime types. For the FCS release the unsupported
    // types are the FRACTION only SQL/MP Datetime types.
    //
    // They are (for now) represented as CHAR types that have a
    // non-zero MP Datetime size.
    NABoolean unsupportedPartnKey = FALSE;
    NABoolean unsupportedFloatDatatype = FALSE;

    if (NOT pkType->isSupportedType())
      unsupportedPartnKey = TRUE;
    else if (DFS2REC::isFloat(pkType->getFSDatatype())) {
      // Float partitioning keys are only allowed for MariaQuest,
      // Seabase and Hive tables.
      const NATable * naTable = partColArray[c]->getNATable();

      if ((CmpCommon::getDefault(MARIAQUEST_PROCESS) == DF_OFF) &&
          (NOT naTable->isSeabaseTable()) &&
          (NOT naTable->isHiveTable())) {
        unsupportedPartnKey = TRUE;
        unsupportedFloatDatatype = TRUE;
      }
    }

    if (unsupportedPartnKey) {
      // Get the name of the table which has the unsupported
      // partitioning key column.
      const NAString &tableName =
        partColArray[c]->getNATable()->
          getTableName().getQualifiedNameAsAnsiString();

      if (unsupportedFloatDatatype)
        *CmpCommon::diags()
          << DgSqlCode(-1120);
      else
        // ERROR 1123 Unable to process the partition key values...
        *CmpCommon::diags()
          << DgSqlCode(-1123)
          << DgString0(key)
          << DgTableName(tableName);

      return FALSE;
    }
  }

  return TRUE;
}
// -----------------------------------------------------------------------
// createRangePartitionBoundaries()
// Build the RangePartitionBoundaries for range-partitioned data: for
// each partition, the tuple of maximum permissible values the
// partitioning key columns can contain within that partition.
//
// part_desc_list     : singly-linked partition descriptor list; the
//                      first element must be the primary partition
// numberOfPartitions : number of entries in part_desc_list
// partColArray       : the partitioning key columns
// heap               : heap used for all allocations
//
// Returns NULL on error (unsupported key column type or unparsable
// first-key values); diagnostics will have been set.
// -----------------------------------------------------------------------
#pragma nowarn(1506)   // warning elimination
static RangePartitionBoundaries * createRangePartitionBoundaries
                                     (desc_struct * part_desc_list,
                                      Lng32 numberOfPartitions,
                                      const NAColumnArray & partColArray,
                                      NAMemory* heap)
{
  // ---------------------------------------------------------------------
  // ASSUMPTION: The partitions descriptor list is a singly-linked list
  // ==========  in which the first element is the descriptor for the
  //             first partition and the last element is the descriptor
  //             for the last partition, in partitioning key sequence.
  // ---------------------------------------------------------------------
  desc_struct * partns_desc = part_desc_list;
  CMPASSERT(partns_desc->body.partns_desc.primarypartition);

  // Check all the partitioning keys. If any of them are not
  // supported, issue an error and return.
  //
  // Skip past the primary partition, so that a meaningful first
  // key value can be used for the error message.
  char* key = (partns_desc->header.next) ->body.partns_desc.firstkey;
  if ( !checkColumnTypeForSupportability(partColArray, key) )
    return NULL;

  // ---------------------------------------------------------------------
  // Allocate a new RangePartitionBoundaries.
  // ---------------------------------------------------------------------
  RangePartitionBoundaries * partBounds = new (heap)
    RangePartitionBoundaries
      (numberOfPartitions,
       partColArray.entries(),heap);

  // ---------------------------------------------------------------------
  // Iterate over all the partitions and define the boundary (maximum
  // permissible key values) for each one of them.
  // The first key for the first partition cannot be specified in
  // the CREATE TABLE command. It is therefore stored as an empty
  // string in the SMD.
  // NOTE: The RangePartitionBoundaries is 0 based.
  // ---------------------------------------------------------------------
  partns_desc = partns_desc->header.next; // skip the primary partition
  Lng32 counter = 1;
  char* encodedKey;

  while (partns_desc AND (counter < numberOfPartitions))
    {
      encodedKey = partns_desc->body.partns_desc.encodedkey;
      size_t encodedKeyLen = partns_desc->body.partns_desc.encodedkeylen;

      if(heap != CmpCommon::statementHeap())
        {
          // When the result outlives the statement, copy the encoded
          // key onto the target heap.
          // We don't know here if encodedkey is a regular char or a wchar;
          // if it's a wchar then it should end with "\0\0", so add an extra
          // '\0' to the end, it wont hurt anyways. Copying encodedKeyLen+1 chars
          // will include one '\0' character and we add an extra '\0' to the end
          // to make it "\0\0".
          encodedKey = new(heap) char [encodedKeyLen+2];
          encodedKey[encodedKeyLen] = encodedKey[encodedKeyLen+1] = '\0';
          str_cpy_all(encodedKey, partns_desc->body.partns_desc.encodedkey,
                      encodedKeyLen);
        }

      ItemExpr *rangePartBoundValues = NULL;
      if (partns_desc->body.partns_desc.firstkey)
        // Extract and parse the partition boundary values, producing an
        // ItemExprList of the boundary values.
        rangePartBoundValues = getRangePartitionBoundaryValues(
             partns_desc->body.partns_desc.firstkey,
             partns_desc->body.partns_desc.firstkeylen,
             heap);
      else
        // No text form of the first key available; decode the binary
        // encoded key instead (e.g. HBase region start keys).
        rangePartBoundValues = getRangePartitionBoundaryValuesFromEncodedKeys(
             partColArray,
             encodedKey,
             encodedKeyLen,
             heap);

      // Check to see if the key values parsed successfully.  An error
      // could occur if the table is an MP Table and the first key
      // values contain MP syntax that is not supported by MX.  For
      // instance Datetime literals which do not have the max number
      // of digits in each field. (e.g. DATETIME '1999-2-4' YEAR TO
      // DAY)
      if (rangePartBoundValues == NULL) {
        // Get the name of the table which has the 'bad' first key
        // value.  Use the first entry in the array of partition
        // columns (partColArray) to get to the NATable object.
        const NAString &tableName =
          partColArray[0]->getNATable()->
            getTableName().getQualifiedNameAsAnsiString();

        // The Parser will have already issued an error.
        // ERROR 1123 Unable to process the partition key values...
        *CmpCommon::diags()
          << DgSqlCode(-1123)
          << DgString0(partns_desc->body.partns_desc.firstkey)
          << DgTableName(tableName);
        delete partBounds;
        //coverity[leaked_storage]
        return NULL;
      }

      partBounds->defineUnboundBoundary(
           counter++,
           rangePartBoundValues,
           encodedKey);

      partns_desc = partns_desc->header.next;
    } // end while (partns_desc)

  // ---------------------------------------------------------------------
  // Before doing consistency check setup for the statement
  // ---------------------------------------------------------------------
  partBounds->setupForStatement(FALSE);

  // ---------------------------------------------------------------------
  // Perform a consistency check to ensure that a boundary was defined
  // for each partition.
  // ---------------------------------------------------------------------
  partBounds->checkConsistency(numberOfPartitions);

  return partBounds;

} // static createRangePartitionBoundaries()
#pragma warn(1506)  // warning elimination
// -----------------------------------------------------------------------
// createRangePartitioningFunction()
// Build the PartitioningFunction for a range-partitioned table from its
// partition descriptor list; returns NULL if the partition boundaries
// cannot be built (diagnostics set by createRangePartitionBoundaries).
// -----------------------------------------------------------------------
static PartitioningFunction * createRangePartitioningFunction
                                 (desc_struct * part_desc_list,
                                  const NAColumnArray & partKeyColArray,
                                  NodeMap* nodeMap,
                                  NAMemory* heap)
{
  // Count the entries of the singly-linked descriptor list; a table
  // always has at least one partition.
  Lng32 numberOfPartitions = 0;
  for (desc_struct *p = part_desc_list; p != NULL; p = p->header.next)
    numberOfPartitions++;
  numberOfPartitions = MAXOF(1, numberOfPartitions);

  // A single partition needs no range boundaries.
  if (numberOfPartitions == 1)
    return new (heap) SinglePartitionPartitioningFunction(nodeMap, heap);

  // Build the partitioning key ranges. This can fail if a partitioning
  // key column has an unsupported type (e.g. the non-standard SQL/MP
  // datetime types, or FRACTION-only datetimes) or the stored first-key
  // text uses MP syntax that MX cannot parse (e.g. DATETIME '1999-2-4'
  // YEAR TO DAY). In that case an error has already been issued by
  // createRangePartitionBoundaries and we simply return NULL.
  RangePartitionBoundaries *boundaries =
    createRangePartitionBoundaries(part_desc_list,
                                   numberOfPartitions,
                                   partKeyColArray,
                                   heap);
  if (boundaries == NULL)
    return NULL;

  return new (heap) RangePartitioningFunction(boundaries, // memory leak??
                                              nodeMap, heap);
} // static createRangePartitioningFunction()
// -----------------------------------------------------------------------
// createRoundRobinPartitioningFunction()
// Build the PartitioningFunction for a round-robin partitioned table.
// -----------------------------------------------------------------------
// LCOV_EXCL_START :cnu
static PartitioningFunction * createRoundRobinPartitioningFunction
                                 (desc_struct * part_desc_list,
                                  NodeMap* nodeMap,
                                  NAMemory* heap)
{
  // Count the entries of the singly-linked descriptor list; a table
  // always has at least one partition.
  Lng32 numberOfPartitions = 0;
  for (desc_struct *p = part_desc_list; p != NULL; p = p->header.next)
    numberOfPartitions++;
  numberOfPartitions = MAXOF(1, numberOfPartitions);

  // Unlike the other schemes, round robin keeps its partitioning
  // function even for a single partition: the SYSKEY must be generated
  // for round robin, and that is triggered off the partitioning
  // function.
  return new (heap) RoundRobinPartitioningFunction(numberOfPartitions, nodeMap, heap);
} // static createRoundRobinPartitioningFunction()
// LCOV_EXCL_STOP
// -----------------------------------------------------------------------
// createHashDistPartitioningFunction()
// Build the PartitioningFunction for a hash-dist partitioned table.
// -----------------------------------------------------------------------
static PartitioningFunction * createHashDistPartitioningFunction
                                 (desc_struct * part_desc_list,
                                  const NAColumnArray & partKeyColArray,
                                  NodeMap* nodeMap,
                                  NAMemory* heap)
{
  // Count the descriptor list entries; at least one partition per table.
  Lng32 numberOfPartitions = 0;
  for (desc_struct *p = part_desc_list; p != NULL; p = p->header.next)
    numberOfPartitions++;
  numberOfPartitions = MAXOF(1, numberOfPartitions);

  // A single partition degenerates to the trivial partitioning function.
  return (numberOfPartitions == 1)
    ? (PartitioningFunction *)
        new (heap) SinglePartitionPartitioningFunction(nodeMap, heap)
    : (PartitioningFunction *)
        new (heap) HashDistPartitioningFunction(numberOfPartitions, nodeMap, heap);
} // static createHashDistPartitioningFunction()
// -----------------------------------------------------------------------
// createHash2PartitioningFunction()
// Build the PartitioningFunction for a hash2-partitioned table from its
// partition descriptor list.
// -----------------------------------------------------------------------
static PartitioningFunction * createHash2PartitioningFunction
                                 (desc_struct * part_desc_list,
                                  const NAColumnArray & partKeyColArray,
                                  NodeMap* nodeMap,
                                  NAMemory* heap)
{
  // Count the descriptor list entries; at least one partition per table.
  Lng32 numberOfPartitions = 0;
  for (desc_struct *p = part_desc_list; p != NULL; p = p->header.next)
    numberOfPartitions++;
  numberOfPartitions = MAXOF(1, numberOfPartitions);

  // A single partition degenerates to the trivial partitioning function.
  return (numberOfPartitions == 1)
    ? (PartitioningFunction *)
        new (heap) SinglePartitionPartitioningFunction(nodeMap, heap)
    : (PartitioningFunction *)
        new (heap) Hash2PartitioningFunction(numberOfPartitions, nodeMap, heap);
} // static createHash2PartitioningFunction()
// Overload taking an explicit partition count instead of a descriptor
// list; builds the hash2 (or trivial single-partition) function.
static PartitioningFunction * createHash2PartitioningFunction
                                 (Int32 numberOfPartitions,
                                  const NAColumnArray & partKeyColArray,
                                  NodeMap* nodeMap,
                                  NAMemory* heap)
{
  // Every table has at least one partition; one partition means the
  // trivial (single-partition) partitioning function.
  return (numberOfPartitions == 1)
    ? (PartitioningFunction *)
        new (heap) SinglePartitionPartitioningFunction(nodeMap, heap)
    : (PartitioningFunction *)
        new (heap) Hash2PartitioningFunction(numberOfPartitions, nodeMap, heap);
} // static createHash2PartitioningFunction()
// Build a NodeMap for an HBase table: one entry per salt bucket when
// the table is salted, else one per region. When ESP colocation is
// enabled and the bucket/region counts are compatible, populate the
// entries with the node numbers of the region servers.
static
NodeMap* createNodeMapForHbase(desc_struct* desc, const NATable* table,
                               int numSaltBuckets, NAMemory* heap)
{
  // Count the region descriptors.
  Int32 numRegions = 0;
  for (desc_struct* hrk = desc; hrk != NULL; hrk = hrk->header.next)
    numRegions++;

  // Salted tables get one map entry per salt bucket.
  Int32 partns = (numSaltBuckets <= 1) ? numRegions : numSaltBuckets;

  NodeMap* nodeMap = new (heap)
      NodeMap(heap, partns, NodeMapEntry::ACTIVE, NodeMap::HBASE);

  // Get node names of region servers by making a JNI call; do it only
  // for multi-partition tables.
  // TBD: co-location for tables where # of salt buckets and # regions don't match
  NABoolean wantColocation =
    (partns > 1) &&
    (CmpCommon::getDefault(TRAF_ALLOW_ESP_COLOCATION) == DF_ON) &&
    (numSaltBuckets <= 1 || numSaltBuckets == numRegions);

  if (wantColocation) {
    ARRAY(const char *) nodeNames(heap, partns);
    if (table->getRegionsNodeName(partns, nodeNames)) {
      for (Int32 p = 0; p < partns; p++) {
        NAString node(nodeNames[p], heap);
        // strip anything after the bare host name
        size_t dotPos = node.index('.');
        if (dotPos && dotPos != NA_NPOS)
          node.remove(dotPos);
        // populate the NodeMap with region server node ids
        nodeMap->setNodeNumber(p, nodeMap->mapNodeNameToNodeNum(node));
      }
    }
  }

  return nodeMap;
}
// Build the hash2 partitioning function for an HBase table: first
// create a node map (one entry per salt bucket, or per region when the
// table is not salted), then wrap it in a Hash2PartitioningFunction,
// or a SinglePartitionPartitioningFunction when there is only one
// partition.
// (Removed the local 'hrk', which was assigned but never used.)
static
PartitioningFunction*
createHash2PartitioningFunctionForHBase(desc_struct* desc,
                                        const NATable * table,
                                        int numSaltBuckets,
                                        NAMemory* heap)
{
  NodeMap* nodeMap = createNodeMapForHbase(desc, table, numSaltBuckets, heap);

  Int32 partns = nodeMap->getNumEntries();

  PartitioningFunction* partFunc;
  if ( partns > 1 )
    partFunc = new (heap) Hash2PartitioningFunction(partns, nodeMap, heap);
  else
    partFunc = new (heap) SinglePartitionPartitioningFunction(nodeMap, heap);

  return partFunc;
}
// -----------------------------------------------------------------------
// createRangePartitionBoundariesFromStats()
// This method is used for creating a tuple, which defines the maximum
// permissible values that the partitioning key columns can contain
// within a certain partition, for range-partitioned data.
//
// The boundary values of the range partitions are completely defined by
// a histogram's interval boundary values.
//
// -----------------------------------------------------------------------
#pragma nowarn(1506) // warning elimination
// Build RangePartitionBoundaries from a histogram: one boundary per
// histogram interval, each encoded from the interval's high bound.
//
// idesc                       : index descriptor (currently unused here)
// hist                        : histogram supplying the interval bounds
// numberOfPartitions          : must equal hist->numIntervals()
// partColArray                : the partitioning key columns
// partitioningKeyColumnsOrder : key column order, used to complete the
//                               first/last boundaries
// statsColsCount              : number of columns covered by the stats
// heap                        : heap used for all allocations
//
// Returns NULL on unsupported key types, a partition/interval count
// mismatch, a failed literal parse, or a failed key encoding.
//
// Fixes relative to the previous version:
// - encoded key bytes (which routinely contain embedded NULs) were
//   accumulated into an NAString with operator+=, which truncates at
//   the first NUL while the later memcpy still copied
//   totalEncodedKeyLength bytes (potential over-read); the bytes are
//   now assembled with length-aware copies,
// - the assembled key buffer is allocated on the caller's heap instead
//   of global operator new, and only when it is actually used,
// - a NULL result from getRangePartitionBoundaryValues() is now
//   guarded instead of being dereferenced.
RangePartitionBoundaries * createRangePartitionBoundariesFromStats
                            (const IndexDesc* idesc,
                             HistogramSharedPtr& hist,
                             Lng32 numberOfPartitions,
                             const NAColumnArray & partColArray,
                             const ValueIdList& partitioningKeyColumnsOrder,
                             const Int32 statsColsCount,
                             NAMemory* heap)
{
  if ( (!checkColumnTypeForSupportability(partColArray, "")) ||
       (numberOfPartitions != hist->numIntervals()) ||
       (partColArray.entries() < statsColsCount)
     )
     return NULL;

  // ---------------------------------------------------------------------
  // Allocate a new RangePartitionBoundaries.
  // ---------------------------------------------------------------------
  RangePartitionBoundaries * partBounds = new (heap)
    RangePartitionBoundaries
      (numberOfPartitions,
       partColArray.entries(),heap);

  // ---------------------------------------------------------------------
  // Iterate over the histogram intervals and define one boundary (the
  // maximum permissible key values) per partition.
  // NOTE: The RangePartitionBoundaries is 0 based.
  // ---------------------------------------------------------------------
  Lng32 counter = 1;
  ULng32 totalEncodedKeyLength = 0;
  const CollIndex numKeyCols = partColArray.entries();

  Interval iter = hist->getFirstInterval();

  while ( iter.isValid() ) {

    totalEncodedKeyLength = 0;
    NAString* evInStr = NULL;

    NAColumn* ncol = partColArray[0];
    const NAType* nt = ncol->getType();

    // For the last interval use the max value of the leading key
    // column's type instead of the interval's high bound.
    double ev = ( !iter.isLast() ) ?
                iter.hiBound().getDblValue() : nt->getMaxValue();

    if ((numKeyCols == 1) && (statsColsCount == 1))
      {
        // Convert the double into a string value of the type of
        // the leading key column
        evInStr = nt->convertToString(ev, heap);
      }
    else if ((numKeyCols > 1) && (statsColsCount == 1))
      {
        // Single-column stats on a multi-column key: expand the single
        // encoded value to a full boundary.
        MCboundaryValueList mcEv;
        mcEv.insert(EncodedValue(ev));
        evInStr = mcEv.convertToString(partColArray, iter.isLast());
      }
    else // numKeyCols > 1 && statsColsCount > 1
      {
        MCboundaryValueList mcEv = iter.hiMCBound();
        evInStr = mcEv.convertToString(partColArray, iter.isLast());
      }

    if ( !evInStr )
      return NULL;

    // Construct a boundary as an ItemExprList of ConstValues
    ItemExpr* rangePartBoundValues = getRangePartitionBoundaryValues(
         evInStr->data(), evInStr->length(), heap, CharInfo::ISO88591);

    // Guard against a parse failure; the code below would otherwise
    // dereference a NULL pointer.
    if ( !rangePartBoundValues )
      return NULL;

    ItemExprList* list =
      new (heap) ItemExprList(rangePartBoundValues, heap, ITM_ITEM_LIST, FALSE);

    // Encode each key column value separately and remember the
    // per-column buffers; they are assembled with length-aware copies
    // below because encoded keys may contain embedded NUL bytes.
    char** colEncodedBufs = new (heap) char*[numKeyCols];
    ULng32* colEncodedLens = new (heap) ULng32[numKeyCols];

    for (CollIndex c = 0; c < numKeyCols; c++)
      {
        NAColumn* keyCol = partColArray[c];
        const NAType* keyColType = keyCol->getType();

        // A single-value boundary list has only one element.
        ItemExpr* val = NULL;
        if (rangePartBoundValues->getOperatorType() == ITM_ITEM_LIST )
          val = (ItemExpr*) (*list) [c];
        else
          val = (ItemExpr*) (*list) [0];

        ItemExpr* encodeExpr = NULL;
        if (keyColType->isEncodingNeeded())
          encodeExpr = new(heap) CompEncode(val, !(partColArray.isAscending(c)));
        else
          encodeExpr = val;

        encodeExpr->synthTypeAndValueId();
        const NAType& eeNT = encodeExpr->getValueId().getType();
        ULng32 encodedKeyLength = eeNT.getEncodedKeyLength();
        char* encodedKeyBuffer = new (heap) char[encodedKeyLength];

        Lng32 offset;
        Lng32 length;
        ValueIdList vidList;
        short ok = vidList.evaluateTree(encodeExpr,
                                        encodedKeyBuffer,
                                        encodedKeyLength,
                                        &length,
                                        &offset,
                                        (CmpCommon::diags()));
        if ( ok != 0 )
          return NULL;

        colEncodedBufs[c] = encodedKeyBuffer;
        colEncodedLens[c] = encodedKeyLength;
        totalEncodedKeyLength += encodedKeyLength;
      }

    if (totalEncodedKeyLength != 0)
      {
        // Assemble the per-column buffers into one encoded key on the
        // caller's heap (the buffer is retained by partBounds).
        char* char_totalEncodedKeyBuf = new (heap) char[totalEncodedKeyLength];
        ULng32 keyOffset = 0;
        for (CollIndex c = 0; c < numKeyCols; c++)
          {
            str_cpy_all(&char_totalEncodedKeyBuf[keyOffset],
                        colEncodedBufs[c], colEncodedLens[c]);
            keyOffset += colEncodedLens[c];
          }

        partBounds->defineUnboundBoundary(
             counter++,
             rangePartBoundValues,
             char_totalEncodedKeyBuf);
      }

    iter.next();
  }

  // ---------------------------------------------------------------------
  // Before doing consistency check setup for the statement
  // ---------------------------------------------------------------------
  partBounds->setupForStatement(FALSE);

  // ---------------------------------------------------------------------
  // Perform a consistency check to ensure that a boundary was defined
  // for each partition.
  // ---------------------------------------------------------------------
  partBounds->checkConsistency(numberOfPartitions);

  // -----------------------------------------------------------------
  // Add the first and the last boundary (0 and numberOfPartitions)
  // at the ends that do not separate two partitions
  // -----------------------------------------------------------------
  partBounds->completePartitionBoundaries(
       partitioningKeyColumnsOrder,
       totalEncodedKeyLength);

  return partBounds;

} // createRangePartitionBoundariesFromStats()
#pragma warn(1506) // warning elimination
// -----------------------------------------------------------------------
// Build a range partitioning function for an HBase table that has only a
// single region.
//
// When CQD HBASE_USE_FAKED_REGIONS is 0, the table is treated as a single
// partition.  Otherwise the key space is split into that many evenly
// spaced "faked" ranges (min..max of each key column, divided evenly),
// so costing can proceed as if the table were range partitioned.
//
// partKeyColArray [IN] : the partitioning key columns
// heap            [IN] : heap for all allocations
// -----------------------------------------------------------------------
static
PartitioningFunction*
createRangePartitioningFunctionForSingleRegionHBase(
     const NAColumnArray & partKeyColArray,
     NAMemory* heap
     )
{
   NodeMap* nodeMap = NULL;
   Lng32 regionsToFake =
      (ActiveSchemaDB()->getDefaults()).getAsLong(HBASE_USE_FAKED_REGIONS);
   if ( regionsToFake == 0 ) {
     // Faking disabled: one region maps to one partition.
     nodeMap = new (heap)
        NodeMap(heap, 1, NodeMapEntry::ACTIVE, NodeMap::HBASE);
     return new (heap) SinglePartitionPartitioningFunction(nodeMap, heap);
   }
   nodeMap = new (heap)
        NodeMap(heap, regionsToFake, NodeMapEntry::ACTIVE, NodeMap::HBASE);
   //
   // Setup an array of doubles to record the next begin key value for
   // each key column. Needed when the table has a single region.
   // The number ranges is controlled by CQD HBASE_USE_FAKED_REGIONS.
   //
   // Later on, we can make smart split utilizing the stats.
   //
   Int32 keys = partKeyColArray.entries();
   double* firstkeys = new (heap) double[keys];
   double* steps = new (heap) double[keys];
   // Initialize each key column's begin value to its type's minimum, and
   // compute the per-region increment from the type's (min, max) range.
   for ( Int32 i=0; i<keys; i++ ) {
      double min = partKeyColArray[i]->getType()->getMinValue();
      double max = partKeyColArray[i]->getType()->getMaxValue();
      firstkeys[i] = partKeyColArray[i]->getType()->getMinValue();
      steps[i] = (max - min) / regionsToFake;
   }
   // Build one partns_desc node per faked region, chained via header.next.
   struct desc_struct* head = NULL;
   struct desc_struct* tail = NULL;
   Int32 i=0;
   for ( i=0; i<regionsToFake; i++ ) {
      if ( tail == NULL ) {
         head = tail = new (heap) struct desc_struct;
         // to satisfy createRangePartitionBoundaries() in NATable.cpp
         tail->body.partns_desc.primarypartition = 1;
      } else {
         tail->header.next = new (heap) struct desc_struct;
         tail = tail->header.next;
      }
      tail->header.next = NULL;
      // Render this region's begin key as text: (v1,v2,...,vn)
      // NOTE: this inner loop index intentionally shadows the outer 'i'.
      NAString firstkey('(');
      for ( Int32 i=0; i<keys; i++ ) {
          double v = firstkeys[i];
          NAString* v_str = partKeyColArray[i]->getType()->convertToString(v,heap);
          // If for some reason we can not make the conversion, we
          // return a single-part func.
          if ( !v_str ) {
             nodeMap = new (heap)
                NodeMap(heap, 1, NodeMapEntry::ACTIVE, NodeMap::HBASE);
             return new (heap) SinglePartitionPartitioningFunction(nodeMap, heap);
          }
          firstkey.append(*v_str);
          if ( i < keys-1 )
             firstkey.append(',');
          // Prepare for the next range
          firstkeys[i] += steps[i];
      }
      firstkey.append(')');
      Int32 len = firstkey.length();
      tail->body.partns_desc.firstkeylen = len;
      tail->body.partns_desc.firstkey = new (heap) char[len];
      memcpy(tail->body.partns_desc.firstkey, firstkey.data(), len);
      // For now, assume firstkey == encodedkey
      tail->body.partns_desc.encodedkeylen = len;
      tail->body.partns_desc.encodedkey = new (heap) char[len];
      memcpy(tail->body.partns_desc.encodedkey, firstkey.data(), len);
   }
   //
   return createRangePartitioningFunction
      (head,
       partKeyColArray,
       nodeMap,
       heap);
}
// Fill in the encoded begin key of one partition descriptor.
// A partition's begin key is the *end* key of the previous HBase region
// (HBase reports end keys); the very first partition begins at an
// all-zero key of the same length as the first region's end key.
void
populatePartnDescOnEncodingKey( struct desc_struct* prevEndKey,
                                struct desc_struct* tail,
                                struct desc_struct* hrk,
                                NAMemory* heap)
{
  // Length comes from the previous region's end key, or from this
  // region's end key for the first partition.
  Int32 len = prevEndKey ? prevEndKey->body.hbase_region_desc.endKeyLen
                         : hrk->body.hbase_region_desc.endKeyLen;

  char* key = new (heap) char[len];

  if (prevEndKey)
    // For HBase regions we only have the encoded key, not the text
    // representation (value, value, ... value) of the boundary.
    memcpy(key, prevEndKey->body.hbase_region_desc.endKey, len);
  else
    memset(key, 0, len);

  tail->body.partns_desc.encodedkeylen = len;
  tail->body.partns_desc.encodedkey = key;
}
// Fill in the key material of one partition descriptor from the region's
// raw begin key, rendered as the text boundary ('<key>').  The same text
// is stored as both the first key and the encoded key.
void
populatePartnDescOnFirstKey( struct desc_struct* ,
                             struct desc_struct* tail,
                             struct desc_struct* hrk,
                             NAMemory* heap)
{
  NAString firstkey('(');
  firstkey.append('\'');
  firstkey.append(hrk->body.hbase_region_desc.beginKey,
                  hrk->body.hbase_region_desc.beginKeyLen);
  firstkey.append('\'');
  firstkey.append(')');

  Int32 keyLen = firstkey.length();

  char* firstKeyBuf = new (heap) char[keyLen];
  memcpy(firstKeyBuf, firstkey.data(), keyLen);
  tail->body.partns_desc.firstkeylen = keyLen;
  tail->body.partns_desc.firstkey = firstKeyBuf;

  char* encodedKeyBuf = new (heap) char[keyLen];
  memcpy(encodedKeyBuf, firstkey.data(), keyLen);
  tail->body.partns_desc.encodedkeylen = keyLen;
  tail->body.partns_desc.encodedkey = encodedKeyBuf;
}
typedef void (*populatePartnDescT)( struct desc_struct* prevEndKey,
struct desc_struct* tail,
struct desc_struct* hrk,
NAMemory* heap);
// Convert a chain of HBase region descriptors into a chain of partition
// descriptors (DESC_PARTNS_TYPE).  'funcPtr' decides how each partition's
// key material is populated (raw first key vs. encoded key).  The previous
// region's descriptor is passed along because a partition's begin key is
// the previous region's end key.  Returns the head of the new chain, or
// NULL for an empty input chain.
static struct desc_struct*
convertRangeDescToPartnsDesc(desc_struct* desc, populatePartnDescT funcPtr, NAMemory* heap)
{
   desc_struct* hrk = desc;
   desc_struct* prevEndKey = NULL;
   struct desc_struct* head = NULL;
   struct desc_struct* tail = NULL;
   while ( hrk ) {
     struct desc_struct *newNode = new (heap) struct desc_struct;
     memset(&newNode->header, 0, sizeof(newNode->header));
     // Zero the node just allocated.  (The original took
     // sizeof(tail->body.partns_desc), which only worked because sizeof
     // does not evaluate its operand while tail was still NULL.)
     memset(&newNode->body.partns_desc, 0, sizeof(newNode->body.partns_desc));
     newNode->header.nodetype = DESC_PARTNS_TYPE;
     if ( tail == NULL ) {
        head = tail = newNode;
        // to satisfy createRangePartitionBoundaries() in NATable.cpp
        tail->body.partns_desc.primarypartition = 1;
     } else {
        tail->header.next = newNode;
        tail = tail->header.next;
     }
     (*funcPtr)(prevEndKey, tail, hrk, heap);
     prevEndKey = hrk;
     hrk = hrk->header.next;
   }
   return head;
}
// Build a range partitioning function for an HBase table with more than
// one region.  Cell/row access tables carry the raw begin key as text;
// other tables carry an encoded key.  'partns' is accepted for interface
// compatibility with the caller but is not consulted here.
static
PartitioningFunction*
createRangePartitioningFunctionForMultiRegionHBase(Int32 partns,
                                                   desc_struct* desc,
                                                   const NATable* table,
                                                   const NAColumnArray & partKeyColArray,
                                                   NAMemory* heap)
{
  NodeMap* nodeMap = createNodeMapForHbase(desc, table, -1, heap);

  struct desc_struct* partns_desc = NULL;
  if ( table->isHbaseCellTable() || table->isHbaseRowTable() )
    partns_desc = convertRangeDescToPartnsDesc(desc, populatePartnDescOnFirstKey, heap);
  else
    partns_desc = convertRangeDescToPartnsDesc(desc, populatePartnDescOnEncodingKey, heap);

  return createRangePartitioningFunction(partns_desc,
                                         partKeyColArray,
                                         nodeMap,
                                         heap);
}
// Count the nodes in a desc_struct chain (linked via header.next).
Int32 findDescEntries(desc_struct* desc)
{
   Int32 partns = 0;
   for (desc_struct* node = desc; node; node = node->header.next)
      partns++;
   return partns;
}
//
// A single entry point to figure out range partition function for
// Hbase.
//
// Single entry point for deciding the range partitioning function of an
// HBase table.  With CQD HBASE_RANGE_PARTITIONING OFF the table is treated
// as one region; otherwise the region descriptor chain is counted and the
// multi- or single-region variant is chosen accordingly.
static
PartitioningFunction*
createRangePartitioningFunctionForHBase(desc_struct* desc,
                                        const NATable* table,
                                        const NAColumnArray & partKeyColArray,
                                        NAMemory* heap)
{
   Int32 partns =
      (CmpCommon::getDefault(HBASE_RANGE_PARTITIONING) == DF_OFF)
         ? 1
         : findDescEntries(desc);

   if (partns > 1)
      return createRangePartitioningFunctionForMultiRegionHBase(
                 partns, desc, table, partKeyColArray, heap);

   return createRangePartitioningFunctionForSingleRegionHBase(
              partKeyColArray, heap);
}
// Build the partitioning function for a Hive table.  Every table has at
// least one partition; a single partition degenerates to a
// SinglePartitionPartitioningFunction.
static PartitioningFunction * createHivePartitioningFunction
                                (Int32 numberOfPartitions,
                                 const NAColumnArray & partKeyColArray,
                                 NodeMap* nodeMap,
                                 NAMemory* heap)
{
  return (numberOfPartitions == 1)
    ? (PartitioningFunction *)
        new (heap) SinglePartitionPartitioningFunction(nodeMap, heap)
    : (PartitioningFunction *)
        new (heap) HivePartitioningFunction(numberOfPartitions, nodeMap, heap);
} // static createHivePartitioningFunction()
// -----------------------------------------------------------------------
// createNodeMap()
// This method is used for creating a node map for all DP2 partitions of
// associated with this table or index.
// -----------------------------------------------------------------------
#pragma nowarn(1506) // warning elimination
static void createNodeMap (desc_struct* part_desc_list,
                           NodeMap* nodeMap,
                           NAMemory* heap,
                           char * tableName,
                           Int32 tableIdent)
{
  // part_desc_list [IN]     : partition descriptors; may be NULL
  // nodeMap        [IN/OUT] : node map being populated, one entry per
  //                           partition
  // heap           [IN]     : heap for all allocations
  // tableName      [IN]     : used for the single entry when no
  //                           partitions are supplied
  // tableIdent     [IN]     : identifier stored in each entry and in the
  //                           node map itself
  // ---------------------------------------------------------------------
  // Loop over all partitions creating a DP2 node map entry for each
  // partition.
  // ---------------------------------------------------------------------
  desc_struct* partns_desc = part_desc_list;
  CollIndex currentPartition = 0;
  if(NOT partns_desc)
  {
    NodeMapEntry entry =
      NodeMapEntry(tableName,NULL,heap,tableIdent);
    nodeMap->setNodeMapEntry(currentPartition,entry,heap);
  }
  else{
    while (partns_desc)
    {
      NodeMapEntry entry(partns_desc->body.partns_desc.partitionname,
                         partns_desc->body.partns_desc.givenname,
                         heap,tableIdent);
      nodeMap->setNodeMapEntry(currentPartition,entry,heap);
      partns_desc = partns_desc->header.next;
      currentPartition++;
    }
  }
  // -------------------------------------------------------------------
  // If no partitions supplied, create a single partition node map with
  // a dummy entry.
  // -------------------------------------------------------------------
  if (nodeMap->getNumEntries() == 0)
  {
    NodeMapEntry entry(NodeMapEntry::ACTIVE);
    nodeMap->setNodeMapEntry(0,entry,heap);
  }
  // -------------------------------------------------------------------
  // Set the tableIndent into the nodemap itself.
  // -------------------------------------------------------------------
  nodeMap->setTableIdent(tableIdent);
  // -----------------------------------------------------------------------
  // See if we need to build a bogus node map with fake volume assignments.
  // This will allow us to fake costing code into believing that all
  // partitions are distributed evenly among SMP nodes in the cluster.
  // -----------------------------------------------------------------------
  if (CmpCommon::getDefault(FAKE_VOLUME_ASSIGNMENTS) == DF_ON)
  {
    // --------------------------------------------------------------------
    // Extract number of SMP nodes in the cluster from the defaults table.
    // --------------------------------------------------------------------
    NADefaults &defs = ActiveSchemaDB()->getDefaults();
    CollIndex numOfSMPs = gpClusterInfo->numOfSMPs();
    if(CURRSTMT_OPTDEFAULTS->isFakeHardware())
    {
      numOfSMPs = defs.getAsLong(DEF_NUM_NODES_IN_ACTIVE_CLUSTERS);
    }
    // ------------------------------------------------------------------
    // Determine how many node map entries will be assigned a particular
    // node, and also calculate if there are any remaining entries.
    // ------------------------------------------------------------------
    CollIndex entriesPerNode = nodeMap->getNumEntries() / numOfSMPs;
    CollIndex entriesRemaining = nodeMap->getNumEntries() % numOfSMPs;
    // ----------------------------------------------------------------
    // Assign each node to consecutive entries such that each node has
    // approximately the same number of entries.
    //
    // Any extra entries get assigned evenly to the last remaining
    // nodes. For example, if the cluster has 5 nodes and the node map
    // has 23 entries, we would assign nodes to entries as follows:
    //
    // Entries 0 - 3 to node 0. (4 entries)
    // Entries 4 - 7 to node 1. (4 entries)
    // Entries 8 - 12 to node 2. (5 entries)
    // Entries 13 - 17 to node 3. (5 entries)
    // Entries 18 - 22 to node 4. (5 entries)
    // ----------------------------------------------------------------
    CollIndex mapIdx = 0;
    for (CollIndex nodeIdx = 0; nodeIdx < numOfSMPs; nodeIdx++)
    {
      // The last 'entriesRemaining' nodes each take one extra entry.
      if (nodeIdx == numOfSMPs - entriesRemaining)
      {
        entriesPerNode += 1;
      }
      for (CollIndex entryIdx = 0; entryIdx < entriesPerNode; entryIdx++)
      {
        nodeMap->setNodeNumber(mapIdx,nodeIdx);
        mapIdx += 1;
      }
    }
  }
} // static createNodeMap()
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
// Overload for Hive tables: create one node map entry per hive storage
// descriptor (partition file).
//
// hvt_desc   [IN]     : hive table descriptor; its SD chain is walked
// nodeMap    [IN/OUT] : must be a NodeMap::HIVE map; receives the entries
// heap       [IN]     : heap for all allocations
// tableName  [IN]     : unused, kept for signature parity with the
//                       desc_struct overload
// tableIdent [IN]     : identifier stored in the node map
static void createNodeMap (hive_tbl_desc* hvt_desc,
                           NodeMap* nodeMap,
                           NAMemory* heap,
                           char * tableName,
                           Int32 tableIdent)
{
  // ---------------------------------------------------------------------
  // Loop over all hive storage (partition file ) creating a node map
  // entry for each partition.
  // ---------------------------------------------------------------------
  CMPASSERT(nodeMap->type() == NodeMap::HIVE);
  hive_sd_desc* sd_desc = hvt_desc->getSDs();
  CollIndex currentPartition = 0;
  while (sd_desc)
  {
    HiveNodeMapEntry entry(NodeMapEntry::ACTIVE, heap);
    nodeMap->setNodeMapEntry(currentPartition++,entry,heap);
    sd_desc = sd_desc->next_;
  }
  // -------------------------------------------------------------------
  // If no partitions supplied, create a single partition node map with
  // a dummy entry.
  // -------------------------------------------------------------------
  if (nodeMap->getNumEntries() == 0)
  {
    HiveNodeMapEntry entry(NodeMapEntry::ACTIVE, heap);
    nodeMap->setNodeMapEntry(0,entry,heap);
  }
  // -------------------------------------------------------------------
  // Set the tableIdent into the nodemap itself.
  // -------------------------------------------------------------------
  nodeMap->setTableIdent(tableIdent);
  // No fake volume assignment because Hive's partitions are not hash
  // based; there is no balancing of data among all partitions.
} // static createNodeMap()
#pragma warn(1506) // warning elimination
//-------------------------------------------------------------------------
// This function checks if a table/index or any of its partitions are
// remote. This is required to determine the size of the EidRootBuffer
// to be sent to DP2 - Expand places limits on the size of messages
// - approx 31000 for messages to remote nodes, and 56000 for messages
// on the local node.
//-------------------------------------------------------------------------
#pragma nowarn(262) // warning elimination
static NABoolean checkRemote(desc_struct* part_desc_list,
                             char * tableName)
{
  // Stub: both parameters are unused and the answer is always TRUE,
  // i.e. every table/partition is treated as remote here — presumably so
  // callers always size the EidRootBuffer for the smaller remote-node
  // message limit described above.  NOTE(review): confirm against callers.
  return TRUE;
}
#pragma warn(262) // warning elimination
// warning elimination (removed "inline")
// Produce the table's name for diagnostics: prefer the NATable's ANSI
// name, fall back to the name recorded in the column descriptor, and
// finally to an empty string.
static NAString makeTableName(const NATable *table,
                              const columns_desc_struct *column_desc)
{
  if (table)
    return NAString(
      table->getTableName().getQualifiedNameAsAnsiString().data());
  if (column_desc->tablename)
    return NAString(column_desc->tablename);
  return NAString("");
}
// warning elimination (removed "inline")
// Produce "TBL.COL" for diagnostics when a table name is available,
// otherwise just "COL".
static NAString makeColumnName(const NATable *table,
                               const columns_desc_struct *column_desc)
{
  NAString result(makeTableName(table, column_desc));
  if (!result.isNull())
    result += ".";
  result += column_desc->colname;
  return result;
}
// -----------------------------------------------------------------------
// Method for creating NAType from desc_struct.
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
// Construct the NAType for one column descriptor from its datatype,
// length, precision, scale, charset and datetime/interval fields.
//
// column_desc [IN]  : metadata descriptor for the column; its
//                     defaultClass may be cleared (COM_NO_DEFAULT) for
//                     unsupported datetime/interval combinations
// table       [IN]  : owning NATable, may be NULL (used for error text)
// type        [OUT] : receives the newly heap-allocated NAType
// heap        [IN]  : heap for the allocation
// errorCode   [OUT] : optional.  When NULL, errors are reported into the
//                     diags area; otherwise only the (positive) SQLCODE
//                     is returned through this pointer.
//
// Returns TRUE on error, FALSE on success.
// -----------------------------------------------------------------------
NABoolean createNAType(columns_desc_struct *column_desc /*IN*/,
                       const NATable *table /*IN*/,
                       NAType *&type /*OUT*/,
                       NAMemory *heap /*IN*/,
                       Lng32 * errorCode
                       )
{
  //
  // Compute the NAType for this column
  //
  // Collapse all interval subtypes into the single REC_INTERVAL case
  // label of the switch below.
  #define REC_INTERVAL REC_MIN_INTERVAL
  DataType datatype = column_desc->datatype;
  if (REC_MIN_INTERVAL <= datatype && datatype <= REC_MAX_INTERVAL)
    datatype = REC_INTERVAL;
  Lng32 charCount = column_desc->length;
  if ( DFS2REC::isAnyCharacter(column_desc->datatype) )
  {
     if ( CharInfo::isCharSetSupported(column_desc->character_set) == FALSE ) {
       if (!errorCode)
       {
         // 4082: unsupported character set
         *CmpCommon::diags() << DgSqlCode(-4082)
               << DgTableName(makeTableName(table, column_desc));
       }
       else
       {
         *errorCode = 4082;
       }
       return TRUE; // error
     }
     // NCHAR_MP stores double-byte characters: turn the byte length into
     // a character count.
     if ( CharInfo::is_NCHAR_MP(column_desc->character_set) )
       charCount /= SQL_DBCHAR_SIZE;
  }
  switch (datatype)
  {
    case REC_BPINT_UNSIGNED :
      type = new (heap)
      SQLBPInt(column_desc->precision, column_desc->null_flag, FALSE, heap);
      break;
    // Exact numerics: a nonzero precision means NUMERIC(p,s); otherwise
    // the plain binary integer type of the matching width/signedness.
    case REC_BIN8_SIGNED:
      if (column_desc->precision > 0)
        type = new (heap)
        SQLNumeric(column_desc->length,
                   column_desc->precision,
                   column_desc->scale,
                   TRUE,
                   column_desc->null_flag,
                   heap
                   );
      else
        type = new (heap)
        SQLTiny(TRUE,
                column_desc->null_flag,
                heap
                );
      break;
    case REC_BIN8_UNSIGNED:
      if (column_desc->precision > 0)
        type = new (heap)
        SQLNumeric(column_desc->length,
                   column_desc->precision,
                   column_desc->scale,
                   FALSE,
                   column_desc->null_flag,
                   heap
                   );
      else
        type = new (heap)
        SQLTiny(FALSE,
                column_desc->null_flag,
                heap
                );
      break;
    case REC_BIN16_SIGNED:
      if (column_desc->precision > 0)
        type = new (heap)
        SQLNumeric(column_desc->length,
                   column_desc->precision,
                   column_desc->scale,
                   TRUE,
                   column_desc->null_flag,
                   heap
                   );
      else
        type = new (heap)
        SQLSmall(TRUE,
                 column_desc->null_flag,
                 heap
                 );
      break;
    case REC_BIN16_UNSIGNED:
      if (column_desc->precision > 0)
        type = new (heap)
        SQLNumeric(column_desc->length,
                   column_desc->precision,
                   column_desc->scale,
                   FALSE,
                   column_desc->null_flag,
                   heap
                   );
      else
        type = new (heap)
        SQLSmall(FALSE,
                 column_desc->null_flag,
                 heap
                 );
      break;
    case REC_BIN32_SIGNED:
      if (column_desc->precision > 0)
        type = new (heap)
        SQLNumeric(column_desc->length,
                   column_desc->precision,
                   column_desc->scale,
                   TRUE,
                   column_desc->null_flag,
                   heap
                   );
      else
        type = new (heap)
        SQLInt(TRUE,
               column_desc->null_flag,
               heap
               );
      break;
    case REC_BIN32_UNSIGNED:
      if (column_desc->precision > 0)
        type = new (heap)
        SQLNumeric(column_desc->length,
                   column_desc->precision,
                   column_desc->scale,
                   FALSE,
                   column_desc->null_flag,
                   heap
                   );
      else
        type = new (heap)
        SQLInt(FALSE,
               column_desc->null_flag,
               heap
               );
      break;
    case REC_BIN64_SIGNED:
      if (column_desc->precision > 0)
        type = new (heap)
        SQLNumeric(column_desc->length,
                   column_desc->precision,
                   column_desc->scale,
                   TRUE,
                   column_desc->null_flag,
                   heap
                   );
      else
        type = new (heap)
        SQLLargeInt(TRUE,
                    column_desc->null_flag,
                    heap
                    );
      break;
    case REC_BIN64_UNSIGNED:
      if (column_desc->precision > 0)
        type = new (heap)
        SQLNumeric(column_desc->length,
                   column_desc->precision,
                   column_desc->scale,
                   FALSE,
                   column_desc->null_flag,
                   heap
                   );
      else
        type = new (heap)
        SQLLargeInt(FALSE,
                    column_desc->null_flag,
                    heap
                    );
      break;
    case REC_DECIMAL_UNSIGNED:
      type = new (heap)
      SQLDecimal(column_desc->length,
                 column_desc->scale,
                 FALSE,
                 column_desc->null_flag,
                 heap
                 );
      break;
    case REC_DECIMAL_LSE:
      type = new (heap)
      SQLDecimal(column_desc->length,
                 column_desc->scale,
                 TRUE,
                 column_desc->null_flag,
                 heap
                 );
      break;
    case REC_NUM_BIG_UNSIGNED:
      type = new (heap)
      SQLBigNum(column_desc->precision,
                column_desc->scale,
                TRUE, // is a real bignum
                FALSE,
                column_desc->null_flag,
                heap
                );
      break;
    case REC_NUM_BIG_SIGNED:
      type = new (heap)
      SQLBigNum(column_desc->precision,
                column_desc->scale,
                TRUE, // is a real bignum
                TRUE,
                column_desc->null_flag,
                heap
                );
      break;
    case REC_FLOAT32:
      type = new (heap)
      SQLReal(column_desc->null_flag, heap, column_desc->precision);
      break;
    case REC_FLOAT64:
      type = new (heap)
      SQLDoublePrecision(column_desc->null_flag, heap, column_desc->precision);
      break;
    // Fixed-length character types.
    case REC_BYTE_F_DOUBLE:
      charCount /= SQL_DBCHAR_SIZE; // divide the storage length by 2
      type = new (heap)
      SQLChar(charCount,
              column_desc->null_flag,
              column_desc->upshift,
              column_desc->caseinsensitive,
              FALSE,
              column_desc->character_set,
              column_desc->collation_sequence,
              CharInfo::IMPLICIT
              );
      break;
    case REC_BYTE_F_ASCII:
      // For variable-width charsets (UTF8, or SJIS encoded as SJIS), a
      // nonzero precision carries the character count separately from
      // the byte length.
      if (column_desc->character_set == CharInfo::UTF8 ||
          (column_desc->character_set == CharInfo::SJIS &&
           column_desc->encoding_charset == CharInfo::SJIS))
      {
        Lng32 maxBytesPerChar = CharInfo::maxBytesPerChar(column_desc->character_set);
        Lng32 sizeInChars = charCount ; // Applies when CharLenUnit == BYTES
        if ( column_desc->precision > 0 )
          sizeInChars = column_desc->precision;
        type = new (heap)
        SQLChar(CharLenInfo(sizeInChars, charCount/*in_bytes*/),
                column_desc->null_flag,
                column_desc->upshift,
                column_desc->caseinsensitive,
                FALSE, // varLenFlag
                column_desc->character_set,
                column_desc->collation_sequence,
                CharInfo::IMPLICIT, // Coercibility
                column_desc->encoding_charset
                );
      }
      else // keep the old behavior
        type = new (heap)
        SQLChar(charCount,
                column_desc->null_flag,
                column_desc->upshift,
                column_desc->caseinsensitive,
                FALSE,
                column_desc->character_set,
                column_desc->collation_sequence,
                CharInfo::IMPLICIT
                );
      break;
    // Variable-length character types.
    case REC_BYTE_V_DOUBLE:
      charCount /= SQL_DBCHAR_SIZE; // divide the storage length by 2
      // fall thru
    case REC_BYTE_V_ASCII:
      if (column_desc->character_set == CharInfo::SJIS ||
          column_desc->character_set == CharInfo::UTF8)
      {
        Lng32 maxBytesPerChar = CharInfo::maxBytesPerChar(column_desc->character_set);
        Lng32 sizeInChars = charCount ; // Applies when CharLenUnit == BYTES
        if ( column_desc->precision > 0 )
          sizeInChars = column_desc->precision;
        type = new (heap)
        SQLVarChar(CharLenInfo(sizeInChars, charCount/*in_bytes*/),
                   column_desc->null_flag,
                   column_desc->upshift,
                   column_desc->caseinsensitive,
                   column_desc->character_set,
                   column_desc->collation_sequence,
                   CharInfo::IMPLICIT, // Coercibility
                   column_desc->encoding_charset
                   );
      }
      else // keep the old behavior
        type = new (heap)
        SQLVarChar(charCount,
                   column_desc->null_flag,
                   column_desc->upshift,
                   column_desc->caseinsensitive,
                   column_desc->character_set,
                   column_desc->collation_sequence,
                   CharInfo::IMPLICIT
                   );
      break;
    case REC_BYTE_V_ASCII_LONG:
      type = new (heap)
      SQLLongVarChar(charCount,
                     FALSE,
                     column_desc->null_flag,
                     column_desc->upshift,
                     column_desc->caseinsensitive,
                     column_desc->character_set,
                     column_desc->collation_sequence,
                     CharInfo::IMPLICIT
                     );
      break;
    case REC_DATETIME:
      type = DatetimeType::constructSubtype(
                                            column_desc->null_flag,
                                            column_desc->datetimestart,
                                            column_desc->datetimeend,
                                            column_desc->datetimefractprec,
                                            heap
                                            );
      CMPASSERT(type);
      if (!type->isSupportedType())
      {
        column_desc->defaultClass = COM_NO_DEFAULT; // can't set a default for these, either.
        // 4030 Column is an unsupported combination of datetime fields
        // NOTE: positive SQLCODE (warning) and no early return — the
        // unsupported type is still handed back to the caller.
        if (!errorCode)
        {
          *CmpCommon::diags() << DgSqlCode(4030)
                << DgColumnName(makeColumnName(table, column_desc))
                << DgInt0(column_desc->datetimestart)
                << DgInt1(column_desc->datetimeend)
                << DgInt2(column_desc->datetimefractprec);
        }
        else
        {
          *errorCode = 4030;
        }
      }
      break;
    case REC_INTERVAL:
      type = new (heap)
      SQLInterval(column_desc->null_flag,
                  column_desc->datetimestart,
                  column_desc->intervalleadingprec,
                  column_desc->datetimeend,
                  column_desc->datetimefractprec,
                  heap
                  );
      CMPASSERT(type);
      if (! ((SQLInterval *)type)->checkValid(CmpCommon::diags()))
        return TRUE; // error
      if (!type->isSupportedType())
      {
        column_desc->defaultClass = COM_NO_DEFAULT; // can't set a default for these, either.
        // 3044: unsupported interval — warning-style handling, no early
        // return (mirrors the REC_DATETIME case above).
        if (!errorCode)
          *CmpCommon::diags() << DgSqlCode(3044) << DgString0(column_desc->colname);
        else
          *errorCode = 3044;
      }
      break;
    case REC_BLOB :
      type = new (heap)
      SQLBlob(column_desc->precision,Lob_Invalid_Storage,
              column_desc->null_flag);
      break;
    case REC_CLOB :
      type = new (heap)
      SQLClob(column_desc->precision,Lob_Invalid_Storage,
              column_desc->null_flag);
      break;
    case REC_BOOLEAN :
      {
        type = new (heap) SQLBooleanNative(column_desc->null_flag);
      }
      break;
    default:
      {
        // 4031 Column %s is an unknown data type, %d.
        if (!errorCode)
        {
          *CmpCommon::diags() << DgSqlCode(-4031)
                << DgColumnName(makeColumnName(table, column_desc))
                << DgInt0(column_desc->datatype);
        }
        else
        {
          *errorCode = 4031;
        }
        return TRUE; // error
      }
  } // end switch (column_desc->datatype)
  CMPASSERT(type);
  if (type->getTypeQualifier() == NA_CHARACTER_TYPE) {
    CharInfo::Collation co = ((CharType *)type)->getCollation();
    // a "mini-cache" to avoid proc call, for perf
    // (remembers the flags of the most recently seen collation)
    static THREAD_P CharInfo::Collation cachedCO = CharInfo::UNKNOWN_COLLATION;
    static THREAD_P Int32 cachedFlags = CollationInfo::ALL_NEGATIVE_SYNTAX_FLAGS;
    if (cachedCO != co) {
      cachedCO = co;
      cachedFlags = CharInfo::getCollationFlags(co);
    }
    if (cachedFlags & CollationInfo::ALL_NEGATIVE_SYNTAX_FLAGS) {
      //
      //## The NCHAR/COLLATE NSK-Rel1 project is forced to disallow all user-
      // defined collations here. What we can't handle is:
      // - full support! knowledge of how to really collate!
      // - safe predicate-ability of collated columns, namely
      // . ORDER/SEQUENCE/SORT BY
      // MIN/MAX
      // < <= > >=
      // These *would* have been disallowed by the
      // CollationInfo::ORDERED_CMP_ILLEGAL flag.
      // . DISTINCT
      // GROUP BY
      // = <>
      // These *would* have been disallowed by the
      // CollationInfo::EQ_NE_CMP_ILLEGAL flag.
      // . SELECTing a collated column which is a table or index key
      // We *would* have done full table scan only, based on flag
      // . INS/UPD/DEL involving a collated column which is a key
      // We *would* have had to disallow this, based on flag;
      // otherwise we would position in wrong and corrupt either
      // our partitioning or b-trees or both.
      // See the "MPcollate.doc" document, and
      // see sqlcomp/DefaultValidator.cpp ValidateCollationList comments.
      //
      {
        // 4069 Column TBL.COL uses unsupported collation COLLAT.
        if (!errorCode)
        {
          *CmpCommon::diags() << DgSqlCode(-4069)
                << DgColumnName(makeColumnName(table, column_desc));
        }
        else
        {
          *errorCode= 4069;
        }
        return TRUE; // error
      }
    }
  }
  return FALSE; // no error
} // createNAType()
// -----------------------------------------------------------------------
// Method for inserting new NAColumn entries in NATable::colArray_,
// one for each column_desc in the list supplied as input.
// -----------------------------------------------------------------------
#pragma nowarn(1506) // warning elimination
// Insert one new NAColumn into colArray for each column_desc in the input
// list.
//
// column_desc_list [IN]  : linked list of column descriptors
// table            [IN]  : owning NATable, may be NULL; various
//                          table-level flags are set as columns are seen
// colArray         [OUT] : receives the new NAColumn objects
// heap             [IN]  : heap for all allocations; when the NATable
//                          cache is active, default/heading/computed-text
//                          strings are copied onto this heap
//
// Returns TRUE on error, FALSE on success.
NABoolean createNAColumns(desc_struct *column_desc_list /*IN*/,
                          NATable *table /*IN*/,
                          NAColumnArray &colArray /*OUT*/,
                          NAMemory *heap /*IN*/)
{
  NAType *type;
  ColumnClass colClass;
  while (column_desc_list)
  {
    columns_desc_struct * column_desc = &column_desc_list->body.columns_desc;
    NABoolean isMvSystemAdded = FALSE;
    NABoolean hasSystemColumnAsUserColumn = FALSE;
    if (NAColumn::createNAType(column_desc, table, type, heap))
      return TRUE;
    // Get the column class. The column will either be a system column or a
    // user column.
    //
    switch (column_desc->colclass)
    {
    case 'S':   // system column ...
      {
        // ... unless CQD OVERRIDE_SYSKEY is ON for a non-virtual table,
        // in which case it is exposed as a user column.
        if ( (CmpCommon::getDefault(OVERRIDE_SYSKEY)==DF_ON) &&
             (table && table->getSpecialType() != ExtendedQualName::VIRTUAL_TABLE) )
        {
          colClass = USER_COLUMN;
          hasSystemColumnAsUserColumn = TRUE;
        }
        else
          colClass = SYSTEM_COLUMN;
      }
      break;
    case 'U':   // user column
      colClass = USER_COLUMN;
      break;
    case 'A':   // 'A'/'C' are user columns flagged as "added" in the
    case 'C':   // NAColumn ctor call below ('C' presumably also marks a
                // computed/"C" class -- see the last ctor argument)
      colClass = USER_COLUMN;
      break;
    case 'M': // MVs --
      colClass = USER_COLUMN;
      isMvSystemAdded = TRUE;
      break;
    default:
      {
        // 4032 column is an unknown class (not sys nor user)
        *CmpCommon::diags() << DgSqlCode(-4032)
          << DgColumnName(makeColumnName(table, column_desc))
          << DgInt0(column_desc->colclass);
        return TRUE; // error
      }
    } // end switch (column_desc->colclass)
    // Create an NAColumn and insert it into the NAColumn array.
    //
    NAColumn *newColumn = NULL;
    if (column_desc->colname[0] != '\0')
    {
      // Standard named column from ReadTableDef...
      CMPASSERT(column_desc->colnumber >= 0);
      char* defaultValue = column_desc->defaultvalue;
      char* heading = column_desc->heading;
      char* computed_column_text = column_desc->computed_column_text;
      NABoolean isSaltColumn = FALSE;
      NABoolean isDivisioningColumn = FALSE;
      if (column_desc->defaultClass == COM_ALWAYS_COMPUTE_COMPUTED_COLUMN_DEFAULT)
      {
        if (column_desc->colFlags & SEABASE_COLUMN_IS_SALT)
          isSaltColumn = TRUE;
        if (column_desc->colFlags & SEABASE_COLUMN_IS_DIVISION)
          isDivisioningColumn = TRUE;
        // For computed columns the expression text may arrive in the
        // defaultvalue field; move it over so it is not treated as a
        // default.
        if (!computed_column_text)
        {
          computed_column_text = defaultValue;
          defaultValue = NULL;
        }
      }
      if(ActiveSchemaDB()->getNATableDB()->cachingMetaData()){
        //make copies of stuff onto the heap passed in
        // (the descriptor-owned strings must not be shared with a cached
        // NATable)
        if(defaultValue){
          defaultValue = (char*) new (heap) char[strlen(defaultValue)+1];
          strcpy(defaultValue, column_desc->defaultvalue);
        }
        if(heading){
          Int32 headingLength = str_len(heading)+1;
          heading = new (heap) char [headingLength];
          memcpy(heading,column_desc->heading,headingLength);
        }
        if(computed_column_text){
          char * computed_column_text_temp = computed_column_text;
          Int32 cctLength = str_len(computed_column_text)+1;
          computed_column_text = new (heap) char [cctLength];
          memcpy(computed_column_text,computed_column_text_temp,cctLength);
        }
      }
      newColumn = new (heap)
        NAColumn(column_desc->colname,
                 column_desc->colnumber,
                 type,
                 heap,
                 table,
                 colClass,
                 column_desc->defaultClass,
                 defaultValue,
                 heading,
                 column_desc->upshift,
                 ((column_desc->colclass == 'A') ||
                  (column_desc->colclass == 'C')),
                 COM_UNKNOWN_DIRECTION,
                 FALSE,
                 NULL,
                 column_desc->stored_on_disk,
                 computed_column_text,
                 isSaltColumn,
                 isDivisioningColumn,
                 (column_desc->colclass == 'C'));
    }
    else
    {
      // Unnamed columns are not expected here.
      CMPASSERT(0);
    }
    if (isMvSystemAdded)
      newColumn->setMvSystemAddedColumn();
    // HBase-backed tables carry column family/qualifier and flags.
    if (table &&
        ((table->isSeabaseTable()) ||
         (table->isHbaseCellTable()) ||
         (table->isHbaseRowTable())))
    {
      if (column_desc->hbaseColFam)
        newColumn->setHbaseColFam(column_desc->hbaseColFam);
      if (column_desc->hbaseColQual)
        newColumn->setHbaseColQual(column_desc->hbaseColQual);
      newColumn->setHbaseColFlags(column_desc->hbaseColFlags);
    }
    // Propagate per-column properties up to table-level flags.
    if (table != NULL)
    {
      if (newColumn->isAddedColumn())
        table->setHasAddedColumn(TRUE);
      if (newColumn->getType()->isVaryingLen())
        table->setHasVarcharColumn(TRUE);
      if (hasSystemColumnAsUserColumn)
        table->setSystemColumnUsedAsUserColumn(TRUE) ;
      if (newColumn->getType()->isLob())
        table->setHasLobColumn(TRUE);
      if (CmpSeabaseDDL::isEncodingNeededForSerialization(newColumn))
        table->setHasSerializedEncodedColumn(TRUE);
      if (CmpSeabaseDDL::isSerialized(newColumn->getHbaseColFlags()))
        table->setHasSerializedColumn(TRUE);
    }
    colArray.insert(newColumn);
    column_desc_list = column_desc_list->header.next;
  } // end while
  return FALSE; // no error
} // createNAColumns()
#pragma warn(1506) // warning elimination
// -----------------------------------------------------------------------
// Map a hive column type name (e.g. "int", "string", "varchar(20)") to a
// heap-allocated Trafodion NAType.  All hive columns are mapped as
// nullable.  Returns NULL if the type is unknown or a varchar length
// cannot be parsed.
// -----------------------------------------------------------------------
NAType* getSQColTypeForHive(const char* hiveType, NAMemory* heap)
{
  if ( !strcmp(hiveType, "tinyint"))
    {
      // Without TINYINT support, promote hive tinyint to SMALLINT.
      if (CmpCommon::getDefault(TRAF_TINYINT_SUPPORT) == DF_OFF)
        return new (heap) SQLSmall(TRUE /* neg */, TRUE /* allow NULL*/, heap);
      else
        return new (heap) SQLTiny(TRUE /* neg */, TRUE /* allow NULL*/, heap);
    }
  if ( !strcmp(hiveType, "smallint"))
    return new (heap) SQLSmall(TRUE /* neg */, TRUE /* allow NULL*/, heap);
  if ( !strcmp(hiveType, "int"))
    return new (heap) SQLInt(TRUE /* neg */, TRUE /* allow NULL*/, heap);
  if ( !strcmp(hiveType, "bigint"))
    return new (heap) SQLLargeInt(TRUE /* neg */, TRUE /* allow NULL*/, heap);
  if ( !strcmp(hiveType, "boolean"))
    return new (heap) SQLBooleanNative(TRUE, heap);
  if ( !strcmp(hiveType, "string"))
    {
      // Hive STRING maps to VARCHAR.  The byte length comes from CQD
      // HIVE_MAX_STRING_LENGTH, overridden by
      // HIVE_MAX_STRING_LENGTH_IN_BYTES when the latter is set away from
      // its 32000 default.
      Int32 len = CmpCommon::getDefaultLong(HIVE_MAX_STRING_LENGTH);
      Int32 lenInBytes = CmpCommon::getDefaultLong(HIVE_MAX_STRING_LENGTH_IN_BYTES);
      if( lenInBytes != 32000 )
        len = lenInBytes;
      NAString hiveCharset =
        ActiveSchemaDB()->getDefaults().getValue(HIVE_DEFAULT_CHARSET);
      hiveCharset.toUpper();
      CharInfo::CharSet hiveCharsetEnum = CharInfo::getCharSetEnum(hiveCharset);
      Int32 maxNumChars = 0;   // 0 => no char-count constraint, length in bytes
      Int32 storageLen = len;
      SQLVarChar * nat =
        new (heap) SQLVarChar(CharLenInfo(maxNumChars, storageLen),
                              TRUE, // allow NULL
                              FALSE, // not upshifted
                              FALSE, // not case-insensitive
                              hiveCharsetEnum,
                              CharInfo::DefaultCollation,
                              CharInfo::IMPLICIT);
      nat->setWasHiveString(TRUE);
      return nat;
    }
  if ( !strcmp(hiveType, "float"))
    return new (heap) SQLReal(TRUE /* allow NULL*/, heap);
  if ( !strcmp(hiveType, "double"))
    return new (heap) SQLDoublePrecision(TRUE /* allow NULL*/, heap);
  if ( !strcmp(hiveType, "timestamp"))
    return new (heap) SQLTimestamp(TRUE /* allow NULL */ , 6, heap);
  if ( !strcmp(hiveType, "date"))
    return new (heap) SQLDate(TRUE /* allow NULL */ , heap);
  if ( !strncmp(hiveType, "varchar", 7) )
    {
      // Parse the declared length out of "varchar(<n>)".  atoi() stops at
      // the closing parenthesis (or any other non-digit), which matches
      // the previous hand-rolled copy-then-atoi parser.
      const char* openParen = strchr(hiveType, '(');
      Int32 len = openParen ? atoi(openParen + 1) : 0;
      if (len == 0) return NULL; // cannot parse correctly
      NAString hiveCharset =
        ActiveSchemaDB()->getDefaults().getValue(HIVE_DEFAULT_CHARSET);
      hiveCharset.toUpper();
      CharInfo::CharSet hiveCharsetEnum = CharInfo::getCharSetEnum(hiveCharset);
      Int32 maxNumChars = 0;
      Int32 storageLen = len;
      if (CharInfo::isVariableWidthMultiByteCharSet(hiveCharsetEnum))
        {
          // For Hive VARCHARs, the number specified is the max. number of
          // characters, while we count in bytes when using
          // HIVE_MAX_STRING_LENGTH for Hive STRING columns. Set the max
          // character constraint and also adjust the required storage length.
          maxNumChars = len;
          storageLen = len * CharInfo::maxBytesPerChar(hiveCharsetEnum);
        }
      return new (heap) SQLVarChar(CharLenInfo(maxNumChars, storageLen),
                                   TRUE, // allow NULL
                                   FALSE, // not upshifted
                                   FALSE, // not case-insensitive
                                   hiveCharsetEnum,
                                   CharInfo::DefaultCollation,
                                   CharInfo::IMPLICIT);
    }
  // Unknown hive type.
  return NULL;
}
NABoolean createNAColumns(struct hive_column_desc* hcolumn /*IN*/,
                          NATable *table            /*IN*/,
                          NAColumnArray &colArray   /*OUT*/,
                          NAMemory *heap            /*IN*/)
{
  // Walk the chain of hive column descriptors and append one NAColumn per
  // entry to colArray. Returns TRUE on error (unsupported Hive type,
  // error 1204 raised), FALSE on success.
  for (struct hive_column_desc* hcol = hcolumn; hcol; hcol = hcol->next_)
    {
      NAType* colType = getSQColTypeForHive(hcol->type_, heap);
      if (colType == NULL)
        {
          // Unsupported Hive data type: report it and bail out.
          *CmpCommon::diags()
            << DgSqlCode(-1204)
            << DgString0(hcol->type_);
          return TRUE;
        }

      // Column names are stored upshifted.
      NAString upperName(hcol->name_);
      upperName.toUpper();

      NAColumn* naCol = new (heap)
        NAColumn(upperName.data(),
                 hcol->intIndex_,
                 colType,
                 heap,
                 table,
                 USER_COLUMN, // colClass,
                 COM_NULL_DEFAULT ,//defaultClass,
                 (char*)"", // defaultValue,
                 (char*)"", // heading,
                 FALSE, // column_desc->upshift,
                 FALSE, // added column
                 COM_UNKNOWN_DIRECTION,
                 FALSE, // isOptional
                 NULL, // routineParamType
                 TRUE, // column_desc->stored_on_disk,
                 (char*)"" //computed_column_text
                 );

      // Propagate per-column traits up to the owning table, if any.
      if (table != NULL)
        {
          if (naCol->isAddedColumn())
            table->setHasAddedColumn(TRUE);

          if (naCol->getType()->isVaryingLen())
            table->setHasVarcharColumn(TRUE);
        }

      colArray.insert(naCol);
    }

  return FALSE; // no error
} // createNAColumns()
#pragma warn(1506) // warning elimination
NABoolean createNAKeyColumns(desc_struct *keys_desc_list /*IN*/,
                             NAColumnArray &colArray     /*IN*/,
                             NAColumnArray &keyColArray  /*OUT*/,
                             CollHeap *heap              /*IN*/)
{
  // Collect the clustering-key columns named in keys_desc_list into
  // keyColArray, record each column's sort direction, and tag the
  // underlying NAColumn objects as clustering-key columns.
  // Always returns FALSE (no error).
  for (const desc_struct *kd = keys_desc_list; kd; kd = kd->header.next)
    {
      NAColumn *keyCol = colArray.getColumn(kd->body.keys_desc.tablecolnumber);
      SortOrdering direction =
        kd->body.keys_desc.ordering ? DESCENDING : ASCENDING;

      keyColArray.insert(keyCol);
      keyColArray.setAscending(keyColArray.entries() - 1,
                               direction == ASCENDING);

      // Remember on the column itself that it is part of the clustering
      // key, along with its key ordering (asc or desc).
      keyCol->setClusteringKey(direction);
    }

  return FALSE;
}
// ************************************************************************
// The next two methods are used for code related to indexes hiding.
// In particular, this is related to hiding remote indexes having the same
// name as the local name. Here we mark the remote indexes that have the
// same local name and in addition share the following:
// (1) both share the same index columns
// (2) both have the same partitioning keys
//
// The method naStringHashFunc is used by the NAHashDictionary<NAString, Index>
// that maps indexname to the corresponding list of indexes having that name
//
//*************************************************************************
ULng32 naStringHashFunc(const NAString& indexName)
{
  // Hash function used by the NAHashDictionary that maps an index name to
  // the list of indexes bearing that name; delegates to NAString::hash().
  return (ULng32) NAString::hash(indexName);
}
//*************************************************************************
// The method processDuplicateNames() is called by createNAFileSet() for
// tables having duplicate remote indexes.
//*************************************************************************
// LCOV_EXCL_START :nsk
// Currently a no-op: the remote-index-hiding work this hook was created
// for is not performed here (all parameters are unused), but the call
// site in createNAFileSet() is retained. NOTE(review): presumably the
// original implementation was removed or moved -- confirm before adding
// logic back.
void processDuplicateNames(NAHashDictionaryIterator<NAString, Int32> &Iter,
                           NAFileSetList & indexes,
                           char *localNodeName)
{
  return;
} // processDuplicateNames()
// LCOV_EXCL_STOP
// -----------------------------------------------------------------------
// Method for:
// - inserting new NAFileSet entries in NATable::indexes_
// one for each index in the list supplied as input. It also
// returns a pointer to the NAFileSet for the clustering index
// as well as the primary index on this NATable.
// - inserting new NAFileSet entries in NATable::vertParts_
// one for each vertical partition in the list supplied as input.
// -----------------------------------------------------------------------
#pragma nowarn(1506) // warning elimination
// Returns TRUE on error, FALSE on success (NABoolean error convention).
static
NABoolean createNAFileSets(desc_struct * table_desc       /*IN*/,
                           const NATable * table          /*IN*/,
                           const NAColumnArray & colArray /*IN*/,
                           NAFileSetList & indexes        /*OUT*/,
                           NAFileSetList & vertParts      /*OUT*/,
                           NAFileSet * & clusteringIndex  /*OUT*/,
                           LIST(CollIndex) & tableIdList  /*OUT*/,
                           NAMemory* heap,
                           BindWA * bindWA,
                           NAColumnArray &newColumns, /*OUT */
                           Int32 *maxIndexLevelsPtr = NULL)
{
  // ---------------------------------------------------------------------
  // Add index/vertical partition (VP) information; loop over all indexes/
  // VPs, but start with the clustering key, then process all others.
  // The clustering key has a keytag 0.
  // ---------------------------------------------------------------------

  // this dictionary is used for hiding remote indexes; the remote indexes
  // are hidden when the CQD INDEX_ELIMINATION_LEVEL is set to aggressive
  NAHashDictionary<NAString, Int32> *indexFilesetMap =
    new (heap) NAHashDictionary<NAString, Int32>
        (naStringHashFunc, 101, FALSE, CmpCommon::statementHeap());

  NAList<NAString *> stringList (CmpCommon::statementHeap());

  desc_struct *indexes_desc = table_desc->body.table_desc.indexes_desc;

  // Skip forward to the clustering index (keytag 0); secondary indexes
  // are revisited on later loop iterations (see the restart logic at the
  // bottom of the main while loop).
  while (indexes_desc AND indexes_desc->body.indexes_desc.keytag)
    indexes_desc = indexes_desc->header.next;

  // must have a clustering key if not view
  CMPASSERT((indexes_desc AND !indexes_desc->body.indexes_desc.keytag) OR
            (table_desc->body.table_desc.views_desc));

  NABoolean isTheClusteringKey = TRUE;
  NABoolean isVerticalPartition;
  NABoolean hasRemotePartition = FALSE;
  CollIndex numClusteringKeyColumns = 0;
  NABoolean tableAlignedRowFormat = table->isSQLMXAlignedTable();

  // get hbase table index level and blocksize. costing code uses index_level
  // and block size to estimate cost. Here we make a JNI call to read index level
  // and block size. If there is a need to avoid reading from Hbase layer,
  // HBASE_INDEX_LEVEL cqd can be used to disable JNI call. User can
  // set this CQD to reflect desired index_level for his query.
  // Default value of HBASE_BLOCK_SIZE is 64KB, when not reading from Hbase layer.
  Int32 hbtIndexLevels = 0;
  Int32 hbtBlockSize = 0;
  NABoolean res = false;
  if (table->isHbaseTable())
    {
      // get default values of index_level and block size
      hbtIndexLevels = (ActiveSchemaDB()->getDefaults()).getAsLong(HBASE_INDEX_LEVEL);
      hbtBlockSize = (ActiveSchemaDB()->getDefaults()).getAsLong(HBASE_BLOCK_SIZE);
      // call getHbaseTableInfo if index level is set to 0
      if (hbtIndexLevels == 0)
        res = table->getHbaseTableInfo(hbtIndexLevels, hbtBlockSize);
    }

  // Set up global cluster information. This global information always
  // gets put on the context heap.
  //
  // Note that this function call will probably not do anything, since
  // this cluster information is set up when arkcmp is created; however,
  // it's certainly better to have this call here, rather than in a
  // doubly-nested loop below where it used to be ...

  // $$$ probably not necessary to call this even once ...
  setUpClusterInfo(CmpCommon::contextHeap());

  NABoolean doHash2 =
      (CmpCommon::getDefault(HBASE_HASH2_PARTITIONING) != DF_OFF &&
       !(bindWA && bindWA->isTrafLoadPrep()));

  // ---------------------------------------------------------------------
  // loop over all indexes/VPs defined on the base table
  // ---------------------------------------------------------------------
  while (indexes_desc)
    {
      Lng32 numberOfFiles = 1;          // always at least 1
      NAColumn * indexColumn;           // an index/VP key column
      NAColumn * newIndexColumn;
      NAFileSet * newIndex;             // a new file set

      //hardcoding statement heap here, previously the following calls
      //used the heap that was passed in (which was always statement heap)
      //Now with the introduction of NATable caching we pass in the NATable
      //heap and these guys should not be created on the NATable heap, they
      //should be created on the statement heap. Only the array objects
      //will be on the statement heap whatever is in the arrays i.e. NAColumns
      //will still be where ever they were before.
      NAColumnArray allColumns(CmpCommon::statementHeap());// all columns that belong to an index
      NAColumnArray indexKeyColumns(CmpCommon::statementHeap());// the index key columns
      NAColumnArray saveNAColumns(CmpCommon::statementHeap());// save NAColums of secondary index columns
      NAColumnArray partitioningKeyColumns(CmpCommon::statementHeap());// the partitioning key columns
      PartitioningFunction * partFunc = NULL;
      // is this an index or is it really a VP?
      isVerticalPartition = indexes_desc->body.indexes_desc.isVerticalPartition;
      NABoolean isPacked = indexes_desc->body.indexes_desc.isPacked;

      NABoolean indexAlignedRowFormat = (indexes_desc->body.indexes_desc.rowFormat == COM_ALIGNED_FORMAT_TYPE);

      NABoolean isNotAvailable =
        indexes_desc->body.indexes_desc.notAvailable;

      ItemExprList hbaseSaltColumnList(CmpCommon::statementHeap());
      Int64 numOfSaltedPartitions = 0;

      // ---------------------------------------------------------------------
      // loop over the clustering key columns of the index
      // ---------------------------------------------------------------------
      const desc_struct *keys_desc = indexes_desc->body.indexes_desc.keys_desc;
      while (keys_desc)
        {
          // Add an index/VP key column.
          //
          // If this is an alternate index or a VP, the keys table actually
          // describes all columns of the index or VP. For nonunique
          // indexes, all index columns form the key, while for unique
          // alternate indexes the last "numClusteringKeyColumns"
          // columns are non-key columns, they are just the clustering
          // key columns used to find the base table record. This is
          // true for both SQL/MP and SQL/MX tables at this time.
          // To make these assumptions is not optimal, but the
          // desc_structs that are used as input are a historical
          // leftover from SQL/MP and therefore aren't set up very
          // well to describe index columns and index keys. Some day
          // we might consider a direct conversion from the MX catalog
          // manager (SOL) objects into NATables and NAFilesets.
          //
          // NOTE:
          // The last "numClusteringKeyColumns" key columns
          // of a unique alternate index (which ARE described in the
          // keys_desc) get deleted later.
          Int32 tablecolnumber = keys_desc->body.keys_desc.tablecolnumber;
          indexColumn = colArray.getColumn(tablecolnumber);

          // For HBase secondary indexes (or indexes whose row format
          // differs from the base table's), clone the NAColumn so the
          // index-specific column name / column family / qualifier can be
          // attached without disturbing the base table's column.
          if ((table->isHbaseTable()) &&
              ((indexes_desc->body.indexes_desc.keytag != 0) ||
               (indexAlignedRowFormat && indexAlignedRowFormat != tableAlignedRowFormat)))
            {
              newIndexColumn = new(heap) NAColumn(*indexColumn);
              newIndexColumn->setIndexColName(keys_desc->body.keys_desc.keyname);
              newIndexColumn->setHbaseColFam(keys_desc->body.keys_desc.hbaseColFam);
              newIndexColumn->setHbaseColQual(keys_desc->body.keys_desc.hbaseColQual);
              newIndexColumn->resetSerialization();
              saveNAColumns.insert(indexColumn);
              newColumns.insert(newIndexColumn);
              indexColumn = newIndexColumn;
            }

          SortOrdering order = NOT_ORDERED;

          // as mentioned above, for all alternate indexes we
          // assume at first that all columns are key columns
          // and we make adjustments later
          indexKeyColumns.insert(indexColumn);
          order = keys_desc->body.keys_desc.ordering ?
                    DESCENDING : ASCENDING;
          indexKeyColumns.setAscending(indexKeyColumns.entries() - 1,
                                       order == ASCENDING);

          if ( table->isHbaseTable() &&
               indexColumn->isSaltColumn() )
            {
              // examples of the saltClause string:
              // 1. HASH2PARTFUNC(CAST(L_ORDERKEY AS INT NOT NULL) FOR 4)
              // 2. HASH2PARTFUNC(CAST(A AS INT NOT NULL),CAST(B AS INT NOT NULL) FOR 4)
              const char* saltClause = indexColumn->getComputedColumnExprString();

              Parser parser(CmpCommon::context());
              ItemExpr* saltExpr = parser.getItemExprTree(saltClause,
                                                          strlen(saltClause),
                                                          CharInfo::ISO88591);

              CMPASSERT(saltExpr &&
                        saltExpr->getOperatorType() == ITM_HASH2_DISTRIB);

              // get the # of salted partitions from saltClause
              ItemExprList csList(CmpCommon::statementHeap());
              saltExpr->findAll(ITM_CONSTANT, csList, FALSE, FALSE);

              // get #salted partitions from last ConstValue in the list
              if ( csList.entries() > 0 ) {
                 ConstValue* ct = (ConstValue*)csList[csList.entries()-1];

                 if ( ct->canGetExactNumericValue() ) {
                    numOfSaltedPartitions = ct->getExactNumericValue();
                 }
              }

              // collect all ColReference objects into hbaseSaltColumnList.
              saltExpr->findAll(ITM_REFERENCE, hbaseSaltColumnList, FALSE, FALSE);
            }

          if (isTheClusteringKey)
            {
              // Since many columns of the base table may not be in the
              // clustering key, we'll delay setting up the list of all
              // columns in the index until later, so we can then just
              // add them all at once.

              // Remember that this column is part of the clustering
              // key and remember its key ordering (asc or desc)
              indexColumn->setClusteringKey(order);
              numClusteringKeyColumns++;
            }
          else
            {
              // Since all columns in the index are guaranteed to be in
              // the key, we can set up the list of all columns in the index
              // now just by adding every key column.
              allColumns.insert(indexColumn);
            }
          keys_desc = keys_desc->header.next;
        } // end while (keys_desc)

      // ---------------------------------------------------------------------
      // Loop over the non key columns of the index/vertical partition.
      // These columns get added to the list of all the columns for the index/
      // VP. Their length also contributes to the total record length.
      // ---------------------------------------------------------------------
      const desc_struct *non_keys_desc =
        indexes_desc->body.indexes_desc.non_keys_desc;
      while (non_keys_desc)
        {
          Int32 tablecolnumber = non_keys_desc->body.keys_desc.tablecolnumber;
          indexColumn = colArray.getColumn(tablecolnumber);

          if ((table->isHbaseTable()) &&
              ((indexes_desc->body.indexes_desc.keytag != 0) ||
               (indexAlignedRowFormat && indexAlignedRowFormat != tableAlignedRowFormat)))
            {
              newIndexColumn = new(heap) NAColumn(*indexColumn);
              if (non_keys_desc->body.keys_desc.keyname)
                newIndexColumn->setIndexColName(non_keys_desc->body.keys_desc.keyname);
              newIndexColumn->setHbaseColFam(non_keys_desc->body.keys_desc.hbaseColFam);
              newIndexColumn->setHbaseColQual(non_keys_desc->body.keys_desc.hbaseColQual);
              newIndexColumn->resetSerialization();
              indexColumn = newIndexColumn;
              newColumns.insert(newIndexColumn);
            }

          allColumns.insert(indexColumn);
          non_keys_desc = non_keys_desc->header.next;
        } // end while (non_keys_desc)

      // Filled in differently for the clustering index vs. secondary
      // indexes in the if/else below.
      desc_struct *files_desc;
      NABoolean isSystemTable;
      if (isTheClusteringKey)
        {
          // We haven't set up the list of all columns in the clustering
          // index yet, so do that now. Do this by adding all
          // the base table columns to the columns of the clustering index.
          // Don't add a column, of course, if somehow it has already
          // been added.
          for (CollIndex bcolNo = 0; bcolNo < colArray.entries(); bcolNo++)
            {
              NAColumn *baseCol = colArray[bcolNo];
              if (NOT allColumns.contains(baseCol))
                {
                  // add this base column
                  allColumns.insert(baseCol);
                }
            } // end for

          files_desc = table_desc->body.table_desc.files_desc;
          isSystemTable = table_desc->body.table_desc.issystemtablecode;

          // Record length of clustering key is the same as that of the base table record
          indexes_desc->body.indexes_desc.record_length = table_desc->body.table_desc.record_length;
        } // endif (isTheClusteringKey)
      else
        {
          if (indexes_desc->body.indexes_desc.unique)
            {
              // As mentioned above, if this is a unique index,
              // the last numClusteringKeyColumns are actually not
              // part of the KEY of the index, they are just part of
              // the index record. Since there are keys_desc entries
              // for these columns, remove the corresponding entries
              // from indexKeyColumns
              // $$$$ Commenting this out, since Catman and DP2 handle index
              // keys differently: they always assume that all index columns
              // are part of the key. Somehow DP2 is told which prefix length
              // of the key is actually the unique part.
              // $$$$ This could be enabled when key lengths and record lengths
              // are different.
              // for (CollIndex i = 0; i < numClusteringKeyColumns; i++)
              //   indexKeyColumns.removeAt(indexKeyColumns.entries() - 1);
            }

          files_desc = indexes_desc->body.indexes_desc.files_desc;
          isSystemTable = indexes_desc->body.indexes_desc.issystemtablecode;
        } // endif (NOT isTheClusteringKey)

      // -------------------------------------------------------------------
      // Build the partition attributes for this table.
      //
      // Usually the partitioning key columns are the same as the
      // clustering key columns.  If no partitioning key columns have
      // been specified then the partitioning key columns will be assumed
      // to be the same as the clustering key columns.  Otherwise, they
      // could be the same but may not necessarily be the same.
      //
      // We will ASSUME here that NonStop SQL/MP or the simulator will not
      // put anything into partitioning keys desc and only SQL/MX will.  So
      // we don't have to deal with keytag columns here.
      // -------------------------------------------------------------------
      const desc_struct *partitioning_keys_desc =
        indexes_desc->body.indexes_desc.partitioning_keys_desc;

      // the key columns that build the salt column for HBase table
      NAColumnArray hbaseSaltOnColumns(CmpCommon::statementHeap());

      if (partitioning_keys_desc)
        {
          keys_desc = partitioning_keys_desc;
          while (keys_desc)
            {
              Int32 tablecolnumber = keys_desc
                ->body.keys_desc.tablecolnumber;
              indexColumn = colArray.getColumn(tablecolnumber);

              partitioningKeyColumns.insert(indexColumn);
              SortOrdering order = keys_desc
                ->body.keys_desc.ordering ?
                DESCENDING : ASCENDING;
              partitioningKeyColumns.setAscending
                (partitioningKeyColumns.entries() - 1,
                 order == ASCENDING);
              keys_desc = keys_desc->header.next;
            } // end while (keys_desc)
        }
      else {
        partitioningKeyColumns = indexKeyColumns;

        // compute the partition key columns for HASH2 partitioning scheme
        // for a salted HBase table. Later on, we will replace
        // partitioningKeyColumns with the column list computed here if
        // the desired partitioning schema is HASH2.
        for (CollIndex i=0; i<hbaseSaltColumnList.entries(); i++ )
          {
            ColReference* cRef = (ColReference*)hbaseSaltColumnList[i];
            const NAString& colName = (cRef->getColRefNameObj()).getColName();
            NAColumn *col = allColumns.getColumn(colName.data()) ;
            hbaseSaltOnColumns.insert(col);
          }
      }

      // Create DP2 node map for partitioning function.
      NodeMap* nodeMap = NULL;

      //increment for each table/index to create unique identifier
      cmpCurrentContext->incrementTableIdent();

      // NB: Just in case, we made a call to setupClusterInfo at the
      // beginning of this function.
      desc_struct * partns_desc;
      Int32 indexLevels = 1;
      Int32 blockSize = indexes_desc->body.indexes_desc.blocksize;
      if (files_desc)
        {
          if( (table->getSpecialType() != ExtendedQualName::VIRTUAL_TABLE AND
               /*
                 table->getSpecialType() != ExtendedQualName::ISP_TABLE AND
               */
               (NOT table->isHbaseTable()))
              OR files_desc->body.files_desc.partns_desc )
            {
              nodeMap = new (heap) NodeMap(heap);
              createNodeMap(files_desc->body.files_desc.partns_desc,
                            nodeMap,
                            heap,
                            table_desc->body.table_desc.tablename,
                            cmpCurrentContext->getTableIdent());
              tableIdList.insert(CollIndex(cmpCurrentContext->getTableIdent()));
            }
          // Check whether the index has any remote partitions.
          if (checkRemote(files_desc->body.files_desc.partns_desc,
                          indexes_desc->body.indexes_desc.indexname))
            hasRemotePartition = TRUE;
          else
            hasRemotePartition = FALSE;

          // Sol: 10-030703-7600. Earlier we assumed that the table is
          // partitioned same as the indexes, hence we used table partitioning
          // to create partitioning function. But this is not true. Hence
          // we now use the indexes partitioning function
          switch (indexes_desc->body.indexes_desc.partitioningScheme)
            {
            case COM_ROUND_ROBIN_PARTITIONING :
              // Round Robin partitioned table
              partFunc = createRoundRobinPartitioningFunction(
                   files_desc->body.files_desc.partns_desc,
                   nodeMap,
                   heap);
              break;

            case COM_HASH_V1_PARTITIONING :
              // Hash partitioned table
              partFunc = createHashDistPartitioningFunction(
                   files_desc->body.files_desc.partns_desc,
                   partitioningKeyColumns,
                   nodeMap,
                   heap);
              break;

            case COM_HASH_V2_PARTITIONING :
              // Hash partitioned table
              partFunc = createHash2PartitioningFunction(
                   files_desc->body.files_desc.partns_desc,
                   partitioningKeyColumns,
                   nodeMap,
                   heap);
              partitioningKeyColumns = hbaseSaltOnColumns;
              break;

            case COM_UNSPECIFIED_PARTITIONING :
            case COM_NO_PARTITIONING :
            case COM_RANGE_PARTITIONING :
            case COM_SYSTEM_PARTITIONING :
              {
                // If this is an MP Table, parse the first key
                // values as MP Stored Text.
                //
                desc_struct* hbd =
                  ((table_desc_struct*)table_desc)->hbase_regionkey_desc;

                // splits will be 1 for single partitioned table.
                Int32 splits = findDescEntries(hbd);

                // Do Hash2 only if the table is salted originally
                // and the current number of HBase regions is greater than 1.
                if ( doHash2 )
                  doHash2 = (numOfSaltedPartitions > 0 && splits > 1);

                if ( hbd )
                  if ( doHash2 ) {
                    partFunc = createHash2PartitioningFunctionForHBase(
                         ((table_desc_struct*)table_desc)->hbase_regionkey_desc,
                         table,
                         numOfSaltedPartitions,
                         heap);

                    partitioningKeyColumns = hbaseSaltOnColumns;
                  }
                  else
                    partFunc = createRangePartitioningFunctionForHBase(
                         ((table_desc_struct*)table_desc)->hbase_regionkey_desc,
                         table,
                         partitioningKeyColumns,
                         heap);
                else {
                  // no region descriptor, range partitioned or single partition table
                  partFunc = createRangePartitioningFunction(
                       files_desc->body.files_desc.partns_desc,
                       partitioningKeyColumns,
                       nodeMap,
                       heap);
                }
                break;
              }

            case COM_UNKNOWN_PARTITIONING:
              {
                *CmpCommon::diags() << DgSqlCode(-4222)
                                    << DgString0("Unsupported partitioning");
                return TRUE;
              }

            default:
              CMPASSERT_STRING(FALSE, "Unhandled switch statement");
            }

          // Check to see if the partitioning function was created
          // successfully.  An error could occur if one of the
          // partitioning keys is an unsupported type or if the table is
          // an MP Table and the first key values contain MP syntax that
          // is not supported by MX.  The unsupported types are the
          // FRACTION only SQL/MP Datetime types.  An example of a
          // syntax error is a Datetime literal which does not have the
          // max number of digits in each field. (e.g. DATETIME
          // '1999-2-4' YEAR TO DAY)
          //
          if (partFunc == NULL) {
            return TRUE;
          }

          // currently we save the indexLevels in the fileset. Since there
          // is a indexLevel for each file that belongs to the fileset,
          // we get the biggest of this indexLevels and save in the fileset.
          partns_desc = files_desc->body.files_desc.partns_desc;
          if(partns_desc)
            {
              while (partns_desc)
                {
                  if ( indexLevels < partns_desc->body.partns_desc.indexlevel)
                    indexLevels = partns_desc->body.partns_desc.indexlevel;
                  partns_desc = partns_desc->header.next;
                }
            }
        }

      // add a new access path
      //
      // $$$ The estimated number of records should be available from
      // $$$ a FILES descriptor. If it is not available, it may have
      // $$$ to be computed by examining the EOFs of each individual
      // $$$ file that belongs to the file set.

      // Create fully qualified ANSI name from indexname, the PHYSICAL name.
      // If this descriptor was created for a sql/mp table, then the
      // indexname is a fully qualified NSK name (\sys.$vol.subvol.name).
      QualifiedName qualIndexName(indexes_desc->body.indexes_desc.indexname,
                                  1, heap, bindWA);

      // This ext_indexname is expected to be set up correctly as an
      // EXTERNAL-format name (i.e., dquoted if any delimited identifiers)
      // by sqlcat/read*.cpp.  The ...AsAnsiString() is just-in-case (MP?).
      NAString extIndexName(
           indexes_desc->body.indexes_desc.ext_indexname ?
           (NAString)indexes_desc->body.indexes_desc.ext_indexname :
           qualIndexName.getQualifiedNameAsAnsiString(),
           CmpCommon::statementHeap());

      QualifiedName qualExtIndexName;

      //if (indexes_desc->body.indexes_desc.isVolatile)
      if (table->getSpecialType() != ExtendedQualName::VIRTUAL_TABLE)
        qualExtIndexName = QualifiedName(extIndexName, 1, heap, bindWA);
      else
        qualExtIndexName = qualIndexName;

      // for volatile tables, set the object part as the external name.
      // cat/sch parts are internal and should not be shown.
      if (indexes_desc->body.indexes_desc.isVolatile)
        {
          ComObjectName con(extIndexName);
          extIndexName = con.getObjectNamePartAsAnsiString();
        }

      if (partFunc)
        numberOfFiles = partFunc->getCountOfPartitions();

      CMPASSERT(indexes_desc->body.indexes_desc.blocksize > 0);

      NAList<HbaseCreateOption*>* hbaseCreateOptions = NULL;
      if ((indexes_desc->body.indexes_desc.hbaseCreateOptions) &&
          (CmpSeabaseDDL::genHbaseCreateOptions
           (indexes_desc->body.indexes_desc.hbaseCreateOptions,
            hbaseCreateOptions,
            heap,
            NULL,
            NULL)))
        return TRUE;

      if (table->isHbaseTable())
        {
          indexLevels = hbtIndexLevels;
          blockSize = hbtBlockSize;
        }

      newIndex = new (heap)
        NAFileSet(
             qualIndexName, // QN containing "\NSK.$VOL", FUNNYSV, FUNNYNM
             //(indexes_desc->body.indexes_desc.isVolatile ?
             qualExtIndexName, // :
             //qualIndexName),
             extIndexName, // string containing Ansi name CAT.SCH."indx"
             files_desc ? files_desc->body.files_desc.fileorganization
                        : KEY_SEQUENCED_FILE,
             isSystemTable,
             numberOfFiles,
             MAXOF(table_desc->body.table_desc.rowcount,0),
             indexes_desc->body.indexes_desc.record_length,
             blockSize,
             indexLevels,
             allColumns,
             indexKeyColumns,
             partitioningKeyColumns,
             partFunc,
             indexes_desc->body.indexes_desc.keytag,
             uint32ArrayToInt64(
                  indexes_desc->body.indexes_desc.redeftime),
             files_desc ? files_desc->body.files_desc.audit : 0,
             files_desc ? files_desc->body.files_desc.auditcompress : 0,
             files_desc ? files_desc->body.files_desc.compressed : 0,
             files_desc ? (ComCompressionType)files_desc->body.files_desc.dcompressed : COM_NO_COMPRESSION,
             files_desc ? files_desc->body.files_desc.icompressed : 0,
             files_desc ? files_desc->body.files_desc.buffered: 0,
             files_desc ? files_desc->body.files_desc.clearOnPurge: 0,
             isPacked,
             hasRemotePartition,
             ((indexes_desc->body.indexes_desc.keytag != 0) &&
              (indexes_desc->body.indexes_desc.unique != 0)),
             files_desc ? files_desc->body.files_desc.decoupledPartitionKeyList: 0,
             files_desc ? files_desc->body.files_desc.fileCode : 0,
             (indexes_desc->body.indexes_desc.isVolatile != 0),
             (indexes_desc->body.indexes_desc.isInMemoryObjectDefn != 0),
             indexes_desc->body.indexes_desc.indexUID,
             indexes_desc->body.indexes_desc.keys_desc,
             NULL, // no Hive stats
             indexes_desc->body.indexes_desc.numSaltPartns,
             hbaseCreateOptions,
             heap);

      if (isNotAvailable)
        newIndex->setNotAvailable(TRUE);

      newIndex->setRowFormat(indexes_desc->body.indexes_desc.rowFormat);

      // Mark each NAColumn in the list
      indexKeyColumns.setIndexKey();
      if ((table->isHbaseTable()) && (indexes_desc->body.indexes_desc.keytag != 0))
        saveNAColumns.setIndexKey();

      if (indexes_desc->body.indexes_desc.isCreatedExplicitly)
        newIndex->setIsCreatedExplicitly(TRUE);

      //if index is unique and is on one column, then mark column as unique
      if ((indexes_desc->body.indexes_desc.unique) &&
          (indexKeyColumns.entries() == 1))
        indexKeyColumns[0]->setIsUnique();

      partitioningKeyColumns.setPartitioningKey();

      // If it is a VP add it to the list of VPs.
      // Otherwise, add it to the list of indices.
      if (isVerticalPartition)
        vertParts.insert(newIndex); // >>>> RETURN VALUE
      else
        {
          indexes.insert(newIndex);
        }

      //
      // advance to the next index
      //
      if (isTheClusteringKey)
        {
          clusteringIndex = newIndex;  // >>>> RETURN VALUE
          // switch to the alternate indexes by starting over again
          isTheClusteringKey = FALSE;
          indexes_desc = table_desc->body.table_desc.indexes_desc;
        }
      else
        {
          // simply advance to the next in the list
          indexes_desc = indexes_desc->header.next;
        }

      // skip the clustering index, if we encounter it again
      if (indexes_desc AND !indexes_desc->body.indexes_desc.keytag)
        indexes_desc = indexes_desc->header.next;
    } // end while (indexes_desc)

  // logic related to indexes hiding
  return FALSE;
} // static createNAFileSets()
#pragma warn(1506) // warning elimination
// for Hive tables
NABoolean createNAFileSets(hive_tbl_desc* hvt_desc /*IN*/,
const NATable * table /*IN*/,
const NAColumnArray & colArray /*IN*/,
NAFileSetList & indexes /*OUT*/,
NAFileSetList & vertParts /*OUT*/,
NAFileSet * & clusteringIndex /*OUT*/,
LIST(CollIndex) & tableIdList /*OUT*/,
NAMemory* heap,
BindWA * bindWA,
Int32 *maxIndexLevelsPtr = NULL)
{
NABoolean isTheClusteringKey = TRUE;
NABoolean isVerticalPartition;
NABoolean hasRemotePartition = FALSE;
CollIndex numClusteringKeyColumns = 0;
// Set up global cluster information. This global information always
// gets put on the context heap.
//
// Note that this function call will probably not do anything, since
// this cluster information is set up when arkcmp is created; however,
// it's certainly better to have this call here, rather than in a
// doubly-nested loop below where it used to be ...
// $$$ probably not necessary to call this even once ...
setUpClusterInfo(CmpCommon::contextHeap());
// only one set of key columns to handle for hive
Lng32 numberOfFiles = 1; // always at least 1
// NAColumn * indexColumn; // an index/VP key column
NAFileSet * newIndex; // a new file set
// all columns that belong to an index
NAColumnArray allColumns(CmpCommon::statementHeap());
// the index key columns - the SORT columns
NAColumnArray indexKeyColumns(CmpCommon::statementHeap());
// the partitioning key columns - the BUCKETING columns
NAColumnArray partitioningKeyColumns(CmpCommon::statementHeap());
PartitioningFunction * partFunc = NULL;
// is this an index or is it really a VP?
isVerticalPartition = FALSE;
NABoolean isPacked = FALSE;
NABoolean isNotAvailable = FALSE;
// ---------------------------------------------------------------------
// loop over the clustering key columns of the index
// ---------------------------------------------------------------------
const hive_bkey_desc *hbk_desc = hvt_desc->getBucketingKeys();
Int32 numBucketingColumns = 0;
while (hbk_desc)
{
NAString colName(hbk_desc->name_);
colName.toUpper();
NAColumn* bucketingColumn = colArray.getColumn(colName);
if ( bucketingColumn ) {
partitioningKeyColumns.insert(bucketingColumn);
numBucketingColumns++;
}
hbk_desc = hbk_desc->next_;
} // end while (hvk_desc)
const hive_skey_desc *hsk_desc = hvt_desc->getSortKeys();
if ( hsk_desc == NULL ) {
// assume all columns are index key columns
for (CollIndex i=0; i<colArray.entries(); i++ )
indexKeyColumns.insert(colArray[i]);
} else {
while (hsk_desc)
{
NAString colName(hsk_desc->name_);
colName.toUpper();
NAColumn* sortKeyColumn = colArray.getColumn(colName);
if ( sortKeyColumn ) {
indexKeyColumns.insert(sortKeyColumn);
indexKeyColumns.setAscending(indexKeyColumns.entries() - 1,
hsk_desc->orderInt_);
}
hsk_desc = hsk_desc->next_;
} // end while (hsk_desc)
}
// ---------------------------------------------------------------------
// Loop over the non key columns.
// ---------------------------------------------------------------------
for (CollIndex i=0; i<colArray.entries(); i++)
{
allColumns.insert(colArray[i]);
}
//increment for each table/index to create unique identifier
cmpCurrentContext->incrementTableIdent();
// collect file stats from HDFS for the table
const hive_sd_desc *sd_desc = hvt_desc->getSDs();
HHDFSTableStats * hiveHDFSTableStats = new(heap) HHDFSTableStats(heap);
hiveHDFSTableStats->
setPortOverride(CmpCommon::getDefaultLong(HIVE_LIB_HDFS_PORT_OVERRIDE));
// create file-level statistics and estimate total row count and record length
hiveHDFSTableStats->populate(hvt_desc);
if (hiveHDFSTableStats->hasError())
{
*CmpCommon::diags() << DgSqlCode(-1200)
<< DgString0(hiveHDFSTableStats->getDiags().getErrMsg())
<< DgTableName(table->getTableName().getQualifiedNameAsAnsiString());
return TRUE;
}
if ((hiveHDFSTableStats->isOrcFile()) &&
(CmpCommon::getDefault(TRAF_ENABLE_ORC_FORMAT) == DF_OFF))
{
*CmpCommon::diags() << DgSqlCode(-3069)
<< DgTableName(table->getTableName().getQualifiedNameAsAnsiString());
return TRUE;
}
#ifndef NDEBUG
NAString logFile =
ActiveSchemaDB()->getDefaults().getValue(HIVE_HDFS_STATS_LOG_FILE);
if (logFile.length())
{
FILE *ofd = fopen(logFile, "a");
if (ofd)
{
hiveHDFSTableStats->print(ofd);
fclose(ofd);
}
}
// for release code, would need to sandbox the ability to write
// files, e.g. to a fixed log directory
#endif
// Create a node map for partitioning function.
NodeMap* nodeMap = new (heap) NodeMap(heap, NodeMap::HIVE);
createNodeMap(hvt_desc,
nodeMap,
heap,
(char*)(table->getTableName().getObjectName().data()),
cmpCurrentContext->getTableIdent());
tableIdList.insert(CollIndex(cmpCurrentContext->getTableIdent()));
// For the time being, set it up as Hash2 partitioned table
Int32 numBuckets = hvt_desc->getSDs()->buckets_;
if (numBuckets>1 && partitioningKeyColumns.entries()>0) {
if ( CmpCommon::getDefault(HIVE_USE_HASH2_AS_PARTFUNCION) == DF_ON )
partFunc = createHash2PartitioningFunction
(numBuckets, partitioningKeyColumns, nodeMap, heap);
else
partFunc = createHivePartitioningFunction
(numBuckets, partitioningKeyColumns, nodeMap, heap);
} else
partFunc = new (heap)
SinglePartitionPartitioningFunction(nodeMap, heap);
// NB: Just in case, we made a call to setupClusterInfo at the
// beginning of this function.
// desc_struct * partns_desc;
Int32 indexLevels = 1;
// add a new access path
//
// $$$ The estimated number of records should be available from
// $$$ a FILES descriptor. If it is not available, it may have
// $$$ to be computed by examining the EOFs of each individual
// $$$ file that belongs to the file set.
// Create fully qualified ANSI name from indexname, the PHYSICAL name.
// If this descriptor was created for a sql/mp table, then the
// indexname is a fully qualified NSK name (\sys.$vol.subvol.name).
QualifiedName qualIndexName(
(char*)(table->getTableName().getObjectName().data()),
"HIVE", "", heap);
// This ext_indexname is expected to be set up correctly as an
// EXTERNAL-format name (i.e., dquoted if any delimited identifiers)
// by sqlcat/read*.cpp. The ...AsAnsiString() is just-in-case (MP?).
NAString extIndexName(
qualIndexName.getQualifiedNameAsAnsiString(),
CmpCommon::statementHeap());
QualifiedName qualExtIndexName = QualifiedName(extIndexName, 1, heap, bindWA);
if (partFunc)
numberOfFiles = partFunc->getCountOfPartitions();
Int64 estimatedRC = 0;
Int64 estimatedRecordLength = 0;
if ( !sd_desc->isTrulyText() ) {
//
// Poor man's estimation by assuming the record length in hive is the
// same as SQ's. We can do better once we know how the binary data is
// stored in hdfs.
//
estimatedRecordLength = colArray.getTotalStorageSize();
estimatedRC = hiveHDFSTableStats->getTotalSize() / estimatedRecordLength;
} else {
// use the information estimated during populate() call
estimatedRC = hiveHDFSTableStats->getEstimatedRowCount();
estimatedRecordLength =
Lng32(MINOF(hiveHDFSTableStats->getEstimatedRecordLength(),
hiveHDFSTableStats->getEstimatedBlockSize()-100));
}
((NATable*)table)-> setOriginalRowCount((double)estimatedRC);
newIndex = new (heap)
NAFileSet(
qualIndexName, // QN containing "\NSK.$VOL", FUNNYSV, FUNNYNM
//(indexes_desc->body.indexes_desc.isVolatile ?
qualExtIndexName, // :
//qualIndexName),
extIndexName, // string containing Ansi name CAT.SCH."indx"
// The real orginization is a hybrid of KEY_SEQ and HASH.
// Well, we just take the KEY_SEQ for now.
KEY_SEQUENCED_FILE,
FALSE, // isSystemTable
numberOfFiles,
// HIVE-TBD
Cardinality(estimatedRC),
Lng32(estimatedRecordLength),
//hvt_desc->getBlockSize(),
(Lng32)hiveHDFSTableStats->getEstimatedBlockSize(),
indexLevels, // HIVE-TBD
allColumns,
indexKeyColumns,
partitioningKeyColumns,
partFunc,
0, // indexes_desc->body.indexes_desc.keytag,
hvt_desc->redeftime(),
1, // files_desc->body.files_desc.audit
0, // files_desc->body.files_desc.auditcompress
0, // files_desc->body.files_desc.compressed
COM_NO_COMPRESSION,
0, // files_desc->body.files_desc.icompressed
0, // files_desc->body.files_desc.buffered:
0, // files_desc->body.files_desc.clearOnPurge: 0,
isPacked,
hasRemotePartition,
0, // not a unique secondary index
0, // isDecoupledRangePartitioned
0, // file code
0, // not a volatile
0, // inMemObjectDefn
0,
NULL, // indexes_desc->body.indexes_desc.keys_desc,
hiveHDFSTableStats,
0, // saltPartns
NULL, //hbaseCreateOptions
heap);
if (isNotAvailable)
newIndex->setNotAvailable(TRUE);
// Mark each NAColumn in the list
indexKeyColumns.setIndexKey();
partitioningKeyColumns.setPartitioningKey();
// If it is a VP add it to the list of VPs.
// Otherwise, add it to the list of indices.
indexes.insert(newIndex);
clusteringIndex = newIndex;
return FALSE;
} // static createNAFileSets()
#pragma warn(1506) // warning elimination
// -----------------------------------------------------------------------
// Mark columns named in PRIMARY KEY constraint (these will be different
// from clustering key columns when the PK is droppable), for Binder error 4033.
// -----------------------------------------------------------------------
static void markPKCols(const constrnts_desc_struct * constrnt /*IN*/,
const NAColumnArray& columnArray /*IN*/)
{
desc_struct *keycols_desc = constrnt->constr_key_cols_desc;
while (keycols_desc)
{
constrnt_key_cols_desc_struct *key =
&keycols_desc->body.constrnt_key_cols_desc;
// Lookup by name (not position: key->position is pos *within the PK*)
NAColumn *nacol = columnArray.getColumn(key->colname);
if(nacol != NULL)
nacol->setPrimaryKey();
keycols_desc = keycols_desc->header.next;
}
} // static markPKCols
// -----------------------------------------------------------------------
// Insert MP CHECK CONSTRAINT text into NATable::checkConstraints_.
// -----------------------------------------------------------------------
// ---------------------------------------------------------------------------
// Build the in-memory constraint lists for a table from its descriptor chain.
//
// Walks table_desc's constrnts_desc list and, per constraint type, appends to
// the appropriate OUT list:
//   PRIMARY KEY / UNIQUE -> uniqueConstraints (PK columns are also marked)
//   REF                  -> refConstraints
//   CHECK / MP CHECK     -> checkConstraints (constraint text only)
//
// Each constraint name is parsed via the QualifiedName ctor (which may invoke
// the parser through bindWA).
//
// Returns TRUE on error (constraint name failed to parse), FALSE on success.
// ---------------------------------------------------------------------------
static NABoolean
createConstraintInfo(const desc_struct * table_desc /*IN*/,
                     const QualifiedName& tableQualName /*IN*/,
                     const NAColumnArray& columnArray /*IN*/,
                     CheckConstraintList& checkConstraints /*OUT*/,
                     AbstractRIConstraintList& uniqueConstraints,
                     AbstractRIConstraintList& refConstraints,
                     NAMemory* heap,
                     BindWA *bindWA)
{
  desc_struct *constrnts_desc = table_desc->body.table_desc.constrnts_desc;
  while (constrnts_desc)
    {
      constrnts_desc_struct *constrntHdr = &constrnts_desc->body.constrnts_desc;
      Int32 minNameParts=3;
      QualifiedName constrntName(constrntHdr->constrntname, minNameParts, (NAMemory*)0, bindWA);
      if (constrntName.numberExpanded() == 0) {
        // There was an error parsing the name of the constraint (see
        // QualifiedName ctor). Return TRUE indicating an error.
        //
        return TRUE;
      }
      switch (constrntHdr->type)
        {
        case PRIMARY_KEY_CONSTRAINT:
          markPKCols(constrntHdr, columnArray);
          // NOTE: intentional fallthrough -- a PRIMARY KEY is also recorded
          // as a unique constraint (with the isPrimaryKey flag set below).
        case UNIQUE_CONSTRAINT: {
          UniqueConstraint *uniqueConstraint = new (heap)
            UniqueConstraint(constrntName, tableQualName, heap,
                             (constrntHdr->type == PRIMARY_KEY_CONSTRAINT));
          uniqueConstraint->setKeyColumns(constrntHdr, heap);
          uniqueConstraint->setRefConstraintsReferencingMe(constrntHdr, heap, bindWA);
          uniqueConstraints.insert(uniqueConstraint);
        }
        break;
        case REF_CONSTRAINT:
        {
          // Names of the referenced constraint and referenced table come from
          // the first entry of the referenced_constrnts_desc chain.
          char *refConstrntName = constrntHdr->referenced_constrnts_desc->
            body.ref_constrnts_desc.constrntname;
          char *refTableName = constrntHdr->referenced_constrnts_desc->
            body.ref_constrnts_desc.tablename;
          QualifiedName refConstrnt(refConstrntName, 3, (NAMemory*)0, bindWA);
          QualifiedName refTable(refTableName, 3, (NAMemory*)0, bindWA);
          RefConstraint *refConstraint = new (heap)
            RefConstraint(constrntName, tableQualName,
                          refConstrnt, refTable, heap);
          refConstraint->setKeyColumns(constrntHdr, heap);
          refConstraint->setIsEnforced((constrntHdr->isEnforced == 1));
          refConstraints.insert(refConstraint);
        }
        break;
        case CHECK_CONSTRAINT:
        case MP_CHECK_CONSTRAINT:
        {
          // Only the raw constraint text is kept; it is parsed on demand.
          char *constrntText = constrntHdr->check_constrnts_desc->
            body.check_constrnts_desc.constrnt_text;
          checkConstraints.insert(new (heap)
            CheckConstraint(constrntName, constrntText, heap));
        }
        break;
        default:
          CMPASSERT(FALSE);
        }
      constrnts_desc = constrnts_desc->header.next;
    }
  // return FALSE, indicating no error.
  //
  return FALSE;
} // static createConstraintInfo()
// Hash function for collections keyed by a set of column positions:
// delegate entirely to the set's own hash method.
ULng32 hashColPosList(const CollIndexSet &colSet)
{
  const ULng32 hashValue = colSet.hash();
  return hashValue;
}
// ----------------------------------------------------------------------------
// method: lookupObjectUidByName
//
// Calls DDL manager to get the object UID for the specified object
//
// params:
// qualName - name of object to lookup
// objectType - type of object
// reportError - whether to set diags area when not found
//
// returns:
// -1 -> error found trying to read metadata including object not found
// UID of found object
//
// the diags area contains details of any error detected
//
// ----------------------------------------------------------------------------
static Int64 lookupObjectUidByName( const QualifiedName& qualName
                                  , ComObjectType objectType
                                  , NABoolean reportError
                                  )
{
  ExeCliInterface cli(STMTHEAP);
  CmpSeabaseDDL ddl(STMTHEAP);

  // Metadata reads must run in the META compiler context; a nonzero
  // return from switchCompiler means the switch failed.
  if (ddl.switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
  {
    // Raise the generic internal error only if nothing more specific
    // has already been reported in the diags area.
    if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
      *CmpCommon::diags() << DgSqlCode( -4400 );
    return -1;
  }

  Int64 uid = ddl.getObjectUID(&cli,
                               qualName.getCatalogName().data(),
                               qualName.getSchemaName().data(),
                               qualName.getObjectName().data(),
                               comObjectTypeLit(objectType),
                               NULL,
                               NULL,
                               FALSE,
                               reportError);

  // Always restore the original compiler context before returning.
  ddl.switchBackCompiler();
  return uid;
}
NABoolean NATable::fetchObjectUIDForNativeTable(const CorrName& corrName)
{
  // Map the native (e.g. Hive/HBase) name onto the name of the matching
  // Trafodion external table, then look that table up in the metadata.
  const QualifiedName &nativeName = corrName.getQualifiedNameObj();
  NAString trafName = ComConvertNativeNameToTrafName
       (nativeName.getCatalogName(),
        nativeName.getUnqualifiedSchemaNameAsAnsiString(),
        nativeName.getUnqualifiedObjectNameAsAnsiString());

  QualifiedName extObjName(trafName, 3, STMTHEAP);
  objectUID_ = lookupObjectUidByName(extObjName, COM_BASE_TABLE_OBJECT, FALSE);

  // Found: the table has an external Trafodion definition.
  if (objectUID_ > 0)
    return TRUE;

  // Unexpected metadata error: propagate to the caller.
  if (CmpCommon::diags()->mainSQLCODE() < 0)
    return FALSE;

  // Not found: the table is not externally defined in Trafodion;
  // record that as objectUID 0.
  objectUID_ = 0;
  return TRUE;
}
// -----------------------------------------------------------------------
// NATable::NATable() constructor
// -----------------------------------------------------------------------
const Lng32 initHeapSize = 32 * 1024; // ## 32K: tune this someday!
#pragma nowarn(770) // warning elimination
// Construct an NATable from a pre-built table descriptor (inTableDesc is
// required; descriptor lookup from metadata is asserted against below).
// On any error the ctor returns early *before* setting colcount_; the
// Binder treats colcount_ == 0 as "invalid table definition".
NATable::NATable(BindWA *bindWA,
                 const CorrName& corrName,
                 NAMemory *heap,
                 desc_struct* inTableDesc)
  //
  // The NATable heap ( i.e. heap_ ) used to come from ContextHeap
  // (i.e. heap) but it creates high memory usage/leakage in Context
  // Heap. Although the NATables are deleted at the end of each statement,
  // the heap_ is returned to heap (i.e. context heap) which caused
  // context heap containing a lot of not used chunk of memory. So it is
  // changed to be from whatever heap is passed in at the call in
  // NATableDB.getNATable.
  //
  // Now NATable objects can be cached.If an object is to be cached (persisted
  // across statements) a NATable heap is allocated for the object
  // and is passed in (this is done in NATableDB::get(CorrName& corrName...).
  // Otherwise a reference to the Statement heap is passed in. When a cached
  // object is to be deleted the object's heap is deleted which wipes out the
  // NATable object all its related stuff. NATable objects that are not cached
  // are wiped out at the end of the statement when the statement heap is deleted.
  //
  : heap_(heap),
    referenceCount_(0),
    refsIncompatibleDP2Halloween_(FALSE),
    isHalloweenTable_(FALSE),
    qualifiedName_(corrName.getExtendedQualNameObj(),heap),
    synonymReferenceName_(heap),
    fileSetName_(corrName.getQualifiedNameObj(),heap), // for now, set equal
    clusteringIndex_(NULL),
    colcount_(0),
    colArray_(heap),
    recordLength_(0),
    indexes_(heap),
    vertParts_(heap),
    colStats_(NULL),
    statsFetched_(FALSE),
    viewFileName_(NULL),
    viewText_(NULL),
    viewTextInNAWchars_(heap),
    viewTextCharSet_(CharInfo::UnknownCharSet),
    viewCheck_(NULL),
    flags_(IS_INSERTABLE | IS_UPDATABLE),
    insertMode_(COM_REGULAR_TABLE_INSERT_MODE),
    isSynonymTranslationDone_(FALSE),
    checkConstraints_(heap),
    createTime_(0),
    redefTime_(0),
    cacheTime_(0),
    statsTime_(0),
    catalogUID_(0),
    schemaUID_(0),
    objectUID_(0),
    objectType_(COM_UNKNOWN_OBJECT),
    partitioningScheme_(COM_UNKNOWN_PARTITIONING),
    uniqueConstraints_(heap),
    refConstraints_(heap),
    isAnMV_(FALSE),
    isAnMVMetaData_(FALSE),
    mvsUsingMe_(heap),
    mvInfo_(NULL),
    accessedInCurrentStatement_(TRUE),
    setupForStatement_(FALSE),
    resetAfterStatement_(FALSE),
    hitCount_(0),
    replacementCounter_(2),
    sizeInCache_(0),
    recentlyUsed_(TRUE),
    tableConstructionHadWarnings_(FALSE),
    isAnMPTableWithAnsiName_(FALSE),
    isUMDTable_(FALSE),
    isSMDTable_(FALSE),
    isMVUMDTable_(FALSE),
    // For virtual tables, we set the object schema version
    // to be the current schema version
    osv_(COM_VERS_CURR_SCHEMA),
    ofv_(COM_VERS_CURR_SCHEMA),
    partnsDesc_(NULL),
    colsWithMissingStats_(NULL),
    originalCardinality_(-1.0),
    tableIdList_(heap),
    rcb_(NULL),
    rcbLen_(0),
    keyLength_(0),
    parentTableName_(NULL),
    sgAttributes_(NULL),
    isHive_(FALSE),
    isHbase_(FALSE),
    isHbaseCell_(FALSE),
    isHbaseRow_(FALSE),
    isSeabase_(FALSE),
    isSeabaseMD_(FALSE),
    isSeabasePrivSchemaTable_(FALSE),
    isUserUpdatableSeabaseMD_(FALSE),
    resetHDFSStatsAfterStmt_(FALSE),
    hiveDefaultStringLen_(0),
    hiveTableId_(-1),
    tableDesc_(inTableDesc),
    privInfo_(NULL),
    secKeySet_(heap),
    newColumns_(heap),
    snapshotName_(NULL),
    prototype_(NULL)
{
  NAString tblName = qualifiedName_.getQualifiedNameObj().getQualifiedNameAsString();
  NAString mmPhase;
  // Warning count before construction; compared against the count at the
  // end to set tableConstructionHadWarnings_.
  Lng32 preCreateNATableWarnings = CmpCommon::diags()->getNumber(DgSqlCode::WARNING_);
  //set heap type (statement / context / other) -- used only for the
  //memory-monitoring phase label below
  if(heap_ == CmpCommon::statementHeap()){
    heapType_ = STATEMENT;
    mmPhase = "NATable Init (Stmt) - " + tblName;
  }else if (heap_ == CmpCommon::contextHeap()){
    heapType_ = CONTEXT;
    mmPhase = "NATable Init (Cnxt) - " + tblName;
  }else {
    heapType_ = OTHER;
    mmPhase = "NATable Init (Other) - " + tblName;
  }
  MonitorMemoryUsage_Enter((char*)mmPhase.data(), heap_, TRUE);
  // Do a readTableDef, if table descriptor has not been passed in
  //
  desc_struct * table_desc;
  Int32 *maxIndexLevelsPtr = new (STMTHEAP) Int32;
  if (!inTableDesc)
    {
      // lookup from metadata other than HBase is not currently supported
      CMPASSERT(inTableDesc);
    }
  else
    {
      // use the input descriptor to create NATable.
      // Used if 'virtual' tables, like EXPLAIN,
      // DESCRIBE, RESOURCE_FORK, etc are to be created.
      table_desc = inTableDesc;
      // Need to initialize the maxIndexLevelsPtr field
      *maxIndexLevelsPtr = 1;
    }
  if ((corrName.isHbase()) || (corrName.isSeabase()))
    {
      setIsHbaseTable(TRUE);
      setIsSeabaseTable(corrName.isSeabase());
      setIsHbaseCellTable(corrName.isHbaseCell());
      setIsHbaseRowTable(corrName.isHbaseRow());
      setIsSeabaseMDTable(corrName.isSeabaseMD());
    }
  // Check if the synonym name translation to reference object has been done.
  if (table_desc->body.table_desc.isSynonymNameTranslationDone)
    {
      isSynonymTranslationDone_ = TRUE;
      NAString synonymReferenceName(table_desc->body.table_desc.tablename);
      synonymReferenceName_ = synonymReferenceName;
      // objectUID is stored as two 32-bit halves; reassemble into a 64-bit UID.
      ComUID uid(table_desc->body.table_desc.objectUID[0]*0x100000000LL +
                 table_desc->body.table_desc.objectUID[1]);
      synonymReferenceObjectUid_ = uid;
    }
  // Check if it is a UMD table, or SMD table or MV related UMD object
  // and set all corresponding flags to indicate this.
  if (table_desc->body.table_desc.isUMDTable)
    {
      isUMDTable_ = TRUE;
    }
  if (table_desc->body.table_desc.issystemtablecode)
    {
      isSMDTable_ = TRUE;
    }
  if (table_desc->body.table_desc.isMVMetaDataObject)
    {
      isMVUMDTable_ = TRUE;
    }
  isTrigTempTable_ = (qualifiedName_.getSpecialType() == ExtendedQualName::TRIGTEMP_TABLE);
  switch(table_desc->body.table_desc.rowFormat)
    {
    case COM_PACKED_FORMAT_TYPE:
      setSQLMXTable(TRUE);
      break;
    case COM_ALIGNED_FORMAT_TYPE:
      setSQLMXAlignedTable(TRUE);
      break;
    case COM_HBASE_FORMAT_TYPE:
    case COM_UNKNOWN_FORMAT_TYPE:
      break;
    }
  if (table_desc->body.table_desc.isVolatile)
    {
      setVolatileTable( TRUE );
    }
  if (table_desc->body.table_desc.isInMemoryObjectDefn)
    {
      setInMemoryObjectDefn( TRUE );
    }
  if (table_desc->body.table_desc.isDroppable)
    {
      setDroppableTable( TRUE );
    }
  if (corrName.isExternal())
    {
      setIsExternalTable(TRUE);
    }
  if (qualifiedName_.getQualifiedNameObj().isHistograms() ||
      qualifiedName_.getQualifiedNameObj().isHistogramIntervals())
    {
      setIsHistogramTable(TRUE);
    }
  insertMode_ = table_desc->body.table_desc.insertMode;
  setRecordLength(table_desc->body.table_desc.record_length);
  //
  // Add timestamp information.
  //
  createTime_ = uint32ArrayToInt64(table_desc->body.table_desc.createtime);
  redefTime_ = uint32ArrayToInt64(table_desc->body.table_desc.redeftime);
  cacheTime_ = uint32ArrayToInt64(table_desc->body.table_desc.cachetime);
  catalogUID_ = uint32ArrayToInt64(table_desc->body.table_desc.catUID);
  schemaUID_ = uint32ArrayToInt64(table_desc->body.table_desc.schemaUID);
  objectUID_ = uint32ArrayToInt64(table_desc->body.table_desc.objectUID);
  // Set the objectUID_ for hbase Cell and Row tables, if the table has
  // been defined in Trafodion use this value, otherwise, set to 0
  if (isHbaseCell_ || isHbaseRow_)
    {
      // Early return on metadata error; colcount_ stays 0 => invalid NATable.
      if ( !fetchObjectUIDForNativeTable(corrName) )
        return;
      if (objectUID_ > 0 )
        setHasExternalTable(TRUE);
    }
  if (table_desc->body.table_desc.owner)
    {
      Int32 userInfo (table_desc->body.table_desc.owner);
      owner_ = userInfo;
    }
  if (table_desc->body.table_desc.schemaOwner)
    {
      Int32 schemaUser(table_desc->body.table_desc.schemaOwner);
      schemaOwner_ = schemaUser;
    }
  objectType_ = table_desc->body.table_desc.objectType;
  partitioningScheme_ = table_desc->body.table_desc.partitioningScheme;
  // Set up privs
  if ((corrName.getSpecialType() == ExtendedQualName::SG_TABLE) ||
      (!(corrName.isSeabaseMD() || corrName.isSpecialTable())))
    setupPrivInfo();
  if ((table_desc->body.table_desc.objectFlags & SEABASE_OBJECT_IS_EXTERNAL_HIVE) != 0 ||
      (table_desc->body.table_desc.objectFlags & SEABASE_OBJECT_IS_EXTERNAL_HBASE) != 0)
    setIsExternalTable(TRUE);
  if (CmpSeabaseDDL::isMDflagsSet
      (table_desc->body.table_desc.tablesFlags, MD_TABLES_HIVE_EXT_COL_ATTRS))
    setHiveExtColAttrs(TRUE);
  if (CmpSeabaseDDL::isMDflagsSet
      (table_desc->body.table_desc.tablesFlags, MD_TABLES_HIVE_EXT_KEY_ATTRS))
    setHiveExtKeyAttrs(TRUE);
  rcb_ = table_desc->body.table_desc.rcb;
  rcbLen_ = table_desc->body.table_desc.rcbLen;
  keyLength_ = table_desc->body.table_desc.keyLen;
  // Deep-copy optional name strings onto this object's heap so they
  // outlive the descriptor.
  if (table_desc->body.table_desc.parentTableName)
    {
      parentTableName_ =
        new(heap_) char[strlen(table_desc->body.table_desc.parentTableName) + 1];
      strcpy(parentTableName_, table_desc->body.table_desc.parentTableName);
    }
  if (table_desc->body.table_desc.snapshotName)
    {
      snapshotName_ =
        new(heap_) char[strlen(table_desc->body.table_desc.snapshotName) + 1];
      strcpy(snapshotName_, table_desc->body.table_desc.snapshotName);
    }
  if (table_desc->body.table_desc.default_col_fam)
    defaultColFam_ = table_desc->body.table_desc.default_col_fam;
  if (table_desc->body.table_desc.all_col_fams)
    {
      // Space delimited col families.
      string buf; // Have a buffer string
      stringstream ss(table_desc->body.table_desc.all_col_fams); // Insert the string into a stream
      while (ss >> buf)
        {
          allColFams_.insert(buf.c_str());
        }
    }
  else
    allColFams_.insert(defaultColFam_);
  desc_struct * files_desc = table_desc->body.table_desc.files_desc;
  // Some objects don't have a file_desc set up (e.g. views)
  // Therefore, only setup the partnsDesc_ if this is a partitionable object
  if (files_desc)
    {
      if (files_desc->body.files_desc.partns_desc)
        partnsDesc_ = files_desc->body.files_desc.partns_desc;
    }
  else
    partnsDesc_ = NULL;
  //
  // Insert a NAColumn in the colArray_ for this NATable for each
  // columns_desc from the ARK SMD. Returns TRUE if error creating NAColumns.
  //
  if (createNAColumns(table_desc->body.table_desc.columns_desc,
                      this,
                      colArray_ /*OUT*/,
                      heap_))
    //coverity[leaked_storage]
    return; // colcount_ == 0 indicates an error
  //
  // Add view information, if this is a view
  //
  desc_struct *view_desc = table_desc->body.table_desc.views_desc;
  if (view_desc)
    {
      // +2: room for the appended ';' and the NUL terminator.
      viewText_ = new (heap_) char[strlen(view_desc->body.view_desc.viewtext) + 2];
      strcpy(viewText_, view_desc->body.view_desc.viewtext);
      strcat(viewText_, ";");
      viewTextCharSet_ = (CharInfo::CharSet)view_desc->body.view_desc.viewtextcharset;
      viewCheck_ = NULL; //initialize
      if(view_desc->body.view_desc.viewchecktext){
        UInt32 viewCheckLength = str_len(view_desc->body.view_desc.viewchecktext)+1;
        viewCheck_ = new (heap_) char[ viewCheckLength];
        memcpy(viewCheck_, view_desc->body.view_desc.viewchecktext,
               viewCheckLength);
      }
      setUpdatable(view_desc->body.view_desc.updatable);
      setInsertable(view_desc->body.view_desc.insertable);
      //
      // The updatable flag is false for an MP view only if it is NOT a
      // protection view. Therefore updatable == FALSE iff it is a
      // shorthand view. See ReadTableDef.cpp, l. 3379.
      //
      viewFileName_ = NULL;
      CMPASSERT(view_desc->body.view_desc.viewfilename);
      UInt32 viewFileNameLength = str_len(view_desc->body.view_desc.viewfilename) + 1;
      viewFileName_ = new (heap_) char[viewFileNameLength];
      memcpy(viewFileName_, view_desc->body.view_desc.viewfilename,
             viewFileNameLength);
    }
  else
    {
      //keep track of memory used by NAFileSets
      Lng32 preCreateNAFileSetsMemSize = heap_->getAllocSize();
      //
      // Process indexes and vertical partitions for this table.
      //
      if (createNAFileSets(table_desc /*IN*/,
                           this /*IN*/,
                           colArray_ /*IN*/,
                           indexes_ /*OUT*/,
                           vertParts_ /*OUT*/,
                           clusteringIndex_ /*OUT*/,
                           tableIdList_ /*OUT*/,
                           heap_,
                           bindWA,
                           newColumns_, /*OUT*/
                           maxIndexLevelsPtr)) {
        return; // colcount_ == 0 indicates an error
      }
      // Add constraint info.
      //
      // This call to createConstraintInfo, calls the parser on
      // the constraint name
      //
      NABoolean errorOccurred =
        createConstraintInfo(table_desc /*IN*/,
                             getTableName() /*IN*/,
                             getNAColumnArray()/*IN (some columns updated)*/,
                             checkConstraints_ /*OUT*/,
                             uniqueConstraints_/*OUT*/,
                             refConstraints_ /*OUT*/,
                             heap_,
                             bindWA);
      if (errorOccurred) {
        // return before setting colcount_, indicating that there
        // was an error in constructing this NATable.
        //
        return;
      }
      //
      // FetchHistograms call used to be here -- moved to getStatistics().
      //
    }
  // change partFunc for base table if PARTITION clause has been used
  // to limit the number of partitions that will be accessed.
  if ((qualifiedName_.isPartitionNameSpecified()) ||
      (qualifiedName_.isPartitionRangeSpecified())) {
    if (filterUnusedPartitions(corrName.getPartnClause())) {
      return ;
    }
  }
  //
  // Set colcount_ after all possible errors (Binder uses nonzero colcount
  // as an indicator of valid table definition).
  //
  CMPASSERT(table_desc->body.table_desc.colcount >= 0); // CollIndex cast ok?
  colcount_ = (CollIndex)table_desc->body.table_desc.colcount;
  // If there is a host variable associated with this table, store it
  // for use by the generator to generate late-name resolution information.
  //
  HostVar *hv = corrName.getPrototype();
  prototype_ = hv ? new (heap_) HostVar(*hv) : NULL;
  // MV
  // Initialize the MV support data members
  isAnMV_ = table_desc->body.table_desc.isMVtable;
  isAnMVMetaData_ = table_desc->body.table_desc.isMVMetaDataObject;
  mvAttributeBitmap_.initBitmap(table_desc->body.table_desc.mvAttributesBitmap);
  desc_struct *mvs_desc = table_desc->body.table_desc.using_mvs_desc;
  // Memory Leak
  while (mvs_desc)
    {
      using_mv_desc_struct *mv = &mvs_desc->body.using_mv_desc;
      UsingMvInfo *usingMv = new(heap_)
        UsingMvInfo(mv->mvName, mv->refreshType, mv->rewriteEnabled,
                    mv->isInitialized, heap_);
      mvsUsingMe_.insert(usingMv);
      mvs_desc = mvs_desc->header.next;
    }
  // ++MV
  // fix the special-type for MV objects. There are cases where the type is
  // set to NORMAL_TABLE although this is an MV.
  //
  // Example:
  // --------
  // in the statement "select * from MV1" mv1 will have a NORMAL_TABLE
  // special-type, while in "select * from table(mv_table MV1)" it will
  // have the MV_TABLE special-type.
  if (isAnMV_)
    {
      switch(qualifiedName_.getSpecialType())
        {
        case ExtendedQualName::GHOST_TABLE:
          qualifiedName_.setSpecialType(ExtendedQualName::GHOST_MV_TABLE);
          break;
        case ExtendedQualName::GHOST_MV_TABLE:
          // Do not change it
          break;
        default:
          qualifiedName_.setSpecialType(ExtendedQualName::MV_TABLE);
          break;
        }
    }
  // --MV
  // Initialize the sequence generator fields
  desc_struct *sequence_desc = table_desc->body.table_desc.sequence_generator_desc;
  if (sequence_desc != NULL) {
    sequence_generator_desc_struct *sg_desc = &sequence_desc->body.sequence_generator_desc;
    if (sg_desc != NULL)
      {
        sgAttributes_ =
          new(heap_) SequenceGeneratorAttributes(
               sg_desc->startValue,
               sg_desc->increment,
               sg_desc->maxValue,
               sg_desc->minValue,
               sg_desc->sgType,
               sg_desc->sqlDataType,
               sg_desc->fsDataType,
               sg_desc->cycleOption,
               FALSE,
               sg_desc->objectUID,
               sg_desc->cache,
               sg_desc->nextValue,
               0,
               sg_desc->redefTime);
      }
  }
#ifndef NDEBUG
  if (getenv("NATABLE_DEBUG"))
    {
      cout << "NATable " << (void*)this << " "
           << qualifiedName_.getQualifiedNameObj().getQualifiedNameAsAnsiString() << " "
           << (Int32)qualifiedName_.getSpecialType() << endl;
      colArray_.print();
    }
#endif
  // NOTE(review): the name pieces (nodeName/catStr/schemaStr/fileStr) and
  // primaryNodeNum computed in the block below are never used after the
  // block ends -- this looks like leftover MP/NSK cache-key logic; confirm
  // before removing.
  //this guy is cacheable
  if((qualifiedName_.isCacheable())&&
     (NOT (isHbaseTable())) &&
     //this object is not on the statement heap (i.e. it is being cached)
     ((heap_ != CmpCommon::statementHeap())||
      (OSIM_runningInCaptureMode())))
    {
      char * nodeName = NULL;
      char * catStr = NULL;
      char * schemaStr = NULL;
      char * fileStr = NULL;
      short nodeNameLen = 0;
      Int32 catStrLen = 0;
      Int32 schemaStrLen = 0;
      Int32 fileStrLen = 0;
#ifdef NA_64BIT
      // dg64 - match signature
      int_32 primaryNodeNum=0;
#else
      Int32 primaryNodeNum=0;
#endif
      short error = 0; // NOTE(review): never read within this block
      //clusteringIndex has physical filename that can be used to check
      //if a catalog operation has been performed on a table.
      //Views don't have clusteringIndex, so we get physical filename
      //from the viewFileName_ datamember.
      if(viewText_)
        {
          //view filename starts with node name
          //filename is in format \<node_name>.$<volume>.<subvolume>.<file>
          //catStr => <volume>
          //schemaStr => <subvolume>
          //fileStr => <file>
          nodeName = viewFileName_;
          catStr = nodeName;
          //skip over node name
          //measure node name length
          //get to begining of volume name
          //Measure length of node name
          //skip over node name i.e. \MAYA, \AZTEC, etc
          //and get to volume name
          while((nodeName[nodeNameLen]!='.')&&
                (nodeNameLen < 8)){
            catStr++;
            nodeNameLen++;
          };
          //skip over '.' and the '$' in volume name
          catStr=&nodeName[nodeNameLen+2];
          schemaStr=catStr;
          //skip over the volume/catalog name
          //while measuring catalog name length
          while((catStr[catStrLen]!='.')&&
                (catStrLen < 8))
            {
              schemaStr++;
              catStrLen++;
            }
          //skip over the '.'
          schemaStr++;
          fileStr=schemaStr;
          //skip over the subvolume/schema name
          //while measuring schema name length
          while((schemaStr[schemaStrLen]!='.')&&
                (schemaStrLen < 8))
            {
              fileStr++;
              schemaStrLen++;
            }
          //skip over the '.'
          fileStr++;
          fileStrLen = str_len(fileStr);
          //figure out the node number for the node
          //which has the primary partition.
          primaryNodeNum=0;
          if(!OSIM_runningSimulation())
            primaryNodeNum = gpClusterInfo->mapNodeNameToNodeNum(NAString(nodeName));
        }
      else{
        //get qualified name of the clustering index which should
        //be the actual physical file name of the table
        const QualifiedName fileNameObj = getClusteringIndex()->
          getRandomPartition();
        const NAString fileName = fileNameObj.getObjectName();
        //get schemaName object
        const SchemaName schemaNameObj = fileNameObj.getSchemaName();
        const NAString schemaName = schemaNameObj.getSchemaName();
        //get catalogName object
        //this contains a string in the form \<node_name>.$volume
        const CatalogName catalogNameObj = fileNameObj.getCatalogName();
        const NAString catalogName = catalogNameObj.getCatalogName();
        nodeName = (char*) catalogName.data();
        catStr = nodeName;
        //Measure length of node name
        //skip over node name i.e. \MAYA, \AZTEC, etc
        //and get to volume name
        while((nodeName[nodeNameLen]!='.')&&
              (nodeNameLen < 8)){
          catStr++;
          nodeNameLen++;
        };
        //get volume/catalog name
        //skip ".$"
        catStr=&nodeName[nodeNameLen+2];
#pragma nowarn(1506) // warning elimination
        catStrLen = catalogName.length() - (nodeNameLen+2);
#pragma warn(1506) // warning elimination
        //get subvolume/schema name
        schemaStr = (char *) schemaName.data();
#pragma nowarn(1506) // warning elimination
        schemaStrLen = schemaName.length();
#pragma warn(1506) // warning elimination
        //get file name
        fileStr = (char *) fileName.data();
#pragma nowarn(1506) // warning elimination
        fileStrLen = fileName.length();
#pragma warn(1506) // warning elimination
        //figure out the node number for the node
        //which has the primary partition.
        primaryNodeNum=0;
        primaryNodeNum = gpClusterInfo->mapNodeNameToNodeNum(NAString(nodeName));
      }
    }
  Lng32 postCreateNATableWarnings = CmpCommon::diags()->getNumber(DgSqlCode::WARNING_);
  if(postCreateNATableWarnings != preCreateNATableWarnings)
    tableConstructionHadWarnings_=TRUE;
  const char *lobHdfsServer = CmpCommon::getDefaultString(LOB_HDFS_SERVER);
  Int32 lobHdfsPort = (Lng32)CmpCommon::getDefaultNumeric(LOB_HDFS_PORT);
  if (hasLobColumn())
    {
      // read lob related information from lob metadata
      short *lobNumList = new (heap_) short[getColumnCount()];
      short *lobTypList = new (heap_) short[getColumnCount()];
      char **lobLocList = new (heap_) char*[getColumnCount()];
      const NAColumnArray &colArray = getNAColumnArray();
      NAColumn *nac = NULL;
      // Allocate a location buffer only for the LOB-typed columns.
      Lng32 j = 0;
      for (CollIndex i = 0; i < getColumnCount(); i++)
        {
          nac = colArray.getColumn(i);
          if (nac->getType()->getTypeQualifier() == NA_LOB_TYPE)
            {
              lobLocList[j] = new (heap_) char[1024];
              j++;
            }
        }
      // Build the delimited "catalog"."schema" string expected by the CLI.
      NAString schNam;
      schNam = "\"";
      schNam += getTableName().getCatalogName();
      schNam += "\".\"";
      schNam += getTableName().getSchemaName();
      schNam += "\"";
      Lng32 numLobs = 0;
      Lng32 cliRC = SQL_EXEC_LOBddlInterface
        (
             (char*)schNam.data(),
             schNam.length(),
             objectUid().castToInt64(),
             numLobs,
             LOB_CLI_SELECT_CURSOR,
             lobNumList,
             lobTypList,
             lobLocList,(char *)lobHdfsServer,lobHdfsPort,0,FALSE);
      // On success, annotate each LOB column with its number, storage type
      // and storage location. Errors are silently ignored here (best effort).
      if (cliRC == 0)
        {
          for (Lng32 i = 0; i < numLobs; i++)
            {
              nac = colArray.getColumn(lobNumList[i]);
              nac->lobNum() = lobNumList[i];
              nac->lobStorageType() = (LobsStorage)lobTypList[i];
              nac->lobStorageLocation() = lobLocList[i];
            }
        } // if
    } // if
  // LCOV_EXCL_STOP
  initialSize_ = heap_->getAllocSize();
  MonitorMemoryUsage_Exit((char*)mmPhase.data(), heap_, NULL, TRUE);
} // NATable()
#pragma warn(770) // warning elimination
// Constructor for a Hive table
//
// Builds an NATable from a hive_tbl_desc obtained from the Hive metastore
// rather than from Trafodion metadata. Columns, the record length, and the
// file sets / clustering index are synthesized from the Hive descriptor;
// constraint creation is skipped (see HIVE-TBD below). On any error the
// constructor returns early leaving colcount_ == 0, which the Binder
// treats as "invalid table definition".
NATable::NATable(BindWA *bindWA,
                 const CorrName& corrName,
                 NAMemory *heap,
                 struct hive_tbl_desc* htbl)
  //
  // The NATable heap ( i.e. heap_ ) used to come from ContextHeap
  // (i.e. heap) but it creates high memory usage/leakage in Context
  // Heap. Although the NATables are deleted at the end of each statement,
  // the heap_ is returned to heap (i.e. context heap) which caused
  // context heap containing a lot of not used chunk of memory. So it is
  // changed to be from whatever heap is passed in at the call in
  // NATableDB.getNATable.
  //
  // Now NATable objects can be cached.If an object is to be cached (persisted
  // across statements) a NATable heap is allocated for the object
  // and is passed in (this is done in NATableDB::get(CorrName& corrName...).
  // Otherwise a reference to the Statement heap is passed in. When a cached
  // object is to be deleted the object's heap is deleted which wipes out the
  // NATable object all its related stuff. NATable objects that are not cached
  // are wiped out at the end of the statement when the statement heap is deleted.
  //
  : heap_(heap),
    referenceCount_(0),
    refsIncompatibleDP2Halloween_(FALSE),
    isHalloweenTable_(FALSE),
    qualifiedName_(corrName.getExtendedQualNameObj(),heap),
    synonymReferenceName_(heap),
    fileSetName_(corrName.getQualifiedNameObj(),heap),   // for now, set equal
    clusteringIndex_(NULL),
    colcount_(0),
    colArray_(heap),
    recordLength_(0),
    indexes_(heap),
    vertParts_(heap),
    colStats_(NULL),
    statsFetched_(FALSE),
    viewFileName_(NULL),
    viewText_(NULL),
    viewTextInNAWchars_(heap),
    viewTextCharSet_(CharInfo::UnknownCharSet),
    viewCheck_(NULL),
    flags_(IS_INSERTABLE | IS_UPDATABLE),
    insertMode_(COM_REGULAR_TABLE_INSERT_MODE),
    isSynonymTranslationDone_(FALSE),
    checkConstraints_(heap),
    // timestamps come straight from the Hive table descriptor
    createTime_(htbl->creationTS_),
    redefTime_(htbl->redeftime()),
    cacheTime_(0),
    statsTime_(0),
    catalogUID_(0),
    schemaUID_(0),
    objectUID_(0),
    objectType_(COM_UNKNOWN_OBJECT),
    partitioningScheme_(COM_UNKNOWN_PARTITIONING),
    uniqueConstraints_(heap),
    refConstraints_(heap),
    isAnMV_(FALSE),
    isAnMVMetaData_(FALSE),
    mvsUsingMe_(heap),
    mvInfo_(NULL),
    accessedInCurrentStatement_(TRUE),
    setupForStatement_(FALSE),
    resetAfterStatement_(FALSE),
    hitCount_(0),
    replacementCounter_(2),
    sizeInCache_(0),
    recentlyUsed_(TRUE),
    tableConstructionHadWarnings_(FALSE),
    isAnMPTableWithAnsiName_(FALSE),
    isUMDTable_(FALSE),
    isSMDTable_(FALSE),
    isMVUMDTable_(FALSE),

    // For virtual tables, we set the object schema version
    // to be the current schema version
    osv_(COM_VERS_CURR_SCHEMA),
    ofv_(COM_VERS_CURR_SCHEMA),
    partnsDesc_(NULL),
    colsWithMissingStats_(NULL),
    originalCardinality_(-1.0),
    tableIdList_(heap),
    rcb_(NULL),
    rcbLen_(0),
    keyLength_(0),
    parentTableName_(NULL),
    sgAttributes_(NULL),
    isHive_(TRUE),           // the only constructor that marks the table as Hive
    isHbase_(FALSE),
    isHbaseCell_(FALSE),
    isHbaseRow_(FALSE),
    isSeabase_(FALSE),
    isSeabaseMD_(FALSE),
    isSeabasePrivSchemaTable_(FALSE),
    isUserUpdatableSeabaseMD_(FALSE),
    resetHDFSStatsAfterStmt_(FALSE),
    hiveDefaultStringLen_(0),
    hiveTableId_(htbl->tblID_),
    tableDesc_(NULL),
    secKeySet_(heap),
    privInfo_(NULL),
    newColumns_(heap),
    snapshotName_(NULL)
{
  NAString tblName = qualifiedName_.getQualifiedNameObj().getQualifiedNameAsString();
  NAString mmPhase;

  // Count warnings already present so we can tell at the end whether
  // construction itself produced any new ones.
  Lng32 preCreateNATableWarnings = CmpCommon::diags()->getNumber(DgSqlCode::WARNING_);

  //set heap type
  if(heap_ == CmpCommon::statementHeap()){
    heapType_ = STATEMENT;
    mmPhase = "NATable Init (Stmt) - " + tblName;
  }else if (heap_ == CmpCommon::contextHeap()){
    heapType_ = CONTEXT;
    mmPhase = "NATable Init (Cnxt) - " + tblName;
  }else {
    heapType_ = OTHER;
    mmPhase = "NATable Init (Other) - " + tblName;
  }

  MonitorMemoryUsage_Enter((char*)mmPhase.data(), heap_, TRUE);

  isTrigTempTable_ = FALSE;

  insertMode_ =
     COM_MULTISET_TABLE_INSERT_MODE; // allow dup, to check
  //ComInsertMode::COM_MULTISET_TABLE_INSERT_MODE; // allow dup, to check

  //
  // Add timestamp information.
  //

  // To get from Hive
  /*
  createTime_ = longArrayToInt64(table_desc->body.table_desc.createtime);
  redefTime_  = longArrayToInt64(table_desc->body.table_desc.redeftime);
  cacheTime_  = longArrayToInt64(table_desc->body.table_desc.cachetime);
  */

  // NATable has a schemaUID column, probably should propogate it.
  // for now, set to 0.
  schemaUID_ = 0;

  // Set the objectUID_
  // If the HIVE table has been registered in Trafodion, get the objectUID
  // from Trafodion, otherwise, set it to 0.
  // TBD - does getQualifiedNameObj handle delimited names correctly?
  if ( !fetchObjectUIDForNativeTable(corrName) )
     // lookup failed; leave colcount_ == 0 so Binder sees an invalid table
     return;

  if ( objectUID_ > 0 )
    setHasExternalTable(TRUE);

  // for HIVE objects, the schema owner and table owner is HIVE_ROLE_ID
  if (CmpCommon::context()->isAuthorizationEnabled())
  {
    owner_ = HIVE_ROLE_ID;
    schemaOwner_ = HIVE_ROLE_ID;
  }
  else
  {
     owner_ = SUPER_USER;
     schemaOwner_ = SUPER_USER;
  }

  if (hasExternalTable())
    setupPrivInfo();

  // TBD - if authorization is enabled and there is no external table to store
  // privileges, go get privilege information from HIVE metadata ...

  // TBD - add a check to verify that the column list coming from HIVE matches
  // the column list stored in the external table.  Maybe some common method
  // that can be used to compare other things as well...

  objectType_ = COM_BASE_TABLE_OBJECT;

  // to check
  partitioningScheme_ = COM_UNKNOWN_PARTITIONING;

  // to check
  rcb_ = 0;
  rcbLen_ = 0;
  keyLength_ = 0;

  partnsDesc_ = NULL;

  //
  // Insert a NAColumn in the colArray_ for this NATable for each
  // columns_desc from the ARK SMD. Returns TRUE if error creating NAColumns.
  //
  if (createNAColumns(htbl->getColumns(),
                      this,
                      colArray_ /*OUT*/,
                      heap_))
    //coverity[leaked_storage]
    return;  // colcount_ still 0 ==> invalid table definition

  //
  // Set colcount_ after all possible errors (Binder uses nonzero colcount
  // as an indicator of valid table definition).
  //

  // To set it via the new createNAColumns()
  colcount_ = colArray_.entries();

  // compute record length from colArray
  Int32 recLen = 0;
  for ( CollIndex i=0; i<colcount_; i++ ) {
    recLen += colArray_[i]->getType()->getNominalSize();
  }
  setRecordLength(recLen);

  if (createNAFileSets(htbl             /*IN*/,
                       this             /*IN*/,
                       colArray_        /*IN*/,
                       indexes_         /*OUT*/,
                       vertParts_       /*OUT*/,
                       clusteringIndex_ /*OUT*/,
                       tableIdList_     /*OUT*/,
                       heap_,
                       bindWA
                       )) {
    colcount_ = 0; // indicates failure
    return;
  }

  // HIVE-TBD ignore constraint info creation for now

  // If there is a host variable associated with this table, store it
  // for use by the generator to generate late-name resolution information.
  //
  HostVar *hv = corrName.getPrototype();
  prototype_ = hv ? new (heap_) HostVar(*hv) : NULL;

  // MV
  // Initialize the MV support data members
  isAnMV_ = FALSE;
  isAnMVMetaData_ = FALSE;

  Lng32 postCreateNATableWarnings = CmpCommon::diags()->getNumber(DgSqlCode::WARNING_);

  if(postCreateNATableWarnings != preCreateNATableWarnings)
    tableConstructionHadWarnings_=TRUE;

  hiveDefaultStringLen_ = CmpCommon::getDefaultLong(HIVE_MAX_STRING_LENGTH);
  Int32 hiveDefaultStringLenInBytes = CmpCommon::getDefaultLong(HIVE_MAX_STRING_LENGTH_IN_BYTES);
  // NOTE(review): 32000 appears to be the default CQD value; only a
  // non-default byte setting overrides the character-length setting.
  if( hiveDefaultStringLenInBytes != 32000 )
    hiveDefaultStringLen_ = hiveDefaultStringLenInBytes;

  if (!(corrName.isSeabaseMD() || corrName.isSpecialTable()))
    setupPrivInfo();

  // LCOV_EXCL_STOP
  initialSize_ = heap_->getAllocSize();
  MonitorMemoryUsage_Exit((char*)mmPhase.data(), heap_, NULL, TRUE);
} // NATable()
#pragma warn(770)  // warning elimination
// Returns TRUE if a missing-stats warning has already been recorded for
// exactly this set of column indexes (used to suppress duplicate warnings).
NABoolean NATable::doesMissingStatsWarningExist(CollIndexSet & colsSet) const
{
  const NABoolean alreadyRecorded = colsWithMissingStats_->contains(&colsSet);
  return alreadyRecorded;
}
// Record that a missing-stats warning has been emitted for this set of
// column indexes, so the same warning is not raised again.
//
// Returns TRUE if the set was inserted into colsWithMissingStats_,
// FALSE if the insertion failed.
NABoolean NATable::insertMissingStatsWarning(CollIndexSet colsSet) const
{
  // Copy lives on the statement heap; it is reclaimed automatically at
  // end of statement, so no explicit cleanup is needed on failure.
  CollIndexSet * setOfColsWithMissingStats = new (STMTHEAP) CollIndexSet (colsSet);

  // someVar is required by the insert() interface; its role is not
  // apparent here -- TODO confirm against the collection's API.
  Int32 someVar = 1;
  CollIndexSet * result = colsWithMissingStats_->insert(setOfColsWithMissingStats, &someVar);

  return (result != NULL);
}
// This gets called in the Optimizer phase -- the Binder phase will already have
// marked columns that were referenced in the query, so that the ustat function
// below can decide which histograms and histints to leave in the stats list
// and which to remove.
//
// On first call (guarded by statsFetched_) the histograms are fetched from
// the histogram cache and then massaged to satisfy the histogram classes'
// internal invariants:
//   * multi-column uec info is collected, then multi-column histograms are
//     filtered out (except ones flagged for HBase range partitioning),
//   * out-of-order or duplicate interval boundaries are repaired/merged,
//   * NULLs are forced into single-valued intervals,
//   * each histogram is reduced to the configured max interval count.
// Subsequent calls simply return the cached StatsList (statement heap).
StatsList &
NATable::getStatistics()
{
    if (!statsFetched_)
    {
      // mark the kind of histograms needed for this table's columns
      markColumnsForHistograms();

      NAString tblName = qualifiedName_.getQualifiedNameObj().getQualifiedNameAsString();
      NAString mmPhase = "NATable getStats - " + tblName;
      MonitorMemoryUsage_Enter((char*)mmPhase.data(), NULL, TRUE);

      //trying to get statistics for a new statement allocate colStats_
      colStats_ = new (CmpCommon::statementHeap()) StatsList(CmpCommon::statementHeap());

      // Do not create statistics on the fly for the following tables
      if (isAnMV() || isUMDTable() ||
          isSMDTable() || isMVUMDTable() ||
          isTrigTempTable() )
        CURRSTMT_OPTDEFAULTS->setHistDefaultSampleSize(0);

      CURRCONTEXT_HISTCACHE->getHistograms(*this);

      // Remember the base rowcount (or the no-stats default) before any
      // of the fixups below can change the picture.
      if ((*colStats_).entries() > 0)
        originalCardinality_ = (*colStats_)[0]->getRowcount();
      else
        originalCardinality_ = ActiveSchemaDB()->getDefaults().getAsDouble(HIST_NO_STATS_ROWCOUNT);

      // -----------------------------------------------------------------------
      // So now we have read in the contents of the HISTOGRM & HISTINTS
      // tables from the system catalog.  Before we can use them, we need
      // to massage them into a format we can use.  In particular, we need
      // to make sure that what we read in (which the user may have mucked
      // about with) matches the histogram classes' internal semantic
      // requirements.  Also, we need to generate the MultiColumnUecList.
      //  ----------------------------------------------------------------------

      // what did the user set as the max number of intervals?
      NADefaults &defs = ActiveSchemaDB()->getDefaults();
      CollIndex maxIntervalCount = defs.getAsLong(HIST_MAX_NUMBER_OF_INTERVALS);

      //-----------------------------------------------------------------------------------
      // Need to flag the MC colStatsDesc so it is only used for the range partitioning task
      // and not any cardinality calculations tasks. Flagging it also makes the logic
      // to check fo the presence for this MC easier (at the time we need to create
      // the range partitioning function)
      //-----------------------------------------------------------------------------------

      if (CmpCommon::getDefault(HBASE_RANGE_PARTITIONING_MC_SPLIT) == DF_ON &&
         !(*colStats_).allFakeStats())
      {
         CollIndex currentMaxsize = 1;
         Int32 posMCtoUse = -1;

         NAColumnArray partCols;

         if (getClusteringIndex()->getPartitioningKeyColumns().entries() > 0)
            partCols = getClusteringIndex()->getPartitioningKeyColumns();
         else
            partCols = getClusteringIndex()->getIndexKeyColumns();

         CollIndex partColNum = partCols.entries();

         // look for MC histograms that have multiple intervals and whose columns are a prefix for the
         // paritition column list. If multiple pick the one with the most matching columns
         for (Int32 i=0; i < (*colStats_).entries(); i++)
         {
            NAColumnArray statsCols = (*colStats_)[i]->getStatColumns();
            CollIndex colNum = statsCols.entries();

            CollIndex j = 0;

            NABoolean potentialMatch = TRUE;
            if ((colNum > currentMaxsize) &&
                (!(*colStats_)[i]->isSingleIntHist()) && // no SIH -- number of histograms is large enough to do splitting
                (colNum <= partColNum))
            {
               while ((j < colNum) && potentialMatch)
               {
                  j++;
                  NAColumn * col = partCols[j-1];
                  // positions must match for the stats columns to form a
                  // prefix of the partitioning key
                  if (statsCols[j-1]->getPosition() != partCols[j-1]->getPosition())
                  {
                     potentialMatch = FALSE;
                     break;
                  }
               }
            }
            else
            {
               potentialMatch = FALSE;
            }

            if (potentialMatch)
            {
               currentMaxsize = j;
               posMCtoUse = i;
            }

            // we got what we need, just return
            if (potentialMatch && (currentMaxsize == partColNum))
            {
               break;
            }
         }

         if (posMCtoUse >= 0)
         {
            (*colStats_)[posMCtoUse]->setMCforHbasePartitioning (TRUE);
         }
      }

      // *************************************************************************
      // FIRST: Generate the stats necessary to later create the
      // MultiColumnUecList; then filter out the multi-column histograms
      // because later code doesn't know how to handle them
      // In the same loop, also mark another flag for originally fake histogram
      // This is to differentiate the cases when the histogram is fake because
      // it has no statistics and the case where the histogram has been termed
      // fake by the optimizer because its statistics is no longer reliable.
      // *************************************************************************
      CollIndex i ;
      for ( i = 0 ; i < (*colStats_).entries() ; /* no automatic increment */ )
      {
        // the StatsList has two lists which it uses to store the information we
        // need to fill the MultiColumnUecList with <table-col-list,uec value> pairs:
        //
        // LIST(NAColumnArray) groupUecColumns_
        // LIST(CostScalar)    groupUecValues_
        //
        // ==> insert the NAColumnArray & uec total values for each
        // entry in colStats_

        // don't bother storing multicolumnuec info for fake histograms
        // but do set the originallly fake histogram flag to TRUE
        if ( (*colStats_)[i]->isFakeHistogram() )
          (*colStats_)[i]->setOrigFakeHist(TRUE);
        else
        {
          NAColumnArray cols = (*colStats_)[i]->getStatColumns() ;
          (*colStats_).groupUecColumns_.insert(cols) ;

          CostScalar uecs = (*colStats_)[i]->getTotalUec() ;
          (*colStats_).groupUecValues_.insert(uecs) ;

          if (CmpCommon::getDefault(USTAT_COLLECT_MC_SKEW_VALUES) == DF_ON)
          {
            MCSkewedValueList mcSkewedValueList = (*colStats_)[i]->getMCSkewedValueList() ;
            (*colStats_).groupMCSkewedValueLists_.insert(mcSkewedValueList) ;
          }
        }

        // MCH:
        // once we've stored the column/uec information, filter out the
        // multi-column histograms, since our synthesis code doesn't
        // handle them
        if (( (*colStats_)[i]->getStatColumns().entries() != 1) &&
            (!(*colStats_)[i]->isMCforHbasePartitioning()))
        {
          (*colStats_).removeAt(i) ;
        }
        else
        {
          i++ ; // in-place removal from a list is a bother!
        }
      }

      // *************************************************************************
      // SECOND: do some fixup work to make sure the histograms maintain
      // the semantics we later expect (& enforce)
      // *************************************************************************

      // -------------------------------------------------------------------------
      // HISTINT fixup-code : char-string histograms
      // -------------------------------------------------------------------------
      // problem arises with HISTINTs that are for char* columns
      // here's what we can get:
      //
      // Rows    Uec    Value
      // ----    ---    -----
      //    0      0    "value"
      //   10      5    "value"
      //
      // this is not good!  The problem is our (lousy) encoding of
      // char strings into EncodedValue's
      //
      // After much deliberation, here's our current fix:
      //
      // Rows    Uec    Value
      // ----    ---    -----
      //    0      0    "valu" <-- reduce the min value of 1st interval
      //   10      5    "value"    by a little bit
      //
      // When we find two intervals like this where they aren't the
      // first intervals in the histogram, we simply merge them into
      // one interval (adding row/uec information) and continue; note
      // that in this case, we haven't actually lost any information;
      // we've merely made sense out of (the garbage) what we've got
      //
      // -------------------------------------------------------------------------
      // additional HISTINT fixup-code
      // -------------------------------------------------------------------------
      // 1. If there are zero or one HISTINTs, then set the HISTINTs to match
      // the max/min information contained in the COLSTATS object.
      //
      // 2. If there are any HISTINTs whose boundary values are out-of-order,
      // we abort with an an ERROR message.
      //
      // 3. If there is a NULL HISTINT at the end of the Histogram, then we
      // need to make sure there are *TWO* NULL HISTINTS, to preserve correct
      // histogram semantics for single-valued intervals.
      // -------------------------------------------------------------------------

      CollIndex j ;
      for ( i = 0 ; i < (*colStats_).entries() ; i++ )
      {
        // we only worry about histograms on char string columns
        // correction: it turns out that these semantically-deranged
        // ---------- histograms were being formed for other, non-char string
        // columns, so we commented out the code below
        // if ( colStats_[i]->getStatColumns()[0]->getType()->getTypeQualifier() !=
        //     NA_CHARACTER_TYPE)
        //   continue ; // not a string, skip to next

        ColStatsSharedPtr stats = (*colStats_)[i] ;

        HistogramSharedPtr hist = stats->getHistogramToModify() ;
        // histograms for key columns of a table that are not
        // referenced in the query are read in with zero intervals
        // (to conserve memory); however, internal
        // histogram-semantic checking code assumes that any
        // histogram which has zero intervals is FAKE; however
        // however, MDAM will not be chosen in the case where one of
        // the histograms for a key column is FAKE.  Thus -- we will
        // avoid this entire issue by creating a single interval for
        // any Histograms that we read in that are empty.
        if ( hist->entries() < 2 )
        {
          if(stats->getMinValue() > stats->getMaxValue())
          {
             // corrupt min/max in the catalog: warn and fall back to a
             // fake histogram for this column
             *CmpCommon::diags() << DgSqlCode(CATALOG_HISTOGRM_HISTINTS_TABLES_CONTAIN_BAD_VALUE)
               << DgString0("")
               << DgString1(stats->getStatColumns()[0]->getFullColRefNameAsAnsiString().data() );

             stats->createFakeHist();
             continue;
          }

          stats->setToSingleInterval ( stats->getMinValue(),
                                       stats->getMaxValue(),
                                       stats->getRowcount(),
                                       stats->getTotalUec() ) ;
          // now we have to undo some of the automatic flag-setting
          // of ColStats::setToSingleInterval()
          stats->setMinSetByPred (FALSE) ;
          stats->setMaxSetByPred (FALSE) ;
          stats->setShapeChanged (FALSE) ;
          continue ; // skip to next ColStats
        }

        // NB: we'll handle the first Interval last
        for ( j = 1 ; j < hist->entries()-1 ; /* no automatic increment */ )
        {
          if ( (*hist)[j].getUec() == 0 || (*hist)[j].getCardinality() == 0 )
          {
            hist->removeAt(j) ;
            continue ; // don't increment, loop again
          }

          // intervals must be in order!
          if ( (*hist)[j].getBoundary() > (*hist)[j+1].getBoundary() )
          {
            *CmpCommon::diags() <<
              DgSqlCode(CATALOG_HISTINTS_TABLES_CONTAIN_BAD_VALUES)
              << DgInt0(j)
              << DgInt1(j+1)
              << DgString1(stats->getStatColumns()[0]->getFullColRefNameAsAnsiString().data() );

            stats->createFakeHist();
            break ; // skip to next ColStats
          }

          if ( (*hist)[j].getBoundary() == (*hist)[j+1].getBoundary() )
          {
            // merge Intervals, if the two consecutive intervals have same
            // boundaries and these are not single valued (UEC > 1)
            // If there are more two single valued intervals, then merge
            // all except the last one.
            NABoolean mergeIntervals = FALSE;

            if (CmpCommon::getDefault(COMP_BOOL_79) == DF_ON)
            {
              mergeIntervals = TRUE;

              // NOTE(review): this condition relies on && binding tighter
              // than || -- verify the grouping matches the intent before
              // touching it.
              if( (j < (hist->entries() - 2)) && ((*hist)[j+1].getUec() == 1) &&
                  ((*hist)[j+1].getBoundary() != (*hist)[j+2].getBoundary())
                   ||
                  (j == (hist->entries() - 2)) && ((*hist)[j+1].getUec() == 1) )
                mergeIntervals = FALSE;
            }
            else
            {
              if ( (*hist)[j+1].getUec() > 1)
                mergeIntervals = TRUE;
            }

            if ( mergeIntervals )
            {
              // if the intervals with same boundary are not SVI, just merge them
              // together.
              // Also do the merge, if there are more than one SVIs with same
              // encoded interval boundary. Example, we want to avoid intervals
              // such as
              //   boundary   inclusive_flag  UEC
              //   12345.00    <              1
              //   12345.00    <              1
              //   12345.00    <=             1
              // These would be changed to
              //   12345.00    <              2
              //   12345.00    <=             1
              CostScalar combinedRows = (*hist)[ j ].getCardinality() +
                                        (*hist)[j+1].getCardinality() ;
              CostScalar combinedUec  = (*hist)[ j ].getUec() +
                                        (*hist)[j+1].getUec() ;
              (*hist)[j].setCardAndUec (combinedRows, combinedUec) ;
              stats->setIsColWithBndryConflict(TRUE);
              hist->removeAt(j+1) ;
            }
            else
            {
              // for some reason, some SVI's aren't being
              // generated correctly!
              (*hist)[j].setBoundIncl(FALSE) ;
              (*hist)[j+1].setBoundIncl(TRUE) ;
              j++;
            }
          }
          else
            j++ ; // in-place removal from a list is a bother!
        } // loop over intervals

        // ----------------------------------------------------------------------
        // now we handle the first interval
        //
        // first, it must be in order w.r.t. the second interval!
        if ( (*hist)[0].getBoundary() > (*hist)[1].getBoundary() )
        {
          *CmpCommon::diags() <<
            DgSqlCode(CATALOG_HISTINTS_TABLES_CONTAIN_BAD_VALUES)
            << DgInt0(0)
            << DgInt1(1)
            << DgString1(stats->getStatColumns()[0]->getFullColRefNameAsAnsiString().data() );

          stats->createFakeHist();
          continue ; // skip to next ColStats
        }

        // second, handle the case where first and second interval are the same
        if ( hist->entries() > 1 && // avoid the exception! might just be a single NULL
             //                    // interval after the loop above
             (*hist)[0].getBoundary() == (*hist)[1].getBoundary() &&
             (*hist)[1].getUec() > 1 )
        {
          // nudge the first boundary down slightly so the two intervals
          // no longer collide (see the "char-string histograms" note above)
          const double KLUDGE_VALUE = 0.0001 ;
          const double oldVal = (*hist)[0].getBoundary().getDblValue() ;
          const EncodedValue newVal =
            EncodedValue(oldVal - (_ABSOLUTE_VALUE_(oldVal) * KLUDGE_VALUE)) ; // kludge alert!
          //Absolute of oldval due to CR 10-010426-2457
          (*hist)[0].setBoundary( newVal ) ;
          (*hist)[0].setBoundIncl( FALSE ) ; // no longer a real boundary!
          (*colStats_)[i]->setMinValue( newVal ) ;  // set aggr info also
        }
        // done with first interval
        // ----------------------------------------------------------------------

        //
        // NULL values must only be stored in single-valued intervals
        // in the histograms ; so, just in case we're only getting
        // *one* HistInt for the NULL interval, insert a 2nd one
        //
        //      0   1   2
        //      |   |   |
        //      |   |   |    entries() == 3
        //          NULL
        //
        //      0   1   2   3
        //      |   |   |   |
        //      |   |   |   |    entries() == 4
        //          new  NULL
        //          NULL
        //
        if ( hist->lastHistInt().isNull() )
        {
          CollIndex count = hist->entries() ;
          if ( !(*hist)[count-2].isNull() )
          {
            // insert a 2nd NULL HISTINT, with boundaryIncl value FALSE
            HistInt secondLast (hist->lastHistInt().getBoundary(), FALSE) ;
            hist->insertAt(count-1,secondLast) ;
            // new HISTINT by default has row/uec of 0, which is what we want
          }
        }

        //
        // Now, reduce the total number of intervals to be the number
        // that the user wants.  This is used to test the tradeoffs
        // between compile time & rowcount estimation.
        //
        (*colStats_)[i]->setMaxIntervalCount (maxIntervalCount) ;
        (*colStats_)[i]->reduceToMaxIntervalCount () ;

        if ((*colStats_)[i]->getRowcount() == (*colStats_)[i]->getTotalUec() )
          (*colStats_)[i]->setAlmostUnique(TRUE);

      } // outer for loop -- done with this COLSTATS, continue with next one
      // ***********************************************************************

      statsFetched_ = TRUE;
      MonitorMemoryUsage_Exit((char*)mmPhase.data(), NULL, NULL, TRUE);
    } // !statsFetched_

    return (*colStats_);
}
// Build a StatsList of synthetic ("fake") single-column histograms for
// every column of the table, used when no real statistics are available.
// Row count comes from the HIST_NO_STATS_ROWCOUNT default (or the actual
// original row count for Hive tables); uec from HIST_NO_STATS_UEC.
// Idempotent: if colStats_ already has entries, it is returned as-is.
StatsList &
NATable::generateFakeStats()
{
  if (colStats_ == NULL)
  {
    //trying to get statistics for a new statement allocate colStats_
    colStats_ = new (CmpCommon::statementHeap()) StatsList(CmpCommon::statementHeap());
  }

  // stats already generated (or fetched) -- nothing to do
  if (colStats_->entries() > 0)
    return (*colStats_);

  NAColumnArray colList = getNAColumnArray() ;
  double defaultFakeRowCount = (ActiveSchemaDB()->getDefaults()).getAsDouble(HIST_NO_STATS_ROWCOUNT);
  double defaultFakeUec = (ActiveSchemaDB()->getDefaults()).getAsDouble(HIST_NO_STATS_UEC);

  if ( isHiveTable() ) {
      defaultFakeRowCount = getOriginalRowCount().value();
  }
  /*   if ( isHbaseTable() ) {
      defaultFakeRowCount = getOriginalRowCount().value();
  }
  */

  for (CollIndex i = 0; i < colList.entries(); i++ )
  {
    NAColumn * col = colList[i];

    // NOTE(review): defaultFakeUec is updated in place across iterations,
    // so once a unique column raises it to defaultFakeRowCount it stays
    // at that value for all later (non-unique) columns -- confirm this
    // carryover is intended and not a per-column reset that was missed.
    if (col->isUnique() )
      defaultFakeUec = defaultFakeRowCount;
    else
      defaultFakeUec = MINOF(defaultFakeUec, defaultFakeRowCount);

    // two-HistInt histogram spanning the type's full [min, max] range
    EncodedValue dummyVal(0.0);

    EncodedValue lowBound = dummyVal.minMaxValue(col->getType(), TRUE);
    EncodedValue highBound = dummyVal.minMaxValue(col->getType(), FALSE);

    HistogramSharedPtr emptyHist(new (HISTHEAP) Histogram(HISTHEAP));

    HistInt newFirstHistInt(lowBound, FALSE);

    HistInt newSecondHistInt(highBound, TRUE);

    // all rows/uec are attributed to the second (upper) interval
    newSecondHistInt.setCardAndUec(defaultFakeRowCount,
                                   defaultFakeUec);

    emptyHist->insert(newFirstHistInt);
    emptyHist->insert(newSecondHistInt);

    ComUID histid(NA_JulianTimestamp());
    ColStatsSharedPtr fakeColStats(
      new (HISTHEAP) ColStats(histid,
                              defaultFakeUec,
                              defaultFakeRowCount,
                              defaultFakeRowCount,
                              col->isUnique(),
                              FALSE,
                              emptyHist,
                              FALSE,
                              1.0,
                              1.0,
                              -1, // avg varchar size
                              HISTHEAP));

    // mark as fake in both senses (no stats vs. invalidated stats)
    fakeColStats->setFakeHistogram(TRUE);
    fakeColStats->setOrigFakeHist(TRUE);
    fakeColStats->setMinValue(lowBound);
    fakeColStats->setMaxValue(highBound);
    fakeColStats->statColumns().insert(col);

    colStats_->insert(fakeColStats);
  }
  setStatsFetched(TRUE);
  setOriginalRowCount(defaultFakeRowCount);

  return (*colStats_);
}
// Packing is uniform across the vertical partitions: if one fileset is
// packed, they all are, so inspecting the first one suffices.
NABoolean NATable::rowsArePacked() const
{
  if (getVerticalPartitionList().entries() == 0)
    return FALSE;

  return getVerticalPartitionList()[0]->isPacked();
}
// MV
// Read materialized view information from the catalog manager.
// Currently served from the cached mvInfo_ member; the bindWA argument
// is unused in this implementation.
MVInfoForDML *NATable::getMVInfo(BindWA *bindWA)
{
  return mvInfo_;
}
// MV
// An MV is usable unly when it is initialized and not unavailable.
// If not initialized, keep a list and report error at runtime.
//
// Returns TRUE (and sets bind error status) when the MV is unavailable;
// otherwise returns FALSE.  An uninitialized MV is not an error here --
// it is recorded in bindWA's uninitializedMv list for runtime reporting.
NABoolean NATable::verifyMvIsInitializedAndAvailable(BindWA *bindWA) const
{
  CMPASSERT(isAnMV());
  const ComMvAttributeBitmap& bitmap = getMvAttributeBitmap();

  // First check if the table is Unavailable.
  NAString value;
  if (bitmap.getIsMvUnAvailable())
  {
    // 12312 Materialized View $0~TableName is unavailable.
    *CmpCommon::diags() << DgSqlCode(-12312)
                        << DgTableName(getTableName().getQualifiedNameAsString());
    bindWA->setErrStatus();
    return TRUE;
  }

  // if the mv is uninitialized,
  // add it to the uninitializedMvList in the BindWA
  if (bitmap.getIsMvUnInitialized())
  {
    // get physical and ansi names
    NAString fileName(
      getClusteringIndex()->getFileSetName().getQualifiedNameAsString(),
      bindWA->wHeap() );

    NAString ansiName( getTableName().getQualifiedNameAsAnsiString(),
                       bindWA->wHeap() );

    // get physical and ansi name
    bindWA->addUninitializedMv(
      convertNAString( fileName, bindWA->wHeap() ),
      convertNAString( ansiName, bindWA->wHeap() ) );
  }

  return FALSE;
}
// Return value: TRUE, found an index or constr. FALSE, not found.
// explicitIndex: get explicitly created index
// uniqueIndex: TRUE, get unique index. FALSE, any index.
//
// primaryKeyOnly: TRUE, get primary key
// indexName: return index name, if passed in
// lookForSameSequenceOfCols: TRUE, look for an index in which the
//                            columns appear in the same sequence
//                            as in inputCols (whether they are ASC or
//                            DESC doesn't matter).
//                            FALSE, accept any index that has the
//                            same columns, in any sequence.
//
// Note: when inputCols is empty, the search degenerates to "find the
// primary key" (the lookFor* flags are overridden accordingly).
NABoolean NATable::getCorrespondingIndex(NAList<NAString> &inputCols,
                                         NABoolean lookForExplicitIndex,
                                         NABoolean lookForUniqueIndex,
                                         NABoolean lookForPrimaryKey,
                                         NABoolean lookForAnyIndexOrPkey,
                                         NABoolean lookForSameSequenceOfCols,
                                         NABoolean excludeAlwaysComputedSystemCols,
                                         NAString *indexName)
{
  NABoolean indexFound = FALSE;
  CollIndex numInputCols = inputCols.entries();

  if (numInputCols == 0)
  {
    lookForPrimaryKey = TRUE;
    lookForUniqueIndex = FALSE;
    lookForAnyIndexOrPkey = FALSE;
  }

  Lng32 numBTpkeys = getClusteringIndex()->getIndexKeyColumns().entries();

  const NAFileSetList &indexList = getIndexList();
  for (Int32 i = 0; (NOT indexFound && (i < indexList.entries())); i++)
  {
    NABoolean isPrimaryKey = FALSE;
    NABoolean isUniqueIndex = FALSE;

    const NAFileSet * naf = indexList[i];
    // keytag 0 identifies the base table (primary key) fileset
    if (naf->getKeytag() == 0)
      isPrimaryKey = TRUE;
    else if (naf->uniqueIndex())
      isUniqueIndex = TRUE;

    if ((NOT lookForPrimaryKey) && (isPrimaryKey))
      continue;

    NABoolean found = FALSE;
    if (lookForAnyIndexOrPkey)
      found = TRUE;
    else if (lookForPrimaryKey && isPrimaryKey)
      found = TRUE;
    else if (lookForUniqueIndex && isUniqueIndex)
      found = TRUE;

    if (found)
    {
      if (lookForExplicitIndex)  // need an explicit index to match.
      {
        if ((naf->isCreatedExplicitly()) ||
            (isPrimaryKey))
          found = TRUE;
        else
          found = FALSE;
      }
    }

    if (NOT found)
      continue;

    Int32 numMatchedCols = 0;
    NABoolean allColsMatched = TRUE;

    if (numInputCols > 0)
    {
      const NAColumnArray &nacArr = naf->getIndexKeyColumns();

      Lng32 numKeyCols = naf->getCountOfColumns(
           TRUE,           // exclude non-key cols
           !isPrimaryKey,  // exclude cols other than user-specified index cols
           FALSE,          // don't exclude all system cols like SYSKEY
           excludeAlwaysComputedSystemCols);

      // compare # of columns first and disqualify the index
      // if it doesn't have the right number of columns
      if (numInputCols != numKeyCols)
        continue;

      // compare individual key columns with the provided input columns
      for (Int32 j = 0; j < nacArr.entries() && allColsMatched; j++)
      {
        NAColumn *nac = nacArr[j];

        // exclude the same types of columns that we excluded in
        // the call to naf->getCountOfColumns() above
        if (!isPrimaryKey &&
            nac->getIndexColName() == nac->getColName())
          continue;

        if (excludeAlwaysComputedSystemCols &&
            nac->isComputedColumnAlways() && nac->isSystemColumn())
          continue;

        const NAString &keyColName = nac->getColName();
        NABoolean colFound = FALSE;

        // look up the key column name in the provided input columns
        if (lookForSameSequenceOfCols)
        {
          // in this case we know exactly where to look
          colFound = (keyColName == inputCols[numMatchedCols]);
        }
        else
          for (Int32 k = 0; !colFound && k < numInputCols; k++)
          {
            if (keyColName == inputCols[k])
              colFound = TRUE;
          } // loop over provided input columns

        if (colFound)
          numMatchedCols++;
        else
          allColsMatched = FALSE;
      } // loop over key columns of the index

      if (allColsMatched)
      {
        // just checking that the above loop and
        // getCountOfColumns() don't disagree
        CMPASSERT(numMatchedCols == numKeyCols);

        indexFound = TRUE;
      }
    } // inputCols specified
    else
      indexFound = TRUE; // primary key, no input cols specified

    if (indexFound)
    {
      if (indexName)
      {
        *indexName = naf->getExtFileSetName();
      }
    }
  } // loop over indexes of the table

  return indexFound;
}
// Look for a unique or referential constraint whose key columns match
// inputCols (in any order).  When inputCols is empty, looks for the
// primary key constraint instead.
//
// uniqueConstr: TRUE  -> search unique constraints,
//               FALSE -> search referential (RI) constraints.
// constrName:   if non-NULL, receives the matching constraint's ANSI name.
// isPkey:       if non-NULL, set to TRUE when the match is the primary key.
// reorderList:  if non-NULL, receives for each constraint key column its
//               position in inputCols -- but only when the orders actually
//               differ; cleared otherwise.
// Returns TRUE if a matching constraint was found.
NABoolean NATable::getCorrespondingConstraint(NAList<NAString> &inputCols,
                                              NABoolean uniqueConstr,
                                              NAString *constrName,
                                              NABoolean * isPkey,
                                              NAList<int> *reorderList)
{
  NABoolean constrFound = FALSE;
  NABoolean lookForPrimaryKey = (inputCols.entries() == 0);

  const AbstractRIConstraintList &constrList =
    (uniqueConstr ? getUniqueConstraints() : getRefConstraints());

  if (isPkey)
    *isPkey = FALSE;

  for (Int32 i = 0; (NOT constrFound && (i < constrList.entries())); i++)
  {
    AbstractRIConstraint *ariConstr = constrList[i];

    // skip entries of the wrong constraint kind for this search
    if (uniqueConstr && (ariConstr->getOperatorType() != ITM_UNIQUE_CONSTRAINT))
      continue;

    if (lookForPrimaryKey && (NOT ((UniqueConstraint*)ariConstr)->isPrimaryKeyConstraint()))
      continue;

    if ((NOT uniqueConstr) && (ariConstr->getOperatorType() != ITM_REF_CONSTRAINT))
      continue;

    if (NOT lookForPrimaryKey)
    {
      Int32 numUniqueCols = 0;
      NABoolean allColsMatched = TRUE;
      NABoolean reorderNeeded = FALSE;

      if (reorderList)
        reorderList->clear();

      for (Int32 j = 0; j < ariConstr->keyColumns().entries() && allColsMatched; j++)
      {
        // The RI constraint contains a dummy NAColumn, get to the
        // real one to test for computed columns
        NAColumn *nac = getNAColumnArray()[ariConstr->keyColumns()[j]->getPosition()];

        if (nac->isComputedColumnAlways() && nac->isSystemColumn())
          // always computed system columns in the key are redundant,
          // don't include them (also don't include them in the DDL)
          continue;

        const NAString &uniqueColName = (ariConstr->keyColumns()[j])->getColName();
        NABoolean colFound = FALSE;

        // compare the unique column name to the provided input columns
        for (Int32 k = 0; !colFound && k < inputCols.entries(); k++)
          if (uniqueColName == inputCols[k])
          {
            colFound = TRUE;
            numUniqueCols++;
            if (reorderList)
              reorderList->insert(k);
            if (j != k)
              // inputCols and key columns come in different order
              // (order/sequence of column names, ignoring ASC/DESC)
              reorderNeeded = TRUE;
          }

        if (!colFound)
          allColsMatched = FALSE;
      }

      if (inputCols.entries() == numUniqueCols && allColsMatched)
      {
        constrFound = TRUE;

        if (reorderList && !reorderNeeded)
          reorderList->clear();
      }
    }
    else
    {
      // found the primary key constraint we were looking for
      constrFound = TRUE;
    }

    if (constrFound)
    {
      if (constrName)
      {
        *constrName = ariConstr->getConstraintName().getQualifiedNameAsAnsiString();
      }

      if (isPkey)
      {
        if ((uniqueConstr) && (((UniqueConstraint*)ariConstr)->isPrimaryKeyConstraint()))
          *isPkey = TRUE;
      }
    }
    else
      if (reorderList)
        reorderList->clear();
  }

  return constrFound;
}
// Populate privInfo_ (and secKeySet_) with the current user's privileges
// on this table.  Short-circuits for PrivMgr metadata tables, and grants
// owner-default privileges without a metadata round trip when the query
// is by the owner/root, authorization is off, or the table is volatile /
// neither Seabase nor Hive.  Privilege lookup is done through the
// embedded META compiler context.  On failure privInfo_ is left NULL.
void NATable::setupPrivInfo()
{
  Int32 thisUserID = ComUser::getCurrentUser();
  NAString privMDLoc = CmpSeabaseDDL::getSystemCatalogStatic();
  privMDLoc += ".\"";
  privMDLoc += SEABASE_PRIVMGR_SCHEMA;
  privMDLoc += "\"";

  PrivMgrCommands privInterface(privMDLoc.data(), CmpCommon::diags(),PrivMgr::PRIV_INITIALIZED);

  // PrivMgr's own tables carry no privilege descriptors -- just flag them
  if (privInterface.isPrivMgrTable(
    qualifiedName_.getQualifiedNameObj().getQualifiedNameAsString().data()))
  {
    isSeabasePrivSchemaTable_ = TRUE;
    return;
  }

  privInfo_ = new(heap_) PrivMgrUserPrivs;

  // cases where no metadata lookup is needed: grant owner-default privs
  if ((!isSeabaseTable() && !isHiveTable()) ||
      !CmpCommon::context()->isAuthorizationEnabled() ||
      isVolatileTable() ||
      ComUser::isRootUserID()||
      ComUser::getCurrentUser() == owner_)
  {
    privInfo_->setOwnerDefaultPrivs();
    return;
  }

  std::vector <ComSecurityKey *> secKeyVec;

  // debug-only hook to force the error path below
  bool testError = false;
#ifndef NDEBUG
  char *tpie = getenv("TEST_PRIV_INTERFACE_ERROR");
  if (tpie && *tpie == '1')
    testError = true;
#endif

  // use embedded compiler.
  CmpSeabaseDDL cmpSBD(STMTHEAP);
  if (cmpSBD.switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
  {
    if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
      *CmpCommon::diags() << DgSqlCode( -4400 );

    return;
  }

  if (testError || (STATUS_GOOD !=
       privInterface.getPrivileges(objectUid().get_value(), objectType_,
                                   thisUserID, *privInfo_, &secKeyVec)))
  {
    if (testError)
#ifndef NDEBUG
      *CmpCommon::diags() << DgSqlCode(-8142) <<
         DgString0("TEST_PRIV_INTERFACE_ERROR")  << DgString1(tpie) ;
#else
      abort();
#endif
    // lookup failed: discard the partially filled object and restore
    // the original compiler context before returning
    NADELETE(privInfo_, PrivMgrUserPrivs, heap_);
    privInfo_ = NULL;

    cmpSBD.switchBackCompiler();
    return;
  }

  CMPASSERT (privInfo_);

  cmpSBD.switchBackCompiler();

  for (std::vector<ComSecurityKey*>::iterator iter = secKeyVec.begin();
       iter != secKeyVec.end();
       iter++)
  {
    // Insertion of the dereferenced pointer results in NASet making
    // a copy of the object, and then we delete the original.
    secKeySet_.insert(**iter);
    delete *iter;
  }
}
// Query the metadata to find the object uid of the table. This is used when
// the uid for a metadata table is requested, since 0 is usually stored for
// these tables.
Int64 NATable::lookupObjectUid()
{
  QualifiedName fullName = getExtendedQualName().getQualifiedNameObj();
  objectUID_ = lookupObjectUidByName(fullName, objectType_, FALSE);

  // A non-positive UID with no serious diagnostic simply means the object
  // was not found; normalize that to 0.
  if (objectUID_ <= 0 && CmpCommon::diags()->mainSQLCODE() >= 0)
    objectUID_ = 0;

  return objectUID_.get_value();
}
// Return true iff this table participates in DDL query invalidation.
// Metadata tables, system-module tables and virtual tables are excluded.
bool NATable::isEnabledForDDLQI() const
{
  if (isSeabaseMD_ || isSMDTable_ ||
      (getSpecialType() == ExtendedQualName::VIRTUAL_TABLE))
    return false;

  if (objectUID_.get_value() == 0)
  {
    // Looking up object UIDs at code-gen time was shown to cause
    // more than 10% performance regression in YCSB benchmark. In
    // that investigation, we learned that metadata and histogram
    // NATables would have no object UID at code-gen and would
    // require the lookup. We're pretty sure these are the only
    // types of tables but will abend here otherwise. If this
    // causes problems, the envvar below can be used as a
    // temporary workaround.
    const char *noAbendOnLp1398600 = getenv("NO_ABEND_ON_LP_1398600");
    if (noAbendOnLp1398600 == NULL || *noAbendOnLp1398600 == '0')
      abort();
  }

  return true;
}
// Destructor: releases everything this NATable allocated from heap_
// (columns and their types, filesets, constraints, view/parent/snapshot
// strings, privilege info). Members living on the statement heap
// (colStats_, colsWithMissingStats_, mvInfo_) are intentionally NOT
// deleted here.
NATable::~NATable()
{
  // remove the map entries of associated table identifers in
  // NAClusterInfo::tableToClusterMap_.
  CMPASSERT(gpClusterInfo);

  NAColumn *col;
  // Column headings/defaults are only owned (and thus deleted) by this
  // object when metadata caching is on.
  NABoolean delHeading = ActiveSchemaDB()->getNATableDB()->cachingMetaData();
  // NOTE(review): tableIdList is fetched but not used below — presumably
  // left over from the tableToClusterMap_ cleanup mentioned above; confirm.
  const LIST(CollIndex) & tableIdList = getTableIdList();

  if (privInfo_)
  {
    NADELETE(privInfo_, PrivMgrUserPrivs, heap_);
    privInfo_ = NULL;
  }

  // Hive tables do not own their column array; skip per-column cleanup.
  if (! isHive_) {
    for (int i = 0 ; i < colcount_ ; i++) {
      col = (NAColumn *)colArray_[i];
      if (delHeading) {
        if (col->getDefaultValue())
          NADELETEBASIC(col->getDefaultValue(), heap_);
        if (col->getHeading())
          NADELETEBASIC(col->getHeading(), heap_);
        if (col->getComputedColumnExprString())
          NADELETEBASIC(col->getComputedColumnExprString(),heap_);
      }
      NADELETE(col->getType(), NAType, heap_);
      NADELETE(col, NAColumn, heap_);
    }
    colArray_.clear();
  }

  // Owned C-string / object members: delete and null out each one.
  if (parentTableName_ != NULL)
  {
    NADELETEBASIC(parentTableName_, heap_);
    parentTableName_ = NULL;
  }
  if (snapshotName_ != NULL)
  {
    NADELETEBASIC(snapshotName_, heap_);
    snapshotName_ = NULL;
  }
  if (viewText_ != NULL)
  {
    NADELETEBASIC(viewText_, heap_);
    viewText_ = NULL;
  }
  if (viewCheck_ != NULL)
  {
    NADELETEBASIC(viewCheck_, heap_);
    viewCheck_ = NULL;
  }
  if (viewFileName_ != NULL)
  {
    NADELETEBASIC(viewFileName_, heap_);
    viewFileName_ = NULL;
  }
  if (prototype_ != NULL)
  {
    NADELETE(prototype_, HostVar, heap_);
    prototype_ = NULL;
  }
  if (sgAttributes_ != NULL)
  {
    NADELETE(sgAttributes_, SequenceGeneratorAttributes, heap_);
    sgAttributes_ = NULL;
  }
  // clusteringIndex_ is part of indexes - No need to delete clusteringIndex_
  CollIndex entryCount = indexes_.entries();
  for (CollIndex i = 0 ; i < entryCount; i++) {
    NADELETE(indexes_[i], NAFileSet, heap_);
  }
  indexes_.clear();

  entryCount = vertParts_.entries();
  for (CollIndex i = 0 ; i < entryCount; i++) {
    NADELETE(vertParts_[i], NAFileSet, heap_);
  }
  vertParts_.clear();

  entryCount = newColumns_.entries();
  for (int i = 0 ; i < entryCount ; i++) {
    col = (NAColumn *)newColumns_[i];
    NADELETE(col, NAColumn, heap_);
  }
  newColumns_.clear();

  entryCount = checkConstraints_.entries();
  for (CollIndex i = 0 ; i < entryCount; i++) {
    NADELETE(checkConstraints_[i], CheckConstraint, heap_);
  }
  checkConstraints_.clear();

  entryCount = uniqueConstraints_.entries();
  for (CollIndex i = 0 ; i < entryCount; i++) {
    NADELETE((UniqueConstraint *)uniqueConstraints_[i], UniqueConstraint, heap_);
  }
  uniqueConstraints_.clear();

  entryCount = refConstraints_.entries();
  for (CollIndex i = 0 ; i < entryCount; i++) {
    NADELETE((RefConstraint *)refConstraints_[i], RefConstraint, heap_);
  }
  refConstraints_.clear();

  entryCount = mvsUsingMe_.entries();
  for (CollIndex i = 0 ; i < entryCount; i++) {
    NADELETE(mvsUsingMe_[i], UsingMvInfo, heap_);
  }
  mvsUsingMe_.clear();

  // mvInfo_ is not used at all
  // tableIDList_ is list of ints - No need to delete the entries
  // colStats_ and colsWithMissingStats_ comes from STMTHEAP
  // secKeySet_ is the set that holds ComSecurityKeySet object itself
}
void NATable::resetAfterStatement() // ## to be implemented?
{
if(resetAfterStatement_)
return;
//It is not clear to me whether virtual tables and resource forks
//(any "special" table type) can/should be reused. Maybe certain
//types can; I just have no idea right now. But as we're not reading
//metadata tables for them anyway, there seems little savings in
//caching them; perhaps we should just continue to build them on the fly.
//
//All the real metadata in NATable members can stay as it is.
//But there are a few pieces of for-this-query-only data:
referenceCount_ = 0;
refsIncompatibleDP2Halloween_ = FALSE;
isHalloweenTable_ = FALSE;
//And we now optimize/filter/reduce histogram statistics for each
//individual query, so stats and adminicular structures must be reset:
statsFetched_ = FALSE;
//set this to NULL, the object pointed to by mvInfo_ is on the
//statement heap, for the next statement this will be set again
//this is set in 'MVInfoForDML *NATable::getMVInfo' which is called
//in the binder after the construction of the NATable. Therefore
//This will be set for every statement
mvInfo_ = NULL;
//delete/clearAndDestroy colStats_
//set colStats_ pointer to NULL the object itself is deleted when
//the statement heap is disposed at the end of a statement
colStats_ = NULL;
//mark table as unaccessed for following statements
accessedInCurrentStatement_ = FALSE;
//for (i in colArray_) colArray_[i]->setReferenced(FALSE);
for (UInt32 i = 0; i < colArray_.entries(); i++)
{
//reset each NAColumn
if(colArray_[i])
colArray_[i]->resetAfterStatement();
}
//reset the clustering index
if(clusteringIndex_)
clusteringIndex_->resetAfterStatement();
//reset the fileset for indices
for (UInt32 j=0; j < indexes_.entries(); j++)
{
//reset the fileset for each index
if(indexes_[j])
indexes_[j]->resetAfterStatement();
}
//reset the fileset for each vertical partition
for (UInt32 k=0; k < vertParts_.entries(); k++)
{
//reset the fileset for each index
if(vertParts_[k])
vertParts_[k]->resetAfterStatement();
}
// reset the pointers (keyColumns_ in refConstraintsReferencingMe)
// that are referencing the NATable of the 'other table'.
uniqueConstraints_.resetAfterStatement();
// reset the pointers (keyColumns_ in uniqueConstraintsReferencedByMe_)
// that are referencing the NATable of the 'other table'.
refConstraints_.resetAfterStatement();
colsWithMissingStats_ = NULL;
resetAfterStatement_ = TRUE;
setupForStatement_ = FALSE;
sizeAfterLastStatement_ = heap_->getAllocSize();
return;
}
void NATable::setupForStatement()
{
if(setupForStatement_)
return;
if( NOT qualifiedName_.isSpecialTable() )
gpClusterInfo->setMaxOSV(qualifiedName_.getQualifiedNameObj(), osv_);
//reset the clustering index
if(clusteringIndex_)
clusteringIndex_->setupForStatement();
//reset the fileset for indices
for (UInt32 i=0; i < indexes_.entries(); i++)
{
//reset the fileset for each index
if(indexes_[i])
indexes_[i]->setupForStatement();
}
//reset the fileset for each vertical partition
for (UInt32 j=0; j < vertParts_.entries(); j++)
{
//reset the fileset for each index
if(vertParts_[j])
vertParts_[j]->setupForStatement();
}
// We are doing this here, as we want this to be maintained on a per statement basis
colsWithMissingStats_ = new (STMTHEAP) NAHashDictionary<CollIndexSet, Int32>
(&(hashColPosList),107,TRUE,STMTHEAP);
setupForStatement_ = TRUE;
resetAfterStatement_ = FALSE;
return;
}
static void formatPartitionNameString(const NAString &tbl,
const NAString &pName,
NAString &fmtOut)
{
fmtOut = NAString("(TABLE ") + tbl +
", PARTITION " + pName + ")";
}
// Build "(TABLE <tbl>, PARTITION NUMBER <pNumber>)" into fmtOut, for use
// in partition-related error messages.
//
// Fix: the previous char buf[10] with sprintf could overflow — a 32-bit
// signed value needs up to 11 characters plus the terminating NUL
// (e.g. "-2147483648"). Use a comfortably sized buffer and snprintf so
// the conversion can never write past the end.
static void formatPartitionNumberString(const NAString &tbl,
                                        Lng32 pNumber,
                                        NAString &fmtOut)
{
  char buf[16];
  snprintf(buf, sizeof(buf), "%d", pNumber);
  fmtOut = NAString("(TABLE ") + tbl +
           ", PARTITION NUMBER " + buf + ")";
}
// Apply the query's PARTITION clause (a partition name, a partition
// number, or a partition-number range) to this table, restricting the
// base table's partitioning function / node map to just the requested
// partition(s).
//
// Returns TRUE when nothing was filtered — empty clause, a view, an
// out-of-range specification, or a "partition not found" error (errors
// are reported in the diagnostics area first) — and FALSE when the
// restriction was applied successfully.
NABoolean NATable::filterUnusedPartitions(const PartitionClause& pClause)
{
  if (pClause.isEmpty())
    return TRUE;

  // PARTITION clause is not allowed on views (error 1276).
  if (getViewText())
  {
    *CmpCommon::diags()
      << DgSqlCode(-1276)
      << DgString0(pClause.getPartitionName())
      << DgTableName(getTableName().getQualifiedNameAsString());
    return TRUE;
  }

  if ((pClause.partnNumSpecified() && pClause.getPartitionNumber() < 0) ||
      (pClause.partnNameSpecified() && IsNAStringSpaceOrEmpty(pClause.getPartitionName())))
    // Partion Number specified is less than zero or name specified was all blanks.
    return TRUE ;

  // indexes_[0] is the base table's fileset.
  CMPASSERT(indexes_.entries() > 0);
  NAFileSet* baseTable = indexes_[0];
  PartitioningFunction* oldPartFunc = baseTable->getPartitioningFunction();
  CMPASSERT(oldPartFunc);
  const NodeMap* oldNodeMap = oldPartFunc->getNodeMap();
  CMPASSERT(oldNodeMap);

  const NodeMapEntry* oldNodeMapEntry = NULL;
  PartitioningFunction* newPartFunc = NULL;

  if (pClause.partnRangeSpecified())
  {
/*       if (NOT oldPartFunc->isAHash2PartitioningFunction())
        {
          // ERROR 1097 Unable to find specified partition...
          *CmpCommon::diags()
            << DgSqlCode(-1097)
            << DgString0("")
            << DgTableName(getTableName().getQualifiedNameAsAnsiString());
          return TRUE;
        }
*/
      NAString errorString;
      // partition range specified: validate each bound (-1 means "open"),
      // and restrict the existing partitioning function in place.
      if ((pClause.getBeginPartitionNumber() == -1) ||
          ((pClause.getBeginPartitionNumber() > 0) &&
           (oldPartFunc->getCountOfPartitions() >= pClause.getBeginPartitionNumber())))
      {
        oldPartFunc->setRestrictedBeginPartNumber(
          pClause.getBeginPartitionNumber());
      }
      else
      {
        formatPartitionNumberString(
          getTableName().getQualifiedNameAsAnsiString(),
          pClause.getBeginPartitionNumber(), errorString);
      }

      if ((pClause.getEndPartitionNumber() == -1) ||
          ((pClause.getEndPartitionNumber() > 0) &&
           (oldPartFunc->getCountOfPartitions() >= pClause.getEndPartitionNumber())))
      {
        oldPartFunc->setRestrictedEndPartNumber(
          pClause.getEndPartitionNumber());
      }
      else
      {
        formatPartitionNumberString(
          getTableName().getQualifiedNameAsAnsiString(),
          pClause.getEndPartitionNumber(), errorString);
      }

      if (NOT errorString.isNull())
      {
        // ERROR 1097 Unable to find specified partition...
        *CmpCommon::diags()
          << DgSqlCode(-1097)
          << DgString0(errorString)
          << DgTableName(getTableName().getQualifiedNameAsAnsiString());
        return TRUE;
      } // Unable to find specified partition.
  } // partition range specified
  else
  {
      // single partition specified
      if (pClause.getPartitionNumber() >= 0) // PARTITION NUMBER was specified
      {
        if ((pClause.getPartitionNumber() > 0) &&
            (oldPartFunc->getCountOfPartitions() >= pClause.getPartitionNumber()))
          // Node map entries are 0-based; partition numbers are 1-based.
          oldNodeMapEntry = oldNodeMap->getNodeMapEntry(pClause.getPartitionNumber()-1);
        else
        {
          NAString errorString;
          formatPartitionNumberString(getTableName().getQualifiedNameAsAnsiString(),
                                      pClause.getPartitionNumber(), errorString);

          // ERROR 1097 Unable to find specified partition...
          *CmpCommon::diags()
            << DgSqlCode(-1097)
            << DgString0(errorString)
            << DgTableName(getTableName().getQualifiedNameAsAnsiString());
          return TRUE;
        } // Unable to find specified partition.
      }
      else // PARTITION NAME was specified
      {
        // Linear search of the node map for the given partition name.
        for (CollIndex i =0; i < oldNodeMap->getNumEntries(); i++)
        {
          oldNodeMapEntry = oldNodeMap->getNodeMapEntry(i);
          if (oldNodeMapEntry->getGivenName() == pClause.getPartitionName())
            break;
          if ( i == (oldNodeMap->getNumEntries() -1)) // match not found
          {
            NAString errorString;
            formatPartitionNameString(getTableName().getQualifiedNameAsAnsiString(),
                                      pClause.getPartitionName(), errorString);

            // ERROR 1097 Unable to find specified partition...
            *CmpCommon::diags()
              << DgSqlCode(-1097)
              << DgString0(errorString)
              << DgTableName(getTableName().getQualifiedNameAsAnsiString());
            return TRUE;
          }
        }
      }

      if (!isHbaseTable())
      {
        // Create DP2 node map for partitioning function with only the partition requested
        NodeMap* newNodeMap = new (heap_) NodeMap(heap_);

        NodeMapEntry newEntry((char *)oldNodeMapEntry->getPartitionName(),
                              (char *)oldNodeMapEntry->getGivenName(),
                              heap_,oldNodeMap->getTableIdent());
        newNodeMap->setNodeMapEntry(0,newEntry,heap_);
        newNodeMap->setTableIdent(oldNodeMap->getTableIdent());

/*       if (oldPartFunc->getPartitioningFunctionType() ==
            PartitioningFunction::ROUND_ROBIN_PARTITIONING_FUNCTION)
        {
          // For round robin partitioning, must create the partitioning function
          // even for one partition, since the SYSKEY must be generated for
          // round robin and this is trigger off the partitioning function.
          newPartFunc = new (heap) RoundRobinPartitioningFunction(1, newNodeMap, heap_);
        }
        else */
        newPartFunc = new (heap_) SinglePartitionPartitioningFunction(newNodeMap, heap_);

        baseTable->setPartitioningFunction(newPartFunc);
        baseTable->setCountOfFiles(1);
        baseTable->setHasRemotePartitions(checkRemote(NULL,
                                          (char *)oldNodeMapEntry->getPartitionName()));
        // for now we are not changing indexlevels_ It could potentially be larger than the
        // number of index levels for the requested partition.
        QualifiedName physicalName(oldNodeMapEntry->getPartitionName(),
                                   1, heap_, NULL);
        baseTable->setFileSetName(physicalName);
      }
      else
      {
        // For HBase tables, we attach a predicate to select a single partition in Scan::bindNode
        oldPartFunc->setRestrictedBeginPartNumber(pClause.getPartitionNumber());
        oldPartFunc->setRestrictedEndPartNumber(pClause.getPartitionNumber());
      }
  } // single partition specified

  return FALSE;
}
// Accessor: the list of table identifiers associated with this NATable.
const LIST(CollIndex) &
NATable::getTableIdList() const
{
  return tableIdList_;
}
// Reset the per-statement reference count and the Halloween-related flags.
void NATable::resetReferenceCount()
{
  referenceCount_ = 0;
  refsIncompatibleDP2Halloween_ = FALSE;
  isHalloweenTable_ = FALSE;
}
// Drop one reference; once the count reaches zero, the Halloween-related
// flags are cleared as well.
void NATable::decrReferenceCount()
{
  referenceCount_--;
  if (referenceCount_ != 0)
    return;

  refsIncompatibleDP2Halloween_ = FALSE;
  isHalloweenTable_ = FALSE;
}
// Count the user (non-system) columns of this table.
CollIndex NATable::getUserColumnCount() const
{
  CollIndex userCols = 0;
  const CollIndex total = colArray_.entries();

  for (CollIndex ix = 0; ix < total; ix++)
  {
    if (colArray_[ix]->isUserColumn())
      userCols++;
  }

  return userCols;
}
// NATableDB function definitions

// Look up a cached NATable for `key`. Returns the cached object if it is
// still valid, or NULL when there is no entry or the entry is stale (a
// stale entry is unhooked from the cache and queued on
// tablesToDeleteAfterStatement_ for deferred deletion). When
// findInCacheOnly is TRUE and the entry passes the quick staleness
// checks, it is returned without the per-statement freshness validation.
NATable * NATableDB::get(const ExtendedQualName* key, BindWA* bindWA, NABoolean findInCacheOnly)
{
  //get the cached NATable entry
  NATable * cachedNATable =
    NAKeyLookup<ExtendedQualName,NATable>::get(key);

  //entry not found in cache
  if(!cachedNATable)
    return NULL;

  //This flag determines if a cached object should be deleted and
  //reconstructed
  NABoolean removeEntry = FALSE;

  // Salted-table partitioning-scheme check: if the CQDs (or a bulk-load
  // prep) now require a different partitioning function flavor
  // (range vs. hash2) than the cached object was built with, the cached
  // object cannot be reused.
  if ( cachedNATable->isHbaseTable() ) {
    const NAFileSet* naSet = cachedNATable -> getClusteringIndex();

    if ( naSet ) {
      PartitioningFunction* pf = naSet->getPartitioningFunction();

      if ( pf ) {
        NABoolean rangeSplitSaltedTable =
          CmpCommon::getDefault(HBASE_HASH2_PARTITIONING) == DF_OFF ||
          (bindWA && bindWA->isTrafLoadPrep());

        // if force to range partition a salted table, and the salted table is
        // not a range, do not return the cached object.
        if ( rangeSplitSaltedTable &&
             cachedNATable->hasSaltedColumn() &&
             pf->castToHash2PartitioningFunction() ) {
          removeEntry = TRUE;
        } else
        // if force to hash2 partition a salted table, and the cached table is
        // not a hash2, do not return the cached object.
        if (
            CmpCommon::getDefault(HBASE_HASH2_PARTITIONING) != DF_OFF &&
            cachedNATable->hasSaltedColumn() &&
            pf->castToHash2PartitioningFunction() == NULL
           )
          removeEntry = TRUE;
      }
    }
  }

  // the reload cqd will be set during aqr after compiletime and runtime
  // timestamp mismatch is detected.
  // If set, reload hive metadata.
  if ((cachedNATable->isHiveTable()) &&
      (CmpCommon::getDefault(HIVE_DATA_MOD_CHECK) == DF_ON) &&
      (CmpCommon::getDefault(TRAF_RELOAD_NATABLE_CACHE) == DF_ON))
  {
    removeEntry = TRUE;
  }

  //Found in cache. If that's all the caller wanted, return now.
  if ( !removeEntry && findInCacheOnly )
    return cachedNATable;

  //if this is the first time this cache entry has been accessed
  //during the current statement
  if( !removeEntry && !cachedNATable->accessedInCurrentStatement())
  {
    //Note: cachedNATable->labelDisplayKey_ won't not be NULL
    //for NATable Objects that are in the cache. If the object
    //is not a cached object from a previous statement then we
    //will not come into this code.

    //Read label to get time of last catalog operation
    short error = 0;
    //Get redef time of table
    const Int64 tableRedefTime = cachedNATable->getRedefTime();
    //Get last catalog operation time
    Int64 labelCatalogOpTime = tableRedefTime;
    Int64 rforkCatalogOpTime = tableRedefTime;
    Int64 currentSchemaRedefTS = 0;
    Int64 cachedSchemaRedefTS = 0;

    if (!OSIM_runningSimulation())
    {
      if ((!cachedNATable->isHiveTable()) &&
          (!cachedNATable->isHbaseTable()))
      {
        // Intentionally empty: no freshness check for this case.
      } // non-hive table
      else if (!cachedNATable->isHbaseTable())
      {
        // Hive table: validate the cached entry against the Hive
        // metastore and HDFS statistics.

        // oldest cache entries we will still accept
        // Values for CQD HIVE_METADATA_REFRESH_INTERVAL:
        // -1: Never invalidate any metadata
        //  0: Always check for the latest metadata in the compiler,
        //     no check in the executor
        // >0: Check in the compiler, metadata is valid n seconds
        //     (n = value of CQD). Recompile plan after n seconds.
        //     NOTE: n has to be long enough to compile the statement,
        //     values < 20 or so are impractical.
        Int64 refreshInterval =
          (Int64) CmpCommon::getDefaultLong(HIVE_METADATA_REFRESH_INTERVAL);
        Int32 defaultStringLen =
          CmpCommon::getDefaultLong(HIVE_MAX_STRING_LENGTH);
        Int32 defaultStringLenInBytes =
          CmpCommon::getDefaultLong(HIVE_MAX_STRING_LENGTH_IN_BYTES);
        if(defaultStringLenInBytes != 32000)
          defaultStringLen = defaultStringLenInBytes;
        Int64 expirationTimestamp = refreshInterval;
        NAString defSchema =
          ActiveSchemaDB()->getDefaults().getValue(HIVE_DEFAULT_SCHEMA);
        defSchema.toUpper();
        if (refreshInterval > 0)
          expirationTimestamp = NA_JulianTimestamp() - 1000000 * refreshInterval;

        // if default string length changed, don't reuse this entry
        if (defaultStringLen != cachedNATable->getHiveDefaultStringLen())
          removeEntry = TRUE;

        QualifiedName objName = cachedNATable->getTableName();
        NAString sName = objName.getSchemaName();
        const NAString tName = objName.getObjectName();

        // map the Trafodion default Hive schema (usually "HIVE")
        // to the name used in Hive (usually "default")
        if (objName.getUnqualifiedSchemaNameAsAnsiString() == defSchema)
          sName = hiveMetaDB_->getDefaultSchemaName();

        // validate Hive table timestamps
        if (!hiveMetaDB_->validate(cachedNATable->getHiveTableId(),
                                   cachedNATable->getRedefTime(),
                                   sName.data(), tName.data()))
          removeEntry = TRUE;

        // validate HDFS stats and update them in-place, if needed
        if (!removeEntry)
          removeEntry =
            ! (cachedNATable->getClusteringIndex()->
               getHHDFSTableStats()->validateAndRefresh(expirationTimestamp));
      }
    } // ! osim simulation

    //if time of last catalog operation and table redef times
    //don't match, then delete this cache entry since it is
    //stale.
    //if error is non-zero then we were not able to read file
    //label and therefore delete this cached entry because
    //we cannot ensure it is fresh.
    if((CmpCommon::statement()->recompiling())||
       (labelCatalogOpTime != tableRedefTime )||
       (error)||
       (currentSchemaRedefTS != cachedSchemaRedefTS) ||
       (!usingCache()) ||
       (refreshCacheInThisStatement_) ||
       (removeEntry == TRUE)) // to avoid unnecessary read of metadata
    {
      //mark this entry to be removed
      removeEntry = TRUE;
    }
  } // !cachedNATable->accessedInCurrentStatement()

  if(removeEntry)
  {
    //remove from list of cached NATables
    cachedTableList_.remove(cachedNATable);

    //remove pointer to NATable from cache
    remove(key);

    //if metadata caching is ON, then adjust cache size
    //since we are deleting a caching entry
    if(cacheMetaData_)
      currentCacheSize_ = heap_->getAllocSize();

    //insert into list of tables that will be deleted
    //at the end of the statement after the query has
    //been compiled and the plan has been sent to the
    //executor. The delete is done in method
    //NATableDB::resetAfterStatement(). This basically
    //gives a little performance saving because the delete
    //won't be part of the compile time as perceived by the
    //client of the compiler
    tablesToDeleteAfterStatement_.insert(cachedNATable);
    return NULL;
  }
  else {
    // Special tables are not added to the statement table list.
    if( (NOT cachedNATable->getExtendedQualName().isSpecialTable()) ||
        (cachedNATable->getExtendedQualName().getSpecialType() ==
         ExtendedQualName::MV_TABLE) ||
        (cachedNATable->getExtendedQualName().getSpecialType() ==
         ExtendedQualName::GHOST_MV_TABLE) ||
        (cachedNATable->getExtendedQualName().getSpecialType() ==
         ExtendedQualName::GHOST_INDEX_TABLE) ||
        (cachedNATable->getExtendedQualName().getSpecialType() ==
         ExtendedQualName::INDEX_TABLE)
      )
      statementTableList_.insert(cachedNATable);
  }

  //increment the replacement, if not already max
  if(cachedNATable)
  {
    // LRU-style aging: bump the replacement counter, capped at
    // NATABLE_MAX_REFCOUNT.
    cachedNATable->replacementCounter_+=2;

    //don't let replacementcounter go over NATABLE_MAX_REFCOUNT
    if(cachedNATable->replacementCounter_ > NATABLE_MAX_REFCOUNT)
      cachedNATable->replacementCounter_ = NATABLE_MAX_REFCOUNT;

    //Keep track of tables accessed during current statement
    if((!cachedNATable->accessedInCurrentStatement()))
    {
      cachedNATable->setAccessedInCurrentStatement();
      statementCachedTableList_.insert(cachedNATable);
    }
  }

  //return NATable from cache
  return cachedNATable;
}
// by default column histograms are marked to not be fetched,
// i.e. needHistogram_ is initialized to DONT_NEED_HIST.
// this method will mark columns for appropriate histograms depending on
// where they have been referenced in the query
void NATable::markColumnsForHistograms()
{
  // Check if Show Query Stats command is being run
  NABoolean runningShowQueryStatsCmd = CmpCommon::context()->showQueryStats();

  // we want to get 1 key column that is not SYSKEY
  NABoolean addSingleIntHist = FALSE;
  if(colArray_.getColumn("SYSKEY"))
    addSingleIntHist = TRUE;

  // iterate over all the columns in the table
  for(UInt32 i=0;i<colArray_.entries();i++)
  {
    // get a reference to the column
    NAColumn * column = colArray_[i];

    // is column part of a key
    NABoolean isAKeyColumn = (column->isIndexKey() OR column->isPrimaryKey()
                              OR column->isPartitioningKey());

    //check if this column requires histograms
    if(column->isReferencedForHistogram() ||
       (isAKeyColumn && isHbaseTable()))
      column->setNeedFullHistogram();
    else
      // if column is:
      // * a key
      // OR
      // * isReferenced but not for histogram and addSingleIntHist is true
      if (isAKeyColumn ||
          ((runningShowQueryStatsCmd || addSingleIntHist) &&
           column->isReferenced() && !column->isReferencedForHistogram()))
      {
        // if column is not a syskey
        // (once a non-SYSKEY key column is chosen, stop looking for the
        // single representative column)
        if (addSingleIntHist && (column->getColName() != "SYSKEY"))
          addSingleIntHist = FALSE;

        column->setNeedCompressedHistogram();
      }
      else
        // Variable-length columns get a compressed histogram when the
        // compressed internal format is enabled via either CQD.
        if (column->getType()->getVarLenHdrSize() &&
            (CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) != DF_OFF ||
             CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT_BMO) != DF_OFF ))
        {
          column->setNeedCompressedHistogram();
        }
  }
}
// Return the fully qualified Guardian name for this table. When the
// qualified name and fileSetName_ differ, fileSetName_ is used because it
// carries the fully qualified Guardian name. The returned object is
// allocated on the statement heap.
const QualifiedName& NATable::getFullyQualifiedGuardianName()
{
  const NABoolean namesDiffer =
    (qualifiedName_.getQualifiedNameObj().getQualifiedNameAsString()
       != fileSetName_.getQualifiedNameAsString());

  QualifiedName *guardianName = NULL;
  if (namesDiffer)
    guardianName = new(CmpCommon::statementHeap()) QualifiedName
      (fileSetName_, CmpCommon::statementHeap());
  else
    guardianName = new(CmpCommon::statementHeap()) QualifiedName
      (qualifiedName_.getQualifiedNameObj(), CmpCommon::statementHeap());

  return *guardianName;
}
// Accessor: the special-table type of this table's extended qualified name.
ExtendedQualName::SpecialTableType NATable::getTableType()
{
  return qualifiedName_.getSpecialType();
}
// Return TRUE iff this table has a salt column; when found and saltColPos
// is non-NULL, the column's ordinal position is stored there.
NABoolean NATable::hasSaltedColumn(Lng32 * saltColPos)
{
  const CollIndex numCols = colArray_.entries();

  for (CollIndex ix = 0; ix < numCols; ix++)
  {
    if (NOT colArray_[ix]->isSaltColumn())
      continue;

    if (saltColPos)
      *saltColPos = ix;
    return TRUE;
  }

  return FALSE;
}
// Return TRUE iff this table has a divisioning column; when found and
// divColPos is non-NULL, the column's ordinal position is stored there.
NABoolean NATable::hasDivisioningColumn(Lng32 * divColPos)
{
  const CollIndex numCols = colArray_.entries();

  for (CollIndex ix = 0; ix < numCols; ix++)
  {
    if (NOT colArray_[ix]->isDivisioningColumn())
      continue;

    if (divColPos)
      *divColPos = ix;
    return TRUE;
  }

  return FALSE;
}
// Get the part of the row size that is computable with info we have available
// without accessing HBase. The result is passed to estimateHBaseRowCount(), which
// completes the row size calculation with HBase info.
//
// A row stored in HBase consists of the following fields for each column:
//          -----------------------------------------------------------------------
//          | Key  |Value | Row  | Row  |Column|Column|Column| Time | Key  |Value |
//  Field   |Length|Length| Key  | Key  |Family|Family|Qualif| stamp| Type |      |
//          |      |      |Length|      |Length|      |      |      |      |      |
//          -----------------------------------------------------------------------
//  # Bytes   4      4       2             1                    8      1
//
// The field lengths calculated here are for Row Key, Column Qualif, and Value.
// The size of the Value fields are not known to HBase, which treats cols as
// untyped, so we add up their lengths here, as well as the row key lengths,
// which are readily accessible via Traf metadata. The qualifiers, which represent
// the names of individual columns, are not the Trafodion column names, but
// minimal binary values that are mapped to the actual column names.
// The fixed size fields could also be added in here, but we defer that to the Java
// side so constants of the org.apache.hadoop.hbase.KeyValue class can be used.
// The single column family used by Trafodion is also a known entity, but we
// again do it in Java using the HBase client interface as insulation against
// possible future changes.
//
// Cleanup: removed a stray double semicolon on the column-fetch line and
// replaced the found-flag inner loop with an early break (a column can
// appear at most once in the key).
Int32 NATable::computeHBaseRowSizeFromMetaData() const
{
  Int32 partialRowSize = 0;
  Int32 rowKeySize = 0;
  const NAColumnArray& keyCols = clusteringIndex_->getIndexKeyColumns();
  CollIndex numKeyCols = keyCols.entries();

  // For each column of the table, add the length of its value and the length of
  // its name (HBase column qualifier). If a given column is part of the primary
  // key, add the length of its value again, because it is part of the HBase row
  // key.
  for (Int32 colInx = 0; colInx < colcount_; colInx++)
  {
    // Get length of the column qualifier and its data.
    NAColumn* col = colArray_[colInx];
    Lng32 colLen = col->getType()->getNominalSize(); // data length
    Lng32 colPos = col->getPosition();  // position in table

    partialRowSize += colLen;

    // The qualifier is not the actual column name, but a binary value
    // representing the ordinal position of the col in the table.
    // Single byte is used if possible.
    partialRowSize++;
    if (colPos > 255)
      partialRowSize++;

    // Add col length again if a primary key column, because it will be part
    // of the row key.
    for (CollIndex keyColInx = 0; keyColInx < numKeyCols; keyColInx++)
    {
      if (colPos == keyCols[keyColInx]->getPosition())
      {
        rowKeySize += colLen;
        break;  // each column occurs at most once in the key
      }
    }
  }

  partialRowSize += rowKeySize;
  return partialRowSize;
}
// For an HBase table, we can estimate the number of rows by dividing the number
// of KeyValues in all HFiles of the table by the number of columns (with a few
// other considerations).
Int64 NATable::estimateHBaseRowCount() const
{
  Int64 estRowCount = 0;
  // getHBaseInterface() returns NULL for tables where an estimate is not
  // applicable (metadata, histogram, virtual tables); keep the 0 estimate.
  ExpHbaseInterface* ehi = getHBaseInterface();
  if (ehi)
  {
    // Build a NUL-terminated copy of the fully qualified table name on
    // the statement heap for the JNI call.
    HbaseStr fqTblName;
    NAString tblName = getTableName().getQualifiedNameAsString();
    fqTblName.len = tblName.length();
    fqTblName.val = new(STMTHEAP) char[fqTblName.len+1];
    strncpy(fqTblName.val, tblName.data(), fqTblName.len);
    fqTblName.val[fqTblName.len] = '\0';

    Int32 partialRowSize = computeHBaseRowSizeFromMetaData();
    Lng32 retcode = ehi->estimateRowCount(fqTblName,
                                          partialRowSize,
                                          colcount_,
                                          estRowCount);
    NADELETEBASIC(fqTblName.val, STMTHEAP);

    // Return 0 as the row count if an error occurred while estimating it.
    // The estimate could also be 0 if there is less than 1MB of storage
    // dedicated to the table -- no HFiles, and < 1MB in MemStore, for which
    // size is reported only in megabytes.
    if (retcode < 0)
      estRowCount = 0;
    delete ehi;
  }

  return estRowCount;
}
// Return a newly created HBase interface for this table, or NULL for
// tables where HBase-level information does not apply (non-HBase tables,
// Seabase metadata tables, histogram tables, virtual tables). The caller
// owns — and must delete — the returned interface.
ExpHbaseInterface* NATable::getHBaseInterface() const
{
  const NAString &objName =
    getExtendedQualName().getQualifiedNameObj().getObjectName();

  if (!isHbaseTable() ||
      isSeabaseMDTable() ||
      objName == HBASE_HISTINT_NAME ||
      objName == HBASE_HIST_NAME ||
      getSpecialType() == ExtendedQualName::VIRTUAL_TABLE)
    return NULL;

  return NATable::getHBaseInterfaceRaw();
}
// Create and initialize an HBase interface from the HBASE_SERVER and
// HBASE_ZOOKEEPER_PORT defaults. Returns NULL (after reporting error
// 8448) if initialization fails. The caller owns the returned interface.
ExpHbaseInterface* NATable::getHBaseInterfaceRaw()
{
  NADefaults* defs = &ActiveSchemaDB()->getDefaults();
  const char* server = defs->getValue(HBASE_SERVER);
  const char* zkPort = defs->getValue(HBASE_ZOOKEEPER_PORT);

  ExpHbaseInterface* ehi = ExpHbaseInterface::newInstance
    (STMTHEAP, server, zkPort);

  Lng32 retcode = ehi->init(NULL);
  if (retcode < 0)
  {
    // Report the JNI/HBase error and clean up the half-built interface.
    *CmpCommon::diags()
      << DgSqlCode(-8448)
      << DgString0((char*)"ExpHbaseInterface::init()")
      << DgString1(getHbaseErrStr(-retcode))
      << DgInt0(-retcode)
      << DgString2((char*)GetCliGlobals()->getJniErrorStr().data());
    delete ehi;
    return NULL;
  }

  return ehi;
}
// Return the begin keys of all regions of the HBase table `hbaseName`,
// or NULL if an HBase interface could not be initialized.
NAArray<HbaseStr> *NATable::getRegionsBeginKey(const char* hbaseName)
{
  ExpHbaseInterface* ehi = getHBaseInterfaceRaw();
  if (ehi == NULL)
    return NULL;

  NAArray<HbaseStr> *keyArray = ehi->getRegionBeginKeys(hbaseName);
  delete ehi;
  return keyArray;
}
// Fill `nodeNames` with the node names of the region servers hosting the
// first `partns` regions of this table. Returns FALSE when the table has
// no applicable HBase interface or the underlying call fails.
NABoolean NATable::getRegionsNodeName(Int32 partns, ARRAY(const char *)& nodeNames ) const
{
  ExpHbaseInterface* ehi = getHBaseInterface();

  if (!ehi)
    return FALSE;
  else
  {
    // For CELL/ROW access the HBase-native object name is used; otherwise
    // the fully qualified Trafodion name.
    HbaseStr fqTblName;

    CorrName corrName(getTableName());

    NAString tblName = (corrName.isHbaseCell() || corrName.isHbaseRow()) ?
      corrName.getQualifiedNameObj().getObjectName()
      :
      getTableName().getQualifiedNameAsString();

    // NUL-terminated copy on the statement heap for the JNI call.
    fqTblName.len = tblName.length();
    fqTblName.val = new(STMTHEAP) char[fqTblName.len+1];
    strncpy(fqTblName.val, tblName.data(), fqTblName.len);
    fqTblName.val[fqTblName.len] = '\0';

    Lng32 retcode = ehi->getRegionsNodeName(fqTblName, partns, nodeNames);

    NADELETEBASIC(fqTblName.val, STMTHEAP);
    delete ehi;
    if (retcode < 0)
      return FALSE;
  }
  return TRUE;
}
// Method to get hbase table index levels and block size.
// Returns FALSE when the table has no applicable HBase interface or the
// underlying call fails; on success the values are stored in the two
// output parameters.
NABoolean NATable::getHbaseTableInfo(Int32& hbtIndexLevels, Int32& hbtBlockSize) const
{
  ExpHbaseInterface* ehi = getHBaseInterface();
  if (ehi == NULL)
    return FALSE;

  // NUL-terminated copy of the fully qualified name, on the statement
  // heap, for the JNI call.
  NAString tblName = getTableName().getQualifiedNameAsString();

  HbaseStr fqTblName;
  fqTblName.len = tblName.length();
  fqTblName.val = new(STMTHEAP) char[fqTblName.len+1];
  strncpy(fqTblName.val, tblName.data(), fqTblName.len);
  fqTblName.val[fqTblName.len] = '\0';

  Lng32 retcode = ehi->getHbaseTableInfo(fqTblName,
                                         hbtIndexLevels,
                                         hbtBlockSize);

  NADELETEBASIC(fqTblName.val, STMTHEAP);
  delete ehi;

  return (retcode < 0) ? FALSE : TRUE;
}
// This method is called on a hive NATable.
// If that table has a corresponding external table,
// then this method moves the relevant attributes from
// NATable of external table (etTable) to this.
// Currently, column and clustering key info is moved.
//
// Returns 0 (no error paths at present).
short NATable::updateExtTableAttrs(NATable *etTable)
{
  NAFileSet *fileset = this->getClusteringIndex();
  NAFileSet *etFileset = etTable->getClusteringIndex();

  // Take the external table's column layout wholesale.
  colcount_ = etTable->getColumnCount();
  colArray_ = etTable->getNAColumnArray();
  fileset->allColumns_ = etFileset->getAllColumns();

  // Only adopt key information when the external table declares an
  // explicit key (i.e. not just the implicit SYSKEY).
  if (NOT etFileset->hasOnlySyskey()) // explicit key was specified
    {
      keyLength_ = etTable->getKeyLength();
      recordLength_ = etTable->getRecordLength();
      fileset->keysDesc_ = etFileset->getKeysDesc();
      fileset->indexKeyColumns_ = etFileset->getIndexKeyColumns();
      fileset->keyLength_ = etFileset->getKeyLength();
      fileset->encodedKeyLength_ = etFileset->getEncodedKeyLength();
    }

  /*
  fileset->partitioningKeyColumns_ = etFileset->getPartitioningKeyColumns();
  fileset->partFunc_ = etFileset->getPartitioningFunction();
  fileset->countOfFiles_ = etFileset->getCountOfFiles();
  */

  return 0;
}
// get details of this NATable cache entry
//
// Copies the catalog/schema/object names and cached size of the ii-th
// cached NATable into `details`; zero-fills `details` when ii is out of
// range.
// NOTE(review): the strncpy calls assume details.catalog/schema/object
// are large enough for the respective name parts — confirm against the
// NATableEntryDetails declaration.
void NATableDB::getEntryDetails(
     Int32 ii,                      // (IN) : NATable cache iterator entry
     NATableEntryDetails &details)  // (OUT): cache entry's details
{
  Int32  NumEnt = cachedTableList_.entries();
  if ( ( NumEnt == 0 ) || ( NumEnt <= ii ) )
  {
    memset(&details, 0, sizeof(details));
  }
  else {
    NATable * object = cachedTableList_[ii];
    QualifiedName QNO = object->qualifiedName_.getQualifiedNameObj();

    Int32 partLen = QNO.getCatalogName().length();
    strncpy(details.catalog, (char *)(QNO.getCatalogName().data()), partLen );
    details.catalog[partLen] = '\0';

    partLen = QNO.getSchemaName().length();
    strncpy(details.schema, (char *)(QNO.getSchemaName().data()), partLen );
    details.schema[partLen] = '\0';

    partLen = QNO.getObjectName().length();
    strncpy(details.object, (char *)(QNO.getObjectName().data()), partLen );
    details.object[partLen] = '\0';

    details.size = object->sizeInCache_;
  }
}
// Return TRUE iff corrName refers to a Hive table.
NABoolean NATableDB::isHiveTable(CorrName& corrName)
{
  return corrName.isHive();
}
// Return TRUE iff corrName names one of the internal virtual tables used
// for DISPLAY/EXPLAIN and similar utility output.
NABoolean NATableDB::isSQUtiDisplayExplain(CorrName& corrName)
{
  static const char * const utilTableNames[] = {
    "EXE_UTIL_DISPLAY_EXPLAIN__",
    "EXPLAIN__",
    "HIVEMD__",
    "DESCRIBE__",
    "EXE_UTIL_EXPR__",
    "STATISTICS__"
  };

  const char* tblName = corrName.getQualifiedNameObj().getObjectName();

  for (size_t ix = 0;
       ix < sizeof(utilTableNames) / sizeof(utilTableNames[0]);
       ix++)
  {
    if (strcmp(tblName, utilTableNames[ix]) == 0)
      return TRUE;
  }

  return FALSE;
}
// Returns TRUE iff 'corrName' names an internal stored-procedure result
// table (prefix match against the known SPTableOut... name prefixes).
NABoolean NATableDB::isSQInternalStoredProcedure(CorrName& corrName)
{
  static const char * const spTablePrefixes[] = {
    "SPTableOutQUERYCACHEENTRIES",
    "SPTableOutQUERYCACHEDELETE",
    "SPTableOutQUERYCACHE",
    "SPTableOutHYBRIDQUERYCACHEENTRIES",
    "SPTableOutHYBRIDQUERYCACHE"
  };

  const char* tblName = corrName.getQualifiedNameObj().getObjectName();
  for (size_t i = 0; i < sizeof(spTablePrefixes)/sizeof(spTablePrefixes[0]); i++)
    {
      if (strncmp(tblName, spTablePrefixes[i], strlen(spTablePrefixes[i])) == 0)
        return TRUE;
    }

  return FALSE;
}
// Stub: UMD (user metadata) tables are not recognized in this code line,
// so nothing ever qualifies.
NABoolean NATableDB::isSQUmdTable(CorrName& corrName)
{
  return FALSE;
}
// -----------------------------------------------------------------------
// Return the NATable for 'corrName', using the NATable cache when
// possible. On a cache miss (or when a cached entry must be discarded),
// a new NATable is constructed from a table descriptor obtained from
// Trafodion/HBase metadata, the Hive metastore, or the caller-supplied
// 'inTableDescStruct'. On error, diagnostics are populated, bindWA's
// error status is set, and NULL is returned.
// -----------------------------------------------------------------------
NATable * NATableDB::get(CorrName& corrName, BindWA * bindWA,
                         desc_struct *inTableDescStruct){

  //check cache to see if a cached NATable object exists
  NATable *table = get(&corrName.getExtendedQualNameObj(), bindWA);

  // A caller-supplied descriptor overrides any cached Trafodion/HBase entry.
  if (table && (corrName.isHbase() || corrName.isSeabase()) && inTableDescStruct)
    {
      remove(table->getKey());
      table = NULL;
    }

  // CQD-driven forced reload of cached Trafodion tables (except MD tables).
  if (table && ((table->isHbaseTable() || table->isSeabaseTable()) &&
                !(table->isSeabaseMDTable())))
    {
      if ((CmpCommon::getDefault(TRAF_RELOAD_NATABLE_CACHE) == DF_ON))
        {
          remove(table->getKey());
          table = NULL;
        }
    }

  // Re-validate cached virtual-table layouts for HBase cell/row access.
  if (table && (corrName.isHbaseCell() || corrName.isHbaseRow()))
    {
      if (NOT HbaseAccess::validateVirtualTableDesc(table))
        {
          remove(table->getKey());
          table = NULL;
        }
    }

  // for caching statistics
  if ((cacheMetaData_ && useCache_) && corrName.isCacheable())
  {
    //One lookup counted
    ++totalLookupsCount_;
    if (table) ++totalCacheHits_; //Cache hit counted
  }

  NABoolean isMV = (table && table->isAnMV());

  if (NOT table ||
      (NOT isMV && table->getSpecialType() != corrName.getSpecialType())) {

    // in open source, only the SEABASE catalog is allowed.
    // Return an error if some other catalog is being used.
    if ((NOT corrName.isHbase()) &&
        (NOT corrName.isSeabase()) &&
        (NOT corrName.isHive()) &&
        (corrName.getSpecialType() != ExtendedQualName::VIRTUAL_TABLE))
      {
        *CmpCommon::diags()
          << DgSqlCode(-1002)
          << DgCatalogName(corrName.getQualifiedNameObj().getCatalogName())
          << DgString0("");

        bindWA->setErrStatus();
        return NULL;
      }

    // If this is a 'special' table, generate a table descriptor for it.
    //
    if (NOT inTableDescStruct && corrName.isSpecialTable())
      inTableDescStruct = generateSpecialDesc(corrName);

    //Heap used by the NATable object
    NAMemory * naTableHeap = CmpCommon::statementHeap();
    size_t allocSizeBefore = 0;

    //if NATable caching is on check if this table is not already
    //in the NATable cache. If it is in the cache create this NATable
    //on the statment heap, since the cache can only store one value per
    //key, therefore all duplicates (two or more different NATable objects
    //that have the same key) are deleted at the end of the statement.
    //ALSO
    //We don't cache any special tables across statements. Please check
    //the class ExtendedQualName for method isSpecialTable to see what
    //are special tables
    if (((NOT table) && cacheMetaData_ && useCache_) &&
        corrName.isCacheable()){
      naTableHeap = getHeap();
      allocSizeBefore = naTableHeap->getAllocSize();
    }

    //if table is in cache tableInCache will be non-NULL
    //otherwise it is NULL.
    NATable * tableInCache = table;

    CmpSeabaseDDL cmpSBD((NAHeap *)CmpCommon::statementHeap());
    if ((corrName.isHbase() || corrName.isSeabase()) &&
        (!isSQUmdTable(corrName)) &&
        (!isSQUtiDisplayExplain(corrName)) &&
        (!isSQInternalStoredProcedure(corrName))
        ) {
      // ------------------------------------------------------------------
      // Create an NATable object for a Trafodion/HBase table
      // ------------------------------------------------------------------
      desc_struct *tableDesc = NULL;
      NABoolean isSeabase = FALSE;
      NABoolean isSeabaseMD = FALSE;
      NABoolean isUserUpdatableSeabaseMD = FALSE;
      NABoolean isHbaseCell = corrName.isHbaseCell();
      NABoolean isHbaseRow = corrName.isHbaseRow();
      if (isHbaseCell || isHbaseRow)// explicit cell or row format specification
        {
          const char* extHBaseName = corrName.getQualifiedNameObj().getObjectName();
          if (cmpSBD.existsInHbase(extHBaseName) != 1)
            {
              *CmpCommon::diags()
                << DgSqlCode(-1389)
                << DgString0(corrName.getQualifiedNameObj().getObjectName());

              bindWA->setErrStatus();
              return NULL;
            }

          NAArray<HbaseStr> *keyArray = NATable::getRegionsBeginKey(extHBaseName);

          tableDesc =
            HbaseAccess::createVirtualTableDesc
            (corrName.getExposedNameAsAnsiString(FALSE, TRUE).data(),
             isHbaseRow, isHbaseCell, keyArray);
          deleteNAArray(STMTHEAP, keyArray);

          isSeabase = FALSE;
        }
      else if (corrName.isSeabaseMD())
        {
          if (corrName.isSpecialTable() && corrName.getSpecialType() == ExtendedQualName::INDEX_TABLE)
            {
              tableDesc =
                cmpSBD.getSeabaseTableDesc(
                     corrName.getQualifiedNameObj().getCatalogName(),
                     corrName.getQualifiedNameObj().getSchemaName(),
                     corrName.getQualifiedNameObj().getObjectName(),
                     COM_INDEX_OBJECT);
            }
          else
            {
              tableDesc =
                cmpSBD.getSeabaseTableDesc(
                     corrName.getQualifiedNameObj().getCatalogName(),
                     corrName.getQualifiedNameObj().getSchemaName(),
                     corrName.getQualifiedNameObj().getObjectName(),
                     COM_BASE_TABLE_OBJECT);
              if (tableDesc)
                {
                  if (cmpSBD.isUserUpdatableSeabaseMD(
                           corrName.getQualifiedNameObj().getCatalogName(),
                           corrName.getQualifiedNameObj().getSchemaName(),
                           corrName.getQualifiedNameObj().getObjectName()))
                    isUserUpdatableSeabaseMD = TRUE;
                }
            }

          isSeabase = TRUE;
          isSeabaseMD = TRUE;
        }
      else if (! inTableDescStruct)
        {
          ComObjectType objectType = COM_BASE_TABLE_OBJECT;
          isSeabase = TRUE;

          if (corrName.isSpecialTable())
            {
              switch (corrName.getSpecialType())
                {
                case ExtendedQualName::INDEX_TABLE:
                  {
                    objectType = COM_INDEX_OBJECT;
                    break;
                  }
                case ExtendedQualName::SG_TABLE:
                  {
                    objectType = COM_SEQUENCE_GENERATOR_OBJECT;
                    isSeabase = FALSE;
                    break;
                  }
                case ExtendedQualName::LIBRARY_TABLE:
                  {
                    objectType = COM_LIBRARY_OBJECT;
                    isSeabase = FALSE;
                    break;
                  }
                default: //TODO: No SpecialTableType for UDFs/Routines/COM_USER_DEFINED_ROUTINE_OBJECT
                  {
                    objectType = COM_BASE_TABLE_OBJECT;
                  }
                }
            }

          tableDesc = cmpSBD.getSeabaseTableDesc(
               corrName.getQualifiedNameObj().getCatalogName(),
               corrName.getQualifiedNameObj().getSchemaName(),
               corrName.getQualifiedNameObj().getObjectName(),
               objectType);
        }

      if (inTableDescStruct)
        tableDesc = inTableDescStruct;

      if (tableDesc)
        table = new (naTableHeap)
          NATable(bindWA, corrName, naTableHeap, tableDesc);

      if (!tableDesc || !table || bindWA->errStatus())
        {
          if (isSeabase)
            *CmpCommon::diags()
              << DgSqlCode(-4082)
              << DgTableName(corrName.getExposedNameAsAnsiString());
          else
            *CmpCommon::diags()
              << DgSqlCode(-1389)
              << DgString0(corrName.getExposedNameAsAnsiString());

          bindWA->setErrStatus();
          return NULL;
        }

      table->setIsHbaseCellTable(isHbaseCell);
      table->setIsHbaseRowTable(isHbaseRow);
      table->setIsSeabaseTable(isSeabase);
      table->setIsSeabaseMDTable(isSeabaseMD);
      table->setIsUserUpdatableSeabaseMDTable(isUserUpdatableSeabaseMD);
    }
    else if (isHiveTable(corrName) &&
             (!isSQUmdTable(corrName)) &&
             (!isSQUtiDisplayExplain(corrName)) &&
             (!corrName.isSpecialTable()) &&
             (!isSQInternalStoredProcedure(corrName))
             ) {
      // ------------------------------------------------------------------
      // Create an NATable object for a Hive table
      // ------------------------------------------------------------------
      // Lazily connect to the Hive metastore once per context; on failure,
      // diags are populated and the connection object is discarded.
      if ( hiveMetaDB_ == NULL ) {
        if (CmpCommon::getDefault(HIVE_USE_FAKE_TABLE_DESC) != DF_ON)
          {
            hiveMetaDB_ = new (CmpCommon::contextHeap()) HiveMetaData();

            if ( !hiveMetaDB_->init() ) {
              *CmpCommon::diags() << DgSqlCode(-1190)
                                  << DgString0(hiveMetaDB_->getErrMethodName())
                                  << DgString1(hiveMetaDB_->getErrCodeStr())
                                  << DgString2(hiveMetaDB_->getErrDetail())
                                  << DgInt0(hiveMetaDB_->getErrCode());
              bindWA->setErrStatus();

              NADELETEBASIC(hiveMetaDB_, CmpCommon::contextHeap());
              hiveMetaDB_ = NULL;

              return NULL;
            }
          }
        else
          hiveMetaDB_ = new (CmpCommon::contextHeap())
            HiveMetaData(); // fake metadata
      }

      // this default schema name is what the Hive default schema is called in SeaHive
      NAString defSchema = ActiveSchemaDB()->getDefaults().getValue(HIVE_DEFAULT_SCHEMA);
      defSchema.toUpper();
      struct hive_tbl_desc* htbl;
      NAString tableNameInt = corrName.getQualifiedNameObj().getObjectName();
      NAString schemaNameInt = corrName.getQualifiedNameObj().getSchemaName();
      if (corrName.getQualifiedNameObj().getUnqualifiedSchemaNameAsAnsiString() == defSchema)
        schemaNameInt = hiveMetaDB_->getDefaultSchemaName();

      // Hive stores names in lower case
      // Right now, just downshift, could check for mixed case delimited
      // identifiers at a later point, or wait until Hive supports delimited identifiers
      schemaNameInt.toLower();
      tableNameInt.toLower();

      if (CmpCommon::getDefault(HIVE_USE_FAKE_TABLE_DESC) == DF_ON)
        htbl = hiveMetaDB_->getFakedTableDesc(tableNameInt);
      else
        htbl = hiveMetaDB_->getTableDesc(schemaNameInt, tableNameInt);

      if ( htbl )
        {
          table = new (naTableHeap) NATable
            (bindWA, corrName, naTableHeap, htbl);

          // 'table' is the NATable for underlying hive table.
          // That table may also have an associated external table.
          // Skip processing the external table defn, if only the
          // underlying hive table is needed.
          if (NOT bindWA->returnHiveTableDefn())
            {
              // if this hive/orc table has an associated external table,
              // get table desc for it.
              NAString extName = ComConvertNativeNameToTrafName(
                   corrName.getQualifiedNameObj().getCatalogName(),
                   corrName.getQualifiedNameObj().getSchemaName(),
                   corrName.getQualifiedNameObj().getObjectName());

              QualifiedName qn(extName, 3);
              desc_struct *etDesc = cmpSBD.getSeabaseTableDesc(
                   qn.getCatalogName(),
                   qn.getSchemaName(),
                   qn.getObjectName(),
                   COM_BASE_TABLE_OBJECT);
              if (table && etDesc)
                {
                  CorrName cn(qn);
                  NATable * etTable = new (naTableHeap) NATable
                    (bindWA, cn, naTableHeap, etDesc);

                  // if ext and hive columns dont match, return error
                  // unless it is a drop stmt.
                  if ((table->getUserColumnCount() != etTable->getUserColumnCount()) &&
                      (NOT bindWA->externalTableDrop()))
                    {
                      *CmpCommon::diags()
                        << DgSqlCode(-8437);

                      bindWA->setErrStatus();
                      return NULL;
                    }

                  if (etTable->hiveExtColAttrs() || etTable->hiveExtKeyAttrs())
                    {
                      // attrs were explicitly specified for this external
                      // table. Merge them with the hive table attrs.
                      short rc = table->updateExtTableAttrs(etTable);
                      if (rc)
                        {
                          bindWA->setErrStatus();
                          return NULL;
                        }
                    }
                  table->setHasHiveExtTable(TRUE);
                } // ext table
            } // allowExternalTables
        } // htbl
      else
        {
          // Error codes 0 and 100 from the metastore mean "not found";
          // anything else is a metastore access error.
          if ((hiveMetaDB_->getErrCode() == 0)||
              (hiveMetaDB_->getErrCode() == 100))
            {
              *CmpCommon::diags()
                << DgSqlCode(-1388)
                << DgTableName(corrName.getExposedNameAsAnsiString());
            }
          else
            {
              *CmpCommon::diags()
                << DgSqlCode(-1192)
                << DgString0(hiveMetaDB_->getErrMethodName())
                << DgString1(hiveMetaDB_->getErrCodeStr())
                << DgString2(hiveMetaDB_->getErrDetail())
                << DgInt0(hiveMetaDB_->getErrCode());

              hiveMetaDB_->resetErrorInfo();
            }

          bindWA->setErrStatus();
          return NULL;
        }
    } else
      // ------------------------------------------------------------------
      // Neither Trafodion nor Hive (probably dead code below)
      // ------------------------------------------------------------------
      table = new (naTableHeap)
        NATable(bindWA, corrName, naTableHeap, inTableDescStruct);

    CMPASSERT(table);

    //if there was a problem in creating the NATable object
    if (NOT ((table->getExtendedQualName().isSpecialTable()) &&
             (table->getExtendedQualName().getSpecialType() ==
              ExtendedQualName::SG_TABLE)) &&
        (table->getColumnCount() == 0)) {
      bindWA->setErrStatus();
      return NULL;
    }

    // Special tables are not added to the statement table list.
    // Index tables are added to the statement table list
    if( (NOT table->getExtendedQualName().isSpecialTable()) ||
        (table->getExtendedQualName().getSpecialType() ==
         ExtendedQualName::INDEX_TABLE) ||
        (table->getExtendedQualName().getSpecialType() ==
         ExtendedQualName::MV_TABLE) ||
        (table->getExtendedQualName().getSpecialType() ==
         ExtendedQualName::GHOST_MV_TABLE) ||
        (table->getExtendedQualName().getSpecialType() ==
         ExtendedQualName::GHOST_INDEX_TABLE)
        )
      statementTableList_.insert(table);

    //if there was no entry in cache associated with this key then
    //insert it into cache.
    //if there is already a value associated with this in the cache
    //then don't insert into cache.
    //This might happen e.g. if we call this method twice for the same table
    //in the same statement.
    if(!tableInCache){

      //insert into cache
      insert(table);

      //if we are using the cache
      //if this NATable object is cacheable
      if((useCache_) &&
         (corrName.isCacheable()))
        {
          //insert into list of all cached tables;
          cachedTableList_.insert(table);

          //insert into list of cached tables accessed
          //during this statement
          statementCachedTableList_.insert(table);

          //if metadata caching is ON then adjust the size of the cache
          //since we are adding an entry to the cache
          if(cacheMetaData_)
            {
              currentCacheSize_ = heap_->getAllocSize();
              table->sizeInCache_ = currentCacheSize_ - allocSizeBefore;
            }

          //update the high watermark for caching statistics
          if (currentCacheSize_ > highWatermarkCache_)
            highWatermarkCache_ = currentCacheSize_;
          //
          // the CompilerTrackingInfo highWaterMark gets reset on each
          // tracking interval so it is tracked independently
          if (currentCacheSize_ > intervalWaterMark_)
            intervalWaterMark_ = currentCacheSize_;

          //if we are caching metadata and previously the cache was
          //empty set this flag to TRUE to indicate that there is
          //something in the cache
          if(!metaDataCached_ && cacheMetaData_)
            metaDataCached_ = TRUE;

          //enforce the cache memory constraints
          if(!enforceMemorySpaceConstraints())
            {
              //was not able to get cache size below
              //max allowed cache size
#ifndef NDEBUG
              CMPASSERT(FALSE);
#endif
            }
        }
      else{
        //this has to be on the context heap since we need
        //it after the statement heap has been remove
        ExtendedQualName * nonCacheableTableName = new(CmpCommon::contextHeap())
          ExtendedQualName(corrName.getExtendedQualNameObj(),
                           CmpCommon::contextHeap());
        //insert into list of names of special tables
        nonCacheableTableList_.insert(nonCacheableTableName);

        // insert into list of non cacheable table idents. This
        // allows the idents to be removed after the statement so
        // the context heap doesn't keep growing.
        const LIST(CollIndex) & tableIdList = table->getTableIdList();
        for(CollIndex i = 0; i < tableIdList.entries(); i++)
          {
            nonCacheableTableIdents_.insert(tableIdList[i]);
          }
      }
    }
  }

  //setup this NATable object for use in current statement
  //if this object has already been setup earlier in the
  //statement then this method will just return without doing
  //anything
  if(table) {
    table->setupForStatement();
  }

  return table;
}
// Remove all cached NATable entries whose three-part name matches
// 'corrName' (ignoring partition clauses and other extended qualifiers).
// When qiScope is REMOVE_FROM_ALL_USERS, also broadcast query-invalidation
// keys so other compiler instances drop their cached copies.
void NATableDB::removeNATable2(CorrName &corrName, ComQiScope qiScope,
                               ComObjectType ot)
{
  const ExtendedQualName* toRemove = &(corrName.getExtendedQualNameObj());
  NAHashDictionaryIterator<ExtendedQualName,NATable> iter(*this);
  ExtendedQualName *key = NULL;
  NATable *cachedNATable = NULL;
  NASet<Int64> objectUIDs(CmpCommon::statementHeap(), 1);

  // iterate over all entries and remove the ones that match the name
  // ignoring any partition clauses and other additional info
  iter.getNext(key,cachedNATable);
  while(key)
    {
      if (key->getQualifiedNameObj() == toRemove->getQualifiedNameObj())
        {
          //remove from list of cached NATables
          if (cachedTableList_.remove(cachedNATable) > 0)
            {
              // LCOV_EXCL_START - caching is off by default for now
              //if metadata caching is ON, then adjust cache size
              //since we are deleting a caching entry
              if(cacheMetaData_)
                currentCacheSize_ = heap_->getAllocSize();
              // Defer the actual delete: the NATable may still be referenced
              // during the rest of this statement.
              if (cachedNATable->heap_ &&
                  cachedNATable->heap_ != CmpCommon::statementHeap())
                tablesToDeleteAfterStatement_.insert(cachedNATable);
              // LCOV_EXCL_STOP
            }
          else
            {
              // this must have been a non-cacheable table
              const LIST(CollIndex) & tableIdList = cachedNATable->getTableIdList();
              for(CollIndex i = 0; i < tableIdList.entries(); i++)
                {
                  nonCacheableTableIdents_.remove(tableIdList[i]);
                }

              for (CollIndex i=0; i<nonCacheableTableList_.entries(); i++)
                {
                  if (*(nonCacheableTableList_[i]) == *key)
                    {
                      nonCacheableTableList_.removeAt(i);
                      i--;
                    }
                }
            }

          //remove pointer to NATable from cache
          remove(key);

          objectUIDs.insert(cachedNATable->objectUid().castToInt64());
          statementCachedTableList_.remove(cachedNATable);
          statementTableList_.remove(cachedNATable);
        }

      iter.getNext(key,cachedNATable);
    }

  // clear out the other users' caches too.
  if (qiScope == REMOVE_FROM_ALL_USERS)
    {
      // There are some scenarios where the affected object
      // does not have an NATable cache entry. Need to get one and
      // add its objectUID to the set.
      if (0 == objectUIDs.entries())
        {
          Int64 ouid = lookupObjectUidByName(
               toRemove->getQualifiedNameObj(),
               ot,
               FALSE);
          if (ouid > 0)
            objectUIDs.insert(ouid);
        }

      Int32 numKeys = objectUIDs.entries();
      if (numKeys > 0)
        {
          SQL_QIKEY qiKeys[numKeys];
          for (CollIndex i = 0; i < numKeys; i++)
            {
              // Operation "OR" = object redefined, keyed by object UID.
              qiKeys[i].ddlObjectUID = objectUIDs[i];
              qiKeys[i].operation[0] = 'O';
              qiKeys[i].operation[1] = 'R';
            }
          // NOTE(review): the return code of SQL_EXEC_SetSecInvalidKeys is
          // ignored here -- confirm that a broadcast failure is benign.
          long retcode = SQL_EXEC_SetSecInvalidKeys(numKeys, qiKeys);
        }
    }
}
// Invalidate NATable cache entries for 'corrName'.
//
// When DDL transactions are in use and we are not yet at commit time, the
// object name is remembered on the context's ddlObjsList so that *other*
// users can be invalidated at commit/rollback, while this compiler's own
// cache is invalidated immediately (so later operations in this transaction
// see the new definition).
//
// When DDL transactions are not in use, or when this is the commit-time
// call (atCommit == TRUE), the invalidation is done for all users now.
void NATableDB::removeNATable(CorrName &corrName, ComQiScope qiScope,
                              ComObjectType ot,
                              NABoolean ddlXns, NABoolean atCommit)
{
  if ((NOT ddlXns) || atCommit)
    {
      // No DDL-transaction deferral needed: invalidate everywhere now.
      removeNATable2(corrName, qiScope, ot);
      return;
    }

  // DDL transaction in progress: record the object for commit-time
  // processing (unless it is already recorded) and invalidate locally.
  CmpContext::DDLObjInfo ddlObj;
  ddlObj.ddlObjName = corrName.getQualifiedNameAsString();
  ddlObj.qiScope = qiScope;
  ddlObj.ot = ot;
  ddlObj.objUID = -1;

  NABoolean alreadyListed = FALSE;
  for (Lng32 i = 0;
       ((NOT alreadyListed) && (i < CmpCommon::context()->ddlObjsList().entries()));
       i++)
    {
      if (ddlObj.ddlObjName == CmpCommon::context()->ddlObjsList()[i].ddlObjName)
        alreadyListed = TRUE;
    }

  removeNATable2(corrName, qiScope, ot);

  if (NOT alreadyListed)
    CmpCommon::context()->ddlObjsList().insert(ddlObj);
}
//This method is called at the end of each statement to reset statement
//specific stuff in the NATable objects in the cache.
//
// Fix: removed the unused local 'tableHeap'; the heap-ownership notes it
// carried are preserved in the comments below. Only NATables allocated on
// their own (system) heap are deleted here -- the CmpContext heap is freed
// in ContextCli::deleteMe() and the statement heap in ~CmpStatement.
void NATableDB::resetAfterStatement(){

  //Variable used for iteration in loops below
  CollIndex i = 0;

  //if metadata caching (i.e. NATable caching) is not on then just
  //flush the cache. Since it might be that there are still some
  //tables in the cache.
  if (!cacheMetaData_){
    flushCache();
  }
  else{
    //if caching is ON then reset all cached NATables used during statement
    //if this was a DDL statment delete all NATables that participated in the
    //statement
    for (i=0; i < statementCachedTableList_.entries(); i++)
    {
      if(statementCachedTableList_[i])
      {
        //if the statment was a DDL statement, if so then delete
        //all the tables used in the statement, since the DDL affected
        //the tables and they should be reconstructed for whatever
        //statement follows.
        if((!useCache_)||
           (statementCachedTableList_[i]->isAnMV())||
           (statementCachedTableList_[i]->isAnMVMetaData())||
           (statementCachedTableList_[i]->isAnMPTableWithAnsiName())||
           (statementCachedTableList_[i]->constructionHadWarnings()) ||
           (statementCachedTableList_[i]->getClearHDFSStatsAfterStmt())){
          //remove from list of cached Tables
          cachedTableList_.remove(statementCachedTableList_[i]);
          //remove from the cache itself
          remove(statementCachedTableList_[i]->getKey());

          // Delete only if the NATable owns its own heap (NATable::OTHER).
          if ( statementCachedTableList_[i]->getHeapType() == NATable::OTHER ) {
            delete statementCachedTableList_[i];
            currentCacheSize_ = heap_->getAllocSize();
          }
        }
        else{
          statementCachedTableList_[i]->resetAfterStatement();
        }
      }
    }

    nonCacheableTableIdents_.clear();

    //remove references to nonCacheable tables from cache
    //and delete the name
    for(i=0; i < nonCacheableTableList_.entries(); i++){
      remove(nonCacheableTableList_[i]);
      delete nonCacheableTableList_[i]; // delete the name only
    }

    //clear the list of special tables
    nonCacheableTableList_.clear();
  }

  //delete tables that were not deleted earlier to
  //save compile-time performance. Since the heaps
  //deleted below are large 16KB+, it takes time
  //to delete them. The time to delete these heaps
  //at this point is not 'visible' in the compile-
  //time since the statement has been compiled and
  //sent to the executor.
  for(i=0; i < tablesToDeleteAfterStatement_.entries(); i++)
  {
    if ( tablesToDeleteAfterStatement_[i]->getHeapType() == NATable::OTHER ) {
      delete tablesToDeleteAfterStatement_[i];
    }
    currentCacheSize_ = heap_->getAllocSize();
  }

  //clear the list of tables to delete after statement
  tablesToDeleteAfterStatement_.clear();

  //clear the list of tables used in the current statement
  statementTableList_.clear();

  //clear the list of cached tables used in the current statement
  statementCachedTableList_.clear();

  //reset various statement level flags
  refreshCacheInThisStatement_=FALSE;
  useCache_=FALSE;
}
//flush the cache if there is anything cached in it
//otherwise just destroy all the keys in the cache.
//If there is nothing cached, which could mean either
//of the following:
//1. NATable caching is off.
//2. All entries currently in cache where created on
// the statment heap, i.e. not persistent across
// statements.
//In such a case we don't need to delete any NATable
//objects (since they will be removed when the statement
//heap is deleted. We only need to delete the keys.
void NATableDB::flushCache()
{

  //if something is cached
  if(metaDataCached_){
    //set the flag to indicate cache is clear
    metaDataCached_ = FALSE;

    //Destroy the keys in the cache, this also
    //clears out the cache entries without deleting
    //the cached NATable
    clearAndDestroyKeysOnly();

    //delete the tables that were cached by deleting each table's
    //heap. Each cached table and all of its stuff are allocated
    //on a seperate heap (i.e. a heap per table). That seems to
    //be the safest thing to do to avoid memory leaks.
    for(CollIndex i=0; i < cachedTableList_.entries(); i++)
    {
      if(cachedTableList_[i])
      {
        delete cachedTableList_[i];
      }
    }

  }
  else{
    //no metadata cached (i.e. metadata caching is off and there
    //is no remaining metadata in the cache from when the caching
    //was on). Just clear out the cache entries, of course we need
    //to delete keys because the cache allocates keys on the context
    //heap.
    clearAndDestroyKeysOnly ();
  }

  //clear out the lists of tables in the cache
  //1. list of tables in the cache used in this statement
  //2. list of all tables in the cache
  statementCachedTableList_.clear();
  cachedTableList_.clear();

  //set cache size to 0 to indicate nothing in cache
  currentCacheSize_ = 0;
  highWatermarkCache_ = 0;  // High watermark of currentCacheSize_
  totalLookupsCount_ = 0;   // reset NATable entries lookup counter
  totalCacheHits_ = 0;      // reset cache hit counter

  // per interval counters (CompilerTrackingInfo)
  intervalWaterMark_ = 0;
}
//check if cache size is within maximum allowed cache size.
//if cache size is above the maximum allowed cache size,
//then remove entries in the cache based on the cache
//replacement policy (clock-style sweep with per-table
//replacement counters) to get the cache size under the
//maximum allowed cache size.
//
//Returns TRUE if the cache is (or was brought) under the limit,
//FALSE if NATABLE_MAX_REFCOUNT full sweeps could not free enough space.
//
// Fix: added explicit braces around the NULL check. In the original,
// the indentation suggested the 'else' belonged to 'if(table)', but by
// C++ dangling-else binding it actually belongs to the inner counter
// test -- which is the correct behavior (decrement the counter of a
// live table). The braces make the real structure visible; behavior is
// unchanged.
NABoolean NATableDB::enforceMemorySpaceConstraints()
{

  //check if cache size is within memory constraints
  if (maxCacheSize_ == 0 || heap_->getAllocSize() <= maxCacheSize_)
    return TRUE;

  //need to get cache size under memory allowance

  //if our cursor is pointing past the end of the
  //list of cached entries, reset it to point to
  //start of the list of cached entries.
  if(replacementCursor_ >= (Int32) cachedTableList_.entries())
    replacementCursor_ = 0;

  //keep track of entry in the list of cached entries
  //where we are starting from, since we start from
  //where we left off the last time this method got
  //called.
  Int32 startingCursorPosition = replacementCursor_;
  Int32 numLoops = 0; //number of loops around the list of cached objects

  //this loop iterates over list of cached NATable objects.
  //in each iteration it decrements the replacementCounter
  //of a table.
  //if a table with a replacementCounter value of zero is
  //encountered, it is removed if it is not being used
  //in the current statement.
  //check if cache is now within memory constraints
  while (heap_->getAllocSize() > maxCacheSize_){

    //get reference to table
    NATable * table = cachedTableList_[replacementCursor_];
    if (table)
    {
      //check if table has a zero replacementCount
      if (!table->replacementCounter_)
      {
        //if table is not being accessed in current statement then remove it
        if (!table->accessedInCurrentStatement_)
        {
          RemoveFromNATableCache( table , replacementCursor_ );
        }
      }
      else
      {
        table->replacementCounter_--;
      }
    }

    replacementCursor_++;

    //if replacement cursor ran of the end of the list of cached tables
    //reset it to the beginig of the list
    if(replacementCursor_ >= (Int32) cachedTableList_.entries())
      replacementCursor_ = 0;

    //check if we completed one loop around all the cached entries
    //if so, increment the loop count
    if(replacementCursor_ == startingCursorPosition){
      numLoops++;
    }

    //did NATABLE_MAX_REFCOUNT loops around list of cached objects
    //still could not free up enough space
    //We check for NATABLE_MAX_REFCOUNT loops since the replacementCounter_
    //is capped at NATABLE_MAX_REFCOUNT loops.
    if(numLoops==NATABLE_MAX_REFCOUNT)
      return FALSE;
  }

  //return true indicating cache size is below maximum memory allowance.
  return TRUE;
}
//Remove all the NATable objects from the cache that were used during
//the current statement.
//This is used when a binder error occurs. In rare cases the binder
//error might be due to a stale metadata cache entry.
// LCOV_EXCL_START :cnu
void NATableDB::flushCacheEntriesUsedInCurrentStatement(){
//do this only if metadata caching is 'ON'
if(cacheMetaData_)
{
for (CollIndex i=0; i < statementCachedTableList_.entries(); i++)
{
if(statementCachedTableList_[i])
{
//remove from list of cached Tables
cachedTableList_.remove(statementCachedTableList_[i]);
//remove from the cache itself
remove(statementCachedTableList_[i]->getKey());
//keep track of change in cache size
delete statementCachedTableList_[i];
currentCacheSize_ = heap_->getAllocSize();
}
}
//clear the list of tables used in the current statement
statementCachedTableList_.clear();
}
}
// LCOV_EXCL_STOP
//Turn metadata caching ON
void NATableDB::setCachingON()
{
resizeCache(getDefaultAsLong(METADATA_CACHE_SIZE)*1024*1024);
cacheMetaData_ = TRUE;
}
// Obtain a list of table identifiers for the current statement,
// concatenating the id lists of every table on the statement table list.
// The returned LIST is allocated on the heap passed in; the caller's
// heap discipline governs its lifetime.
const LIST(CollIndex) &
NATableDB::getStmtTableIdList(NAMemory *heap) const
{
  LIST(CollIndex) *idList = new (heap) LIST(CollIndex)(heap);

  const CollIndex numTables = statementTableList_.entries();
  for (CollIndex tblIdx = 0; tblIdx < numTables; tblIdx++)
    idList->insert(statementTableList_[tblIdx]->getTableIdList());

  return *idList;
}
// function to return number of entries in cachedTableList_ LIST.
// Despite the iterator-style name, this simply reports the current
// count of cached NATable entries.
Int32 NATableDB::end()
{
  return cachedTableList_.entries() ;
}
// Evict from the NATable cache every entry whose object UID or security
// keys match one of the given query-invalidation (QI) keys. Matching
// entries are also removed from the statement-level lists.
void
NATableDB::free_entries_with_QI_key(Int32 numKeys, SQL_QIKEY* qiKeyArray)
{
  UInt32 currIndx = 0;

  // For each table in cache, see if it should be removed
  while ( currIndx < cachedTableList_.entries() )
  {
    NATable * currTable = cachedTableList_[currIndx];

    // Only need to remove seabase tables and external Hive/hbase tables
    if (!currTable->isSeabaseTable() && !currTable->hasExternalTable())
    {
      currIndx++;
      continue;
    }

    if (qiCheckForInvalidObject(numKeys, qiKeyArray,
                                currTable->objectUid().get_value(),
                                currTable->getSecKeySet()))
    {
      if ( currTable->accessedInCurrentStatement_ )
        statementCachedTableList_.remove( currTable );
      while ( statementTableList_.remove( currTable ) ) // Remove as many times as on list!
      { ; }

      // Removes the cachedTableList_ entry at currIndx, so the next entry
      // shifts into this slot -- no increment in this branch.
      RemoveFromNATableCache( currTable , currIndx );
    }
    else currIndx++; //Increment if NOT found ... else currIndx already pointing at next entry!
  }
}
//
// Remove a specifed NATable entry from the NATable Cache:
// drop it from the hash dictionary and from cachedTableList_, delete the
// object if it lives on its own heap (statement-heap objects are freed
// when the statement heap goes away), and refresh the tracked cache size.
//
// Fix: the original tested '! InStatementHeap' twice in a row; the two
// identical conditions are merged into a single braced block (same
// statement order, identical behavior).
//
void
NATableDB::RemoveFromNATableCache( NATable * NATablep , UInt32 currIndx )
{
  NAMemory * tableHeap = NATablep->heap_;
  NABoolean InStatementHeap = (tableHeap == (NAMemory *)CmpCommon::statementHeap());

  remove(NATablep->getKey());
  cachedTableList_.removeAt( currIndx );

  if ( ! InStatementHeap )
  {
    delete NATablep;
    currentCacheSize_ = heap_->getAllocSize();
  }
}
//
// Remove ALL entries from the NATable Cache that have been
// marked for removal before the next compilation.
//
void
NATableDB::remove_entries_marked_for_removal()
{
  NATableDB * TableDB = ActiveSchemaDB()->getNATableDB() ;

  UInt32 currIndx = 0;
  while ( currIndx < TableDB->cachedTableList_.entries() )
  {
    NATable * NATablep = TableDB->cachedTableList_[ currIndx ] ;
    // Read this flag before RemoveFromNATableCache(), which may delete
    // the NATable object.
    NABoolean accInCurrStmt = NATablep->accessedInCurrentStatement() ;
    if ( NATablep->isToBeRemovedFromCacheBNC() ) //to be removed by CmpMain Before Next Comp. retry?
    {
      TableDB->RemoveFromNATableCache( NATablep, currIndx );
      // NOTE(review): the remove() calls below compare pointer values only,
      // but NATablep may already have been freed by the call above --
      // confirm this pointer-only use after deletion is intended.
      if ( accInCurrStmt )
      {
        TableDB->statementCachedTableList_.remove( NATablep );
      }
      while ( TableDB->statementTableList_.remove( NATablep ) ) // Remove as many times as on list!
      { ; }
    }
    else currIndx++ ; //Note: No increment if the entry was removed !
  }
}
//
// UNMARK all entries from the NATable Cache that have been
// marked for removal before the next compilation. We have
// decided to leave them in the NATable cache afterall.
//
// Fix: the original copied the conditional-increment pattern from
// remove_entries_marked_for_removal(), but nothing is ever removed
// here -- each marked entry was merely revisited once after its flag
// was cleared, and the "entry was removed" comment was wrong. A plain
// loop with an unconditional increment has identical net effect.
//
void
NATableDB::unmark_entries_marked_for_removal()
{
  NATableDB * TableDB = ActiveSchemaDB()->getNATableDB() ;

  for ( UInt32 currIndx = 0;
        currIndx < TableDB->cachedTableList_.entries();
        currIndx++ )
  {
    NATable * NATablep = TableDB->cachedTableList_[ currIndx ] ;
    if ( NATablep->isToBeRemovedFromCacheBNC() )
    {
      NATablep->setRemoveFromCacheBNC(FALSE);
    }
  }
}
// Fill 'stats' with a snapshot of NATable cache metrics. The contextType
// field is blank-padded here; callers fill in the actual context label.
void NATableDB::getCacheStats(NATableCacheStats & stats)
{
  memset(stats.contextType, ' ', sizeof(stats.contextType));
  stats.numLookups = totalLookupsCount_;
  stats.numCacheHits = totalCacheHits_;
  stats.currentCacheSize = currentCacheSize_;
  stats.highWaterMark = highWatermarkCache_;
  stats.maxCacheSize = maxCacheSize_;
  stats.numEntries = cachedTableList_.entries();
}
| 1 | 13,115 | I don't think there is a maximum scale, neither for Hive nor for Trafodion. The only condition right now is that the scale can't exceed the precision. Example of a valid scale: DECIMAL(18,18). The maximum of 6 digits applies only to TIMESTAMP columns, where we don't support resolution below microseconds. | apache-trafodion | cpp |
@@ -775,6 +775,14 @@ func (a *FakeWebAPI) GetProject(ctx context.Context, req *webservice.GetProjectR
return nil, status.Error(codes.Unimplemented, "")
}
+func (a *FakeWebAPI) UpdateProjectStaticUser(ctx context.Context, req *webservice.UpdateProjectStaticUserRequest) (*webservice.UpdateProjectStaticUserResponse, error) {
+ return nil, status.Error(codes.Unimplemented, "")
+}
+
+func (a *FakeWebAPI) UpdateProjectSingleSignOn(ctx context.Context, req *webservice.UpdateProjectSingleSignOnRequest) (*webservice.UpdateProjectSingleSignOnResponse, error) {
+ return nil, status.Error(codes.Unimplemented, "")
+}
+
func (a *FakeWebAPI) GetMe(ctx context.Context, req *webservice.GetMeRequest) (*webservice.GetMeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
} | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"context"
"fmt"
"time"
"github.com/google/uuid"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/pipe-cd/pipe/pkg/app/api/service/webservice"
"github.com/pipe-cd/pipe/pkg/model"
)
const (
fakeProjectID = "debug-project"
)
// FakeWebAPI implements the fake behaviors for the gRPC definitions of WebAPI.
type FakeWebAPI struct {
}
// NewFakeWebAPI creates a new FakeWebAPI instance.
func NewFakeWebAPI() *FakeWebAPI {
return &FakeWebAPI{}
}
// Register registers all handling of this service into the specified gRPC server.
func (a *FakeWebAPI) Register(server *grpc.Server) {
webservice.RegisterWebServiceServer(server, a)
}
func (a *FakeWebAPI) AddEnvironment(ctx context.Context, req *webservice.AddEnvironmentRequest) (*webservice.AddEnvironmentResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) UpdateEnvironmentDesc(ctx context.Context, req *webservice.UpdateEnvironmentDescRequest) (*webservice.UpdateEnvironmentDescResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) ListEnvironments(ctx context.Context, req *webservice.ListEnvironmentsRequest) (*webservice.ListEnvironmentsResponse, error) {
now := time.Now()
envs := []*model.Environment{
{
Id: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
Name: "development",
Desc: "For development",
ProjectId: fakeProjectID,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: fmt.Sprintf("%s:%s", fakeProjectID, "staging"),
Name: "staging",
Desc: "For staging",
ProjectId: fakeProjectID,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: fmt.Sprintf("%s:%s", fakeProjectID, "production"),
Name: "production",
Desc: "For production",
ProjectId: fakeProjectID,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
}
return &webservice.ListEnvironmentsResponse{
Environments: envs,
}, nil
}
func (a *FakeWebAPI) RegisterPiped(ctx context.Context, req *webservice.RegisterPipedRequest) (*webservice.RegisterPipedResponse, error) {
return &webservice.RegisterPipedResponse{
Id: "e357d99f-0f83-4ce0-8c8b-27f11f432ef9",
Key: "9bf9752a-54a2-451a-a541-444add56f96b",
}, nil
}
func (a *FakeWebAPI) RecreatePipedKey(ctx context.Context, req *webservice.RecreatePipedKeyRequest) (*webservice.RecreatePipedKeyResponse, error) {
return &webservice.RecreatePipedKeyResponse{
Key: "9bf9752a-54a2-451a-a541-444add56f96b",
}, nil
}
func (a *FakeWebAPI) EnablePiped(ctx context.Context, req *webservice.EnablePipedRequest) (*webservice.EnablePipedResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) DisablePiped(ctx context.Context, req *webservice.DisablePipedRequest) (*webservice.DisablePipedResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) ListPipeds(ctx context.Context, req *webservice.ListPipedsRequest) (*webservice.ListPipedsResponse, error) {
now := time.Now()
pipeds := []*model.Piped{
{
Id: "492220b1-c080-4781-9e55-7e278760e0ef",
Desc: "piped for debug 1",
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "bdd71c9e-5406-46fb-a0e4-b2124ea1c1ea",
Desc: "piped for debug 2",
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "42e9fa90-22c1-4436-b10c-094044329c27",
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
}
if req.WithStatus {
pipeds[0].Status = model.Piped_ONLINE
pipeds[1].Status = model.Piped_ONLINE
pipeds[2].Status = model.Piped_OFFLINE
}
return &webservice.ListPipedsResponse{
Pipeds: pipeds,
}, nil
}
func (a *FakeWebAPI) GetPiped(ctx context.Context, req *webservice.GetPipedRequest) (*webservice.GetPipedResponse, error) {
now := time.Now()
return &webservice.GetPipedResponse{
Piped: &model.Piped{
Id: "492220b1-c080-4781-9e55-7e278760e0ef",
Desc: "piped for debug 1",
KeyHash: "redacted",
ProjectId: fakeProjectID,
Version: "debug-version",
StartedAt: now.Add(-30 * time.Minute).Unix(),
CloudProviders: []*model.Piped_CloudProvider{
{
Name: "kubernetes-default",
Type: model.CloudProviderKubernetes.String(),
},
},
Repositories: []*model.ApplicationGitRepository{
{
Id: "piped-repo-1",
Remote: "[email protected]:pipe-cd/debug.git",
Branch: "master",
},
{
Id: "piped-repo-2",
Remote: "[email protected]:pipe-cd/debug2.git",
Branch: "master",
},
},
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
}, nil
}
func (a *FakeWebAPI) AddApplication(ctx context.Context, req *webservice.AddApplicationRequest) (*webservice.AddApplicationResponse, error) {
return &webservice.AddApplicationResponse{}, nil
}
func (a *FakeWebAPI) EnableApplication(ctx context.Context, req *webservice.EnableApplicationRequest) (*webservice.EnableApplicationResponse, error) {
return &webservice.EnableApplicationResponse{}, nil
}
func (a *FakeWebAPI) DisableApplication(ctx context.Context, req *webservice.DisableApplicationRequest) (*webservice.DisableApplicationResponse, error) {
return &webservice.DisableApplicationResponse{}, nil
}
func (a *FakeWebAPI) ListApplications(ctx context.Context, req *webservice.ListApplicationsRequest) (*webservice.ListApplicationsResponse, error) {
now := time.Now()
fakeApplications := []*model.Application{
{
Id: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
Name: "debug-app",
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
Kind: model.ApplicationKind_KUBERNETES,
GitPath: &model.ApplicationGitPath{
Repo: &model.ApplicationGitRepository{
Id: "debug",
Remote: "[email protected]:pipe-cd/debug.git",
Branch: "master",
},
Path: "k8s",
},
CloudProvider: "kubernetes-default",
MostRecentlySuccessfulDeployment: &model.ApplicationDeploymentReference{
DeploymentId: "debug-deployment-id-01",
Trigger: &model.DeploymentTrigger{
Commit: &model.Commit{
Hash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Message: "Add web page routing (#133)",
Author: "cakecatz",
Branch: "master",
CreatedAt: now.Unix(),
},
Commander: "",
Timestamp: now.Unix(),
}, Version: "v0.1.0",
StartedAt: now.Add(-3 * 24 * time.Hour).Unix(),
CompletedAt: now.Add(-3 * 24 * time.Hour).Unix(),
},
SyncState: &model.ApplicationSyncState{
Status: model.ApplicationSyncStatus_SYNCED,
ShortReason: "Short resson",
Reason: "Reason",
HeadDeploymentId: "debug-deployment-id-01",
Timestamp: now.Unix(),
},
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
}
return &webservice.ListApplicationsResponse{
Applications: fakeApplications,
}, nil
}
func (a *FakeWebAPI) SyncApplication(ctx context.Context, req *webservice.SyncApplicationRequest) (*webservice.SyncApplicationResponse, error) {
return &webservice.SyncApplicationResponse{
CommandId: uuid.New().String(),
}, nil
}
func (a *FakeWebAPI) GetApplication(ctx context.Context, req *webservice.GetApplicationRequest) (*webservice.GetApplicationResponse, error) {
now := time.Now()
application := model.Application{
Id: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
Name: "debug-app",
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
Kind: model.ApplicationKind_KUBERNETES,
GitPath: &model.ApplicationGitPath{
Repo: &model.ApplicationGitRepository{
Id: "debug",
Remote: "[email protected]:pipe-cd/debug.git",
Branch: "master",
},
Path: "k8s",
},
CloudProvider: "kubernetes-default",
MostRecentlySuccessfulDeployment: &model.ApplicationDeploymentReference{
DeploymentId: "debug-deployment-id-01",
Trigger: &model.DeploymentTrigger{
Commit: &model.Commit{
Hash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Message: "Add web page routing (#133)",
Author: "cakecatz",
Branch: "master",
CreatedAt: now.Unix(),
},
Commander: "",
Timestamp: now.Unix(),
},
Version: "v0.1.0",
StartedAt: now.Add(-3 * 24 * time.Hour).Unix(),
CompletedAt: now.Add(-3 * 24 * time.Hour).Unix(),
},
SyncState: &model.ApplicationSyncState{
Status: model.ApplicationSyncStatus_SYNCED,
ShortReason: "Short resson",
Reason: "Reason",
HeadDeploymentId: "debug-deployment-id-01",
Timestamp: now.Unix(),
},
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
}
return &webservice.GetApplicationResponse{
Application: &application,
}, nil
}
func (a *FakeWebAPI) ListDeployments(ctx context.Context, req *webservice.ListDeploymentsRequest) (*webservice.ListDeploymentsResponse, error) {
now := time.Now()
deploymentTime := now
fakeDeployments := make([]*model.Deployment, 15)
for i := 0; i < 15; i++ {
// 5 hour intervals
deploymentTime := deploymentTime.Add(time.Duration(-5*i) * time.Hour)
fakeDeployments[i] = &model.Deployment{
Id: fmt.Sprintf("debug-deployment-id-%02d", i),
ApplicationId: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
GitPath: &model.ApplicationGitPath{
Repo: &model.ApplicationGitRepository{
Id: "debug",
Remote: "[email protected]:pipe-cd/debug.git",
Branch: "master",
},
Path: "k8s",
},
Trigger: &model.DeploymentTrigger{
Commit: &model.Commit{
Hash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Message: "Add web page routing (#133)",
Author: "cakecatz",
Branch: "master",
CreatedAt: deploymentTime.Unix(),
},
Commander: "",
Timestamp: deploymentTime.Unix(),
},
RunningCommitHash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Summary: fmt.Sprintf("This deployment is debug-%02d", i),
Status: model.DeploymentStatus_DEPLOYMENT_SUCCESS,
Stages: []*model.PipelineStage{
{
Id: "fake-stage-id-0-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Status: model.StageStatus_STAGE_SUCCESS,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_RUNNING,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-1",
Name: model.StageK8sPrimaryRollout.String(),
Index: 1,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_SUCCESS,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-2",
Name: model.StageK8sCanaryRollout.String(),
Index: 2,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_FAILURE,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-2-0",
Name: model.StageK8sCanaryClean.String(),
Desc: "waiting approval",
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-1-0",
"fake-stage-id-1-1",
"fake-stage-id-1-2",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-2-1",
Name: model.StageK8sCanaryClean.String(),
Desc: "approved by cakecatz",
Index: 1,
Predefined: true,
Requires: []string{
"fake-stage-id-1-0",
"fake-stage-id-1-1",
"fake-stage-id-1-2",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-3-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-2-0",
"fake-stage-id-2-1",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
},
CreatedAt: deploymentTime.Unix(),
UpdatedAt: deploymentTime.Unix(),
}
}
return &webservice.ListDeploymentsResponse{
Deployments: fakeDeployments,
}, nil
}
func (a *FakeWebAPI) GetDeployment(ctx context.Context, req *webservice.GetDeploymentRequest) (*webservice.GetDeploymentResponse, error) {
now := time.Now()
resp := &model.Deployment{
Id: "debug-deployment-id-01",
ApplicationId: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
Kind: model.ApplicationKind_KUBERNETES,
GitPath: &model.ApplicationGitPath{
Repo: &model.ApplicationGitRepository{
Id: "debug",
Remote: "[email protected]:pipe-cd/debug.git",
Branch: "master",
},
Path: "k8s",
},
Trigger: &model.DeploymentTrigger{
Commit: &model.Commit{
Hash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Message: "Add web page routing (#133)",
Author: "cakecatz",
Branch: "master",
CreatedAt: now.Add(-30 * time.Minute).Unix(),
},
Commander: "cakecatz",
Timestamp: now.Add(-30 * time.Minute).Unix(),
},
RunningCommitHash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Summary: "This deployment is debug",
Status: model.DeploymentStatus_DEPLOYMENT_RUNNING,
Stages: []*model.PipelineStage{
{
Id: "fake-stage-id-0-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Status: model.StageStatus_STAGE_SUCCESS,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_RUNNING,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-1",
Name: model.StageK8sPrimaryRollout.String(),
Index: 1,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_SUCCESS,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-2",
Name: model.StageK8sCanaryRollout.String(),
Index: 2,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_FAILURE,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-2-0",
Name: model.StageK8sCanaryClean.String(),
Desc: "waiting approval",
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-1-0",
"fake-stage-id-1-1",
"fake-stage-id-1-2",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-2-1",
Name: model.StageK8sCanaryClean.String(),
Desc: "approved by cakecatz",
Index: 1,
Predefined: true,
Requires: []string{
"fake-stage-id-1-0",
"fake-stage-id-1-1",
"fake-stage-id-1-2",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-3-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-2-0",
"fake-stage-id-2-1",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
},
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
}
return &webservice.GetDeploymentResponse{
Deployment: resp,
}, nil
}
func (a *FakeWebAPI) GetStageLog(ctx context.Context, req *webservice.GetStageLogRequest) (*webservice.GetStageLogResponse, error) {
startTime := time.Now().Add(-10 * time.Minute)
resp := []*model.LogBlock{
{
Index: 1,
Log: "+ make build",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Unix(),
},
{
Index: 2,
Log: "bazelisk --output_base=/workspace/bazel_out build --config=ci -- //...",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Add(5 * time.Second).Unix(),
},
{
Index: 3,
Log: "2020/06/01 08:52:07 Downloading https://releases.bazel.build/3.1.0/release/bazel-3.1.0-linux-x86_64...",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Add(10 * time.Second).Unix(),
},
{
Index: 4,
Log: "Extracting Bazel installation...",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Add(15 * time.Second).Unix(),
},
{
Index: 5,
Log: "Starting local Bazel server and connecting to it...",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Add(20 * time.Second).Unix(),
},
{
Index: 6,
Log: "(08:52:14) Loading: 0 packages loaded",
Severity: model.LogSeverity_SUCCESS,
CreatedAt: startTime.Add(30 * time.Second).Unix(),
},
{
Index: 7,
Log: "(08:53:21) Analyzing: 157 targets (88 packages loaded, 0 targets configured)",
Severity: model.LogSeverity_SUCCESS,
CreatedAt: startTime.Add(35 * time.Second).Unix(),
},
{
Index: 8,
Log: "Error: Error building: logged 2 error(s)",
Severity: model.LogSeverity_ERROR,
CreatedAt: startTime.Add(45 * time.Second).Unix(),
},
}
return &webservice.GetStageLogResponse{
Blocks: resp,
}, nil
}
func (a *FakeWebAPI) CancelDeployment(ctx context.Context, req *webservice.CancelDeploymentRequest) (*webservice.CancelDeploymentResponse, error) {
return &webservice.CancelDeploymentResponse{
CommandId: uuid.New().String(),
}, nil
}
func (a *FakeWebAPI) ApproveStage(ctx context.Context, req *webservice.ApproveStageRequest) (*webservice.ApproveStageResponse, error) {
return &webservice.ApproveStageResponse{
CommandId: uuid.New().String(),
}, nil
}
func (a *FakeWebAPI) GetApplicationLiveState(ctx context.Context, req *webservice.GetApplicationLiveStateRequest) (*webservice.GetApplicationLiveStateResponse, error) {
now := time.Now()
snapshot := &model.ApplicationLiveStateSnapshot{
ApplicationId: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
Kind: model.ApplicationKind_KUBERNETES,
Kubernetes: &model.KubernetesApplicationLiveState{
Resources: []*model.KubernetesResourceState{
{
Id: "f2c832a3-1f5b-4982-8f6e-72345ecb3c82",
Name: "demo-application",
ApiVersion: "networking.k8s.io/v1beta1",
Kind: "Ingress",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "8423fb53-5170-4864-a7d2-b84f8d36cb02",
Name: "demo-application",
ApiVersion: "v1",
Kind: "Service",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
Name: "demo-application",
ApiVersion: "apps/v1",
Kind: "Deployment",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "8621f186-6641-4f7a-9be4-5983eb647f8d",
OwnerIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
ParentIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
Name: "demo-application-9504e8601a",
ApiVersion: "apps/v1",
Kind: "ReplicaSet",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "ae5d0031-1f63-4396-b929-fa9987d1e6de",
OwnerIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
ParentIds: []string{
"8621f186-6641-4f7a-9be4-5983eb647f8d",
},
Name: "demo-application-9504e8601a-7vrdw",
ApiVersion: "v1",
Kind: "Pod",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "f55c7891-ba25-44bb-bca4-ffbc16b0089f",
OwnerIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
ParentIds: []string{
"8621f186-6641-4f7a-9be4-5983eb647f8d",
},
Name: "demo-application-9504e8601a-vlgd5",
ApiVersion: "v1",
Kind: "Pod",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "c2a81415-5bbf-44e8-9101-98bbd636bbeb",
OwnerIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
ParentIds: []string{
"8621f186-6641-4f7a-9be4-5983eb647f8d",
},
Name: "demo-application-9504e8601a-tmwp5",
ApiVersion: "v1",
Kind: "Pod",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
},
},
Version: &model.ApplicationLiveStateVersion{
Index: 1,
Timestamp: now.Unix(),
},
}
return &webservice.GetApplicationLiveStateResponse{
Snapshot: snapshot,
}, nil
}
func (a *FakeWebAPI) GetProject(ctx context.Context, req *webservice.GetProjectRequest) (*webservice.GetProjectResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) GetMe(ctx context.Context, req *webservice.GetMeRequest) (*webservice.GetMeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) GetCommand(ctx context.Context, req *webservice.GetCommandRequest) (*webservice.GetCommandResponse, error) {
now := time.Now()
cmd := model.Command{
Id: uuid.New().String(),
PipedId: "debug-piped",
ApplicationId: "debug-application-id",
DeploymentId: "debug-deployment-id",
Commander: "anonymous",
Status: model.CommandStatus_COMMAND_NOT_HANDLED_YET,
Type: model.Command_CANCEL_DEPLOYMENT,
CancelDeployment: &model.Command_CancelDeployment{
DeploymentId: "debug-deployment-id-01",
},
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
}
return &webservice.GetCommandResponse{
Command: &cmd,
}, nil
}
func (a *FakeWebAPI) ListDeploymentConfigTemplates(ctx context.Context, req *webservice.ListDeploymentConfigTemplatesRequest) (*webservice.ListDeploymentConfigTemplatesResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
| 1 | 9,135 | `ctx` is unused in UpdateProjectStaticUser | pipe-cd-pipe | go |
@@ -35,4 +35,10 @@ public class JSTypeNameGenerator extends TypeNameGenerator {
public String getStringFormatExample(String format) {
return getStringFormatExample(format, "Date.toISOString()", "Date.toISOString()");
}
+
+ @Override
+ public String getDiscoveryDocUrl(String apiName, String apiVersion) {
+ return String.format(
+ "https://content.googleapis.com/discovery/v1/apis/%s/%s/rest", apiName, apiVersion);
+ }
} | 1 | /* Copyright 2017 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.discovery.config.js;
import com.google.api.codegen.discovery.config.TypeNameGenerator;
import java.util.List;
public class JSTypeNameGenerator extends TypeNameGenerator {
@Override
public List<String> getMethodNameComponents(List<String> nameComponents) {
// In JS, we want the whole name components list to make it to the
// transformer because every element is part of the method construction.
return nameComponents;
}
@Override
public String stringDelimiter() {
return "'";
}
@Override
public String getStringFormatExample(String format) {
return getStringFormatExample(format, "Date.toISOString()", "Date.toISOString()");
}
}
| 1 | 21,889 | Why is this not the default, and why only for JS? | googleapis-gapic-generator | java |
@@ -0,0 +1,7 @@
+class ProjectBadge < ActiveRecord::Base
+ belongs_to :project
+ belongs_to :repository
+
+ validates :url, presence: true
+ validates :repository_id, presence: true, uniqueness: { scope: :project_id }
+end | 1 | 1 | 8,838 | A repository has many badges so we should also add type column in scope. | blackducksoftware-ohloh-ui | rb |
|
@@ -82,7 +82,7 @@ namespace Microsoft.CodeAnalysis.Sarif.Converters
});
Assert.AreEqual(1, result.CodeFlows.Count);
- result.CodeFlows[0].Should().Equal(new[]
+ result.CodeFlows.First().Locations.ToArray().Should().Equal(new[]
{
new AnnotatedCodeLocation {
PhysicalLocation = new PhysicalLocation { | 1 | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System;
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Linq;
using System.Xml;
using FluentAssertions;
using Microsoft.VisualStudio.TestTools.UnitTesting;
namespace Microsoft.CodeAnalysis.Sarif.Converters
{
[TestClass]
public class CppCheckErrorTests
{
private readonly ImmutableArray<CppCheckLocation> _dummyLocations = ImmutableArray.Create(new CppCheckLocation("file.cpp", 42));
[TestMethod]
public void CppCheckError_PassesThroughConstructorParameters()
{
var uut = new CppCheckError("id", "message", "verbose", "style", _dummyLocations);
AssertOuterPropertiesAreExampleError(uut);
Assert.AreEqual(_dummyLocations, uut.Locations);
}
[TestMethod]
[ExpectedException(typeof(ArgumentException))]
public void CppCheckError_RequiresNonEmptyLocations()
{
new CppCheckError("id", "message", "verbose", "style", ImmutableArray<CppCheckLocation>.Empty);
}
[TestMethod]
public void CppCheckError_MinimalErrorCanBeConvertedToSarifIssue()
{
Result result = new CppCheckError("id", "message", "verbose", "style", _dummyLocations)
.ToSarifIssue();
Assert.AreEqual("id", result.RuleId);
Assert.AreEqual("message", result.ShortMessage);
Assert.AreEqual("verbose", result.FullMessage);
result.Properties.Should().Equal(new Dictionary<string, string> { { "Severity", "style" } });
Assert.AreEqual("file.cpp", result.Locations.First().ResultFile.Uri.ToString());
}
[TestMethod]
public void CppCheckError_ErrorWithSingleLocationIsConvertedToSarifIssue()
{
Result result = new CppCheckError("id", "message", "verbose", "my fancy severity", ImmutableArray.Create(
new CppCheckLocation("foo.cpp", 1234)
)).ToSarifIssue();
Assert.AreEqual("id", result.RuleId);
Assert.AreEqual("message", result.ShortMessage);
Assert.AreEqual("verbose", result.FullMessage);
result.Properties.Should().Equal(new Dictionary<string, string> { { "Severity", "my fancy severity" } });
result.Locations.Should().Equal(new[] { new Location {
ResultFile = new PhysicalLocation
{
Uri = new Uri("foo.cpp", UriKind.RelativeOrAbsolute),
Region = new Region { StartLine = 1234 }
}
}
});
Assert.IsNull(result.CodeFlows);
}
[TestMethod]
public void CppCheckError_ErrorWithMultipleLocationsFillsOutCodeFlow()
{
Result result = new CppCheckError("id", "message", "verbose", "my fancy severity", ImmutableArray.Create(
new CppCheckLocation("foo.cpp", 1234),
new CppCheckLocation("bar.cpp", 5678)
)).ToSarifIssue();
result.Locations.Should().Equal(new[] { new Location {
ResultFile = new PhysicalLocation
{
Uri = new Uri("bar.cpp", UriKind.RelativeOrAbsolute),
Region = new Region { StartLine = 5678 }
}
}
});
Assert.AreEqual(1, result.CodeFlows.Count);
result.CodeFlows[0].Should().Equal(new[]
{
new AnnotatedCodeLocation {
PhysicalLocation = new PhysicalLocation {
Uri = new Uri("foo.cpp", UriKind.RelativeOrAbsolute),
Region = new Region { StartLine = 1234 }
}
},
new AnnotatedCodeLocation {
PhysicalLocation = new PhysicalLocation {
Uri = new Uri("bar.cpp", UriKind.RelativeOrAbsolute),
Region = new Region { StartLine = 5678 }
}
}
});
}
[TestMethod]
public void CppCheckError_DoesNotEmitShortMessageWhenVerboseMessageIsTheSame()
{
Result result = new CppCheckError("id", "message", "message", "style", _dummyLocations)
.ToSarifIssue();
Assert.IsNull(result.ShortMessage);
Assert.AreEqual("message", result.FullMessage);
}
[TestMethod]
[ExpectedException(typeof(XmlException))]
public void CppCheckError_RejectsSelfClosingError()
{
using (XmlReader xml = Utilities.CreateXmlReaderFromString(exampleErrorXmlSelfClosed))
{
var uut = Parse(xml);
AssertOuterPropertiesAreExampleError(uut);
uut.Locations.Should().BeEmpty();
}
}
[TestMethod]
[ExpectedException(typeof(XmlException))]
public void CppCheckError_RejectsErrorWithNoLocations()
{
using (XmlReader xml = Utilities.CreateXmlReaderFromString(exampleErrorXmlOpen + exampleErrorClose))
{
var uut = Parse(xml);
AssertOuterPropertiesAreExampleError(uut);
uut.Locations.Should().BeEmpty();
}
}
[TestMethod]
public void CppCheckError_CanParseErrorWithSingleLocation()
{
string errorXml = exampleErrorXmlOpen + " <location file=\"foo.cpp\" line=\"42\" /> " + exampleErrorClose;
using (XmlReader xml = Utilities.CreateXmlReaderFromString(errorXml))
{
var uut = Parse(xml);
AssertOuterPropertiesAreExampleError(uut);
uut.Locations.Should().Equal(new[] { new CppCheckLocation("foo.cpp", 42) });
}
}
[TestMethod]
public void CppCheckError_CanParseErrorWithMultipleLocations()
{
string errorXml = exampleErrorXmlOpen + " <location file=\"foo.cpp\" line=\"42\" /> <location file=\"bar.cpp\" line=\"1729\" /> " + exampleErrorClose;
using (XmlReader xml = Utilities.CreateXmlReaderFromString(errorXml))
{
var uut = Parse(xml);
AssertOuterPropertiesAreExampleError(uut);
uut.Locations.Should().Equal(new[] {
new CppCheckLocation("foo.cpp", 42),
new CppCheckLocation("bar.cpp", 1729)
});
}
}
[TestMethod]
[ExpectedException(typeof(XmlException))]
public void CppCheckError_InvalidParse_BadRootNodeDetected()
{
using (XmlReader xml = Utilities.CreateXmlReaderFromString("<foobar />"))
{
Parse(xml);
}
}
[TestMethod]
[ExpectedException(typeof(XmlException))]
public void CppCheckError_InvalidParse_BadChildrenNodeDetected()
{
using (XmlReader xml = Utilities.CreateXmlReaderFromString(exampleErrorXmlOpen + "<foobar />" + exampleErrorClose))
{
Parse(xml);
}
}
private const string exampleErrorXmlBase = "<error id=\"id\" msg=\"message\" verbose=\"verbose\" severity=\"style\"";
private const string exampleErrorXmlOpen = exampleErrorXmlBase + ">";
private const string exampleErrorClose = "</error>";
private const string exampleErrorXmlSelfClosed = exampleErrorXmlBase + " />";
private static void AssertOuterPropertiesAreExampleError(CppCheckError uut)
{
Assert.AreEqual("id", uut.Id);
Assert.AreEqual("message", uut.Message);
Assert.AreEqual("verbose", uut.VerboseMessage);
Assert.AreEqual("style", uut.Severity);
}
private static CppCheckError Parse(XmlReader xml)
{
return CppCheckError.Parse(xml, new CppCheckStrings(xml.NameTable));
}
}
}
| 1 | 10,594 | Now a hash set, so can't index into it. | microsoft-sarif-sdk | .cs |
@@ -273,6 +273,11 @@ module Bolt
!!File::ALT_SEPARATOR
end
+ # Returns true if running in PowerShell.
+ def powershell?
+ !!ENV['PSModulePath']
+ end
+
# Accept hash and return hash with top level keys of type "String" converted to symbols.
def symbolize_top_level_keys(hsh)
hsh.each_with_object({}) { |(k, v), h| k.is_a?(String) ? h[k.to_sym] = v : h[k] = v } | 1 | # frozen_string_literal: true
module Bolt
module Util
class << self
# Gets input for an argument.
def get_arg_input(value)
if value.start_with?('@')
file = value.sub(/^@/, '')
read_arg_file(file)
elsif value == '-'
$stdin.read
else
value
end
end
# Reads a file passed as an argument to a command.
def read_arg_file(file)
File.read(File.expand_path(file))
rescue StandardError => e
raise Bolt::FileError.new("Error attempting to read #{file}: #{e}", file)
end
def read_yaml_hash(path, file_name)
require 'yaml'
logger = Bolt::Logger.logger(self)
path = File.expand_path(path)
content = File.open(path, "r:UTF-8") { |f| YAML.safe_load(f.read) } || {}
unless content.is_a?(Hash)
msg = "Invalid content for #{file_name} file: #{path} should be a Hash or empty, not #{content.class}"
raise Bolt::FileError.new(msg, path)
end
logger.trace("Loaded #{file_name} from #{path}")
content
rescue Errno::ENOENT
raise Bolt::FileError.new("Could not read #{file_name} file: #{path}", path)
rescue Psych::Exception => e
raise Bolt::FileError.new("Could not parse #{file_name} file: #{path}\n"\
"Error at line #{e.line} column #{e.column}", path)
rescue IOError, SystemCallError => e
raise Bolt::FileError.new("Could not read #{file_name} file: #{path}\n"\
"error: #{e}", path)
end
def read_optional_yaml_hash(path, file_name)
File.exist?(path) ? read_yaml_hash(path, file_name) : {}
end
# Accepts a path with either 'plans' or 'tasks' in it and determines
# the name of the module
def module_name(path)
# Remove extra dots and slashes
path = Pathname.new(path).cleanpath.to_s
fs = File::SEPARATOR
regex = Regexp.new("#{fs}plans#{fs}|#{fs}tasks#{fs}")
# Only accept paths with '/plans/' or '/tasks/'
unless path.match?(regex)
msg = "Could not determine module from #{path}. "\
"The path must include 'plans' or 'tasks' directory"
raise Bolt::Error.new(msg, 'bolt/modulepath-error')
end
# Split the path on the first instance of /plans/ or /tasks/
parts = path.split(regex, 2)
# Module name is the last entry before 'plans' or 'tasks'
modulename = parts[0].split(fs)[-1]
filename = File.basename(path).split('.')[0]
# Remove "/init.*" if filename is init or just remove the file
# extension
if filename == 'init'
parts[1].chomp!(File.basename(path))
else
parts[1].chomp!(File.extname(path))
end
# The plan or task name is the rest of the path
[modulename, parts[1].split(fs)].flatten.join('::')
end
def to_code(string)
case string
when Bolt::PAL::YamlPlan::DoubleQuotedString
string.value.inspect
when Bolt::PAL::YamlPlan::BareString
if string.value.start_with?('$')
string.value.to_s
else
"'#{string.value}'"
end
when Bolt::PAL::YamlPlan::EvaluableString, Bolt::PAL::YamlPlan::CodeLiteral
string.value.to_s
when String
"'#{string}'"
when Hash
formatted = String.new("{")
string.each do |k, v|
formatted << "#{to_code(k)} => #{to_code(v)}, "
end
formatted.chomp!(", ")
formatted << "}"
formatted
when Array
formatted = String.new("[")
formatted << string.map { |str| to_code(str) }.join(', ')
formatted << "]"
formatted
else
string
end
end
def deep_merge(hash1, hash2)
recursive_merge = proc do |_key, h1, h2|
if h1.is_a?(Hash) && h2.is_a?(Hash)
h1.merge(h2, &recursive_merge)
else
h2
end
end
hash1.merge(hash2, &recursive_merge)
end
# Accepts a Data object and returns a copy with all hash keys
# modified by block. use &:to_s to stringify keys or &:to_sym to symbolize them
def walk_keys(data, &block)
case data
when Hash
data.each_with_object({}) do |(k, v), acc|
v = walk_keys(v, &block)
acc[yield(k)] = v
end
when Array
data.map { |v| walk_keys(v, &block) }
else
data
end
end
# Accepts a Data object and returns a copy with all hash and array values
# Arrays and hashes including the initial object are modified before
# their descendants are.
def walk_vals(data, skip_top = false, &block)
data = yield(data) unless skip_top
case data
when Hash
data.transform_values { |v| walk_vals(v, &block) }
when Array
data.map { |v| walk_vals(v, &block) }
else
data
end
end
# Accepts a Data object and returns a copy with all hash and array values
# modified by the given block. Descendants are modified before their
# parents.
def postwalk_vals(data, skip_top = false, &block)
new_data = case data
when Hash
data.transform_values { |v| postwalk_vals(v, &block) }
when Array
data.map { |v| postwalk_vals(v, &block) }
else
data
end
if skip_top
new_data
else
yield(new_data)
end
end
# Performs a deep_clone, using an identical copy if the cloned structure contains multiple
# references to the same object and prevents endless recursion.
# Credit to Jan Molic via https://github.com/rubyworks/facets/blob/master/LICENSE.txt
def deep_clone(obj, cloned = {})
return cloned[obj.object_id] if cloned.include?(obj.object_id)
# The `defined?` method will not reliably find the Java::JavaLang::CloneNotSupportedException constant
# presumably due to some sort of optimization that short-cuts doing a bunch of Java introspection.
# Java::JavaLang::<...> IS defining the constant (via const_missing or const_get magic perhaps) so
# it is safe to reference it in the error_types array when a JRuby interpreter is evaluating the code
# (detected by RUBY_PLATFORM == `java`). SO instead of conditionally adding the CloneNotSupportedException
# constant to the error_types array based on `defined?` detecting the Java::JavaLang constant it is added
# based on detecting a JRuby interpreter.
# TypeError handles unclonable Ruby ojbects (TrueClass, Fixnum, ...)
# CloneNotSupportedException handles uncloneable Java objects (JRuby only)
error_types = [TypeError]
error_types << Java::JavaLang::CloneNotSupportedException if RUBY_PLATFORM == 'java'
begin
# We can't recurse on frozen objects to populate them with cloned
# data. Instead we store the freeze-state of the original object,
# deep_clone, then set the cloned object to frozen if the original
# object was frozen
frozen = obj.frozen?
cl = begin
obj.clone(freeze: false)
# Some datatypes, such as FalseClass, can't be unfrozen. These
# aren't the types we recurse on, so we can leave them frozen
rescue ArgumentError => e
if e.message =~ /can't unfreeze/
obj.clone
else
raise e
end
end
rescue *error_types
cloned[obj.object_id] = obj
obj
else
cloned[obj.object_id] = cl
cloned[cl.object_id] = cl
case cl
when Hash
obj.each { |k, v| cl[k] = deep_clone(v, cloned) }
when Array
cl.collect! { |v| deep_clone(v, cloned) }
when Struct
obj.each_pair { |k, v| cl[k] = deep_clone(v, cloned) }
end
cl.instance_variables.each do |var|
v = cl.instance_variable_get(var)
v_cl = deep_clone(v, cloned)
cl.instance_variable_set(var, v_cl)
end
cl.freeze if frozen
cl
end
end
# This is stubbed for testing validate_file
def file_stat(path)
File.stat(File.expand_path(path))
end
def snake_name_to_class_name(snake_name)
snake_name.split('_').map(&:capitalize).join
end
def class_name_to_file_name(cls_name)
# Note this turns Bolt::CLI -> 'bolt/cli' not 'bolt/c_l_i'
# this won't handle Bolt::Inventory2Foo
cls_name.gsub(/([a-z])([A-Z])/, '\1_\2').gsub('::', '/').downcase
end
def validate_file(type, path, allow_dir = false)
stat = file_stat(path)
if !stat.readable?
raise Bolt::FileError.new("The #{type} '#{path}' is unreadable", path)
elsif !stat.file? && (!allow_dir || !stat.directory?)
expected = allow_dir ? 'file or directory' : 'file'
raise Bolt::FileError.new("The #{type} '#{path}' is not a #{expected}", path)
elsif stat.directory?
Dir.foreach(path) do |file|
next if %w[. ..].include?(file)
validate_file(type, File.join(path, file), allow_dir)
end
end
rescue Errno::ENOENT
raise Bolt::FileError.new("The #{type} '#{path}' does not exist", path)
end
# Returns true if windows false if not.
def windows?
!!File::ALT_SEPARATOR
end
# Accept hash and return hash with top level keys of type "String" converted to symbols.
def symbolize_top_level_keys(hsh)
hsh.each_with_object({}) { |(k, v), h| k.is_a?(String) ? h[k.to_sym] = v : h[k] = v }
end
# Recursively searches a data structure for plugin references
def references?(input)
case input
when Hash
input.key?('_plugin') || input.values.any? { |v| references?(v) }
when Array
input.any? { |v| references?(v) }
else
false
end
end
def unix_basename(path)
raise Bolt::ValidationError, "path must be a String, received #{path.class} #{path}" unless path.is_a?(String)
path.split('/').last
end
def windows_basename(path)
raise Bolt::ValidationError, "path must be a String, received #{path.class} #{path}" unless path.is_a?(String)
path.split(%r{[/\\]}).last
end
# Prompts yes or no, returning true for yes and false for no.
#
def prompt_yes_no(prompt, outputter)
choices = {
'y' => true,
'yes' => true,
'n' => false,
'no' => false
}
loop do
outputter.print_prompt("#{prompt} ([y]es/[n]o) ")
response = $stdin.gets.to_s.downcase.chomp
if choices.key?(response)
return choices[response]
else
outputter.print_prompt_error("Invalid response, must pick [y]es or [n]o")
end
end
end
end
end
end
| 1 | 16,799 | @jpogran Does this seem like a reasonable way to know if we're in powershell vs. CMD or *sh? | puppetlabs-bolt | rb |
@@ -63,7 +63,7 @@ public class BodyProcessorCreator implements ParamValueProcessorCreator {
}
String contentType = request.getContentType();
- if (contentType != null && !contentType.startsWith(MediaType.APPLICATION_JSON)) {
+ if (contentType != null && !contentType.toLowerCase().startsWith(MediaType.APPLICATION_JSON)) {
// TODO: we should consider body encoding
return IOUtils.toString(inputStream, "UTF-8");
} | 1 | /*
* Copyright 2017 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicecomb.common.rest.codec.param;
import java.io.InputStream;
import java.lang.reflect.Type;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import org.apache.commons.io.IOUtils;
import com.fasterxml.jackson.databind.JavaType;
import com.fasterxml.jackson.databind.type.TypeFactory;
import io.servicecomb.common.rest.RestConst;
import io.servicecomb.common.rest.codec.RestClientRequest;
import io.servicecomb.common.rest.codec.RestObjectMapper;
import io.servicecomb.foundation.vertx.stream.BufferOutputStream;
import io.servicecomb.swagger.generator.core.utils.ClassUtils;
import io.swagger.models.parameters.Parameter;
import io.vertx.core.buffer.Buffer;
public class BodyProcessorCreator implements ParamValueProcessorCreator {
public static final String PARAMTYPE = "body";
public static class BodyProcessor implements ParamValueProcessor {
protected JavaType targetType;
public BodyProcessor(JavaType targetType) {
this.targetType = targetType;
}
@Override
public Object getValue(HttpServletRequest request) throws Exception {
Object body = request.getAttribute(RestConst.BODY_PARAMETER);
if (body != null) {
return convertValue(body, targetType);
}
// for standard HttpServletRequest, getInputStream will never return null
// but for mocked HttpServletRequest, maybe get a null
// like io.servicecomb.provider.springmvc.reference.ClientToHttpServletRequest
InputStream inputStream = request.getInputStream();
if (inputStream == null) {
return null;
}
String contentType = request.getContentType();
if (contentType != null && !contentType.startsWith(MediaType.APPLICATION_JSON)) {
// TODO: we should consider body encoding
return IOUtils.toString(inputStream, "UTF-8");
}
return RestObjectMapper.INSTANCE.readValue(inputStream, targetType);
}
@Override
public void setValue(RestClientRequest clientRequest, Object arg) throws Exception {
try (BufferOutputStream output = new BufferOutputStream()) {
clientRequest.putHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON);
RestObjectMapper.INSTANCE.writeValue(output, arg);
clientRequest.write(output.getBuffer());
}
}
@Override
public String getParameterPath() {
return "";
}
@Override
public String getProcessorType() {
return PARAMTYPE;
}
}
public static class RawJsonBodyProcessor extends BodyProcessor {
public RawJsonBodyProcessor(JavaType targetType) {
super(targetType);
}
@Override
public Object getValue(HttpServletRequest request) throws Exception {
Object body = request.getAttribute(RestConst.BODY_PARAMETER);
if (body != null) {
return convertValue(body, targetType);
}
InputStream inputStream = request.getInputStream();
if (inputStream == null) {
return null;
}
// TODO: we should consider body encoding
return IOUtils.toString(inputStream, "UTF-8");
}
@Override
public void setValue(RestClientRequest clientRequest, Object arg) throws Exception {
clientRequest.putHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON);
clientRequest.write(Buffer.buffer((String) arg));
}
}
public BodyProcessorCreator() {
ParamValueProcessorCreatorManager.INSTANCE.register(PARAMTYPE, this);
}
@Override
public ParamValueProcessor create(Parameter parameter, Type genericParamType) {
JavaType targetType = TypeFactory.defaultInstance().constructType(genericParamType);
boolean rawJson = ClassUtils.isRawJsonType(parameter);
if (genericParamType.getTypeName().equals(String.class.getTypeName()) && rawJson) {
return new RawJsonBodyProcessor(targetType);
}
return new BodyProcessor(targetType);
}
}
| 1 | 7,441 | 1.toLowerCase(Locale.US)? 2.where is accept bug fix? | apache-servicecomb-java-chassis | java |
@@ -105,16 +105,9 @@ def test_execute_with_invalid_driver(
):
_command_args['driver_name'] = 'ec3'
- with pytest.raises(SystemExit) as e:
+ with pytest.raises(KeyError):
_instance.execute()
- assert 1 == e.value.code
-
- msg = (
- 'The specified template directory ({template_dir})' ' does not exist'
- ).format(template_dir=_instance._resolve_template_dir('scenario/driver/ec3'))
- patched_logger_critical.assert_called_once_with(msg)
-
def test_execute_with_custom_template(
temp_dir, custom_template_dir, custom_readme_content, _command_args | 1 | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import pytest
from molecule.command.init import scenario
@pytest.fixture
def _command_args():
return {
'driver_name': 'docker',
'role_name': 'test-role',
'scenario_name': 'test-scenario',
'subcommand': __name__,
'verifier_name': 'testinfra',
}
@pytest.fixture
def _instance(_command_args):
return scenario.Scenario(_command_args)
@pytest.fixture
def custom_template_dir(resources_folder_path):
custom_template_dir_path = os.path.join(
resources_folder_path, 'custom_scenario_template'
)
return custom_template_dir_path
@pytest.fixture
def invalid_template_dir(resources_folder_path):
invalid_role_template_path = os.path.join(
resources_folder_path, 'invalid_scenario_template'
)
return invalid_role_template_path
@pytest.fixture
def custom_readme_content(custom_template_dir, _command_args):
readme_path = os.path.join(
custom_template_dir,
_command_args['driver_name'],
'{{cookiecutter.molecule_directory}}',
'{{cookiecutter.scenario_name}}',
'README.md',
)
custom_readme_content = ""
with open(readme_path, 'r') as readme:
custom_readme_content = readme.read()
return custom_readme_content
def test_execute(temp_dir, _instance, patched_logger_info, patched_logger_success):
_instance.execute()
msg = 'Initializing new scenario test-scenario...'
patched_logger_info.assert_called_once_with(msg)
assert os.path.isdir('./molecule/test-scenario')
assert os.path.isdir('./molecule/test-scenario/tests')
scenario_directory = os.path.join(temp_dir.strpath, 'molecule', 'test-scenario')
msg = 'Initialized scenario in {} successfully.'.format(scenario_directory)
patched_logger_success.assert_called_once_with(msg)
def test_execute_scenario_exists(temp_dir, _instance, patched_logger_critical):
_instance.execute()
with pytest.raises(SystemExit) as e:
_instance.execute()
assert 1 == e.value.code
msg = 'The directory molecule/test-scenario exists. ' 'Cannot create new scenario.'
patched_logger_critical.assert_called_once_with(msg)
def test_execute_with_invalid_driver(
temp_dir, _instance, _command_args, patched_logger_critical
):
_command_args['driver_name'] = 'ec3'
with pytest.raises(SystemExit) as e:
_instance.execute()
assert 1 == e.value.code
msg = (
'The specified template directory ({template_dir})' ' does not exist'
).format(template_dir=_instance._resolve_template_dir('scenario/driver/ec3'))
patched_logger_critical.assert_called_once_with(msg)
def test_execute_with_custom_template(
temp_dir, custom_template_dir, custom_readme_content, _command_args
):
_command_args['driver_template'] = custom_template_dir
custom_template_instance = scenario.Scenario(_command_args)
custom_template_instance.execute()
assert os.path.isdir('./molecule/test-scenario')
assert os.path.isdir('./molecule/test-scenario/tests')
readme_path = './molecule/test-scenario/README.md'
assert os.path.isfile(readme_path)
with open(readme_path, 'r') as readme:
assert readme.read() == custom_readme_content
def test_execute_with_absent_custom_template(
temp_dir, _command_args, patched_logger_critical
):
_command_args['driver_template'] = "absent_template_dir"
absent_template_instance = scenario.Scenario(_command_args)
with pytest.raises(SystemExit) as e:
absent_template_instance.execute()
assert e.value.code == 1
patched_logger_critical.assert_called_once()
def test_execute_with_absent_driver_in_custom_template(
temp_dir, _command_args, custom_template_dir, patched_logger_warn
):
_command_args['driver_name'] = 'ec2'
_command_args['driver_template'] = custom_template_dir
absent_driver_instance = scenario.Scenario(_command_args)
absent_driver_instance.execute()
patched_logger_warn.assert_called_once_with(
"Driver not found in custom template directory"
"({driver_template}/{driver_name}), "
"using the default template instead".format(**_command_args)
)
assert os.path.isdir('./molecule/test-scenario')
assert os.path.isdir('./molecule/test-scenario/tests')
install_file = os.path.join(
temp_dir.strpath, 'molecule', 'test-scenario', 'INSTALL.rst'
)
with open(install_file) as f:
content = f.read()
assert "Amazon Web Services driver installation guide" in content
def test_execute_with_incorrect_template(
temp_dir, invalid_template_dir, _command_args, patched_logger_critical
):
_command_args['driver_template'] = invalid_template_dir
invalid_template_instance = scenario.Scenario(_command_args)
with pytest.raises(SystemExit) as e:
invalid_template_instance.execute()
assert e.value.code == 1
patched_logger_critical.assert_called_once()
| 1 | 10,111 | We should still be providing something to the user in the case of a `KeyError` instead of a stack trace!? | ansible-community-molecule | py |
@@ -90,6 +90,7 @@ namespace Nethermind.DataMarketplace.Core.Services
{
var txData = _abiEncoder.Encode(AbiEncodingStyle.IncludeSignature, ContractData.DepositAbiSig,
deposit.Id.Bytes, deposit.Units, deposit.ExpiryTime);
+ var nonce = await _blockchainBridge.GetNonceAsync(onBehalfOf);
Transaction transaction = new Transaction
{
Value = deposit.Value, | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.IO;
using System.Threading.Tasks;
using Nethermind.Abi;
using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Core.Extensions;
using Nethermind.DataMarketplace.Core.Domain;
using Nethermind.DataMarketplace.Core.Services.Models;
using Nethermind.Int256;
using Nethermind.Wallet;
namespace Nethermind.DataMarketplace.Core.Services
{
public class DepositService : IDepositService
{
private readonly IAbiEncoder _abiEncoder;
private readonly INdmBlockchainBridge _blockchainBridge;
private readonly IWallet _wallet;
private readonly Address _contractAddress;
public DepositService(INdmBlockchainBridge blockchainBridge, IAbiEncoder abiEncoder, IWallet wallet,
Address contractAddress)
{
_blockchainBridge = blockchainBridge ?? throw new ArgumentNullException(nameof(blockchainBridge));
_abiEncoder = abiEncoder ?? throw new ArgumentNullException(nameof(abiEncoder));
_wallet = wallet ?? throw new ArgumentNullException(nameof(wallet));
_contractAddress = contractAddress ?? throw new ArgumentNullException(nameof(contractAddress));
}
public ulong GasLimit { get; } = 70000;
public async Task<UInt256> ReadDepositBalanceAsync(Address onBehalfOf, Keccak depositId)
{
var txData = _abiEncoder.Encode(AbiEncodingStyle.IncludeSignature, ContractData.DepositBalanceAbiSig,
depositId.Bytes);
Transaction transaction = new Transaction
{
Value = 0,
Data = txData,
To = _contractAddress,
SenderAddress = onBehalfOf,
GasLimit = 100000,
GasPrice = 0.GWei(),
Nonce = await _blockchainBridge.GetNonceAsync(onBehalfOf)
};
var data = await _blockchainBridge.CallAsync(transaction);
return data.ToUInt256();
}
public async Task ValidateContractAddressAsync(Address contractAddress)
{
if (contractAddress != _contractAddress)
{
throw new InvalidDataException($"Contract address {contractAddress} is different than configured {_contractAddress}");
}
var code = await _blockchainBridge.GetCodeAsync(contractAddress);
if (code is null || code.Length == 0)
{
throw new InvalidDataException($"No contract code at address {contractAddress}.");
}
if (!Bytes.AreEqual(code, Bytes.FromHexString(ContractData.DeployedCode)))
{
throw new InvalidDataException($"Code at address {contractAddress} is different than expected.");
}
}
public async Task<Keccak?> MakeDepositAsync(Address onBehalfOf, Deposit deposit, UInt256 gasPrice)
{
var txData = _abiEncoder.Encode(AbiEncodingStyle.IncludeSignature, ContractData.DepositAbiSig,
deposit.Id.Bytes, deposit.Units, deposit.ExpiryTime);
Transaction transaction = new Transaction
{
Value = deposit.Value,
Data = txData,
To = _contractAddress,
SenderAddress = onBehalfOf,
GasLimit = (long) GasLimit,
GasPrice = gasPrice
};
// check
_wallet.Sign(transaction, await _blockchainBridge.GetNetworkIdAsync());
return await _blockchainBridge.SendOwnTransactionAsync(transaction);
}
public async Task<uint> VerifyDepositAsync(Address onBehalfOf, Keccak depositId)
{
var transaction = await GetTransactionAsync(onBehalfOf, depositId);
var data = await _blockchainBridge.CallAsync(transaction);
return data.AsSpan().ReadEthUInt32();
}
public async Task<uint> VerifyDepositAsync(Address onBehalfOf, Keccak depositId, long blockNumber)
{
var transaction = await GetTransactionAsync(onBehalfOf, depositId);
var data = await _blockchainBridge.CallAsync(transaction, blockNumber);
return data.AsSpan().ReadEthUInt32();
}
private async Task<Transaction> GetTransactionAsync(Address onBehalfOf, Keccak depositId)
=> new Transaction
{
Value = 0,
Data = _abiEncoder.Encode(AbiEncodingStyle.IncludeSignature, ContractData.VerifyDepositAbiSig,
depositId.Bytes),
To = _contractAddress,
SenderAddress = onBehalfOf,
GasLimit = 100000,
GasPrice = 0.GWei(),
Nonce = await _blockchainBridge.GetNonceAsync(onBehalfOf)
};
}
} | 1 | 24,372 | check Lukasz's fix with NonceReserving - is that not better? | NethermindEth-nethermind | .cs |
@@ -106,11 +106,8 @@ Blockly.FieldColour.prototype.setValue = function(colour) {
}
this.colour_ = colour;
if (this.sourceBlock_) {
- this.sourceBlock_.setColour(
- colour,
- this.sourceBlock_.getColourSecondary(),
- this.sourceBlock_.getColourTertiary()
- );
+ // Set the primary, secondary and tertiary colour to this value.
+ this.sourceBlock_.setColour(colour, colour, colour);
}
};
| 1 | /**
* @license
* Visual Blocks Editor
*
* Copyright 2012 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Colour input field.
* @author [email protected] (Neil Fraser)
*/
'use strict';
goog.provide('Blockly.FieldColour');
goog.require('Blockly.Field');
goog.require('goog.dom');
goog.require('goog.events');
goog.require('goog.style');
goog.require('goog.ui.ColorPicker');
/**
* Class for a colour input field.
* @param {string} colour The initial colour in '#rrggbb' format.
* @param {Function=} opt_validator A function that is executed when a new
* colour is selected. Its sole argument is the new colour value. Its
* return value becomes the selected colour, unless it is undefined, in
* which case the new colour stands, or it is null, in which case the change
* is aborted.
* @extends {Blockly.Field}
* @constructor
*/
Blockly.FieldColour = function(colour, opt_validator) {
Blockly.FieldColour.superClass_.constructor.call(this, colour, opt_validator);
this.addArgType('colour');
};
goog.inherits(Blockly.FieldColour, Blockly.Field);
/**
* By default use the global constants for colours.
* @type {Array.<string>}
* @private
*/
Blockly.FieldColour.prototype.colours_ = null;
/**
* By default use the global constants for columns.
* @type {number}
* @private
*/
Blockly.FieldColour.prototype.columns_ = 0;
/**
* Install this field on a block.
* @param {!Blockly.Block} block The block containing this field.
*/
Blockly.FieldColour.prototype.init = function(block) {
Blockly.FieldColour.superClass_.init.call(this, block);
this.setValue(this.getValue());
};
/**
* Mouse cursor style when over the hotspot that initiates the editor.
*/
Blockly.FieldColour.prototype.CURSOR = 'default';
/**
* Close the colour picker if this input is being deleted.
*/
Blockly.FieldColour.prototype.dispose = function() {
Blockly.WidgetDiv.hideIfOwner(this);
Blockly.FieldColour.superClass_.dispose.call(this);
};
/**
* Return the current colour.
* @return {string} Current colour in '#rrggbb' format.
*/
Blockly.FieldColour.prototype.getValue = function() {
return this.colour_;
};
/**
* Set the colour.
* @param {string} colour The new colour in '#rrggbb' format.
*/
Blockly.FieldColour.prototype.setValue = function(colour) {
if (this.sourceBlock_ && Blockly.Events.isEnabled() &&
this.colour_ != colour) {
Blockly.Events.fire(new Blockly.Events.BlockChange(
this.sourceBlock_, 'field', this.name, this.colour_, colour));
}
this.colour_ = colour;
if (this.sourceBlock_) {
this.sourceBlock_.setColour(
colour,
this.sourceBlock_.getColourSecondary(),
this.sourceBlock_.getColourTertiary()
);
}
};
/**
* Get the text from this field. Used when the block is collapsed.
* @return {string} Current text.
*/
Blockly.FieldColour.prototype.getText = function() {
var colour = this.colour_;
// Try to use #rgb format if possible, rather than #rrggbb.
var m = colour.match(/^#(.)\1(.)\2(.)\3$/);
if (m) {
colour = '#' + m[1] + m[2] + m[3];
}
return colour;
};
/**
* Returns the fixed height and width.
* @return {!goog.math.Size} Height and width.
*/
Blockly.FieldColour.prototype.getSize = function() {
return new goog.math.Size(Blockly.BlockSvg.FIELD_WIDTH, Blockly.BlockSvg.FIELD_HEIGHT);
};
/**
* An array of colour strings for the palette.
* See bottom of this page for the default:
* http://docs.closure-library.googlecode.com/git/closure_goog_ui_colorpicker.js.source.html
* @type {!Array.<string>}
*/
Blockly.FieldColour.COLOURS = goog.ui.ColorPicker.SIMPLE_GRID_COLORS;
/**
* Number of columns in the palette.
*/
Blockly.FieldColour.COLUMNS = 7;
/**
* Set a custom colour grid for this field.
* @param {Array.<string>} colours Array of colours for this block,
* or null to use default (Blockly.FieldColour.COLOURS).
* @return {!Blockly.FieldColour} Returns itself (for method chaining).
*/
Blockly.FieldColour.prototype.setColours = function(colours) {
this.colours_ = colours;
return this;
};
/**
* Set a custom grid size for this field.
* @param {number} columns Number of columns for this block,
* or 0 to use default (Blockly.FieldColour.COLUMNS).
* @return {!Blockly.FieldColour} Returns itself (for method chaining).
*/
Blockly.FieldColour.prototype.setColumns = function(columns) {
this.columns_ = columns;
return this;
};
/**
* Create a palette under the colour field.
* @private
*/
Blockly.FieldColour.prototype.showEditor_ = function() {
Blockly.WidgetDiv.show(this, this.sourceBlock_.RTL,
Blockly.FieldColour.widgetDispose_);
// Create the palette using Closure.
var picker = new goog.ui.ColorPicker();
picker.setSize(this.columns_ || Blockly.FieldColour.COLUMNS);
picker.setColors(this.colours_ || Blockly.FieldColour.COLOURS);
// Position the palette to line up with the field.
// Record windowSize and scrollOffset before adding the palette.
var windowSize = goog.dom.getViewportSize();
var scrollOffset = goog.style.getViewportPageOffset(document);
var xy = this.getAbsoluteXY_();
var borderBBox = this.getScaledBBox_();
var div = Blockly.WidgetDiv.DIV;
picker.render(div);
picker.setSelectedColor(this.getValue());
// Record paletteSize after adding the palette.
var paletteSize = goog.style.getSize(picker.getElement());
// Flip the palette vertically if off the bottom.
if (xy.y + paletteSize.height + borderBBox.height >=
windowSize.height + scrollOffset.y) {
xy.y -= paletteSize.height - 1;
} else {
xy.y += borderBBox.height - 1;
}
if (this.sourceBlock_.RTL) {
xy.x += borderBBox.width;
xy.x -= paletteSize.width;
// Don't go offscreen left.
if (xy.x < scrollOffset.x) {
xy.x = scrollOffset.x;
}
} else {
// Don't go offscreen right.
if (xy.x > windowSize.width + scrollOffset.x - paletteSize.width) {
xy.x = windowSize.width + scrollOffset.x - paletteSize.width;
}
}
Blockly.WidgetDiv.position(xy.x, xy.y, windowSize, scrollOffset,
this.sourceBlock_.RTL);
// Configure event handler.
var thisField = this;
Blockly.FieldColour.changeEventKey_ = goog.events.listen(picker,
goog.ui.ColorPicker.EventType.CHANGE,
function(event) {
var colour = event.target.getSelectedColor() || '#000000';
Blockly.WidgetDiv.hide();
if (thisField.sourceBlock_) {
// Call any validation function, and allow it to override.
colour = thisField.callValidator(colour);
}
if (colour !== null) {
thisField.setValue(colour);
}
});
};
/**
* Hide the colour palette.
* @private
*/
Blockly.FieldColour.widgetDispose_ = function() {
if (Blockly.FieldColour.changeEventKey_) {
goog.events.unlistenByKey(Blockly.FieldColour.changeEventKey_);
}
Blockly.Events.setGroup(false);
};
| 1 | 8,541 | Would you explain why here please? E.g. from the PR description > the renderer expects to be able to use the secondary color as the fill for a shadow. | LLK-scratch-blocks | js |
@@ -643,6 +643,8 @@ func (hd *HeaderDownload) SaveExternalAnnounce(hash common.Hash) {
}
func (hd *HeaderDownload) getLink(linkHash common.Hash) (*Link, bool) {
+ hd.lock.RLock()
+ defer hd.lock.RUnlock()
if link, ok := hd.links[linkHash]; ok {
return link, true
} | 1 | package headerdownload
import (
"bytes"
"compress/gzip"
"container/heap"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"math/big"
"sort"
"strings"
"time"
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/common/dbutils"
"github.com/ledgerwatch/turbo-geth/consensus"
"github.com/ledgerwatch/turbo-geth/core/rawdb"
"github.com/ledgerwatch/turbo-geth/core/types"
"github.com/ledgerwatch/turbo-geth/eth/stagedsync/stages"
"github.com/ledgerwatch/turbo-geth/ethdb"
"github.com/ledgerwatch/turbo-geth/log"
"github.com/ledgerwatch/turbo-geth/params"
"github.com/ledgerwatch/turbo-geth/rlp"
)
// Implements sort.Interface so we can sort the incoming header in the message by block height
type HeadersByBlockHeight []*types.Header
// Len reports the number of headers (sort.Interface).
func (h HeadersByBlockHeight) Len() int {
	return len(h)
}

// Less orders headers by DESCENDING block height: element i precedes j
// when j's number is strictly below i's (sort.Interface).
func (h HeadersByBlockHeight) Less(i, j int) bool {
	return h[j].Number.Cmp(h[i].Number) < 0
}

// Swap exchanges two headers in place (sort.Interface).
func (h HeadersByBlockHeight) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
}
// SplitIntoSegments converts a message containing headers into a collection of
// chain segments. Headers are sorted by descending block height so each header
// is visited after all of its (potential) children; a header joins an existing
// segment only when it has exactly one known child, otherwise it starts a new
// segment. Known-bad or duplicated headers abort the split with the
// corresponding penalty.
func (hd *HeaderDownload) SplitIntoSegments(headersRaw [][]byte, msg []*types.Header) ([]*ChainSegment, Penalty, error) {
	hd.lock.RLock()
	defer hd.lock.RUnlock()
	sort.Sort(HeadersByBlockHeight(msg))
	// Now all headers are ordered from the highest block height to the lowest
	var segments []*ChainSegment                         // Segments being built
	segmentMap := make(map[common.Hash]int)              // Mapping of the header hash to the index of the chain segment it belongs
	childrenMap := make(map[common.Hash][]*types.Header) // Mapping parent hash to the children
	dedupMap := make(map[common.Hash]struct{})           // Map used for detecting duplicate headers
	for i, header := range msg {
		headerHash := header.Hash()
		if _, bad := hd.badHeaders[headerHash]; bad {
			return nil, BadBlockPenalty, nil
		}
		if _, duplicate := dedupMap[headerHash]; duplicate {
			return nil, DuplicateHeaderPenalty, nil
		}
		dedupMap[headerHash] = struct{}{}
		var segmentIdx int
		children := childrenMap[headerHash]
		// Validate the height relationship with every already-seen child.
		for _, child := range children {
			if valid, penalty := hd.childParentValid(child, header); !valid {
				return nil, penalty, nil
			}
		}
		if len(children) == 1 {
			// Single child, extract segmentIdx
			segmentIdx = segmentMap[headerHash]
		} else {
			// No children, or more than one child, create new segment
			segmentIdx = len(segments)
			segments = append(segments, &ChainSegment{})
		}
		// NOTE(review): assumes msg and headersRaw are index-aligned — confirm at call sites.
		segments[segmentIdx].Headers = append(segments[segmentIdx].Headers, header)
		segments[segmentIdx].HeadersRaw = append(segments[segmentIdx].HeadersRaw, headersRaw[i])
		segmentMap[header.ParentHash] = segmentIdx
		siblings := childrenMap[header.ParentHash]
		siblings = append(siblings, header)
		childrenMap[header.ParentHash] = siblings
	}
	return segments, NoPenalty, nil
}
// childParentValid verifies the height relationship between a header and its
// claimed parent (Proof-of-Work validity is checked elsewhere): the child must
// sit exactly one block above the parent.
func (hd *HeaderDownload) childParentValid(child, parent *types.Header) (bool, Penalty) {
	if child.Number.Uint64() == parent.Number.Uint64()+1 {
		return true, NoPenalty
	}
	return false, WrongChildBlockHeightPenalty
}
// SingleHeaderAsSegment wraps one header (and its raw encoding) into a
// singleton chain segment, rejecting headers that are known to be bad.
func (hd *HeaderDownload) SingleHeaderAsSegment(headerRaw []byte, header *types.Header) ([]*ChainSegment, Penalty, error) {
	hd.lock.RLock()
	defer hd.lock.RUnlock()
	if _, bad := hd.badHeaders[header.Hash()]; bad {
		return nil, BadBlockPenalty, nil
	}
	segment := &ChainSegment{
		HeadersRaw: [][]byte{headerRaw},
		Headers:    []*types.Header{header},
	}
	return []*ChainSegment{segment}, NoPenalty, nil
}
// findAnchors scans the segment (children first) for the first header whose
// hash matches an existing anchor of a working tree; returns its index, or
// (false, 0) when no header attaches.
func (hd *HeaderDownload) findAnchors(segment *ChainSegment) (found bool, start int) {
	for i := range segment.Headers {
		if _, ok := hd.anchors[segment.Headers[i].Hash()]; ok {
			return true, i
		}
	}
	return false, 0
}
// findLink attempts to find a non-persisted link that the given chain segment
// can be attached to. It returns end such that segment.Headers[start:end] is
// the portion to attach: end is one past the index of the first header whose
// parent is already a known link. end == 0 signals that the segment's newest
// header duplicates an existing link; end == len(segment.Headers) means no
// attachment point exists.
func (hd *HeaderDownload) findLink(segment *ChainSegment, start int) (found bool, end int) {
	if _, duplicate := hd.getLink(segment.Headers[start].Hash()); duplicate {
		return false, 0
	}
	// Walk the segment from children towards parents
	for i, header := range segment.Headers[start:] {
		// Check if the header can be attached to any links
		if _, attaching := hd.getLink(header.ParentHash); attaching {
			return true, start + i + 1
		}
	}
	return false, len(segment.Headers)
}
// removeUpwards deletes the given links and, transitively, every descendant
// reachable through their next pointers, from both hd.links and hd.linkQueue.
func (hd *HeaderDownload) removeUpwards(toRemove []*Link) {
	stack := toRemove
	for len(stack) > 0 {
		last := len(stack) - 1
		link := stack[last]
		stack = stack[:last]
		delete(hd.links, link.header.Hash())
		heap.Remove(hd.linkQueue, link.idx)
		stack = append(stack, link.next...)
	}
}
// markPreverified marks the given link and all of its not-yet-marked ancestors
// (followed via parent hashes through hd.links) as preverified. The walk stops
// at the first already-preverified link or at a parent we do not track.
func (hd *HeaderDownload) markPreverified(link *Link) {
	for current := link; current != nil && !current.preverified; current = hd.links[current.header.ParentHash] {
		current.preverified = true
	}
}
// extendUp extends a working tree up (towards newer blocks) from an existing
// link, using segment.Headers[start:end]. Headers are added from parents
// towards children, each becoming a child of the previous one. If the
// attachment link is already persisted, the newly attached chain becomes
// eligible for insertion into the database. Caller must hold hd.lock.
func (hd *HeaderDownload) extendUp(segment *ChainSegment, start, end int) error {
	// Find attachment link again
	linkHeader := segment.Headers[end-1]
	attachmentLink, attaching := hd.getLink(linkHeader.ParentHash)
	if attaching {
		if attachmentLink.preverified && len(attachmentLink.next) > 0 {
			// A preverified link already has its child; refusing a sibling
			// prevents forking below a preverified point.
			return fmt.Errorf("cannot extendUp from preverified link %d with children", attachmentLink.blockHeight)
		}
		// Iterate over headers backwards (from parents towards children)
		prevLink := attachmentLink
		for i := end - 1; i >= start; i-- {
			header := segment.Headers[i]
			link := hd.addHeaderAsLink(header, false /* persisted */)
			prevLink.next = append(prevLink.next, link)
			prevLink = link
			if _, ok := hd.preverifiedHashes[header.Hash()]; ok {
				hd.markPreverified(link)
			}
		}
	} else {
		return fmt.Errorf("extendUp attachment link not found for %x", linkHeader.ParentHash)
	}
	if attachmentLink.persisted {
		// Attached directly on top of the persisted head: schedule for insertion.
		link := hd.links[linkHeader.Hash()]
		hd.insertList = append(hd.insertList, link)
	}
	return nil
}
// extendDown extends a working tree down (towards older blocks) from an
// existing anchor, using segment.Headers[start:end]. The old anchor is
// replaced by a new one at the bottom of the segment (unless genesis is
// reached), and the links previously hanging off the old anchor are re-rooted
// under the newly added chain. Caller must hold hd.lock.
func (hd *HeaderDownload) extendDown(segment *ChainSegment, start, end int) error {
	// Find attachment anchor again
	anchorHeader := segment.Headers[start]
	if anchor, attaching := hd.anchors[anchorHeader.Hash()]; attaching {
		// Remember whether anything under the old anchor was preverified, so
		// the newly inserted span can inherit that status afterwards.
		anchorPreverified := false
		for _, link := range anchor.links {
			if link.preverified {
				anchorPreverified = true
				break
			}
		}
		newAnchorHeader := segment.Headers[end-1]
		newAnchor := &Anchor{
			parentHash:  newAnchorHeader.ParentHash,
			timestamp:   0,
			peerID:      anchor.peerID,
			blockHeight: newAnchorHeader.Number.Uint64(),
		}
		if newAnchor.blockHeight > 0 {
			// Height 0 means genesis was reached — nothing to request below it.
			hd.anchors[newAnchorHeader.ParentHash] = newAnchor
			heap.Push(hd.anchorQueue, newAnchor)
		}
		delete(hd.anchors, anchor.parentHash)
		// Add all headers in the segments as links to this anchor
		var prevLink *Link
		for i := end - 1; i >= start; i-- {
			header := segment.Headers[i]
			link := hd.addHeaderAsLink(header, false /* persisted */)
			if prevLink == nil {
				newAnchor.links = append(newAnchor.links, link)
			} else {
				prevLink.next = append(prevLink.next, link)
			}
			prevLink = link
			if !anchorPreverified {
				if _, ok := hd.preverifiedHashes[header.Hash()]; ok {
					hd.markPreverified(link)
				}
			}
		}
		// Re-root the old anchor's children under the top of the new span.
		prevLink.next = anchor.links
		anchor.links = nil
		if anchorPreverified {
			// Mark the entire segment as preverified
			hd.markPreverified(prevLink)
		}
	} else {
		return fmt.Errorf("extendDown attachment anchors not found for %x", anchorHeader.Hash())
	}
	return nil
}
// connect joins working trees: segment.Headers[start:end] is attached to an
// existing link at its newest end and absorbs an existing anchor (and its
// links) at its oldest end. Caller must hold hd.lock.
func (hd *HeaderDownload) connect(segment *ChainSegment, start, end int) error {
	// Find attachment link again
	linkHeader := segment.Headers[end-1]
	// Find attachment anchors again
	anchorHeader := segment.Headers[start]
	attachmentLink, ok1 := hd.getLink(linkHeader.ParentHash)
	if !ok1 {
		return fmt.Errorf("connect attachment link not found for %x", linkHeader.ParentHash)
	}
	if attachmentLink.preverified && len(attachmentLink.next) > 0 {
		// Refuse to fork below a preverified link that already has a child.
		return fmt.Errorf("cannot connect to preverified link %d with children", attachmentLink.blockHeight)
	}
	anchor, ok2 := hd.anchors[anchorHeader.Hash()]
	if !ok2 {
		return fmt.Errorf("connect attachment anchors not found for %x", anchorHeader.Hash())
	}
	// Remember whether the absorbed anchor carried preverified links.
	anchorPreverified := false
	for _, link := range anchor.links {
		if link.preverified {
			anchorPreverified = true
			break
		}
	}
	delete(hd.anchors, anchor.parentHash)
	// Iterate over headers backwards (from parents towards children)
	prevLink := attachmentLink
	for i := end - 1; i >= start; i-- {
		header := segment.Headers[i]
		link := hd.addHeaderAsLink(header, false /* persisted */)
		prevLink.next = append(prevLink.next, link)
		prevLink = link
		if !anchorPreverified {
			if _, ok := hd.preverifiedHashes[header.Hash()]; ok {
				hd.markPreverified(link)
			}
		}
	}
	// Re-root the absorbed anchor's children under the top of the new span.
	prevLink.next = anchor.links
	anchor.links = nil
	if anchorPreverified {
		// Mark the entire segment as preverified
		hd.markPreverified(prevLink)
	}
	if attachmentLink.persisted {
		// Attached on top of the persisted head: schedule for insertion.
		link := hd.links[linkHeader.Hash()]
		hd.insertList = append(hd.insertList, link)
	}
	return nil
}
// newAnchor creates a fresh anchor from segment.Headers[start:end], used when
// the segment attaches to nothing we already track. If the anchor is later
// abandoned, the given peerID gets a penalty. Fails when the anchor would sit
// below the persisted head or the anchor limit is reached.
// Caller must hold hd.lock.
func (hd *HeaderDownload) newAnchor(segment *ChainSegment, start, end int, peerID string) error {
	anchorHeader := segment.Headers[end-1]
	if anchorHeader.Number.Uint64() < hd.highestInDb {
		return fmt.Errorf("new anchor too far in the past: %d, latest header in db: %d", anchorHeader.Number.Uint64(), hd.highestInDb)
	}
	if len(hd.anchors) >= hd.anchorLimit {
		return fmt.Errorf("too many anchors: %d, limit %d", len(hd.anchors), hd.anchorLimit)
	}
	anchor := &Anchor{
		parentHash:  anchorHeader.ParentHash,
		peerID:      peerID,
		timestamp:   0,
		blockHeight: anchorHeader.Number.Uint64(),
	}
	hd.anchors[anchorHeader.ParentHash] = anchor
	heap.Push(hd.anchorQueue, anchor)
	// Iterate over headers backwards (from parents towards children)
	var prevLink *Link
	for i := end - 1; i >= start; i-- {
		header := segment.Headers[i]
		link := hd.addHeaderAsLink(header, false /* persisted */)
		if prevLink == nil {
			anchor.links = append(anchor.links, link)
		} else {
			prevLink.next = append(prevLink.next, link)
		}
		prevLink = link
		if _, ok := hd.preverifiedHashes[header.Hash()]; ok {
			hd.markPreverified(link)
		}
	}
	return nil
}
// AnchorState renders a human-readable summary of all anchors and the block
// ranges attached to them (thread-safe wrapper around anchorState).
func (hd *HeaderDownload) AnchorState() string {
	hd.lock.RLock()
	defer hd.lock.RUnlock()
	return hd.anchorState()
}
// anchorState renders one line per anchor: the anchor height, the covered
// link heights compressed into "a-b" intervals, and the anchor's parent hash.
// Caller must hold hd.lock (at least for reading).
func (hd *HeaderDownload) anchorState() string {
	//nolint:prealloc
	var ss []string
	for anchorParent, anchor := range hd.anchors {
		var sb strings.Builder
		sb.WriteString(fmt.Sprintf("{%8d", anchor.blockHeight))
		// Try to figure out end
		var end uint64
		var searchList []*Link
		searchList = append(searchList, anchor.links...)
		var bs []int
		// Depth-first walk over every link reachable from the anchor,
		// collecting heights and tracking the maximum.
		for len(searchList) > 0 {
			link := searchList[len(searchList)-1]
			if link.blockHeight > end {
				end = link.blockHeight
			}
			searchList = searchList[:len(searchList)-1]
			if len(link.next) > 0 {
				searchList = append(searchList, link.next...)
			}
			bs = append(bs, int(link.blockHeight))
		}
		var sbb strings.Builder
		sort.Ints(bs)
		// Compress the sorted heights into "a-b" runs; duplicate heights and
		// interior members of a run produce no output of their own.
		for j, b := range bs {
			if j == 0 {
				sbb.WriteString(fmt.Sprintf("%d", b))
			} else if j == len(bs)-1 {
				if bs[j-1]+1 == b {
					// Close interval
					sbb.WriteString(fmt.Sprintf("-%d", b))
				} else {
					// Standalone
					sbb.WriteString(fmt.Sprintf(" %d", b))
				}
			} else {
				if bs[j-1] == b {
					// Skip duplicate height
				} else if bs[j-1]+1 == b {
					if b+1 == bs[j+1] {
						// Skip interior member of a run
					} else {
						// Close interval
						sbb.WriteString(fmt.Sprintf("-%d", b))
					}
				} else {
					// Open interval or standalone
					sbb.WriteString(fmt.Sprintf(" %d", b))
				}
			}
		}
		sb.WriteString(fmt.Sprintf("-%d links=%d (%s)}", end, len(bs), sbb.String()))
		sb.WriteString(fmt.Sprintf(" => %x", anchorParent))
		ss = append(ss, sb.String())
	}
	// Sort for deterministic output (map iteration order is random).
	sort.Strings(ss)
	return strings.Join(ss, "\n")
}
// InitPreverifiedHashes returns the hard-coded set of preverified header
// hashes for the named chain together with the height they cover. Unknown
// chains yield (nil, 0) after logging a warning.
func InitPreverifiedHashes(chain string) (map[common.Hash]struct{}, uint64) {
	switch chain {
	case params.MainnetChainName:
		return DecodeHashes(mainnetPreverifiedHashes), mainnetPreverifiedHeight
	case params.RopstenChainName:
		return DecodeHashes(ropstenPreverifiedHashes), ropstenPreverifiedHeight
	default:
		log.Warn("Preverified hashes not found for", "chain", chain)
		return nil, 0
	}
}
// DecodeHashes converts hex-encoded hash strings into a set keyed by hash.
func DecodeHashes(encodings []string) map[common.Hash]struct{} {
	set := make(map[common.Hash]struct{}, len(encodings))
	for i := range encodings {
		set[common.HexToHash(encodings[i])] = struct{}{}
	}
	return set
}
// SetPreverifiedHashes installs the set of hard-coded preverified header
// hashes and the height up to which headers must be preverified.
func (hd *HeaderDownload) SetPreverifiedHashes(preverifiedHashes map[common.Hash]struct{}, preverifiedHeight uint64) {
	hd.lock.Lock()
	defer hd.lock.Unlock()
	hd.preverifiedHashes = preverifiedHashes
	hd.preverifiedHeight = preverifiedHeight
}
// RecoverFromDb primes the in-memory structures from the database: it
// re-populates the persisted-link queue with the highest headers already on
// disk and restores the Headers stage progress.
//
// It mutates hd.links, hd.persistedLinkQueue and hd.highestInDb, so it must
// hold the write lock like every other exported mutator (the convention in
// this type is that un-exported helpers such as addHeaderAsLink do not lock
// themselves and rely on the caller holding hd.lock).
func (hd *HeaderDownload) RecoverFromDb(db ethdb.Database) error {
	hd.lock.Lock()
	defer hd.lock.Unlock()
	// Drain persistedLinksQueue and remove links
	for hd.persistedLinkQueue.Len() > 0 {
		link := heap.Pop(hd.persistedLinkQueue).(*Link)
		delete(hd.links, link.hash)
	}
	err := db.(ethdb.HasRwKV).RwKV().View(context.Background(), func(tx ethdb.Tx) error {
		c, err := tx.Cursor(dbutils.HeadersBucket)
		if err != nil {
			return err
		}
		// Take hd.persistedLinkLimit headers (with the highest heights) as links
		for k, v, err := c.Last(); k != nil && hd.persistedLinkQueue.Len() < hd.persistedLinkLimit; k, v, err = c.Prev() {
			if err != nil {
				return err
			}
			var h types.Header
			if err = rlp.DecodeBytes(v, &h); err != nil {
				return err
			}
			hd.addHeaderAsLink(&h, true /* persisted */)
		}
		return nil
	})
	if err != nil {
		return err
	}
	hd.highestInDb, err = stages.GetStageProgress(db, stages.Headers)
	if err != nil {
		return err
	}
	return nil
}
// ReadProgressFromDb refreshes highestInDb from the Headers stage progress
// stored in the database. This is useful when a transaction was aborted and
// the in-memory value went out of sync.
func (hd *HeaderDownload) ReadProgressFromDb(tx ethdb.RwTx) (err error) {
	hd.lock.Lock()
	defer hd.lock.Unlock()
	hd.highestInDb, err = stages.GetStageProgress(tx, stages.Headers)
	return err
}
// invalidateAnchor drops an anchor whose ancestors appear to be unavailable,
// together with every link hanging off it. Caller must hold hd.lock.
func (hd *HeaderDownload) invalidateAnchor(anchor *Anchor) {
	log.Warn("Invalidating anchor for suspected unavailability", "height", anchor.blockHeight)
	delete(hd.anchors, anchor.parentHash)
	hd.removeUpwards(anchor.links)
}
// RequestMoreHeaders picks the next anchor that is due for a (re-)request and
// returns a backwards header request for its ancestors. Anchors that have
// timed out 10 times are invalidated and their peer penalised.
func (hd *HeaderDownload) RequestMoreHeaders(currentTime uint64) (*HeaderRequest, []PenaltyItem) {
	hd.lock.Lock()
	defer hd.lock.Unlock()
	var penalties []PenaltyItem
	if hd.anchorQueue.Len() == 0 {
		log.Debug("Empty anchor queue")
		return nil, penalties
	}
	for hd.anchorQueue.Len() > 0 {
		anchor := (*hd.anchorQueue)[0]
		// The queue may contain stale entries for anchors already removed
		// from hd.anchors; those fall through to the removal below.
		if _, ok := hd.anchors[anchor.parentHash]; ok {
			if anchor.timestamp > currentTime {
				// Anchor not ready for re-request yet
				return nil, penalties
			}
			if anchor.timeouts < 10 {
				return &HeaderRequest{Hash: anchor.parentHash, Number: anchor.blockHeight - 1, Length: 192, Skip: 0, Reverse: true}, penalties
			} else {
				// Ancestors of this anchor seem to be unavailable, invalidate and move on
				hd.invalidateAnchor(anchor)
				penalties = append(penalties, PenaltyItem{Penalty: AbandonedAnchorPenalty, PeerID: anchor.peerID})
			}
		}
		// Anchor disappeared or unavailable, pop from the queue and move on
		heap.Remove(hd.anchorQueue, 0)
	}
	return nil, penalties
}
// SentRequest records that a request for the given anchor's ancestors was
// sent: it bumps the timeout counter and sets the earliest re-request time.
func (hd *HeaderDownload) SentRequest(req *HeaderRequest, currentTime, timeout uint64) {
	hd.lock.Lock()
	defer hd.lock.Unlock()
	anchor, ok := hd.anchors[req.Hash]
	if !ok {
		return
	}
	anchor.timeouts++
	anchor.timestamp = currentTime + timeout
	// NOTE(review): this fixes heap position 0, assuming the anchor just
	// requested is still at the top of anchorQueue — confirm this always
	// holds between RequestMoreHeaders and SentRequest.
	heap.Fix(hd.anchorQueue, 0)
}
// RequestSkeleton produces a forward "skeleton" request: up to 192 evenly
// spaced headers (stride 8*192) between the highest persisted header and the
// highest header seen on the network. Returns nil while the number of anchors
// is above the threshold or the gap is smaller than one stride.
func (hd *HeaderDownload) RequestSkeleton() *HeaderRequest {
	hd.lock.RLock()
	defer hd.lock.RUnlock()
	log.Debug("Request skeleton", "anchors", len(hd.anchors), "top seen height", hd.topSeenHeight, "highestInDb", hd.highestInDb)
	if len(hd.anchors) > 16 {
		return nil // Need to be below anchor threshold to produce skeleton request
	}
	const stride = uint64(8 * 192)
	if hd.topSeenHeight < hd.highestInDb+stride {
		return nil
	}
	length := (hd.topSeenHeight - hd.highestInDb) / stride
	if length > 192 {
		length = 192
	}
	return &HeaderRequest{Number: hd.highestInDb + stride, Length: length, Skip: stride, Reverse: false}
}
// InsertHeaders attempts to insert pending headers into the database via hf,
// verifying each non-preverified header first. The first return value is true
// when the system is considered "in sync" (persisted height has caught up with
// both the preverified height and the highest seen height).
func (hd *HeaderDownload) InsertHeaders(hf func(header *types.Header, blockHeight uint64) error, logPrefix string, logChannel <-chan time.Time) (bool, error) {
	hd.lock.Lock()
	defer hd.lock.Unlock()
	var linksInFuture []*Link // Here we accumulate links that fail validation as "in the future"
	for len(hd.insertList) > 0 {
		// Make sure long insertions do not appear as a stuck stage 1
		select {
		case <-logChannel:
			log.Info(fmt.Sprintf("[%s] Inserting headers", logPrefix), "progress", hd.highestInDb)
		default:
		}
		link := hd.insertList[len(hd.insertList)-1]
		if link.blockHeight <= hd.preverifiedHeight && !link.preverified {
			// Header should be preverified, but not yet, try again later
			break
		}
		hd.insertList = hd.insertList[:len(hd.insertList)-1]
		skip := false
		if !link.preverified {
			if err := hd.engine.VerifyHeader(hd.headerReader, link.header, true /* seal */); err != nil {
				log.Warn("Verification failed for header", "hash", link.header.Hash(), "height", link.blockHeight, "error", err)
				if errors.Is(err, consensus.ErrFutureBlock) {
					// This may become valid later
					linksInFuture = append(linksInFuture, link)
					log.Warn("Added future link", "hash", link.header.Hash(), "height", link.blockHeight, "timestamp", link.header.Time)
					continue // prevent removal of the link from the hd.linkQueue
				} else {
					skip = true
				}
			} else if hd.seenAnnounces.Pop(link.hash) {
				// Header verified and previously announced: queue a broadcast.
				hd.toAnnounce = append(hd.toAnnounce, Announce{Hash: link.hash, Number: link.blockHeight})
			}
		}
		if _, ok := hd.links[link.hash]; ok {
			heap.Remove(hd.linkQueue, link.idx)
		}
		if skip {
			continue
		}
		if err := hf(link.header, link.blockHeight); err != nil {
			return false, err
		}
		if link.blockHeight > hd.highestInDb {
			hd.highestInDb = link.blockHeight
		}
		link.persisted = true
		heap.Push(hd.persistedLinkQueue, link)
		// Children of a persisted link become insertable themselves.
		if len(link.next) > 0 {
			hd.insertList = append(hd.insertList, link.next...)
		}
	}
	// Enforce the cap on persisted links kept in memory.
	for hd.persistedLinkQueue.Len() > hd.persistedLinkLimit {
		link := heap.Pop(hd.persistedLinkQueue).(*Link)
		delete(hd.links, link.hash)
	}
	if len(linksInFuture) > 0 {
		// Requeue "future" links so they are re-validated on the next call.
		hd.insertList = append(hd.insertList, linksInFuture...)
		linksInFuture = nil //nolint
	}
	return hd.highestInDb >= hd.preverifiedHeight && hd.topSeenHeight > 0 && hd.highestInDb >= hd.topSeenHeight, nil
}
// GrabAnnounces returns all accumulated announces and forgets them.
func (hd *HeaderDownload) GrabAnnounces() []Announce {
	hd.lock.Lock()
	defer hd.lock.Unlock()
	res := hd.toAnnounce
	// Reset to a fresh slice so the returned one is never mutated concurrently.
	hd.toAnnounce = []Announce{}
	return res
}
// Progress returns the highest block height whose header has been persisted.
func (hd *HeaderDownload) Progress() uint64 {
	hd.lock.RLock()
	defer hd.lock.RUnlock()
	return hd.highestInDb
}
// HasLink reports whether a link with the given header hash is being tracked.
func (hd *HeaderDownload) HasLink(linkHash common.Hash) bool {
	hd.lock.RLock()
	defer hd.lock.RUnlock()
	_, ok := hd.getLink(linkHash)
	return ok
}
// SaveExternalAnnounce marks the hash as seen in an external announcement;
// only such hashes are re-broadcast after their headers are inserted.
func (hd *HeaderDownload) SaveExternalAnnounce(hash common.Hash) {
	hd.lock.Lock()
	defer hd.lock.Unlock()
	hd.seenAnnounces.Add(hash)
}
// getLink looks up a link by its header hash. Per this type's convention,
// un-exported helpers do not lock — the caller must hold hd.lock.
func (hd *HeaderDownload) getLink(linkHash common.Hash) (*Link, bool) {
	link, ok := hd.links[linkHash]
	return link, ok
}
// addHeaderAsLink wraps header into a link, registers it in hd.links and adds
// it to either the queue of persisted links or the queue of non-persisted
// links. Caller must hold hd.lock.
func (hd *HeaderDownload) addHeaderAsLink(header *types.Header, persisted bool) *Link {
	height := header.Number.Uint64()
	linkHash := header.Hash()
	link := &Link{
		blockHeight: height,
		hash:        linkHash,
		header:      header,
		persisted:   persisted,
	}
	hd.links[linkHash] = link
	if persisted {
		heap.Push(hd.persistedLinkQueue, link)
	} else {
		heap.Push(hd.linkQueue, link)
	}
	return link
}
// FeedHeaderFunc adapts FeedHeader to the callback signature expected by
// HeaderDownload.InsertHeaders, binding the database handle.
func (hi *HeaderInserter) FeedHeaderFunc(db ethdb.StatelessRwTx) func(header *types.Header, blockHeight uint64) error {
	return func(header *types.Header, blockHeight uint64) error {
		return hi.FeedHeader(db, header, blockHeight)
	}
}
// FeedHeader writes one header (plus its total difficulty) into the database,
// updating head/stage-progress bookkeeping whenever the header extends the
// chain with the highest total difficulty seen so far. Duplicates, headers
// already in the db, and headers with unknown parents are silently skipped.
func (hi *HeaderInserter) FeedHeader(db ethdb.StatelessRwTx, header *types.Header, blockHeight uint64) error {
	hash := header.Hash()
	if hash == hi.prevHash {
		// Skip duplicates
		return nil
	}
	// NOTE(review): hi.prevHeight is checked here but never updated in this
	// method — confirm it is maintained elsewhere.
	if blockHeight < hi.prevHeight {
		return fmt.Errorf("[%s] headers are unexpectedly unsorted, got %d after %d", hi.logPrefix, blockHeight, hi.prevHeight)
	}
	if oldH := rawdb.ReadHeader(db, hash, blockHeight); oldH != nil {
		// Already inserted, skip
		return nil
	}
	// Load parent header
	parent := rawdb.ReadHeader(db, header.ParentHash, blockHeight-1)
	if parent == nil {
		log.Warn(fmt.Sprintf("Could not find parent with hash %x and height %d for header %x %d", header.ParentHash, blockHeight-1, hash, blockHeight))
		// Skip headers without parents
		return nil
	}
	// Parent's total difficulty
	parentTd, err := rawdb.ReadTd(db, header.ParentHash, blockHeight-1)
	if err != nil {
		return fmt.Errorf("[%s] parent's total difficulty not found with hash %x and height %d for header %x %d: %v", hi.logPrefix, header.ParentHash, blockHeight-1, hash, blockHeight, err)
	}
	// Calculate total difficulty of this header using parent's total difficulty
	td := new(big.Int).Add(parentTd, header.Difficulty)
	// Now we can decide whether this header will create a change in the canonical head
	if td.Cmp(hi.localTd) > 0 {
		hi.newCanonical = true
		// Find the forking point - i.e. the latest header on the canonical chain which is an ancestor of this one
		// Most common case - forking point is the height of the parent header
		var forkingPoint uint64
		ch, err1 := rawdb.ReadCanonicalHash(db, blockHeight-1)
		if err1 != nil {
			return fmt.Errorf("reading canonical hash for height %d: %w", blockHeight-1, err1)
		}
		if ch == (common.Hash{}) || ch == header.ParentHash {
			forkingPoint = blockHeight - 1
		} else {
			// Going further back
			ancestorHash := parent.ParentHash
			ancestorHeight := blockHeight - 2
			for ch, err = rawdb.ReadCanonicalHash(db, ancestorHeight); err == nil && ch != ancestorHash; ch, err = rawdb.ReadCanonicalHash(db, ancestorHeight) {
				// NOTE(review): assumes every ancestor header exists in the db;
				// a missing one would nil-panic on the next line — confirm invariant.
				ancestor := rawdb.ReadHeader(db, ancestorHash, ancestorHeight)
				ancestorHash = ancestor.ParentHash
				ancestorHeight--
			}
			if err != nil {
				return fmt.Errorf("[%s] reading canonical hash for %d: %w", hi.logPrefix, ancestorHeight, err)
			}
			// Loop above terminates when either err != nil (handled already) or ch == ancestorHash, therefore ancestorHeight is our forking point
			forkingPoint = ancestorHeight
		}
		if err = rawdb.WriteHeadHeaderHash(db, hash); err != nil {
			return fmt.Errorf("[%s] marking head header hash as %x: %w", hi.logPrefix, hash, err)
		}
		hi.headerProgress = blockHeight
		if err = stages.SaveStageProgress(db, stages.Headers, blockHeight); err != nil {
			return fmt.Errorf("[%s] saving Headers progress: %w", hi.logPrefix, err)
		}
		// See if the forking point affects the unwindPoint (the block number to which other stages will need to unwind before the new canonical chain is applied)
		if forkingPoint < hi.unwindPoint {
			hi.unwindPoint = forkingPoint
		}
		// This makes sure we end up choosing the chain with the max total difficulty
		hi.localTd.Set(td)
	}
	data, err2 := rlp.EncodeToBytes(header)
	if err2 != nil {
		return fmt.Errorf("[%s] failed to RLP encode header: %w", hi.logPrefix, err2)
	}
	if err = rawdb.WriteTd(db, hash, blockHeight, td); err != nil {
		return fmt.Errorf("[%s] failed to WriteTd: %w", hi.logPrefix, err)
	}
	if err = db.Put(dbutils.HeadersBucket, dbutils.HeaderKey(blockHeight, hash), data); err != nil {
		return fmt.Errorf("[%s] failed to store header: %w", hi.logPrefix, err)
	}
	hi.prevHash = hash
	if blockHeight > hi.highest {
		hi.highest = blockHeight
		hi.highestHash = hash
		hi.highestTimestamp = header.Time
	}
	return nil
}
// GetHighest returns the greatest block height fed into this inserter.
func (hi *HeaderInserter) GetHighest() uint64 {
	return hi.highest
}

// GetHighestHash returns the hash of the highest fed header.
func (hi *HeaderInserter) GetHighestHash() common.Hash {
	return hi.highestHash
}

// GetHighestTimestamp returns the timestamp of the highest fed header.
func (hi *HeaderInserter) GetHighestTimestamp() uint64 {
	return hi.highestTimestamp
}

// UnwindPoint returns the block number other stages must unwind to before
// the new canonical chain is applied.
func (hi *HeaderInserter) UnwindPoint() uint64 {
	return hi.unwindPoint
}

// AnythingDone reports whether a new canonical head was established.
func (hi *HeaderInserter) AnythingDone() bool {
	return hi.newCanonical
}
// ProcessSegment handles a single chain segment: depending on whether its ends
// attach to existing anchors and/or links it performs connect, extendDown,
// extendUp or newAnchor. It returns requestMore=true when the segment was
// processed by extendDown or newAnchor (and request chaining is enabled), so
// the caller can immediately request more headers without waiting for all
// stages to finish — this speeds up visibility of new blocks. The peerID is
// remembered so that, if anchors created from this segment are later
// abandoned, that peer gets a penalty.
func (hd *HeaderDownload) ProcessSegment(segment *ChainSegment, newBlock bool, peerID string) (requestMore bool) {
	log.Debug("processSegment", "from", segment.Headers[0].Number.Uint64(), "to", segment.Headers[len(segment.Headers)-1].Number.Uint64())
	hd.lock.Lock()
	defer hd.lock.Unlock()
	foundAnchor, start := hd.findAnchors(segment)
	foundTip, end := hd.findLink(segment, start) // We ignore penalty because we will check it as part of PoW check
	if end == 0 {
		log.Debug("Duplicate segment")
		return
	}
	height := segment.Headers[len(segment.Headers)-1].Number.Uint64()
	hash := segment.Headers[len(segment.Headers)-1].Hash()
	if newBlock || hd.seenAnnounces.Seen(hash) {
		if height > hd.topSeenHeight {
			hd.topSeenHeight = height
		}
	}
	startNum := segment.Headers[start].Number.Uint64()
	endNum := segment.Headers[end-1].Number.Uint64()
	// There are 4 cases
	if foundAnchor {
		if foundTip {
			// Connect
			if err := hd.connect(segment, start, end); err != nil {
				log.Debug("Connect failed", "error", err)
				return
			}
			log.Debug("Connected", "start", startNum, "end", endNum)
		} else {
			// ExtendDown
			if err := hd.extendDown(segment, start, end); err != nil {
				log.Debug("ExtendDown failed", "error", err)
				return
			}
			requestMore = true
			log.Debug("Extended Down", "start", startNum, "end", endNum)
		}
	} else if foundTip {
		if end > 0 {
			// ExtendUp
			if err := hd.extendUp(segment, start, end); err != nil {
				log.Debug("ExtendUp failed", "error", err)
				return
			}
			log.Debug("Extended Up", "start", startNum, "end", endNum)
		}
	} else {
		// NewAnchor
		if err := hd.newAnchor(segment, start, end, peerID); err != nil {
			log.Debug("NewAnchor failed", "error", err)
			return
		}
		requestMore = true
		log.Debug("NewAnchor", "start", startNum, "end", endNum)
	}
	//log.Info(hd.anchorState())
	log.Debug("Link queue", "size", hd.linkQueue.Len())
	if hd.linkQueue.Len() > hd.linkLimit {
		log.Debug("Too many links, cutting down", "count", hd.linkQueue.Len(), "tried to add", end-start, "limit", hd.linkLimit)
	}
	// Evict links until back under the limit, unhooking each evicted link
	// from its parent link or anchor so no dangling references remain.
	for hd.linkQueue.Len() > hd.linkLimit {
		link := heap.Pop(hd.linkQueue).(*Link)
		delete(hd.links, link.hash)
		if parentLink, ok := hd.links[link.header.ParentHash]; ok {
			for i, n := range parentLink.next {
				if n == link {
					if i == len(parentLink.next)-1 {
						parentLink.next = parentLink.next[:i]
					} else {
						parentLink.next = append(parentLink.next[:i], parentLink.next[i+1:]...)
					}
					break
				}
			}
		}
		if anchor, ok := hd.anchors[link.header.ParentHash]; ok {
			for i, n := range anchor.links {
				if n == link {
					if i == len(anchor.links)-1 {
						anchor.links = anchor.links[:i]
					} else {
						anchor.links = append(anchor.links[:i], anchor.links[i+1:]...)
					}
					break
				}
			}
		}
	}
	// Non-blocking wake-up of whoever is waiting for deliveries.
	select {
	case hd.DeliveryNotify <- struct{}{}:
	default:
	}
	return hd.requestChaining && requestMore
}
// TopSeenHeight returns the highest block height observed in announcements.
func (hd *HeaderDownload) TopSeenHeight() uint64 {
	hd.lock.RLock()
	defer hd.lock.RUnlock()
	return hd.topSeenHeight
}

// SetHeaderReader installs the chain reader used for consensus verification.
func (hd *HeaderDownload) SetHeaderReader(headerReader consensus.ChainHeaderReader) {
	hd.lock.Lock()
	defer hd.lock.Unlock()
	hd.headerReader = headerReader
}

// EnableRequestChaining allows ProcessSegment to signal that more headers
// should be requested immediately after a successful extendDown/newAnchor.
func (hd *HeaderDownload) EnableRequestChaining() {
	hd.lock.Lock()
	defer hd.lock.Unlock()
	hd.requestChaining = true
}
// DecodeTips decodes hard-coded header records, where each encoding is
// base64(gzip(rlp(header))). The result maps header hash to its record.
func DecodeTips(encodings []string) (map[common.Hash]HeaderRecord, error) {
	hardTips := make(map[common.Hash]HeaderRecord, len(encodings))
	var buf bytes.Buffer
	for i, encoding := range encodings {
		b, err := base64.RawStdEncoding.DecodeString(encoding)
		if err != nil {
			return nil, fmt.Errorf("decoding hard coded header on %d: %w", i, err)
		}
		if _, err = buf.Write(b); err != nil {
			return nil, fmt.Errorf("gzip write string on %d: %w", i, err)
		}
		zr, err := gzip.NewReader(&buf)
		if err != nil {
			return nil, fmt.Errorf("gzip reader on %d: %w %q", i, err, encoding)
		}
		res, err := io.ReadAll(zr)
		if err != nil {
			return nil, fmt.Errorf("gzip copy on %d: %w %q", i, err, encoding)
		}
		if err := zr.Close(); err != nil {
			return nil, fmt.Errorf("gzip close on %d: %w", i, err)
		}
		var h types.Header
		if err := rlp.DecodeBytes(res, &h); err != nil {
			return nil, fmt.Errorf("parsing hard coded header on %d: %w", i, err)
		}
		// NOTE(review): Raw stores the still-compressed bytes (b), not the
		// decompressed RLP (res) — confirm consumers expect the compressed form.
		hardTips[h.Hash()] = HeaderRecord{Raw: b, Header: &h}
		buf.Reset()
	}
	return hardTips, nil
}
| 1 | 22,195 | Lock needs to go to `RecoverFromDb` instead. Here it may cause deadlocks. My convention was that un-exported functions do not lock, only exported ones (with name starting with a capital letter) | ledgerwatch-erigon | go |
@@ -63,4 +63,9 @@ public class GermanKeyboard extends BaseKeyboard {
public String getSpaceKeyText(String aComposingText) {
return StringUtils.getStringByLocale(mContext, R.string.settings_language_german, getLocale());
}
+
+ @Override
+ public String[] getDomains(String... domains) {
+ return super.getDomains(".de");
+ }
} | 1 | package org.mozilla.vrbrowser.ui.keyboards;
import android.content.Context;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import org.mozilla.vrbrowser.R;
import org.mozilla.vrbrowser.input.CustomKeyboard;
import org.mozilla.vrbrowser.ui.widgets.WidgetPlacement;
import org.mozilla.vrbrowser.utils.StringUtils;
import java.util.Locale;
/**
 * German ("Deutsch") software keyboard: QWERTZ-style alphabetic layout plus a
 * German symbols layout, both created lazily and cached.
 */
public class GermanKeyboard extends BaseKeyboard {

    private CustomKeyboard mKeyboard;
    private CustomKeyboard mSymbolsKeyboard;

    public GermanKeyboard(Context aContext) {
        super(aContext);
    }

    @NonNull
    @Override
    public CustomKeyboard getAlphabeticKeyboard() {
        // Lazily created; reused for the lifetime of this keyboard.
        if (mKeyboard == null) {
            mKeyboard = new CustomKeyboard(mContext.getApplicationContext(), R.xml.keyboard_qwerty_german);
        }
        return mKeyboard;
    }

    @Nullable
    @Override
    public CustomKeyboard getSymbolsKeyboard() {
        if (mSymbolsKeyboard == null) {
            mSymbolsKeyboard = new CustomKeyboard(mContext.getApplicationContext(), R.xml.keyboard_symbols_german);
        }
        return mSymbolsKeyboard;
    }

    @Override
    public float getAlphabeticKeyboardWidth() {
        return WidgetPlacement.dpDimension(mContext, R.dimen.keyboard_alphabetic_width_extra_column);
    }

    @Nullable
    @Override
    public CandidatesResult getCandidates(String aText) {
        // German input uses no candidates (auto-completion) window.
        return null;
    }

    @Override
    public String getKeyboardTitle() {
        return StringUtils.getStringByLocale(mContext, R.string.settings_language_german, getLocale());
    }

    @Override
    public Locale getLocale() {
        return Locale.GERMAN;
    }

    @Override
    public String getSpaceKeyText(String aComposingText) {
        return StringUtils.getStringByLocale(mContext, R.string.settings_language_german, getLocale());
    }

    @Override
    public String[] getDomains(String... domains) {
        // German keyboards are used in Germany, Austria and Switzerland, so
        // suggest all three country TLDs in addition to the base defaults.
        return super.getDomains(".de", ".at", ".ch");
    }
}
| 1 | 8,538 | German keyboard would also be used in Austria and Switzerland, so this should have included `.at` and `.ch`. | MozillaReality-FirefoxReality | java |
@@ -38,8 +38,6 @@ final class CompositeIndexer {
CacheBuilder.from(spec).<PartitionKeyToTraceId, Pair<Long>>build().asMap();
Indexer.Factory factory = new Indexer.Factory(session, indexTtl, sharedState);
this.indexers = ImmutableSet.of(
- factory.create(new InsertTraceIdByServiceName(bucketCount)),
- factory.create(new InsertTraceIdBySpanName()),
factory.create(new InsertTraceIdByAnnotation(bucketCount))
);
} | 1 | /**
* Copyright 2015-2016 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin.storage.cassandra;
import com.datastax.driver.core.Session;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheBuilderSpec;
import com.google.common.collect.ImmutableSet;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import zipkin.Span;
import zipkin.internal.Nullable;
import zipkin.internal.Pair;
/**
 * Fans span-index writes out to every configured {@link Indexer}, optionally
 * sharing a bounded dedup cache across all of them.
 */
final class CompositeIndexer {

  private final Set<Indexer> indexers;
  // Shared across all threads as updates can come from any thread.
  // Shared for all indexes to make data management easier (ex. maximumSize).
  // Null when no cache spec was supplied, i.e. dedup caching is disabled.
  private final ConcurrentMap<PartitionKeyToTraceId, Pair<Long>> sharedState;

  /**
   * @param session active Cassandra session used by each indexer
   * @param spec cache configuration for dedup state, or null to disable caching
   * @param bucketCount number of buckets used to shard wide index rows
   * @param indexTtl time-to-live for index entries, or null for no expiry
   */
  CompositeIndexer(Session session, CacheBuilderSpec spec, int bucketCount,
      @Nullable Integer indexTtl) {
    this.sharedState = spec == null ? null :
        CacheBuilder.from(spec).<PartitionKeyToTraceId, Pair<Long>>build().asMap();

    Indexer.Factory factory = new Indexer.Factory(session, indexTtl, sharedState);
    this.indexers = ImmutableSet.of(
        factory.create(new InsertTraceIdByServiceName(bucketCount)),
        factory.create(new InsertTraceIdBySpanName()),
        factory.create(new InsertTraceIdByAnnotation(bucketCount))
    );
  }

  /** Indexes the spans with every registered indexer, returning all pending writes. */
  ImmutableSet<ListenableFuture<?>> index(List<Span> spans) {
    ImmutableSet.Builder<ListenableFuture<?>> result = ImmutableSet.builder();
    for (Indexer optimizer : indexers) {
      result.addAll(optimizer.index(spans));
    }
    return result.build();
  }

  /** Drops all shared dedup state, if caching is enabled. */
  public void clear() {
    // sharedState is null when this indexer was constructed without a cache
    // spec; guard so clear() does not throw a NullPointerException then.
    if (sharedState != null) {
      sharedState.clear();
    }
  }
}
| 1 | 11,412 | since this makes the indexer thing really only used for a single table, now, you can put in a TODO for me to cleanup and simplify this | openzipkin-zipkin | java |
@@ -83,6 +83,10 @@ public class KubernetesContainerizedImpl implements ContainerizedImpl {
"/export/apps/azkaban/azkaban-exec-server/current/plugins/jobtypes";
public static final String IMAGE = "image";
public static final String VERSION = "version";
+ public static final String NSCD_SOCKET_VOLUME_NAME = "nscd-socket";
+ public static final String NSCD_SOCKET_HOST_PATH = "/var/run/nscd/socket";
+ public static final String HOST_PATH_TYPE = "Socket";
+ public static final String NSCD_SOCKET_VOLUME_MOUNT_PATH = "/var/run/nscd/socket";
private final String namespace;
private final ApiClient client; | 1 | /*
* Copyright 2020 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor.container;
import azkaban.Constants;
import azkaban.Constants.ConfigurationKeys;
import azkaban.Constants.ContainerizedDispatchManagerProperties;
import azkaban.container.models.AzKubernetesV1PodBuilder;
import azkaban.container.models.AzKubernetesV1ServiceBuilder;
import azkaban.container.models.AzKubernetesV1SpecBuilder;
import azkaban.container.models.ImagePullPolicy;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutableFlowBase;
import azkaban.executor.ExecutableNode;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.ExecutorManagerException;
import azkaban.imagemgmt.rampup.ImageRampupManager;
import azkaban.imagemgmt.version.VersionSet;
import azkaban.imagemgmt.version.VersionSetBuilder;
import azkaban.imagemgmt.version.VersionSetLoader;
import azkaban.utils.Props;
import com.google.common.collect.ImmutableMap;
import com.google.common.annotations.VisibleForTesting;
import io.kubernetes.client.openapi.ApiClient;
import io.kubernetes.client.openapi.ApiException;
import io.kubernetes.client.openapi.apis.CoreV1Api;
import io.kubernetes.client.openapi.models.V1DeleteOptions;
import io.kubernetes.client.openapi.models.V1Pod;
import io.kubernetes.client.openapi.models.V1PodSpec;
import io.kubernetes.client.openapi.models.V1Service;
import io.kubernetes.client.openapi.models.V1Status;
import io.kubernetes.client.util.ClientBuilder;
import io.kubernetes.client.util.KubeConfig;
import io.kubernetes.client.util.Yaml;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This class is Kubernetes based implementation for containerization. It has implementation for
 * creation/deletion of Pod and service. For any execution, it will identify version set and create
 * a pod for all the valid jobTypes of a flow.
 */
@Singleton
public class KubernetesContainerizedImpl implements ContainerizedImpl {

  public static final String DEFAULT_FLOW_CONTAINER_NAME_PREFIX = "az-flow-container";
  public static final String DEFAULT_POD_NAME_PREFIX = "fc-dep";
  public static final String DEFAULT_SERVICE_NAME_PREFIX = "fc-svc";
  public static final String DEFAULT_CLUSTER_NAME = "azkaban";
  public static final String CPU_LIMIT = "4";
  public static final String DEFAULT_CPU_REQUEST = "1";
  public static final String MEMORY_LIMIT = "64Gi";
  public static final String DEFAULT_MEMORY_REQUEST = "2Gi";
  public static final String MAPPING = "Mapping";
  public static final String SERVICE_API_VERSION_2 = "ambassador/v2";
  public static final String DEFAULT_INIT_MOUNT_PATH_PREFIX_FOR_JOBTYPES = "/data/jobtypes";
  public static final String DEFAULT_APP_MOUNT_PATH_PREFIX_FOR_JOBTYPES =
      "/export/apps/azkaban/azkaban-exec-server/current/plugins/jobtypes";
  public static final String IMAGE = "image";
  public static final String VERSION = "version";

  private final String namespace;
  private final ApiClient client;
  private final CoreV1Api coreV1Api;
  private final Props azkProps;
  private final ExecutorLoader executorLoader;
  private final String podPrefix;
  private final String servicePrefix;
  private final String clusterName;
  private final String flowContainerName;
  private final String cpuLimit;
  private final String cpuRequest;
  private final String memoryLimit;
  private final String memoryRequest;
  private final int servicePort;
  private final long serviceTimeout;
  private final VersionSetLoader versionSetLoader;
  private final ImageRampupManager imageRampupManager;
  private final String initMountPathPrefixForJobtypes;
  private final String appMountPathPrefixForJobtypes;

  private static final Logger logger = LoggerFactory
      .getLogger(KubernetesContainerizedImpl.class);

  @Inject
  public KubernetesContainerizedImpl(final Props azkProps,
      final ExecutorLoader executorLoader,
      final VersionSetLoader versionSetLoader,
      final ImageRampupManager imageRampupManager)
      throws ExecutorManagerException {
    this.azkProps = azkProps;
    this.executorLoader = executorLoader;
    this.versionSetLoader = versionSetLoader;
    this.imageRampupManager = imageRampupManager;
    this.namespace = this.azkProps
        .getString(ContainerizedDispatchManagerProperties.KUBERNETES_NAMESPACE);
    this.flowContainerName =
        this.azkProps
            .getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_NAME
                , DEFAULT_FLOW_CONTAINER_NAME_PREFIX);
    this.podPrefix =
        this.azkProps.getString(ContainerizedDispatchManagerProperties.KUBERNETES_POD_NAME_PREFIX,
            DEFAULT_POD_NAME_PREFIX);
    this.servicePrefix = this.azkProps
        .getString(ContainerizedDispatchManagerProperties.KUBERNETES_SERVICE_NAME_PREFIX,
            DEFAULT_SERVICE_NAME_PREFIX);
    this.clusterName = this.azkProps.getString(ConfigurationKeys.AZKABAN_CLUSTER_NAME,
        DEFAULT_CLUSTER_NAME);
    this.cpuLimit = this.azkProps
        .getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_CPU_LIMIT,
            CPU_LIMIT);
    this.cpuRequest = this.azkProps
        .getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_CPU_REQUEST,
            DEFAULT_CPU_REQUEST);
    this.memoryLimit = this.azkProps
        .getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_MEMORY_LIMIT,
            MEMORY_LIMIT);
    this.memoryRequest = this.azkProps
        .getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_MEMORY_REQUEST,
            DEFAULT_MEMORY_REQUEST);
    this.servicePort =
        this.azkProps.getInt(ContainerizedDispatchManagerProperties.KUBERNETES_SERVICE_PORT,
            54343);
    this.serviceTimeout =
        this.azkProps
            .getLong(ContainerizedDispatchManagerProperties.KUBERNETES_SERVICE_CREATION_TIMEOUT_MS,
                60000);
    this.initMountPathPrefixForJobtypes =
        this.azkProps
            .getString(ContainerizedDispatchManagerProperties.KUBERNETES_INIT_MOUNT_PATH_FOR_JOBTYPES,
                DEFAULT_INIT_MOUNT_PATH_PREFIX_FOR_JOBTYPES);
    this.appMountPathPrefixForJobtypes =
        this.azkProps
            .getString(ContainerizedDispatchManagerProperties.KUBERNETES_MOUNT_PATH_FOR_JOBTYPES,
                DEFAULT_APP_MOUNT_PATH_PREFIX_FOR_JOBTYPES);

    try {
      // Path to the configuration file for Kubernetes which contains information about
      // Kubernetes API Server and identity for authentication
      final String kubeConfigPath = this.azkProps
          .getString(ContainerizedDispatchManagerProperties.KUBERNETES_KUBE_CONFIG_PATH);
      logger.info("Kube config path is : {}", kubeConfigPath);
      this.client =
          ClientBuilder.kubeconfig(KubeConfig.loadKubeConfig(
              Files.newBufferedReader(Paths.get(kubeConfigPath), Charset.defaultCharset())))
              .build();
      this.coreV1Api = new CoreV1Api(this.client);
    } catch (final IOException exception) {
      logger.error("Unable to read kube config file: {}", exception.getMessage());
      throw new ExecutorManagerException(exception);
    }
  }

  /**
   * This method is used to create container during dispatch of execution. It will create pod for a
   * flow execution. It will also create a service for a pod if azkaban.kubernetes.service .required
   * property is set.
   *
   * @param executionId id of the execution to dispatch into a container
   * @throws ExecutorManagerException if the pod or service cannot be created
   */
  @Override
  public void createContainer(final int executionId) throws ExecutorManagerException {
    createPod(executionId);
    if (isServiceRequired()) {
      createService(executionId);
    }
  }

  /**
   * This method is used to delete container. It will delete pod for a flow execution. If the
   * service was created then it will also delete the service. This method can be called as a part
   * of cleanup process for containers in case containers didn't shutdown gracefully.
   *
   * @param executionId id of the execution whose container should be removed
   * @throws ExecutorManagerException if the pod or service cannot be deleted
   */
  @Override
  public void deleteContainer(final int executionId) throws ExecutorManagerException {
    deletePod(executionId);
    if (isServiceRequired()) {
      deleteService(executionId);
    }
  }

  /**
   * Construct the flow override parameter (key) for image version.
   *
   * @param imageType image type name, e.g. a jobtype
   * @return flow override param of the form "image.<imageType>.version"
   */
  private String imageTypeOverrideParam(String imageType) {
    return String.join(".", IMAGE, imageType, VERSION);
  }

  /**
   * This method fetches the complete version set information (Map of jobs and their versions)
   * required to run the flow.
   *
   * @param executionId id of the execution, used for logging context
   * @param flowParams flow parameters, may carry a version-set id and per-image overrides
   * @param imageTypesUsedInFlow all image types (jobtypes) referenced by the flow
   * @return VersionSet covering every image type in the flow
   * @throws ExecutorManagerException if version information cannot be fetched or persisted
   */
  @VisibleForTesting
  VersionSet fetchVersionSet(final int executionId, Map<String, String> flowParams,
      Set<String> imageTypesUsedInFlow) throws ExecutorManagerException {
    VersionSet versionSet = null;

    try {
      if (flowParams != null &&
          flowParams.containsKey(Constants.FlowParameters.FLOW_PARAM_VERSION_SET_ID)) {
        int versionSetId = Integer.parseInt(flowParams
            .get(Constants.FlowParameters.FLOW_PARAM_VERSION_SET_ID));
        try {
          versionSet = this.versionSetLoader.getVersionSetById(versionSetId).get();

          /*
           * Validate that all images part of the flow are included in the retrieved
           * VersionSet. If there are images that were not part of the retrieved version
           * set, then create a new VersionSet with a superset of all images.
           */
          Set<String> imageVersionsNotFound = new TreeSet<>();
          Map<String, String> overlayMap = new HashMap<>();
          for (String imageType : imageTypesUsedInFlow) {
            if (flowParams.containsKey(imageTypeOverrideParam(imageType))) {
              overlayMap.put(imageType, flowParams.get(imageTypeOverrideParam(imageType)));
            } else if (!(imageType.equals("noop") || versionSet.getVersion(imageType).isPresent())) {
              logger.info("ExecId: {}, imageType: {} not found in versionSet {}",
                  executionId, imageType, versionSetId);
              imageVersionsNotFound.add(imageType);
            }
          }

          if (!(imageVersionsNotFound.isEmpty() && overlayMap.isEmpty())) {
            // Populate a new Version Set
            logger.info("ExecId: {}, Flow had more imageTypes than specified in versionSet {}. "
                + "Constructing a new one", executionId, versionSetId);
            VersionSetBuilder versionSetBuilder = new VersionSetBuilder(this.versionSetLoader);
            versionSetBuilder.addElements(versionSet.getImageToVersionMap());
            // The following is a safety check. Just in case: getVersionByImageTypes fails below due to an
            // exception, we will have an incomplete/incorrect versionSet. Setting it null ensures, it will
            // be processed from scratch in the following code block
            versionSet = null;
            if (!imageVersionsNotFound.isEmpty()) {
              versionSetBuilder.addElements(
                  this.imageRampupManager.getVersionByImageTypes(imageVersionsNotFound));
            }
            if (!overlayMap.isEmpty()) {
              versionSetBuilder.addElements(overlayMap);
            }
            versionSet = versionSetBuilder.build();
          }
        } catch (Exception e) {
          // Pass the exception to the logger so the root cause (parse error, missing
          // version set, etc.) is diagnosable; we deliberately fall through and build
          // a fresh version set below.
          logger.error("ExecId: {}, Could not find version set id: {} as specified by flow params. "
              + "Will continue by creating a new one.", executionId, versionSetId, e);
        }
      }

      if (versionSet == null) {
        // Need to build a version set
        imageTypesUsedInFlow.remove("noop");  // Remove noop type if exists in the input map
        Map<String, String> versionMap = imageRampupManager.getVersionByImageTypes(imageTypesUsedInFlow);
        // Now we will check the flow params for any override versions provided and apply them
        for (String imageType : imageTypesUsedInFlow) {
          final String imageTypeVersionOverrideParam = imageTypeOverrideParam(imageType);
          if (flowParams != null && flowParams.containsKey(imageTypeVersionOverrideParam)) {
            // We will trust that the user-provided version exists for now. May need to add some validation here!
            versionMap.put(imageType, flowParams.get(imageTypeVersionOverrideParam));
          }
        }
        VersionSetBuilder versionSetBuilder = new VersionSetBuilder(this.versionSetLoader);
        versionSet = versionSetBuilder.addElements(versionMap).build();
      }
    } catch (IOException e) {
      logger.error("ExecId: {}, Exception in fetching the VersionSet. Error msg: {}",
          executionId, e.getMessage());
      throw new ExecutorManagerException(e);
    }
    return versionSet;
  }

  /**
   * Builds the pod spec: the main flow container plus one init container per jobtype.
   *
   * @param executionId id of the execution, used for logging context
   * @param versionSet resolved image versions for every jobtype in the flow
   * @param jobTypes jobtypes that need init containers mounted into the pod
   * @return pod spec ready to be wrapped into a {@link V1Pod}
   * @throws ExecutorManagerException if a jobtype has no version in the version set
   */
  @VisibleForTesting
  V1PodSpec createPodSpec(final int executionId, final VersionSet versionSet, SortedSet<String> jobTypes)
      throws ExecutorManagerException {
    final String azkabanBaseImageVersion = getAzkabanBaseImageVersion();
    final String azkabanConfigVersion = getAzkabanConfigVersion();
    final AzKubernetesV1SpecBuilder v1SpecBuilder =
        new AzKubernetesV1SpecBuilder(this.clusterName, Optional.empty()).addFlowContainer(this.flowContainerName,
            azkabanBaseImageVersion, ImagePullPolicy.IF_NOT_PRESENT, azkabanConfigVersion)
            .withResources(this.cpuLimit, this.cpuRequest, this.memoryLimit, this.memoryRequest);

    // Create init container yaml file for each jobType
    addInitContainerForAllJobTypes(executionId, jobTypes, v1SpecBuilder, versionSet);
    return v1SpecBuilder.build();
  }

  /**
   * Wraps a pod spec with metadata (name, namespace, labels, annotations) into a pod object.
   *
   * @param executionId id of the execution, used to derive the pod name
   * @param podSpec spec built by {@link #createPodSpec}
   * @return pod object ready to be submitted to the Kubernetes API
   */
  @VisibleForTesting
  V1Pod createPodFromSpec(int executionId, V1PodSpec podSpec) {
    final ImmutableMap<String, String> labels = getLabelsForPod();
    final ImmutableMap<String, String> annotations = getAnnotationsForPod();

    final V1Pod pod = new AzKubernetesV1PodBuilder(getPodName(executionId), this.namespace, podSpec)
        .withPodLabels(labels)
        .withPodAnnotations(annotations)
        .build();
    return pod;
  }

  /**
   * This method is used to create pod. 1. Fetch jobTypes for the flow 2. Fetch flow parameters for
   * version set and each image type if it is set. 3. If valid version set is provided then use
   * versions from it. 4. If valid version set is not provided then call Ramp up manager API and get
   * image version for each image type. 5. Add all the validation around a) whether version set is
   * valid or not. b) If it is valid then is there any change in flow and new jobType is introduced
   * after version set was created? If so, create new version set using versions mentioned in
   * version set and ramp up for new jobType. 6. Create pod spec using all the version information
   * 7. Insert version set into execution_flows tables for a reference 8. Emit version set as a part
   * of flow life cycle event.
   *
   * @param executionId id of the execution to create a pod for
   * @throws ExecutorManagerException if the flow cannot be loaded or the pod cannot be created
   */
  private void createPod(final int executionId) throws ExecutorManagerException {
    // Fetch execution flow from execution Id.
    final ExecutableFlow flow = this.executorLoader.fetchExecutableFlow(executionId);
    // Step 1: Fetch set of jobTypes for a flow from executionId
    final TreeSet<String> jobTypes = getJobTypesForFlow(flow);
    logger.info("ExecId: {}, Jobtypes for flow {} are: {}", executionId, flow.getFlowId(), jobTypes);

    final Map<String, String> flowParam =
        flow.getExecutionOptions().getFlowParameters();

    if (flowParam != null && !flowParam.isEmpty()) {
      logger.info("ExecId: {}, Flow Parameters are: {}", executionId, flowParam);
    }

    final VersionSet versionSet = fetchVersionSet(executionId, flowParam, jobTypes);
    final V1PodSpec podSpec = createPodSpec(executionId, versionSet, jobTypes);
    final V1Pod pod = createPodFromSpec(executionId, podSpec);
    String podSpecYaml = Yaml.dump(pod).trim();
    logger.debug("ExecId: {}, Pod spec is {}", executionId, podSpecYaml);

    // TODO: Add version set number and json in flow life cycle event so users can use this
    // information
    try {
      this.coreV1Api.createNamespacedPod(this.namespace, pod, null, null, null);
      logger.info("ExecId: {}, Dispatched pod for execution.", executionId);
    } catch (ApiException e) {
      logger.error("ExecId: {}, Unable to create Pod: {}", executionId, e.getResponseBody());
      throw new ExecutorManagerException(e);
    }
    // TODO: Store version set id in execution_flows for execution_id
  }

  /**
   * TODO: Get azkaban base image version from version set.
   *
   * @return base image version; currently always null until the TODO is addressed
   */
  private String getAzkabanBaseImageVersion() {
    return null;
  }

  /**
   * TODO: Get azkaban config version from version set.
   *
   * @return config version; currently always null until the TODO is addressed
   */
  private String getAzkabanConfigVersion() {
    return null;
  }

  /**
   * TODO: Add implementation to get labels for Pod.
   *
   * @return labels attached to every dispatched pod; currently just the cluster name
   */
  private ImmutableMap<String, String> getLabelsForPod() {
    return ImmutableMap.of("cluster", this.clusterName);
  }

  /**
   * TODO: Add implementation to get annotations for Pod.
   *
   * @return annotations attached to every dispatched pod; currently empty
   */
  private ImmutableMap<String, String> getAnnotationsForPod() {
    return ImmutableMap.of();
  }

  /**
   * TODO: Check if we need to turn everything into lower case?
   *
   * @param executionId id of the execution, used for error context
   * @param jobTypes jobtypes requiring init containers
   * @param v1SpecBuilder builder the init containers are appended to
   * @param versionSet resolved image versions for the jobtypes
   * @throws ExecutorManagerException if a jobtype has no version string in the version set
   */
  private void addInitContainerForAllJobTypes(final int executionId,
      final Set<String> jobTypes, final AzKubernetesV1SpecBuilder v1SpecBuilder,
      final VersionSet versionSet)
      throws ExecutorManagerException {
    for (String jobType: jobTypes) {
      try {
        String imageVersion = versionSet.getVersion(jobType).get();
        v1SpecBuilder.addJobType(jobType, imageVersion, ImagePullPolicy.IF_NOT_PRESENT,
            String.join("/", this.initMountPathPrefixForJobtypes, jobType),
            String.join("/", this.appMountPathPrefixForJobtypes, jobType));
      } catch (Exception e) {
        throw new ExecutorManagerException("Did not find the version string for image type: " +
            jobType + " in versionSet");
      }
    }
  }

  /**
   * This method is used to get jobTypes for a flow. This method is going to call
   * populateJobTypeForFlow which has recursive method call to traverse the DAG for a flow.
   *
   * @param flow Executable flow object
   * @return sorted set of all jobtypes used anywhere in the flow's DAG
   */
  public TreeSet<String> getJobTypesForFlow(final ExecutableFlow flow) {
    final TreeSet<String> jobTypes = new TreeSet<>();
    populateJobTypeForFlow(flow, jobTypes);
    return jobTypes;
  }

  /**
   * This method is used to populate jobTypes for ExecutableNode. Recurses into
   * embedded flows; leaf nodes contribute their job type.
   *
   * @param node node (flow or job) being visited
   * @param jobTypes accumulator for all jobtypes found so far
   */
  private void populateJobTypeForFlow(final ExecutableNode node, Set<String> jobTypes) {
    if (node instanceof ExecutableFlowBase) {
      final ExecutableFlowBase base = (ExecutableFlowBase) node;
      for (ExecutableNode subNode : base.getExecutableNodes()) {
        populateJobTypeForFlow(subNode, jobTypes);
      }
    } else {
      jobTypes.add(node.getType());
    }
  }

  /**
   * This method is used to create service for flow container for execution id.
   *
   * @param executionId id of the execution the service routes to
   * @throws ExecutorManagerException if the service template cannot be read or the API call fails
   */
  private void createService(final int executionId) throws ExecutorManagerException {
    try {
      final AzKubernetesV1ServiceBuilder azKubernetesV1ServiceBuilder =
          new AzKubernetesV1ServiceBuilder(
              "v1Service.yaml");
      final V1Service serviceObject = azKubernetesV1ServiceBuilder
          .withExecId(String.valueOf(executionId))
          .withServiceName(getServiceName(executionId))
          .withNamespace(this.namespace)
          .withApiVersion(SERVICE_API_VERSION_2)
          .withKind(MAPPING)
          .withPort(String.valueOf(this.servicePort))
          .withTimeoutMs(String.valueOf(this.serviceTimeout))
          .build();
      this.coreV1Api.createNamespacedService(this.namespace, serviceObject, null, null, null);
      logger.info("ExecId: {}, Service is created.", executionId);
    } catch (final IOException e) {
      logger.error("ExecId: {}, Unable to create service in Kubernetes. Msg: {}", executionId, e.getMessage());
      throw new ExecutorManagerException(e);
    } catch (final ApiException e) {
      logger.error("ExecId: {}, Unable to create service in Kubernetes. Msg: {} ",
          executionId, e.getResponseBody());
      throw new ExecutorManagerException(e);
    }
  }

  /**
   * This method is used to check whether service should be created in Kubernetes for flow container
   * pod or not.
   *
   * @return true when the service-required property is set; defaults to false
   */
  private boolean isServiceRequired() {
    return this.azkProps
        .getBoolean(ContainerizedDispatchManagerProperties.KUBERNETES_SERVICE_REQUIRED, false);
  }

  /**
   * This method is used to delete pod in Kubernetes. It will terminate the pod. deployment is
   * fixed
   *
   * @param executionId id of the execution whose pod should be deleted
   * @throws ExecutorManagerException if the Kubernetes delete call fails
   */
  private void deletePod(final int executionId) throws ExecutorManagerException {
    try {
      final String podName = getPodName(executionId);
      this.coreV1Api.deleteNamespacedPod(podName, this.namespace, null, null,
          null, null, null, new V1DeleteOptions());
      logger.info("ExecId: {}, Action: Pod Deletion, Pod Name: {}", executionId, podName);
    } catch (ApiException e) {
      logger.error("ExecId: {}, Unable to delete Pod in Kubernetes: {}", executionId, e.getResponseBody());
      throw new ExecutorManagerException(e);
    }
  }

  /**
   * This method is used to delete service in Kubernetes which is created for Pod.
   *
   * @param executionId id of the execution whose service should be deleted
   * @throws ExecutorManagerException if the Kubernetes delete call fails
   */
  public void deleteService(final int executionId) throws ExecutorManagerException {
    final String serviceName = getServiceName(executionId);
    try {
      final V1Status deleteResult = this.coreV1Api.deleteNamespacedService(
          serviceName,
          this.namespace,
          null,
          null,
          null,
          null,
          null,
          new V1DeleteOptions());
      logger.info("ExecId: {}, Action: Service Deletion, Service Name: {}, code: {}, message: {}",
          executionId,
          serviceName,
          deleteResult.getCode(),
          deleteResult.getMessage());
    } catch (ApiException e) {
      logger.error("ExecId: {}, Unable to delete service in Kubernetes: {}", executionId, e.getResponseBody());
      throw new ExecutorManagerException(e);
    }
  }

  /**
   * This method is used to get service name. It will be created using service name prefix, azkaban
   * cluster name and execution id.
   *
   * @param executionId id of the execution
   * @return service name of the form "<prefix>-<cluster>-<executionId>"
   */
  private String getServiceName(final int executionId) {
    return String.join("-", this.servicePrefix, this.clusterName, String.valueOf(executionId));
  }

  /**
   * This method is used to get name of Pod based on naming convention. It will be created using pod
   * name prefix, azkaban cluster name and execution id.
   *
   * @param executionId id of the execution
   * @return pod name of the form "<prefix>-<cluster>-<executionId>"
   */
  private String getPodName(final int executionId) {
    return String.join("-", this.podPrefix, this.clusterName, String.valueOf(executionId));
  }
}
| 1 | 21,045 | Should we have this path passed from the Azkaban properties? If this is the standard path for nscd even outside LinkedIn, then we can keep it. | azkaban-azkaban | java |
@@ -533,7 +533,12 @@ func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error {
s.Warnf(" Error adding Stream %q to Template %q: %v", cfg.Name, cfg.Template, err)
}
}
- mset, err := a.AddStream(&cfg.StreamConfig)
+ // TODO: We should not rely on the stream name.
+ // However, having a StreamConfig property, such as AllowNoSubject,
+ // was not accepted because it does not make sense outside of the
+ // MQTT use-case. So need to revisit this.
+ mqtt := cfg.StreamConfig.Name == mqttStreamName
+ mset, err := a.addStreamWithStore(&cfg.StreamConfig, nil, mqtt)
if err != nil {
s.Warnf(" Error recreating Stream %q: %v", cfg.Name, err)
continue | 1 | // Copyright 2019-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"math"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/minio/highwayhash"
"github.com/nats-io/nats-server/v2/server/sysmem"
)
// JetStreamConfig determines this server's configuration.
// MaxMemory and MaxStore are in bytes.
type JetStreamConfig struct {
	MaxMemory int64  // maximum bytes of memory JetStream may reserve
	MaxStore  int64  // maximum bytes of disk storage JetStream may reserve
	StoreDir  string // base directory for file-backed storage
}
// JetStreamAccountLimits are the per-account resource caps for JetStream.
// TODO(dlc) - need to track and rollup against server limits, etc.
type JetStreamAccountLimits struct {
	MaxMemory    int64 `json:"max_memory"`
	MaxStore     int64 `json:"max_storage"`
	MaxStreams   int   `json:"max_streams"`
	MaxConsumers int   `json:"max_consumers"`
}
// JetStreamAccountStats returns current statistics about the account's JetStream usage.
type JetStreamAccountStats struct {
	Memory  uint64                 `json:"memory"`  // bytes of memory currently used
	Store   uint64                 `json:"storage"` // bytes of disk currently used
	Streams int                    `json:"streams"` // number of streams
	Limits  JetStreamAccountLimits `json:"limits"`  // configured caps for this account
}
// This is for internal accounting for JetStream for this server.
type jetStream struct {
	mu            sync.RWMutex
	srv           *Server
	config        JetStreamConfig
	accounts      map[*Account]*jsAccount // accounts with JetStream enabled
	memReserved   int64                   // bytes of memory reserved across all accounts
	storeReserved int64                   // bytes of disk reserved across all accounts
}
// This represents a jetstream enabled account.
// Worth noting that we include the js ptr, this is because
// in general we want to be very efficient when receiving messages on
// an internal sub for a msgSet, so we will direct link to the msgSet
// and walk backwards as needed vs multiple hash lookups and locks, etc.
type jsAccount struct {
	mu            sync.RWMutex
	js            *jetStream
	account       *Account
	limits        JetStreamAccountLimits
	memReserved   int64  // bytes of memory reserved by this account's limits
	memUsed       int64  // bytes of memory currently in use
	storeReserved int64  // bytes of disk reserved by this account's limits
	storeUsed     int64  // bytes of disk currently in use
	storeDir      string // per-account storage directory
	streams       map[string]*Stream
	templates     map[string]*StreamTemplate
	store         TemplateStore
}
// EnableJetStream will enable JetStream support on this server with the given configuration.
// A nil configuration will dynamically choose the limits and temporary file storage directory.
// If this server is part of a cluster, a system account will need to be defined.
// Returns an error if the server is clustered, JetStream is already enabled,
// or the storage directory cannot be created or written to.
func (s *Server) EnableJetStream(config *JetStreamConfig) error {
	s.mu.Lock()
	if !s.standAloneMode() {
		s.mu.Unlock()
		return fmt.Errorf("jetstream restricted to single server mode for now")
	}
	if s.js != nil {
		s.mu.Unlock()
		return fmt.Errorf("jetstream already enabled")
	}
	s.Noticef("Starting JetStream")
	// Fill in dynamic limits when the caller did not supply usable ones.
	if config == nil || config.MaxMemory <= 0 || config.MaxStore <= 0 {
		var storeDir string
		var maxStore int64
		if config != nil {
			storeDir = config.StoreDir
			maxStore = config.MaxStore
		}
		config = s.dynJetStreamConfig(storeDir, maxStore)
		s.Debugf("JetStream creating dynamic configuration - %s memory, %s disk", FriendlyBytes(config.MaxMemory), FriendlyBytes(config.MaxStore))
	}
	// Copy, don't change callers version.
	cfg := *config
	if cfg.StoreDir == "" {
		cfg.StoreDir = filepath.Join(os.TempDir(), JetStreamStoreDir)
	}
	s.js = &jetStream{srv: s, config: cfg, accounts: make(map[*Account]*jsAccount)}
	// Release the server lock before doing filesystem work below.
	s.mu.Unlock()

	// FIXME(dlc) - Allow memory only operation?
	if stat, err := os.Stat(cfg.StoreDir); os.IsNotExist(err) {
		if err := os.MkdirAll(cfg.StoreDir, 0755); err != nil {
			return fmt.Errorf("could not create storage directory - %v", err)
		}
	} else {
		// Make sure its a directory and that we can write to it.
		if stat == nil || !stat.IsDir() {
			return fmt.Errorf("storage directory is not a directory")
		}
		// Probe writability with a throwaway temp file.
		tmpfile, err := ioutil.TempFile(cfg.StoreDir, "_test_")
		if err != nil {
			return fmt.Errorf("storage directory is not writable")
		}
		os.Remove(tmpfile.Name())
	}

	// JetStream is an internal service so we need to make sure we have a system account.
	// This system account will export the JetStream service endpoints.
	if sacc := s.SystemAccount(); sacc == nil {
		s.SetDefaultSystemAccount()
	}

	// Setup our internal subscriptions.
	if err := s.setJetStreamExportSubs(); err != nil {
		return fmt.Errorf("Error setting up internal jetstream subscriptions: %v", err)
	}

	// Beta-warning banner.
	s.Warnf(" _ ___ _____ ___ _____ ___ ___ _ __ __")
	s.Warnf(" _ | | __|_ _/ __|_ _| _ \\ __| /_\\ | \\/ |")
	s.Warnf("| || | _| | | \\__ \\ | | | / _| / _ \\| |\\/| |")
	s.Warnf(" \\__/|___| |_| |___/ |_| |_|_\\___/_/ \\_\\_| |_|")
	s.Warnf("")
	s.Warnf(" _ _")
	s.Warnf(" | |__ ___| |_ __ _")
	s.Warnf(" | '_ \\/ -_) _/ _` |")
	s.Warnf(" |_.__/\\___|\\__\\__,_|")
	s.Warnf("")
	s.Warnf(" JetStream is a Beta feature")
	s.Warnf(" https://github.com/nats-io/jetstream")
	s.Noticef("")
	s.Noticef("----------- JETSTREAM -----------")
	s.Noticef(" Max Memory: %s", FriendlyBytes(cfg.MaxMemory))
	s.Noticef(" Max Storage: %s", FriendlyBytes(cfg.MaxStore))
	s.Noticef(" Store Directory: %q", cfg.StoreDir)

	// Setup our internal system exports.
	sacc := s.SystemAccount()
	// FIXME(dlc) - Should we lock these down?
	s.Debugf(" Exports:")
	for _, export := range allJsExports {
		s.Debugf(" %s", export)
		if err := sacc.AddServiceExport(export, nil); err != nil {
			return fmt.Errorf("Error setting up jetstream service exports: %v", err)
		}
	}
	s.Noticef("----------------------------------------")

	// If we have no configured accounts setup then setup imports on global account.
	if s.globalAccountOnly() {
		if err := s.GlobalAccount().EnableJetStream(nil); err != nil {
			return fmt.Errorf("Error enabling jetstream on the global account")
		}
	} else if err := s.configAllJetStreamAccounts(); err != nil {
		return fmt.Errorf("Error enabling jetstream on configured accounts: %v", err)
	}

	return nil
}
// enableAllJetStreamServiceImports turns on all service imports for jetstream for this account.
func (a *Account) enableAllJetStreamServiceImports() error {
	a.mu.RLock()
	srv := a.srv
	a.mu.RUnlock()
	if srv == nil {
		return fmt.Errorf("jetstream account not registered")
	}
	// Drop any previously registered info import before adding the full set.
	a.removeServiceImport(JSApiAccountInfo)
	sysAcc := srv.SystemAccount()
	for _, subject := range allJsExports {
		if a.serviceImportExists(sysAcc, subject) {
			continue
		}
		if err := a.AddServiceImport(sysAcc, subject, _EMPTY_); err != nil {
			return fmt.Errorf("Error setting up jetstream service imports for account: %v", err)
		}
	}
	return nil
}
// enableJetStreamInfoServiceImportOnly will enable the single service import responder.
// Should we do them all regardless?
func (a *Account) enableJetStreamInfoServiceImportOnly() error {
	a.mu.RLock()
	srv := a.srv
	a.mu.RUnlock()
	if srv == nil {
		return fmt.Errorf("jetstream account not registered")
	}
	// Only the account-info endpoint is imported; the full set is added when
	// JetStream is actually enabled for the account.
	err := a.AddServiceImport(srv.SystemAccount(), JSApiAccountInfo, _EMPTY_)
	if err != nil {
		return fmt.Errorf("Error setting up jetstream service imports for account: %v", err)
	}
	return nil
}
// configJetStream applies the pending per-account JetStream settings captured
// in acc.jsLimits. When limits are present the account is enabled (or its
// limits updated if already enabled); when absent, any enabled JetStream is
// disabled and only the info service import is left in place.
func (s *Server) configJetStream(acc *Account) error {
	if acc.jsLimits != nil {
		// Check if already enabled. This can be during a reload.
		if acc.JetStreamEnabled() {
			if err := acc.enableAllJetStreamServiceImports(); err != nil {
				return err
			}
			if err := acc.UpdateJetStreamLimits(acc.jsLimits); err != nil {
				return err
			}
		} else if err := acc.EnableJetStream(acc.jsLimits); err != nil {
			return err
		}
		// Clear the staged limits once they have been applied.
		acc.jsLimits = nil
	} else if acc != s.SystemAccount() {
		if acc.JetStreamEnabled() {
			acc.DisableJetStream()
		}
		// We will setup basic service imports to respond to
		// requests if JS is enabled for this account.
		if err := acc.enableJetStreamInfoServiceImportOnly(); err != nil {
			return err
		}
	}
	return nil
}
// configAllJetStreamAccounts walk all configured accounts and turn on jetstream if requested.
func (s *Server) configAllJetStreamAccounts() error {
	// Check to see if system account has been enabled. We could arrive here via reload and
	// a non-default system account.
	if sacc := s.SystemAccount(); sacc != nil && !sacc.IsExportService(JSApiAccountInfo) {
		for _, export := range allJsExports {
			if err := sacc.AddServiceExport(export, nil); err != nil {
				return fmt.Errorf("Error setting up jetstream service exports: %v", err)
			}
		}
	}
	// Snapshot into our own list. Might not be needed.
	s.mu.Lock()
	// Bail if server not enabled. If it was enabled and a reload turns it off
	// that will be handled elsewhere.
	if s.js == nil {
		s.mu.Unlock()
		return nil
	}
	var jsAccounts []*Account
	s.accounts.Range(func(k, v interface{}) bool {
		jsAccounts = append(jsAccounts, v.(*Account))
		return true
	})
	s.mu.Unlock()
	// Process any jetstream enabled accounts here. Done outside the server
	// lock since configJetStream may take account/js locks itself.
	for _, acc := range jsAccounts {
		if err := s.configJetStream(acc); err != nil {
			return err
		}
	}
	return nil
}
// JetStreamEnabled reports if jetstream is enabled.
func (s *Server) JetStreamEnabled() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.js != nil
}
// Shutdown jetstream for this server. Disables JetStream for every tracked
// account and clears the server's jetStream state.
func (s *Server) shutdownJetStream() {
	s.mu.Lock()
	if s.js == nil {
		s.mu.Unlock()
		return
	}
	// Stack-allocated backing array to avoid a heap allocation for the
	// common case of a modest number of accounts.
	var _jsa [512]*jsAccount
	jsas := _jsa[:0]
	// Collect accounts.
	for _, jsa := range s.js.accounts {
		jsas = append(jsas, jsa)
	}
	s.mu.Unlock()
	// Disable outside the server lock: disableJetStream takes the js lock
	// and tears down streams, which can be slow.
	// NOTE(review): s.js is read here without holding s.mu — presumably safe
	// because shutdown is single-threaded at this point; confirm.
	for _, jsa := range jsas {
		s.js.disableJetStream(jsa)
	}
	s.mu.Lock()
	s.js.accounts = nil
	s.js = nil
	s.mu.Unlock()
}
// JetStreamConfig will return the current config. Useful if the system
// created a dynamic configuration. A copy is returned.
func (s *Server) JetStreamConfig() *JetStreamConfig {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.js == nil {
		return nil
	}
	// Return a copy so callers cannot mutate our live config.
	cfg := s.js.config
	return &cfg
}
// JetStreamNumAccounts returns the number of enabled accounts this server is tracking.
func (s *Server) JetStreamNumAccounts() int {
	if js := s.getJetStream(); js != nil {
		js.mu.Lock()
		defer js.mu.Unlock()
		return len(js.accounts)
	}
	return 0
}
// JetStreamReservedResources returns the reserved resources if JetStream is enabled.
func (s *Server) JetStreamReservedResources() (int64, int64, error) {
	js := s.getJetStream()
	if js == nil {
		return -1, -1, ErrJetStreamNotEnabled
	}
	js.mu.RLock()
	mem, store := js.memReserved, js.storeReserved
	js.mu.RUnlock()
	return mem, store, nil
}
// getJetStream returns the server's jetStream state under the server lock.
func (s *Server) getJetStream() *jetStream {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.js
}
// EnableJetStream will enable JetStream on this account with the defined limits.
// This is a helper for JetStreamEnableAccount.
//
// Passing nil limits grants dynamically-computed limits. After registering the
// account it performs a one-time directory-layout upgrade and then recovers
// persisted state from disk in order: templates, streams, then consumers.
// Per-entity recovery errors are logged and skipped; only setup errors abort.
func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error {
	a.mu.RLock()
	s := a.srv
	a.mu.RUnlock()
	if s == nil {
		return fmt.Errorf("jetstream account not registered")
	}
	// FIXME(dlc) - cluster mode
	js := s.getJetStream()
	if js == nil {
		return ErrJetStreamNotEnabled
	}
	if s.SystemAccount() == a {
		return fmt.Errorf("jetstream can not be enabled on the system account")
	}
	// No limits means we dynamically set up limits.
	if limits == nil {
		limits = js.dynamicAccountLimits()
	}
	js.mu.Lock()
	// Check the limits against existing reservations.
	if _, ok := js.accounts[a]; ok {
		js.mu.Unlock()
		return fmt.Errorf("jetstream already enabled for account")
	}
	if err := js.sufficientResources(limits); err != nil {
		js.mu.Unlock()
		return err
	}
	jsa := &jsAccount{js: js, account: a, limits: *limits, streams: make(map[string]*Stream)}
	jsa.storeDir = path.Join(js.config.StoreDir, a.Name)
	js.accounts[a] = jsa
	js.reserveResources(limits)
	js.mu.Unlock()
	// Stamp inside account as well.
	a.mu.Lock()
	a.js = jsa
	a.mu.Unlock()
	// Create the proper imports here.
	if err := a.enableAllJetStreamServiceImports(); err != nil {
		return err
	}
	s.Debugf("Enabled JetStream for account %q", a.Name)
	s.Debugf("  Max Memory:      %s", FriendlyBytes(limits.MaxMemory))
	s.Debugf("  Max Storage:     %s", FriendlyBytes(limits.MaxStore))
	// Do quick fixup here for new directory structure.
	// TODO(dlc) - We can remove once we do MVP IMO.
	sdir := path.Join(jsa.storeDir, streamsDir)
	if _, err := os.Stat(sdir); os.IsNotExist(err) {
		// If we are here that means this is old school directory, upgrade in place.
		s.Noticef("  Upgrading storage directory structure for %q", a.Name)
		omdirs, _ := ioutil.ReadDir(jsa.storeDir)
		if err := os.MkdirAll(sdir, 0755); err != nil {
			return fmt.Errorf("could not create storage streams directory - %v", err)
		}
		// Move each old top-level stream directory under streamsDir.
		for _, fi := range omdirs {
			os.Rename(path.Join(jsa.storeDir, fi.Name()), path.Join(sdir, fi.Name()))
		}
	}
	// Restore any state here.
	s.Noticef("  Recovering JetStream state for account %q", a.Name)
	// Check templates first since message sets will need proper ownership.
	// FIXME(dlc) - Make this consistent.
	tdir := path.Join(jsa.storeDir, tmplsDir)
	if stat, err := os.Stat(tdir); err == nil && stat.IsDir() {
		key := sha256.Sum256([]byte("templates"))
		hh, err := highwayhash.New64(key[:])
		if err != nil {
			return err
		}
		fis, _ := ioutil.ReadDir(tdir)
		for _, fi := range fis {
			// Each template dir holds a JSON metafile plus its checksum.
			metafile := path.Join(tdir, fi.Name(), JetStreamMetaFile)
			metasum := path.Join(tdir, fi.Name(), JetStreamMetaFileSum)
			buf, err := ioutil.ReadFile(metafile)
			if err != nil {
				s.Warnf("  Error reading StreamTemplate metafile %q: %v", metasum, err)
				continue
			}
			if _, err := os.Stat(metasum); os.IsNotExist(err) {
				s.Warnf("  Missing StreamTemplate checksum for %q", metasum)
				continue
			}
			sum, err := ioutil.ReadFile(metasum)
			if err != nil {
				s.Warnf("  Error reading StreamTemplate checksum %q: %v", metasum, err)
				continue
			}
			// hh is shared across templates, so reset before each use.
			hh.Reset()
			hh.Write(buf)
			checksum := hex.EncodeToString(hh.Sum(nil))
			if checksum != string(sum) {
				s.Warnf("  StreamTemplate checksums do not match %q vs %q", sum, checksum)
				continue
			}
			var cfg StreamTemplateConfig
			if err := json.Unmarshal(buf, &cfg); err != nil {
				s.Warnf("  Error unmarshalling StreamTemplate metafile: %v", err)
				continue
			}
			// AddStreamTemplate requires an empty embedded config name.
			cfg.Config.Name = _EMPTY_
			if _, err := a.AddStreamTemplate(&cfg); err != nil {
				s.Warnf("  Error recreating StreamTemplate %q: %v", cfg.Name, err)
				continue
			}
		}
	}
	// Now recover the streams.
	fis, _ := ioutil.ReadDir(sdir)
	for _, fi := range fis {
		mdir := path.Join(sdir, fi.Name())
		// Checksum key is derived from the stream's directory name.
		key := sha256.Sum256([]byte(fi.Name()))
		hh, err := highwayhash.New64(key[:])
		if err != nil {
			return err
		}
		metafile := path.Join(mdir, JetStreamMetaFile)
		metasum := path.Join(mdir, JetStreamMetaFileSum)
		if _, err := os.Stat(metafile); os.IsNotExist(err) {
			s.Warnf("  Missing Stream metafile for %q", metafile)
			continue
		}
		buf, err := ioutil.ReadFile(metafile)
		if err != nil {
			s.Warnf("  Error reading metafile %q: %v", metasum, err)
			continue
		}
		if _, err := os.Stat(metasum); os.IsNotExist(err) {
			s.Warnf("  Missing Stream checksum for %q", metasum)
			continue
		}
		sum, err := ioutil.ReadFile(metasum)
		if err != nil {
			s.Warnf("  Error reading Stream metafile checksum %q: %v", metasum, err)
			continue
		}
		hh.Write(buf)
		checksum := hex.EncodeToString(hh.Sum(nil))
		if checksum != string(sum) {
			s.Warnf("  Stream metafile checksums do not match %q vs %q", sum, checksum)
			continue
		}
		var cfg FileStreamInfo
		if err := json.Unmarshal(buf, &cfg); err != nil {
			s.Warnf("  Error unmarshalling Stream metafile: %v", err)
			continue
		}
		// Re-link the stream to its owning template before creation so
		// template ownership checks pass.
		if cfg.Template != _EMPTY_ {
			if err := jsa.addStreamNameToTemplate(cfg.Template, cfg.Name); err != nil {
				s.Warnf("  Error adding Stream %q to Template %q: %v", cfg.Name, cfg.Template, err)
			}
		}
		mset, err := a.AddStream(&cfg.StreamConfig)
		if err != nil {
			s.Warnf("  Error recreating Stream %q: %v", cfg.Name, err)
			continue
		}
		if !cfg.Created.IsZero() {
			mset.setCreated(cfg.Created)
		}
		stats := mset.State()
		s.Noticef("  Restored %s messages for Stream %q", comma(int64(stats.Msgs)), fi.Name())
		// Now do the consumers.
		odir := path.Join(sdir, fi.Name(), consumerDir)
		ofis, _ := ioutil.ReadDir(odir)
		if len(ofis) > 0 {
			s.Noticef("  Recovering %d Consumers for Stream - %q", len(ofis), fi.Name())
		}
		for _, ofi := range ofis {
			metafile := path.Join(odir, ofi.Name(), JetStreamMetaFile)
			metasum := path.Join(odir, ofi.Name(), JetStreamMetaFileSum)
			if _, err := os.Stat(metafile); os.IsNotExist(err) {
				s.Warnf("    Missing Consumer Metafile %q", metafile)
				continue
			}
			buf, err := ioutil.ReadFile(metafile)
			if err != nil {
				s.Warnf("    Error reading consumer metafile %q: %v", metasum, err)
				continue
			}
			if _, err := os.Stat(metasum); os.IsNotExist(err) {
				s.Warnf("    Missing Consumer checksum for %q", metasum)
				continue
			}
			var cfg FileConsumerInfo
			if err := json.Unmarshal(buf, &cfg); err != nil {
				s.Warnf("    Error unmarshalling Consumer metafile: %v", err)
				continue
			}
			isEphemeral := !isDurableConsumer(&cfg.ConsumerConfig)
			if isEphemeral {
				// This is an ephemeral consumer and this could fail on restart until
				// the consumer can reconnect. We will create it as a durable and switch it.
				cfg.ConsumerConfig.Durable = ofi.Name()
			}
			obs, err := mset.AddConsumer(&cfg.ConsumerConfig)
			if err != nil {
				s.Warnf("    Error adding Consumer: %v", err)
				continue
			}
			if isEphemeral {
				obs.switchToEphemeral()
			}
			if !cfg.Created.IsZero() {
				obs.setCreated(cfg.Created)
			}
			if err := obs.readStoredState(); err != nil {
				s.Warnf("    Error restoring Consumer state: %v", err)
			}
		}
	}
	// Make sure to cleanup and old remaining snapshots.
	os.RemoveAll(path.Join(jsa.storeDir, snapsDir))
	s.Noticef("JetStream state for account %q recovered", a.Name)
	return nil
}
// NumStreams will return how many streams we have.
func (a *Account) NumStreams() int {
	a.mu.RLock()
	jsa := a.js
	a.mu.RUnlock()
	if jsa == nil {
		return 0
	}
	jsa.mu.Lock()
	defer jsa.mu.Unlock()
	return len(jsa.streams)
}
// Streams will return all known streams.
func (a *Account) Streams() []*Stream {
	// An empty filter matches every stream.
	return a.filteredStreams(_EMPTY_)
}
// filteredStreams returns the streams whose subjects collide with filter,
// or all streams when filter is empty.
func (a *Account) filteredStreams(filter string) []*Stream {
	a.mu.RLock()
	jsa := a.js
	a.mu.RUnlock()
	if jsa == nil {
		return nil
	}
	jsa.mu.Lock()
	defer jsa.mu.Unlock()
	var matched []*Stream
	for _, mset := range jsa.streams {
		if filter == _EMPTY_ {
			matched = append(matched, mset)
			continue
		}
		for _, subj := range mset.config.Subjects {
			if SubjectsCollide(filter, subj) {
				matched = append(matched, mset)
				break
			}
		}
	}
	return matched
}
// LookupStream will lookup a stream by name.
func (a *Account) LookupStream(name string) (*Stream, error) {
	a.mu.RLock()
	jsa := a.js
	a.mu.RUnlock()
	if jsa == nil {
		return nil, ErrJetStreamNotEnabled
	}
	jsa.mu.Lock()
	defer jsa.mu.Unlock()
	if mset, ok := jsa.streams[name]; ok {
		return mset, nil
	}
	return nil, ErrJetStreamStreamNotFound
}
// UpdateJetStreamLimits will update the account limits for a JetStream enabled account.
// Passing nil limits applies dynamically-computed limits. The server-wide
// reservation is adjusted by the delta between old and new limits.
func (a *Account) UpdateJetStreamLimits(limits *JetStreamAccountLimits) error {
	a.mu.RLock()
	s := a.srv
	jsa := a.js
	a.mu.RUnlock()
	if s == nil {
		return fmt.Errorf("jetstream account not registered")
	}
	js := s.getJetStream()
	if js == nil {
		return ErrJetStreamNotEnabled
	}
	if jsa == nil {
		return ErrJetStreamNotEnabledForAccount
	}
	if limits == nil {
		limits = js.dynamicAccountLimits()
	}
	// Calculate the delta between what we have and what we want.
	jsa.mu.Lock()
	dl := diffCheckedLimits(&jsa.limits, limits)
	jsaLimits := jsa.limits
	jsa.mu.Unlock()
	js.mu.Lock()
	// Check the limits against existing reservations. Only the delta needs
	// to fit, since the current reservation will be released below.
	if err := js.sufficientResources(&dl); err != nil {
		js.mu.Unlock()
		return err
	}
	// FIXME(dlc) - If we drop and are over the max on memory or store, do we delete??
	js.releaseResources(&jsaLimits)
	js.reserveResources(limits)
	js.mu.Unlock()
	// Update
	jsa.mu.Lock()
	jsa.limits = *limits
	jsa.mu.Unlock()
	return nil
}
// diffCheckedLimits returns the memory/store delta going from limits a to b.
func diffCheckedLimits(a, b *JetStreamAccountLimits) JetStreamAccountLimits {
	var delta JetStreamAccountLimits
	delta.MaxMemory = b.MaxMemory - a.MaxMemory
	delta.MaxStore = b.MaxStore - a.MaxStore
	return delta
}
// JetStreamUsage reports on JetStream usage and limits for an account.
// A zero-valued stats struct is returned when JetStream is not enabled.
func (a *Account) JetStreamUsage() JetStreamAccountStats {
	a.mu.RLock()
	jsa := a.js
	a.mu.RUnlock()
	var stats JetStreamAccountStats
	if jsa == nil {
		return stats
	}
	jsa.mu.Lock()
	stats.Memory = uint64(jsa.memUsed)
	stats.Store = uint64(jsa.storeUsed)
	stats.Streams = len(jsa.streams)
	stats.Limits = jsa.limits
	jsa.mu.Unlock()
	return stats
}
// DisableJetStream will disable JetStream for this account.
// The account's js pointer is cleared first so new operations fail fast,
// then service imports are removed, then the jetStream-side state is torn down.
func (a *Account) DisableJetStream() error {
	a.mu.Lock()
	s := a.srv
	a.js = nil
	a.mu.Unlock()
	if s == nil {
		return fmt.Errorf("jetstream account not registered")
	}
	js := s.getJetStream()
	if js == nil {
		return ErrJetStreamNotEnabled
	}
	// Remove service imports.
	for _, export := range allJsExports {
		a.removeServiceImport(export)
	}
	return js.disableJetStream(js.lookupAccount(a))
}
// Disable JetStream for the account.
func (js *jetStream) disableJetStream(jsa *jsAccount) error {
	if jsa == nil {
		return ErrJetStreamNotEnabledForAccount
	}
	js.mu.Lock()
	js.releaseResources(&jsa.limits)
	delete(js.accounts, jsa.account)
	js.mu.Unlock()
	// Tear down streams/templates outside the jetStream lock.
	jsa.delete()
	return nil
}
// JetStreamEnabled is a helper to determine if jetstream is enabled for an account.
// Safe to call on a nil account.
func (a *Account) JetStreamEnabled() bool {
	if a == nil {
		return false
	}
	a.mu.RLock()
	defer a.mu.RUnlock()
	return a.js != nil
}
// Updates accounting on in use memory and storage.
func (jsa *jsAccount) updateUsage(storeType StorageType, delta int64) {
	// TODO(dlc) - atomics? snapshot limits?
	jsa.mu.Lock()
	defer jsa.mu.Unlock()
	switch storeType {
	case MemoryStorage:
		jsa.memUsed += delta
	default:
		jsa.storeUsed += delta
	}
}
// limitsExceeded reports whether current usage for the given storage type is
// over the account limit. A limit <= 0 is treated as unlimited.
func (jsa *jsAccount) limitsExceeded(storeType StorageType) bool {
	jsa.mu.Lock()
	defer jsa.mu.Unlock()
	if storeType == MemoryStorage {
		return jsa.limits.MaxMemory > 0 && jsa.memUsed > jsa.limits.MaxMemory
	}
	return jsa.limits.MaxStore > 0 && jsa.storeUsed > jsa.limits.MaxStore
}
// Check if a new proposed msg set will exceed our account limits.
// Lock should be held.
func (jsa *jsAccount) checkLimits(config *StreamConfig) error {
	if jsa.limits.MaxStreams > 0 && len(jsa.streams) >= jsa.limits.MaxStreams {
		return fmt.Errorf("maximum number of streams reached")
	}
	// FIXME(dlc) - Add check here for replicas based on clustering.
	if config.Replicas != 1 {
		return fmt.Errorf("replicas setting of %d not allowed", config.Replicas)
	}
	// Check MaxConsumers
	if config.MaxConsumers > 0 && jsa.limits.MaxConsumers > 0 && config.MaxConsumers > jsa.limits.MaxConsumers {
		return fmt.Errorf("maximum consumers exceeds account limit")
	}
	// Unbounded byte size needs no storage check.
	if config.MaxBytes <= 0 {
		return nil
	}
	// Check storage, memory or disk, accounting for replicas.
	return jsa.checkBytesLimits(config.MaxBytes*int64(config.Replicas), config.Storage)
}
// Check if additional bytes will exceed our account limits.
// This should account for replicas.
// Lock should be held.
// NOTE(review): a MaxMemory/MaxStore of 0 rejects any addBytes here, unlike
// limitsExceeded where 0 means unlimited — confirm this asymmetry is intended.
func (jsa *jsAccount) checkBytesLimits(addBytes int64, storage StorageType) error {
	if storage == MemoryStorage {
		if jsa.memReserved+addBytes > jsa.limits.MaxMemory {
			return fmt.Errorf("insufficient memory resources available")
		}
	} else if storage == FileStorage {
		if jsa.storeReserved+addBytes > jsa.limits.MaxStore {
			return fmt.Errorf("insufficient storage resources available")
		}
	}
	return nil
}
// acc returns the owning account under the jsAccount read lock.
func (jsa *jsAccount) acc() *Account {
	jsa.mu.RLock()
	defer jsa.mu.RUnlock()
	return jsa.account
}
// Delete the JetStream resources: stop all streams and remove all templates.
// Snapshots are taken under the lock; the actual teardown runs unlocked since
// stream stop and template deletion take other locks.
func (jsa *jsAccount) delete() {
	jsa.mu.Lock()
	var streams []*Stream
	for _, ms := range jsa.streams {
		streams = append(streams, ms)
	}
	acc := jsa.account
	var tnames []string
	for _, t := range jsa.templates {
		tnames = append(tnames, t.Name)
	}
	jsa.templates = nil
	jsa.mu.Unlock()
	for _, ms := range streams {
		ms.stop(false)
	}
	for _, name := range tnames {
		acc.DeleteStreamTemplate(name)
	}
}
// Lookup the jetstream account for a given account.
func (js *jetStream) lookupAccount(a *Account) *jsAccount {
	js.mu.RLock()
	defer js.mu.RUnlock()
	return js.accounts[a]
}
// Will dynamically create limits for this account.
func (js *jetStream) dynamicAccountLimits() *JetStreamAccountLimits {
	js.mu.RLock()
	defer js.mu.RUnlock()
	// For now use all server resources; streams and consumers unlimited.
	// Mostly meant for $G in non-account mode.
	return &JetStreamAccountLimits{
		MaxMemory:    js.config.MaxMemory,
		MaxStore:     js.config.MaxStore,
		MaxStreams:   -1,
		MaxConsumers: -1,
	}
}
// Check to see if we have enough system resources for this account.
// Lock should be held.
func (js *jetStream) sufficientResources(limits *JetStreamAccountLimits) error {
	if limits == nil {
		return nil
	}
	if js.config.MaxMemory < js.memReserved+limits.MaxMemory {
		return fmt.Errorf("insufficient memory resources available")
	}
	if js.config.MaxStore < js.storeReserved+limits.MaxStore {
		return fmt.Errorf("insufficient storage resources available")
	}
	return nil
}
// This will (blindly) reserve the resources requested.
// Lock should be held.
func (js *jetStream) reserveResources(limits *JetStreamAccountLimits) error {
	if limits == nil {
		return nil
	}
	// Non-positive values are treated as unlimited and not reserved.
	if mem := limits.MaxMemory; mem > 0 {
		js.memReserved += mem
	}
	if store := limits.MaxStore; store > 0 {
		js.storeReserved += store
	}
	return nil
}
// releaseResources gives back a previous reservation.
// Lock should be held.
func (js *jetStream) releaseResources(limits *JetStreamAccountLimits) error {
	if limits == nil {
		return nil
	}
	// Mirrors reserveResources: only positive limits were reserved.
	if mem := limits.MaxMemory; mem > 0 {
		js.memReserved -= mem
	}
	if store := limits.MaxStore; store > 0 {
		js.storeReserved -= store
	}
	return nil
}
// Will clear the resource reservations. Mostly for reload of a config.
func (js *jetStream) clearResources() {
	if js == nil {
		return
	}
	js.mu.Lock()
	defer js.mu.Unlock()
	js.memReserved, js.storeReserved = 0, 0
}
const (
	// JetStreamStoreDir is the directory name appended under the configured
	// store path (or a temp dir) for all JetStream state.
	JetStreamStoreDir = "jetstream"
	// JetStreamMaxStoreDefault is the default disk storage limit. 1TB
	JetStreamMaxStoreDefault = 1024 * 1024 * 1024 * 1024
	// JetStreamMaxMemDefault is only used when we can't determine system memory. 256MB
	JetStreamMaxMemDefault = 1024 * 1024 * 256
)
// Dynamically create a config. Uses the given store dir (or a fresh temp dir
// when empty) and sizes memory to 75% of detected system memory.
func (s *Server) dynJetStreamConfig(storeDir string, maxStore int64) *JetStreamConfig {
	jsc := &JetStreamConfig{}
	if storeDir == "" {
		tdir, _ := ioutil.TempDir(os.TempDir(), "nats-jetstream-storedir-")
		jsc.StoreDir = filepath.Join(tdir, JetStreamStoreDir)
	} else {
		jsc.StoreDir = filepath.Join(storeDir, JetStreamStoreDir)
	}
	if maxStore > 0 {
		jsc.MaxStore = maxStore
	} else {
		jsc.MaxStore = diskAvailable(jsc.StoreDir)
	}
	if sysMem := sysmem.Memory(); sysMem > 0 {
		// 75% of total system memory.
		jsc.MaxMemory = sysMem / 4 * 3
	} else {
		jsc.MaxMemory = JetStreamMaxMemDefault
	}
	return jsc
}
// checkForJetStream is a helper that returns the server and jsAccount for
// this account, or an error if JetStream is not properly set up.
func (a *Account) checkForJetStream() (*Server, *jsAccount, error) {
	a.mu.RLock()
	s, jsa := a.srv, a.js
	a.mu.RUnlock()
	switch {
	case s == nil:
		return nil, nil, fmt.Errorf("jetstream account not registered")
	case jsa == nil:
		return nil, nil, ErrJetStreamNotEnabledForAccount
	}
	return s, jsa, nil
}
// StreamTemplateConfig allows a configuration to auto-create streams based on this template when a message
// is received that matches. Each new stream will use the config as the template config to create them.
type StreamTemplateConfig struct {
	// Name identifies the template; must be unique within the account.
	Name string `json:"name"`
	// Config is the blueprint stream configuration; its Name must be empty
	// on the stored template (filled in per created stream).
	Config *StreamConfig `json:"config"`
	// MaxStreams caps how many streams this template may create.
	MaxStreams uint32 `json:"max_streams"`
}
// StreamTemplateInfo reports a template's configuration along with the names
// of the streams it has created so far.
type StreamTemplateInfo struct {
	Config  *StreamTemplateConfig `json:"config"`
	Streams []string              `json:"streams"`
}
// StreamTemplate is the runtime state for a stream template.
type StreamTemplate struct {
	mu  sync.Mutex // guards streams below
	tc  *client    // internal client holding the template trap subscriptions
	jsa *jsAccount // owning JetStream account
	*StreamTemplateConfig
	// streams holds the names of streams created from this template.
	streams []string
}
func (t *StreamTemplateConfig) deepCopy() *StreamTemplateConfig {
copy := *t
cfg := *t.Config
copy.Config = &cfg
return ©
}
// AddStreamTemplate will add a stream template to this account that allows auto-creation of streams.
// Registration order matters: the template is placed in jsa.templates before
// subscriptions are created, and persisted last (with rollback via Delete on
// store failure).
func (a *Account) AddStreamTemplate(tc *StreamTemplateConfig) (*StreamTemplate, error) {
	s, jsa, err := a.checkForJetStream()
	if err != nil {
		return nil, err
	}
	// The embedded stream config name must be empty; it is generated per
	// created stream.
	if tc.Config.Name != "" {
		return nil, fmt.Errorf("template config name should be empty")
	}
	if len(tc.Name) > JSMaxNameLen {
		return nil, fmt.Errorf("template name is too long, maximum allowed is %d", JSMaxNameLen)
	}
	// FIXME(dlc) - Hacky
	// Use a placeholder name so checkStreamCfg validation passes.
	tcopy := tc.deepCopy()
	tcopy.Config.Name = "_"
	cfg, err := checkStreamCfg(tcopy.Config)
	if err != nil {
		return nil, err
	}
	tcopy.Config = &cfg
	t := &StreamTemplate{
		StreamTemplateConfig: tcopy,
		tc:                   s.createInternalJetStreamClient(),
		jsa:                  jsa,
	}
	t.tc.registerWithAccount(a)
	jsa.mu.Lock()
	if jsa.templates == nil {
		jsa.templates = make(map[string]*StreamTemplate)
		// Create the appropriate store lazily on first template.
		if cfg.Storage == FileStorage {
			jsa.store = newTemplateFileStore(jsa.storeDir)
		} else {
			jsa.store = newTemplateMemStore()
		}
	} else if _, ok := jsa.templates[tcopy.Name]; ok {
		jsa.mu.Unlock()
		return nil, fmt.Errorf("template with name %q already exists", tcopy.Name)
	}
	jsa.templates[tcopy.Name] = t
	jsa.mu.Unlock()
	// FIXME(dlc) - we can not overlap subjects between templates. Need to have test.
	// Setup the internal subscriptions to trap the messages.
	if err := t.createTemplateSubscriptions(); err != nil {
		return nil, err
	}
	// Persist; on failure roll back everything via Delete.
	if err := jsa.store.Store(t); err != nil {
		t.Delete()
		return nil, err
	}
	return t, nil
}
// createTemplateSubscriptions installs the trap subscriptions (one per
// template subject) that auto-create streams on matching messages. On any
// subscription failure the template is removed from the account.
func (t *StreamTemplate) createTemplateSubscriptions() error {
	if t == nil {
		return fmt.Errorf("no template")
	}
	c := t.tc
	if c == nil {
		return fmt.Errorf("template not enabled")
	}
	if !c.srv.eventsEnabled() {
		return ErrNoSysAccount
	}
	for i, subject := range t.Config.Subjects {
		// Subscription ids start at 1.
		sid := strconv.Itoa(i + 1)
		if _, err := c.processSub([]byte(subject), nil, []byte(sid), t.processInboundTemplateMsg, false); err != nil {
			c.acc.DeleteStreamTemplate(t.Name)
			return err
		}
	}
	return nil
}
// processInboundTemplateMsg is the subscription callback for template trap
// subjects. If no stream exists for the (canonicalized) subject it creates
// one from the template config — subject to MaxStreams — and then delivers
// the triggering message into the new stream.
func (t *StreamTemplate) processInboundTemplateMsg(_ *subscription, _ *client, subject, reply string, msg []byte) {
	if t == nil || t.jsa == nil {
		return
	}
	jsa := t.jsa
	cn := CanonicalName(subject)
	jsa.mu.Lock()
	// If we already are registered then we can just return here.
	if _, ok := jsa.streams[cn]; ok {
		jsa.mu.Unlock()
		return
	}
	acc := jsa.account
	jsa.mu.Unlock()
	// Check if we are at the maximum and grab some variables.
	// The name is reserved optimistically under the lock; validateStreams
	// below prunes it again if creation fails.
	t.mu.Lock()
	c := t.tc
	cfg := *t.Config
	cfg.Template = t.Name
	atLimit := len(t.streams) >= int(t.MaxStreams)
	if !atLimit {
		t.streams = append(t.streams, cn)
	}
	t.mu.Unlock()
	if atLimit {
		c.Warnf("JetStream could not create stream for account %q on subject %q, at limit", acc.Name, subject)
		return
	}
	// We need to create the stream here.
	// Change the config from the template and only use literal subject.
	cfg.Name = cn
	cfg.Subjects = []string{subject}
	mset, err := acc.AddStream(&cfg)
	if err != nil {
		acc.validateStreams(t)
		c.Warnf("JetStream could not create stream for account %q on subject %q", acc.Name, subject)
		return
	}
	// Process this message directly by invoking mset.
	mset.processInboundJetStreamMsg(nil, nil, subject, reply, msg)
}
// LookupStreamTemplate looks up the named stream template.
func (a *Account) LookupStreamTemplate(name string) (*StreamTemplate, error) {
	_, jsa, err := a.checkForJetStream()
	if err != nil {
		return nil, err
	}
	jsa.mu.Lock()
	defer jsa.mu.Unlock()
	// A lookup on a nil map simply misses, covering the uninitialized case.
	if t, ok := jsa.templates[name]; ok {
		return t, nil
	}
	return nil, fmt.Errorf("template not found")
}
// This function will check all named streams and make sure they are valid,
// dropping any names whose stream no longer exists.
func (a *Account) validateStreams(t *StreamTemplate) {
	t.mu.Lock()
	defer t.mu.Unlock()
	var kept []string
	for _, sname := range t.streams {
		if _, err := a.LookupStream(sname); err == nil {
			kept = append(kept, sname)
		}
	}
	t.streams = kept
}
// Delete removes the template from its account, closes its internal client,
// deletes it from the template store, and deletes every stream it created.
// Returns the last stream-deletion error, if any.
func (t *StreamTemplate) Delete() error {
	if t == nil {
		return fmt.Errorf("nil stream template")
	}
	t.mu.Lock()
	jsa := t.jsa
	c := t.tc
	// Clear tc so the template is marked disabled; close the client on exit.
	t.tc = nil
	defer func() {
		if c != nil {
			c.closeConnection(ClientClosed)
		}
	}()
	t.mu.Unlock()
	if jsa == nil {
		return ErrJetStreamNotEnabled
	}
	jsa.mu.Lock()
	if jsa.templates == nil {
		jsa.mu.Unlock()
		return fmt.Errorf("template not found")
	}
	if _, ok := jsa.templates[t.Name]; !ok {
		jsa.mu.Unlock()
		return fmt.Errorf("template not found")
	}
	delete(jsa.templates, t.Name)
	acc := jsa.account
	jsa.mu.Unlock()
	// Remove streams associated with this template.
	var streams []*Stream
	t.mu.Lock()
	for _, name := range t.streams {
		if mset, err := acc.LookupStream(name); err == nil {
			streams = append(streams, mset)
		}
	}
	t.mu.Unlock()
	// NOTE(review): jsa.store is read here without the jsa lock — presumably
	// safe because store is only assigned once on first template creation;
	// confirm.
	if jsa.store != nil {
		if err := jsa.store.Delete(t); err != nil {
			return fmt.Errorf("error deleting template from store: %v", err)
		}
	}
	var lastErr error
	for _, mset := range streams {
		if err := mset.Delete(); err != nil {
			lastErr = err
		}
	}
	return lastErr
}
// DeleteStreamTemplate looks up the named template and deletes it.
func (a *Account) DeleteStreamTemplate(name string) error {
	t, err := a.LookupStreamTemplate(name)
	if err == nil {
		return t.Delete()
	}
	return err
}
// Templates returns all stream templates for the account, or nil when
// JetStream is not enabled (or none exist).
func (a *Account) Templates() []*StreamTemplate {
	_, jsa, err := a.checkForJetStream()
	if err != nil {
		return nil
	}
	jsa.mu.Lock()
	defer jsa.mu.Unlock()
	var ts []*StreamTemplate
	// FIXME(dlc) - Copy?
	for _, t := range jsa.templates {
		ts = append(ts, t)
	}
	return ts
}
// Will add a stream to a template, this is for recovery.
func (jsa *jsAccount) addStreamNameToTemplate(tname, mname string) error {
	// A lookup on a nil map misses, covering the uninitialized case.
	t, ok := jsa.templates[tname]
	if !ok {
		return fmt.Errorf("template not found")
	}
	t.mu.Lock()
	t.streams = append(t.streams, mname)
	t.mu.Unlock()
	return nil
}
// This will check if a template owns this stream.
// jsAccount lock should be held
func (jsa *jsAccount) checkTemplateOwnership(tname, sname string) bool {
	t, ok := jsa.templates[tname]
	if !ok {
		return false
	}
	// We found the template, make sure the stream name is registered in it.
	for _, s := range t.streams {
		if s == sname {
			return true
		}
	}
	return false
}
// FriendlyBytes returns a string with the given bytes int64
// represented as a size, such as 1KB, 10MB, etc...
func FriendlyBytes(bytes int64) string {
	const base = 1024.0
	fbytes := float64(bytes)
	if fbytes < base {
		return fmt.Sprintf("%v B", fbytes)
	}
	pre := []string{"K", "M", "G", "T", "P", "E"}
	exp := int(math.Log(fbytes) / math.Log(base))
	return fmt.Sprintf("%.2f %sB", fbytes/math.Pow(base, float64(exp)), pre[exp-1])
}
// isValidName reports whether name is non-empty and free of subject
// token/wildcard characters ('.', '*', '>').
func isValidName(name string) bool {
	return name != "" && !strings.ContainsAny(name, ".*>")
}
// CanonicalName will replace all token separators '.' with '_'.
// This can be used when naming streams or consumers with multi-token subjects.
func CanonicalName(name string) string {
	return strings.Join(strings.Split(name, "."), "_")
}
| 1 | 12,048 | Could be a non-public field. `allowNoSubject` | nats-io-nats-server | go |
@@ -588,12 +588,12 @@ func (c *Client) execute(tid int, target *core.BuildTarget, command *pb.Command,
} else if target.IsTextFile {
return c.buildTextFile(target, command, digest)
}
- return c.reallyExecute(tid, target, command, digest, needStdout, isTest)
+ return c.reallyExecute(tid, target, command, digest, needStdout, isTest, (c.state.ForceRerun && isTest) || (!isTest && c.state.ForceRebuild))
}
// reallyExecute is like execute but after the initial cache check etc.
// The action & sources must have already been uploaded.
-func (c *Client) reallyExecute(tid int, target *core.BuildTarget, command *pb.Command, digest *pb.Digest, needStdout, isTest bool) (*core.BuildMetadata, *pb.ActionResult, error) {
+func (c *Client) reallyExecute(tid int, target *core.BuildTarget, command *pb.Command, digest *pb.Digest, needStdout, isTest, skipCacheLookup bool) (*core.BuildMetadata, *pb.ActionResult, error) {
executing := false
updateProgress := func(metadata *pb.ExecuteOperationMetadata) {
if c.state.Config.Remote.DisplayURL != "" { | 1 | // Package remote provides our interface to the Google remote execution APIs
// (https://github.com/bazelbuild/remote-apis) which Please can use to distribute
// work to remote servers.
package remote
import (
"context"
"encoding/hex"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/client"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/digest"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/filemetadata"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/retry"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/uploadinfo"
fpb "github.com/bazelbuild/remote-apis/build/bazel/remote/asset/v1"
pb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
"github.com/bazelbuild/remote-apis/build/bazel/semver"
"github.com/golang/protobuf/ptypes"
"github.com/grpc-ecosystem/go-grpc-middleware/retry"
"golang.org/x/sync/errgroup"
"google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
"gopkg.in/op/go-logging.v1"
"github.com/thought-machine/please/src/core"
"github.com/thought-machine/please/src/fs"
)
var log = logging.MustGetLogger("remote")
// The API version we support.
var apiVersion = semver.SemVer{Major: 2}
// A Client is the interface to the remote API.
//
// It provides a higher-level interface over the specific RPCs available.
type Client struct {
client *client.Client
fetchClient fpb.FetchClient
initOnce sync.Once
state *core.BuildState
err error // for initialisation
instance string
// Stored output directories from previously executed targets.
// This isn't just a cache - it is needed for cases where we don't actually
// have the files physically on disk.
outputs map[core.BuildLabel]*pb.Directory
outputMutex sync.RWMutex
// The unstamped build action digests. Stamped and test digests are not stored.
// This isn't just a cache - it is needed because building a target can modify the target and things like plz hash
// --detailed and --shell will fail to get the right action digest.
unstampedBuildActionDigests actionDigestMap
// Used to control downloading targets (we must make sure we don't re-fetch them
// while another target is trying to use them).
//
// This map is of effective type `map[*core.BuildTarget]*pendingDownload`
downloads sync.Map
// Server-sent cache properties
maxBlobBatchSize int64
// Platform properties that we will request from the remote.
// TODO(peterebden): this will need some modification for cross-compiling support.
platform *pb.Platform
// Path to the shell to use to execute actions in.
shellPath string
// Stats used to report RPC data rates
byteRateIn, byteRateOut, totalBytesIn, totalBytesOut int
stats *statsHandler
// Used to store and retrieve action results to reduce RPC calls when re-building targets
mdStore buildMetadataStore
// Passed to various SDK functions.
fileMetadataCache filemetadata.Cache
// existingBlobs is used to track the set of existing blobs remotely.
existingBlobs map[string]struct{}
existingBlobMutex sync.Mutex
}
type actionDigestMap struct {
m sync.Map
}
func (m *actionDigestMap) Get(label core.BuildLabel) *pb.Digest {
d, ok := m.m.Load(label)
if !ok {
panic(fmt.Sprintf("could not find action digest for label: %s", label.String()))
}
return d.(*pb.Digest)
}
func (m *actionDigestMap) Put(label core.BuildLabel, actionDigest *pb.Digest) {
m.m.Store(label, actionDigest)
}
// A pendingDownload represents a pending download of a build target. It is used to
// ensure we only download each target exactly once.
type pendingDownload struct {
once sync.Once
err error // Any error if the download failed.
}
// New returns a new Client instance.
// It begins the process of contacting the remote server but does not wait for it.
func New(state *core.BuildState) *Client {
c := &Client{
state: state,
instance: state.Config.Remote.Instance,
outputs: map[core.BuildLabel]*pb.Directory{},
mdStore: newDirMDStore(time.Duration(state.Config.Remote.CacheDuration)),
existingBlobs: map[string]struct{}{
digest.Empty.Hash: {},
},
fileMetadataCache: filemetadata.NewNoopCache(),
shellPath: state.Config.Remote.Shell,
}
c.stats = newStatsHandler(c)
go c.CheckInitialised() // Kick off init now, but we don't have to wait for it.
return c
}
// CheckInitialised checks that the client has connected to the server correctly.
func (c *Client) CheckInitialised() error {
c.initOnce.Do(c.init)
return c.err
}
// init is passed to the sync.Once to do the actual initialisation.
func (c *Client) init() {
// Change grpc to log using our implementation
grpclog.SetLoggerV2(&grpcLogMabob{})
var g errgroup.Group
g.Go(c.initExec)
if c.state.Config.Remote.AssetURL != "" {
g.Go(c.initFetch)
}
c.err = g.Wait()
if c.err != nil {
log.Error("Error setting up remote execution client: %s", c.err)
}
}
// initExec initialiases the remote execution client.
func (c *Client) initExec() error {
// Create a copy of the state where we can modify the config
dialOpts, err := c.dialOpts()
if err != nil {
return err
}
client, err := client.NewClient(context.Background(), c.instance, client.DialParams{
Service: c.state.Config.Remote.URL,
CASService: c.state.Config.Remote.CASURL,
NoSecurity: !c.state.Config.Remote.Secure,
TransportCredsOnly: c.state.Config.Remote.Secure,
DialOpts: dialOpts,
}, client.UseBatchOps(true), &client.TreeSymlinkOpts{Preserved: true}, client.RetryTransient(), client.RPCTimeouts(map[string]time.Duration{
"default": time.Duration(c.state.Config.Remote.Timeout),
"GetCapabilities": 5 * time.Second,
"BatchUpdateBlobs": time.Minute,
"BatchReadBlobs": time.Minute,
"GetTree": time.Minute,
"Execute": 0,
"WaitExecution": 0,
}))
if err != nil {
return err
}
c.client = client
// Extend timeouts a bit, RetryTransient only gives about 1.5 seconds total which isn't
// necessarily very much if the other end needs to sort its life out.
c.client.Retrier.Backoff = retry.ExponentialBackoff(500*time.Millisecond, 5*time.Second, retry.Attempts(8))
// Query the server for its capabilities. This tells us whether it is capable of
// execution, caching or both.
resp, err := c.client.GetCapabilities(context.Background())
if err != nil {
return err
}
if lessThan(&apiVersion, resp.LowApiVersion) || lessThan(resp.HighApiVersion, &apiVersion) {
return fmt.Errorf("Unsupported API version; we require %s but server only supports %s - %s", printVer(&apiVersion), printVer(resp.LowApiVersion), printVer(resp.HighApiVersion))
}
caps := resp.CacheCapabilities
if caps == nil {
return fmt.Errorf("Cache capabilities not supported by server (we do not support execution-only servers)")
}
if err := c.chooseDigest(caps.DigestFunction); err != nil {
return err
}
c.maxBlobBatchSize = caps.MaxBatchTotalSizeBytes
if c.maxBlobBatchSize == 0 {
// No limit was set by the server, assume we are implicitly limited to 4MB (that's
// gRPC's limit which most implementations do not seem to override). Round it down a
// bit to allow a bit of serialisation overhead etc.
c.maxBlobBatchSize = 4000000
}
if c.shellPath == "" {
// We have to run everything through a shell since our commands are arbitrary.
// Unfortunately we can't just say "bash", we need an absolute path which is
// a bit weird since it assumes that our absolute path is the same as the
// remote one (which is probably OK on the same OS, but not between say Linux and
// FreeBSD where bash is not idiomatically in the same place).
bash, err := core.LookBuildPath("bash", c.state.Config)
if err != nil {
return fmt.Errorf("Failed to set path for bash: %w", err)
}
c.shellPath = bash
}
log.Debug("Remote execution client initialised for storage")
// Now check if it can do remote execution
if resp.ExecutionCapabilities == nil {
return fmt.Errorf("Remote execution is configured but the build server doesn't support it")
}
if err := c.chooseDigest([]pb.DigestFunction_Value{resp.ExecutionCapabilities.DigestFunction}); err != nil {
return err
} else if !resp.ExecutionCapabilities.ExecEnabled {
return fmt.Errorf("Remote execution not enabled for this server")
}
c.platform = convertPlatform(c.state.Config)
log.Debug("Remote execution client initialised for execution")
if c.state.Config.Remote.AssetURL == "" {
c.fetchClient = fpb.NewFetchClient(client.Connection)
}
return nil
}
// initFetch initialises the remote fetch server.
func (c *Client) initFetch() error {
dialOpts, err := c.dialOpts()
if err != nil {
return err
}
if c.state.Config.Remote.Secure {
dialOpts = append(dialOpts, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")))
} else {
dialOpts = append(dialOpts, grpc.WithInsecure())
}
conn, err := grpc.Dial(c.state.Config.Remote.AssetURL, append(dialOpts, grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor()))...)
if err != nil {
return fmt.Errorf("Failed to connect to the remote fetch server: %s", err)
}
c.fetchClient = fpb.NewFetchClient(conn)
return nil
}
// chooseDigest selects a digest function that we will use.w
func (c *Client) chooseDigest(fns []pb.DigestFunction_Value) error {
systemFn := c.digestEnum(c.state.Config.Build.HashFunction)
for _, fn := range fns {
if fn == systemFn {
return nil
}
}
return fmt.Errorf("No acceptable hash function available; server supports %s but we require %s. Hint: you may need to set the hash function appropriately in the [build] section of your config", fns, systemFn)
}
// digestEnum returns a proto enum for the digest function of given name (as we name them in config)
func (c *Client) digestEnum(name string) pb.DigestFunction_Value {
switch c.state.Config.Build.HashFunction {
case "sha256":
return pb.DigestFunction_SHA256
case "sha1":
return pb.DigestFunction_SHA1
default:
return pb.DigestFunction_UNKNOWN // Shouldn't get here
}
}
// Build executes a remote build of the given target.
func (c *Client) Build(tid int, target *core.BuildTarget) (*core.BuildMetadata, error) {
if err := c.CheckInitialised(); err != nil {
return nil, err
}
metadata, ar, digest, err := c.build(tid, target)
if err != nil {
return metadata, err
}
if c.state.TargetHasher != nil {
hash, _ := hex.DecodeString(c.outputHash(ar))
c.state.TargetHasher.SetHash(target, hash)
}
if err := c.setOutputs(target, ar); err != nil {
return metadata, c.wrapActionErr(err, digest)
}
if c.state.ShouldDownload(target) {
if !c.outputsExist(target, digest) {
c.state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Downloading")
if err := c.download(target, func() error {
return c.reallyDownload(target, digest, ar)
}); err != nil {
return metadata, err
}
} else {
log.Debug("Not downloading outputs for %s, they are already up-to-date", target)
// Ensure this is marked as already downloaded.
v, _ := c.downloads.LoadOrStore(target, &pendingDownload{})
v.(*pendingDownload).once.Do(func() {})
}
if err := c.downloadData(target); err != nil {
return metadata, err
}
}
return metadata, nil
}
// downloadData downloads all the runtime data for a target, recursively.
func (c *Client) downloadData(target *core.BuildTarget) error {
var g errgroup.Group
for _, datum := range target.AllData() {
if l := datum.Label(); l != nil {
t := c.state.Graph.TargetOrDie(*l)
g.Go(func() error {
if err := c.Download(t); err != nil {
return err
}
return c.downloadData(t)
})
}
}
return g.Wait()
}
// Run runs a target on the remote executors.
func (c *Client) Run(target *core.BuildTarget) error {
if err := c.CheckInitialised(); err != nil {
return err
}
cmd, digest, err := c.uploadAction(target, false, true)
if err != nil {
return err
}
// 24 hours is kind of an arbitrarily long timeout. Basically we just don't want to limit it here.
_, _, err = c.execute(0, target, cmd, digest, false, false)
return err
}
// build implements the actual build of a target.
func (c *Client) build(tid int, target *core.BuildTarget) (*core.BuildMetadata, *pb.ActionResult, *pb.Digest, error) {
needStdout := target.PostBuildFunction != nil
// If we're gonna stamp the target, first check the unstamped equivalent that we store results under.
// This implements the rules of stamp whereby we don't force rebuilds every time e.g. the SCM revision changes.
var unstampedDigest *pb.Digest
if target.Stamp {
command, digest, err := c.buildAction(target, false, false)
if err != nil {
return nil, nil, nil, err
} else if metadata, ar := c.maybeRetrieveResults(tid, target, command, digest, false, needStdout); metadata != nil {
return metadata, ar, digest, nil
}
unstampedDigest = digest
}
command, stampedDigest, err := c.buildAction(target, false, true)
if err != nil {
return nil, nil, nil, err
}
metadata, ar, err := c.execute(tid, target, command, stampedDigest, false, needStdout)
if target.Stamp && err == nil {
// Store results under unstamped digest too.
c.locallyCacheResults(target, unstampedDigest, metadata, ar)
c.client.UpdateActionResult(context.Background(), &pb.UpdateActionResultRequest{
InstanceName: c.instance,
ActionDigest: unstampedDigest,
ActionResult: ar,
})
c.unstampedBuildActionDigests.Put(target.Label, unstampedDigest)
} else {
c.unstampedBuildActionDigests.Put(target.Label, stampedDigest)
}
return metadata, ar, stampedDigest, err
}
// Download downloads outputs for the given target.
func (c *Client) Download(target *core.BuildTarget) error {
if target.Local {
return nil // No download needed since this target was built locally
}
return c.download(target, func() error {
buildAction := c.unstampedBuildActionDigests.Get(target.Label)
if c.outputsExist(target, buildAction) {
return nil
}
_, ar := c.retrieveResults(target, nil, buildAction, false, false)
if ar == nil {
return fmt.Errorf("Failed to retrieve action result for %s", target)
}
return c.reallyDownload(target, buildAction, ar)
})
}
func (c *Client) download(target *core.BuildTarget, f func() error) error {
v, _ := c.downloads.LoadOrStore(target, &pendingDownload{})
d := v.(*pendingDownload)
d.once.Do(func() {
d.err = f()
})
return d.err
}
func (c *Client) reallyDownload(target *core.BuildTarget, digest *pb.Digest, ar *pb.ActionResult) error {
log.Debug("Downloading outputs for %s", target)
if err := removeOutputs(target); err != nil {
return err
}
if err := c.downloadActionOutputs(context.Background(), ar, target); err != nil {
return c.wrapActionErr(err, digest)
}
c.recordAttrs(target, digest)
log.Debug("Downloaded outputs for %s", target)
return nil
}
func (c *Client) downloadActionOutputs(ctx context.Context, ar *pb.ActionResult, target *core.BuildTarget) error {
// We can download straight into the out dir if there are no outdirs to worry about
if len(target.OutputDirectories) == 0 {
_, err := c.client.DownloadActionOutputs(ctx, ar, target.OutDir(), c.fileMetadataCache)
return err
}
defer os.RemoveAll(target.TmpDir())
if _, err := c.client.DownloadActionOutputs(ctx, ar, target.TmpDir(), c.fileMetadataCache); err != nil {
return err
}
if err := moveOutDirsToTmpRoot(target); err != nil {
return fmt.Errorf("failed to move out directories to correct place in tmp folder: %w", err)
}
if err := moveTmpFilesToOutDir(target); err != nil {
return fmt.Errorf("failed to move downloaded action output from target tmp dir to out dir: %w", err)
}
return nil
}
// moveTmpFilesToOutDir moves files from the target tmp dir to the out dir
func moveTmpFilesToOutDir(target *core.BuildTarget) error {
files, err := ioutil.ReadDir(target.TmpDir())
if err != nil {
return err
}
for _, f := range files {
oldPath := filepath.Join(target.TmpDir(), f.Name())
newPath := filepath.Join(target.OutDir(), f.Name())
if err := fs.RecursiveCopy(oldPath, newPath, target.OutMode()); err != nil {
return err
}
}
return nil
}
// moveOutDirsToTmpRoot moves all the files from the output dirs into the root of the build temp dir and deletes the
// now empty directory
func moveOutDirsToTmpRoot(target *core.BuildTarget) error {
for _, dir := range target.OutputDirectories {
if err := moveOutDirFilesToTmpRoot(target, dir.Dir()); err != nil {
return fmt.Errorf("failed to move output dir (%s) contents to rule root: %w", dir, err)
}
if err := os.Remove(filepath.Join(target.TmpDir(), dir.Dir())); err != nil {
return err
}
}
return nil
}
func moveOutDirFilesToTmpRoot(target *core.BuildTarget, dir string) error {
fullDir := filepath.Join(target.TmpDir(), dir)
files, err := ioutil.ReadDir(fullDir)
if err != nil {
return err
}
for _, f := range files {
from := filepath.Join(fullDir, f.Name())
to := filepath.Join(target.TmpDir(), f.Name())
if err := os.Rename(from, to); err != nil {
return err
}
}
return nil
}
// Test executes a remote test of the given target.
// It returns the results (and coverage if appropriate) as bytes to be parsed elsewhere.
func (c *Client) Test(tid int, target *core.BuildTarget, run int) (metadata *core.BuildMetadata, err error) {
if err := c.CheckInitialised(); err != nil {
return nil, err
}
command, digest, err := c.buildAction(target, true, false)
if err != nil {
return nil, err
}
metadata, ar, err := c.execute(tid, target, command, digest, true, false)
if ar != nil {
_, dlErr := c.client.DownloadActionOutputs(context.Background(), ar, target.TestDir(run), c.fileMetadataCache)
if dlErr != nil {
log.Warningf("%v: failed to download test outputs: %v", target.Label, dlErr)
}
}
return metadata, err
}
// retrieveResults retrieves target results from where it can (either from the local cache or from remote).
// It returns nil if it cannot be retrieved.
func (c *Client) retrieveResults(target *core.BuildTarget, command *pb.Command, digest *pb.Digest, needStdout, isTest bool) (*core.BuildMetadata, *pb.ActionResult) {
// First see if this execution is cached locally
if metadata, ar := c.retrieveLocalResults(target, digest); metadata != nil {
log.Debug("Got locally cached results for %s %s", target.Label, c.actionURL(digest, true))
metadata.Cached = true
return metadata, ar
}
// Now see if it is cached on the remote server
if ar, err := c.client.GetActionResult(context.Background(), &pb.GetActionResultRequest{
InstanceName: c.instance,
ActionDigest: digest,
InlineStdout: needStdout,
}); err == nil {
// This action already exists and has been cached.
if metadata, err := c.buildMetadata(ar, needStdout, false); err == nil {
log.Debug("Got remotely cached results for %s %s", target.Label, c.actionURL(digest, true))
if command != nil {
err = c.verifyActionResult(target, command, digest, ar, c.state.Config.Remote.VerifyOutputs, isTest)
}
if err == nil {
c.locallyCacheResults(target, digest, metadata, ar)
metadata.Cached = true
return metadata, ar
}
log.Debug("Remotely cached results for %s were missing some outputs, forcing a rebuild: %s", target.Label, err)
}
}
return nil, nil
}
// maybeRetrieveResults is like retrieveResults but only retrieves if we aren't forcing a rebuild of the target
// (i.e. not if we're doing plz build --rebuild or plz test --rerun).
func (c *Client) maybeRetrieveResults(tid int, target *core.BuildTarget, command *pb.Command, digest *pb.Digest, isTest, needStdout bool) (*core.BuildMetadata, *pb.ActionResult) {
if !c.state.ShouldRebuild(target) && !(c.state.NeedTests && isTest && c.state.ForceRerun) {
c.state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Checking remote...")
if metadata, ar := c.retrieveResults(target, command, digest, needStdout, isTest); metadata != nil {
return metadata, ar
}
}
return nil, nil
}
// execute submits an action to the remote executor and monitors its progress.
// The returned ActionResult may be nil on failure.
func (c *Client) execute(tid int, target *core.BuildTarget, command *pb.Command, digest *pb.Digest, isTest, needStdout bool) (*core.BuildMetadata, *pb.ActionResult, error) {
if !isTest || !c.state.ForceRerun || c.state.NumTestRuns == 1 {
if metadata, ar := c.maybeRetrieveResults(tid, target, command, digest, isTest, needStdout); metadata != nil {
return metadata, ar, nil
}
}
// We didn't actually upload the inputs before, so we must do so now.
command, digest, err := c.uploadAction(target, isTest, false)
if err != nil {
return nil, nil, fmt.Errorf("Failed to upload build action: %s", err)
}
// Remote actions & filegroups get special treatment at this point.
if target.IsFilegroup {
// Filegroups get special-cased since they are just a movement of files.
return c.buildFilegroup(target, command, digest)
} else if target.IsRemoteFile {
return c.fetchRemoteFile(tid, target, digest)
} else if target.IsTextFile {
return c.buildTextFile(target, command, digest)
}
return c.reallyExecute(tid, target, command, digest, needStdout, isTest)
}
// reallyExecute is like execute but after the initial cache check etc.
// The action & sources must have already been uploaded.
func (c *Client) reallyExecute(tid int, target *core.BuildTarget, command *pb.Command, digest *pb.Digest, needStdout, isTest bool) (*core.BuildMetadata, *pb.ActionResult, error) {
executing := false
updateProgress := func(metadata *pb.ExecuteOperationMetadata) {
if c.state.Config.Remote.DisplayURL != "" {
log.Debug("Remote progress for %s: %s%s", target.Label, metadata.Stage, c.actionURL(metadata.ActionDigest, true))
}
if target.State() <= core.Built {
switch metadata.Stage {
case pb.ExecutionStage_CACHE_CHECK:
c.state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Checking cache...")
case pb.ExecutionStage_QUEUED:
c.state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Queued")
case pb.ExecutionStage_EXECUTING:
executing = true
c.state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Building...")
case pb.ExecutionStage_COMPLETED:
c.state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Completed")
}
} else {
switch metadata.Stage {
case pb.ExecutionStage_CACHE_CHECK:
c.state.LogBuildResult(tid, target.Label, core.TargetTesting, "Checking cache...")
case pb.ExecutionStage_QUEUED:
c.state.LogBuildResult(tid, target.Label, core.TargetTesting, "Queued")
case pb.ExecutionStage_EXECUTING:
executing = true
c.state.LogBuildResult(tid, target.Label, core.TargetTesting, "Testing...")
case pb.ExecutionStage_COMPLETED:
c.state.LogBuildResult(tid, target.Label, core.TargetTesting, "Completed")
}
}
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
for i := 1; i < 1000000; i++ {
select {
case <-ctx.Done():
return
case <-time.After(1 * time.Minute):
description := "queued"
if executing {
description = "executing"
}
if i == 1 {
log.Notice("%s still %s after 1 minute", target, description)
} else {
log.Notice("%s still %s after %d minutes", target, description, i)
}
}
}
}()
resp, err := c.client.ExecuteAndWaitProgress(c.contextWithMetadata(target), &pb.ExecuteRequest{
InstanceName: c.instance,
ActionDigest: digest,
}, updateProgress)
if err != nil {
// Handle timing issues if we try to resume an execution as it fails. If we get a
// "not found" we might find that it's already been completed and we can't resume.
if status.Code(err) == codes.NotFound {
if metadata, ar := c.retrieveResults(target, command, digest, needStdout, isTest); metadata != nil {
return metadata, ar, nil
}
}
return nil, nil, c.wrapActionErr(fmt.Errorf("Failed to execute %s: %s", target, err), digest)
}
switch result := resp.Result.(type) {
case *longrunning.Operation_Error:
// We shouldn't really get here - the rex API requires servers to always
// use the response field instead of error.
return nil, nil, convertError(result.Error)
case *longrunning.Operation_Response:
response := &pb.ExecuteResponse{}
if err := ptypes.UnmarshalAny(result.Response, response); err != nil {
log.Error("Failed to deserialise execution response: %s", err)
return nil, nil, err
}
if response.CachedResult {
c.state.LogBuildResult(tid, target.Label, core.TargetCached, "Cached")
}
for k, v := range response.ServerLogs {
log.Debug("Server log available: %s: hash key %s", k, v.Digest.Hash)
}
var respErr error
if response.Status != nil {
respErr = convertError(response.Status)
if respErr != nil {
if !strings.Contains(respErr.Error(), c.state.Config.Remote.DisplayURL) {
if url := c.actionURL(digest, false); url != "" {
respErr = fmt.Errorf("%s\nAction URL: %s", respErr, url)
}
}
}
}
if resp.Result == nil { // This is optional on failure.
return nil, nil, respErr
}
if response.Result == nil { // This seems to happen when things go wrong on the build server end.
if response.Status != nil {
return nil, nil, fmt.Errorf("Build server returned invalid result: %s", convertError(response.Status))
}
log.Debug("Bad result from build server: %+v", response)
return nil, nil, fmt.Errorf("Build server did not return valid result")
}
if response.Message != "" {
// Informational messages can be emitted on successful actions.
log.Debug("Message from build server:\n %s", response.Message)
}
failed := respErr != nil || response.Result.ExitCode != 0
metadata, err := c.buildMetadata(response.Result, needStdout || failed, failed)
logResponseTimings(target, response.Result)
// The original error is higher priority than us trying to retrieve the
// output of the thing that failed.
if respErr != nil {
return metadata, response.Result, respErr
} else if response.Result.ExitCode != 0 {
err := fmt.Errorf("Remotely executed command exited with %d", response.Result.ExitCode)
if response.Message != "" {
err = fmt.Errorf("%s\n %s", err, response.Message)
}
if len(metadata.Stdout) != 0 {
err = fmt.Errorf("%s\nStdout:\n%s", err, metadata.Stdout)
}
if len(metadata.Stderr) != 0 {
err = fmt.Errorf("%s\nStderr:\n%s", err, metadata.Stderr)
}
// Add a link to the action URL, but only if the server didn't do it (they
// might add one to the failed action if they're using the Buildbarn extension
// for it, which we can't replicate here).
if !strings.Contains(response.Message, c.state.Config.Remote.DisplayURL) {
if url := c.actionURL(digest, true); url != "" {
err = fmt.Errorf("%s\n%s", err, url)
}
}
return metadata, response.Result, err
} else if err != nil {
return nil, nil, err
}
log.Debug("Completed remote build action for %s", target)
if err := c.verifyActionResult(target, command, digest, response.Result, false, isTest); err != nil {
return metadata, response.Result, err
}
c.locallyCacheResults(target, digest, metadata, response.Result)
return metadata, response.Result, nil
default:
if !resp.Done {
log.Error("Received an incomplete response for %s: %#v", target, resp)
return nil, nil, fmt.Errorf("Received an incomplete response for %s", target)
}
return nil, nil, fmt.Errorf("Unknown response type (was a %T): %#v", resp.Result, resp) // Shouldn't get here
}
}
func logResponseTimings(target *core.BuildTarget, ar *pb.ActionResult) {
if ar != nil && ar.ExecutionMetadata != nil {
startTime := toTime(ar.ExecutionMetadata.ExecutionStartTimestamp)
endTime := toTime(ar.ExecutionMetadata.ExecutionCompletedTimestamp)
inputFetchStartTime := toTime(ar.ExecutionMetadata.InputFetchStartTimestamp)
inputFetchEndTime := toTime(ar.ExecutionMetadata.InputFetchCompletedTimestamp)
log.Debug("Completed remote build action for %s; input fetch %s, build time %s", target, inputFetchEndTime.Sub(inputFetchStartTime), endTime.Sub(startTime))
}
}
// PrintHashes prints the action hashes for a target.
func (c *Client) PrintHashes(target *core.BuildTarget, isTest bool) {
actionDigest := c.unstampedBuildActionDigests.Get(target.Label)
fmt.Printf(" Action: %7d bytes: %s\n", actionDigest.SizeBytes, actionDigest.Hash)
if c.state.Config.Remote.DisplayURL != "" {
fmt.Printf(" URL: %s\n", c.actionURL(actionDigest, false))
}
}
// DataRate returns an estimate of the current in/out RPC data rates in bytes per second.
func (c *Client) DataRate() (int, int, int, int) {
return c.byteRateIn, c.byteRateOut, c.totalBytesIn, c.totalBytesOut
}
// fetchRemoteFile sends a request to fetch a file using the remote asset API.
func (c *Client) fetchRemoteFile(tid int, target *core.BuildTarget, actionDigest *pb.Digest) (*core.BuildMetadata, *pb.ActionResult, error) {
c.state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Downloading...")
urls := target.AllURLs(c.state)
req := &fpb.FetchBlobRequest{
InstanceName: c.instance,
Timeout: ptypes.DurationProto(target.BuildTimeout),
Uris: urls,
}
if !c.state.NeedHashesOnly || !c.state.IsOriginalTargetOrParent(target) {
if sri := subresourceIntegrity(target); sri != "" {
req.Qualifiers = []*fpb.Qualifier{{
Name: "checksum.sri",
Value: sri,
}}
}
}
ctx, cancel := context.WithTimeout(context.Background(), target.BuildTimeout)
defer cancel()
resp, err := c.fetchClient.FetchBlob(ctx, req)
if err != nil {
return nil, nil, fmt.Errorf("Failed to download file: %s", err)
}
c.state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Downloaded.")
// If we get here, the blob exists in the CAS. Create an ActionResult corresponding to it.
outs := target.Outputs()
ar := &pb.ActionResult{
OutputFiles: []*pb.OutputFile{{
Path: outs[0],
Digest: resp.BlobDigest,
IsExecutable: target.IsBinary,
}},
}
if _, err := c.client.UpdateActionResult(context.Background(), &pb.UpdateActionResultRequest{
InstanceName: c.instance,
ActionDigest: actionDigest,
ActionResult: ar,
}); err != nil {
return nil, nil, fmt.Errorf("Error updating action result: %s", err)
}
return &core.BuildMetadata{}, ar, nil
}
// buildFilegroup "builds" a single filegroup target.
func (c *Client) buildFilegroup(target *core.BuildTarget, command *pb.Command, actionDigest *pb.Digest) (*core.BuildMetadata, *pb.ActionResult, error) {
inputDir, err := c.uploadInputDir(nil, target, false) // We don't need to actually upload the inputs here, that is already done.
if err != nil {
return nil, nil, err
}
ar := &pb.ActionResult{}
if err := c.uploadBlobs(func(ch chan<- *uploadinfo.Entry) error {
defer close(ch)
inputDir.Build(ch)
for _, out := range command.OutputPaths {
if d, f := inputDir.Node(path.Join(target.Label.PackageName, out)); d != nil {
entry, digest := c.protoEntry(inputDir.Tree(path.Join(target.Label.PackageName, out)))
ch <- entry
ar.OutputDirectories = append(ar.OutputDirectories, &pb.OutputDirectory{
Path: out,
TreeDigest: digest,
})
} else if f != nil {
ar.OutputFiles = append(ar.OutputFiles, &pb.OutputFile{
Path: out,
Digest: f.Digest,
IsExecutable: f.IsExecutable,
})
} else {
// Of course, we should not get here (classic developer things...)
return fmt.Errorf("Missing output from filegroup: %s", out)
}
}
return nil
}); err != nil {
return nil, nil, err
}
if _, err := c.client.UpdateActionResult(context.Background(), &pb.UpdateActionResultRequest{
InstanceName: c.instance,
ActionDigest: actionDigest,
ActionResult: ar,
}); err != nil {
return nil, nil, fmt.Errorf("Error updating action result: %s", err)
}
return &core.BuildMetadata{}, ar, nil
}
// buildTextFile "builds" uploads a text file to the CAS
func (c *Client) buildTextFile(target *core.BuildTarget, command *pb.Command, actionDigest *pb.Digest) (*core.BuildMetadata, *pb.ActionResult, error) {
ar := &pb.ActionResult{}
if err := c.uploadBlobs(func(ch chan<- *uploadinfo.Entry) error {
defer close(ch)
if len(command.OutputPaths) != 1 {
return fmt.Errorf("text_file %s should have a single output, has %d", target.Label, len(command.OutputPaths))
}
entry := uploadinfo.EntryFromBlob([]byte(target.FileContent))
ch <- entry
ar.OutputFiles = append(ar.OutputFiles, &pb.OutputFile{
Path: command.OutputPaths[0],
Digest: entry.Digest.ToProto(),
})
return nil
}); err != nil {
return nil, nil, err
}
if _, err := c.client.UpdateActionResult(context.Background(), &pb.UpdateActionResultRequest{
InstanceName: c.instance,
ActionDigest: actionDigest,
ActionResult: ar,
}); err != nil {
return nil, nil, fmt.Errorf("Error updating action result: %s", err)
}
return &core.BuildMetadata{}, ar, nil
}
// A grpcLogMabob is an implementation of grpc's logging interface using our backend.
type grpcLogMabob struct{}
func (g *grpcLogMabob) Info(args ...interface{}) { log.Info("%s", args) }
func (g *grpcLogMabob) Infof(format string, args ...interface{}) { log.Info(format, args...) }
func (g *grpcLogMabob) Infoln(args ...interface{}) { log.Info("%s", args) }
func (g *grpcLogMabob) Warning(args ...interface{}) { log.Warning("%s", args) }
func (g *grpcLogMabob) Warningf(format string, args ...interface{}) { log.Warning(format, args...) }
func (g *grpcLogMabob) Warningln(args ...interface{}) { log.Warning("%s", args) }
func (g *grpcLogMabob) Error(args ...interface{}) { log.Error("", args...) }
func (g *grpcLogMabob) Errorf(format string, args ...interface{}) { log.Errorf(format, args...) }
func (g *grpcLogMabob) Errorln(args ...interface{}) { log.Error("", args...) }
func (g *grpcLogMabob) Fatal(args ...interface{}) { log.Fatal(args...) }
func (g *grpcLogMabob) Fatalf(format string, args ...interface{}) { log.Fatalf(format, args...) }
func (g *grpcLogMabob) Fatalln(args ...interface{}) { log.Fatal(args...) }
func (g *grpcLogMabob) V(l int) bool { return log.IsEnabledFor(logging.Level(l)) }
| 1 | 9,769 | Don't think this is quite right - think the build one also needs an `IsOriginalTarget` (c.f. code in `src/build`) | thought-machine-please | go |
@@ -62,6 +62,12 @@ class Autocomplete
return array('results' => $this->processResults($paginator->getCurrentPageResults(), $backendConfig['entities'][$entity]));
}
+ /**
+ * @param array $entities
+ * @param array $targetEntityConfig
+ *
+ * @return array
+ */
private function processResults($entities, $targetEntityConfig)
{
$results = array(); | 1 | <?php
/*
* This file is part of the EasyAdminBundle.
*
* (c) Javier Eguiluz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace JavierEguiluz\Bundle\EasyAdminBundle\Search;
use JavierEguiluz\Bundle\EasyAdminBundle\Configuration\ConfigManager;
use Symfony\Component\PropertyAccess\PropertyAccessor;
/**
* It looks for the values of entity which match the given query. It's used for
* the autocomplete field types.
*
* @author Javier Eguiluz <[email protected]>
* @author Yonel Ceruto <[email protected]>
*/
class Autocomplete
{
/** @var ConfigManager */
private $configManager;
/** @var Finder */
private $finder;
/** @var PropertyAccessor */
private $propertyAccessor;
public function __construct(ConfigManager $configManager, Finder $finder, PropertyAccessor $propertyAccessor)
{
$this->configManager = $configManager;
$this->finder = $finder;
$this->propertyAccessor = $propertyAccessor;
}
/**
* Finds the values of the given entity which match the query provided.
*
* @param string $entity
* @param string $query
* @param int $page
*
* @return array
*/
public function find($entity, $query, $page = 1)
{
if (empty($entity) || empty($query)) {
return array('results' => array());
}
$backendConfig = $this->configManager->getBackendConfig();
if (!isset($backendConfig['entities'][$entity])) {
throw new \InvalidArgumentException(sprintf('The "entity" argument must contain the name of an entity managed by EasyAdmin ("%s" given).', $entity));
}
$paginator = $this->finder->findByAllProperties($backendConfig['entities'][$entity], $query, $page, $backendConfig['show']['max_results']);
return array('results' => $this->processResults($paginator->getCurrentPageResults(), $backendConfig['entities'][$entity]));
}
private function processResults($entities, $targetEntityConfig)
{
$results = array();
foreach ($entities as $entity) {
$results[] = array(
'id' => $this->propertyAccessor->getValue($entity, $targetEntityConfig['primary_key_field_name']),
'text' => (string) $entity,
);
}
return $results;
}
}
| 1 | 10,862 | what about adding typehints instead? I don't think we need to add docblocks for every private methods. | EasyCorp-EasyAdminBundle | php |
@@ -76,6 +76,7 @@ func createVolumeBuilder(cStorVolumeReplica *apis.CStorVolumeReplica, fullVolNam
openebsTargetIP := "io.openebs:targetip=" + cStorVolumeReplica.Spec.TargetIP
createVolAttr = append(createVolAttr, "create",
+ "-b", "4K", "-s", "-o", "compression=on",
"-V", cStorVolumeReplica.Spec.Capacity, fullVolName,
"-o", openebsTargetIP, "-o", openebsVolname)
| 1 | /*
Copyright 2018 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumereplica
import (
"fmt"
"strings"
"github.com/golang/glog"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
"github.com/openebs/maya/pkg/util"
)
// VolumeReplicaOperator is the name of the tool that makes
// volume-related operations.
const (
VolumeReplicaOperator = "zfs"
)
// RunnerVar the runner variable for executing binaries.
var RunnerVar util.Runner
// CheckValidVolumeReplica checks for validity of cStor replica resource.
func CheckValidVolumeReplica(cVR *apis.CStorVolumeReplica) error {
var err error
if len(cVR.Labels["cstorvolume.openebs.io/name"]) == 0 {
err = fmt.Errorf("Volume Name/UID cannot be empty")
return err
}
if len(cVR.Spec.TargetIP) == 0 {
err = fmt.Errorf("TargetIP cannot be empty")
return err
}
if len(cVR.Spec.Capacity) == 0 {
err = fmt.Errorf("Capacity cannot be empty")
return err
}
if len(cVR.Labels["cstorpool.openebs.io/uid"]) == 0 {
err = fmt.Errorf("Pool cannot be empty")
return err
}
return nil
}
// CreateVolume creates cStor replica(zfs volumes).
func CreateVolume(cStorVolumeReplica *apis.CStorVolumeReplica, fullVolName string) error {
createVolAttr := createVolumeBuilder(cStorVolumeReplica, fullVolName)
stdoutStderr, err := RunnerVar.RunCombinedOutput(VolumeReplicaOperator, createVolAttr...)
if err != nil {
glog.Errorf("Unable to create volume: %v", string(stdoutStderr))
return err
}
return nil
}
// createVolumeBuilder builds volume creations command to run.
func createVolumeBuilder(cStorVolumeReplica *apis.CStorVolumeReplica, fullVolName string) []string {
var createVolAttr []string
openebsVolname := "io.openebs:volname=" + cStorVolumeReplica.ObjectMeta.Name
openebsTargetIP := "io.openebs:targetip=" + cStorVolumeReplica.Spec.TargetIP
createVolAttr = append(createVolAttr, "create",
"-V", cStorVolumeReplica.Spec.Capacity, fullVolName,
"-o", openebsTargetIP, "-o", openebsVolname)
return createVolAttr
}
// GetVolumes returns the slice of volumes.
func GetVolumes() ([]string, error) {
volStrCmd := []string{"get", "-Hp", "name", "-o", "name"}
volnameByte, err := RunnerVar.RunStdoutPipe(VolumeReplicaOperator, volStrCmd...)
if err != nil || string(volnameByte) == "" {
glog.Errorf("Unable to get volumes:%v", string(volnameByte))
return []string{}, err
}
noisyVolname := string(volnameByte)
sepNoisyVolName := strings.Split(noisyVolname, "\n")
var volNames []string
for _, volName := range sepNoisyVolName {
volName = strings.TrimSpace(volName)
volNames = append(volNames, volName)
}
return volNames, nil
}
// DeleteVolume deletes the specified volume.
func DeleteVolume(fullVolName string) error {
deleteVolStr := []string{"destroy", fullVolName}
stdoutStderr, err := RunnerVar.RunCombinedOutput(VolumeReplicaOperator, deleteVolStr...)
if err != nil {
glog.Errorf("Unable to delete volume : %v", string(stdoutStderr))
return err
}
return nil
}
| 1 | 9,160 | Is it fine to pass the "-o compression=on" in the middle instead of at the end ? | openebs-maya | go |
@@ -18,14 +18,18 @@ module Bolt
attr_reader :noop, :transports
attr_accessor :run_as
+ # FIXME: There must be a better way
+ # https://makandracards.com/makandra/36011-ruby-do-not-mix-optional-and-keyword-arguments
def initialize(concurrency = 1,
analytics = Bolt::Analytics::NoopClient.new,
noop = nil,
- bundled_content: nil)
+ bundled_content: nil,
+ load_config: true)
@analytics = analytics
@bundled_content = bundled_content
@logger = Logging.logger[self]
@plan_logging = false
+ @load_config = load_config
@transports = Bolt::TRANSPORTS.each_with_object({}) do |(key, val), coll|
coll[key.to_s] = Concurrent::Delay.new do | 1 | # frozen_string_literal: true
# Used for $ERROR_INFO. This *must* be capitalized!
require 'English'
require 'json'
require 'concurrent'
require 'logging'
require 'set'
require 'bolt/analytics'
require 'bolt/result'
require 'bolt/config'
require 'bolt/notifier'
require 'bolt/result_set'
require 'bolt/puppetdb'
module Bolt
class Executor
attr_reader :noop, :transports
attr_accessor :run_as
def initialize(concurrency = 1,
analytics = Bolt::Analytics::NoopClient.new,
noop = nil,
bundled_content: nil)
@analytics = analytics
@bundled_content = bundled_content
@logger = Logging.logger[self]
@plan_logging = false
@transports = Bolt::TRANSPORTS.each_with_object({}) do |(key, val), coll|
coll[key.to_s] = Concurrent::Delay.new do
val.new
end
end
@reported_transports = Set.new
@noop = noop
@run_as = nil
@pool = Concurrent::ThreadPoolExecutor.new(max_threads: concurrency)
@logger.debug { "Started with #{concurrency} max thread(s)" }
@notifier = Bolt::Notifier.new
end
def transport(transport)
impl = @transports[transport || 'ssh']
raise(Bolt::UnknownTransportError, transport) unless impl
# If there was an error creating the transport, ensure it gets thrown
impl.no_error!
impl.value
end
# Starts executing the given block on a list of nodes in parallel, one thread per "batch".
#
# This is the main driver of execution on a list of targets. It first
# groups targets by transport, then divides each group into batches as
# defined by the transport. Yields each batch, along with the corresponding
# transport, to the block in turn and returns an array of result promises.
def queue_execute(targets)
targets.group_by(&:protocol).flat_map do |protocol, protocol_targets|
transport = transport(protocol)
report_transport(transport, protocol_targets.count)
transport.batches(protocol_targets).flat_map do |batch|
batch_promises = Array(batch).each_with_object({}) do |target, h|
h[target] = Concurrent::Promise.new(executor: :immediate)
end
# Pass this argument through to avoid retaining a reference to a
# local variable that will change on the next iteration of the loop.
@pool.post(batch_promises) do |result_promises|
begin
results = yield transport, batch
Array(results).each do |result|
result_promises[result.target].set(result)
end
# NotImplementedError can be thrown if the transport is implemented improperly
rescue StandardError, NotImplementedError => e
result_promises.each do |target, promise|
promise.set(Bolt::Result.from_exception(target, e))
end
ensure
# Make absolutely sure every promise gets a result to avoid a
# deadlock. Use whatever exception is causing this block to
# execute, or generate one if we somehow got here without an
# exception and some promise is still missing a result.
result_promises.each do |target, promise|
next if promise.fulfilled?
error = $ERROR_INFO || Bolt::Error.new("No result was returned for #{target.uri}",
"puppetlabs.bolt/missing-result-error")
promise.set(Bolt::Result.from_exception(target, error))
end
end
end
batch_promises.values
end
end
end
# Create a ResultSet from the results of all promises.
def await_results(promises)
ResultSet.new(promises.map(&:value))
end
# Execute the given block on a list of nodes in parallel, one thread per "batch".
#
# This is the main driver of execution on a list of targets. It first
# groups targets by transport, then divides each group into batches as
# defined by the transport. Each batch, along with the corresponding
# transport, is yielded to the block in turn and the results all collected
# into a single ResultSet.
def batch_execute(targets, &block)
promises = queue_execute(targets, &block)
await_results(promises)
end
def log_action(description, targets)
# When running a plan, info messages like starting a task are promoted to notice.
log_method = @plan_logging ? :notice : :info
target_str = if targets.length > 5
"#{targets.count} targets"
else
targets.map(&:uri).join(', ')
end
@logger.send(log_method, "Starting: #{description} on #{target_str}")
start_time = Time.now
results = yield
duration = Time.now - start_time
failures = results.error_set.length
plural = failures == 1 ? '' : 's'
@logger.send(log_method, "Finished: #{description} with #{failures} failure#{plural} in #{duration.round(2)} sec")
results
end
def log_plan(plan_name)
log_method = @plan_logging ? :notice : :info
@logger.send(log_method, "Starting: plan #{plan_name}")
start_time = Time.now
results = nil
begin
results = yield
ensure
duration = Time.now - start_time
@logger.send(log_method, "Finished: plan #{plan_name} in #{duration.round(2)} sec")
end
results
end
def report_transport(transport, count)
name = transport.class.name.split('::').last.downcase
@analytics&.event('Transport', 'initialize', name, count) unless @reported_transports.include?(name)
@reported_transports.add(name)
end
def report_function_call(function)
@analytics&.event('Plan', 'call_function', function)
end
def report_bundled_content(mode, name)
if @bundled_content&.include?(name)
@analytics&.event('Bundled Content', mode, name)
end
end
def with_node_logging(description, batch)
@logger.info("#{description} on #{batch.map(&:uri)}")
result = yield
@logger.info(result.to_json)
result
end
def run_command(targets, command, options = {}, &callback)
description = options.fetch('_description', "command '#{command}'")
log_action(description, targets) do
notify = proc { |event| @notifier.notify(callback, event) if callback }
options = { '_run_as' => run_as }.merge(options) if run_as
results = batch_execute(targets) do |transport, batch|
with_node_logging("Running command '#{command}'", batch) do
transport.batch_command(batch, command, options, ¬ify)
end
end
@notifier.shutdown
results
end
end
def run_script(targets, script, arguments, options = {}, &callback)
description = options.fetch('_description', "script #{script}")
log_action(description, targets) do
notify = proc { |event| @notifier.notify(callback, event) if callback }
options = { '_run_as' => run_as }.merge(options) if run_as
results = batch_execute(targets) do |transport, batch|
with_node_logging("Running script #{script} with '#{arguments}'", batch) do
transport.batch_script(batch, script, arguments, options, ¬ify)
end
end
@notifier.shutdown
results
end
end
def run_task(targets, task, arguments, options = {}, &callback)
description = options.fetch('_description', "task #{task.name}")
log_action(description, targets) do
notify = proc { |event| @notifier.notify(callback, event) if callback }
options = { '_run_as' => run_as }.merge(options) if run_as
arguments['_task'] = task.name
results = batch_execute(targets) do |transport, batch|
with_node_logging("Running task #{task.name} with '#{arguments}' via #{task.input_method}", batch) do
transport.batch_task(batch, task, arguments, options, ¬ify)
end
end
@notifier.shutdown
results
end
end
def file_upload(targets, source, destination, options = {}, &callback)
description = options.fetch('_description', "file upload from #{source} to #{destination}")
log_action(description, targets) do
notify = proc { |event| @notifier.notify(callback, event) if callback }
options = { '_run_as' => run_as }.merge(options) if run_as
results = batch_execute(targets) do |transport, batch|
with_node_logging("Uploading file #{source} to #{destination}", batch) do
transport.batch_upload(batch, source, destination, options, ¬ify)
end
end
@notifier.shutdown
results
end
end
# Plan context doesn't make sense for most transports but it is tightly
# coupled with the orchestrator transport since the transport behaves
# differently when a plan is running. In order to limit how much this
# pollutes the transport API we only handle the orchestrator transport here.
# Since we callt this function without resolving targets this will result
# in the orchestrator transport always being initialized during plan runs.
# For now that's ok.
#
# In the future if other transports need this or if we want a plan stack
# we'll need to refactor.
def start_plan(plan_context)
transport('pcp').plan_context = plan_context
@plan_logging = true
end
def finish_plan(plan_result)
transport('pcp').finish_plan(plan_result)
end
def without_default_logging
old_log = @plan_logging
@plan_logging = false
yield
ensure
@plan_logging = old_log
end
end
end
| 1 | 9,240 | Probably make them all keyword arguments. | puppetlabs-bolt | rb |
@@ -521,11 +521,7 @@ define(["loading", "appRouter", "layoutManager", "connectionManager", "cardBuild
renderDetails(page, item, apiClient, context);
renderTrackSelections(page, instance, item);
- if (dom.getWindowSize().innerWidth >= 1000) {
- backdrop.setBackdrops([item]);
- } else {
- backdrop.clear();
- }
+ backdrop.clear();
renderDetailPageBackdrop(page, item, apiClient);
var canPlay = reloadPlayButtons(page, item); | 1 | define(["loading", "appRouter", "layoutManager", "connectionManager", "cardBuilder", "datetime", "mediaInfo", "backdrop", "listView", "itemContextMenu", "itemHelper", "dom", "indicators", "apphost", "imageLoader", "libraryMenu", "globalize", "browser", "events", "scrollHelper", "playbackManager", "libraryBrowser", "scrollStyles", "emby-itemscontainer", "emby-checkbox", "emby-button", "emby-playstatebutton", "emby-ratingbutton", "emby-scroller", "emby-select"], function (loading, appRouter, layoutManager, connectionManager, cardBuilder, datetime, mediaInfo, backdrop, listView, itemContextMenu, itemHelper, dom, indicators, appHost, imageLoader, libraryMenu, globalize, browser, events, scrollHelper, playbackManager, libraryBrowser) {
"use strict";
function getPromise(apiClient, params) {
var id = params.id;
if (id) {
return apiClient.getItem(apiClient.getCurrentUserId(), id);
}
if (params.seriesTimerId) {
return apiClient.getLiveTvSeriesTimer(params.seriesTimerId);
}
if (params.genre) {
return apiClient.getGenre(params.genre, apiClient.getCurrentUserId());
}
if (params.musicgenre) {
return apiClient.getMusicGenre(params.musicgenre, apiClient.getCurrentUserId());
}
if (params.musicartist) {
return apiClient.getArtist(params.musicartist, apiClient.getCurrentUserId());
}
throw new Error("Invalid request");
}
function hideAll(page, className, show) {
var i;
var length;
var elems = page.querySelectorAll("." + className);
for (i = 0, length = elems.length; i < length; i++) {
if (show) {
elems[i].classList.remove("hide");
} else {
elems[i].classList.add("hide");
}
}
}
function getContextMenuOptions(item, user, button) {
var options = {
item: item,
open: false,
play: false,
playAllFromHere: false,
queueAllFromHere: false,
positionTo: button,
cancelTimer: false,
record: false,
deleteItem: true === item.IsFolder,
shuffle: false,
instantMix: false,
user: user,
share: true
};
return options;
}
function getProgramScheduleHtml(items, options) {
options = options || {};
var html = "";
html += '<div is="emby-itemscontainer" class="itemsContainer vertical-list" data-contextmenu="false">';
html += listView.getListViewHtml({
items: items,
enableUserDataButtons: false,
image: true,
imageSource: "channel",
showProgramDateTime: true,
showChannel: false,
mediaInfo: false,
action: "none",
moreButton: false,
recordButton: false
});
return html += "</div>";
}
function renderSeriesTimerSchedule(page, apiClient, seriesTimerId) {
apiClient.getLiveTvTimers({
UserId: apiClient.getCurrentUserId(),
ImageTypeLimit: 1,
EnableImageTypes: "Primary,Backdrop,Thumb",
SortBy: "StartDate",
EnableTotalRecordCount: false,
EnableUserData: false,
SeriesTimerId: seriesTimerId,
Fields: "ChannelInfo,ChannelImage"
}).then(function (result) {
if (result.Items.length && result.Items[0].SeriesTimerId != seriesTimerId) {
result.Items = [];
}
var html = getProgramScheduleHtml(result.Items);
var scheduleTab = page.querySelector(".seriesTimerSchedule");
scheduleTab.innerHTML = html;
imageLoader.lazyChildren(scheduleTab);
});
}
function renderTimerEditor(page, item, apiClient, user) {
if ("Recording" !== item.Type || !user.Policy.EnableLiveTvManagement || !item.TimerId || "InProgress" !== item.Status) {
return void hideAll(page, "btnCancelTimer");
}
hideAll(page, "btnCancelTimer", true);
}
function renderSeriesTimerEditor(page, item, apiClient, user) {
if ("SeriesTimer" !== item.Type) {
return void hideAll(page, "btnCancelSeriesTimer");
}
if (user.Policy.EnableLiveTvManagement) {
require(["seriesRecordingEditor"], function (seriesRecordingEditor) {
seriesRecordingEditor.embed(item, apiClient.serverId(), {
context: page.querySelector(".seriesRecordingEditor")
});
});
page.querySelector(".seriesTimerScheduleSection").classList.remove("hide");
hideAll(page, "btnCancelSeriesTimer", true);
return void renderSeriesTimerSchedule(page, apiClient, item.Id);
}
page.querySelector(".seriesTimerScheduleSection").classList.add("hide");
return void hideAll(page, "btnCancelSeriesTimer");
}
function renderTrackSelections(page, instance, item, forceReload) {
var select = page.querySelector(".selectSource");
if (!item.MediaSources || !itemHelper.supportsMediaSourceSelection(item) || -1 === playbackManager.getSupportedCommands().indexOf("PlayMediaSource") || !playbackManager.canPlay(item)) {
page.querySelector(".trackSelections").classList.add("hide");
select.innerHTML = "";
page.querySelector(".selectVideo").innerHTML = "";
page.querySelector(".selectAudio").innerHTML = "";
page.querySelector(".selectSubtitles").innerHTML = "";
return;
}
playbackManager.getPlaybackMediaSources(item).then(function (mediaSources) {
instance._currentPlaybackMediaSources = mediaSources;
page.querySelector(".trackSelections").classList.remove("hide");
select.setLabel(globalize.translate("LabelVersion"));
var currentValue = select.value;
var selectedId = mediaSources[0].Id;
select.innerHTML = mediaSources.map(function (v) {
var selected = v.Id === selectedId ? " selected" : "";
return '<option value="' + v.Id + '"' + selected + ">" + v.Name + "</option>";
}).join("");
if (mediaSources.length > 1) {
page.querySelector(".selectSourceContainer").classList.remove("hide");
} else {
page.querySelector(".selectSourceContainer").classList.add("hide");
}
if (select.value !== currentValue || forceReload) {
renderVideoSelections(page, mediaSources);
renderAudioSelections(page, mediaSources);
renderSubtitleSelections(page, mediaSources);
}
});
}
function renderVideoSelections(page, mediaSources) {
var mediaSourceId = page.querySelector(".selectSource").value;
var mediaSource = mediaSources.filter(function (m) {
return m.Id === mediaSourceId;
})[0];
var tracks = mediaSource.MediaStreams.filter(function (m) {
return "Video" === m.Type;
});
var select = page.querySelector(".selectVideo");
select.setLabel(globalize.translate("LabelVideo"));
var selectedId = tracks.length ? tracks[0].Index : -1;
select.innerHTML = tracks.map(function (v) {
var selected = v.Index === selectedId ? " selected" : "";
var titleParts = [];
var resolutionText = mediaInfo.getResolutionText(v);
if (resolutionText) {
titleParts.push(resolutionText);
}
if (v.Codec) {
titleParts.push(v.Codec.toUpperCase());
}
return '<option value="' + v.Index + '" ' + selected + ">" + (v.DisplayTitle || titleParts.join(" ")) + "</option>";
}).join("");
select.setAttribute("disabled", "disabled");
if (tracks.length) {
page.querySelector(".selectVideoContainer").classList.remove("hide");
} else {
page.querySelector(".selectVideoContainer").classList.add("hide");
}
}
function renderAudioSelections(page, mediaSources) {
var mediaSourceId = page.querySelector(".selectSource").value;
var mediaSource = mediaSources.filter(function (m) {
return m.Id === mediaSourceId;
})[0];
var tracks = mediaSource.MediaStreams.filter(function (m) {
return "Audio" === m.Type;
});
var select = page.querySelector(".selectAudio");
select.setLabel(globalize.translate("LabelAudio"));
var selectedId = mediaSource.DefaultAudioStreamIndex;
select.innerHTML = tracks.map(function (v) {
var selected = v.Index === selectedId ? " selected" : "";
return '<option value="' + v.Index + '" ' + selected + ">" + v.DisplayTitle + "</option>";
}).join("");
if (tracks.length > 1) {
select.removeAttribute("disabled");
} else {
select.setAttribute("disabled", "disabled");
}
if (tracks.length) {
page.querySelector(".selectAudioContainer").classList.remove("hide");
} else {
page.querySelector(".selectAudioContainer").classList.add("hide");
}
}
function renderSubtitleSelections(page, mediaSources) {
var mediaSourceId = page.querySelector(".selectSource").value;
var mediaSource = mediaSources.filter(function (m) {
return m.Id === mediaSourceId;
})[0];
var tracks = mediaSource.MediaStreams.filter(function (m) {
return "Subtitle" === m.Type;
});
var select = page.querySelector(".selectSubtitles");
select.setLabel(globalize.translate("LabelSubtitles"));
var selectedId = null == mediaSource.DefaultSubtitleStreamIndex ? -1 : mediaSource.DefaultSubtitleStreamIndex;
if (tracks.length) {
var selected = -1 === selectedId ? " selected" : "";
select.innerHTML = '<option value="-1">' + globalize.translate("Off") + "</option>" + tracks.map(function (v) {
selected = v.Index === selectedId ? " selected" : "";
return '<option value="' + v.Index + '" ' + selected + ">" + v.DisplayTitle + "</option>";
}).join("");
page.querySelector(".selectSubtitlesContainer").classList.remove("hide");
} else {
select.innerHTML = "";
page.querySelector(".selectSubtitlesContainer").classList.add("hide");
}
}
function reloadPlayButtons(page, item) {
var canPlay = false;
if ("Program" == item.Type) {
var now = new Date();
if (now >= datetime.parseISO8601Date(item.StartDate, true) && now < datetime.parseISO8601Date(item.EndDate, true)) {
hideAll(page, "btnPlay", true);
canPlay = true;
} else {
hideAll(page, "btnPlay");
}
hideAll(page, "btnResume");
hideAll(page, "btnInstantMix");
hideAll(page, "btnShuffle");
} else if (playbackManager.canPlay(item)) {
hideAll(page, "btnPlay", true);
var enableInstantMix = -1 !== ["Audio", "MusicAlbum", "MusicGenre", "MusicArtist"].indexOf(item.Type);
hideAll(page, "btnInstantMix", enableInstantMix);
var enableShuffle = item.IsFolder || -1 !== ["MusicAlbum", "MusicGenre", "MusicArtist"].indexOf(item.Type);
hideAll(page, "btnShuffle", enableShuffle);
canPlay = true;
hideAll(page, "btnResume", item.UserData && item.UserData.PlaybackPositionTicks > 0);
} else {
hideAll(page, "btnPlay");
hideAll(page, "btnResume");
hideAll(page, "btnInstantMix");
hideAll(page, "btnShuffle");
}
return canPlay;
}
function reloadUserDataButtons(page, item) {
var i;
var length;
var btnPlaystates = page.querySelectorAll(".btnPlaystate");
for (i = 0, length = btnPlaystates.length; i < length; i++) {
var btnPlaystate = btnPlaystates[i];
if (itemHelper.canMarkPlayed(item)) {
btnPlaystate.classList.remove("hide");
btnPlaystate.setItem(item);
} else {
btnPlaystate.classList.add("hide");
btnPlaystate.setItem(null);
}
}
var btnUserRatings = page.querySelectorAll(".btnUserRating");
for (i = 0, length = btnUserRatings.length; i < length; i++) {
var btnUserRating = btnUserRatings[i];
if (itemHelper.canRate(item)) {
btnUserRating.classList.remove("hide");
btnUserRating.setItem(item);
} else {
btnUserRating.classList.add("hide");
btnUserRating.setItem(null);
}
}
}
function getArtistLinksHtml(artists, serverId, context) {
var html = [];
for (var i = 0, length = artists.length; i < length; i++) {
var artist = artists[i];
var href = appRouter.getRouteUrl(artist, {
context: context,
itemType: "MusicArtist",
serverId: serverId
});
html.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + href + '">' + artist.Name + "</a>");
}
return html = html.join(" / ");
}
function renderName(item, container, isStatic, context) {
var parentRoute;
var parentNameHtml = [];
var parentNameLast = false;
if (item.AlbumArtists) {
parentNameHtml.push(getArtistLinksHtml(item.AlbumArtists, item.ServerId, context));
parentNameLast = true;
} else if (item.ArtistItems && item.ArtistItems.length && "MusicVideo" === item.Type) {
parentNameHtml.push(getArtistLinksHtml(item.ArtistItems, item.ServerId, context));
parentNameLast = true;
} else if (item.SeriesName && "Episode" === item.Type) {
parentRoute = appRouter.getRouteUrl({
Id: item.SeriesId,
Name: item.SeriesName,
Type: "Series",
IsFolder: true,
ServerId: item.ServerId
}, {
context: context
});
parentNameHtml.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + parentRoute + '">' + item.SeriesName + "</a>");
} else if (item.IsSeries || item.EpisodeTitle) {
parentNameHtml.push(item.Name);
}
if (item.SeriesName && "Season" === item.Type) {
parentRoute = appRouter.getRouteUrl({
Id: item.SeriesId,
Name: item.SeriesName,
Type: "Series",
IsFolder: true,
ServerId: item.ServerId
}, {
context: context
});
parentNameHtml.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + parentRoute + '">' + item.SeriesName + "</a>");
} else if (null != item.ParentIndexNumber && "Episode" === item.Type) {
parentRoute = appRouter.getRouteUrl({
Id: item.SeasonId,
Name: item.SeasonName,
Type: "Season",
IsFolder: true,
ServerId: item.ServerId
}, {
context: context
});
parentNameHtml.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + parentRoute + '">' + item.SeasonName + "</a>");
} else if (null != item.ParentIndexNumber && item.IsSeries) {
parentNameHtml.push(item.SeasonName || "S" + item.ParentIndexNumber);
} else if (item.Album && item.AlbumId && ("MusicVideo" === item.Type || "Audio" === item.Type)) {
parentRoute = appRouter.getRouteUrl({
Id: item.AlbumId,
Name: item.Album,
Type: "MusicAlbum",
IsFolder: true,
ServerId: item.ServerId
}, {
context: context
});
parentNameHtml.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + parentRoute + '">' + item.Album + "</a>");
} else if (item.Album) {
parentNameHtml.push(item.Album);
}
var html = "";
if (parentNameHtml.length) {
if (parentNameLast) {
html = '<h3 class="parentName" style="margin: .25em 0;">' + parentNameHtml.join(" - ") + "</h3>";
} else {
html = '<h1 class="parentName" style="margin: .1em 0 .25em;">' + parentNameHtml.join(" - ") + "</h1>";
}
}
var name = itemHelper.getDisplayName(item, {
includeParentInfo: false
});
var offset = parentNameLast ? ".25em" : ".5em";
if (html && !parentNameLast) {
html += '<h3 class="itemName" style="margin: .25em 0 .5em;">' + name + '</h3>';
} else {
html = '<h1 class="itemName" style="margin: .1em 0 ' + offset + ';">' + name + "</h1>" + html;
}
if (item.OriginalTitle && item.OriginalTitle != item.Name) {
html += '<h4 class="itemName" style="margin: -' + offset + ' 0 0">' + item.OriginalTitle + '</h4>';
}
container.innerHTML = html;
if (html.length) {
container.classList.remove("hide");
} else {
container.classList.add("hide");
}
}
function setTrailerButtonVisibility(page, item) {
if ((item.LocalTrailerCount || item.RemoteTrailers && item.RemoteTrailers.length) && -1 !== playbackManager.getSupportedCommands().indexOf("PlayTrailers")) {
hideAll(page, "btnPlayTrailer", true);
} else {
hideAll(page, "btnPlayTrailer");
}
}
function renderDetailPageBackdrop(page, item, apiClient) {
var imgUrl;
var screenWidth = screen.availWidth;
var hasbackdrop = false;
var itemBackdropElement = page.querySelector("#itemBackdrop");
var usePrimaryImage = item.MediaType === "Video" && item.Type !== "Movie" && item.Type !== "Trailer" ||
item.MediaType && item.MediaType !== "Video" ||
item.Type === "MusicAlbum" ||
item.Type === "MusicArtist";
if ("Program" === item.Type && item.ImageTags && item.ImageTags.Thumb) {
imgUrl = apiClient.getScaledImageUrl(item.Id, {
type: "Thumb",
index: 0,
tag: item.ImageTags.Thumb
});
itemBackdropElement.classList.remove("noBackdrop");
imageLoader.lazyImage(itemBackdropElement, imgUrl, false);
hasbackdrop = true;
} else if (usePrimaryImage && item.ImageTags && item.ImageTags.Primary) {
imgUrl = apiClient.getScaledImageUrl(item.Id, {
type: "Primary",
index: 0,
tag: item.ImageTags.Primary
});
itemBackdropElement.classList.remove("noBackdrop");
imageLoader.lazyImage(itemBackdropElement, imgUrl, false);
hasbackdrop = true;
} else if (item.BackdropImageTags && item.BackdropImageTags.length) {
imgUrl = apiClient.getScaledImageUrl(item.Id, {
type: "Backdrop",
index: 0,
tag: item.BackdropImageTags[0]
});
itemBackdropElement.classList.remove("noBackdrop");
imageLoader.lazyImage(itemBackdropElement, imgUrl, false);
hasbackdrop = true;
} else if (item.ParentBackdropItemId && item.ParentBackdropImageTags && item.ParentBackdropImageTags.length) {
imgUrl = apiClient.getScaledImageUrl(item.ParentBackdropItemId, {
type: "Backdrop",
index: 0,
tag: item.ParentBackdropImageTags[0]
});
itemBackdropElement.classList.remove("noBackdrop");
imageLoader.lazyImage(itemBackdropElement, imgUrl, false);
hasbackdrop = true;
} else if (item.ImageTags && item.ImageTags.Thumb) {
imgUrl = apiClient.getScaledImageUrl(item.Id, {
type: "Thumb",
index: 0,
tag: item.ImageTags.Thumb
});
itemBackdropElement.classList.remove("noBackdrop");
imageLoader.lazyImage(itemBackdropElement, imgUrl, false);
hasbackdrop = true;
} else {
itemBackdropElement.classList.add("noBackdrop");
itemBackdropElement.style.backgroundImage = "";
}
return hasbackdrop;
}
function reloadFromItem(instance, page, params, item, user) {
var context = params.context;
renderName(item, page.querySelector(".nameContainer"), false, context);
var apiClient = connectionManager.getApiClient(item.ServerId);
renderSeriesTimerEditor(page, item, apiClient, user);
renderTimerEditor(page, item, apiClient, user);
renderImage(page, item, apiClient, user);
renderLogo(page, item, apiClient);
setTitle(item, apiClient);
setInitialCollapsibleState(page, item, apiClient, context, user);
renderDetails(page, item, apiClient, context);
renderTrackSelections(page, instance, item);
if (dom.getWindowSize().innerWidth >= 1000) {
backdrop.setBackdrops([item]);
} else {
backdrop.clear();
}
renderDetailPageBackdrop(page, item, apiClient);
var canPlay = reloadPlayButtons(page, item);
if ((item.LocalTrailerCount || item.RemoteTrailers && item.RemoteTrailers.length) && -1 !== playbackManager.getSupportedCommands().indexOf("PlayTrailers")) {
hideAll(page, "btnPlayTrailer", true);
} else {
hideAll(page, "btnPlayTrailer");
}
setTrailerButtonVisibility(page, item);
if (item.CanDelete && !item.IsFolder) {
hideAll(page, "btnDeleteItem", true);
} else {
hideAll(page, "btnDeleteItem");
}
if ("Program" !== item.Type || canPlay) {
hideAll(page, "mainDetailButtons", true);
} else {
hideAll(page, "mainDetailButtons");
}
showRecordingFields(instance, page, item, user);
var groupedVersions = (item.MediaSources || []).filter(function (g) {
return "Grouping" == g.Type;
});
if (user.Policy.IsAdministrator && groupedVersions.length) {
page.querySelector(".btnSplitVersions").classList.remove("hide");
} else {
page.querySelector(".btnSplitVersions").classList.add("hide");
}
if (itemContextMenu.getCommands(getContextMenuOptions(item, user)).length) {
hideAll(page, "btnMoreCommands", true);
} else {
hideAll(page, "btnMoreCommands");
}
var itemBirthday = page.querySelector("#itemBirthday");
if ("Person" == item.Type && item.PremiereDate) {
try {
var birthday = datetime.parseISO8601Date(item.PremiereDate, true).toDateString();
itemBirthday.classList.remove("hide");
itemBirthday.innerHTML = globalize.translate("BirthDateValue").replace("{0}", birthday);
} catch (err) {
itemBirthday.classList.add("hide");
}
} else {
itemBirthday.classList.add("hide");
}
var itemDeathDate = page.querySelector("#itemDeathDate");
if ("Person" == item.Type && item.EndDate) {
try {
var deathday = datetime.parseISO8601Date(item.EndDate, true).toDateString();
itemDeathDate.classList.remove("hide");
itemDeathDate.innerHTML = globalize.translate("DeathDateValue").replace("{0}", deathday);
} catch (err) {
itemDeathDate.classList.add("hide");
}
} else {
itemDeathDate.classList.add("hide");
}
var itemBirthLocation = page.querySelector("#itemBirthLocation");
if ("Person" == item.Type && item.ProductionLocations && item.ProductionLocations.length) {
var gmap = '<a is="emby-linkbutton" class="button-link textlink" target="_blank" href="https://maps.google.com/maps?q=' + item.ProductionLocations[0] + '">' + item.ProductionLocations[0] + "</a>";
itemBirthLocation.classList.remove("hide");
itemBirthLocation.innerHTML = globalize.translate("BirthPlaceValue").replace("{0}", gmap);
} else {
itemBirthLocation.classList.add("hide");
}
setPeopleHeader(page, item);
loading.hide();
if (item.Type === "Book") {
hideAll(page, "btnDownload", true);
}
try {
require(["focusManager"], function (focusManager) {
[".btnResume", ".btnPlay"].every(function (cls) {
var elems = page.querySelectorAll(cls);
for (var i = 0; i < elems.length; i++) {
if (focusManager.isCurrentlyFocusable(elems[i])) {
focusManager.focus(elems[i]);
return false;
}
}
return true;
});
});
} catch (e) {
console.log(e);
}
}
function logoImageUrl(item, apiClient, options) {
    // Build a scaled "Logo" image URL for the item, falling back to the
    // parent's logo when the item itself has none. Returns null when
    // neither the item nor its parent carries a logo tag.
    var opts = options || {};
    opts.type = "Logo";
    var ownTag = item.ImageTags && item.ImageTags.Logo;
    if (ownTag) {
        opts.tag = ownTag;
        return apiClient.getScaledImageUrl(item.Id, opts);
    }
    if (item.ParentLogoImageTag) {
        opts.tag = item.ParentLogoImageTag;
        return apiClient.getScaledImageUrl(item.ParentLogoItemId, opts);
    }
    return null;
}
function setTitle(item, apiClient) {
    // Show the item's logo image in the page title area when one exists;
    // otherwise clear the page title text.
    var url = logoImageUrl(item, apiClient, {});
    // BUG FIX: the original condition was `if (url = null)` — an assignment
    // that always evaluated falsy, so the logo branch was unreachable and
    // the title was always cleared.
    if (url) {
        var pageTitle = document.querySelector(".pageTitle");
        pageTitle.style.backgroundImage = "url('" + url + "')";
        pageTitle.classList.add("pageTitleWithLogo");
        pageTitle.innerHTML = "";
    } else {
        Emby.Page.setTitle("");
    }
}
function renderLogo(page, item, apiClient) {
    // Lazily load a 400px-wide logo into the detail header, or hide the
    // container when the item has no logo image.
    var detailLogo = page.querySelector(".detailLogo");
    var url = logoImageUrl(item, apiClient, {
        maxWidth: 400
    });
    if (!url) {
        detailLogo.classList.add("hide");
        return;
    }
    detailLogo.classList.remove("hide");
    detailLogo.classList.add("lazy");
    detailLogo.setAttribute("data-src", url);
    imageLoader.lazyImage(detailLogo);
}
function showRecordingFields(instance, page, item, user) {
    // Lazily create the live-TV recording fields for a Program the user may
    // manage; otherwise ensure the container is hidden and empty.
    // No-op once the fields already exist for this instance.
    if (instance.currentRecordingFields) {
        return;
    }
    var recordingFieldsElement = page.querySelector(".recordingFields");
    var canManage = "Program" == item.Type && user.Policy.EnableLiveTvManagement;
    if (!canManage) {
        recordingFieldsElement.classList.add("hide");
        recordingFieldsElement.innerHTML = "";
        return;
    }
    require(["recordingFields"], function (recordingFields) {
        instance.currentRecordingFields = new recordingFields({
            parent: recordingFieldsElement,
            programId: item.Id,
            serverId: item.ServerId
        });
        recordingFieldsElement.classList.remove("hide");
    });
}
function renderUserInfo(page, item) {
    // Show a "Date played" line when the user's data records a last-played
    // date; hide the element otherwise.
    var lastPlayedElement = page.querySelector(".itemLastPlayed");
    var lastPlayedDate = item.UserData && item.UserData.LastPlayedDate;
    if (!lastPlayedDate) {
        lastPlayedElement.classList.add("hide");
        return;
    }
    lastPlayedElement.classList.remove("hide");
    var datePlayed = datetime.parseISO8601Date(lastPlayedDate);
    lastPlayedElement.innerHTML = globalize.translate("DatePlayed") + ": " + datetime.toLocaleDateString(datePlayed) + " " + datetime.getDisplayTime(datePlayed);
}
function renderLinks(linksElem, item) {
    // Fill the external-links element with an "Added on ..." line plus any
    // website / external URLs, hiding the element when nothing applies.
    var parts = [];
    if (item.DateCreated && itemHelper.enableDateAddedDisplay(item)) {
        var dateCreated = datetime.parseISO8601Date(item.DateCreated);
        parts.push(globalize.translate("AddedOnValue", datetime.toLocaleDateString(dateCreated) + " " + datetime.getDisplayTime(dateCreated)));
    }
    var links = [];
    // The website link is suppressed on TV layouts (no browser there).
    if (!layoutManager.tv && item.HomePageUrl) {
        links.push('<a style="color:inherit;" is="emby-linkbutton" class="button-link" href="' + item.HomePageUrl + '" target="_blank">' + globalize.translate("ButtonWebsite") + "</a>");
    }
    if (item.ExternalUrls) {
        item.ExternalUrls.forEach(function (url) {
            links.push('<a style="color:inherit;" is="emby-linkbutton" class="button-link" href="' + url.Url + '" target="_blank">' + url.Name + "</a>");
        });
    }
    if (links.length) {
        parts.push(globalize.translate("LinksValue", links.join(", ")));
    }
    linksElem.innerHTML = parts.join(", ");
    if (parts.length) {
        linksElem.classList.remove("hide");
    } else {
        linksElem.classList.add("hide");
    }
}
function renderDetailImage(page, elem, item, apiClient, editable, imageLoader, indicators) {
    // Render the large detail image for the item into `elem`, choosing the
    // best available image source and shape, with an optional edit link and
    // a watched-progress overlay.
    // Timers and live programs never allow image editing.
    if ("SeriesTimer" === item.Type || "Program" === item.Type) {
        editable = false;
    }
    // Everything except person pages hides the detail image on mobile.
    if ("Person" !== item.Type) {
        elem.classList.add("detailimg-hidemobile");
        page.querySelector(".detailPageContent").classList.add("detailPageContent-nodetailimg");
    } else {
        page.querySelector(".detailPageContent").classList.remove("detailPageContent-nodetailimg");
    }
    var imageTags = item.ImageTags || {};
    if (item.PrimaryImageTag) {
        imageTags.Primary = item.PrimaryImageTag;
    }
    var url;
    var html = "";
    var shape = "portrait";
    var detectRatio = false;
    // Image source priority: primary > backdrop > thumb > disc >
    // album primary > series primary > parent primary. The source picked
    // also determines the container shape class applied below.
    if (imageTags.Primary) {
        url = apiClient.getScaledImageUrl(item.Id, {
            type: "Primary",
            tag: item.ImageTags.Primary
        });
        detectRatio = true;
    } else if (item.BackdropImageTags && item.BackdropImageTags.length) {
        url = apiClient.getScaledImageUrl(item.Id, {
            type: "Backdrop",
            tag: item.BackdropImageTags[0]
        });
        shape = "thumb";
    } else if (imageTags.Thumb) {
        url = apiClient.getScaledImageUrl(item.Id, {
            type: "Thumb",
            tag: item.ImageTags.Thumb
        });
        shape = "thumb";
    } else if (imageTags.Disc) {
        url = apiClient.getScaledImageUrl(item.Id, {
            type: "Disc",
            tag: item.ImageTags.Disc
        });
        shape = "square";
    } else if (item.AlbumId && item.AlbumPrimaryImageTag) {
        url = apiClient.getScaledImageUrl(item.AlbumId, {
            type: "Primary",
            tag: item.AlbumPrimaryImageTag
        });
        shape = "square";
    } else if (item.SeriesId && item.SeriesPrimaryImageTag) {
        url = apiClient.getScaledImageUrl(item.SeriesId, {
            type: "Primary",
            tag: item.SeriesPrimaryImageTag
        });
    } else if (item.ParentPrimaryImageItemId && item.ParentPrimaryImageTag) {
        url = apiClient.getScaledImageUrl(item.ParentPrimaryImageItemId, {
            type: "Primary",
            tag: item.ParentPrimaryImageTag
        });
    }
    html += '<div style="position:relative;">';
    if (editable) {
        // Wrap the image in a link that opens the image gallery/editor.
        html += "<a class='itemDetailGalleryLink' is='emby-linkbutton' style='display:block;padding:2px;margin:0;' href='#'>";
    }
    // For primary images, infer the display shape from the real aspect
    // ratio rather than assuming portrait.
    if (detectRatio && item.PrimaryImageAspectRatio) {
        if (item.PrimaryImageAspectRatio >= 1.48) {
            shape = "thumb";
        } else if (item.PrimaryImageAspectRatio >= 0.85 && item.PrimaryImageAspectRatio <= 1.34) {
            shape = "square";
        }
    }
    // Transparent 1x1 GIF placeholder; the real URL is lazy-loaded below.
    html += "<img class='itemDetailImage lazy' src='data:image/gif;base64,R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs=' />";
    if (editable) {
        html += "</a>";
    }
    // Watched-progress bar; folders and items without user data get none.
    var progressHtml = item.IsFolder || !item.UserData ? "" : indicators.getProgressBarHtml(item);
    html += '<div class="detailImageProgressContainer">';
    if (progressHtml) {
        html += progressHtml;
    }
    html += "</div>";
    html += "</div>";
    elem.innerHTML = html;
    // Apply exactly one of the three shape container classes.
    if ("thumb" == shape) {
        elem.classList.add("thumbDetailImageContainer");
        elem.classList.remove("portraitDetailImageContainer");
        elem.classList.remove("squareDetailImageContainer");
    } else if ("square" == shape) {
        elem.classList.remove("thumbDetailImageContainer");
        elem.classList.remove("portraitDetailImageContainer");
        elem.classList.add("squareDetailImageContainer");
    } else {
        elem.classList.remove("thumbDetailImageContainer");
        elem.classList.add("portraitDetailImageContainer");
        elem.classList.remove("squareDetailImageContainer");
    }
    if (url) {
        imageLoader.lazyImage(elem.querySelector("img"), url);
    }
}
function renderImage(page, item, apiClient, user) {
    // Render the primary detail image. It is editable (click-through to the
    // image editor) for administrators, except for photos shown as-is.
    var editable = user.Policy.IsAdministrator && "Photo" != item.MediaType;
    renderDetailImage(page, page.querySelector(".detailImageContainer"), item, apiClient, editable, imageLoader, indicators);
}
function refreshDetailImageUserData(elem, item) {
    // Re-render only the watched-progress bar overlaid on the detail image.
    var progressContainer = elem.querySelector(".detailImageProgressContainer");
    progressContainer.innerHTML = indicators.getProgressBarHtml(item);
}
function refreshImage(page, item, user) {
    // Refresh only the user-data (progress) overlay on the detail image.
    var container = page.querySelector(".detailImageContainer");
    refreshDetailImageUserData(container, item);
}
function setPeopleHeader(page, item) {
    // Music, books and photos get a generic "People" header; everything
    // else uses "Cast & Crew".
    var useGenericHeader = "Audio" == item.MediaType || "MusicAlbum" == item.Type || "Book" == item.MediaType || "Photo" == item.MediaType;
    var key = useGenericHeader ? "HeaderPeople" : "HeaderCastAndCrew";
    page.querySelector("#peopleHeader").innerHTML = globalize.translate(key);
}
function renderNextUp(page, item, user) {
    // "Next Up" only applies to series: fetch the user's next episodes and
    // render them as cards, hiding the section when there are none.
    var section = page.querySelector(".nextUpSection");
    if ("Series" != item.Type) {
        section.classList.add("hide");
        return;
    }
    var apiClient = connectionManager.getApiClient(item.ServerId);
    apiClient.getNextUpEpisodes({
        SeriesId: item.Id,
        UserId: user.Id
    }).then(function (result) {
        if (result.Items.length) {
            section.classList.remove("hide");
        } else {
            section.classList.add("hide");
        }
        var itemsContainer = section.querySelector(".nextUpItems");
        itemsContainer.innerHTML = cardBuilder.getCardsHtml({
            items: result.Items,
            shape: getThumbShape(false),
            showTitle: true,
            displayAsSpecial: "Season" == item.Type && item.IndexNumber,
            overlayText: false,
            centerText: true,
            overlayPlayButton: true
        });
        imageLoader.lazyChildren(itemsContainer);
    });
}
function setInitialCollapsibleState(page, item, apiClient, context, user) {
    // Decide which collapsible detail sections apply to this item type and
    // kick off rendering for each of them.
    page.querySelector(".collectionItems").innerHTML = "";
    var childrenCollapsible = page.querySelector("#childrenCollapsible");
    if ("Playlist" == item.Type) {
        childrenCollapsible.classList.remove("hide");
        renderPlaylistItems(page, item, user);
    } else if ("Studio" == item.Type || "Person" == item.Type || "Genre" == item.Type || "MusicGenre" == item.Type || "MusicArtist" == item.Type) {
        childrenCollapsible.classList.remove("hide");
        renderItemsByName(page, item, user);
    } else if (item.IsFolder) {
        // Box sets render their children as grouped collection sections, so
        // the plain children section stays hidden for them.
        if ("BoxSet" == item.Type) {
            childrenCollapsible.classList.add("hide");
        }
        renderChildren(page, item);
    } else {
        childrenCollapsible.classList.add("hide");
    }
    if ("Series" == item.Type) {
        renderSeriesSchedule(page, item, user);
        renderNextUp(page, item, user);
    } else {
        page.querySelector(".nextUpSection").classList.add("hide");
    }
    renderScenes(page, item);
    if (item.SpecialFeatureCount && 0 != item.SpecialFeatureCount && "Series" != item.Type) {
        page.querySelector("#specialsCollapsible").classList.remove("hide");
        renderSpecials(page, item, user, 6);
    } else {
        page.querySelector("#specialsCollapsible").classList.add("hide");
    }
    renderCast(page, item, context, enableScrollX() ? null : 12);
    if (item.PartCount && item.PartCount > 1) {
        page.querySelector("#additionalPartsCollapsible").classList.remove("hide");
        renderAdditionalParts(page, item, user);
    } else {
        page.querySelector("#additionalPartsCollapsible").classList.add("hide");
    }
    if ("MusicAlbum" == item.Type) {
        renderMusicVideos(page, item, user);
    } else {
        page.querySelector("#musicVideosCollapsible").classList.add("hide");
    }
}
function renderOverview(elems, item) {
    // Write the item's overview text into each target element, forcing any
    // embedded links to open in a new tab; hide elements when no overview.
    var overview = item.Overview || "";
    for (var i = 0, length = elems.length; i < length; i++) {
        var elem = elems[i];
        if (!overview) {
            elem.innerHTML = "";
            elem.classList.add("hide");
            continue;
        }
        elem.innerHTML = overview;
        elem.classList.remove("hide");
        var anchors = elem.querySelectorAll("a");
        for (var j = 0, length2 = anchors.length; j < length2; j++) {
            anchors[j].setAttribute("target", "_blank");
        }
    }
}
function renderGenres(page, item, apiClient, context, isStatic) {
    // Render the genre list as links into the appropriate library context
    // ("music" items link to MusicGenre pages, everything else to Genre).
    context = context || inferContext(item);
    var type = "music" === context ? "MusicGenre" : "Genre";
    var genres = item.GenreItems || [];
    var html = genres.map(function (p) {
        var href = appRouter.getRouteUrl({
            Name: p.Name,
            Type: type,
            ServerId: item.ServerId,
            Id: p.Id
        }, {
            context: context
        });
        return '<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + href + '">' + p.Name + "</a>";
    }).join(", ");
    var elem = page.querySelector(".genres");
    elem.innerHTML = globalize.translate(genres.length > 1 ? "GenresValue" : "GenreValue", html);
    if (genres.length) {
        elem.classList.remove("hide");
    } else {
        elem.classList.add("hide");
    }
}
function renderDirector(page, item, apiClient, context, isStatic) {
    // Render a comma-separated list of director links below the title.
    var people = item.People || [];
    var directors = people.filter(function (p) {
        return "Director" === p.Type;
    });
    var links = directors.map(function (p) {
        var href = appRouter.getRouteUrl({
            Name: p.Name,
            Type: "Person",
            ServerId: item.ServerId,
            Id: p.Id
        }, {
            context: context
        });
        return '<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + href + '">' + p.Name + "</a>";
    });
    var elem = page.querySelector(".directors");
    elem.innerHTML = globalize.translate(directors.length > 1 ? "DirectorsValue" : "DirectorValue", links.join(", "));
    if (directors.length) {
        elem.classList.remove("hide");
    } else {
        elem.classList.add("hide");
    }
}
function renderDetails(page, item, apiClient, context, isStatic) {
    // Orchestrate the lower detail sections: similar items, "more from",
    // director/genre links, channel guide, tagline, overview, media info,
    // external links and tags.
    renderSimilarItems(page, item, context);
    renderMoreFromSeason(page, item, apiClient);
    renderMoreFromArtist(page, item, apiClient);
    renderDirector(page, item, apiClient, context, isStatic);
    renderGenres(page, item, apiClient, context, isStatic);
    renderChannelGuide(page, apiClient, item);
    // Only the first tagline is displayed.
    var taglineElement = page.querySelector(".tagline");
    if (item.Taglines && item.Taglines.length) {
        taglineElement.classList.remove("hide");
        taglineElement.innerHTML = item.Taglines[0];
    } else {
        taglineElement.classList.add("hide");
    }
    var overview = page.querySelector(".overview");
    var externalLinksElem = page.querySelector(".itemExternalLinks");
    // Seasons, albums and artists keep overview/links visible on mobile;
    // every other type collapses them there.
    if ("Season" !== item.Type && "MusicAlbum" !== item.Type && "MusicArtist" !== item.Type) {
        overview.classList.add("detailsHiddenOnMobile");
        externalLinksElem.classList.add("detailsHiddenOnMobile");
    }
    renderOverview([overview], item);
    var i;
    var itemMiscInfo;
    // Primary media info line; SeriesTimer items never show media info.
    itemMiscInfo = page.querySelectorAll(".itemMiscInfo-primary");
    for (i = 0; i < itemMiscInfo.length; i++) {
        mediaInfo.fillPrimaryMediaInfo(itemMiscInfo[i], item, {
            interactive: true,
            episodeTitle: false,
            subtitles: false
        });
        if (itemMiscInfo[i].innerHTML && "SeriesTimer" !== item.Type) {
            itemMiscInfo[i].classList.remove("hide");
        } else {
            itemMiscInfo[i].classList.add("hide");
        }
    }
    // Secondary media info line, same visibility rule as the primary one.
    itemMiscInfo = page.querySelectorAll(".itemMiscInfo-secondary");
    for (i = 0; i < itemMiscInfo.length; i++) {
        mediaInfo.fillSecondaryMediaInfo(itemMiscInfo[i], item, {
            interactive: true
        });
        if (itemMiscInfo[i].innerHTML && "SeriesTimer" !== item.Type) {
            itemMiscInfo[i].classList.remove("hide");
        } else {
            itemMiscInfo[i].classList.add("hide");
        }
    }
    reloadUserDataButtons(page, item);
    renderLinks(externalLinksElem, item);
    renderUserInfo(page, item);
    renderTags(page, item);
    renderSeriesAirTime(page, item, isStatic);
}
function enableScrollX() {
    // Horizontal (overflow) card scrolling is only used on narrow mobile
    // screens; wider layouts wrap cards vertically instead.
    return browser.mobile && screen.availWidth <= 1000;
}
function getPortraitShape(scrollX) {
    // Default to the global scroll setting when the caller does not say
    // (null/undefined both count as "not specified").
    var useOverflow = null == scrollX ? enableScrollX() : scrollX;
    return useOverflow ? "overflowPortrait" : "portrait";
}
function getSquareShape(scrollX) {
    // Default to the global scroll setting when the caller does not say
    // (null/undefined both count as "not specified").
    var useOverflow = null == scrollX ? enableScrollX() : scrollX;
    return useOverflow ? "overflowSquare" : "square";
}
function getThumbShape(scrollX) {
    // Default to the global scroll setting when the caller does not say
    // (null/undefined both count as "not specified").
    var useOverflow = null == scrollX ? enableScrollX() : scrollX;
    return useOverflow ? "overflowBackdrop" : "backdrop";
}
function renderMoreFromSeason(view, item, apiClient) {
    // "More from <season>": the other episodes of the same season, with the
    // strip scrolled to the current episode's card.
    var section = view.querySelector(".moreFromSeasonSection");
    if (!section) {
        return;
    }
    if ("Episode" !== item.Type || !item.SeasonId || !item.SeriesId) {
        section.classList.add("hide");
        return;
    }
    var userId = apiClient.getCurrentUserId();
    apiClient.getEpisodes(item.SeriesId, {
        SeasonId: item.SeasonId,
        UserId: userId,
        Fields: "ItemCounts,PrimaryImageAspectRatio,BasicSyncInfo,CanDelete,MediaSourceCount"
    }).then(function (result) {
        // A season with fewer than two episodes has nothing "more" to show.
        if (result.Items.length < 2) {
            section.classList.add("hide");
            return;
        }
        section.classList.remove("hide");
        section.querySelector("h2").innerHTML = globalize.translate("MoreFromValue", item.SeasonName);
        var itemsContainer = section.querySelector(".itemsContainer");
        cardBuilder.buildCards(result.Items, {
            parentContainer: section,
            itemsContainer: itemsContainer,
            shape: "autooverflow",
            sectionTitleTagName: "h2",
            scalable: true,
            showTitle: true,
            overlayText: false,
            centerText: true,
            includeParentInfoInTitle: false,
            allowBottomPadding: false
        });
        var card = itemsContainer.querySelector('.card[data-id="' + item.Id + '"]');
        if (card) {
            // Give layout a beat, then scroll so the current episode's card
            // sits at the start of the strip.
            setTimeout(function () {
                section.querySelector(".emby-scroller").toStart(card.previousSibling || card, true);
            }, 100);
        }
    });
}
function renderMoreFromArtist(view, item, apiClient) {
    // For an artist: albums they appear on; for an album: other albums by
    // the same album artist. Hidden for every other item type.
    var section = view.querySelector(".moreFromArtistSection");
    if (!section) {
        return;
    }
    if ("MusicArtist" === item.Type) {
        // "Appears on" requires server-side ContributingArtistIds support.
        if (!apiClient.isMinServerVersion("3.4.1.19")) {
            section.classList.add("hide");
            return;
        }
    } else if ("MusicAlbum" !== item.Type || !item.AlbumArtists || !item.AlbumArtists.length) {
        section.classList.add("hide");
        return;
    }
    var query = {
        IncludeItemTypes: "MusicAlbum",
        Recursive: true,
        ExcludeItemIds: item.Id,
        SortBy: "ProductionYear,SortName",
        SortOrder: "Descending"
    };
    if ("MusicArtist" === item.Type) {
        query.ContributingArtistIds = item.Id;
    } else if (apiClient.isMinServerVersion("3.4.1.18")) {
        query.AlbumArtistIds = item.AlbumArtists[0].Id;
    } else {
        // Older servers only understand the broader ArtistIds filter.
        query.ArtistIds = item.AlbumArtists[0].Id;
    }
    apiClient.getItems(apiClient.getCurrentUserId(), query).then(function (result) {
        if (!result.Items.length) {
            section.classList.add("hide");
            return;
        }
        section.classList.remove("hide");
        var header = "MusicArtist" === item.Type ? globalize.translate("HeaderAppearsOn") : globalize.translate("MoreFromValue", item.AlbumArtists[0].Name);
        section.querySelector("h2").innerHTML = header;
        cardBuilder.buildCards(result.Items, {
            parentContainer: section,
            itemsContainer: section.querySelector(".itemsContainer"),
            shape: "autooverflow",
            sectionTitleTagName: "h2",
            scalable: true,
            coverImage: "MusicArtist" === item.Type || "MusicAlbum" === item.Type,
            showTitle: true,
            showParentTitle: false,
            centerText: true,
            overlayText: false,
            overlayPlayButton: true,
            showYear: true
        });
    });
}
function renderSimilarItems(page, item, context) {
    // "More like this" cards, only shown for playable/bingeable item types.
    var similarCollapsible = page.querySelector("#similarCollapsible");
    if (!similarCollapsible) {
        return;
    }
    var supportedTypes = ["Movie", "Trailer", "Series", "Program", "Recording", "MusicAlbum", "MusicArtist", "Playlist"];
    if (-1 === supportedTypes.indexOf(item.Type)) {
        similarCollapsible.classList.add("hide");
        return;
    }
    similarCollapsible.classList.remove("hide");
    var apiClient = connectionManager.getApiClient(item.ServerId);
    var options = {
        userId: apiClient.getCurrentUserId(),
        limit: 12,
        fields: "PrimaryImageAspectRatio,UserData,CanDelete"
    };
    if ("MusicAlbum" == item.Type && item.AlbumArtists && item.AlbumArtists.length) {
        // Avoid recommending the same artist's own albums back to the user.
        options.ExcludeArtistIds = item.AlbumArtists[0].Id;
    }
    apiClient.getSimilarItems(item.Id, options).then(function (result) {
        if (!result.Items.length) {
            similarCollapsible.classList.add("hide");
            return;
        }
        similarCollapsible.classList.remove("hide");
        var html = cardBuilder.getCardsHtml({
            items: result.Items,
            shape: "autooverflow",
            showParentTitle: "MusicAlbum" == item.Type,
            centerText: true,
            showTitle: true,
            context: context,
            lazy: true,
            showDetailsMenu: true,
            coverImage: "MusicAlbum" == item.Type || "MusicArtist" == item.Type,
            overlayPlayButton: true,
            overlayText: false,
            showYear: "Movie" === item.Type || "Trailer" === item.Type
        });
        var similarContent = similarCollapsible.querySelector(".similarContent");
        similarContent.innerHTML = html;
        imageLoader.lazyChildren(similarContent);
    });
}
function renderSeriesAirTime(page, item, isStatic) {
    // Build the "Airs Mondays at 8 PM on <network>" line for a series.
    // Hidden for non-series items or when no schedule info is available.
    var seriesAirTime = page.querySelector("#seriesAirTime");
    if ("Series" != item.Type) {
        seriesAirTime.classList.add("hide");
        return;
    }
    var html = "";
    if (item.AirDays && item.AirDays.length) {
        html += 7 == item.AirDays.length ? "daily" : item.AirDays.map(function (a) {
            return a + "s";
        }).join(",");
    }
    if (item.AirTime) {
        html += " at " + item.AirTime;
    }
    // BUG FIX: guard against items without a Studios array — the original
    // accessed item.Studios.length unconditionally and threw a TypeError
    // when Studios was missing.
    if (item.Studios && item.Studios.length) {
        if (isStatic) {
            html += " on " + item.Studios[0].Name;
        } else {
            var context = inferContext(item);
            var href = appRouter.getRouteUrl(item.Studios[0], {
                context: context,
                itemType: "Studio",
                serverId: item.ServerId
            });
            html += ' on <a class="textlink button-link" is="emby-linkbutton" href="' + href + '">' + item.Studios[0].Name + "</a>";
        }
    }
    if (html) {
        html = ("Ended" == item.Status ? "Aired " : "Airs ") + html;
        seriesAirTime.innerHTML = html;
        seriesAirTime.classList.remove("hide");
    } else {
        seriesAirTime.classList.add("hide");
    }
}
function renderTags(page, item) {
    // Comma-separated tag list; live programs never show tags.
    var itemTags = page.querySelector(".itemTags");
    var tags = "Program" === item.Type ? [] : (item.Tags || []);
    var tagElements = tags.slice();
    if (tagElements.length) {
        itemTags.innerHTML = globalize.translate("TagsValue", tagElements.join(", "));
        itemTags.classList.remove("hide");
    } else {
        itemTags.innerHTML = "";
        itemTags.classList.add("hide");
    }
}
function renderChildren(page, item) {
    // Render the item's children (seasons, episodes, album tracks, box set
    // contents, artist albums...) into #childrenCollapsible, choosing list
    // vs card layout and scroll behaviour per item type.
    var fields = "ItemCounts,PrimaryImageAspectRatio,BasicSyncInfo,CanDelete,MediaSourceCount";
    var query = {
        ParentId: item.Id,
        Fields: fields
    };
    // Box sets keep their server-side ordering; everything else sorts by name.
    if ("BoxSet" !== item.Type) {
        query.SortBy = "SortName";
    }
    var promise;
    var apiClient = connectionManager.getApiClient(item.ServerId);
    var userId = apiClient.getCurrentUserId();
    // Series and seasons use dedicated endpoints; artists tweak the sort.
    if ("Series" == item.Type) {
        promise = apiClient.getSeasons(item.Id, {
            userId: userId,
            Fields: fields
        });
    } else if ("Season" == item.Type) {
        fields += ",Overview";
        promise = apiClient.getEpisodes(item.SeriesId, {
            seasonId: item.Id,
            userId: userId,
            Fields: fields
        });
    } else if ("MusicArtist" == item.Type) {
        query.SortBy = "ProductionYear,SortName";
    }
    // Fall back to a generic child-items query for all other types.
    promise = promise || apiClient.getItems(apiClient.getCurrentUserId(), query);
    promise.then(function (result) {
        var html = "";
        var scrollX = false;
        var isList = false;
        var childrenItemsContainer = page.querySelector(".childrenItemsContainer");
        if ("MusicAlbum" == item.Type) {
            // Album tracks render as a play-from-here track list.
            html = listView.getListViewHtml({
                items: result.Items,
                smallIcon: true,
                showIndex: true,
                index: "disc",
                showIndexNumberLeft: true,
                playFromHere: true,
                action: "playallfromhere",
                image: false,
                artist: "auto",
                containerAlbumArtists: item.AlbumArtists,
                addToListButton: true
            });
            isList = true;
        } else if ("Series" == item.Type) {
            // Seasons render as portrait cards.
            scrollX = enableScrollX();
            html = cardBuilder.getCardsHtml({
                items: result.Items,
                shape: getPortraitShape(),
                showTitle: true,
                centerText: true,
                lazy: true,
                overlayPlayButton: true,
                allowBottomPadding: !scrollX
            });
        } else if ("Season" == item.Type || "Episode" == item.Type) {
            if ("Episode" !== item.Type) {
                isList = true;
            }
            scrollX = "Episode" == item.Type;
            // An episode with fewer than two siblings has nothing to list.
            if (result.Items.length < 2 && "Episode" === item.Type) {
                return;
            }
            if ("Episode" === item.Type) {
                // Sibling episodes render as thumb cards.
                html = cardBuilder.getCardsHtml({
                    items: result.Items,
                    shape: getThumbShape(scrollX),
                    showTitle: true,
                    displayAsSpecial: "Season" == item.Type && item.IndexNumber,
                    playFromHere: true,
                    overlayText: true,
                    lazy: true,
                    showDetailsMenu: true,
                    overlayPlayButton: true,
                    allowBottomPadding: !scrollX,
                    includeParentInfoInTitle: false
                });
            } else if ("Season" === item.Type) {
                // A season's episodes render as a detailed vertical list.
                html = listView.getListViewHtml({
                    items: result.Items,
                    showIndexNumber: false,
                    enableOverview: true,
                    imageSize: "large",
                    enableSideMediaInfo: false,
                    highlight: false,
                    action: layoutManager.tv ? "resume" : "none",
                    infoButton: true,
                    imagePlayButton: true,
                    includeParentInfoInTitle: false
                });
            }
        }
        if ("BoxSet" !== item.Type) {
            page.querySelector("#childrenCollapsible").classList.remove("hide");
        }
        // Toggle between a horizontal scroller and a vertical wrap/list.
        if (scrollX) {
            childrenItemsContainer.classList.add("scrollX");
            childrenItemsContainer.classList.add("hiddenScrollX");
            childrenItemsContainer.classList.remove("vertical-wrap");
            childrenItemsContainer.classList.remove("vertical-list");
        } else {
            childrenItemsContainer.classList.remove("scrollX");
            childrenItemsContainer.classList.remove("hiddenScrollX");
            childrenItemsContainer.classList.remove("smoothScrollX");
            if (isList) {
                childrenItemsContainer.classList.add("vertical-list");
                childrenItemsContainer.classList.remove("vertical-wrap");
            } else {
                childrenItemsContainer.classList.add("vertical-wrap");
                childrenItemsContainer.classList.remove("vertical-list");
            }
        }
        childrenItemsContainer.innerHTML = html;
        imageLoader.lazyChildren(childrenItemsContainer);
        if ("BoxSet" == item.Type) {
            // Box sets group their contents into broad type sections.
            var collectionItemTypes = [{
                name: globalize.translate("HeaderVideos"),
                mediaType: "Video"
            }, {
                name: globalize.translate("HeaderSeries"),
                type: "Series"
            }, {
                name: globalize.translate("HeaderAlbums"),
                type: "MusicAlbum"
            }, {
                name: globalize.translate("HeaderBooks"),
                type: "Book"
            }];
            renderCollectionItems(page, item, collectionItemTypes, result.Items);
        }
    });
    // Section header text depends on the child type being listed.
    if ("Season" == item.Type) {
        page.querySelector("#childrenTitle").innerHTML = globalize.translate("HeaderEpisodes");
    } else if ("Series" == item.Type) {
        page.querySelector("#childrenTitle").innerHTML = globalize.translate("HeaderSeasons");
    } else if ("MusicAlbum" == item.Type) {
        page.querySelector("#childrenTitle").innerHTML = globalize.translate("HeaderTracks");
    } else {
        page.querySelector("#childrenTitle").innerHTML = globalize.translate("HeaderItems");
    }
    // Albums and seasons integrate the list without a separate header.
    if ("MusicAlbum" == item.Type || "Season" == item.Type) {
        page.querySelector(".childrenSectionHeader").classList.add("hide");
        page.querySelector("#childrenCollapsible").classList.add("verticalSection-extrabottompadding");
    } else {
        page.querySelector(".childrenSectionHeader").classList.remove("hide");
    }
}
function renderItemsByName(page, item, user) {
    // Delegate to the items-by-name detail module (artists, genres,
    // studios, people).
    require(["scripts/itembynamedetailpage"], function () {
        window.ItemsByName.renderItems(page, item);
    });
}
function renderPlaylistItems(page, item, user) {
    // Delegate rendering of playlist contents to the playlist editor module.
    require(["scripts/playlistedit"], function () {
        PlaylistViewer.render(page, item);
    });
}
function renderProgramsForChannel(page, result) {
    // Group the channel's upcoming programs by calendar day and render one
    // titled list section per day.
    var html = "";
    var currentItems = [];
    var currentStartDate = null;
    for (var i = 0, length = result.Items.length; i < length; i++) {
        var item = result.Items[i];
        var itemStartDate = datetime.parseISO8601Date(item.StartDate);
        // A new day starts: flush the previous day's section first.
        if (!(currentStartDate && currentStartDate.toDateString() === itemStartDate.toDateString())) {
            if (currentItems.length) {
                html += buildProgramDaySection(currentItems, currentStartDate);
            }
            currentStartDate = itemStartDate;
            currentItems = [];
        }
        currentItems.push(item);
    }
    // Flush the trailing day.
    if (currentItems.length) {
        html += buildProgramDaySection(currentItems, currentStartDate);
    }
    page.querySelector(".programGuide").innerHTML = html;
}
// Build one day's guide section: a localized date heading plus a vertical
// list of that day's programs. Extracted from the markup that was
// duplicated twice inside renderProgramsForChannel.
function buildProgramDaySection(items, startDate) {
    var html = '<div class="verticalSection verticalDetailSection">';
    html += '<h2 class="sectionTitle padded-left">' + datetime.toLocaleDateString(startDate, {
        weekday: "long",
        month: "long",
        day: "numeric"
    }) + "</h2>";
    html += '<div is="emby-itemscontainer" class="vertical-list padded-left padded-right">' + listView.getListViewHtml({
        items: items,
        enableUserDataButtons: false,
        showParentTitle: true,
        image: false,
        showProgramTime: true,
        mediaInfo: false,
        parentTitleWithTitle: true
    }) + "</div></div>";
    return html;
}
function renderChannelGuide(page, apiClient, item) {
    // Only TV channels have a program guide: fetch the channel's upcoming
    // programs and render them grouped by day.
    if ("TvChannel" !== item.Type) {
        return;
    }
    page.querySelector(".programGuideSection").classList.remove("hide");
    apiClient.getLiveTvPrograms({
        ChannelIds: item.Id,
        UserId: apiClient.getCurrentUserId(),
        HasAired: false,
        SortBy: "StartDate",
        EnableTotalRecordCount: false,
        EnableImages: false,
        ImageTypeLimit: 0,
        EnableUserData: false
    }).then(function (result) {
        renderProgramsForChannel(page, result);
    });
}
function renderSeriesSchedule(page, item, user) {
    // Upcoming live-TV airings of this series from the guide (max 50).
    var apiClient = connectionManager.getApiClient(item.ServerId);
    apiClient.getLiveTvPrograms({
        UserId: apiClient.getCurrentUserId(),
        HasAired: false,
        SortBy: "StartDate",
        EnableTotalRecordCount: false,
        EnableImages: false,
        ImageTypeLimit: 0,
        Limit: 50,
        EnableUserData: false,
        LibrarySeriesId: item.Id
    }).then(function (result) {
        var scheduleSection = page.querySelector("#seriesScheduleSection");
        if (result.Items.length) {
            scheduleSection.classList.remove("hide");
        } else {
            scheduleSection.classList.add("hide");
        }
        page.querySelector("#seriesScheduleList").innerHTML = listView.getListViewHtml({
            items: result.Items,
            enableUserDataButtons: false,
            showParentTitle: false,
            image: false,
            showProgramDateTime: true,
            mediaInfo: false,
            showTitle: true,
            moreButton: false,
            action: "programdialog"
        });
        loading.hide();
    });
}
function inferContext(item) {
    // Map an item type to the library context used for routing, or null
    // when the type belongs to no specific library section.
    switch (item.Type) {
        case "Movie":
        case "BoxSet":
            return "movies";
        case "Series":
        case "Season":
        case "Episode":
            return "tvshows";
        case "MusicArtist":
        case "MusicAlbum":
        case "Audio":
        case "AudioBook":
            return "music";
        case "Program":
            return "livetv";
        default:
            return null;
    }
}
function filterItemsByCollectionItemType(items, typeInfo) {
    // Match on MediaType when the type descriptor specifies one, otherwise
    // on the item Type.
    return items.filter(function (item) {
        return typeInfo.mediaType ? item.MediaType == typeInfo.mediaType : item.Type == typeInfo.type;
    });
}
function canPlaySomeItemInCollection(items) {
    // True when at least one collection item is playable on this client.
    // BUG FIX: the original loop assigned to an undeclared `length`
    // variable, leaking a global and throwing under strict mode.
    return items.some(function (item) {
        return playbackManager.canPlay(item);
    });
}
function renderCollectionItems(page, parentItem, types, items) {
    // Render a box set's contents grouped by item type, plus an "other"
    // bucket for anything not matching one of the known groups.
    page.querySelector(".collectionItems").innerHTML = "";
    var i;
    var length;
    for (i = 0, length = types.length; i < length; i++) {
        var matching = filterItemsByCollectionItemType(items, types[i]);
        if (matching.length) {
            renderCollectionItemType(page, parentItem, types[i], matching);
        }
    }
    var otherType = {
        name: globalize.translate("HeaderOtherItems")
    };
    var leftovers = items.filter(function (curr) {
        return !types.some(function (t) {
            return filterItemsByCollectionItemType([curr], t).length > 0;
        });
    });
    if (leftovers.length) {
        renderCollectionItemType(page, parentItem, otherType, leftovers);
    }
    if (!items.length) {
        // Show an empty "Items" section rather than nothing at all.
        renderCollectionItemType(page, parentItem, {
            name: globalize.translate("HeaderItems")
        }, items);
    }
    var containers = page.querySelectorAll(".collectionItemsContainer");
    var notifyRefreshNeeded = function () {
        renderChildren(page, parentItem);
    };
    for (i = 0, length = containers.length; i < length; i++) {
        containers[i].notifyRefreshNeeded = notifyRefreshNeeded;
    }
    // If nothing in the collection can be played, hide play and shuffle.
    if (!canPlaySomeItemInCollection(items)) {
        hideAll(page, "btnPlay", false);
        hideAll(page, "btnShuffle", false);
    }
}
function renderCollectionItemType(page, parentItem, type, items) {
    // Append one titled card section for this group of collection items,
    // including an "add to collection" helper button in the header.
    var shape = "MusicAlbum" == type.type ? getSquareShape(false) : getPortraitShape(false);
    var html = '<div class="verticalSection">' +
        '<div class="sectionTitleContainer sectionTitleContainer-cards padded-left">' +
        '<h2 class="sectionTitle sectionTitle-cards">' +
        "<span>" + type.name + "</span>" +
        "</h2>" +
        '<button class="btnAddToCollection sectionTitleButton" type="button" is="paper-icon-button-light" style="margin-left:1em;"><i class="md-icon" icon="add"></i></button>' +
        "</div>" +
        '<div is="emby-itemscontainer" class="itemsContainer collectionItemsContainer vertical-wrap padded-left padded-right">';
    html += cardBuilder.getCardsHtml({
        items: items,
        shape: shape,
        showTitle: true,
        centerText: true,
        lazy: true,
        showDetailsMenu: true,
        overlayMoreButton: true,
        showAddToCollection: false,
        showRemoveFromCollection: true,
        collectionId: parentItem.Id
    });
    html += "</div>";
    html += "</div>";
    var collectionItems = page.querySelector(".collectionItems");
    collectionItems.insertAdjacentHTML("beforeend", html);
    imageLoader.lazyChildren(collectionItems);
    collectionItems.querySelector(".btnAddToCollection").addEventListener("click", function () {
        // The button only explains how collections work, with a link to
        // further documentation.
        require(["alert"], function (alert) {
            alert({
                text: globalize.translate("AddItemToCollectionHelp"),
                html: globalize.translate("AddItemToCollectionHelp") + '<br/><br/><a is="emby-linkbutton" class="button-link" target="_blank" href="https://web.archive.org/web/20181216120305/https://github.com/MediaBrowser/Wiki/wiki/Collections">' + globalize.translate("ButtonLearnMore") + "</a>"
            });
        });
    });
}
function renderMusicVideos(page, item, user) {
    // Music videos belonging to this album, shown in their own section.
    var apiClient = connectionManager.getApiClient(item.ServerId);
    apiClient.getItems(user.Id, {
        SortBy: "SortName",
        SortOrder: "Ascending",
        IncludeItemTypes: "MusicVideo",
        Recursive: true,
        Fields: "PrimaryImageAspectRatio,BasicSyncInfo,CanDelete,MediaSourceCount",
        AlbumIds: item.Id
    }).then(function (result) {
        if (!result.Items.length) {
            page.querySelector("#musicVideosCollapsible").classList.add("hide");
            return;
        }
        page.querySelector("#musicVideosCollapsible").classList.remove("hide");
        var musicVideosContent = page.querySelector(".musicVideosContent");
        musicVideosContent.innerHTML = getVideosHtml(result.Items, user);
        imageLoader.lazyChildren(musicVideosContent);
    });
}
function renderAdditionalParts(page, item, user) {
connectionManager.getApiClient(item.ServerId).getAdditionalVideoParts(user.Id, item.Id).then(function (result) {
if (result.Items.length) {
page.querySelector("#additionalPartsCollapsible").classList.remove("hide");
var additionalPartsContent = page.querySelector("#additionalPartsContent");
additionalPartsContent.innerHTML = getVideosHtml(result.Items, user);
imageLoader.lazyChildren(additionalPartsContent);
} else {
page.querySelector("#additionalPartsCollapsible").classList.add("hide");
}
});
}
function renderScenes(page, item) {
var chapters = item.Chapters || [];
if (chapters.length && !chapters[0].ImageTag && (chapters = []), chapters.length) {
page.querySelector("#scenesCollapsible").classList.remove("hide");
var scenesContent = page.querySelector("#scenesContent");
require(["chaptercardbuilder"], function (chaptercardbuilder) {
chaptercardbuilder.buildChapterCards(item, chapters, {
itemsContainer: scenesContent,
width: 400,
backdropShape: "overflowBackdrop",
squareShape: "overflowSquare"
});
});
} else {
page.querySelector("#scenesCollapsible").classList.add("hide");
}
}
function getVideosHtml(items, user, limit, moreButtonClass) {
var html = cardBuilder.getCardsHtml({
items: items,
shape: "auto",
showTitle: true,
action: "play",
overlayText: false,
centerText: true,
showRuntime: true
});
if (limit && items.length > limit) {
html += '<p style="margin: 0;padding-left:5px;"><button is="emby-button" type="button" class="raised more ' + moreButtonClass + '">' + globalize.translate("ButtonMore") + "</button></p>";
}
return html;
}
function renderSpecials(page, item, user, limit) {
connectionManager.getApiClient(item.ServerId).getSpecialFeatures(user.Id, item.Id).then(function (specials) {
var specialsContent = page.querySelector("#specialsContent");
specialsContent.innerHTML = getVideosHtml(specials, user, limit, "moreSpecials");
imageLoader.lazyChildren(specialsContent);
});
}
function renderCast(page, item, context, limit, isStatic) {
var people = (item.People || []).filter(function (p) {
return "Director" !== p.Type;
});
if (!people.length) {
return void page.querySelector("#castCollapsible").classList.add("hide");
}
page.querySelector("#castCollapsible").classList.remove("hide");
var castContent = page.querySelector("#castContent");
require(["peoplecardbuilder"], function (peoplecardbuilder) {
peoplecardbuilder.buildPeopleCards(people, {
itemsContainer: castContent,
coverImage: true,
serverId: item.ServerId,
width: 160,
shape: getPortraitShape()
});
});
}
function itemDetailPage() {
var self = this;
self.setInitialCollapsibleState = setInitialCollapsibleState;
self.renderDetails = renderDetails;
self.renderCast = renderCast;
}
function bindAll(view, selector, eventName, fn) {
var i;
var length;
var elems = view.querySelectorAll(selector);
for (i = 0, length = elems.length; i < length; i++) {
elems[i].addEventListener(eventName, fn);
}
}
function onTrackSelectionsSubmit(e) {
e.preventDefault();
return false;
}
window.ItemDetailPage = new itemDetailPage();
return function (view, params) {
function reload(instance, page, params) {
loading.show();
var apiClient = params.serverId ? connectionManager.getApiClient(params.serverId) : ApiClient;
var promises = [getPromise(apiClient, params), apiClient.getCurrentUser()];
Promise.all(promises).then(function (responses) {
var item = responses[0];
var user = responses[1];
currentItem = item;
reloadFromItem(instance, page, params, item, user);
});
}
function splitVersions(instance, page, apiClient, params) {
require(["confirm"], function (confirm) {
confirm("Are you sure you wish to split the media sources into separate items?", "Split Media Apart").then(function () {
loading.show();
apiClient.ajax({
type: "DELETE",
url: apiClient.getUrl("Videos/" + params.id + "/AlternateSources")
}).then(function () {
loading.hide();
reload(instance, page, params);
});
});
});
}
function getPlayOptions(startPosition) {
var audioStreamIndex = view.querySelector(".selectAudio").value || null;
return {
startPositionTicks: startPosition,
mediaSourceId: view.querySelector(".selectSource").value,
audioStreamIndex: audioStreamIndex,
subtitleStreamIndex: view.querySelector(".selectSubtitles").value
};
}
function playItem(item, startPosition) {
var playOptions = getPlayOptions(startPosition);
playOptions.items = [item];
playbackManager.play(playOptions);
}
function playTrailer(page) {
playbackManager.playTrailers(currentItem);
}
function playCurrentItem(button, mode) {
var item = currentItem;
if ("Program" === item.Type) {
var apiClient = connectionManager.getApiClient(item.ServerId);
return void apiClient.getLiveTvChannel(item.ChannelId, apiClient.getCurrentUserId()).then(function (channel) {
playbackManager.play({
items: [channel]
});
});
}
playItem(item, item.UserData && "resume" === mode ? item.UserData.PlaybackPositionTicks : 0);
}
function onPlayClick() {
playCurrentItem(this, this.getAttribute("data-mode"));
}
function onInstantMixClick() {
playbackManager.instantMix(currentItem);
}
function onShuffleClick() {
playbackManager.shuffle(currentItem);
}
function onDeleteClick() {
require(["deleteHelper"], function (deleteHelper) {
deleteHelper.deleteItem({
item: currentItem,
navigate: true
});
});
}
function onCancelSeriesTimerClick() {
require(["recordingHelper"], function (recordingHelper) {
recordingHelper.cancelSeriesTimerWithConfirmation(currentItem.Id, currentItem.ServerId).then(function () {
Dashboard.navigate("livetv.html");
});
});
}
function onCancelTimerClick() {
require(["recordingHelper"], function (recordingHelper) {
recordingHelper.cancelTimer(connectionManager.getApiClient(currentItem.ServerId), currentItem.TimerId).then(function () {
reload(self, view, params);
});
});
}
function onPlayTrailerClick() {
playTrailer(view);
}
function onDownloadChange() {
reload(self, view, params);
}
function onDownloadClick() {
require(['fileDownloader'], function (fileDownloader) {
var downloadHref = apiClient.getItemDownloadUrl(currentItem.Id);
fileDownloader.download([{
url: downloadHref,
itemId: currentItem.Id,
serverId: currentItem.serverId
}]);
});
}
function onMoreCommandsClick() {
var button = this;
apiClient.getCurrentUser().then(function (user) {
itemContextMenu.show(getContextMenuOptions(currentItem, user, button)).then(function (result) {
if (result.deleted) {
appRouter.goHome();
} else if (result.updated) {
reload(self, view, params);
}
});
});
}
function onPlayerChange() {
renderTrackSelections(view, self, currentItem);
setTrailerButtonVisibility(view, currentItem);
}
function editImages() {
return new Promise(function (resolve, reject) {
require(["imageEditor"], function (imageEditor) {
imageEditor.show({
itemId: currentItem.Id,
serverId: currentItem.ServerId
}).then(resolve, reject);
});
});
}
function onWebSocketMessage(e, data) {
var msg = data;
if ("UserDataChanged" === msg.MessageType && currentItem && msg.Data.UserId == apiClient.getCurrentUserId()) {
var key = currentItem.UserData.Key;
var userData = msg.Data.UserDataList.filter(function (u) {
return u.Key == key;
})[0];
if (userData) {
currentItem.UserData = userData;
reloadPlayButtons(view, currentItem);
apiClient.getCurrentUser().then(function (user) {
refreshImage(view, currentItem, user);
});
}
}
}
var currentItem;
var self = this;
var apiClient = params.serverId ? connectionManager.getApiClient(params.serverId) : ApiClient;
view.querySelectorAll(".btnPlay");
bindAll(view, ".btnPlay", "click", onPlayClick);
bindAll(view, ".btnResume", "click", onPlayClick);
bindAll(view, ".btnInstantMix", "click", onInstantMixClick);
bindAll(view, ".btnShuffle", "click", onShuffleClick);
bindAll(view, ".btnPlayTrailer", "click", onPlayTrailerClick);
bindAll(view, ".btnCancelSeriesTimer", "click", onCancelSeriesTimerClick);
bindAll(view, ".btnCancelTimer", "click", onCancelTimerClick);
bindAll(view, ".btnDeleteItem", "click", onDeleteClick);
bindAll(view, ".btnDownload", "click", onDownloadClick);
view.querySelector(".btnMoreCommands i").innerHTML = "";
view.querySelector(".trackSelections").addEventListener("submit", onTrackSelectionsSubmit);
view.querySelector(".btnSplitVersions").addEventListener("click", function () {
splitVersions(self, view, apiClient, params);
});
bindAll(view, ".btnMoreCommands", "click", onMoreCommandsClick);
view.querySelector(".selectSource").addEventListener("change", function () {
renderVideoSelections(view, self._currentPlaybackMediaSources);
renderAudioSelections(view, self._currentPlaybackMediaSources);
renderSubtitleSelections(view, self._currentPlaybackMediaSources);
});
view.addEventListener("click", function (e) {
if (dom.parentWithClass(e.target, "moreScenes")) {
apiClient.getCurrentUser().then(function (user) {
renderScenes(view, currentItem);
});
} else if (dom.parentWithClass(e.target, "morePeople")) {
renderCast(view, currentItem, params.context);
} else if (dom.parentWithClass(e.target, "moreSpecials")) {
apiClient.getCurrentUser().then(function (user) {
renderSpecials(view, currentItem, user);
});
}
});
view.querySelector(".detailImageContainer").addEventListener("click", function (e) {
if (dom.parentWithClass(e.target, "itemDetailGalleryLink")) {
editImages().then(function () {
reload(self, view, params);
});
}
});
view.addEventListener("viewshow", function (e) {
var page = this;
libraryMenu.setTransparentMenu(true);
if (e.detail.isRestored) {
if (currentItem) {
setTitle(currentItem, connectionManager.getApiClient(currentItem.ServerId));
renderTrackSelections(page, self, currentItem, true);
}
} else {
reload(self, page, params);
}
events.on(apiClient, "message", onWebSocketMessage);
events.on(playbackManager, "playerchange", onPlayerChange);
});
view.addEventListener("viewbeforehide", function () {
events.off(apiClient, "message", onWebSocketMessage);
events.off(playbackManager, "playerchange", onPlayerChange);
libraryMenu.setTransparentMenu(false);
});
view.addEventListener("viewdestroy", function () {
currentItem = null;
self._currentPlaybackMediaSources = null;
self.currentRecordingFields = null;
});
};
});
| 1 | 12,324 | Why not just check the backdrop setting here as well for the time being? We can combine the code later but at least then it would be configurable if people like the backgrounds. | jellyfin-jellyfin-web | js |
@@ -41,7 +41,7 @@ namespace AutoRest.Swagger.Validation
/// <summary>
/// The severity of this message (ie, debug/info/warning/error/fatal, etc)
/// </summary>
- public override Category Severity => Category.Error;
+ public override Category Severity => Category.Warning;
///// <summary>
///// Validates whether property names are camelCase in body parameters. | 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Text.RegularExpressions;
using AutoRest.Core.Logging;
using AutoRest.Core.Properties;
using AutoRest.Swagger.Validation.Core;
using AutoRest.Swagger.Model;
using AutoRest.Swagger.Model.Utilities;
namespace AutoRest.Swagger.Validation
{
    /// <summary>
    /// Validation rule: property names in request-body schemas must be camelCase.
    /// </summary>
    public class BodyPropertiesNamesCamelCase : TypedRule<Dictionary<string, Operation>>
    {
        /// <summary>
        /// Id of the Rule.
        /// </summary>
        public override string Id => "M3016";
        /// <summary>
        /// Violation category of the Rule.
        /// </summary>
        public override ValidationCategory ValidationCategory => ValidationCategory.RPCViolation;
        /// <summary>
        /// The template message for this Rule.
        /// </summary>
        /// <remarks>
        /// This may contain placeholders '{0}' for parameterized messages.
        /// </remarks>
        public override string MessageTemplate => Resources.BodyPropertyNameCamelCase;
        /// <summary>
        /// The severity of this message (ie, debug/info/warning/error/fatal, etc)
        /// </summary>
        public override Category Severity => Category.Error;
        /// <summary>
        /// Validates whether property names are camelCase in body parameters.
        /// Walks every operation on the path, inspects only body parameters
        /// that carry a schema, and yields one message (with a camelCased
        /// suggestion) per offending property name.
        /// </summary>
        public override IEnumerable<ValidationMessage> GetValidationMessages(Dictionary<string, Operation> path, RuleContext context)
        {
            foreach (string operation in path.Keys)
            {
                if (path[operation]?.Parameters != null)
                {
                    foreach (SwaggerParameter param in path[operation].Parameters)
                    {
                        if (param.In == ParameterLocation.Body && param.Schema?.Properties != null)
                        {
                            foreach (KeyValuePair<string, Schema> prop in param.Schema?.Properties)
                            {
                                if (!ValidationUtilities.IsNameCamelCase(prop.Key))
                                {
                                    yield return new ValidationMessage(new FileObjectPath(context.File, context.Path), this, prop.Key, ValidationUtilities.GetCamelCasedSuggestion(prop.Key));
                                }
                            }
                        }
                    }
                }
            }
        }
    }
} | 1 | 24,736 | why are we turning this one into a Warning? | Azure-autorest | java |
@@ -631,7 +631,7 @@ public class GUI implements CPDListener {
int separatorPos = sourceId.lastIndexOf(File.separatorChar);
label = "..." + sourceId.substring(separatorPos);
} else {
- label = '(' + sourceIDs.size() + " separate files)";
+ label = '(' + String.valueOf(sourceIDs.size()) + " separate files)";
}
match.setLabel(label); | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.cpd;
import java.awt.BorderLayout;
import java.awt.Component;
import java.awt.Dimension;
import java.awt.Point;
import java.awt.Toolkit;
import java.awt.datatransfer.StringSelection;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.ItemEvent;
import java.awt.event.ItemListener;
import java.awt.event.KeyEvent;
import java.awt.event.MouseAdapter;
import java.awt.event.MouseEvent;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import javax.swing.AbstractButton;
import javax.swing.BorderFactory;
import javax.swing.JButton;
import javax.swing.JCheckBox;
import javax.swing.JCheckBoxMenuItem;
import javax.swing.JComboBox;
import javax.swing.JComponent;
import javax.swing.JFileChooser;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JMenu;
import javax.swing.JMenuBar;
import javax.swing.JMenuItem;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import javax.swing.JProgressBar;
import javax.swing.JScrollPane;
import javax.swing.JTable;
import javax.swing.JTextArea;
import javax.swing.JTextField;
import javax.swing.KeyStroke;
import javax.swing.ScrollPaneConstants;
import javax.swing.SwingConstants;
import javax.swing.Timer;
import javax.swing.event.ListSelectionEvent;
import javax.swing.event.ListSelectionListener;
import javax.swing.table.AbstractTableModel;
import javax.swing.table.DefaultTableCellRenderer;
import javax.swing.table.JTableHeader;
import javax.swing.table.TableColumn;
import javax.swing.table.TableColumnModel;
import javax.swing.table.TableModel;
import net.sourceforge.pmd.PMDVersion;
import net.sourceforge.pmd.cpd.renderer.CPDRenderer;
public class GUI implements CPDListener {
// private interface Renderer {
// String render(Iterator<Match> items);
// }
    // Table of {menu label, renderer} pairs backing the "Save as ..." menu.
    // Each entry adapts one concrete report renderer to the CPDRenderer
    // callback shape used by SaveListener.
    private static final Object[][] RENDERER_SETS = new Object[][] { { "Text", new CPDRenderer() {
        @Override
        public void render(Iterator<Match> items, Writer writer) throws IOException {
            new SimpleRenderer().render(items, writer);
        }
    }, }, { "XML", new CPDRenderer() {
        @Override
        public void render(Iterator<Match> items, Writer writer) throws IOException {
            new XMLRenderer().render(items, writer);
        }
    }, }, { "CSV (comma)", new CPDRenderer() {
        @Override
        public void render(Iterator<Match> items, Writer writer) throws IOException {
            new CSVRenderer(',').render(items, writer);
        }
    }, }, { "CSV (tab)", new CPDRenderer() {
        @Override
        public void render(Iterator<Match> items, Writer writer) throws IOException {
            new CSVRenderer('\t').render(items, writer);
        }
    }, }, };
    /**
     * Per-language UI configuration: how to instantiate the CPD language,
     * which "ignore ..." checkboxes apply to it, and which file extensions
     * it recognizes.
     */
    private abstract static class LanguageConfig {

        /** Creates the language instance, applying the given tokenizer properties. */
        public abstract Language languageFor(Properties p);

        public boolean canIgnoreIdentifiers() {
            return false;
        }

        public boolean canIgnoreLiterals() {
            return false;
        }

        public boolean canIgnoreAnnotations() {
            return false;
        }

        public boolean canIgnoreUsings() {
            return false;
        }

        /** File extensions handled by this language; a single "" means user-supplied. */
        public abstract String[] extensions();
    }
    // {label, LanguageConfig} pairs for the language combo box: one entry per
    // supported CPD language plus a trailing "by extension..." catch-all.
    private static final Object[][] LANGUAGE_SETS;

    static {
        LANGUAGE_SETS = new Object[LanguageFactory.supportedLanguages.length + 1][2];

        int index;
        for (index = 0; index < LanguageFactory.supportedLanguages.length; index++) {
            final String terseName = LanguageFactory.supportedLanguages[index];
            final Language lang = LanguageFactory.createLanguage(terseName);
            LANGUAGE_SETS[index][0] = lang.getName();
            LANGUAGE_SETS[index][1] = new LanguageConfig() {
                @Override
                public Language languageFor(Properties p) {
                    lang.setProperties(p);
                    return lang;
                }

                @Override
                public String[] extensions() {
                    List<String> exts = lang.getExtensions();
                    return exts.toArray(new String[0]);
                }

                // Only Java supports ignoring annotations/identifiers/literals;
                // only C# supports ignoring using directives.
                @Override
                public boolean canIgnoreAnnotations() {
                    return "java".equals(terseName);
                }

                @Override
                public boolean canIgnoreIdentifiers() {
                    return "java".equals(terseName);
                }

                @Override
                public boolean canIgnoreLiterals() {
                    return "java".equals(terseName);
                }

                @Override
                public boolean canIgnoreUsings() {
                    return "cs".equals(terseName);
                }
            };
        }
        // Last slot: let the user type an arbitrary extension.
        LANGUAGE_SETS[index][0] = "by extension...";
        LANGUAGE_SETS[index][1] = new LanguageConfig() {
            @Override
            public Language languageFor(Properties p) {
                return LanguageFactory.createLanguage(LanguageFactory.BY_EXTENSION, p);
            }

            @Override
            public String[] extensions() {
                return new String[] { "" };
            }
        };
    }
    // Default minimum duplicate size (in tokens) pre-filled in the UI.
    private static final int DEFAULT_CPD_MINIMUM_LENGTH = 75;
    // Label -> config lookup for the language combo box (filled in a static block below).
    private static final Map<String, LanguageConfig> LANGUAGE_CONFIGS_BY_LABEL = new HashMap<>(LANGUAGE_SETS.length);
    // Keyboard shortcuts registered on the results table.
    private static final KeyStroke COPY_KEY_STROKE = KeyStroke.getKeyStroke(KeyEvent.VK_C, ActionEvent.CTRL_MASK,
            false);
    private static final KeyStroke DELETE_KEY_STROKE = KeyStroke.getKeyStroke(KeyEvent.VK_DELETE, 0);
    /**
     * Describes one column of the results table: header label, cell alignment,
     * preferred width (-1 = flexible), and the comparator used when the user
     * clicks the header to sort.
     */
    private class ColumnSpec {
        private String label;
        private int alignment;
        private int width;
        private Comparator<Match> sorter;

        ColumnSpec(String aLabel, int anAlignment, int aWidth, Comparator<Match> aSorter) {
            label = aLabel;
            alignment = anAlignment;
            width = aWidth;
            sorter = aSorter;
        }

        public String label() {
            return label;
        }

        public int alignment() {
            return alignment;
        }

        public int width() {
            return width;
        }

        public Comparator<Match> sorter() {
            return sorter;
        }
    }
    // Column layout of the results table.
    private final ColumnSpec[] matchColumns = new ColumnSpec[] {
        new ColumnSpec("Source", SwingConstants.LEFT, -1, Match.LABEL_COMPARATOR),
        new ColumnSpec("Matches", SwingConstants.RIGHT, 60, Match.MATCHES_COMPARATOR),
        new ColumnSpec("Lines", SwingConstants.RIGHT, 45, Match.LINES_COMPARATOR), };

    // Populate the label -> config map from the language table.
    static {
        for (int i = 0; i < LANGUAGE_SETS.length; i++) {
            LANGUAGE_CONFIGS_BY_LABEL.put((String) LANGUAGE_SETS[i][0], (LanguageConfig) LANGUAGE_SETS[i][1]);
        }
    }
    /** Looks up the configuration registered for the given combo-box label. */
    private static LanguageConfig languageConfigFor(String label) {
        return LANGUAGE_CONFIGS_BY_LABEL.get(label);
    }

    /** Terminates the whole application (used by Exit menu item and Cancel button). */
    private static class CancelListener implements ActionListener {
        @Override
        public void actionPerformed(ActionEvent e) {
            System.exit(0);
        }
    }

    /** Resets the progress UI and runs the CPD analysis on a background thread. */
    private class GoListener implements ActionListener {
        @Override
        public void actionPerformed(ActionEvent e) {
            new Thread(new Runnable() {
                @Override
                public void run() {
                    tokenizingFilesBar.setValue(0);
                    tokenizingFilesBar.setString("");
                    resultsTextArea.setText("");
                    phaseLabel.setText("");
                    timeField.setText("");
                    go();
                }
            }).start();
        }
    }
private class SaveListener implements ActionListener {
final CPDRenderer renderer;
SaveListener(CPDRenderer theRenderer) {
renderer = theRenderer;
}
@Override
public void actionPerformed(ActionEvent evt) {
JFileChooser fcSave = new JFileChooser();
int ret = fcSave.showSaveDialog(GUI.this.frame);
File f = fcSave.getSelectedFile();
if (f == null || ret != JFileChooser.APPROVE_OPTION) {
return;
}
if (!f.canWrite()) {
try (PrintWriter pw = new PrintWriter(new FileOutputStream(f))) {
renderer.render(matches.iterator(), pw);
pw.flush();
JOptionPane.showMessageDialog(frame, "Saved " + matches.size() + " matches");
} catch (IOException e) {
error("Couldn't save file" + f.getAbsolutePath(), e);
}
} else {
error("Could not write to file " + f.getAbsolutePath(), null);
}
}
private void error(String message, Exception e) {
if (e != null) {
e.printStackTrace();
}
JOptionPane.showMessageDialog(GUI.this.frame, message);
}
}
    /** Opens a file chooser rooted at the current text-field value and copies the selection back. */
    private class BrowseListener implements ActionListener {
        @Override
        public void actionPerformed(ActionEvent e) {
            JFileChooser fc = new JFileChooser(rootDirectoryField.getText());
            fc.setFileSelectionMode(JFileChooser.FILES_AND_DIRECTORIES);
            fc.showDialog(frame, "Select");
            if (fc.getSelectedFile() != null) {
                rootDirectoryField.setText(fc.getSelectedFile().getAbsolutePath());
            }
        }
    }

    /** Cell renderer that applies a per-column horizontal alignment. */
    private class AlignmentRenderer extends DefaultTableCellRenderer {

        private static final long serialVersionUID = -2190382865483285032L;
        private int[] alignments;

        AlignmentRenderer(int[] theAlignments) {
            alignments = theAlignments;
        }

        @Override
        public Component getTableCellRendererComponent(JTable table, Object value, boolean isSelected, boolean hasFocus,
                int row, int column) {
            super.getTableCellRendererComponent(table, value, isSelected, hasFocus, row, column);
            setHorizontalAlignment(alignments[column]);
            return this;
        }
    }
    // --- Settings controls ---
    private JTextField rootDirectoryField = new JTextField(System.getProperty("user.home"));
    private JTextField minimumLengthField = new JTextField(Integer.toString(DEFAULT_CPD_MINIMUM_LENGTH));
    private JTextField encodingField = new JTextField(System.getProperty("file.encoding"));
    // --- Progress display ---
    private JTextField timeField = new JTextField(6);
    private JLabel phaseLabel = new JLabel();
    private JProgressBar tokenizingFilesBar = new JProgressBar();
    private JTextArea resultsTextArea = new JTextArea();
    // --- Analysis options ---
    private JCheckBox recurseCheckbox = new JCheckBox("", true);
    private JCheckBox ignoreIdentifiersCheckbox = new JCheckBox("", false);
    private JCheckBox ignoreLiteralsCheckbox = new JCheckBox("", false);
    private JCheckBox ignoreAnnotationsCheckbox = new JCheckBox("", false);
    private JCheckBox ignoreUsingsCheckbox = new JCheckBox("", false);
    private JComboBox<String> languageBox = new JComboBox<>();
    private JTextField extensionField = new JTextField();
    private JLabel extensionLabel = new JLabel("Extension:", SwingConstants.RIGHT);
    // --- Results and window chrome ---
    private JTable resultsTable = new JTable();
    private JButton goButton;
    private JButton cancelButton;
    private JPanel progressPanel;
    private JFrame frame;
    // When true, SimpleRenderer strips leading whitespace in the report view.
    private boolean trimLeadingWhitespace;
    private List<Match> matches = new ArrayList<>();
private void addSaveOptionsTo(JMenu menu) {
JMenuItem saveItem;
for (int i = 0; i < RENDERER_SETS.length; i++) {
saveItem = new JMenuItem("Save as " + RENDERER_SETS[i][0]);
saveItem.addActionListener(new SaveListener((CPDRenderer) RENDERER_SETS[i][1]));
menu.add(saveItem);
}
}
    /** Builds the whole Swing UI (menus, settings, progress, results) and shows the frame. */
    public GUI() {
        frame = new JFrame("PMD Duplicate Code Detector (v " + PMDVersion.VERSION + ')');
        timeField.setEditable(false);
        // File menu: save-as entries plus Exit.
        JMenu fileMenu = new JMenu("File");
        fileMenu.setMnemonic('f');
        addSaveOptionsTo(fileMenu);
        JMenuItem exitItem = new JMenuItem("Exit");
        exitItem.setMnemonic('x');
        exitItem.addActionListener(new CancelListener());
        fileMenu.add(exitItem);
        // View menu: toggle leading-whitespace trimming in the report.
        JMenu viewMenu = new JMenu("View");
        fileMenu.setMnemonic('v');
        JMenuItem trimItem = new JCheckBoxMenuItem("Trim leading whitespace");
        trimItem.addItemListener(new ItemListener() {
            @Override
            public void itemStateChanged(ItemEvent e) {
                AbstractButton button = (AbstractButton) e.getItem();
                GUI.this.trimLeadingWhitespace = button.isSelected();
            }
        });
        viewMenu.add(trimItem);
        JMenuBar menuBar = new JMenuBar();
        menuBar.add(fileMenu);
        menuBar.add(viewMenu);
        frame.setJMenuBar(menuBar);
        // first make all the buttons
        JButton browseButton = new JButton("Browse");
        browseButton.setMnemonic('b');
        browseButton.addActionListener(new BrowseListener());
        goButton = new JButton("Go");
        goButton.setMnemonic('g');
        goButton.addActionListener(new GoListener());
        cancelButton = new JButton("Cancel");
        cancelButton.addActionListener(new CancelListener());
        JPanel settingsPanel = makeSettingsPanel(browseButton, goButton, cancelButton);
        progressPanel = makeProgressPanel();
        JPanel resultsPanel = makeResultsPanel();
        // Start with the first language's capabilities reflected in the UI.
        adjustLanguageControlsFor((LanguageConfig) LANGUAGE_SETS[0][1]);
        frame.getContentPane().setLayout(new BorderLayout());
        JPanel topPanel = new JPanel();
        topPanel.setLayout(new BorderLayout());
        topPanel.add(settingsPanel, BorderLayout.NORTH);
        topPanel.add(progressPanel, BorderLayout.CENTER);
        setProgressControls(false); // not running now
        frame.getContentPane().add(topPanel, BorderLayout.NORTH);
        frame.getContentPane().add(resultsPanel, BorderLayout.CENTER);
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        frame.pack();
        frame.setVisible(true);
    }
private void adjustLanguageControlsFor(LanguageConfig current) {
ignoreIdentifiersCheckbox.setEnabled(current.canIgnoreIdentifiers());
ignoreLiteralsCheckbox.setEnabled(current.canIgnoreLiterals());
ignoreAnnotationsCheckbox.setEnabled(current.canIgnoreAnnotations());
ignoreUsingsCheckbox.setEnabled(current.canIgnoreUsings());
extensionField.setText(current.extensions()[0]);
boolean enableExtension = current.extensions()[0].length() == 0;
extensionField.setEnabled(enableExtension);
extensionLabel.setEnabled(enableExtension);
}
private JPanel makeSettingsPanel(JButton browseButton, JButton goButton, JButton cxButton) {
JPanel settingsPanel = new JPanel();
GridBagHelper helper = new GridBagHelper(settingsPanel, new double[] { 0.2, 0.7, 0.1, 0.1 });
helper.addLabel("Root source directory:");
helper.add(rootDirectoryField);
helper.add(browseButton, 2);
helper.nextRow();
helper.addLabel("Report duplicate chunks larger than:");
minimumLengthField.setColumns(4);
helper.add(minimumLengthField);
helper.addLabel("Language:");
for (int i = 0; i < LANGUAGE_SETS.length; i++) {
languageBox.addItem(String.valueOf(LANGUAGE_SETS[i][0]));
}
languageBox.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
adjustLanguageControlsFor(languageConfigFor((String) languageBox.getSelectedItem()));
}
});
helper.add(languageBox);
helper.nextRow();
helper.addLabel("Also scan subdirectories?");
helper.add(recurseCheckbox);
helper.add(extensionLabel);
helper.add(extensionField);
helper.nextRow();
helper.addLabel("Ignore literals?");
helper.add(ignoreLiteralsCheckbox);
helper.addLabel("");
helper.addLabel("");
helper.nextRow();
helper.nextRow();
helper.addLabel("Ignore identifiers?");
helper.add(ignoreIdentifiersCheckbox);
helper.addLabel("");
helper.addLabel("");
helper.nextRow();
helper.nextRow();
helper.addLabel("Ignore annotations?");
helper.add(ignoreAnnotationsCheckbox);
helper.addLabel("");
helper.addLabel("");
helper.nextRow();
helper.nextRow();
helper.addLabel("Ignore usings?");
helper.add(ignoreUsingsCheckbox);
helper.add(goButton);
helper.add(cxButton);
helper.nextRow();
helper.addLabel("File encoding (defaults based upon locale):");
encodingField.setColumns(1);
helper.add(encodingField);
helper.addLabel("");
helper.addLabel("");
helper.nextRow();
// settingsPanel.setBorder(BorderFactory.createTitledBorder("Settings"));
return settingsPanel;
}
private JPanel makeProgressPanel() {
JPanel progressPanel = new JPanel();
final double[] weights = { 0.0, 0.8, 0.4, 0.2 };
GridBagHelper helper = new GridBagHelper(progressPanel, weights);
helper.addLabel("Tokenizing files:");
helper.add(tokenizingFilesBar, 3);
helper.nextRow();
helper.addLabel("Phase:");
helper.add(phaseLabel);
helper.addLabel("Time elapsed:");
helper.add(timeField);
helper.nextRow();
progressPanel.setBorder(BorderFactory.createTitledBorder("Progress"));
return progressPanel;
}
private JPanel makeResultsPanel() {
JPanel resultsPanel = new JPanel();
resultsPanel.setLayout(new BorderLayout());
JScrollPane areaScrollPane = new JScrollPane(resultsTextArea);
resultsTextArea.setEditable(false);
areaScrollPane.setVerticalScrollBarPolicy(ScrollPaneConstants.VERTICAL_SCROLLBAR_ALWAYS);
areaScrollPane.setPreferredSize(new Dimension(600, 300));
resultsPanel.add(makeMatchList(), BorderLayout.WEST);
resultsPanel.add(areaScrollPane, BorderLayout.CENTER);
return resultsPanel;
}
    /** Renders the currently selected matches into the report text area. */
    private void populateResultArea() {
        int[] selectionIndices = resultsTable.getSelectedRows();
        TableModel model = resultsTable.getModel();
        List<Match> selections = new ArrayList<>(selectionIndices.length);
        for (int i = 0; i < selectionIndices.length; i++) {
            // NOTE(review): column 99 looks like a sentinel the custom table
            // model uses to hand back the whole Match object -- confirm against
            // the model implementation (defined outside this view).
            selections.add((Match) model.getValueAt(selectionIndices[i], 99));
        }
        String report = new SimpleRenderer(trimLeadingWhitespace).render(selections.iterator());
        resultsTextArea.setText(report);
        resultsTextArea.setCaretPosition(0); // move to the top
    }
private void copyMatchListSelectionsToClipboard() {
int[] selectionIndices = resultsTable.getSelectedRows();
int colCount = resultsTable.getColumnCount();
StringBuilder sb = new StringBuilder();
for (int r = 0; r < selectionIndices.length; r++) {
if (r > 0) {
sb.append('\n');
}
sb.append(resultsTable.getValueAt(selectionIndices[r], 0));
for (int c = 1; c < colCount; c++) {
sb.append('\t');
sb.append(resultsTable.getValueAt(selectionIndices[r], c));
}
}
StringSelection ss = new StringSelection(sb.toString());
Toolkit.getDefaultToolkit().getSystemClipboard().setContents(ss, null);
}
private void deleteMatchlistSelections() {
int[] selectionIndices = resultsTable.getSelectedRows();
for (int i = selectionIndices.length - 1; i >= 0; i--) {
matches.remove(selectionIndices[i]);
}
resultsTable.getSelectionModel().clearSelection();
resultsTable.addNotify();
}
    /**
     * Builds the match table: selection drives the report area, Ctrl+C copies
     * rows, Delete removes them, and clicking a header sorts by that column.
     */
    private JComponent makeMatchList() {
        resultsTable.getSelectionModel().addListSelectionListener(new ListSelectionListener() {
            @Override
            public void valueChanged(ListSelectionEvent e) {
                populateResultArea();
            }
        });
        resultsTable.registerKeyboardAction(new ActionListener() {
            @Override
            public void actionPerformed(ActionEvent e) {
                copyMatchListSelectionsToClipboard();
            }
        }, "Copy", COPY_KEY_STROKE, JComponent.WHEN_FOCUSED);
        resultsTable.registerKeyboardAction(new ActionListener() {
            @Override
            public void actionPerformed(ActionEvent e) {
                deleteMatchlistSelections();
            }
        }, "Del", DELETE_KEY_STROKE, JComponent.WHEN_FOCUSED);
        // Apply the per-column alignments declared in matchColumns.
        int[] alignments = new int[matchColumns.length];
        for (int i = 0; i < alignments.length; i++) {
            alignments[i] = matchColumns[i].alignment();
        }
        resultsTable.setDefaultRenderer(Object.class, new AlignmentRenderer(alignments));
        final JTableHeader header = resultsTable.getTableHeader();
        header.addMouseListener(new MouseAdapter() {
            @Override
            public void mouseClicked(MouseEvent e) {
                sortOnColumn(header.columnAtPoint(new Point(e.getX(), e.getY())));
            }
        });
        return new JScrollPane(resultsTable);
    }
private boolean isLegalPath(String path, LanguageConfig config) {
String[] extensions = config.extensions();
for (int i = 0; i < extensions.length; i++) {
if (path.endsWith(extensions[i]) && extensions[i].length() > 0) {
return true;
}
}
return false;
}
private String setLabelFor(Match match) {
Set<String> sourceIDs = new HashSet<>(match.getMarkCount());
for (Iterator<Mark> occurrences = match.iterator(); occurrences.hasNext();) {
sourceIDs.add(occurrences.next().getFilename());
}
String label;
if (sourceIDs.size() == 1) {
String sourceId = sourceIDs.iterator().next();
int separatorPos = sourceId.lastIndexOf(File.separatorChar);
label = "..." + sourceId.substring(separatorPos);
} else {
label = '(' + sourceIDs.size() + " separate files)";
}
match.setLabel(label);
return label;
}
    /** Toggles the UI between idle (Go enabled) and running (Cancel enabled, progress visible). */
    private void setProgressControls(boolean isRunning) {
        progressPanel.setVisible(isRunning);
        goButton.setEnabled(!isRunning);
        cancelButton.setEnabled(isRunning);
    }
private void go() {
try {
File dirPath = new File(rootDirectoryField.getText());
if (!dirPath.exists()) {
JOptionPane.showMessageDialog(frame, "Can't read from that root source directory", "Error",
JOptionPane.ERROR_MESSAGE);
return;
}
setProgressControls(true);
Properties p = new Properties();
CPDConfiguration config = new CPDConfiguration();
config.setMinimumTileSize(Integer.parseInt(minimumLengthField.getText()));
config.setEncoding(encodingField.getText());
config.setIgnoreIdentifiers(ignoreIdentifiersCheckbox.isSelected());
config.setIgnoreLiterals(ignoreLiteralsCheckbox.isSelected());
config.setIgnoreAnnotations(ignoreAnnotationsCheckbox.isSelected());
config.setIgnoreUsings(ignoreUsingsCheckbox.isSelected());
p.setProperty(LanguageFactory.EXTENSION, extensionField.getText());
LanguageConfig conf = languageConfigFor((String) languageBox.getSelectedItem());
Language language = conf.languageFor(p);
config.setLanguage(language);
CPDConfiguration.setSystemProperties(config);
CPD cpd = new CPD(config);
cpd.setCpdListener(this);
tokenizingFilesBar.setMinimum(0);
phaseLabel.setText("");
if (isLegalPath(dirPath.getPath(), conf)) { // should use the
// language file filter
// instead?
cpd.add(dirPath);
} else {
if (recurseCheckbox.isSelected()) {
cpd.addRecursively(dirPath);
} else {
cpd.addAllInDirectory(dirPath);
}
}
Timer t = createTimer();
t.start();
cpd.go();
t.stop();
matches = new ArrayList<>();
for (Iterator<Match> i = cpd.getMatches(); i.hasNext();) {
Match match = i.next();
setLabelFor(match);
matches.add(match);
}
setListDataFrom(matches);
String report = new SimpleRenderer().render(cpd.getMatches());
if (report.length() == 0) {
JOptionPane.showMessageDialog(frame,
"Done. Couldn't find any duplicates longer than " + minimumLengthField.getText() + " tokens");
} else {
resultsTextArea.setText(report);
}
} catch (IOException t) {
t.printStackTrace();
JOptionPane.showMessageDialog(frame, "Halted due to " + t.getClass().getName() + "; " + t.getMessage());
} catch (RuntimeException t) {
t.printStackTrace();
JOptionPane.showMessageDialog(frame, "Halted due to " + t.getClass().getName() + "; " + t.getMessage());
}
setProgressControls(false);
}
private Timer createTimer() {
final long start = System.currentTimeMillis();
return new Timer(1000, new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
long now = System.currentTimeMillis();
long elapsedMillis = now - start;
long elapsedSeconds = elapsedMillis / 1000;
long minutes = (long) Math.floor(elapsedSeconds / 60);
long seconds = elapsedSeconds - (minutes * 60);
timeField.setText(formatTime(minutes, seconds));
}
});
}
private static String formatTime(long minutes, long seconds) {
StringBuilder sb = new StringBuilder(5);
if (minutes < 10) {
sb.append('0');
}
sb.append(minutes).append(':');
if (seconds < 10) {
sb.append('0');
}
sb.append(seconds);
return sb.toString();
}
    /**
     * A table model whose backing rows can be sorted. Tracks the current sort
     * column and direction so that a repeated click on the same column header
     * can toggle between ascending and descending order.
     *
     * @param <E> the row element type
     */
    private abstract class SortingTableModel<E> extends AbstractTableModel {
        // returns the index of the column currently sorted on
        abstract int sortColumn();
        // records the column the model is sorted on
        abstract void sortColumn(int column);
        // true when the current sort order is descending
        abstract boolean sortDescending();
        // sets the sort direction used by sort()
        abstract void sortDescending(boolean flag);
        // sorts the backing rows with the comparator, honouring the direction flag
        abstract void sort(Comparator<E> comparator);
    }
private TableModel tableModelFrom(final List<Match> items) {
return new SortingTableModel<Match>() {
private int sortColumn;
private boolean sortDescending;
@Override
public Object getValueAt(int rowIndex, int columnIndex) {
Match match = items.get(rowIndex);
switch (columnIndex) {
case 0:
return match.getLabel();
case 2:
return Integer.toString(match.getLineCount());
case 1:
return match.getMarkCount() > 2 ? Integer.toString(match.getMarkCount()) : "";
case 99:
return match;
default:
return "";
}
}
@Override
public int getColumnCount() {
return matchColumns.length;
}
@Override
public int getRowCount() {
return items.size();
}
@Override
public boolean isCellEditable(int rowIndex, int columnIndex) {
return false;
}
@Override
public Class<?> getColumnClass(int columnIndex) {
return Object.class;
}
@Override
public String getColumnName(int i) {
return matchColumns[i].label();
}
@Override
public int sortColumn() {
return sortColumn;
}
@Override
public void sortColumn(int column) {
sortColumn = column;
}
@Override
public boolean sortDescending() {
return sortDescending;
}
@Override
public void sortDescending(boolean flag) {
sortDescending = flag;
}
@Override
public void sort(Comparator<Match> comparator) {
Collections.sort(items, comparator);
if (sortDescending) {
Collections.reverse(items);
}
}
};
}
private void sortOnColumn(int columnIndex) {
Comparator<Match> comparator = matchColumns[columnIndex].sorter();
SortingTableModel<Match> model = (SortingTableModel<Match>) resultsTable.getModel();
if (model.sortColumn() == columnIndex) {
model.sortDescending(!model.sortDescending());
}
model.sortColumn(columnIndex);
model.sort(comparator);
resultsTable.getSelectionModel().clearSelection();
resultsTable.repaint();
}
private void setListDataFrom(List<Match> matches) {
resultsTable.setModel(tableModelFrom(matches));
TableColumnModel colModel = resultsTable.getColumnModel();
TableColumn column;
int width;
for (int i = 0; i < matchColumns.length; i++) {
if (matchColumns[i].width() > 0) {
column = colModel.getColumn(i);
width = matchColumns[i].width();
column.setPreferredWidth(width);
column.setMinWidth(width);
column.setMaxWidth(width);
}
}
}
// CPDListener
    /**
     * CPDListener callback: mirrors the current analysis phase name into the
     * status label (see {@link #getPhaseText(int)}).
     */
    @Override
    public void phaseUpdate(int phase) {
        phaseLabel.setText(getPhaseText(phase));
    }
public String getPhaseText(int phase) {
switch (phase) {
case CPDListener.INIT:
return "Initializing";
case CPDListener.HASH:
return "Hashing";
case CPDListener.MATCH:
return "Matching";
case CPDListener.GROUPING:
return "Grouping";
case CPDListener.DONE:
return "Done";
default:
return "Unknown";
}
}
    /**
     * CPDListener callback invoked for each file added to the analysis:
     * raises the progress bar's maximum to the running file count and
     * advances the bar by one.
     */
    @Override
    public void addedFile(int fileCount, File file) {
        tokenizingFilesBar.setMaximum(fileCount);
        tokenizingFilesBar.setValue(tokenizingFilesBar.getValue() + 1);
    }
// CPDListener
    /**
     * Entry point: launches the CPD GUI. Command-line arguments are ignored.
     */
    public static void main(String[] args) {
        // this should prevent the disk not found popup
        // System.setSecurityManager(null);
        new GUI();
    }
}
| 1 | 14,400 | Wouldn't it be better to use double quotes for the opening parenthesis ? | pmd-pmd | java |
@@ -716,7 +716,7 @@ bool IOLoginData::savePlayer(Player* player)
if (!player->isOffline()) {
query << "`onlinetime` = `onlinetime` + " << (time(nullptr) - player->lastLoginSaved) << ',';
}
- query << "`blessings` = " << static_cast<uint32_t>(player->blessings);
+ query << "`blessings` = " << static_cast<uint32_t>(player->blessings.to_ulong());
query << " WHERE `id` = " << player->getGUID();
DBTransaction transaction; | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "iologindata.h"
#include "configmanager.h"
#include "game.h"
#include <fmt/format.h>
extern ConfigManager g_config;
extern Game g_game;
// Loads the basic account record (id, name, type, premium expiry) for the
// given account number. Returns a default-constructed Account when no such
// row exists. Note: the password column is selected but not copied here.
Account IOLoginData::loadAccount(uint32_t accno)
{
	Account account;
	DBResult_ptr row = Database::getInstance().storeQuery(fmt::format("SELECT `id`, `name`, `password`, `type`, `premium_ends_at` FROM `accounts` WHERE `id` = {:d}", accno));
	if (row) {
		account.id = row->getNumber<uint32_t>("id");
		account.name = row->getString("name");
		account.accountType = static_cast<AccountType_t>(row->getNumber<int32_t>("type"));
		account.premiumEndsAt = row->getNumber<time_t>("premium_ends_at");
	}
	return account;
}
// Decodes an unpadded base32 secret (RFC 4648 alphabet: A-Z and 2-7) into
// its raw byte string. Any character outside that alphabet invalidates the
// whole secret and an empty string is returned, which makes every later
// token comparison fail by design.
std::string decodeSecret(const std::string& secret)
{
	std::string decoded;
	decoded.reserve(10);

	uint32_t accumulator = 0;
	uint32_t bits = 0;
	for (char ch : secret) {
		uint32_t value;
		if (ch >= 'A' && ch <= 'Z') {
			value = (ch & 0x1F) - 1; // 'A'..'Z' -> 0..25
		} else if (ch >= '2' && ch <= '7') {
			value = ch - 24; // '2'..'7' -> 26..31
		} else {
			return {};
		}

		accumulator = (accumulator << 5) | value;
		bits += 5;
		if (bits >= 8) {
			// a full byte is available: emit its top 8 bits
			bits -= 8;
			decoded.push_back(static_cast<char>(accumulator >> bits));
		}
	}
	return decoded;
}
bool IOLoginData::loginserverAuthentication(const std::string& name, const std::string& password, Account& account)
{
Database& db = Database::getInstance();
DBResult_ptr result = db.storeQuery(fmt::format("SELECT `id`, `name`, `password`, `secret`, `type`, `premium_ends_at` FROM `accounts` WHERE `name` = {:s}", db.escapeString(name)));
if (!result) {
return false;
}
if (transformToSHA1(password) != result->getString("password")) {
return false;
}
account.id = result->getNumber<uint32_t>("id");
account.name = result->getString("name");
account.key = decodeSecret(result->getString("secret"));
account.accountType = static_cast<AccountType_t>(result->getNumber<int32_t>("type"));
account.premiumEndsAt = result->getNumber<time_t>("premium_ends_at");
result = db.storeQuery(fmt::format("SELECT `name` FROM `players` WHERE `account_id` = {:d} AND `deletion` = 0 ORDER BY `name` ASC", account.id));
if (result) {
do {
account.characters.push_back(result->getString("name"));
} while (result->next());
}
return true;
}
uint32_t IOLoginData::gameworldAuthentication(const std::string& accountName, const std::string& password, std::string& characterName, std::string& token, uint32_t tokenTime)
{
Database& db = Database::getInstance();
DBResult_ptr result = db.storeQuery(fmt::format("SELECT `id`, `password`, `secret` FROM `accounts` WHERE `name` = {:s}", db.escapeString(accountName)));
if (!result) {
return 0;
}
std::string secret = decodeSecret(result->getString("secret"));
if (!secret.empty()) {
if (token.empty()) {
return 0;
}
bool tokenValid = token == generateToken(secret, tokenTime) || token == generateToken(secret, tokenTime - 1) || token == generateToken(secret, tokenTime + 1);
if (!tokenValid) {
return 0;
}
}
if (transformToSHA1(password) != result->getString("password")) {
return 0;
}
uint32_t accountId = result->getNumber<uint32_t>("id");
result = db.storeQuery(fmt::format("SELECT `name` FROM `players` WHERE `name` = {:s} AND `account_id` = {:d} AND `deletion` = 0", db.escapeString(characterName), accountId));
if (!result) {
return 0;
}
characterName = result->getString("name");
return accountId;
}
uint32_t IOLoginData::getAccountIdByPlayerName(const std::string& playerName)
{
Database& db = Database::getInstance();
DBResult_ptr result = db.storeQuery(fmt::format("SELECT `account_id` FROM `players` WHERE `name` = {:s}", db.escapeString(playerName)));
if (!result) {
return 0;
}
return result->getNumber<uint32_t>("account_id");
}
uint32_t IOLoginData::getAccountIdByPlayerId(uint32_t playerId)
{
Database& db = Database::getInstance();
DBResult_ptr result = db.storeQuery(fmt::format("SELECT `account_id` FROM `players` WHERE `id` = {:d}", playerId));
if (!result) {
return 0;
}
return result->getNumber<uint32_t>("account_id");
}
// Returns the account's type, defaulting to ACCOUNT_TYPE_NORMAL when the
// account does not exist.
AccountType_t IOLoginData::getAccountType(uint32_t accountId)
{
	DBResult_ptr result = Database::getInstance().storeQuery(fmt::format("SELECT `type` FROM `accounts` WHERE `id` = {:d}", accountId));
	return result ? static_cast<AccountType_t>(result->getNumber<uint16_t>("type")) : ACCOUNT_TYPE_NORMAL;
}
// Persists a new account type for the given account id.
void IOLoginData::setAccountType(uint32_t accountId, AccountType_t accountType)
{
	Database::getInstance().executeQuery(fmt::format("UPDATE `accounts` SET `type` = {:d} WHERE `id` = {:d}", static_cast<uint16_t>(accountType), accountId));
}
void IOLoginData::updateOnlineStatus(uint32_t guid, bool login)
{
if (g_config.getBoolean(ConfigManager::ALLOW_CLONES)) {
return;
}
if (login) {
Database::getInstance().executeQuery(fmt::format("INSERT INTO `players_online` VALUES ({:d})", guid));
} else {
Database::getInstance().executeQuery(fmt::format("DELETE FROM `players_online` WHERE `player_id` = {:d}", guid));
}
}
// Loads the minimal player data needed before a full login: GUID, group,
// account number/type and premium expiry. Returns false when the character
// does not exist (or is flagged for deletion) or references an unknown group.
bool IOLoginData::preloadPlayer(Player* player, const std::string& name)
{
	Database& db = Database::getInstance();

	DBResult_ptr result = db.storeQuery(fmt::format("SELECT `p`.`id`, `p`.`account_id`, `p`.`group_id`, `a`.`type`, `a`.`premium_ends_at` FROM `players` as `p` JOIN `accounts` as `a` ON `a`.`id` = `p`.`account_id` WHERE `p`.`name` = {:s} AND `p`.`deletion` = 0", db.escapeString(name)));
	if (!result) {
		return false;
	}

	player->setGUID(result->getNumber<uint32_t>("id"));

	uint16_t groupId = result->getNumber<uint16_t>("group_id");
	Group* group = g_game.groups.getGroup(groupId);
	if (!group) {
		std::cout << "[Error - IOLoginData::preloadPlayer] " << player->name << " has Group ID " << groupId << " which doesn't exist." << std::endl;
		return false;
	}
	player->setGroup(group);

	player->accountNumber = result->getNumber<uint32_t>("account_id");
	player->accountType = static_cast<AccountType_t>(result->getNumber<uint16_t>("type"));
	player->premiumEndsAt = result->getNumber<time_t>("premium_ends_at");
	return true;
}
// Loads a complete player record by GUID; delegates all of the actual
// unserialization work to loadPlayer().
bool IOLoginData::loadPlayerById(Player* player, uint32_t id)
{
	Database& db = Database::getInstance();
	return loadPlayer(player, db.storeQuery(fmt::format("SELECT `id`, `name`, `account_id`, `group_id`, `sex`, `vocation`, `experience`, `level`, `maglevel`, `health`, `healthmax`, `blessings`, `mana`, `manamax`, `manaspent`, `soul`, `lookbody`, `lookfeet`, `lookhead`, `looklegs`, `looktype`, `lookaddons`, `posx`, `posy`, `posz`, `cap`, `lastlogin`, `lastlogout`, `lastip`, `conditions`, `skulltime`, `skull`, `town_id`, `balance`, `offlinetraining_time`, `offlinetraining_skill`, `stamina`, `skill_fist`, `skill_fist_tries`, `skill_club`, `skill_club_tries`, `skill_sword`, `skill_sword_tries`, `skill_axe`, `skill_axe_tries`, `skill_dist`, `skill_dist_tries`, `skill_shielding`, `skill_shielding_tries`, `skill_fishing`, `skill_fishing_tries`, `direction` FROM `players` WHERE `id` = {:d}", id)));
}
// Loads a complete player record by character name; delegates all of the
// actual unserialization work to loadPlayer().
bool IOLoginData::loadPlayerByName(Player* player, const std::string& name)
{
	Database& db = Database::getInstance();
	return loadPlayer(player, db.storeQuery(fmt::format("SELECT `id`, `name`, `account_id`, `group_id`, `sex`, `vocation`, `experience`, `level`, `maglevel`, `health`, `healthmax`, `blessings`, `mana`, `manamax`, `manaspent`, `soul`, `lookbody`, `lookfeet`, `lookhead`, `looklegs`, `looktype`, `lookaddons`, `posx`, `posy`, `posz`, `cap`, `lastlogin`, `lastlogout`, `lastip`, `conditions`, `skulltime`, `skull`, `town_id`, `balance`, `offlinetraining_time`, `offlinetraining_skill`, `stamina`, `skill_fist`, `skill_fist_tries`, `skill_club`, `skill_club_tries`, `skill_sword`, `skill_sword_tries`, `skill_axe`, `skill_axe_tries`, `skill_dist`, `skill_dist_tries`, `skill_shielding`, `skill_shielding_tries`, `skill_fishing`, `skill_fishing_tries`, `direction` FROM `players` WHERE `name` = {:s}", db.escapeString(name))));
}
// Populates `player` from the given `players` row plus all related tables:
// conditions, guild membership, learned spells, inventory, depot, inbox,
// store inbox, storage values and the account VIP list. Returns false when
// the row is missing or references a non-existent group, vocation or town.
// NOTE: `result` is reused for every follow-up query below.
bool IOLoginData::loadPlayer(Player* player, DBResult_ptr result)
{
	if (!result) {
		return false;
	}
	Database& db = Database::getInstance();
	uint32_t accno = result->getNumber<uint32_t>("account_id");
	Account acc = loadAccount(accno);
	player->setGUID(result->getNumber<uint32_t>("id"));
	player->name = result->getString("name");
	player->accountNumber = accno;
	player->accountType = acc.accountType;
	player->premiumEndsAt = acc.premiumEndsAt;
	Group* group = g_game.groups.getGroup(result->getNumber<uint16_t>("group_id"));
	if (!group) {
		std::cout << "[Error - IOLoginData::loadPlayer] " << player->name << " has Group ID " << result->getNumber<uint16_t>("group_id") << " which doesn't exist" << std::endl;
		return false;
	}
	player->setGroup(group);
	player->bankBalance = result->getNumber<uint64_t>("balance");
	player->setSex(static_cast<PlayerSex_t>(result->getNumber<uint16_t>("sex")));
	player->level = std::max<uint32_t>(1, result->getNumber<uint32_t>("level"));
	// clamp stored experience into [current level, next level] in case the
	// database value is inconsistent with the stored level
	uint64_t experience = result->getNumber<uint64_t>("experience");
	uint64_t currExpCount = Player::getExpForLevel(player->level);
	uint64_t nextExpCount = Player::getExpForLevel(player->level + 1);
	if (experience < currExpCount || experience > nextExpCount) {
		experience = currExpCount;
	}
	player->experience = experience;
	if (currExpCount < nextExpCount) {
		player->levelPercent = Player::getPercentLevel(player->experience - currExpCount, nextExpCount - currExpCount);
	} else {
		player->levelPercent = 0;
	}
	player->soul = result->getNumber<uint16_t>("soul");
	// capacity is persisted divided by 100 (see savePlayer)
	player->capacity = result->getNumber<uint32_t>("cap") * 100;
	player->blessings = result->getNumber<uint16_t>("blessings");
	// unserialize the persisted conditions from the blob column; entries
	// that fail to unserialize are silently discarded
	unsigned long conditionsSize;
	const char* conditions = result->getStream("conditions", conditionsSize);
	PropStream propStream;
	propStream.init(conditions, conditionsSize);
	Condition* condition = Condition::createCondition(propStream);
	while (condition) {
		if (condition->unserialize(propStream)) {
			player->storedConditionList.push_front(condition);
		} else {
			delete condition;
		}
		condition = Condition::createCondition(propStream);
	}
	if (!player->setVocation(result->getNumber<uint16_t>("vocation"))) {
		std::cout << "[Error - IOLoginData::loadPlayer] " << player->name << " has Vocation ID " << result->getNumber<uint16_t>("vocation") << " which doesn't exist" << std::endl;
		return false;
	}
	player->mana = result->getNumber<uint32_t>("mana");
	player->manaMax = result->getNumber<uint32_t>("manamax");
	player->magLevel = result->getNumber<uint32_t>("maglevel");
	// discard implausible mana-spent values (more than the next level needs)
	uint64_t nextManaCount = player->vocation->getReqMana(player->magLevel + 1);
	uint64_t manaSpent = result->getNumber<uint64_t>("manaspent");
	if (manaSpent > nextManaCount) {
		manaSpent = 0;
	}
	player->manaSpent = manaSpent;
	player->magLevelPercent = Player::getPercentLevel(player->manaSpent, nextManaCount);
	player->health = result->getNumber<int32_t>("health");
	player->healthMax = result->getNumber<int32_t>("healthmax");
	player->defaultOutfit.lookType = result->getNumber<uint16_t>("looktype");
	player->defaultOutfit.lookHead = result->getNumber<uint16_t>("lookhead");
	player->defaultOutfit.lookBody = result->getNumber<uint16_t>("lookbody");
	player->defaultOutfit.lookLegs = result->getNumber<uint16_t>("looklegs");
	player->defaultOutfit.lookFeet = result->getNumber<uint16_t>("lookfeet");
	player->defaultOutfit.lookAddons = result->getNumber<uint16_t>("lookaddons");
	player->currentOutfit = player->defaultOutfit;
	player->direction = static_cast<Direction> (result->getNumber<uint16_t>("direction"));
	// skull state is only meaningful outside PvP-enforced worlds
	if (g_game.getWorldType() != WORLD_TYPE_PVP_ENFORCED) {
		const time_t skullSeconds = result->getNumber<time_t>("skulltime") - time(nullptr);
		if (skullSeconds > 0) {
			//ensure that we round up the number of ticks
			player->skullTicks = (skullSeconds + 2);
			uint16_t skull = result->getNumber<uint16_t>("skull");
			if (skull == SKULL_RED) {
				player->skull = SKULL_RED;
			} else if (skull == SKULL_BLACK) {
				player->skull = SKULL_BLACK;
			}
		}
	}
	player->loginPosition.x = result->getNumber<uint16_t>("posx");
	player->loginPosition.y = result->getNumber<uint16_t>("posy");
	player->loginPosition.z = result->getNumber<uint16_t>("posz");
	player->lastLoginSaved = result->getNumber<time_t>("lastlogin");
	player->lastLogout = result->getNumber<time_t>("lastlogout");
	// persisted in seconds, kept internally in milliseconds
	player->offlineTrainingTime = result->getNumber<int32_t>("offlinetraining_time") * 1000;
	player->offlineTrainingSkill = result->getNumber<int32_t>("offlinetraining_skill");
	Town* town = g_game.map.towns.getTown(result->getNumber<uint32_t>("town_id"));
	if (!town) {
		std::cout << "[Error - IOLoginData::loadPlayer] " << player->name << " has Town ID " << result->getNumber<uint32_t>("town_id") << " which doesn't exist" << std::endl;
		return false;
	}
	player->town = town;
	const Position& loginPos = player->loginPosition;
	if (loginPos.x == 0 && loginPos.y == 0 && loginPos.z == 0) {
		// no stored position: fall back to the temple position
		player->loginPosition = player->getTemplePosition();
	}
	player->staminaMinutes = result->getNumber<uint16_t>("stamina");
	static const std::string skillNames[] = {"skill_fist", "skill_club", "skill_sword", "skill_axe", "skill_dist", "skill_shielding", "skill_fishing"};
	static const std::string skillNameTries[] = {"skill_fist_tries", "skill_club_tries", "skill_sword_tries", "skill_axe_tries", "skill_dist_tries", "skill_shielding_tries", "skill_fishing_tries"};
	static constexpr size_t size = sizeof(skillNames) / sizeof(std::string);
	for (uint8_t i = 0; i < size; ++i) {
		uint16_t skillLevel = result->getNumber<uint16_t>(skillNames[i]);
		uint64_t skillTries = result->getNumber<uint64_t>(skillNameTries[i]);
		uint64_t nextSkillTries = player->vocation->getReqSkillTries(i, skillLevel + 1);
		// discard implausible try counts, as with mana spent above
		if (skillTries > nextSkillTries) {
			skillTries = 0;
		}
		player->skills[i].level = skillLevel;
		player->skills[i].tries = skillTries;
		player->skills[i].percent = Player::getPercentLevel(skillTries, nextSkillTries);
	}
	// guild membership; loads and caches the guild (and the member's rank,
	// fetched separately when not already present on the cached guild)
	if ((result = db.storeQuery(fmt::format("SELECT `guild_id`, `rank_id`, `nick` FROM `guild_membership` WHERE `player_id` = {:d}", player->getGUID())))) {
		uint32_t guildId = result->getNumber<uint32_t>("guild_id");
		uint32_t playerRankId = result->getNumber<uint32_t>("rank_id");
		player->guildNick = result->getString("nick");
		Guild* guild = g_game.getGuild(guildId);
		if (!guild) {
			guild = IOGuild::loadGuild(guildId);
			if (guild) {
				g_game.addGuild(guild);
			} else {
				std::cout << "[Warning - IOLoginData::loadPlayer] " << player->name << " has Guild ID " << guildId << " which doesn't exist" << std::endl;
			}
		}
		if (guild) {
			player->guild = guild;
			GuildRank_ptr rank = guild->getRankById(playerRankId);
			if (!rank) {
				if ((result = db.storeQuery(fmt::format("SELECT `id`, `name`, `level` FROM `guild_ranks` WHERE `id` = {:d}", playerRankId)))) {
					guild->addRank(result->getNumber<uint32_t>("id"), result->getString("name"), result->getNumber<uint16_t>("level"));
				}
				rank = guild->getRankById(playerRankId);
				if (!rank) {
					// unknown rank: drop the membership
					player->guild = nullptr;
				}
			}
			player->guildRank = rank;
			IOGuild::getWarList(guildId, player->guildWarVector);
			if ((result = db.storeQuery(fmt::format("SELECT COUNT(*) AS `members` FROM `guild_membership` WHERE `guild_id` = {:d}", guildId)))) {
				guild->setMemberCount(result->getNumber<uint32_t>("members"));
			}
		}
	}
	// learned instant spells
	if ((result = db.storeQuery(fmt::format("SELECT `player_id`, `name` FROM `player_spells` WHERE `player_id` = {:d}", player->getGUID())))) {
		do {
			player->learnedInstantSpellList.emplace_front(result->getString("name"));
		} while (result->next());
	}
	//load inventory items
	// a pid within the slot range is an equipment slot; any other pid is the
	// sid of the containing container (see saveItems for the numbering)
	ItemMap itemMap;
	if ((result = db.storeQuery(fmt::format("SELECT `pid`, `sid`, `itemtype`, `count`, `attributes` FROM `player_items` WHERE `player_id` = {:d} ORDER BY `sid` DESC", player->getGUID())))) {
		loadItems(itemMap, result);
		for (ItemMap::const_reverse_iterator it = itemMap.rbegin(), end = itemMap.rend(); it != end; ++it) {
			const std::pair<Item*, int32_t>& pair = it->second;
			Item* item = pair.first;
			int32_t pid = pair.second;
			if (pid >= CONST_SLOT_FIRST && pid <= CONST_SLOT_LAST) {
				player->internalAddThing(pid, item);
			} else {
				ItemMap::const_iterator it2 = itemMap.find(pid);
				if (it2 == itemMap.end()) {
					continue;
				}
				Container* container = it2->second.first->getContainer();
				if (container) {
					container->internalAddThing(item);
				}
			}
		}
	}
	//load depot items
	itemMap.clear();
	if ((result = db.storeQuery(fmt::format("SELECT `pid`, `sid`, `itemtype`, `count`, `attributes` FROM `player_depotitems` WHERE `player_id` = {:d} ORDER BY `sid` DESC", player->getGUID())))) {
		loadItems(itemMap, result);
		for (ItemMap::const_reverse_iterator it = itemMap.rbegin(), end = itemMap.rend(); it != end; ++it) {
			const std::pair<Item*, int32_t>& pair = it->second;
			Item* item = pair.first;
			int32_t pid = pair.second;
			if (pid >= 0 && pid < 100) {
				// top-level depot entry: pid is the depot box id
				DepotChest* depotChest = player->getDepotChest(pid, true);
				if (depotChest) {
					depotChest->internalAddThing(item);
				}
			} else {
				ItemMap::const_iterator it2 = itemMap.find(pid);
				if (it2 == itemMap.end()) {
					continue;
				}
				Container* container = it2->second.first->getContainer();
				if (container) {
					container->internalAddThing(item);
				}
			}
		}
	}
	//load inbox items
	itemMap.clear();
	if ((result = db.storeQuery(fmt::format("SELECT `pid`, `sid`, `itemtype`, `count`, `attributes` FROM `player_inboxitems` WHERE `player_id` = {:d} ORDER BY `sid` DESC", player->getGUID())))) {
		loadItems(itemMap, result);
		for (ItemMap::const_reverse_iterator it = itemMap.rbegin(), end = itemMap.rend(); it != end; ++it) {
			const std::pair<Item*, int32_t>& pair = it->second;
			Item* item = pair.first;
			int32_t pid = pair.second;
			if (pid >= 0 && pid < 100) {
				player->getInbox()->internalAddThing(item);
			} else {
				ItemMap::const_iterator it2 = itemMap.find(pid);
				if (it2 == itemMap.end()) {
					continue;
				}
				Container* container = it2->second.first->getContainer();
				if (container) {
					container->internalAddThing(item);
				}
			}
		}
	}
	//load store inbox items
	itemMap.clear();
	if ((result = db.storeQuery(fmt::format("SELECT `pid`, `sid`, `itemtype`, `count`, `attributes` FROM `player_storeinboxitems` WHERE `player_id` = {:d} ORDER BY `sid` DESC", player->getGUID())))) {
		loadItems(itemMap, result);
		for (ItemMap::const_reverse_iterator it = itemMap.rbegin(), end = itemMap.rend(); it != end; ++it) {
			const std::pair<Item*, int32_t>& pair = it->second;
			Item* item = pair.first;
			int32_t pid = pair.second;
			if (pid >= 0 && pid < 100) {
				player->getStoreInbox()->internalAddThing(item);
			} else {
				ItemMap::const_iterator it2 = itemMap.find(pid);
				if (it2 == itemMap.end()) {
					continue;
				}
				Container* container = it2->second.first->getContainer();
				if (container) {
					container->internalAddThing(item);
				}
			}
		}
	}
	//load storage map
	if ((result = db.storeQuery(fmt::format("SELECT `key`, `value` FROM `player_storage` WHERE `player_id` = {:d}", player->getGUID())))) {
		do {
			player->addStorageValue(result->getNumber<uint32_t>("key"), result->getNumber<int32_t>("value"), true);
		} while (result->next());
	}
	//load vip list
	if ((result = db.storeQuery(fmt::format("SELECT `player_id` FROM `account_viplist` WHERE `account_id` = {:d}", player->getAccount())))) {
		do {
			player->addVIPInternal(result->getNumber<uint32_t>("player_id"));
		} while (result->next());
	}
	// recompute derived state now that all data is in place
	player->updateBaseSpeed();
	player->updateInventoryWeight();
	player->updateItemsLight(true);
	return true;
}
// Serializes a flat item list (plus the contents of any containers in it,
// visited breadth-first) into `query_insert`. Each item gets a serial id
// (`sid`) counting up from 101; `pid` is either the caller-supplied
// slot/parent id (below 100) or the sid of the containing container, which
// is what lets loadPlayer() rebuild the container tree. Returns false as
// soon as a row cannot be added, otherwise executes the insert.
bool IOLoginData::saveItems(const Player* player, const ItemBlockList& itemList, DBInsert& query_insert, PropWriteStream& propWriteStream)
{
	using ContainerBlock = std::pair<Container*, int32_t>;
	std::list<ContainerBlock> queue;
	// sid numbering starts above the 0-99 range reserved for top-level pids
	int32_t runningId = 100;
	Database& db = Database::getInstance();
	for (const auto& it : itemList) {
		int32_t pid = it.first;
		Item* item = it.second;
		++runningId;
		// serialize the item's attributes into the shared, reusable buffer
		propWriteStream.clear();
		item->serializeAttr(propWriteStream);
		size_t attributesSize;
		const char* attributes = propWriteStream.getStream(attributesSize);
		if (!query_insert.addRow(fmt::format("{:d}, {:d}, {:d}, {:d}, {:d}, {:s}", player->getGUID(), pid, runningId, item->getID(), item->getSubType(), db.escapeBlob(attributes, attributesSize)))) {
			return false;
		}
		// queue containers so their contents are saved after this pass
		if (Container* container = item->getContainer()) {
			queue.emplace_back(container, runningId);
		}
	}
	// breadth-first walk over nested containers; parentId is the sid the
	// container itself was stored under
	while (!queue.empty()) {
		const ContainerBlock& cb = queue.front();
		Container* container = cb.first;
		int32_t parentId = cb.second;
		queue.pop_front();
		for (Item* item : container->getItemList()) {
			++runningId;
			Container* subContainer = item->getContainer();
			if (subContainer) {
				queue.emplace_back(subContainer, runningId);
			}
			propWriteStream.clear();
			item->serializeAttr(propWriteStream);
			size_t attributesSize;
			const char* attributes = propWriteStream.getStream(attributesSize);
			if (!query_insert.addRow(fmt::format("{:d}, {:d}, {:d}, {:d}, {:d}, {:s}", player->getGUID(), parentId, runningId, item->getID(), item->getSubType(), db.escapeBlob(attributes, attributesSize)))) {
				return false;
			}
		}
	}
	return query_insert.execute();
}
bool IOLoginData::savePlayer(Player* player)
{
if (player->getHealth() <= 0) {
player->changeHealth(1);
}
Database& db = Database::getInstance();
DBResult_ptr result = db.storeQuery(fmt::format("SELECT `save` FROM `players` WHERE `id` = {:d}", player->getGUID()));
if (!result) {
return false;
}
if (result->getNumber<uint16_t>("save") == 0) {
return db.executeQuery(fmt::format("UPDATE `players` SET `lastlogin` = {:d}, `lastip` = {:d} WHERE `id` = {:d}", player->lastLoginSaved, player->lastIP, player->getGUID()));
}
//serialize conditions
PropWriteStream propWriteStream;
for (Condition* condition : player->conditions) {
if (condition->isPersistent()) {
condition->serialize(propWriteStream);
propWriteStream.write<uint8_t>(CONDITIONATTR_END);
}
}
size_t conditionsSize;
const char* conditions = propWriteStream.getStream(conditionsSize);
//First, an UPDATE query to write the player itself
std::ostringstream query;
query << "UPDATE `players` SET ";
query << "`level` = " << player->level << ',';
query << "`group_id` = " << player->group->id << ',';
query << "`vocation` = " << player->getVocationId() << ',';
query << "`health` = " << player->health << ',';
query << "`healthmax` = " << player->healthMax << ',';
query << "`experience` = " << player->experience << ',';
query << "`lookbody` = " << static_cast<uint32_t>(player->defaultOutfit.lookBody) << ',';
query << "`lookfeet` = " << static_cast<uint32_t>(player->defaultOutfit.lookFeet) << ',';
query << "`lookhead` = " << static_cast<uint32_t>(player->defaultOutfit.lookHead) << ',';
query << "`looklegs` = " << static_cast<uint32_t>(player->defaultOutfit.lookLegs) << ',';
query << "`looktype` = " << player->defaultOutfit.lookType << ',';
query << "`lookaddons` = " << static_cast<uint32_t>(player->defaultOutfit.lookAddons) << ',';
query << "`maglevel` = " << player->magLevel << ',';
query << "`mana` = " << player->mana << ',';
query << "`manamax` = " << player->manaMax << ',';
query << "`manaspent` = " << player->manaSpent << ',';
query << "`soul` = " << static_cast<uint16_t>(player->soul) << ',';
query << "`town_id` = " << player->town->getID() << ',';
const Position& loginPosition = player->getLoginPosition();
query << "`posx` = " << loginPosition.getX() << ',';
query << "`posy` = " << loginPosition.getY() << ',';
query << "`posz` = " << loginPosition.getZ() << ',';
query << "`cap` = " << (player->capacity / 100) << ',';
query << "`sex` = " << static_cast<uint16_t>(player->sex) << ',';
if (player->lastLoginSaved != 0) {
query << "`lastlogin` = " << player->lastLoginSaved << ',';
}
if (player->lastIP != 0) {
query << "`lastip` = " << player->lastIP << ',';
}
query << "`conditions` = " << db.escapeBlob(conditions, conditionsSize) << ',';
if (g_game.getWorldType() != WORLD_TYPE_PVP_ENFORCED) {
int64_t skullTime = 0;
if (player->skullTicks > 0) {
skullTime = time(nullptr) + player->skullTicks;
}
query << "`skulltime` = " << skullTime << ',';
Skulls_t skull = SKULL_NONE;
if (player->skull == SKULL_RED) {
skull = SKULL_RED;
} else if (player->skull == SKULL_BLACK) {
skull = SKULL_BLACK;
}
query << "`skull` = " << static_cast<int64_t>(skull) << ',';
}
query << "`lastlogout` = " << player->getLastLogout() << ',';
query << "`balance` = " << player->bankBalance << ',';
query << "`offlinetraining_time` = " << player->getOfflineTrainingTime() / 1000 << ',';
query << "`offlinetraining_skill` = " << player->getOfflineTrainingSkill() << ',';
query << "`stamina` = " << player->getStaminaMinutes() << ',';
query << "`skill_fist` = " << player->skills[SKILL_FIST].level << ',';
query << "`skill_fist_tries` = " << player->skills[SKILL_FIST].tries << ',';
query << "`skill_club` = " << player->skills[SKILL_CLUB].level << ',';
query << "`skill_club_tries` = " << player->skills[SKILL_CLUB].tries << ',';
query << "`skill_sword` = " << player->skills[SKILL_SWORD].level << ',';
query << "`skill_sword_tries` = " << player->skills[SKILL_SWORD].tries << ',';
query << "`skill_axe` = " << player->skills[SKILL_AXE].level << ',';
query << "`skill_axe_tries` = " << player->skills[SKILL_AXE].tries << ',';
query << "`skill_dist` = " << player->skills[SKILL_DISTANCE].level << ',';
query << "`skill_dist_tries` = " << player->skills[SKILL_DISTANCE].tries << ',';
query << "`skill_shielding` = " << player->skills[SKILL_SHIELD].level << ',';
query << "`skill_shielding_tries` = " << player->skills[SKILL_SHIELD].tries << ',';
query << "`skill_fishing` = " << player->skills[SKILL_FISHING].level << ',';
query << "`skill_fishing_tries` = " << player->skills[SKILL_FISHING].tries << ',';
query << "`direction` = " << static_cast<uint16_t> (player->getDirection()) << ',';
if (!player->isOffline()) {
query << "`onlinetime` = `onlinetime` + " << (time(nullptr) - player->lastLoginSaved) << ',';
}
query << "`blessings` = " << static_cast<uint32_t>(player->blessings);
query << " WHERE `id` = " << player->getGUID();
DBTransaction transaction;
if (!transaction.begin()) {
return false;
}
if (!db.executeQuery(query.str())) {
return false;
}
// learned spells
if (!db.executeQuery(fmt::format("DELETE FROM `player_spells` WHERE `player_id` = {:d}", player->getGUID()))) {
return false;
}
DBInsert spellsQuery("INSERT INTO `player_spells` (`player_id`, `name` ) VALUES ");
for (const std::string& spellName : player->learnedInstantSpellList) {
if (!spellsQuery.addRow(fmt::format("{:d}, {:s}", player->getGUID(), db.escapeString(spellName)))) {
return false;
}
}
if (!spellsQuery.execute()) {
return false;
}
//item saving
if (!db.executeQuery(fmt::format("DELETE FROM `player_items` WHERE `player_id` = {:d}", player->getGUID()))) {
return false;
}
DBInsert itemsQuery("INSERT INTO `player_items` (`player_id`, `pid`, `sid`, `itemtype`, `count`, `attributes`) VALUES ");
ItemBlockList itemList;
for (int32_t slotId = CONST_SLOT_FIRST; slotId <= CONST_SLOT_LAST; ++slotId) {
Item* item = player->inventory[slotId];
if (item) {
itemList.emplace_back(slotId, item);
}
}
if (!saveItems(player, itemList, itemsQuery, propWriteStream)) {
return false;
}
if (player->lastDepotId != -1) {
//save depot items
if (!db.executeQuery(fmt::format("DELETE FROM `player_depotitems` WHERE `player_id` = {:d}", player->getGUID()))) {
return false;
}
DBInsert depotQuery("INSERT INTO `player_depotitems` (`player_id`, `pid`, `sid`, `itemtype`, `count`, `attributes`) VALUES ");
itemList.clear();
for (const auto& it : player->depotChests) {
for (Item* item : it.second->getItemList()) {
itemList.emplace_back(it.first, item);
}
}
if (!saveItems(player, itemList, depotQuery, propWriteStream)) {
return false;
}
}
//save inbox items
if (!db.executeQuery(fmt::format("DELETE FROM `player_inboxitems` WHERE `player_id` = {:d}", player->getGUID()))) {
return false;
}
DBInsert inboxQuery("INSERT INTO `player_inboxitems` (`player_id`, `pid`, `sid`, `itemtype`, `count`, `attributes`) VALUES ");
itemList.clear();
for (Item* item : player->getInbox()->getItemList()) {
itemList.emplace_back(0, item);
}
if (!saveItems(player, itemList, inboxQuery, propWriteStream)) {
return false;
}
//save store inbox items
if (!db.executeQuery(fmt::format("DELETE FROM `player_storeinboxitems` WHERE `player_id` = {:d}", player->getGUID()))) {
return false;
}
DBInsert storeInboxQuery("INSERT INTO `player_storeinboxitems` (`player_id`, `pid`, `sid`, `itemtype`, `count`, `attributes`) VALUES ");
itemList.clear();
for (Item* item : player->getStoreInbox()->getItemList()) {
itemList.emplace_back(0, item);
}
if (!saveItems(player, itemList, storeInboxQuery, propWriteStream)) {
return false;
}
if (!db.executeQuery(fmt::format("DELETE FROM `player_storage` WHERE `player_id` = {:d}", player->getGUID()))) {
return false;
}
DBInsert storageQuery("INSERT INTO `player_storage` (`player_id`, `key`, `value`) VALUES ");
player->genReservedStorageRange();
for (const auto& it : player->storageMap) {
if (!storageQuery.addRow(fmt::format("{:d}, {:d}, {:d}", player->getGUID(), it.first, it.second))) {
return false;
}
}
if (!storageQuery.execute()) {
return false;
}
//End the transaction
return transaction.commit();
}
std::string IOLoginData::getNameByGuid(uint32_t guid)
{
DBResult_ptr result = Database::getInstance().storeQuery(fmt::format("SELECT `name` FROM `players` WHERE `id` = {:d}", guid));
if (!result) {
return std::string();
}
return result->getString("name");
}
uint32_t IOLoginData::getGuidByName(const std::string& name)
{
Database& db = Database::getInstance();
DBResult_ptr result = db.storeQuery(fmt::format("SELECT `id` FROM `players` WHERE `name` = {:s}", db.escapeString(name)));
if (!result) {
return 0;
}
return result->getNumber<uint32_t>("id");
}
bool IOLoginData::getGuidByNameEx(uint32_t& guid, bool& specialVip, std::string& name)
{
Database& db = Database::getInstance();
DBResult_ptr result = db.storeQuery(fmt::format("SELECT `name`, `id`, `group_id`, `account_id` FROM `players` WHERE `name` = {:s}", db.escapeString(name)));
if (!result) {
return false;
}
name = result->getString("name");
guid = result->getNumber<uint32_t>("id");
Group* group = g_game.groups.getGroup(result->getNumber<uint16_t>("group_id"));
uint64_t flags;
if (group) {
flags = group->flags;
} else {
flags = 0;
}
specialVip = (flags & PlayerFlag_SpecialVIP) != 0;
return true;
}
bool IOLoginData::formatPlayerName(std::string& name)
{
Database& db = Database::getInstance();
DBResult_ptr result = db.storeQuery(fmt::format("SELECT `name` FROM `players` WHERE `name` = {:s}", db.escapeString(name)));
if (!result) {
return false;
}
name = result->getString("name");
return true;
}
// Populate itemMap from a player-item result set. Each row is deserialized
// into an Item keyed by its serialized id (`sid`) and paired with its parent
// id (`pid`) so callers can rebuild the containment hierarchy.
// NOTE(review): assumes `result` holds at least one row — the first row is
// consumed before next() is called; verify callers only pass non-empty results.
void IOLoginData::loadItems(ItemMap& itemMap, DBResult_ptr result)
{
	do {
		uint32_t sid = result->getNumber<uint32_t>("sid");
		uint32_t pid = result->getNumber<uint32_t>("pid");
		uint16_t itemId = result->getNumber<uint16_t>("itemtype");
		uint16_t itemCount = result->getNumber<uint16_t>("count");

		unsigned long attrSize;
		const char* attrData = result->getStream("attributes", attrSize);

		PropStream propStream;
		propStream.init(attrData, attrSize);

		if (Item* item = Item::CreateItem(itemId, itemCount)) {
			// A failed attribute unserialization is logged but the item is
			// still inserted, matching the original best-effort behaviour.
			if (!item->unserializeAttr(propStream)) {
				std::cout << "WARNING: Serialize error in IOLoginData::loadItems" << std::endl;
			}
			itemMap[sid] = std::make_pair(item, pid);
		}
	} while (result->next());
}
// increaseBankBalance adds `bankBalance` gold to the stored bank balance of the
// player with the given GUID. The addition happens inside the UPDATE statement,
// so it works whether or not the player record is currently loaded.
void IOLoginData::increaseBankBalance(uint32_t guid, uint64_t bankBalance)
{
Database::getInstance().executeQuery(fmt::format("UPDATE `players` SET `balance` = `balance` + {:d} WHERE `id` = {:d}", bankBalance, guid));
}
bool IOLoginData::hasBiddedOnHouse(uint32_t guid)
{
Database& db = Database::getInstance();
return db.storeQuery(fmt::format("SELECT `id` FROM `houses` WHERE `highest_bidder` = {:d} LIMIT 1", guid)).get() != nullptr;
}
std::forward_list<VIPEntry> IOLoginData::getVIPEntries(uint32_t accountId)
{
std::forward_list<VIPEntry> entries;
DBResult_ptr result = Database::getInstance().storeQuery(fmt::format("SELECT `player_id`, (SELECT `name` FROM `players` WHERE `id` = `player_id`) AS `name`, `description`, `icon`, `notify` FROM `account_viplist` WHERE `account_id` = {:d}", accountId));
if (result) {
do {
entries.emplace_front(
result->getNumber<uint32_t>("player_id"),
result->getString("name"),
result->getString("description"),
result->getNumber<uint32_t>("icon"),
result->getNumber<uint16_t>("notify") != 0
);
} while (result->next());
}
return entries;
}
// Insert a new VIP-list entry for `guid` on account `accountId`; `description`
// is escaped before being embedded in the statement.
void IOLoginData::addVIPEntry(uint32_t accountId, uint32_t guid, const std::string& description, uint32_t icon, bool notify)
{
	Database& db = Database::getInstance();
	std::string query = fmt::format("INSERT INTO `account_viplist` (`account_id`, `player_id`, `description`, `icon`, `notify`) VALUES ({:d}, {:d}, {:s}, {:d}, {:d})", accountId, guid, db.escapeString(description), icon, notify);
	db.executeQuery(query);
}
// Update the description, icon and notify flag of an existing VIP-list entry;
// `description` is escaped before being embedded in the statement.
void IOLoginData::editVIPEntry(uint32_t accountId, uint32_t guid, const std::string& description, uint32_t icon, bool notify)
{
	Database& db = Database::getInstance();
	std::string query = fmt::format("UPDATE `account_viplist` SET `description` = {:s}, `icon` = {:d}, `notify` = {:d} WHERE `account_id` = {:d} AND `player_id` = {:d}", db.escapeString(description), icon, notify, accountId, guid);
	db.executeQuery(query);
}
// removeVIPEntry deletes the VIP-list entry for player `guid` on account
// `accountId`. No-op if the entry does not exist.
void IOLoginData::removeVIPEntry(uint32_t accountId, uint32_t guid)
{
Database::getInstance().executeQuery(fmt::format("DELETE FROM `account_viplist` WHERE `account_id` = {:d} AND `player_id` = {:d}", accountId, guid));
}
// updatePremiumTime stores a new premium expiry timestamp (`endTime`, unix
// time) for the given account.
void IOLoginData::updatePremiumTime(uint32_t accountId, time_t endTime)
{
Database::getInstance().executeQuery(fmt::format("UPDATE `accounts` SET `premium_ends_at` = {:d} WHERE `id` = {:d}", endTime, accountId));
}
| 1 | 19,083 | Remove the cast | otland-forgottenserver | cpp |
@@ -1,4 +1,4 @@
-// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
+// Copyright (c) 2016-2018 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package intdataplane
import (
"fmt"
"io"
"net"
"os"
"reflect"
"regexp"
"strings"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/felix/ifacemonitor"
"github.com/projectcalico/felix/ip"
"github.com/projectcalico/felix/iptables"
"github.com/projectcalico/felix/proto"
"github.com/projectcalico/felix/routetable"
"github.com/projectcalico/felix/rules"
"github.com/projectcalico/libcalico-go/lib/set"
)
// routeTable is the narrow interface of the RouteTable dependency that
// endpointManager actually uses; keeping it to one method lets an alternative
// implementation be substituted (e.g. in tests).
type routeTable interface {
// SetRoutes replaces the full set of routes for the given interface; passing
// nil targets removes all routes (see resolveWorkloadEndpoints).
SetRoutes(ifaceName string, targets []routetable.Target)
}
// endpointManager manages the dataplane resources that belong to each endpoint as well as
// the "dispatch chains" that fan out packets to the right per-endpoint chain.
//
// It programs the relevant iptables chains (via the iptables.Table objects) along with
// per-endpoint routes (via the RouteTable).
//
// Since calculating the dispatch chains is fairly expensive, the main OnUpdate method
// simply records the pending state of each interface and defers the actual calculation
// to CompleteDeferredWork(). This is also the basis of our failure handling; updates
// that fail are left in the pending state so they can be retried later.
type endpointManager struct {
// Config.
// ipVersion selects the address family this manager handles; 4 means IPv4,
// any other value is treated as IPv6 (see resolveWorkloadEndpoints).
ipVersion uint8
// wlIfacesRegexp matches interface names belonging to workload endpoints;
// built from the configured workload interface prefixes.
wlIfacesRegexp *regexp.Regexp
// Our dependencies.
// The iptables tables we program: raw (untracked policy), mangle (pre-DNAT
// policy) and filter (normal policy).
rawTable iptablesTable
mangleTable iptablesTable
filterTable iptablesTable
ruleRenderer rules.RuleRenderer
routeTable routeTable
// writeProcSys writes per-interface kernel settings; injected via
// newEndpointManagerWithShims so it can be replaced.
writeProcSys procSysWriter
// Pending updates, cleared in CompleteDeferredWork as the data is copied to the activeXYZ
// fields.
pendingWlEpUpdates map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint
pendingIfaceUpdates map[string]ifacemonitor.State
// Active state, updated in CompleteDeferredWork.
activeWlEndpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint
activeWlIfaceNameToID map[string]proto.WorkloadEndpointID
activeUpIfaces set.Set
activeWlIDToChains map[proto.WorkloadEndpointID][]*iptables.Chain
activeWlDispatchChains map[string]*iptables.Chain
// wlIfaceNamesToReconfigure contains names of workload interfaces that need to have
// their configuration (sysctls etc.) refreshed.
wlIfaceNamesToReconfigure set.Set
// epIDsToUpdateStatus contains IDs of endpoints that we need to report status for.
// Mix of host and workload endpoint IDs.
epIDsToUpdateStatus set.Set
// hostIfaceToAddrs maps host interface name to the set of IPs on that interface (reported
// fro the dataplane).
hostIfaceToAddrs map[string]set.Set
// rawHostEndpoints contains the raw (i.e. not resolved to interface) host endpoints.
rawHostEndpoints map[proto.HostEndpointID]*proto.HostEndpoint
// hostEndpointsDirty is set to true when host endpoints are updated.
hostEndpointsDirty bool
// activeHostIfaceToChains maps host interface name to the chains that we've programmed.
activeHostIfaceToRawChains map[string][]*iptables.Chain
activeHostIfaceToFiltChains map[string][]*iptables.Chain
activeHostIfaceToMangleChains map[string][]*iptables.Chain
// Dispatch chains that we've programmed for host endpoints.
activeHostRawDispatchChains map[string]*iptables.Chain
activeHostFilterDispatchChains map[string]*iptables.Chain
activeHostMangleDispatchChains map[string]*iptables.Chain
// activeHostEpIDToIfaceNames records which interfaces we resolved each host endpoint to.
activeHostEpIDToIfaceNames map[proto.HostEndpointID][]string
// activeIfaceNameToHostEpID records which endpoint we resolved each host interface to.
activeIfaceNameToHostEpID map[string]proto.HostEndpointID
// needToCheckDispatchChains is set whenever workload endpoints change so
// that the (expensive) dispatch chain recalculation runs on the next
// CompleteDeferredWork.
needToCheckDispatchChains bool
// Callbacks
OnEndpointStatusUpdate EndpointStatusUpdateCallback
}
// EndpointStatusUpdateCallback is invoked to report the status of a workload
// or host endpoint. id is a proto.WorkloadEndpointID or proto.HostEndpointID;
// status is "up", "down", "error", or "" to signal deletion (see
// calculateWorkloadEndpointStatus / calculateHostEndpointStatus).
type EndpointStatusUpdateCallback func(ipVersion uint8, id interface{}, status string)

// procSysWriter writes a value to a /proc/sys-style path; it is injected via
// newEndpointManagerWithShims so it can be replaced.
type procSysWriter func(path, value string) error
// newEndpointManager constructs an endpointManager wired to the real
// /proc/sys writer; it delegates to newEndpointManagerWithShims, which is the
// variant with an injectable writer.
func newEndpointManager(
rawTable iptablesTable,
mangleTable iptablesTable,
filterTable iptablesTable,
ruleRenderer rules.RuleRenderer,
routeTable routeTable,
ipVersion uint8,
wlInterfacePrefixes []string,
onWorkloadEndpointStatusUpdate EndpointStatusUpdateCallback,
) *endpointManager {
return newEndpointManagerWithShims(
rawTable,
mangleTable,
filterTable,
ruleRenderer,
routeTable,
ipVersion,
wlInterfacePrefixes,
onWorkloadEndpointStatusUpdate,
writeProcSys,
)
}
// newEndpointManagerWithShims constructs an endpointManager with an injectable
// /proc/sys writer. The workload interface prefixes are compiled into a single
// anchored regexp ("^(prefix1|prefix2).*") used to classify interface names.
func newEndpointManagerWithShims(
rawTable iptablesTable,
mangleTable iptablesTable,
filterTable iptablesTable,
ruleRenderer rules.RuleRenderer,
routeTable routeTable,
ipVersion uint8,
wlInterfacePrefixes []string,
onWorkloadEndpointStatusUpdate EndpointStatusUpdateCallback,
procSysWriter procSysWriter,
) *endpointManager {
wlIfacesPattern := "^(" + strings.Join(wlInterfacePrefixes, "|") + ").*"
wlIfacesRegexp := regexp.MustCompile(wlIfacesPattern)
return &endpointManager{
ipVersion: ipVersion,
wlIfacesRegexp: wlIfacesRegexp,
rawTable: rawTable,
mangleTable: mangleTable,
filterTable: filterTable,
ruleRenderer: ruleRenderer,
routeTable: routeTable,
writeProcSys: procSysWriter,
// Pending updates, we store these up as OnUpdate is called, then process them
// in CompleteDeferredWork and transfer the important data to the activeXYX fields.
pendingWlEpUpdates: map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{},
pendingIfaceUpdates: map[string]ifacemonitor.State{},
activeUpIfaces: set.New(),
activeWlEndpoints: map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{},
activeWlIfaceNameToID: map[string]proto.WorkloadEndpointID{},
activeWlIDToChains: map[proto.WorkloadEndpointID][]*iptables.Chain{},
wlIfaceNamesToReconfigure: set.New(),
epIDsToUpdateStatus: set.New(),
hostIfaceToAddrs: map[string]set.Set{},
rawHostEndpoints: map[proto.HostEndpointID]*proto.HostEndpoint{},
// Start dirty so that the first CompleteDeferredWork resolves host endpoints.
hostEndpointsDirty: true,
activeHostIfaceToRawChains: map[string][]*iptables.Chain{},
activeHostIfaceToFiltChains: map[string][]*iptables.Chain{},
activeHostIfaceToMangleChains: map[string][]*iptables.Chain{},
// Caches of the current dispatch chains indexed by chain name. We use these to
// calculate deltas when we need to update the chains.
activeWlDispatchChains: map[string]*iptables.Chain{},
activeHostFilterDispatchChains: map[string]*iptables.Chain{},
activeHostMangleDispatchChains: map[string]*iptables.Chain{},
activeHostRawDispatchChains: map[string]*iptables.Chain{},
needToCheckDispatchChains: true, // Need to do start-of-day update.
OnEndpointStatusUpdate: onWorkloadEndpointStatusUpdate,
}
}
// OnUpdate records incoming datastore/dataplane messages for later processing
// by CompleteDeferredWork(). Workload endpoint updates and interface state
// changes are only queued here; host endpoint and interface-address updates
// are applied to the raw maps immediately and the host endpoint state is
// flagged dirty.
func (m *endpointManager) OnUpdate(protoBufMsg interface{}) {
log.WithField("msg", protoBufMsg).Debug("Received message")
switch msg := protoBufMsg.(type) {
case *proto.WorkloadEndpointUpdate:
m.pendingWlEpUpdates[*msg.Id] = msg.Endpoint
case *proto.WorkloadEndpointRemove:
// A nil pending entry signals deletion; handled in resolveWorkloadEndpoints.
m.pendingWlEpUpdates[*msg.Id] = nil
case *proto.HostEndpointUpdate:
log.WithField("msg", msg).Debug("Host endpoint update")
m.rawHostEndpoints[*msg.Id] = msg.Endpoint
m.hostEndpointsDirty = true
m.epIDsToUpdateStatus.Add(*msg.Id)
case *proto.HostEndpointRemove:
log.WithField("msg", msg).Debug("Host endpoint removed")
delete(m.rawHostEndpoints, *msg.Id)
m.hostEndpointsDirty = true
m.epIDsToUpdateStatus.Add(*msg.Id)
case *ifaceUpdate:
log.WithField("update", msg).Debug("Interface state changed.")
m.pendingIfaceUpdates[msg.Name] = msg.State
case *ifaceAddrsUpdate:
log.WithField("update", msg).Debug("Interface addrs changed.")
if m.wlIfacesRegexp.MatchString(msg.Name) {
log.WithField("update", msg).Debug("Workload interface, ignoring.")
return
}
// nil Addrs signals that the interface has gone away.
if msg.Addrs != nil {
m.hostIfaceToAddrs[msg.Name] = msg.Addrs
} else {
delete(m.hostIfaceToAddrs, msg.Name)
}
m.hostEndpointsDirty = true
}
}
// CompleteDeferredWork applies all the updates that OnUpdate queued: it folds
// the pending interface state changes into the active set, resolves workload
// endpoints (always) and host endpoints (only when dirty), then emits status
// reports for any endpoints whose state may have changed. Always returns nil;
// per-item failures are left queued for retry on the next call.
func (m *endpointManager) CompleteDeferredWork() error {
// Copy the pending interface state to the active set and mark any interfaces that have
// changed state for reconfiguration by resolveWorkload/HostEndpoints()
for ifaceName, state := range m.pendingIfaceUpdates {
if state == ifacemonitor.StateUp {
m.activeUpIfaces.Add(ifaceName)
if m.wlIfacesRegexp.MatchString(ifaceName) {
log.WithField("ifaceName", ifaceName).Info(
"Workload interface came up, marking for reconfiguration.")
m.wlIfaceNamesToReconfigure.Add(ifaceName)
}
} else {
m.activeUpIfaces.Discard(ifaceName)
}
// If this interface is linked to any already-existing endpoints, mark the endpoint
// status for recalculation. If the matching endpoint changes when we do
// resolveHostEndpoints() then that will mark old and new matching endpoints for
// update.
m.markEndpointStatusDirtyByIface(ifaceName)
// Clean up as we go...
delete(m.pendingIfaceUpdates, ifaceName)
}
m.resolveWorkloadEndpoints()
if m.hostEndpointsDirty {
log.Debug("Host endpoints updated, resolving them.")
m.resolveHostEndpoints()
m.hostEndpointsDirty = false
}
// Now send any endpoint status updates.
m.updateEndpointStatuses()
return nil
}
// markEndpointStatusDirtyByIface queues a status recalculation for whichever
// endpoint (workload or host) is currently associated with the given
// interface. Unknown interfaces are ignored: if the endpoint gets created
// later we'll report status then, and deleted endpoints are already cleaned up.
func (m *endpointManager) markEndpointStatusDirtyByIface(ifaceName string) {
	logCxt := log.WithField("ifaceName", ifaceName)
	if wlEpID, known := m.activeWlIfaceNameToID[ifaceName]; known {
		logCxt.Info("Workload interface state changed; marking for status update.")
		m.epIDsToUpdateStatus.Add(wlEpID)
		return
	}
	if hostEpID, known := m.activeIfaceNameToHostEpID[ifaceName]; known {
		logCxt.Info("Host interface state changed; marking for status update.")
		m.epIDsToUpdateStatus.Add(hostEpID)
		return
	}
	logCxt.Debug("Ignoring interface state change for unknown interface.")
}
// updateEndpointStatuses reports the current status of every endpoint queued
// in epIDsToUpdateStatus via the OnEndpointStatusUpdate callback, draining the
// set as it goes.
func (m *endpointManager) updateEndpointStatuses() {
	log.WithField("dirtyEndpoints", m.epIDsToUpdateStatus).Debug("Reporting endpoint status.")
	m.epIDsToUpdateStatus.Iter(func(item interface{}) error {
		switch epID := item.(type) {
		case proto.WorkloadEndpointID:
			m.OnEndpointStatusUpdate(m.ipVersion, epID, m.calculateWorkloadEndpointStatus(epID))
		case proto.HostEndpointID:
			m.OnEndpointStatusUpdate(m.ipVersion, epID, m.calculateHostEndpointStatus(epID))
		}
		return set.RemoveItem
	})
}
// calculateWorkloadEndpointStatus derives the status string for a workload
// endpoint: "" if the endpoint is unknown (i.e. deleted, signalling removal),
// "error" if its interface still needs reconfiguration, "up" if it is both
// admin-up ("active") and oper-up, and "down" otherwise.
func (m *endpointManager) calculateWorkloadEndpointStatus(id proto.WorkloadEndpointID) string {
	logCxt := log.WithField("workloadEndpointID", id)
	logCxt.Debug("Re-evaluating workload endpoint status")
	workload, known := m.activeWlEndpoints[id]
	var operUp, adminUp, failed bool
	if known {
		adminUp = workload.State == "active"
		operUp = m.activeUpIfaces.Contains(workload.Name)
		failed = m.wlIfaceNamesToReconfigure.Contains(workload.Name)
	}
	status := ""
	if known {
		switch {
		case failed:
			status = "error"
		case operUp && adminUp:
			status = "up"
		default:
			status = "down"
		}
	}
	logCxt = logCxt.WithFields(log.Fields{
		"known":   known,
		"failed":  failed,
		"operUp":  operUp,
		"adminUp": adminUp,
		"status":  status,
	})
	logCxt.Info("Re-evaluated workload endpoint status")
	return status
}
// calculateHostEndpointStatus derives the status string for a host endpoint:
// "" if the endpoint is unknown (i.e. deleted, signalling removal), "up" if it
// resolved to at least one interface and all of them are oper-up, "down" if it
// resolved but some interface is down, and "error" if it is known but did not
// resolve to any interface.
func (m *endpointManager) calculateHostEndpointStatus(id proto.HostEndpointID) (status string) {
logCxt := log.WithField("hostEndpointID", id)
logCxt.Debug("Re-evaluating host endpoint status")
var resolved, operUp bool
_, known := m.rawHostEndpoints[id]
// Note: if endpoint is not known (i.e. has been deleted), status will be "", which signals
// a deletion.
if known {
ifaceNames := m.activeHostEpIDToIfaceNames[id]
if len(ifaceNames) > 0 {
resolved = true
// operUp is the AND over all matching interfaces' states.
operUp = true
for _, ifaceName := range ifaceNames {
ifaceUp := m.activeUpIfaces.Contains(ifaceName)
logCxt.WithFields(log.Fields{
"ifaceName": ifaceName,
"ifaceUp": ifaceUp,
}).Debug("Status of matching interface.")
operUp = operUp && ifaceUp
}
}
if resolved && operUp {
status = "up"
} else if resolved {
status = "down"
} else {
// Known but failed to resolve, map that to error.
status = "error"
}
}
logCxt = logCxt.WithFields(log.Fields{
"known": known,
"resolved": resolved,
"operUp": operUp,
"status": status,
})
logCxt.Info("Re-evaluated host endpoint status")
return status
}
// resolveWorkloadEndpoints applies the pending workload endpoint updates: it
// (re)writes each endpoint's per-endpoint filter chains and local routes,
// tears down state for deleted or renamed endpoints, rewrites the workload
// dispatch chains when endpoints changed, and (re)applies per-interface
// kernel configuration for interfaces marked for reconfiguration. Interfaces
// whose configuration fails stay queued so they are retried on the next call.
func (m *endpointManager) resolveWorkloadEndpoints() {
if len(m.pendingWlEpUpdates) > 0 {
// We're about to make endpoint updates, make sure we recheck the dispatch chains.
m.needToCheckDispatchChains = true
}
// Update any dirty endpoints.
for id, workload := range m.pendingWlEpUpdates {
logCxt := log.WithField("id", id)
oldWorkload := m.activeWlEndpoints[id]
if workload != nil {
logCxt.Info("Updating per-endpoint chains.")
if oldWorkload != nil && oldWorkload.Name != workload.Name {
logCxt.Debug("Interface name changed, cleaning up old state")
m.filterTable.RemoveChains(m.activeWlIDToChains[id])
m.routeTable.SetRoutes(oldWorkload.Name, nil)
m.wlIfaceNamesToReconfigure.Discard(oldWorkload.Name)
delete(m.activeWlIfaceNameToID, oldWorkload.Name)
}
// Only the first tier's policies are rendered into the chains.
var ingressPolicyNames, egressPolicyNames []string
if len(workload.Tiers) > 0 {
ingressPolicyNames = workload.Tiers[0].IngressPolicies
egressPolicyNames = workload.Tiers[0].EgressPolicies
}
adminUp := workload.State == "active"
chains := m.ruleRenderer.WorkloadEndpointToIptablesChains(
workload.Name,
adminUp,
ingressPolicyNames,
egressPolicyNames,
workload.ProfileIds,
)
m.filterTable.UpdateChains(chains)
m.activeWlIDToChains[id] = chains
// Collect the IP prefixes that we want to route locally to this endpoint:
logCxt.Info("Updating endpoint routes.")
var (
ipStrings []string
natInfos []*proto.NatInfo
addrSuffix string
)
if m.ipVersion == 4 {
ipStrings = workload.Ipv4Nets
natInfos = workload.Ipv4Nat
addrSuffix = "/32"
} else {
ipStrings = workload.Ipv6Nets
natInfos = workload.Ipv6Nat
addrSuffix = "/128"
}
if len(natInfos) != 0 {
// Append each NAT mapping's external IP (as a single-host prefix) to the
// set of prefixes routed to this endpoint.
old := ipStrings
ipStrings = make([]string, len(old)+len(natInfos))
copy(ipStrings, old)
for ii, natInfo := range natInfos {
ipStrings[len(old)+ii] = natInfo.ExtIp + addrSuffix
}
}
var mac net.HardwareAddr
if workload.Mac != "" {
var err error
mac, err = net.ParseMAC(workload.Mac)
if err != nil {
// Log but continue: routes are still programmed, just without a DestMAC.
logCxt.WithError(err).Error(
"Failed to parse endpoint's MAC address")
}
}
var routeTargets []routetable.Target
if adminUp {
logCxt.Debug("Endpoint up, adding routes")
for _, s := range ipStrings {
routeTargets = append(routeTargets, routetable.Target{
CIDR: ip.MustParseCIDR(s),
DestMAC: mac,
})
}
} else {
logCxt.Debug("Endpoint down, removing routes")
}
m.routeTable.SetRoutes(workload.Name, routeTargets)
m.wlIfaceNamesToReconfigure.Add(workload.Name)
m.activeWlEndpoints[id] = workload
m.activeWlIfaceNameToID[workload.Name] = id
delete(m.pendingWlEpUpdates, id)
} else {
logCxt.Info("Workload removed, deleting its chains.")
m.filterTable.RemoveChains(m.activeWlIDToChains[id])
if oldWorkload != nil {
// Remove any routes from the routing table. The RouteTable will
// remove any conntrack entries as a side-effect.
logCxt.Info("Workload removed, deleting old state.")
m.routeTable.SetRoutes(oldWorkload.Name, nil)
m.wlIfaceNamesToReconfigure.Discard(oldWorkload.Name)
delete(m.activeWlIfaceNameToID, oldWorkload.Name)
}
delete(m.activeWlEndpoints, id)
delete(m.pendingWlEpUpdates, id)
}
// Update or deletion, make sure we update the interface status.
m.epIDsToUpdateStatus.Add(id)
}
if m.needToCheckDispatchChains {
// Rewrite the dispatch chains if they've changed.
newDispatchChains := m.ruleRenderer.WorkloadDispatchChains(m.activeWlEndpoints)
m.updateDispatchChains(m.activeWlDispatchChains, newDispatchChains, m.filterTable)
m.needToCheckDispatchChains = false
}
m.wlIfaceNamesToReconfigure.Iter(func(item interface{}) error {
ifaceName := item.(string)
err := m.configureInterface(ifaceName)
if err != nil {
// Leave the interface queued (return nil keeps it in the set) for retry.
log.WithError(err).Warn("Failed to configure interface, will retry")
return nil
}
return set.RemoveItem
})
}
// resolveHostEndpoints recomputes, from scratch, which host endpoint governs
// each non-workload interface, then reprograms the per-interface filter,
// mangle (pre-DNAT) and raw (untracked) chains and the corresponding dispatch
// chains to match, removing programming for interfaces no longer in use. See
// the long comment below for the matching rules.
func (m *endpointManager) resolveHostEndpoints() {
// Host endpoint resolution
// ------------------------
//
// There is a set of non-workload interfaces on the local host, each possibly with
// IP addresses, that might be controlled by HostEndpoint resources in the Calico
// data model. The data model syntactically allows multiple HostEndpoint
// resources to match a given interface - for example, an interface 'eth1' might
// have address 10.240.0.34 and 172.19.2.98, and the data model might include:
//
// - HostEndpoint A with Name 'eth1'
//
// - HostEndpoint B with ExpectedIpv4Addrs including '10.240.0.34'
//
// - HostEndpoint C with ExpectedIpv4Addrs including '172.19.2.98'.
//
// but at runtime, at any given time, we only allow one HostEndpoint to govern
// that interface. That HostEndpoint becomes the active one, and the others
// remain inactive. (But if, for example, the active HostEndpoint resource was
// deleted, then one of the inactive ones could take over.) Given multiple
// matching HostEndpoint resources, the one that wins is the one with the
// alphabetically earliest HostEndpointId
//
// So the process here is not about 'resolving' a particular HostEndpoint on its
// own. Rather it is looking at the set of local non-workload interfaces and
// seeing which of them are matched by the current set of HostEndpoints as a
// whole.
newIfaceNameToHostEpID := map[string]proto.HostEndpointID{}
newPreDNATIfaceNameToHostEpID := map[string]proto.HostEndpointID{}
newUntrackedIfaceNameToHostEpID := map[string]proto.HostEndpointID{}
newHostEpIDToIfaceNames := map[proto.HostEndpointID][]string{}
// Pass 1: for each host interface, find the best-matching host endpoint (if any).
for ifaceName, ifaceAddrs := range m.hostIfaceToAddrs {
ifaceCxt := log.WithFields(log.Fields{
"ifaceName": ifaceName,
"ifaceAddrs": ifaceAddrs,
})
bestHostEpId := proto.HostEndpointID{}
var bestHostEp proto.HostEndpoint
HostEpLoop:
for id, hostEp := range m.rawHostEndpoints {
logCxt := ifaceCxt.WithField("id", id)
logCxt.WithField("bestHostEpId", bestHostEpId).Debug("See if HostEp matches interface")
if (bestHostEpId.EndpointId != "") && (bestHostEpId.EndpointId < id.EndpointId) {
// We already have a HostEndpointId that is better than
// this one, so no point looking any further.
logCxt.Debug("No better than existing match")
continue
}
if hostEp.Name == ifaceName {
// The HostEndpoint has an explicit name that matches the
// interface.
logCxt.Debug("Match on explicit iface name")
bestHostEpId = id
bestHostEp = *hostEp
continue
} else if hostEp.Name != "" {
// The HostEndpoint has an explicit name that isn't this
// interface. Continue, so as not to allow it to match on
// an IP address instead.
logCxt.Debug("Rejected on explicit iface name")
continue
}
for _, wantedList := range [][]string{hostEp.ExpectedIpv4Addrs, hostEp.ExpectedIpv6Addrs} {
for _, wanted := range wantedList {
logCxt.WithField("wanted", wanted).Debug("Address wanted by HostEp")
if ifaceAddrs.Contains(wanted) {
// The HostEndpoint expects an IP address
// that is on this interface.
logCxt.Debug("Match on address")
bestHostEpId = id
bestHostEp = *hostEp
continue HostEpLoop
}
}
}
}
if bestHostEpId.EndpointId != "" {
logCxt := log.WithFields(log.Fields{
"ifaceName": ifaceName,
"bestHostEpId": bestHostEpId,
})
logCxt.Debug("Got HostEp for interface")
newIfaceNameToHostEpID[ifaceName] = bestHostEpId
if len(bestHostEp.UntrackedTiers) > 0 {
// Optimisation: only add the endpoint chains to the raw (untracked)
// table if there's some untracked policy to apply. This reduces
// per-packet latency since every packet has to traverse the raw
// table.
logCxt.Debug("Endpoint has untracked policies.")
newUntrackedIfaceNameToHostEpID[ifaceName] = bestHostEpId
}
if len(bestHostEp.PreDnatTiers) > 0 {
// Similar optimisation (or neatness) for pre-DNAT policy.
logCxt.Debug("Endpoint has pre-DNAT policies.")
newPreDNATIfaceNameToHostEpID[ifaceName] = bestHostEpId
}
// Note, in contrast to the check above, we unconditionally record the
// match in newHostEpIDToIfaceNames so that we always render the endpoint
// into the filter table. This ensures that we get the correct "default
// drop" behaviour and that failsafe rules are applied correctly.
newHostEpIDToIfaceNames[bestHostEpId] = append(
newHostEpIDToIfaceNames[bestHostEpId], ifaceName)
}
// If the matching endpoint changed, mark both old and new for status update.
oldID, wasKnown := m.activeIfaceNameToHostEpID[ifaceName]
newID, isKnown := newIfaceNameToHostEpID[ifaceName]
if oldID != newID {
logCxt := ifaceCxt.WithFields(log.Fields{
"oldID": m.activeIfaceNameToHostEpID[ifaceName],
"newID": newIfaceNameToHostEpID[ifaceName],
})
logCxt.Info("Endpoint matching interface changed")
if wasKnown {
logCxt.Debug("Endpoint was known, updating old endpoint status")
m.epIDsToUpdateStatus.Add(oldID)
}
if isKnown {
logCxt.Debug("Endpoint is known, updating new endpoint status")
m.epIDsToUpdateStatus.Add(newID)
}
}
}
// Pass 2: program per-interface chains for the endpoints now in use. As each
// interface is handled, its entry is deleted from the activeHostIfaceTo*
// maps, so whatever remains there afterwards is stale and gets removed.
// Set up programming for the host endpoints that are now to be used.
newHostIfaceFiltChains := map[string][]*iptables.Chain{}
for ifaceName, id := range newIfaceNameToHostEpID {
log.WithField("id", id).Info("Updating host endpoint chains.")
hostEp := m.rawHostEndpoints[id]
// Update the filter chain, for normal traffic.
var ingressPolicyNames, egressPolicyNames []string
var ingressForwardPolicyNames, egressForwardPolicyNames []string
if len(hostEp.Tiers) > 0 {
ingressPolicyNames = hostEp.Tiers[0].IngressPolicies
egressPolicyNames = hostEp.Tiers[0].EgressPolicies
}
if len(hostEp.ForwardTiers) > 0 {
ingressForwardPolicyNames = hostEp.ForwardTiers[0].IngressPolicies
egressForwardPolicyNames = hostEp.ForwardTiers[0].EgressPolicies
}
filtChains := m.ruleRenderer.HostEndpointToFilterChains(
ifaceName,
ingressPolicyNames,
egressPolicyNames,
ingressForwardPolicyNames,
egressForwardPolicyNames,
hostEp.ProfileIds,
)
if !reflect.DeepEqual(filtChains, m.activeHostIfaceToFiltChains[ifaceName]) {
m.filterTable.UpdateChains(filtChains)
}
newHostIfaceFiltChains[ifaceName] = filtChains
delete(m.activeHostIfaceToFiltChains, ifaceName)
}
newHostIfaceMangleChains := map[string][]*iptables.Chain{}
for ifaceName, id := range newPreDNATIfaceNameToHostEpID {
log.WithField("id", id).Info("Updating host endpoint mangle chains.")
hostEp := m.rawHostEndpoints[id]
// Update the mangle table, for preDNAT policy.
var ingressPolicyNames []string
if len(hostEp.PreDnatTiers) > 0 {
ingressPolicyNames = hostEp.PreDnatTiers[0].IngressPolicies
}
mangleChains := m.ruleRenderer.HostEndpointToMangleChains(
ifaceName,
ingressPolicyNames,
)
if !reflect.DeepEqual(mangleChains, m.activeHostIfaceToMangleChains[ifaceName]) {
m.mangleTable.UpdateChains(mangleChains)
}
newHostIfaceMangleChains[ifaceName] = mangleChains
delete(m.activeHostIfaceToMangleChains, ifaceName)
}
newHostIfaceRawChains := map[string][]*iptables.Chain{}
for ifaceName, id := range newUntrackedIfaceNameToHostEpID {
log.WithField("id", id).Info("Updating host endpoint raw chains.")
hostEp := m.rawHostEndpoints[id]
// Update the raw chain, for untracked traffic.
var ingressPolicyNames, egressPolicyNames []string
if len(hostEp.UntrackedTiers) > 0 {
ingressPolicyNames = hostEp.UntrackedTiers[0].IngressPolicies
egressPolicyNames = hostEp.UntrackedTiers[0].EgressPolicies
}
rawChains := m.ruleRenderer.HostEndpointToRawChains(
ifaceName,
ingressPolicyNames,
egressPolicyNames,
)
if !reflect.DeepEqual(rawChains, m.activeHostIfaceToRawChains[ifaceName]) {
m.rawTable.UpdateChains(rawChains)
}
newHostIfaceRawChains[ifaceName] = rawChains
delete(m.activeHostIfaceToRawChains, ifaceName)
}
// Remove programming for host endpoints that are not now in use.
for ifaceName, chains := range m.activeHostIfaceToFiltChains {
log.WithField("ifaceName", ifaceName).Info(
"Host interface no longer protected, deleting its normal chains.")
m.filterTable.RemoveChains(chains)
}
for ifaceName, chains := range m.activeHostIfaceToMangleChains {
log.WithField("ifaceName", ifaceName).Info(
"Host interface no longer protected, deleting its preDNAT chains.")
m.mangleTable.RemoveChains(chains)
}
for ifaceName, chains := range m.activeHostIfaceToRawChains {
log.WithField("ifaceName", ifaceName).Info(
"Host interface no longer protected, deleting its untracked chains.")
m.rawTable.RemoveChains(chains)
}
// Remember the host endpoints that are now in use.
m.activeIfaceNameToHostEpID = newIfaceNameToHostEpID
m.activeHostEpIDToIfaceNames = newHostEpIDToIfaceNames
m.activeHostIfaceToFiltChains = newHostIfaceFiltChains
m.activeHostIfaceToMangleChains = newHostIfaceMangleChains
m.activeHostIfaceToRawChains = newHostIfaceRawChains
// Rewrite the filter dispatch chains if they've changed.
log.WithField("resolvedHostEpIds", newIfaceNameToHostEpID).Debug("Rewrite filter dispatch chains?")
newFilterDispatchChains := m.ruleRenderer.HostDispatchChains(newIfaceNameToHostEpID, true)
m.updateDispatchChains(m.activeHostFilterDispatchChains, newFilterDispatchChains, m.filterTable)
// Rewrite the mangle dispatch chains if they've changed.
log.WithField("resolvedHostEpIds", newPreDNATIfaceNameToHostEpID).Debug("Rewrite mangle dispatch chains?")
newMangleDispatchChains := m.ruleRenderer.FromHostDispatchChains(newPreDNATIfaceNameToHostEpID)
m.updateDispatchChains(m.activeHostMangleDispatchChains, newMangleDispatchChains, m.mangleTable)
// Rewrite the raw dispatch chains if they've changed.
log.WithField("resolvedHostEpIds", newUntrackedIfaceNameToHostEpID).Debug("Rewrite raw dispatch chains?")
newRawDispatchChains := m.ruleRenderer.HostDispatchChains(newUntrackedIfaceNameToHostEpID, false)
m.updateDispatchChains(m.activeHostRawDispatchChains, newRawDispatchChains, m.rawTable)
log.Debug("Done resolving host endpoints.")
}
// updateDispatchChains reconciles one set of dispatch chains against the
// desired state.  Changed chains are pushed to the given iptables.Table and
// recorded in the activeChains map; chains that are no longer wanted are
// removed from both.
//
// Calculating the minimum update prevents log spam and reduces the work
// needed in the Table.
func (m *endpointManager) updateDispatchChains(
	activeChains map[string]*iptables.Chain,
	newChains []*iptables.Chain,
	table iptablesTable,
) {
	wanted := set.New()
	for _, chain := range newChains {
		wanted.Add(chain.Name)
		if reflect.DeepEqual(chain, activeChains[chain.Name]) {
			// Already programmed exactly as desired; skip the update.
			continue
		}
		table.UpdateChain(chain)
		activeChains[chain.Name] = chain
	}
	// Clean up previously-programmed chains that are no longer in the
	// desired set.
	for name := range activeChains {
		if !wanted.Contains(name) {
			table.RemoveChainByName(name)
			delete(activeChains, name)
		}
	}
}
// configureInterface applies Calico's required /proc/sys settings to the
// named interface.  Oper-DOWN interfaces are skipped: sysctls written to a
// down interface would be lost, and this method is called again when the
// interface comes up.  Returns the first write error encountered, if any.
func (m *endpointManager) configureInterface(name string) error {
	if !m.activeUpIfaces.Contains(name) {
		log.WithField("ifaceName", name).Info(
			"Skipping configuration of interface because it is oper down.")
		return nil
	}
	log.WithField("ifaceName", name).Info(
		"Applying /proc/sys configuration to interface.")

	// Each setting is a /proc/sys path template (with a %s for the interface
	// name) and the value to write there.  Writes are applied in order and
	// the first failure aborts.
	type sysctl struct {
		pathTemplate string
		value        string
	}
	var sysctls []sysctl
	if m.ipVersion == 4 {
		sysctls = []sysctl{
			// Strict reverse-path filtering: prevents a workload from
			// spoofing its IP address.  Non-privileged containers have
			// additional anti-spoofing protection but VM workloads, for
			// example, can easily spoof their IP.
			{"/proc/sys/net/ipv4/conf/%s/rp_filter", "1"},
			// Enable routing to localhost; required to allow NAT to the
			// local host.
			{"/proc/sys/net/ipv4/conf/%s/route_localnet", "1"},
			// Proxy ARP makes the host respond to all ARP requests with its
			// own MAC.  Two advantages:
			//
			// - In OpenStack, we're forced to configure the guest's
			//   networking using DHCP, which requires a subnet and gateway;
			//   advertising a distinct /30 per guest would burn 4 IPs each.
			//   With proxy ARP we can advertise the whole pool to each guest
			//   as its subnet and have the host respond to all ARP requests
			//   and route all traffic whether on or off subnet.
			//
			// - For containers, we install explicit routes in the container
			//   namespace and use a link-local gateway address; proxy ARP
			//   means we don't need to assign that address explicitly to
			//   each host-side veth.
			{"/proc/sys/net/ipv4/conf/%s/proxy_arp", "1"},
			// The kernel normally delays proxy ARP responses but we know
			// that's not needed in a Calico network, so disable the delay.
			{"/proc/sys/net/ipv4/neigh/%s/proxy_delay", "0"},
			// Forward packets coming _from_ this interface.  For packets to
			// be forwarded in both directions the fabric-facing interface
			// needs this flag too (or the global default must be set).
			{"/proc/sys/net/ipv4/conf/%s/forwarding", "1"},
		}
	} else {
		sysctls = []sysctl{
			// Proxy NDP, the IPv6 analogue of proxy ARP described above.
			{"/proc/sys/net/ipv6/conf/%s/proxy_ndp", "1"},
			// Forward packets coming _from_ this interface; see the IPv4
			// forwarding comment above.
			{"/proc/sys/net/ipv6/conf/%s/forwarding", "1"},
		}
	}
	for _, s := range sysctls {
		if err := m.writeProcSys(fmt.Sprintf(s.pathTemplate, name), s.value); err != nil {
			return err
		}
	}
	return nil
}
// writeProcSys writes value to the /proc/sys file at path.  The file must
// already exist (it is deliberately opened without O_CREATE).  A partial
// write is reported as io.ErrShortWrite; a close error is surfaced only if
// the write itself succeeded.
func writeProcSys(path, value string) error {
	f, err := os.OpenFile(path, os.O_WRONLY, 0)
	if err != nil {
		return err
	}
	data := []byte(value)
	n, writeErr := f.Write(data)
	if writeErr == nil && n < len(data) {
		writeErr = io.ErrShortWrite
	}
	closeErr := f.Close()
	if writeErr != nil {
		// The write error takes precedence over any close error.
		return writeErr
	}
	return closeErr
}
| 1 | 16,159 | What criteria do we use to bump the copyright years? An update to the module? | projectcalico-felix | c |
@@ -267,6 +267,7 @@ class Resize:
interpolation='nearest',
backend=self.backend)
results['gt_semantic_seg'] = gt_seg
+ results[key] = gt_seg
def __call__(self, results):
"""Call function to resize images, bounding boxes, masks, semantic | 1 | import copy
import inspect
import mmcv
import numpy as np
from numpy import random
from mmdet.core import PolygonMasks
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from ..builder import PIPELINES
try:
from imagecorruptions import corrupt
except ImportError:
corrupt = None
try:
import albumentations
from albumentations import Compose
except ImportError:
albumentations = None
Compose = None
@PIPELINES.register_module()
class Resize:
    """Resize images & bbox & mask.

    This transform resizes the input image to some scale. Bboxes and masks are
    then resized with the same scale factor. If the input dict contains the key
    "scale", then the scale in the input dict is used, otherwise the specified
    scale in the init method is used. If the input dict contains the key
    "scale_factor" (if MultiScaleFlipAug does not give img_scale but
    scale_factor), the actual scale will be computed by image shape and
    scale_factor.

    `img_scale` can either be a tuple (single-scale) or a list of tuple
    (multi-scale). There are 3 multiscale modes:

    - ``ratio_range is not None``: randomly sample a ratio from the ratio \
      range and multiply it with the image scale.
    - ``ratio_range is None`` and ``multiscale_mode == "range"``: randomly \
      sample a scale from the multiscale range.
    - ``ratio_range is None`` and ``multiscale_mode == "value"``: randomly \
      sample a scale from multiple scales.

    Args:
        img_scale (tuple or list[tuple]): Images scales for resizing.
        multiscale_mode (str): Either "range" or "value".
        ratio_range (tuple[float]): (min_ratio, max_ratio)
        keep_ratio (bool): Whether to keep the aspect ratio when resizing the
            image.
        bbox_clip_border (bool, optional): Whether clip the objects outside
            the border of the image. Defaults to True.
        backend (str): Image resize backend, choices are 'cv2' and 'pillow'.
            These two backends generates slightly different results. Defaults
            to 'cv2'.
        override (bool, optional): Whether to override `scale` and
            `scale_factor` so as to call resize twice. Default False. If True,
            after the first resizing, the existed `scale` and `scale_factor`
            will be ignored so the second resizing can be allowed.
            This option is a work-around for multiple times of resize in DETR.
            Defaults to False.
    """

    def __init__(self,
                 img_scale=None,
                 multiscale_mode='range',
                 ratio_range=None,
                 keep_ratio=True,
                 bbox_clip_border=True,
                 backend='cv2',
                 override=False):
        if img_scale is None:
            self.img_scale = None
        else:
            # Normalize a single tuple into a one-element list so the rest of
            # the class can always assume a list of tuples.
            if isinstance(img_scale, list):
                self.img_scale = img_scale
            else:
                self.img_scale = [img_scale]
            assert mmcv.is_list_of(self.img_scale, tuple)

        if ratio_range is not None:
            # mode 1: given a scale and a range of image ratio
            assert len(self.img_scale) == 1
        else:
            # mode 2: given multiple scales or a range of scales
            assert multiscale_mode in ['value', 'range']

        self.backend = backend
        self.multiscale_mode = multiscale_mode
        self.ratio_range = ratio_range
        self.keep_ratio = keep_ratio
        # TODO: refactor the override option in Resize
        self.override = override
        self.bbox_clip_border = bbox_clip_border

    @staticmethod
    def random_select(img_scales):
        """Randomly select an img_scale from given candidates.

        Args:
            img_scales (list[tuple]): Images scales for selection.

        Returns:
            (tuple, int): Returns a tuple ``(img_scale, scale_dix)``, \
                where ``img_scale`` is the selected image scale and \
                ``scale_idx`` is the selected index in the given candidates.
        """
        assert mmcv.is_list_of(img_scales, tuple)
        scale_idx = np.random.randint(len(img_scales))
        img_scale = img_scales[scale_idx]
        return img_scale, scale_idx

    @staticmethod
    def random_sample(img_scales):
        """Randomly sample an img_scale when ``multiscale_mode=='range'``.

        Args:
            img_scales (list[tuple]): Images scale range for sampling.
                There must be two tuples in img_scales, which specify the lower
                and upper bound of image scales.

        Returns:
            (tuple, None): Returns a tuple ``(img_scale, None)``, where \
                ``img_scale`` is sampled scale and None is just a placeholder \
                to be consistent with :func:`random_select`.
        """
        assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
        # Sample the long and short edge independently, each uniformly from
        # the range spanned by the two candidate scales (inclusive).
        img_scale_long = [max(s) for s in img_scales]
        img_scale_short = [min(s) for s in img_scales]
        long_edge = np.random.randint(
            min(img_scale_long),
            max(img_scale_long) + 1)
        short_edge = np.random.randint(
            min(img_scale_short),
            max(img_scale_short) + 1)
        img_scale = (long_edge, short_edge)
        return img_scale, None

    @staticmethod
    def random_sample_ratio(img_scale, ratio_range):
        """Randomly sample an img_scale when ``ratio_range`` is specified.

        A ratio will be randomly sampled from the range specified by
        ``ratio_range``. Then it would be multiplied with ``img_scale`` to
        generate sampled scale.

        Args:
            img_scale (tuple): Images scale base to multiply with ratio.
            ratio_range (tuple[float]): The minimum and maximum ratio to scale
                the ``img_scale``.

        Returns:
            (tuple, None): Returns a tuple ``(scale, None)``, where \
                ``scale`` is sampled ratio multiplied with ``img_scale`` and \
                None is just a placeholder to be consistent with \
                :func:`random_select`.
        """
        assert isinstance(img_scale, tuple) and len(img_scale) == 2
        min_ratio, max_ratio = ratio_range
        assert min_ratio <= max_ratio
        ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
        scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
        return scale, None

    def _random_scale(self, results):
        """Randomly sample an img_scale according to ``ratio_range`` and
        ``multiscale_mode``.

        If ``ratio_range`` is specified, a ratio will be sampled and be
        multiplied with ``img_scale``.
        If multiple scales are specified by ``img_scale``, a scale will be
        sampled according to ``multiscale_mode``.
        Otherwise, single scale will be used.

        Args:
            results (dict): Result dict from :obj:`dataset`.

        Returns:
            dict: Two new keys 'scale` and 'scale_idx` are added into \
                ``results``, which would be used by subsequent pipelines.
        """
        if self.ratio_range is not None:
            scale, scale_idx = self.random_sample_ratio(
                self.img_scale[0], self.ratio_range)
        elif len(self.img_scale) == 1:
            scale, scale_idx = self.img_scale[0], 0
        elif self.multiscale_mode == 'range':
            scale, scale_idx = self.random_sample(self.img_scale)
        elif self.multiscale_mode == 'value':
            scale, scale_idx = self.random_select(self.img_scale)
        else:
            raise NotImplementedError

        results['scale'] = scale
        results['scale_idx'] = scale_idx

    def _resize_img(self, results):
        """Resize images with ``results['scale']``."""
        for key in results.get('img_fields', ['img']):
            if self.keep_ratio:
                img, scale_factor = mmcv.imrescale(
                    results[key],
                    results['scale'],
                    return_scale=True,
                    backend=self.backend)
                # the w_scale and h_scale has minor difference
                # a real fix should be done in the mmcv.imrescale in the future
                new_h, new_w = img.shape[:2]
                h, w = results[key].shape[:2]
                w_scale = new_w / w
                h_scale = new_h / h
            else:
                img, w_scale, h_scale = mmcv.imresize(
                    results[key],
                    results['scale'],
                    return_scale=True,
                    backend=self.backend)
            results[key] = img

            scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
                                    dtype=np.float32)
            results['img_shape'] = img.shape
            # in case that there is no padding
            results['pad_shape'] = img.shape
            results['scale_factor'] = scale_factor
            results['keep_ratio'] = self.keep_ratio

    def _resize_bboxes(self, results):
        """Resize bounding boxes with ``results['scale_factor']``."""
        for key in results.get('bbox_fields', []):
            bboxes = results[key] * results['scale_factor']
            if self.bbox_clip_border:
                img_shape = results['img_shape']
                bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
                bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
            results[key] = bboxes

    def _resize_masks(self, results):
        """Resize masks with ``results['scale']``"""
        for key in results.get('mask_fields', []):
            if results[key] is None:
                continue
            if self.keep_ratio:
                results[key] = results[key].rescale(results['scale'])
            else:
                results[key] = results[key].resize(results['img_shape'][:2])

    def _resize_seg(self, results):
        """Resize semantic segmentation map with ``results['scale']``."""
        for key in results.get('seg_fields', []):
            if self.keep_ratio:
                gt_seg = mmcv.imrescale(
                    results[key],
                    results['scale'],
                    interpolation='nearest',
                    backend=self.backend)
            else:
                gt_seg = mmcv.imresize(
                    results[key],
                    results['scale'],
                    interpolation='nearest',
                    backend=self.backend)
            # BUGFIX: write back to the field being iterated.  The previous
            # code stored every resized map under the hard-coded key
            # 'gt_semantic_seg', so any other entry in `seg_fields` was never
            # updated and the last one clobbered 'gt_semantic_seg'.
            results[key] = gt_seg

    def __call__(self, results):
        """Call function to resize images, bounding boxes, masks, semantic
        segmentation map.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \
                'keep_ratio' keys are added into result dict.
        """

        if 'scale' not in results:
            if 'scale_factor' in results:
                # Derive an absolute scale from the image shape and the given
                # scalar scale_factor (MultiScaleFlipAug case).
                img_shape = results['img'].shape[:2]
                scale_factor = results['scale_factor']
                assert isinstance(scale_factor, float)
                results['scale'] = tuple(
                    [int(x * scale_factor) for x in img_shape][::-1])
            else:
                self._random_scale(results)
        else:
            if not self.override:
                assert 'scale_factor' not in results, (
                    'scale and scale_factor cannot be both set.')
            else:
                # Work-around for resizing twice (e.g. DETR): drop the stale
                # scale info and sample afresh.
                results.pop('scale')
                if 'scale_factor' in results:
                    results.pop('scale_factor')
                self._random_scale(results)

        self._resize_img(results)
        self._resize_bboxes(results)
        self._resize_masks(results)
        self._resize_seg(results)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(img_scale={self.img_scale}, '
        repr_str += f'multiscale_mode={self.multiscale_mode}, '
        repr_str += f'ratio_range={self.ratio_range}, '
        repr_str += f'keep_ratio={self.keep_ratio}, '
        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
        return repr_str
@PIPELINES.register_module()
class RandomFlip:
    """Flip the image & bbox & mask.

    If the input dict contains the key "flip", then the flag will be used,
    otherwise it will be randomly decided by a ratio specified in the init
    method.

    When random flip is enabled, ``flip_ratio``/``direction`` can either be a
    float/string or tuple of float/string. There are 3 flip modes:

    - ``flip_ratio`` is float, ``direction`` is string: the image will be
        ``direction``ly flipped with probability of ``flip_ratio`` .
        E.g., ``flip_ratio=0.5``, ``direction='horizontal'``,
        then image will be horizontally flipped with probability of 0.5.
    - ``flip_ratio`` is float, ``direction`` is list of string: the image wil
        be ``direction[i]``ly flipped with probability of
        ``flip_ratio/len(direction)``.
        E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``,
        then image will be horizontally flipped with probability of 0.25,
        vertically with probability of 0.25.
    - ``flip_ratio`` is list of float, ``direction`` is list of string:
        given ``len(flip_ratio) == len(direction)``, the image wil
        be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``.
        E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal',
        'vertical']``, then image will be horizontally flipped with probability
        of 0.3, vertically with probability of 0.5.

    Args:
        flip_ratio (float | list[float], optional): The flipping probability.
            Default: None.
        direction(str | list[str], optional): The flipping direction. Options
            are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'.
            If input is a list, the length must equal ``flip_ratio``. Each
            element in ``flip_ratio`` indicates the flip probability of
            corresponding direction.
    """

    def __init__(self, flip_ratio=None, direction='horizontal'):
        # Validate flip_ratio: either a list of probabilities summing to at
        # most 1, a single probability in [0, 1], or None (decided elsewhere).
        if isinstance(flip_ratio, list):
            assert mmcv.is_list_of(flip_ratio, float)
            assert 0 <= sum(flip_ratio) <= 1
        elif isinstance(flip_ratio, float):
            assert 0 <= flip_ratio <= 1
        elif flip_ratio is None:
            pass
        else:
            raise ValueError('flip_ratios must be None, float, '
                             'or list of float')
        self.flip_ratio = flip_ratio

        valid_directions = ['horizontal', 'vertical', 'diagonal']
        if isinstance(direction, str):
            assert direction in valid_directions
        elif isinstance(direction, list):
            assert mmcv.is_list_of(direction, str)
            assert set(direction).issubset(set(valid_directions))
        else:
            raise ValueError('direction must be either str or list of str')
        self.direction = direction

        # When per-direction ratios are given, they must pair one-to-one with
        # the directions.
        if isinstance(flip_ratio, list):
            assert len(self.flip_ratio) == len(self.direction)

    def bbox_flip(self, bboxes, img_shape, direction):
        """Flip bboxes horizontally.

        Args:
            bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k)
            img_shape (tuple[int]): Image shape (height, width)
            direction (str): Flip direction. Options are 'horizontal',
                'vertical'.

        Returns:
            numpy.ndarray: Flipped bounding boxes.
        """
        # Boxes are stored as repeated (x1, y1, x2, y2) groups; mirror each
        # coordinate about the relevant image edge, swapping min/max so the
        # result stays a valid box.
        assert bboxes.shape[-1] % 4 == 0
        flipped = bboxes.copy()
        if direction == 'horizontal':
            w = img_shape[1]
            flipped[..., 0::4] = w - bboxes[..., 2::4]
            flipped[..., 2::4] = w - bboxes[..., 0::4]
        elif direction == 'vertical':
            h = img_shape[0]
            flipped[..., 1::4] = h - bboxes[..., 3::4]
            flipped[..., 3::4] = h - bboxes[..., 1::4]
        elif direction == 'diagonal':
            # Diagonal flip == horizontal + vertical flip combined.
            w = img_shape[1]
            h = img_shape[0]
            flipped[..., 0::4] = w - bboxes[..., 2::4]
            flipped[..., 1::4] = h - bboxes[..., 3::4]
            flipped[..., 2::4] = w - bboxes[..., 0::4]
            flipped[..., 3::4] = h - bboxes[..., 1::4]
        else:
            raise ValueError(f"Invalid flipping direction '{direction}'")
        return flipped

    def __call__(self, results):
        """Call function to flip bounding boxes, masks, semantic segmentation
        maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Flipped results, 'flip', 'flip_direction' keys are added \
                into result dict.
        """

        if 'flip' not in results:
            # Build the candidate list (directions plus a trailing None for
            # "no flip") and the matching probability list, then draw once.
            if isinstance(self.direction, list):
                # None means non-flip
                direction_list = self.direction + [None]
            else:
                # None means non-flip
                direction_list = [self.direction, None]

            if isinstance(self.flip_ratio, list):
                non_flip_ratio = 1 - sum(self.flip_ratio)
                flip_ratio_list = self.flip_ratio + [non_flip_ratio]
            else:
                non_flip_ratio = 1 - self.flip_ratio
                # exclude non-flip
                single_ratio = self.flip_ratio / (len(direction_list) - 1)
                flip_ratio_list = [single_ratio] * (len(direction_list) -
                                                    1) + [non_flip_ratio]

            cur_dir = np.random.choice(direction_list, p=flip_ratio_list)

            results['flip'] = cur_dir is not None
        if 'flip_direction' not in results:
            results['flip_direction'] = cur_dir
        if results['flip']:
            # flip image
            for key in results.get('img_fields', ['img']):
                results[key] = mmcv.imflip(
                    results[key], direction=results['flip_direction'])
            # flip bboxes
            for key in results.get('bbox_fields', []):
                results[key] = self.bbox_flip(results[key],
                                              results['img_shape'],
                                              results['flip_direction'])
            # flip masks
            for key in results.get('mask_fields', []):
                results[key] = results[key].flip(results['flip_direction'])

            # flip segs
            for key in results.get('seg_fields', []):
                results[key] = mmcv.imflip(
                    results[key], direction=results['flip_direction'])
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})'
@PIPELINES.register_module()
class RandomShift:
    """Shift the image and box given shift pixels and probability.

    Args:
        shift_ratio (float): Probability of shifts. Default 0.5.
        max_shift_px (int): The max pixels for shifting. Default 32.
        filter_thr_px (int): The width and height threshold for filtering.
            The bbox and the rest of the targets below the width and
            height threshold will be filtered. Default 1.
    """

    def __init__(self, shift_ratio=0.5, max_shift_px=32, filter_thr_px=1):
        assert 0 <= shift_ratio <= 1
        assert max_shift_px >= 0
        self.shift_ratio = shift_ratio
        self.max_shift_px = max_shift_px
        self.filter_thr_px = int(filter_thr_px)
        # The key correspondence from bboxes to labels.
        self.bbox2label = {
            'gt_bboxes': 'gt_labels',
            'gt_bboxes_ignore': 'gt_labels_ignore'
        }

    def __call__(self, results):
        """Call function to random shift images, bounding boxes.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Shift results.
        """
        if random.random() < self.shift_ratio:
            img_shape = results['img'].shape[:2]

            random_shift_x = random.randint(-self.max_shift_px,
                                            self.max_shift_px)
            random_shift_y = random.randint(-self.max_shift_px,
                                            self.max_shift_px)
            # Split each signed shift into the destination offset in the new
            # image (new_*) and the source offset in the original (orig_*).
            new_x = max(0, random_shift_x)
            orig_x = max(0, -random_shift_x)
            new_y = max(0, random_shift_y)
            orig_y = max(0, -random_shift_y)

            # TODO: support mask and semantic segmentation maps.
            for key in results.get('bbox_fields', []):
                bboxes = results[key].copy()
                bboxes[..., 0::2] += random_shift_x
                bboxes[..., 1::2] += random_shift_y

                # clip border
                bboxes[..., 0::2] = np.clip(bboxes[..., 0::2], 0, img_shape[1])
                bboxes[..., 1::2] = np.clip(bboxes[..., 1::2], 0, img_shape[0])

                # remove invalid bboxes
                bbox_w = bboxes[..., 2] - bboxes[..., 0]
                bbox_h = bboxes[..., 3] - bboxes[..., 1]
                valid_inds = (bbox_w > self.filter_thr_px) & (
                    bbox_h > self.filter_thr_px)
                # If the shift does not contain any gt-bbox area, skip this
                # image.
                if key == 'gt_bboxes' and not valid_inds.any():
                    return results
                bboxes = bboxes[valid_inds]
                results[key] = bboxes

                # label fields. e.g. gt_labels and gt_labels_ignore
                label_key = self.bbox2label.get(key)
                if label_key in results:
                    results[label_key] = results[label_key][valid_inds]

            for key in results.get('img_fields', ['img']):
                img = results[key]
                new_img = np.zeros_like(img)
                img_h, img_w = img.shape[:2]
                new_h = img_h - np.abs(random_shift_y)
                new_w = img_w - np.abs(random_shift_x)
                new_img[new_y:new_y + new_h, new_x:new_x + new_w] \
                    = img[orig_y:orig_y + new_h, orig_x:orig_x + new_w]
                results[key] = new_img

        return results

    def __repr__(self):
        # BUGFIX: the previous repr reported only max_shift_px and was
        # malformed (unclosed parenthesis, trailing comma).  Report all
        # configuration, consistent with the other transforms in this file.
        repr_str = self.__class__.__name__
        repr_str += f'(shift_ratio={self.shift_ratio}, '
        repr_str += f'max_shift_px={self.max_shift_px}, '
        repr_str += f'filter_thr_px={self.filter_thr_px})'
        return repr_str
@PIPELINES.register_module()
class Pad:
    """Pad the image & mask.

    Two mutually-exclusive padding modes are supported: (1) pad to a fixed
    size and (2) pad to the minimum size that is divisible by some number.
    Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor".

    Args:
        size (tuple, optional): Fixed padding size.
        size_divisor (int, optional): The divisor of padded size.
        pad_val (float, optional): Padding value, 0 by default.
    """

    def __init__(self, size=None, size_divisor=None, pad_val=0):
        # Exactly one of `size` / `size_divisor` must be provided.
        assert size is not None or size_divisor is not None
        assert size is None or size_divisor is None
        self.size = size
        self.size_divisor = size_divisor
        self.pad_val = pad_val

    def _pad_img(self, results):
        """Pad every image field to ``self.size`` / next multiple of
        ``self.size_divisor``."""
        for key in results.get('img_fields', ['img']):
            img = results[key]
            if self.size is not None:
                img = mmcv.impad(img, shape=self.size, pad_val=self.pad_val)
            else:
                img = mmcv.impad_to_multiple(
                    img, self.size_divisor, pad_val=self.pad_val)
            results[key] = img
        results['pad_shape'] = img.shape
        results['pad_fixed_size'] = self.size
        results['pad_size_divisor'] = self.size_divisor

    def _pad_masks(self, results):
        """Pad masks to match ``results['pad_shape']``."""
        target = results['pad_shape'][:2]
        for key in results.get('mask_fields', []):
            results[key] = results[key].pad(target, pad_val=self.pad_val)

    def _pad_seg(self, results):
        """Pad semantic segmentation maps to ``results['pad_shape']``."""
        target = results['pad_shape'][:2]
        for key in results.get('seg_fields', []):
            results[key] = mmcv.impad(results[key], shape=target)

    def __call__(self, results):
        """Call function to pad images, masks, semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Updated result dict.
        """
        self._pad_img(results)
        self._pad_masks(results)
        self._pad_seg(results)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(size={self.size}, '
                f'size_divisor={self.size_divisor}, '
                f'pad_val={self.pad_val})')
@PIPELINES.register_module()
class Normalize:
    """Normalize the image.

    Added key is "img_norm_cfg", recording the normalization parameters used.

    Args:
        mean (sequence): Mean values of 3 channels.
        std (sequence): Std values of 3 channels.
        to_rgb (bool): Whether to convert the image from BGR to RGB,
            default is true.
    """

    def __init__(self, mean, std, to_rgb=True):
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)
        self.to_rgb = to_rgb

    def __call__(self, results):
        """Call function to normalize images.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Normalized results, 'img_norm_cfg' key is added into
                result dict.
        """
        img_keys = results.get('img_fields', ['img'])
        for key in img_keys:
            results[key] = mmcv.imnormalize(
                results[key], self.mean, self.std, self.to_rgb)
        results['img_norm_cfg'] = dict(
            mean=self.mean, std=self.std, to_rgb=self.to_rgb)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})')
@PIPELINES.register_module()
class RandomCrop:
"""Random crop the image & bboxes & masks.
The absolute `crop_size` is sampled based on `crop_type` and `image_size`,
then the cropped results are generated.
Args:
crop_size (tuple): The relative ratio or absolute pixels of
height and width.
crop_type (str, optional): one of "relative_range", "relative",
"absolute", "absolute_range". "relative" randomly crops
(h * crop_size[0], w * crop_size[1]) part from an input of size
(h, w). "relative_range" uniformly samples relative crop size from
range [crop_size[0], 1] and [crop_size[1], 1] for height and width
respectively. "absolute" crops from an input with absolute size
(crop_size[0], crop_size[1]). "absolute_range" uniformly samples
crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w
in range [crop_size[0], min(w, crop_size[1])]. Default "absolute".
allow_negative_crop (bool, optional): Whether to allow a crop that does
not contain any bbox area. Default False.
bbox_clip_border (bool, optional): Whether clip the objects outside
the border of the image. Defaults to True.
Note:
- If the image is smaller than the absolute crop size, return the
original image.
- The keys for bboxes, labels and masks must be aligned. That is,
`gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and
`gt_bboxes_ignore` corresponds to `gt_labels_ignore` and
`gt_masks_ignore`.
- If the crop does not contain any gt-bbox region and
`allow_negative_crop` is set to False, skip this image.
"""
def __init__(self,
crop_size,
crop_type='absolute',
allow_negative_crop=False,
bbox_clip_border=True):
if crop_type not in [
'relative_range', 'relative', 'absolute', 'absolute_range'
]:
raise ValueError(f'Invalid crop_type {crop_type}.')
if crop_type in ['absolute', 'absolute_range']:
assert crop_size[0] > 0 and crop_size[1] > 0
assert isinstance(crop_size[0], int) and isinstance(
crop_size[1], int)
else:
assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1
self.crop_size = crop_size
self.crop_type = crop_type
self.allow_negative_crop = allow_negative_crop
self.bbox_clip_border = bbox_clip_border
# The key correspondence from bboxes to labels and masks.
self.bbox2label = {
'gt_bboxes': 'gt_labels',
'gt_bboxes_ignore': 'gt_labels_ignore'
}
self.bbox2mask = {
'gt_bboxes': 'gt_masks',
'gt_bboxes_ignore': 'gt_masks_ignore'
}
def _crop_data(self, results, crop_size, allow_negative_crop):
"""Function to randomly crop images, bounding boxes, masks, semantic
segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
crop_size (tuple): Expected absolute size after cropping, (h, w).
allow_negative_crop (bool): Whether to allow a crop that does not
contain any bbox area. Default to False.
Returns:
dict: Randomly cropped results, 'img_shape' key in result dict is
updated according to crop size.
"""
assert crop_size[0] > 0 and crop_size[1] > 0
for key in results.get('img_fields', ['img']):
img = results[key]
margin_h = max(img.shape[0] - crop_size[0], 0)
margin_w = max(img.shape[1] - crop_size[1], 0)
offset_h = np.random.randint(0, margin_h + 1)
offset_w = np.random.randint(0, margin_w + 1)
crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]
crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]
# crop the image
img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
img_shape = img.shape
results[key] = img
results['img_shape'] = img_shape
# crop bboxes accordingly and clip to the image boundary
for key in results.get('bbox_fields', []):
# e.g. gt_bboxes and gt_bboxes_ignore
bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],
dtype=np.float32)
bboxes = results[key] - bbox_offset
if self.bbox_clip_border:
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (
bboxes[:, 3] > bboxes[:, 1])
# If the crop does not contain any gt-bbox area and
# allow_negative_crop is False, skip this image.
if (key == 'gt_bboxes' and not valid_inds.any()
and not allow_negative_crop):
return None
results[key] = bboxes[valid_inds, :]
# label fields. e.g. gt_labels and gt_labels_ignore
label_key = self.bbox2label.get(key)
if label_key in results:
results[label_key] = results[label_key][valid_inds]
# mask fields, e.g. gt_masks and gt_masks_ignore
mask_key = self.bbox2mask.get(key)
if mask_key in results:
results[mask_key] = results[mask_key][
valid_inds.nonzero()[0]].crop(
np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))
# crop semantic seg
for key in results.get('seg_fields', []):
results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]
return results
def _get_crop_size(self, image_size):
"""Randomly generates the absolute crop size based on `crop_type` and
`image_size`.
Args:
image_size (tuple): (h, w).
Returns:
crop_size (tuple): (crop_h, crop_w) in absolute pixels.
"""
h, w = image_size
if self.crop_type == 'absolute':
return (min(self.crop_size[0], h), min(self.crop_size[1], w))
elif self.crop_type == 'absolute_range':
assert self.crop_size[0] <= self.crop_size[1]
crop_h = np.random.randint(
min(h, self.crop_size[0]),
min(h, self.crop_size[1]) + 1)
crop_w = np.random.randint(
min(w, self.crop_size[0]),
min(w, self.crop_size[1]) + 1)
return crop_h, crop_w
elif self.crop_type == 'relative':
crop_h, crop_w = self.crop_size
return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
elif self.crop_type == 'relative_range':
crop_size = np.asarray(self.crop_size, dtype=np.float32)
crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)
return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
def __call__(self, results):
    """Randomly crop the image together with its bounding boxes, masks
    and semantic segmentation maps.

    Args:
        results (dict): Result dict from loading pipeline.

    Returns:
        dict: Randomly cropped results; the 'img_shape' key in the
            result dict is updated according to the crop size.
    """
    height_width = results['img'].shape[:2]
    return self._crop_data(results, self._get_crop_size(height_width),
                           self.allow_negative_crop)
def __repr__(self):
    """Human-readable representation listing every configured option."""
    return (f'{self.__class__.__name__}'
            f'(crop_size={self.crop_size}, '
            f'crop_type={self.crop_type}, '
            f'allow_negative_crop={self.allow_negative_crop}, '
            f'bbox_clip_border={self.bbox_clip_border})')
@PIPELINES.register_module()
class SegRescale:
    """Rescale semantic segmentation maps.

    Args:
        scale_factor (float): The scale factor of the final output. A factor
            of 1 leaves the maps untouched. Defaults to 1.
        backend (str): Image rescale backend, choices are 'cv2' and 'pillow'.
            These two backends generates slightly different results. Defaults
            to 'cv2'.
    """

    def __init__(self, scale_factor=1, backend='cv2'):
        self.scale_factor = scale_factor
        self.backend = backend

    def __call__(self, results):
        """Call function to scale the semantic segmentation map.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with semantic segmentation map scaled.
        """
        for key in results.get('seg_fields', []):
            if self.scale_factor != 1:
                # Nearest-neighbour interpolation keeps label ids intact;
                # bilinear would blend class ids into meaningless values.
                results[key] = mmcv.imrescale(
                    results[key],
                    self.scale_factor,
                    interpolation='nearest',
                    backend=self.backend)
        return results

    def __repr__(self):
        # Report ``backend`` too so the repr reflects every constructor
        # argument (it was previously omitted).
        return (self.__class__.__name__ +
                f'(scale_factor={self.scale_factor}, '
                f'backend={self.backend})')
@PIPELINES.register_module()
class PhotoMetricDistortion:
    """Apply photometric distortion to image sequentially, every transformation
    is applied with a probability of 0.5. The position of random contrast is in
    second or second to last.

    1. random brightness
    2. random contrast (mode 0)
    3. convert color from BGR to HSV
    4. random saturation
    5. random hue
    6. convert color from HSV to BGR
    7. random contrast (mode 1)
    8. randomly swap channels

    Args:
        brightness_delta (int): delta of brightness.
        contrast_range (tuple): range of contrast.
        saturation_range (tuple): range of saturation.
        hue_delta (int): delta of hue.
    """

    def __init__(self,
                 brightness_delta=32,
                 contrast_range=(0.5, 1.5),
                 saturation_range=(0.5, 1.5),
                 hue_delta=18):
        self.brightness_delta = brightness_delta
        self.contrast_lower, self.contrast_upper = contrast_range
        self.saturation_lower, self.saturation_upper = saturation_range
        self.hue_delta = hue_delta

    def __call__(self, results):
        """Call function to perform photometric distortion on images.

        The image is modified in place (in-place `+=`/`*=` on the float32
        array) and then written back under ``results['img']``.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with images distorted.
        """
        if 'img_fields' in results:
            assert results['img_fields'] == ['img'], \
                'Only single img_fields is allowed'
        img = results['img']
        # float32 is required so brightness/contrast deltas are not
        # clipped/wrapped by uint8 arithmetic.
        assert img.dtype == np.float32, \
            'PhotoMetricDistortion needs the input image of dtype np.float32,'\
            ' please set "to_float32=True" in "LoadImageFromFile" pipeline'
        # NOTE(review): ``random`` here appears to be ``numpy.random``
        # (``randint(2)`` yields 0 or 1, i.e. a coin flip) — confirm
        # against this module's imports.
        # random brightness
        if random.randint(2):
            delta = random.uniform(-self.brightness_delta,
                                   self.brightness_delta)
            img += delta

        # mode == 0 --> do random contrast first
        # mode == 1 --> do random contrast last
        mode = random.randint(2)
        if mode == 1:
            if random.randint(2):
                alpha = random.uniform(self.contrast_lower,
                                       self.contrast_upper)
                img *= alpha

        # convert color from BGR to HSV
        img = mmcv.bgr2hsv(img)

        # random saturation
        if random.randint(2):
            img[..., 1] *= random.uniform(self.saturation_lower,
                                          self.saturation_upper)

        # random hue
        if random.randint(2):
            img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
            # Float HSV hue lives in [0, 360); wrap out-of-range values.
            img[..., 0][img[..., 0] > 360] -= 360
            img[..., 0][img[..., 0] < 0] += 360

        # convert color from HSV to BGR
        img = mmcv.hsv2bgr(img)

        # random contrast
        if mode == 0:
            if random.randint(2):
                alpha = random.uniform(self.contrast_lower,
                                       self.contrast_upper)
                img *= alpha

        # randomly swap channels
        if random.randint(2):
            img = img[..., random.permutation(3)]

        results['img'] = img
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(\nbrightness_delta={self.brightness_delta},\n'
        repr_str += 'contrast_range='
        repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n'
        repr_str += 'saturation_range='
        repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n'
        repr_str += f'hue_delta={self.hue_delta})'
        return repr_str
@PIPELINES.register_module()
class Expand:
    """Random expand the image & bboxes.

    Randomly place the original image on a canvas of 'ratio' x original image
    size filled with mean values. The ratio is in the range of ratio_range.

    Args:
        mean (tuple): mean value of dataset.
        to_rgb (bool): if need to convert the order of mean to align with RGB.
        ratio_range (tuple): range of expand ratio.
        seg_ignore_label (int | None): label used to fill the expanded area
            of semantic segmentation maps. Must be set when 'seg_fields'
            are present in the results.
        prob (float): probability of applying this transformation
    """

    def __init__(self,
                 mean=(0, 0, 0),
                 to_rgb=True,
                 ratio_range=(1, 4),
                 seg_ignore_label=None,
                 prob=0.5):
        self.to_rgb = to_rgb
        self.ratio_range = ratio_range
        if to_rgb:
            # Images are handled in BGR order internally, so an RGB-ordered
            # mean must be reversed to match.
            self.mean = mean[::-1]
        else:
            self.mean = mean
        self.min_ratio, self.max_ratio = ratio_range
        self.seg_ignore_label = seg_ignore_label
        self.prob = prob

    def __call__(self, results):
        """Call function to expand images, bounding boxes.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with images, bounding boxes expanded
        """

        if random.uniform(0, 1) > self.prob:
            return results

        if 'img_fields' in results:
            assert results['img_fields'] == ['img'], \
                'Only single img_fields is allowed'
        img = results['img']

        h, w, c = img.shape
        ratio = random.uniform(self.min_ratio, self.max_ratio)
        # speedup expand when meets large image: a scalar fill() is much
        # faster than np.full with a per-channel mean.
        if np.all(self.mean == self.mean[0]):
            expand_img = np.empty((int(h * ratio), int(w * ratio), c),
                                  img.dtype)
            expand_img.fill(self.mean[0])
        else:
            expand_img = np.full((int(h * ratio), int(w * ratio), c),
                                 self.mean,
                                 dtype=img.dtype)
        # Random top-left placement of the original image on the canvas.
        left = int(random.uniform(0, w * ratio - w))
        top = int(random.uniform(0, h * ratio - h))
        expand_img[top:top + h, left:left + w] = img

        results['img'] = expand_img

        # expand bboxes: translate by the placement offset.
        for key in results.get('bbox_fields', []):
            results[key] = results[key] + np.tile(
                (left, top), 2).astype(results[key].dtype)

        # expand masks
        for key in results.get('mask_fields', []):
            results[key] = results[key].expand(
                int(h * ratio), int(w * ratio), top, left)

        # expand segs: padded area is filled with seg_ignore_label.
        for key in results.get('seg_fields', []):
            gt_seg = results[key]
            expand_gt_seg = np.full((int(h * ratio), int(w * ratio)),
                                    self.seg_ignore_label,
                                    dtype=gt_seg.dtype)
            expand_gt_seg[top:top + h, left:left + w] = gt_seg
            results[key] = expand_gt_seg
        return results

    def __repr__(self):
        # ``prob`` was previously missing from the repr; include it so the
        # string reflects the full configuration.
        repr_str = self.__class__.__name__
        repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, '
        repr_str += f'ratio_range={self.ratio_range}, '
        repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
        repr_str += f'prob={self.prob})'
        return repr_str
@PIPELINES.register_module()
class MinIoURandomCrop:
    """Random crop the image & bboxes, the cropped patches have minimum IoU
    requirement with original image & bboxes, the IoU threshold is randomly
    selected from min_ious.

    Args:
        min_ious (tuple): minimum IoU threshold for all intersections with
            bounding boxes
        min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,
            where a >= min_crop_size).
        bbox_clip_border (bool, optional): Whether clip the objects outside
            the border of the image. Defaults to True.

    Note:
        The keys for bboxes, labels and masks should be paired. That is, \
        `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \
        `gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`.
    """

    def __init__(self,
                 min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
                 min_crop_size=0.3,
                 bbox_clip_border=True):
        # 1: return ori img
        self.min_ious = min_ious
        # Sampling modes: 1 = keep the original image unchanged, 0 = crop
        # with no IoU constraint, any other value = minimum-IoU threshold.
        self.sample_mode = (1, *min_ious, 0)
        self.min_crop_size = min_crop_size
        self.bbox_clip_border = bbox_clip_border
        # Paired annotation keys: filtering applied to a bbox field must be
        # mirrored onto its label and mask fields.
        self.bbox2label = {
            'gt_bboxes': 'gt_labels',
            'gt_bboxes_ignore': 'gt_labels_ignore'
        }
        self.bbox2mask = {
            'gt_bboxes': 'gt_masks',
            'gt_bboxes_ignore': 'gt_masks_ignore'
        }

    def __call__(self, results):
        """Call function to crop images and bounding boxes with minimum IoU
        constraint.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with images and bounding boxes cropped, \
                'img_shape' key is updated.
        """

        if 'img_fields' in results:
            assert results['img_fields'] == ['img'], \
                'Only single img_fields is allowed'
        img = results['img']
        assert 'bbox_fields' in results
        boxes = [results[key] for key in results['bbox_fields']]
        boxes = np.concatenate(boxes, 0)
        h, w, c = img.shape
        while True:
            mode = random.choice(self.sample_mode)
            self.mode = mode
            if mode == 1:
                # Mode 1: skip cropping entirely for this sample.
                return results

            min_iou = mode
            # Try up to 50 random patches for this threshold before
            # re-sampling a new mode.
            for i in range(50):
                new_w = random.uniform(self.min_crop_size * w, w)
                new_h = random.uniform(self.min_crop_size * h, h)

                # h / w in [0.5, 2]
                if new_h / new_w < 0.5 or new_h / new_w > 2:
                    continue

                # NOTE(review): if ``random`` is ``numpy.random`` then
                # ``uniform(x)`` is ``uniform(low=x, high=1.0)``, not
                # ``uniform(0, x)`` — confirm the intended sampling range.
                left = random.uniform(w - new_w)
                top = random.uniform(h - new_h)

                patch = np.array(
                    (int(left), int(top), int(left + new_w), int(top + new_h)))
                # Line or point crop is not allowed
                if patch[2] == patch[0] or patch[3] == patch[1]:
                    continue
                overlaps = bbox_overlaps(
                    patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
                # Reject the patch if any gt box overlaps it below min_iou.
                if len(overlaps) > 0 and overlaps.min() < min_iou:
                    continue

                # center of boxes should inside the crop img
                # only adjust boxes and instance masks when the gt is not empty
                if len(overlaps) > 0:
                    # adjust boxes
                    def is_center_of_bboxes_in_patch(boxes, patch):
                        # Box centers strictly inside the patch rectangle.
                        center = (boxes[:, :2] + boxes[:, 2:]) / 2
                        mask = ((center[:, 0] > patch[0]) *
                                (center[:, 1] > patch[1]) *
                                (center[:, 0] < patch[2]) *
                                (center[:, 1] < patch[3]))
                        return mask

                    mask = is_center_of_bboxes_in_patch(boxes, patch)
                    if not mask.any():
                        continue
                    for key in results.get('bbox_fields', []):
                        boxes = results[key].copy()
                        mask = is_center_of_bboxes_in_patch(boxes, patch)
                        boxes = boxes[mask]
                        if self.bbox_clip_border:
                            boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
                            boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
                        # Shift boxes into the patch's coordinate frame.
                        boxes -= np.tile(patch[:2], 2)

                        results[key] = boxes
                        # labels
                        label_key = self.bbox2label.get(key)
                        if label_key in results:
                            results[label_key] = results[label_key][mask]

                        # mask fields
                        mask_key = self.bbox2mask.get(key)
                        if mask_key in results:
                            results[mask_key] = results[mask_key][
                                mask.nonzero()[0]].crop(patch)
                # adjust the img no matter whether the gt is empty before crop
                img = img[patch[1]:patch[3], patch[0]:patch[2]]
                results['img'] = img
                results['img_shape'] = img.shape

                # seg fields
                for key in results.get('seg_fields', []):
                    results[key] = results[key][patch[1]:patch[3],
                                                patch[0]:patch[2]]
                return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(min_ious={self.min_ious}, '
        repr_str += f'min_crop_size={self.min_crop_size}, '
        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
        return repr_str
@PIPELINES.register_module()
class Corrupt:
    """Corruption augmentation.

    Corruption transforms implemented based on
    `imagecorruptions <https://github.com/bethgelab/imagecorruptions>`_.

    Args:
        corruption (str): Corruption name.
        severity (int, optional): The severity of corruption. Default: 1.
    """

    def __init__(self, corruption, severity=1):
        self.corruption = corruption
        self.severity = severity

    def __call__(self, results):
        """Apply the configured corruption to the image in ``results``.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with images corrupted.
        """
        if corrupt is None:
            raise RuntimeError('imagecorruptions is not installed')
        if 'img_fields' in results:
            assert results['img_fields'] == ['img'], \
                'Only single img_fields is allowed'
        # The corruption library expects uint8 input.
        corrupted = corrupt(
            results['img'].astype(np.uint8),
            corruption_name=self.corruption,
            severity=self.severity)
        results['img'] = corrupted
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(corruption={self.corruption}, '
                f'severity={self.severity})')
@PIPELINES.register_module()
class Albu:
    """Albumentation augmentation.

    Adds custom transformations from Albumentations library.
    Please, visit `https://albumentations.readthedocs.io`
    to get more information.

    An example of ``transforms`` is as followed:

    .. code-block::

        [
            dict(
                type='ShiftScaleRotate',
                shift_limit=0.0625,
                scale_limit=0.0,
                rotate_limit=0,
                interpolation=1,
                p=0.5),
            dict(
                type='RandomBrightnessContrast',
                brightness_limit=[0.1, 0.3],
                contrast_limit=[0.1, 0.3],
                p=0.2),
            dict(type='ChannelShuffle', p=0.1),
            dict(
                type='OneOf',
                transforms=[
                    dict(type='Blur', blur_limit=3, p=1.0),
                    dict(type='MedianBlur', blur_limit=3, p=1.0)
                ],
                p=0.1),
        ]

    Args:
        transforms (list[dict]): A list of albu transformations
        bbox_params (dict): Bbox_params for albumentation `Compose`
        keymap (dict): Contains {'input key':'albumentation-style key'}
        update_pad_shape (bool): Whether to overwrite 'pad_shape' with the
            augmented image's shape after the transform.
        skip_img_without_anno (bool): Whether to skip the image if no ann left
            after aug
    """

    def __init__(self,
                 transforms,
                 bbox_params=None,
                 keymap=None,
                 update_pad_shape=False,
                 skip_img_without_anno=False):
        if Compose is None:
            raise RuntimeError('albumentations is not installed')

        # Args will be modified later, copying it will be safer
        transforms = copy.deepcopy(transforms)
        if bbox_params is not None:
            bbox_params = copy.deepcopy(bbox_params)
        if keymap is not None:
            keymap = copy.deepcopy(keymap)
        self.transforms = transforms
        self.filter_lost_elements = False
        self.update_pad_shape = update_pad_shape
        self.skip_img_without_anno = skip_img_without_anno

        # A simple workaround to remove masks without boxes
        if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params
                and 'filter_lost_elements' in bbox_params):
            self.filter_lost_elements = True
            self.origin_label_fields = bbox_params['label_fields']
            # Albumentations filters the synthetic 'idx_mapper' field; the
            # original label fields are re-indexed from it afterwards.
            bbox_params['label_fields'] = ['idx_mapper']
            del bbox_params['filter_lost_elements']

        self.bbox_params = (
            self.albu_builder(bbox_params) if bbox_params else None)
        self.aug = Compose([self.albu_builder(t) for t in self.transforms],
                           bbox_params=self.bbox_params)

        if not keymap:
            self.keymap_to_albu = {
                'img': 'image',
                'gt_masks': 'masks',
                'gt_bboxes': 'bboxes'
            }
        else:
            self.keymap_to_albu = keymap
        # Inverse mapping used to restore mmdet-style keys after aug.
        self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}

    def albu_builder(self, cfg):
        """Import a module from albumentations.

        It inherits some of :func:`build_from_cfg` logic. Nested
        'transforms' entries (e.g. inside 'OneOf') are built recursively.

        Args:
            cfg (dict): Config dict. It should at least contain the key "type".

        Returns:
            obj: The constructed object.
        """

        assert isinstance(cfg, dict) and 'type' in cfg
        args = cfg.copy()

        obj_type = args.pop('type')
        if mmcv.is_str(obj_type):
            if albumentations is None:
                raise RuntimeError('albumentations is not installed')
            obj_cls = getattr(albumentations, obj_type)
        elif inspect.isclass(obj_type):
            obj_cls = obj_type
        else:
            raise TypeError(
                f'type must be a str or valid type, but got {type(obj_type)}')

        if 'transforms' in args:
            args['transforms'] = [
                self.albu_builder(transform)
                for transform in args['transforms']
            ]

        return obj_cls(**args)

    @staticmethod
    def mapper(d, keymap):
        """Dictionary mapper. Renames keys according to keymap provided.

        Keys absent from ``keymap`` are kept unchanged.

        Args:
            d (dict): old dict
            keymap (dict): {'old_key':'new_key'}
        Returns:
            dict: new dict.
        """

        updated_dict = {}
        for k, v in zip(d.keys(), d.values()):
            new_k = keymap.get(k, k)
            updated_dict[new_k] = d[k]
        return updated_dict

    def __call__(self, results):
        # dict to albumentations format
        results = self.mapper(results, self.keymap_to_albu)
        # TODO: add bbox_fields
        if 'bboxes' in results:
            # to list of boxes
            if isinstance(results['bboxes'], np.ndarray):
                results['bboxes'] = [x for x in results['bboxes']]
            # add pseudo-field for filtration
            if self.filter_lost_elements:
                results['idx_mapper'] = np.arange(len(results['bboxes']))

        # TODO: Support mask structure in albu
        if 'masks' in results:
            if isinstance(results['masks'], PolygonMasks):
                raise NotImplementedError(
                    'Albu only supports BitMap masks now')
            ori_masks = results['masks']
            # Older albumentations expects a raw ndarray of masks; newer
            # versions expect a list of 2-D masks.
            if albumentations.__version__ < '0.5':
                results['masks'] = results['masks'].masks
            else:
                results['masks'] = [mask for mask in results['masks'].masks]

        results = self.aug(**results)

        if 'bboxes' in results:
            if isinstance(results['bboxes'], list):
                results['bboxes'] = np.array(
                    results['bboxes'], dtype=np.float32)
            results['bboxes'] = results['bboxes'].reshape(-1, 4)

            # filter label_fields
            if self.filter_lost_elements:

                # Re-index labels/masks by the surviving 'idx_mapper'
                # entries so they stay aligned with the kept boxes.
                for label in self.origin_label_fields:
                    results[label] = np.array(
                        [results[label][i] for i in results['idx_mapper']])
                if 'masks' in results:
                    results['masks'] = np.array(
                        [results['masks'][i] for i in results['idx_mapper']])
                    results['masks'] = ori_masks.__class__(
                        results['masks'], results['image'].shape[0],
                        results['image'].shape[1])

                if (not len(results['idx_mapper'])
                        and self.skip_img_without_anno):
                    return None

        if 'gt_labels' in results:
            if isinstance(results['gt_labels'], list):
                results['gt_labels'] = np.array(results['gt_labels'])
            results['gt_labels'] = results['gt_labels'].astype(np.int64)

        # back to the original format
        results = self.mapper(results, self.keymap_back)

        # update final shape
        if self.update_pad_shape:
            results['pad_shape'] = results['img'].shape

        return results

    def __repr__(self):
        repr_str = self.__class__.__name__ + f'(transforms={self.transforms})'
        return repr_str
@PIPELINES.register_module()
class RandomCenterCropPad:
    """Random center crop and random around padding for CornerNet.

    This operation generates randomly cropped image from the original image and
    pads it simultaneously. Different from :class:`RandomCrop`, the output
    shape may not equal to ``crop_size`` strictly. We choose a random value
    from ``ratios`` and the output shape could be larger or smaller than
    ``crop_size``. The padding operation is also different from :class:`Pad`,
    here we use around padding instead of right-bottom padding.

    The relation between output image (padding image) and original image:

    .. code:: text

                        output image

               +----------------------------+
               |          padded area       |
        +------|----------------------------|----------+
        |      |         cropped area       |          |
        |      |         +---------------+  |          |
        |      |         |    .   center |  |          | original image
        |      |         |        range  |  |          |
        |      |         +---------------+  |          |
        +------|----------------------------|----------+
               |          padded area       |
               +----------------------------+

    There are 5 main areas in the figure:

    - output image: output image of this operation, also called padding
      image in following instruction.
    - original image: input image of this operation.
    - padded area: non-intersect area of output image and original image.
    - cropped area: the overlap of output image and original image.
    - center range: a smaller area where random center chosen from.
      center range is computed by ``border`` and original image's shape
      to avoid our random center is too close to original image's border.

    Also this operation act differently in train and test mode, the summary
    pipeline is listed below.

    Train pipeline:

    1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image
       will be ``random_ratio * crop_size``.
    2. Choose a ``random_center`` in center range.
    3. Generate padding image with center matches the ``random_center``.
    4. Initialize the padding image with pixel value equals to ``mean``.
    5. Copy the cropped area to padding image.
    6. Refine annotations.

    Test pipeline:

    1. Compute output shape according to ``test_pad_mode``.
    2. Generate padding image with center matches the original image
       center.
    3. Initialize the padding image with pixel value equals to ``mean``.
    4. Copy the ``cropped area`` to padding image.

    Args:
        crop_size (tuple | None): expected size after crop, final size will
            computed according to ratio. Requires (h, w) in train mode, and
            None in test mode.
        ratios (tuple): random select a ratio from tuple and crop image to
            (crop_size[0] * ratio) * (crop_size[1] * ratio).
            Only available in train mode.
        border (int): max distance from center select area to image border.
            Only available in train mode.
        mean (sequence): Mean values of 3 channels.
        std (sequence): Std values of 3 channels.
        to_rgb (bool): Whether to convert the image from BGR to RGB.
        test_mode (bool): whether involve random variables in transform.
            In train mode, crop_size is fixed, center coords and ratio is
            random selected from predefined lists. In test mode, crop_size
            is image's original shape, center coords and ratio is fixed.
        test_pad_mode (tuple): padding method and padding shape value, only
            available in test mode. Default is using 'logical_or' with
            127 as padding shape value.

            - 'logical_or': final_shape = input_shape | padding_shape_value
            - 'size_divisor': final_shape = int(
              ceil(input_shape / padding_shape_value) * padding_shape_value)
        test_pad_add_pix (int): Extra padding pixel in test mode. Default 0.
        bbox_clip_border (bool, optional): Whether clip the objects outside
            the border of the image. Defaults to True.
    """

    def __init__(self,
                 crop_size=None,
                 ratios=(0.9, 1.0, 1.1),
                 border=128,
                 mean=None,
                 std=None,
                 to_rgb=None,
                 test_mode=False,
                 test_pad_mode=('logical_or', 127),
                 test_pad_add_pix=0,
                 bbox_clip_border=True):
        # Train and test mode have mutually-exclusive argument requirements;
        # validate eagerly so misconfiguration fails at construction time.
        if test_mode:
            assert crop_size is None, 'crop_size must be None in test mode'
            assert ratios is None, 'ratios must be None in test mode'
            assert border is None, 'border must be None in test mode'
            assert isinstance(test_pad_mode, (list, tuple))
            assert test_pad_mode[0] in ['logical_or', 'size_divisor']
        else:
            assert isinstance(crop_size, (list, tuple))
            assert crop_size[0] > 0 and crop_size[1] > 0, (
                'crop_size must > 0 in train mode')
            assert isinstance(ratios, (list, tuple))
            assert test_pad_mode is None, (
                'test_pad_mode must be None in train mode')

        self.crop_size = crop_size
        self.ratios = ratios
        self.border = border
        # We do not set default value to mean, std and to_rgb because these
        # hyper-parameters are easy to forget but could affect the performance.
        # Please use the same setting as Normalize for performance assurance.
        assert mean is not None and std is not None and to_rgb is not None
        self.to_rgb = to_rgb
        self.input_mean = mean
        self.input_std = std
        if to_rgb:
            self.mean = mean[::-1]
            self.std = std[::-1]
        else:
            self.mean = mean
            self.std = std
        self.test_mode = test_mode
        self.test_pad_mode = test_pad_mode
        self.test_pad_add_pix = test_pad_add_pix
        self.bbox_clip_border = bbox_clip_border

    def _get_border(self, border, size):
        """Get final border for the target size.

        This function generates a ``final_border`` according to image's shape.
        The area between ``final_border`` and ``size - final_border`` is the
        ``center range``. We randomly choose center from the ``center range``
        to avoid our random center is too close to original image's border.
        Also ``center range`` should be larger than 0.

        Args:
            border (int): The initial border, default is 128.
            size (int): The width or height of original image.
        Returns:
            int: The final border.
        """
        # Halve the border (via a power-of-two divisor) until the resulting
        # center range (size - 2 * final_border) is positive.
        k = 2 * border / size
        i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))
        return border // i

    def _filter_boxes(self, patch, boxes):
        """Check whether the center of each box is in the patch.

        Args:
            patch (list[int]): The cropped area, [left, top, right, bottom].
            boxes (numpy array, (N x 4)): Ground truth boxes.

        Returns:
            mask (numpy array, (N,)): Each box is inside or outside the patch.
        """
        center = (boxes[:, :2] + boxes[:, 2:]) / 2
        mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (
            center[:, 0] < patch[2]) * (
                center[:, 1] < patch[3])
        return mask

    def _crop_image_and_paste(self, image, center, size):
        """Crop image with a given center and size, then paste the cropped
        image to a blank image with two centers align.

        This function is equivalent to generating a blank image with ``size``
        as its shape. Then cover it on the original image with two centers (
        the center of blank image and the random center of original image)
        aligned. The overlap area is paste from the original image and the
        outside area is filled with ``mean pixel``.

        Args:
            image (np array, H x W x C): Original image.
            center (list[int]): Target crop center coord.
            size (list[int]): Target crop size. [target_h, target_w]

        Returns:
            cropped_img (np array, target_h x target_w x C): Cropped image.
            border (np array, 4): The distance of four border of
                ``cropped_img`` to the original image area, [top, bottom,
                left, right]
            patch (list[int]): The cropped area, [left, top, right, bottom].
        """
        center_y, center_x = center
        target_h, target_w = size
        img_h, img_w, img_c = image.shape

        # Overlap rectangle between the target window and the image.
        x0 = max(0, center_x - target_w // 2)
        x1 = min(center_x + target_w // 2, img_w)
        y0 = max(0, center_y - target_h // 2)
        y1 = min(center_y + target_h // 2, img_h)
        patch = np.array((int(x0), int(y0), int(x1), int(y1)))

        left, right = center_x - x0, x1 - center_x
        top, bottom = center_y - y0, y1 - center_y

        # Initialize the canvas with the per-channel mean, then paste the
        # overlap so both centers align.
        cropped_center_y, cropped_center_x = target_h // 2, target_w // 2
        cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)
        for i in range(img_c):
            cropped_img[:, :, i] += self.mean[i]
        y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)
        x_slice = slice(cropped_center_x - left, cropped_center_x + right)
        cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]

        border = np.array([
            cropped_center_y - top, cropped_center_y + bottom,
            cropped_center_x - left, cropped_center_x + right
        ],
                          dtype=np.float32)

        return cropped_img, border, patch

    def _train_aug(self, results):
        """Random crop and around padding the original image.

        Args:
            results (dict): Image infomations in the augment pipeline.

        Returns:
            results (dict): The updated dict.
        """
        img = results['img']
        h, w, c = img.shape
        boxes = results['gt_bboxes']
        while True:
            scale = random.choice(self.ratios)
            new_h = int(self.crop_size[0] * scale)
            new_w = int(self.crop_size[1] * scale)
            h_border = self._get_border(self.border, h)
            w_border = self._get_border(self.border, w)

            # Try up to 50 random centers before re-sampling a scale.
            for i in range(50):
                center_x = random.randint(low=w_border, high=w - w_border)
                center_y = random.randint(low=h_border, high=h - h_border)

                cropped_img, border, patch = self._crop_image_and_paste(
                    img, [center_y, center_x], [new_h, new_w])

                mask = self._filter_boxes(patch, boxes)
                # if image do not have valid bbox, any crop patch is valid.
                if not mask.any() and len(boxes) > 0:
                    continue

                results['img'] = cropped_img
                results['img_shape'] = cropped_img.shape
                results['pad_shape'] = cropped_img.shape

                x0, y0, x1, y1 = patch

                left_w, top_h = center_x - x0, center_y - y0
                cropped_center_x, cropped_center_y = new_w // 2, new_h // 2

                # crop bboxes accordingly and clip to the image boundary
                for key in results.get('bbox_fields', []):
                    mask = self._filter_boxes(patch, results[key])
                    bboxes = results[key][mask]
                    # Translate boxes into the cropped image's frame.
                    bboxes[:, 0:4:2] += cropped_center_x - left_w - x0
                    bboxes[:, 1:4:2] += cropped_center_y - top_h - y0
                    if self.bbox_clip_border:
                        bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)
                        bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)
                    keep = (bboxes[:, 2] > bboxes[:, 0]) & (
                        bboxes[:, 3] > bboxes[:, 1])
                    bboxes = bboxes[keep]
                    results[key] = bboxes
                    if key in ['gt_bboxes']:
                        if 'gt_labels' in results:
                            labels = results['gt_labels'][mask]
                            labels = labels[keep]
                            results['gt_labels'] = labels

                if 'gt_masks' in results:
                    raise NotImplementedError(
                        'RandomCenterCropPad only supports bbox.')

                # crop semantic seg
                for key in results.get('seg_fields', []):
                    raise NotImplementedError(
                        'RandomCenterCropPad only supports bbox.')
                return results

    def _test_aug(self, results):
        """Around padding the original image without cropping.

        The padding mode and value are from ``test_pad_mode``.

        Args:
            results (dict): Image infomations in the augment pipeline.

        Returns:
            results (dict): The updated dict.
        """
        img = results['img']
        h, w, c = img.shape
        results['img_shape'] = img.shape
        if self.test_pad_mode[0] in ['logical_or']:
            # self.test_pad_add_pix is only used for centernet
            target_h = (h | self.test_pad_mode[1]) + self.test_pad_add_pix
            target_w = (w | self.test_pad_mode[1]) + self.test_pad_add_pix
        elif self.test_pad_mode[0] in ['size_divisor']:
            divisor = self.test_pad_mode[1]
            target_h = int(np.ceil(h / divisor)) * divisor
            target_w = int(np.ceil(w / divisor)) * divisor
        else:
            raise NotImplementedError(
                'RandomCenterCropPad only support two testing pad mode:'
                'logical-or and size_divisor.')

        cropped_img, border, _ = self._crop_image_and_paste(
            img, [h // 2, w // 2], [target_h, target_w])
        results['img'] = cropped_img
        results['pad_shape'] = cropped_img.shape
        results['border'] = border
        return results

    def __call__(self, results):
        img = results['img']
        # float32 is required because the padded canvas is initialized by
        # adding the (possibly fractional) channel means.
        assert img.dtype == np.float32, (
            'RandomCenterCropPad needs the input image of dtype np.float32,'
            ' please set "to_float32=True" in "LoadImageFromFile" pipeline')
        h, w, c = img.shape
        assert c == len(self.mean)
        if self.test_mode:
            return self._test_aug(results)
        else:
            return self._train_aug(results)

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(crop_size={self.crop_size}, '
        repr_str += f'ratios={self.ratios}, '
        repr_str += f'border={self.border}, '
        repr_str += f'mean={self.input_mean}, '
        repr_str += f'std={self.input_std}, '
        repr_str += f'to_rgb={self.to_rgb}, '
        repr_str += f'test_mode={self.test_mode}, '
        repr_str += f'test_pad_mode={self.test_pad_mode}, '
        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
        return repr_str
@PIPELINES.register_module()
class CutOut:
    """CutOut operation.

    Randomly drop some regions of image used in
    `Cutout <https://arxiv.org/abs/1708.04552>`_.

    Args:
        n_holes (int | tuple[int, int]): Number of regions to be dropped.
            If it is given as a list, number of holes will be randomly
            selected from the closed interval [`n_holes[0]`, `n_holes[1]`].
        cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate
            shape of dropped regions. It can be `tuple[int, int]` to use a
            fixed cutout shape, or `list[tuple[int, int]]` to randomly choose
            shape from the list.
        cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The
            candidate ratio of dropped regions. It can be `tuple[float, float]`
            to use a fixed ratio or `list[tuple[float, float]]` to randomly
            choose ratio from the list. Please note that `cutout_shape`
            and `cutout_ratio` cannot be both given at the same time.
        fill_in (tuple[float, float, float] | tuple[int, int, int]): The value
            of pixel to fill in the dropped regions. Default: (0, 0, 0).
    """

    def __init__(self,
                 n_holes,
                 cutout_shape=None,
                 cutout_ratio=None,
                 fill_in=(0, 0, 0)):
        # Exactly one of shape/ratio must be supplied.
        assert (cutout_shape is None) ^ (cutout_ratio is None), \
            'Either cutout_shape or cutout_ratio should be specified.'
        assert (isinstance(cutout_shape, (list, tuple))
                or isinstance(cutout_ratio, (list, tuple)))
        if isinstance(n_holes, tuple):
            assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]
        else:
            # Normalize a scalar count to a degenerate closed interval.
            n_holes = (n_holes, n_holes)
        self.n_holes = n_holes
        self.fill_in = fill_in
        self.with_ratio = cutout_ratio is not None
        self.candidates = cutout_ratio if self.with_ratio else cutout_shape
        if not isinstance(self.candidates, list):
            self.candidates = [self.candidates]

    def __call__(self, results):
        """Call function to drop some regions of image."""
        height, width, _ = results['img'].shape
        num_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)
        for _ in range(num_holes):
            left = np.random.randint(0, width)
            top = np.random.randint(0, height)
            pick = np.random.randint(0, len(self.candidates))
            choice = self.candidates[pick]
            if self.with_ratio:
                hole_w = int(choice[0] * width)
                hole_h = int(choice[1] * height)
            else:
                hole_w, hole_h = choice

            # Holes may extend past the border; clip them to the image.
            right = np.clip(left + hole_w, 0, width)
            bottom = np.clip(top + hole_h, 0, height)
            results['img'][top:bottom, left:right, :] = self.fill_in

        return results

    def __repr__(self):
        shape_desc = (f'cutout_ratio={self.candidates}, ' if self.with_ratio
                      else f'cutout_shape={self.candidates}, ')
        return (f'{self.__class__.__name__}'
                f'(n_holes={self.n_holes}, '
                + shape_desc +
                f'fill_in={self.fill_in})')
| 1 | 24,706 | should we rm line 269? | open-mmlab-mmdetection | py |
@@ -68,7 +68,9 @@ class NVDASpyLib:
# callbacks for extension points
def _onNvdaStartupComplete(self):
- self._isNvdaStartupComplete = True
+		# Queue the setting of the completion variable,
+		# to ensure that NVDA's core loop has started running and has processed initial focus.
+ queueHandler.queueFunction(queueHandler.eventQueue, lambda: setattr(self, '_isNvdaStartupComplete', True))
def _onNvdaSpeech(self, speechSequence=None):
if not speechSequence: | 1 | # A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2018 NV Access Limited
# This file may be used under the terms of the GNU General Public License, version 2 or later.
# For more details see: https://www.gnu.org/licenses/gpl-2.0.html
"""This module provides an NVDA global plugin which creates a and robot library remote server.
It allows tests to get information out of NVDA.
It is copied into the (system test specific) NVDA profile directory. It becomes the '__init__.py' file as part
of a package.
"""
import typing
from typing import Optional
import globalPluginHandler
import threading
from .blockUntilConditionMet import _blockUntilConditionMet
from logHandler import log
from time import perf_counter as _timer
from keyboardHandler import KeyboardInputGesture
import inputCore
import queueHandler
import watchdog
import sys
import os
def _importRobotRemoteServer() -> typing.Type:
log.debug(f"before path mod: {sys.path}")
# Get the path to the top of the package
TOP_DIR = os.path.abspath(os.path.dirname(__file__))
# imports that require libraries not distributed with an install of NVDA
sys.path.append(os.path.join(TOP_DIR, "libs"))
log.debug(f"after path mod: {sys.path}")
from robotremoteserver import RobotRemoteServer
return RobotRemoteServer
class NVDASpyLib:
""" Robot Framework Library to spy on NVDA during system tests.
Used to determine if NVDA has finished starting, and various ways of getting speech output.
All public methods are part of the Robot Library
"""
SPEECH_HAS_FINISHED_SECONDS: float = 0.5
def __init__(self):
# speech cache is ordered temporally, oldest at low indexes, most recent at highest index.
self._nvdaSpeech_requiresLock = [ # requires thread locking before read/write
[""], # initialise with an empty string, this allows for access via [-1]. This is equiv to no speech.
]
self._lastSpeechTime_requiresLock = _timer()
#: Lock to protect members written in _onNvdaSpeech.
self._speechLock = threading.RLock()
self._isNvdaStartupComplete = False
self._allSpeechStartIndex = self.get_last_speech_index()
self._maxKeywordDuration = 30
self._registerWithExtensionPoints()
def _registerWithExtensionPoints(self):
from core import postNvdaStartup
postNvdaStartup.register(self._onNvdaStartupComplete)
# This file (`speechSpyGlobalPlugin.py`) is moved to
# "scratchpad/globalPlugins/speechSpyGlobalPlugin/__init__.py"
# Import path must be valid after `speechSpySynthDriver.py` is moved to "scratchpad/synthDrivers/"
from synthDrivers.speechSpySynthDriver import post_speech
post_speech.register(self._onNvdaSpeech)
# callbacks for extension points
def _onNvdaStartupComplete(self):
self._isNvdaStartupComplete = True
def _onNvdaSpeech(self, speechSequence=None):
if not speechSequence:
return
with self._speechLock:
self._lastSpeechTime_requiresLock = _timer()
self._nvdaSpeech_requiresLock.append(speechSequence)
@staticmethod
def _getJoinedBaseStringsFromCommands(speechCommandArray) -> str:
baseStrings = [c for c in speechCommandArray if isinstance(c, str)]
return ''.join(baseStrings).strip()
def _getSpeechAtIndex(self, speechIndex):
with self._speechLock:
return self._getJoinedBaseStringsFromCommands(self._nvdaSpeech_requiresLock[speechIndex])
def get_speech_at_index_until_now(self, speechIndex: int) -> str:
""" All speech from (and including) the index until now.
@param speechIndex:
@return: The speech joined together, see L{_getJoinedBaseStringsFromCommands}
"""
with self._speechLock:
speechCommands = [
self._getJoinedBaseStringsFromCommands(x) for x in self._nvdaSpeech_requiresLock[speechIndex:]
]
return "\n".join(x for x in speechCommands if x and not x.isspace())
def get_last_speech_index(self) -> int:
with self._speechLock:
return len(self._nvdaSpeech_requiresLock) - 1
def _getIndexOfSpeech(self, speech, searchAfterIndex: Optional[int] = None):
if searchAfterIndex is None:
firstIndexToCheck = 0
else:
firstIndexToCheck = 1 + searchAfterIndex
with self._speechLock:
for index, commands in enumerate(self._nvdaSpeech_requiresLock[firstIndexToCheck:]):
index = index + firstIndexToCheck
baseStrings = [c.strip() for c in commands if isinstance(c, str)]
if any(speech in x for x in baseStrings):
return index
return -1
def _hasSpeechFinished(self, speechStartedIndex: Optional[int] = None):
with self._speechLock:
started = speechStartedIndex is None or speechStartedIndex < self.get_next_speech_index()
finished = self.SPEECH_HAS_FINISHED_SECONDS < _timer() - self._lastSpeechTime_requiresLock
return started and finished
def _devInfoToLog(self):
import api
obj = api.getNavigatorObject()
if hasattr(obj, "devInfo"):
log.info("Developer info for navigator object:\n%s" % "\n".join(obj.devInfo))
else:
log.info("No developer info for navigator object")
def dump_speech_to_log(self):
log.debug("dump_speech_to_log.")
with self._speechLock:
try:
self._devInfoToLog()
except Exception:
log.error("Unable to log dev info")
try:
log.debug(f"All speech:\n{repr(self._nvdaSpeech_requiresLock)}")
except Exception:
log.error("Unable to log speech")
def _minTimeout(self, timeout: float) -> float:
"""Helper to get the minimum value, the timeout passed in, or self._maxKeywordDuration"""
return min(timeout, self._maxKeywordDuration)
def init_max_keyword_duration(self, maxSeconds: float):
"""This should only be called once, immediately after importing the library.
@param maxSeconds: Should match the 'timeout' value given to the `robot.libraries.Remote` instance. If
this value is greater than the value for the `robot.libraries.Remote` instance it may mean that the test
is failed, and NVDA is never exited, requiring manual intervention.
Should be set to a large value like '30' (seconds).
"""
self._maxKeywordDuration = maxSeconds - 1
def wait_for_NVDA_startup_to_complete(self):
_blockUntilConditionMet(
getValue=lambda: self._isNvdaStartupComplete,
giveUpAfterSeconds=self._minTimeout(10),
errorMessage="Unable to connect to nvdaSpy",
)
if self._isNvdaStartupComplete:
self.reset_all_speech_index()
def get_last_speech(self) -> str:
return self._getSpeechAtIndex(-1)
def get_all_speech(self) -> str:
return self.get_speech_at_index_until_now(self._allSpeechStartIndex)
def reset_all_speech_index(self) -> int:
self._allSpeechStartIndex = self.get_last_speech_index()
return self._allSpeechStartIndex
def get_next_speech_index(self) -> int:
""" @return: the next index that will be used.
"""
return self.get_last_speech_index() + 1
def wait_for_specific_speech(
self,
speech: str,
afterIndex: Optional[int] = None,
maxWaitSeconds: int = 5,
) -> int:
"""
@param speech: The speech to expect.
@param afterIndex: The speech should come after this index. The index is exclusive.
@param maxWaitSeconds: The amount of time to wait in seconds.
@return: the index of the speech.
"""
success, speechIndex = _blockUntilConditionMet(
getValue=lambda: self._getIndexOfSpeech(speech, afterIndex),
giveUpAfterSeconds=self._minTimeout(maxWaitSeconds),
shouldStopEvaluator=lambda indexFound: indexFound >= (afterIndex if afterIndex else 0),
intervalBetweenSeconds=0.1,
errorMessage=None
)
if not success:
self.dump_speech_to_log()
raise AssertionError(
"Specific speech did not occur before timeout: {}\n"
"See NVDA log for dump of all speech.".format(speech)
)
return speechIndex
def wait_for_speech_to_finish(
self,
maxWaitSeconds=5.0,
speechStartedIndex: Optional[int] = None
):
_blockUntilConditionMet(
getValue=lambda: self._hasSpeechFinished(speechStartedIndex=speechStartedIndex),
giveUpAfterSeconds=self._minTimeout(maxWaitSeconds),
errorMessage="Speech did not finish before timeout"
)
def emulateKeyPress(self, kbIdentifier: str, blockUntilProcessed=True):
"""
Emulates a key press using NVDA's input gesture framework.
The key press will either result in a script being executed, or the key being sent on to the OS.
By default this method will block until any script resulting from this key has been executed,
and the NVDA core has again gone back to sleep.
@param kbIdentifier: an NVDA keyboard gesture identifier.
0 or more modifier keys followed by a main key, all separated by a plus (+) symbol.
E.g. control+shift+downArrow.
See vkCodes.py in the NVDA source directory for valid key names.
"""
gesture = KeyboardInputGesture.fromName(kbIdentifier)
inputCore.manager.emulateGesture(gesture)
if blockUntilProcessed:
# Emulating may have queued a script or events.
# Insert our own function into the queue after, and wait for that to be also executed.
queueProcessed = set()
def _setQueueProcessed():
nonlocal queueProcessed
queueProcessed = True
queueHandler.queueFunction(queueHandler.eventQueue, _setQueueProcessed)
_blockUntilConditionMet(
getValue=lambda: queueProcessed,
giveUpAfterSeconds=self._minTimeout(5),
errorMessage="Timed out waiting for key to be processed",
)
# We know that by now the core will have woken up and processed the scripts, events and our own function.
# Wait for the core to go to sleep,
# Which means there is no more things the core is currently processing.
_blockUntilConditionMet(
getValue=lambda: watchdog.isCoreAsleep(),
giveUpAfterSeconds=self._minTimeout(5),
errorMessage="Timed out waiting for core to sleep again",
)
class SystemTestSpyServer(globalPluginHandler.GlobalPlugin):
def __init__(self):
super().__init__()
self._server = None
self._start()
def _start(self):
log.debug("SystemTestSpyServer started")
spyLibrary = NVDASpyLib() # spies on NVDA
RobotRemoteServer = _importRobotRemoteServer()
server = self._server = RobotRemoteServer(
spyLibrary, # provides library behaviour
port=8270, # default:8270 is `registered by IANA` for remote server usage. Two ASCII values, RF.
serve=False # we want to start this serving on another thread so as not to block.
)
log.debug("Server address: {}".format(server.server_address))
server_thread = threading.Thread(target=server.serve, name="RF Test Spy Thread")
server_thread.start()
def terminate(self):
log.debug("Terminating the SystemTestSpyServer")
self._server.stop()
GlobalPlugin = SystemTestSpyServer
GlobalPlugin.__gestures = {
}
| 1 | 31,910 | Perhaps the code at `source/core.py:564: postNvdaStartup.notify()` should be queued instead? If we are saying the loop must have started before NVDA's startup is complete, then the `postNvdaStartup` action is incorrect. | nvaccess-nvda | py |
@@ -25,6 +25,13 @@ FactoryGirl.define do
cart.save!
end
+ factory :cart_with_all_approvals_approved do
+ after :create do |cart|
+ cart.approvals.each {|a| a.update_attribute :status, 'approved'}
+ cart.update_attribute :status, 'approved'
+ end
+ end
+
factory :cart_with_approvals_and_items do
after :create do |cart|
cart.cart_items << FactoryGirl.create(:cart_item, cart_id: cart.id) | 1 | FactoryGirl.define do
factory :cart do
flow 'parallel'
name 'Test Cart needing approval'
status 'pending'
factory :cart_with_approval_group do
after :create do |cart|
approval_group = FactoryGirl.create(:approval_group_with_approver_and_requester_approvals)
cart.approval_group = approval_group
cart.save!
end
end
factory :cart_with_approvals do
after :create do |cart|
approver1 = FactoryGirl.create(:user, email_address: '[email protected]', first_name: 'Liono', last_name: 'Approver1')
approver2 = FactoryGirl.create(:user, email_address: '[email protected]', first_name: 'Liono', last_name: 'Approver2')
requester = FactoryGirl.create(:user, email_address: '[email protected]', first_name: 'Liono', last_name: 'Requester')
cart.approvals << FactoryGirl.create(:approval, role: 'approver', user_id: approver1.id)
cart.approvals << FactoryGirl.create(:approval, role: 'approver', user_id: approver2.id)
cart.approvals << FactoryGirl.create(:approval, role: 'requester', user_id: requester.id)
cart.save!
end
factory :cart_with_approvals_and_items do
after :create do |cart|
cart.cart_items << FactoryGirl.create(:cart_item, cart_id: cart.id)
cart.cart_items << FactoryGirl.create(:cart_item, cart_id: cart.id,vendor: "Spud Vendor")
cart.cart_items[0].cart_item_traits << FactoryGirl.create(:cart_item_trait,name: 'socio',value: "W")
cart.cart_items[0].cart_item_traits << FactoryGirl.create(:cart_item_trait,name: 'socio',value: "S")
cart.cart_items[0].cart_item_traits << FactoryGirl.create(:cart_item_trait,name: 'socio',value: "O")
cart.cart_items[0].cart_item_traits << FactoryGirl.create(:cart_item_trait,name: 'features',value: "discount")
cart.cart_items[0].cart_item_traits << FactoryGirl.create(:cart_item_trait,name: 'features',value: "feature2")
cart.cart_items[0].cart_item_traits << FactoryGirl.create(:cart_item_trait,name: 'green',value: 'blah')
cart.save!
end
end
end
factory :cart_with_observers do
after :create do |cart|
#TODO: change approval_group to use a factory that adds observers
approval_group = FactoryGirl.create(:approval_group_with_approvers_observers_and_requester)
cart.approval_group = approval_group
cart.save!
end
end
end
end
| 1 | 12,177 | Needed this for testing locally with mail view but not used for any tests. Might be nice to have some seed scripts at some point. | 18F-C2 | rb |
@@ -291,9 +291,13 @@ func (o *Outbound) callWithPeer(
if response.StatusCode >= 200 && response.StatusCode < 300 {
appHeaders := applicationHeaders.FromHTTPHeaders(
response.Header, transport.NewHeaders())
- appError := response.Header.Get(ApplicationStatusHeader) == ApplicationErrorStatus
+ appError := fromApplicationStatusValue(response.Header.Get(ApplicationStatusHeader))
+ acceptResponseError := fromAcceptValue(response.Header.Get(AcceptResponseErrorHeader))
return &transport.Response{
- Headers: appHeaders,
+ Headers: appHeaders,
+ Features: transport.ResponseFeatures{
+ AcceptResponseError: acceptResponseError,
+ },
Body: response.Body,
ApplicationError: appError,
}, nil | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package http
import (
"context"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"time"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"go.uber.org/yarpc/api/peer"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/internal/introspection"
peerchooser "go.uber.org/yarpc/peer"
"go.uber.org/yarpc/peer/hostport"
"go.uber.org/yarpc/pkg/lifecycle"
"go.uber.org/yarpc/yarpcerrors"
)
// this ensures the HTTP outbound implements both transport.Outbound interfaces
var (
_ transport.UnaryOutbound = (*Outbound)(nil)
_ transport.OnewayOutbound = (*Outbound)(nil)
_ introspection.IntrospectableOutbound = (*Outbound)(nil)
)
var defaultURLTemplate, _ = url.Parse("http://localhost")
// OutboundOption customizes an HTTP Outbound.
type OutboundOption func(*Outbound)
func (OutboundOption) httpOption() {}
// URLTemplate specifies the URL this outbound makes requests to. For
// peer.Chooser-based outbounds, the peer (host:port) spection of the URL may
// vary from call to call but the rest will remain unchanged. For single-peer
// outbounds, the URL will be used as-is.
func URLTemplate(template string) OutboundOption {
return func(o *Outbound) {
o.setURLTemplate(template)
}
}
// AddHeader specifies that an HTTP outbound should always include the given
// header in outgoung requests.
//
// httpTransport.NewOutbound(chooser, http.AddHeader("X-Token", "TOKEN"))
//
// Note that headers starting with "Rpc-" are reserved by YARPC. This function
// will panic if the header starts with "Rpc-".
func AddHeader(key, value string) OutboundOption {
if strings.HasPrefix(strings.ToLower(key), "rpc-") {
panic(fmt.Errorf(
"invalid header name %q: "+
`headers starting with "Rpc-" are reserved by YARPC`, key))
}
return func(o *Outbound) {
if o.headers == nil {
o.headers = make(http.Header)
}
o.headers.Add(key, value)
}
}
// NewOutbound builds an HTTP outbound that sends requests to peers supplied
// by the given peer.Chooser. The URL template for used for the different
// peers may be customized using the URLTemplate option.
//
// The peer chooser and outbound must share the same transport, in this case
// the HTTP transport.
// The peer chooser must use the transport's RetainPeer to obtain peer
// instances and return those peers to the outbound when it calls Choose.
// The concrete peer type is private and intrinsic to the HTTP transport.
func (t *Transport) NewOutbound(chooser peer.Chooser, opts ...OutboundOption) *Outbound {
o := &Outbound{
once: lifecycle.NewOnce(),
chooser: chooser,
urlTemplate: defaultURLTemplate,
tracer: t.tracer,
transport: t,
}
for _, opt := range opts {
opt(o)
}
return o
}
// NewOutbound builds an HTTP outbound that sends requests to peers supplied
// by the given peer.Chooser. The URL template for used for the different
// peers may be customized using the URLTemplate option.
//
// The peer chooser and outbound must share the same transport, in this case
// the HTTP transport.
// The peer chooser must use the transport's RetainPeer to obtain peer
// instances and return those peers to the outbound when it calls Choose.
// The concrete peer type is private and intrinsic to the HTTP transport.
func NewOutbound(chooser peer.Chooser, opts ...OutboundOption) *Outbound {
return NewTransport().NewOutbound(chooser, opts...)
}
// NewSingleOutbound builds an outbound that sends YARPC requests over HTTP
// to the specified URL.
//
// The URLTemplate option has no effect in this form.
func (t *Transport) NewSingleOutbound(uri string, opts ...OutboundOption) *Outbound {
parsedURL, err := url.Parse(uri)
if err != nil {
panic(err.Error())
}
chooser := peerchooser.NewSingle(hostport.PeerIdentifier(parsedURL.Host), t)
o := t.NewOutbound(chooser)
for _, opt := range opts {
opt(o)
}
o.setURLTemplate(uri)
return o
}
// Outbound sends YARPC requests over HTTP. It may be constructed using the
// NewOutbound function or the NewOutbound or NewSingleOutbound methods on the
// HTTP Transport. It is recommended that services use a single HTTP transport
// to construct all HTTP outbounds, ensuring efficient sharing of resources
// across the different outbounds.
type Outbound struct {
chooser peer.Chooser
urlTemplate *url.URL
tracer opentracing.Tracer
transport *Transport
// Headers to add to all outgoing requests.
headers http.Header
once *lifecycle.Once
}
// setURLTemplate configures an alternate URL template.
// The host:port portion of the URL template gets replaced by the chosen peer's
// identifier for each outbound request.
func (o *Outbound) setURLTemplate(URL string) {
parsedURL, err := url.Parse(URL)
if err != nil {
log.Fatalf("failed to configure HTTP outbound: invalid URL template %q: %s", URL, err)
}
o.urlTemplate = parsedURL
}
// Transports returns the outbound's HTTP transport.
func (o *Outbound) Transports() []transport.Transport {
return []transport.Transport{o.transport}
}
// Chooser returns the outbound's peer chooser.
func (o *Outbound) Chooser() peer.Chooser {
return o.chooser
}
// Start the HTTP outbound
func (o *Outbound) Start() error {
return o.once.Start(o.chooser.Start)
}
// Stop the HTTP outbound
func (o *Outbound) Stop() error {
return o.once.Stop(o.chooser.Stop)
}
// IsRunning returns whether the Outbound is running.
func (o *Outbound) IsRunning() bool {
return o.once.IsRunning()
}
// Call makes a HTTP request
func (o *Outbound) Call(ctx context.Context, treq *transport.Request) (*transport.Response, error) {
if err := o.once.WaitUntilRunning(ctx); err != nil {
return nil, err
}
start := time.Now()
deadline, _ := ctx.Deadline()
ttl := deadline.Sub(start)
return o.call(ctx, treq, start, ttl)
}
// CallOneway makes a oneway request
func (o *Outbound) CallOneway(ctx context.Context, treq *transport.Request) (transport.Ack, error) {
if err := o.once.WaitUntilRunning(ctx); err != nil {
return nil, err
}
start := time.Now()
var ttl time.Duration
_, err := o.call(ctx, treq, start, ttl)
if err != nil {
return nil, err
}
return time.Now(), nil
}
func (o *Outbound) call(ctx context.Context, treq *transport.Request, start time.Time, ttl time.Duration) (*transport.Response, error) {
p, onFinish, err := o.getPeerForRequest(ctx, treq)
if err != nil {
return nil, err
}
resp, err := o.callWithPeer(ctx, treq, start, ttl, p)
// Call the onFinish method right before returning (with the error from call with peer)
onFinish(err)
return resp, err
}
func (o *Outbound) callWithPeer(
ctx context.Context,
treq *transport.Request,
start time.Time,
ttl time.Duration,
p *httpPeer,
) (*transport.Response, error) {
req, err := o.createRequest(p, treq)
if err != nil {
return nil, err
}
req.Header = applicationHeaders.ToHTTPHeaders(treq.Headers, nil)
ctx, req, span, err := o.withOpentracingSpan(ctx, req, treq, start)
if err != nil {
return nil, err
}
defer span.Finish()
req = o.withCoreHeaders(req, treq, ttl)
response, err := p.transport.client.Do(req.WithContext(ctx))
if err != nil {
// Workaround borrowed from ctxhttp until
// https://github.com/golang/go/issues/17711 is resolved.
select {
case <-ctx.Done():
err = ctx.Err()
default:
}
span.SetTag("error", true)
span.LogEvent(err.Error())
if err == context.DeadlineExceeded {
end := time.Now()
return nil, yarpcerrors.Newf(
yarpcerrors.CodeDeadlineExceeded,
"client timeout for procedure %q of service %q after %v",
treq.Procedure, treq.Service, end.Sub(start))
}
// Note that the connection may have been lost so the peer connection
// maintenance loop resumes probing for availability.
p.OnDisconnected()
return nil, yarpcerrors.Newf(yarpcerrors.CodeUnknown, "unknown error from http client: %s", err.Error())
}
span.SetTag("http.status_code", response.StatusCode)
if response.StatusCode >= 200 && response.StatusCode < 300 {
appHeaders := applicationHeaders.FromHTTPHeaders(
response.Header, transport.NewHeaders())
appError := response.Header.Get(ApplicationStatusHeader) == ApplicationErrorStatus
return &transport.Response{
Headers: appHeaders,
Body: response.Body,
ApplicationError: appError,
}, nil
}
return nil, getYARPCErrorFromResponse(response)
}
func (o *Outbound) getPeerForRequest(ctx context.Context, treq *transport.Request) (*httpPeer, func(error), error) {
p, onFinish, err := o.chooser.Choose(ctx, treq)
if err != nil {
return nil, nil, err
}
hpPeer, ok := p.(*httpPeer)
if !ok {
return nil, nil, peer.ErrInvalidPeerConversion{
Peer: p,
ExpectedType: "*httpPeer",
}
}
return hpPeer, onFinish, nil
}
func (o *Outbound) createRequest(p *httpPeer, treq *transport.Request) (*http.Request, error) {
newURL := *o.urlTemplate
newURL.Host = p.HostPort()
return http.NewRequest("POST", newURL.String(), treq.Body)
}
func (o *Outbound) withOpentracingSpan(ctx context.Context, req *http.Request, treq *transport.Request, start time.Time) (context.Context, *http.Request, opentracing.Span, error) {
// Apply HTTP Context headers for tracing and baggage carried by tracing.
tracer := o.tracer
var parent opentracing.SpanContext // ok to be nil
if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil {
parent = parentSpan.Context()
}
span := tracer.StartSpan(
treq.Procedure,
opentracing.StartTime(start),
opentracing.ChildOf(parent),
opentracing.Tags{
"rpc.caller": treq.Caller,
"rpc.service": treq.Service,
"rpc.encoding": treq.Encoding,
"rpc.transport": "http",
},
)
ext.PeerService.Set(span, treq.Service)
ext.SpanKindRPCClient.Set(span)
ext.HTTPUrl.Set(span, req.URL.String())
ctx = opentracing.ContextWithSpan(ctx, span)
err := tracer.Inject(
span.Context(),
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(req.Header),
)
return ctx, req, span, err
}
func (o *Outbound) withCoreHeaders(req *http.Request, treq *transport.Request, ttl time.Duration) *http.Request {
// Add default headers to all requests.
for k, vs := range o.headers {
for _, v := range vs {
req.Header.Add(k, v)
}
}
req.Header.Set(CallerHeader, treq.Caller)
req.Header.Set(ServiceHeader, treq.Service)
req.Header.Set(ProcedureHeader, treq.Procedure)
if ttl != 0 {
req.Header.Set(TTLMSHeader, fmt.Sprintf("%d", ttl/time.Millisecond))
}
if treq.ShardKey != "" {
req.Header.Set(ShardKeyHeader, treq.ShardKey)
}
if treq.RoutingKey != "" {
req.Header.Set(RoutingKeyHeader, treq.RoutingKey)
}
if treq.RoutingDelegate != "" {
req.Header.Set(RoutingDelegateHeader, treq.RoutingDelegate)
}
encoding := string(treq.Encoding)
if encoding != "" {
req.Header.Set(EncodingHeader, encoding)
}
return req
}
func getYARPCErrorFromResponse(response *http.Response) error {
contents, err := ioutil.ReadAll(response.Body)
if err != nil {
return yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error())
}
if err := response.Body.Close(); err != nil {
return yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error())
}
// use the status code if we can't get a code from the headers
code := statusCodeToBestCode(response.StatusCode)
if errorCodeText := response.Header.Get(ErrorCodeHeader); errorCodeText != "" {
var errorCode yarpcerrors.Code
// TODO: what to do with error?
if err := errorCode.UnmarshalText([]byte(errorCodeText)); err == nil {
code = errorCode
}
}
return yarpcerrors.Newf(
code,
strings.TrimSuffix(string(contents), "\n"),
).WithName(response.Header.Get(ErrorNameHeader))
}
// Introspect returns basic status about this outbound.
func (o *Outbound) Introspect() introspection.OutboundStatus {
state := "Stopped"
if o.IsRunning() {
state = "Running"
}
var chooser introspection.ChooserStatus
if i, ok := o.chooser.(introspection.IntrospectableChooser); ok {
chooser = i.Introspect()
} else {
chooser = introspection.ChooserStatus{
Name: "Introspection not available",
}
}
return introspection.OutboundStatus{
Transport: "http",
Endpoint: o.urlTemplate.String(),
State: state,
Chooser: chooser,
}
}
| 1 | 15,423 | wait, if we only do this for successful status codes won't we miss a whole slew of responses? We should be checking this for all codes right? | yarpc-yarpc-go | go |
@@ -66,7 +66,9 @@ public class ProtoParserTest {
testDataLocator = TestDataLocator.create(GapicCodeGeneratorAnnotationsTest.class);
testDataLocator.addTestDataSource(CodegenTestUtil.class, "testsrc/common");
- model = CodegenTestUtil.readModel(testDataLocator, tempDir, protoFiles, new String[0]);
+ model =
+ CodegenTestUtil.readModel(
+ testDataLocator, tempDir, protoFiles, new String[] {"library.yaml"});
libraryProtoFile =
model | 1 | /* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.util;
import static com.google.common.truth.Truth.assertThat;
import com.google.api.MethodSignature;
import com.google.api.OperationData;
import com.google.api.Resource;
import com.google.api.ResourceSet;
import com.google.api.codegen.CodegenTestUtil;
import com.google.api.codegen.protoannotations.GapicCodeGeneratorAnnotationsTest;
import com.google.api.tools.framework.model.BoundedDiagCollector;
import com.google.api.tools.framework.model.DiagCollector;
import com.google.api.tools.framework.model.Field;
import com.google.api.tools.framework.model.Interface;
import com.google.api.tools.framework.model.MessageType;
import com.google.api.tools.framework.model.Method;
import com.google.api.tools.framework.model.Model;
import com.google.api.tools.framework.model.ProtoFile;
import com.google.api.tools.framework.model.testing.TestDataLocator;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
public class ProtoParserTest {
private static String[] protoFiles = {"library.proto"};
@ClassRule public static TemporaryFolder tempDir = new TemporaryFolder();
private static Model model;
private static TestDataLocator testDataLocator;
private static ProtoFile libraryProtoFile;
private static Field shelfNameField;
private static Interface libraryService;
private static Method deleteShelfMethod;
private static Method getBigBookMethod;
private static MessageType book;
private static MessageType shelf;
private static Map<Resource, ProtoFile> resourceDefs;
private static Map<ResourceSet, ProtoFile> resourceSetDefs;
private static final DiagCollector diagCollector = new BoundedDiagCollector();
// Object under test.
private static ProtoParser protoParser = new ProtoParser();
@BeforeClass
public static void startUp() {
// Load and parse protofile.
testDataLocator = TestDataLocator.create(GapicCodeGeneratorAnnotationsTest.class);
testDataLocator.addTestDataSource(CodegenTestUtil.class, "testsrc/common");
model = CodegenTestUtil.readModel(testDataLocator, tempDir, protoFiles, new String[0]);
libraryProtoFile =
model
.getFiles()
.stream()
.filter(f -> f.getSimpleName().equals("library.proto"))
.findFirst()
.get();
model.addRoot(libraryProtoFile);
libraryService = libraryProtoFile.getInterfaces().get(0);
shelf =
libraryProtoFile
.getMessages()
.stream()
.filter(m -> m.getSimpleName().equals("Shelf"))
.findFirst()
.get();
shelfNameField =
shelf.getFields().stream().filter(f -> f.getSimpleName().equals("name")).findFirst().get();
book =
libraryProtoFile
.getMessages()
.stream()
.filter(m -> m.getSimpleName().equals("Book"))
.findFirst()
.get();
shelfNameField =
shelf.getFields().stream().filter(f -> f.getSimpleName().equals("name")).findFirst().get();
libraryService = libraryProtoFile.getInterfaces().get(0);
deleteShelfMethod = libraryService.lookupMethod("DeleteShelf");
getBigBookMethod = libraryService.lookupMethod("GetBigBook");
resourceDefs = protoParser.getResourceDefs(Arrays.asList(libraryProtoFile), diagCollector);
resourceSetDefs =
protoParser.getResourceSetDefs(Arrays.asList(libraryProtoFile), diagCollector);
}
@Test
public void testGetResourcePath() {
Field shelfNameField =
shelf.getFields().stream().filter(f -> f.getSimpleName().equals("name")).findFirst().get();
assertThat(protoParser.getResource(shelfNameField).getPath()).isEqualTo("shelves/{shelf_id}");
}
@Test
public void testGetEmptyResource() {
MessageType book =
libraryProtoFile
.getMessages()
.stream()
.filter(m -> m.getSimpleName().equals("Book"))
.findFirst()
.get();
Field authorBookField =
book.getFields().stream().filter(f -> f.getSimpleName().equals("author")).findFirst().get();
assertThat(protoParser.getResource(authorBookField)).isNull();
}
/** Return the entity name, e.g. "shelf" for a resource field. */
@Test
public void testGetResourceEntityName() {
assertThat(protoParser.getResourceEntityName(shelfNameField)).isEqualTo("Shelf");
}
@Test
public void testGetResourceSet() {
Field bookNameField =
book.getFields().stream().filter(f -> f.getSimpleName().equals("name")).findFirst().get();
ResourceSet bookResourceSet = protoParser.getResourceSet(bookNameField);
assertThat(bookResourceSet).isNotNull();
assertThat(bookResourceSet.getName()).isEqualTo("BookOneOf");
assertThat(bookResourceSet.getResourcesCount()).isEqualTo(1);
assertThat(bookResourceSet.getResources(0))
.isEqualTo(Resource.newBuilder().setName("DeletedBook").setPath("_deleted-book_").build());
assertThat(bookResourceSet.getResourceReferencesList()).containsExactly("ArchivedBook", "Book");
}
@Test
public void testGetAllResourceDefs() {
// resourceDefs has already been computed in the setUp() method.
assertThat(resourceDefs).hasSize(4);
assertThat(resourceDefs)
.containsEntry(
Resource.newBuilder().setName("Shelf").setPath("shelves/{shelf_id}").build(),
libraryProtoFile);
assertThat(resourceDefs)
.containsEntry(
Resource.newBuilder().setName("Project").setPath("projects/{project}").build(),
libraryProtoFile);
assertThat(resourceDefs)
.containsEntry(
Resource.newBuilder()
.setName("Book")
.setPath("shelves/{shelf_id}/books/{book_id}")
.build(),
libraryProtoFile);
assertThat(resourceDefs)
.containsEntry(
Resource.newBuilder()
.setName("ArchivedBook")
.setPath("archives/{archive_path}/books/{book_id=**}")
.build(),
libraryProtoFile);
}
@Test
public void testGetAllResourceSetDefs() {
// resourceSetDefs has already been computed in the setUp() method.
assertThat(resourceSetDefs).hasSize(1);
assertThat(resourceSetDefs)
.containsEntry(
ResourceSet.newBuilder()
.setName("BookOneOf")
.addResources(
Resource.newBuilder().setName("DeletedBook").setPath("_deleted-book_"))
.addResourceReferences("ArchivedBook")
.addResourceReferences("Book")
.build(),
libraryProtoFile);
}
  // NOTE(review): this test is an exact duplicate of testGetResourceEntityName
  // above; consider deleting one of the two. TODO confirm with the author.
  @Test
  public void getResourceEntityName() {
    assertThat(protoParser.getResourceEntityName(shelfNameField)).isEqualTo("Shelf");
  }
@Test
public void testGetLongRunningOperation() {
OperationData operationTypes = protoParser.getLongRunningOperation(getBigBookMethod);
OperationData expected =
OperationData.newBuilder()
.setResponseType("google.example.library.v1.Book")
.setMetadataType("google.example.library.v1.GetBigBookMetadata")
.build();
assertThat(operationTypes).isEqualTo(expected);
}
@Test
public void testIsHttpGetMethod() {
assertThat(protoParser.isHttpGetMethod(deleteShelfMethod)).isFalse();
assertThat(protoParser.isHttpGetMethod(getBigBookMethod)).isTrue();
}
@Test
public void testGetServiceAddress() {
String defaultHost = protoParser.getServiceAddress(libraryService);
assertThat(defaultHost).isEqualTo("library-example.googleapis.com:1234");
}
@Test
public void testGetRequiredFields() {
Method publishSeriesMethod = libraryService.lookupMethod("PublishSeries");
List<String> requiredFields = protoParser.getRequiredFields(publishSeriesMethod);
assertThat(requiredFields).containsExactly("books", "series_uuid", "shelf");
}
@Test
public void testGetResourceType() {
MessageType getShelfRequest =
libraryProtoFile
.getMessages()
.stream()
.filter(m -> m.getSimpleName().equals("GetShelfRequest"))
.findFirst()
.get();
Field shelves =
getShelfRequest
.getFields()
.stream()
.filter(f -> f.getSimpleName().equals("name"))
.findFirst()
.get();
String shelfType = protoParser.getResourceReference(shelves);
assertThat(shelfType).isEqualTo("Shelf");
}
@Test
public void testGetMethodSignatures() {
Method getShelfMethod = libraryService.lookupMethod("GetShelf");
List<MethodSignature> getShelfFlattenings = protoParser.getMethodSignatures(getShelfMethod);
assertThat(getShelfFlattenings.size()).isEqualTo(3);
MethodSignature firstSignature = getShelfFlattenings.get(0);
assertThat(firstSignature.getFieldsList().size()).isEqualTo(1);
assertThat(firstSignature.getFieldsList().get(0)).isEqualTo("name");
MethodSignature additionalSignature = getShelfFlattenings.get(1);
assertThat(additionalSignature.getFieldsList().size()).isEqualTo(2);
assertThat(additionalSignature.getFieldsList().get(0)).isEqualTo("name");
assertThat(additionalSignature.getFieldsList().get(1)).isEqualTo("message");
MethodSignature additionalSignature2 = getShelfFlattenings.get(2);
assertThat(additionalSignature2.getFieldsList())
.containsExactly("name", "message", "string_builder");
}
@Test
public void testEmptySignature() {
// Test that we can detect empty method signatures.
Method listShelvesMethod = libraryService.lookupMethod("ListShelves");
List<MethodSignature> listShelvesFlattenings =
protoParser.getMethodSignatures(listShelvesMethod);
assertThat(listShelvesFlattenings.size()).isEqualTo(1);
MethodSignature emptySignature = listShelvesFlattenings.get(0);
assertThat(emptySignature.getFieldsList().size()).isEqualTo(0);
}
@Test
public void testNoSignature() {
// Test that we can detect the absence of method signatures.
Method streamShelvesMethod = libraryService.lookupMethod("StreamShelves");
List<MethodSignature> listShelvesFlattenings =
protoParser.getMethodSignatures(streamShelvesMethod);
assertThat(listShelvesFlattenings.size()).isEqualTo(0);
}
/** The OAuth scopes for this service (e.g. "https://cloud.google.com/auth/cloud-platform"). */
@Test
public void testGetAuthScopes() {
List<String> scopes = protoParser.getAuthScopes(libraryService);
assertThat(scopes)
.containsExactly(
"https://www.googleapis.com/auth/library",
"https://www.googleapis.com/auth/cloud-platform");
}
}
| 1 | 27,568 | Adding in the service yaml `library.yaml` here. | googleapis-gapic-generator | java |
@@ -63,7 +63,7 @@ func (a BasicAuth) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error
// let upstream middleware (e.g. fastcgi and cgi) know about authenticated
// user; this replaces the request with a wrapped instance
- r = r.WithContext(context.WithValue(r.Context(),
+ *r = *r.WithContext(context.WithValue(r.Context(),
httpserver.RemoteUserCtxKey, username))
}
} | 1 | // Package basicauth implements HTTP Basic Authentication for Caddy.
//
// This is useful for simple protections on a website, like requiring
// a password to access an admin interface. This package assumes a
// fairly small threat model.
package basicauth
import (
"bufio"
"context"
"crypto/sha1"
"crypto/subtle"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"github.com/jimstudt/http-authentication/basic"
"github.com/mholt/caddy/caddyhttp/httpserver"
)
// BasicAuth is middleware to protect resources with a username and password.
// Note that HTTP Basic Authentication is not secure by itself and should
// not be used to protect important assets without HTTPS. Even then, the
// security of HTTP Basic Auth is disputed. Use discretion when deciding
// what to protect with BasicAuth.
type BasicAuth struct {
Next httpserver.Handler
SiteRoot string
Rules []Rule
}
// ServeHTTP implements the httpserver.Handler interface.
func (a BasicAuth) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
var protected, isAuthenticated bool
var realm string
for _, rule := range a.Rules {
for _, res := range rule.Resources {
if !httpserver.Path(r.URL.Path).Matches(res) {
continue
}
// path matches; this endpoint is protected
protected = true
realm = rule.Realm
// parse auth header
username, password, ok := r.BasicAuth()
// check credentials
if !ok ||
username != rule.Username ||
!rule.Password(password) {
continue
}
// by this point, authentication was successful
isAuthenticated = true
// let upstream middleware (e.g. fastcgi and cgi) know about authenticated
// user; this replaces the request with a wrapped instance
r = r.WithContext(context.WithValue(r.Context(),
httpserver.RemoteUserCtxKey, username))
}
}
if protected && !isAuthenticated {
// browsers show a message that says something like:
// "The website says: <realm>"
// which is kinda dumb, but whatever.
if realm == "" {
realm = "Restricted"
}
w.Header().Set("WWW-Authenticate", "Basic realm=\""+realm+"\"")
return http.StatusUnauthorized, nil
}
// Pass-through when no paths match
return a.Next.ServeHTTP(w, r)
}
// Rule represents a BasicAuth rule. A username and password
// combination protect the associated resources, which are
// file or directory paths.
type Rule struct {
Username string
Password func(string) bool
Resources []string
Realm string // See RFC 1945 and RFC 2617, default: "Restricted"
}
// PasswordMatcher determines whether a password matches a rule.
type PasswordMatcher func(pw string) bool
var (
htpasswords map[string]map[string]PasswordMatcher
htpasswordsMu sync.Mutex
)
// GetHtpasswdMatcher matches password rules.
func GetHtpasswdMatcher(filename, username, siteRoot string) (PasswordMatcher, error) {
filename = filepath.Join(siteRoot, filename)
htpasswordsMu.Lock()
if htpasswords == nil {
htpasswords = make(map[string]map[string]PasswordMatcher)
}
pm := htpasswords[filename]
if pm == nil {
fh, err := os.Open(filename)
if err != nil {
return nil, fmt.Errorf("open %q: %v", filename, err)
}
defer fh.Close()
pm = make(map[string]PasswordMatcher)
if err = parseHtpasswd(pm, fh); err != nil {
return nil, fmt.Errorf("parsing htpasswd %q: %v", fh.Name(), err)
}
htpasswords[filename] = pm
}
htpasswordsMu.Unlock()
if pm[username] == nil {
return nil, fmt.Errorf("username %q not found in %q", username, filename)
}
return pm[username], nil
}
func parseHtpasswd(pm map[string]PasswordMatcher, r io.Reader) error {
scanner := bufio.NewScanner(r)
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if line == "" || strings.IndexByte(line, '#') == 0 {
continue
}
i := strings.IndexByte(line, ':')
if i <= 0 {
return fmt.Errorf("malformed line, no color: %q", line)
}
user, encoded := line[:i], line[i+1:]
for _, p := range basic.DefaultSystems {
matcher, err := p(encoded)
if err != nil {
return err
}
if matcher != nil {
pm[user] = matcher.MatchesPassword
break
}
}
}
return scanner.Err()
}
// PlainMatcher returns a PasswordMatcher that does a constant-time
// byte comparison against the password passw.
func PlainMatcher(passw string) PasswordMatcher {
// compare hashes of equal length instead of actual password
// to avoid leaking password length
passwHash := sha1.New()
passwHash.Write([]byte(passw))
passwSum := passwHash.Sum(nil)
return func(pw string) bool {
pwHash := sha1.New()
pwHash.Write([]byte(pw))
pwSum := pwHash.Sum(nil)
return subtle.ConstantTimeCompare([]byte(pwSum), []byte(passwSum)) == 1
}
}
| 1 | 10,408 | Why is this dereference needed? | caddyserver-caddy | go |
@@ -36,7 +36,8 @@ import (
// * `tlfNode` allows auto-creation of subdirectories representing
// valid repository checkouts of the corresponding TLF, e.g.
// `.kbfs_autogit/private/chris/dotfiles`. It wraps child nodes in
-// two ways, as both a `readonlyNode` and a `repoNode`.
+// two ways, as both a `readonlyNode` and a `repoNode`. It allows
+// repo directories to be removed via `RemoveDir`.
// * `repoNode` allow auto-clone and auto-pull of the corresponding
// repository on its first access. When the directory corresponding
// to the node is read for the first time for this KBFS instance, | 1 | // Copyright 2018 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libgit
import (
"context"
"path"
"sync"
"time"
"github.com/keybase/kbfs/libkbfs"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
)
// This file contains libkbfs.Node wrappers for implementing the
// .kbfs_autogit directory structure. It breaks down like this:
//
// * `rootWrapper.wrap()` is installed as a root node wrapper, and wraps
// the root node for each TLF in a `rootNode` instance.
// * `rootNode` allows .kbfs_autogit to be auto-created when it is
// looked up, and wraps it two ways, as both a `readonlyNode`, and
// an `autogitRootNode`.
// * `readonlyNode` is always marked as read-only, unless
// `ctxReadWriteKey` has a non-nil value in the context.
// * `autogitRootNode` allows the auto-creation of subdirectories
// representing TLF types, e.g. .kbfs_autogit/private or
// .kbfs_autogit/public. It wraps child nodes two ways, as both a
// `readonlyNode`, and an `tlfTypeNode`.
// * `tlfTypeNode` allows the auto-creation of subdirectories
// representing TLFs, e.g. .kbfs_autogit/private/max or
// .kbfs_autogit/team/keybase. It wraps child nodes in two ways, as
// a `readOnlyNode` and a `tlfNode`.
// * `tlfNode` allows auto-creation of subdirectories representing
// valid repository checkouts of the corresponding TLF, e.g.
// `.kbfs_autogit/private/chris/dotfiles`. It wraps child nodes in
// two ways, as both a `readonlyNode` and a `repoNode`.
// * `repoNode` allow auto-clone and auto-pull of the corresponding
// repository on its first access. When the directory corresponding
// to the node is read for the first time for this KBFS instance,
// the `repoNode` asks `AutogitManager` to either kick off a clone
// or a pull for the repository in question, which will checkout a
// copy of the source repo under that directory. The operation will
// block on that clone/pull operation until it is finished, until
// the given context is canceled, or until 10 seconds is up. But if
// it doesn't finish in time, the operation continues in the
// background and should update the directory asynchronously. It
// registers with the `AutogitManager` in order to keep the checkout
// up-to-date asynchronously if the repo changes. If the operation
// is a clone, a "CLONING" file will be visible in the directory
// until the clone completes. `repoNode` wraps each child node as a
// `readonlyNode`.
type ctxReadWriteKeyType int
type ctxSkipPopulateKeyType int
const (
autogitRoot = ".kbfs_autogit"
populateTimeout = 10 * time.Second
ctxReadWriteKey ctxReadWriteKeyType = 1
ctxSkipPopulateKey ctxSkipPopulateKeyType = 1
public = "public"
private = "private"
team = "team"
)
type repoNode struct {
libkbfs.Node
am *AutogitManager
srcRepoHandle *libkbfs.TlfHandle
repoName string
lock sync.Mutex
populated bool
populatingInProgress chan struct{}
}
var _ libkbfs.Node = (*repoNode)(nil)
func newRepoNode(
n libkbfs.Node, am *AutogitManager, srcRepoHandle *libkbfs.TlfHandle,
repoName string) *repoNode {
rn := &repoNode{
Node: n,
am: am,
srcRepoHandle: srcRepoHandle,
repoName: repoName,
}
// We can't rely on a particular repo node being passed back into
// libkbfs by callers, since they may not keep a reference to it
// after looking it up, and `WrapChild` makes a new `repoNode` for
// each call to it, even for the same underlying NodeID. So we
// keep the populated state in the AutogitManager.
rn.populated = am.isRepoNodePopulated(rn)
return rn
}
func (rn *repoNode) dstDir() string {
var typeStr string
switch rn.srcRepoHandle.Type() {
case tlf.Public:
typeStr = public
case tlf.Private:
typeStr = private
case tlf.SingleTeam:
typeStr = team
}
return path.Join(
autogitRoot, typeStr, string(rn.srcRepoHandle.GetCanonicalName()))
}
func (rn *repoNode) populate(ctx context.Context) bool {
ctx = context.WithValue(ctx, ctxSkipPopulateKey, 1)
children, err := rn.am.config.KBFSOps().GetDirChildren(ctx, rn)
if err != nil {
rn.am.log.CDebugf(ctx, "Error getting children: %+v", err)
return false
}
h, err := rn.am.config.KBFSOps().GetTLFHandle(ctx, rn)
if err != nil {
rn.am.log.CDebugf(ctx, "Error getting handle: %+v", err)
return false
}
// Associate this autogit repo node with the node corresponding to
// the top-level of the source repo. This means it will detect
// changes more often then necessary (e.g., when a different
// branch is updated, or when just objects are updated), but we
// can't just depend on the branch reference file because a
// particular branch could be defined in packed-refs as well, and
// that could change during the lifetime of the repo.
srcRepoFS, _, err := GetRepoAndID(
ctx, rn.am.config, rn.srcRepoHandle, rn.repoName, "")
if err != nil {
rn.am.log.CDebugf(ctx, "Couldn't get repo: %+v", err)
return false
}
rn.am.registerRepoNode(srcRepoFS.RootNode(), rn)
// If the directory is empty, clone it. Otherwise, pull it.
var doneCh <-chan struct{}
cloneNeeded := len(children) == 0
ctx = context.WithValue(ctx, ctxReadWriteKey, 1)
branch := "master"
if cloneNeeded {
doneCh, err = rn.am.Clone(
ctx, rn.srcRepoHandle, rn.repoName, branch, h, rn.dstDir())
} else {
doneCh, err = rn.am.Pull(
ctx, rn.srcRepoHandle, rn.repoName, branch, h, rn.dstDir())
}
if err != nil {
rn.am.log.CDebugf(ctx, "Error starting population: %+v", err)
return false
}
select {
case <-doneCh:
return true
case <-ctx.Done():
rn.am.log.CDebugf(ctx, "Error waiting for population: %+v", ctx.Err())
// If we did a clone, ask for a refresh anyway, so they will
// see the CLONING file at least. The clone operation will
// continue on in the background, so it's ok to consider this
// node `populated`.
return cloneNeeded
}
}
func (rn *repoNode) shouldPopulate() (bool, <-chan struct{}) {
rn.lock.Lock()
defer rn.lock.Unlock()
if rn.populated {
return false, nil
}
if rn.populatingInProgress != nil {
return false, rn.populatingInProgress
}
rn.populatingInProgress = make(chan struct{})
return true, rn.populatingInProgress
}
func (rn *repoNode) finishPopulate(populated bool) {
rn.lock.Lock()
defer rn.lock.Unlock()
rn.populated = populated
close(rn.populatingInProgress)
rn.populatingInProgress = nil
rn.am.populateDone(rn)
}
func (rn *repoNode) updated(ctx context.Context) {
h, err := rn.am.config.KBFSOps().GetTLFHandle(ctx, rn)
if err != nil {
rn.am.log.CDebugf(ctx, "Error getting handle: %+v", err)
return
}
dstDir := rn.dstDir()
rn.am.log.CDebugf(
ctx, "Repo %s/%s/%s updated", h.GetCanonicalPath(), dstDir, rn.repoName)
_, err = rn.am.Pull(ctx, rn.srcRepoHandle, rn.repoName, "master", h, dstDir)
if err != nil {
rn.am.log.CDebugf(ctx, "Error calling pull: %+v", err)
return
}
}
// ShouldRetryOnDirRead implements the Node interface for
// repoNode.
func (rn *repoNode) ShouldRetryOnDirRead(ctx context.Context) (
shouldRetry bool) {
if ctx.Value(ctxSkipPopulateKey) != nil {
return false
}
// Don't let this operation take more than a fixed amount of time.
// We should just let the caller see the CLONING file if it takes
// too long.
ctx, cancel := context.WithTimeout(ctx, populateTimeout)
defer cancel()
for {
doPopulate, ch := rn.shouldPopulate()
if ch == nil {
return shouldRetry
}
// If it wasn't populated on the first check, always force the
// caller to retry.
shouldRetry = true
if doPopulate {
rn.am.log.CDebugf(ctx, "Populating repo node on first access")
shouldRetry = rn.populate(ctx)
rn.finishPopulate(shouldRetry)
return shouldRetry
}
// Wait for the existing populate to succeed or fail.
rn.am.log.CDebugf(ctx, "Waiting for existing populate to finish")
select {
case <-ch:
case <-ctx.Done():
rn.am.log.CDebugf(ctx, "Error waiting for populate: %+v", ctx.Err())
return false
}
}
}
type tlfNode struct {
libkbfs.Node
am *AutogitManager
h *libkbfs.TlfHandle
}
var _ libkbfs.Node = (*tlfNode)(nil)
// ShouldCreateMissedLookup implements the Node interface for
// tlfNode.
func (tn tlfNode) ShouldCreateMissedLookup(
ctx context.Context, name string) (
bool, context.Context, libkbfs.EntryType, string) {
normalizedRepoName := normalizeRepoName(name)
// Is this a legit repo?
_, _, err := GetRepoAndID(ctx, tn.am.config, tn.h, name, "")
if err != nil {
return false, ctx, libkbfs.File, ""
}
ctx = context.WithValue(ctx, ctxReadWriteKey, 1)
if name != normalizedRepoName {
return true, ctx, libkbfs.Sym, normalizedRepoName
}
return true, ctx, libkbfs.Dir, ""
}
// WrapChild implements the Node interface for tlfNode.
func (tn tlfNode) WrapChild(child libkbfs.Node) libkbfs.Node {
child = tn.Node.WrapChild(child)
return newRepoNode(child, tn.am, tn.h, child.GetBasename())
}
// tlfTypeNode represents an autogit subdirectory corresponding to a
// specific TLF type. It can only contain subdirectories that
// correspond to valid TLF name for the TLF type.
type tlfTypeNode struct {
libkbfs.Node
am *AutogitManager
tlfType tlf.Type
}
var _ libkbfs.Node = (*tlfTypeNode)(nil)
// ShouldCreateMissedLookup implements the Node interface for
// tlfTypeNode.
func (ttn tlfTypeNode) ShouldCreateMissedLookup(
ctx context.Context, name string) (
bool, context.Context, libkbfs.EntryType, string) {
_, err := libkbfs.ParseTlfHandle(
ctx, ttn.am.config.KBPKI(), ttn.am.config.MDOps(), name, ttn.tlfType)
ctx = context.WithValue(ctx, ctxReadWriteKey, struct{}{})
switch e := errors.Cause(err).(type) {
case nil:
return true, ctx, libkbfs.Dir, ""
case libkbfs.TlfNameNotCanonical:
return true, ctx, libkbfs.Sym, e.NameToTry
default:
ttn.am.log.CDebugf(ctx,
"Error parsing handle for name %s: %+v", name, err)
return ttn.Node.ShouldCreateMissedLookup(ctx, name)
}
}
// WrapChild implements the Node interface for tlfTypeNode.
func (ttn tlfTypeNode) WrapChild(child libkbfs.Node) libkbfs.Node {
child = ttn.Node.WrapChild(child)
ctx, cancel := context.WithTimeout(context.Background(), populateTimeout)
defer cancel()
h, err := libkbfs.ParseTlfHandle(
ctx, ttn.am.config.KBPKI(), ttn.am.config.MDOps(),
child.GetBasename(), ttn.tlfType)
if err != nil {
// If we have a node for the child already, it can't be
// non-canonical because symlinks don't have Nodes.
ttn.am.log.CDebugf(ctx,
"Error parsing handle for tlfTypeNode child: %+v", err)
return child
}
return &tlfNode{child, ttn.am, h}
}
// autogitRootNode represents the .kbfs_autogit folder, and can only
// contain subdirectories corresponding to TLF types.
type autogitRootNode struct {
libkbfs.Node
am *AutogitManager
}
var _ libkbfs.Node = (*autogitRootNode)(nil)
// ShouldCreateMissedLookup implements the Node interface for
// autogitRootNode.
func (arn autogitRootNode) ShouldCreateMissedLookup(
ctx context.Context, name string) (
bool, context.Context, libkbfs.EntryType, string) {
switch name {
case public, private, team:
ctx = context.WithValue(ctx, ctxReadWriteKey, struct{}{})
return true, ctx, libkbfs.Dir, ""
default:
return arn.Node.ShouldCreateMissedLookup(ctx, name)
}
}
// WrapChild implements the Node interface for autogitRootNode.
func (arn autogitRootNode) WrapChild(child libkbfs.Node) libkbfs.Node {
child = arn.Node.WrapChild(child)
var tlfType tlf.Type
switch child.GetBasename() {
case public:
tlfType = tlf.Public
case private:
tlfType = tlf.Private
case team:
tlfType = tlf.SingleTeam
default:
return child
}
return &tlfTypeNode{
Node: child,
am: arn.am,
tlfType: tlfType,
}
}
// readonlyNode is a read-only node by default, unless `ctxReadWriteKey`
// has a value set in the context.
type readonlyNode struct {
libkbfs.Node
}
var _ libkbfs.Node = (*readonlyNode)(nil)
// Readonly implements the Node interface for readonlyNode.
func (rn readonlyNode) Readonly(ctx context.Context) bool {
return ctx.Value(ctxReadWriteKey) == nil
}
// WrapChild implements the Node interface for readonlyNode.
func (rn readonlyNode) WrapChild(child libkbfs.Node) libkbfs.Node {
return &readonlyNode{rn.Node.WrapChild(child)}
}
// rootNode is a Node wrapper around a TLF root node, that causes the
// autogit root to be created when it is accessed.
type rootNode struct {
libkbfs.Node
am *AutogitManager
}
var _ libkbfs.Node = (*rootNode)(nil)
// ShouldCreateMissedLookup implements the Node interface for
// rootNode.
func (rn rootNode) ShouldCreateMissedLookup(ctx context.Context, name string) (
bool, context.Context, libkbfs.EntryType, string) {
if name == autogitRoot {
ctx = context.WithValue(ctx, ctxReadWriteKey, struct{}{})
ctx = context.WithValue(ctx, libkbfs.CtxAllowNameKey, autogitRoot)
return true, ctx, libkbfs.Dir, ""
}
return rn.Node.ShouldCreateMissedLookup(ctx, name)
}
// WrapChild implements the Node interface for rootNode.
func (rn rootNode) WrapChild(child libkbfs.Node) libkbfs.Node {
child = rn.Node.WrapChild(child)
if child.GetBasename() == autogitRoot {
return &autogitRootNode{
Node: &readonlyNode{child},
am: rn.am,
}
}
return child
}
// rootWrapper is a struct that manages wrapping root nodes with
// autogit-related context.
type rootWrapper struct {
am *AutogitManager
}
func (rw rootWrapper) wrap(node libkbfs.Node) libkbfs.Node {
return &rootNode{node, rw.am}
}
| 1 | 19,098 | Might want to add that this only happens if you have write permissions to the folder. | keybase-kbfs | go |
@@ -53,6 +53,12 @@ type Config struct {
// to use based on region.
EndpointResolver endpoints.Resolver
+ // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
+ // ShouldRetry. If this is set and ShouldRetry is called, then the request's
+ // Retryable field can be either nil or set. Proper handling of the Retryable
+ // field is important when setting this field.
+ EnforceShouldRetryCheck *bool
+
// The region to send requests to. This parameter is required and must
// be configured globally or on a per-client basis unless otherwise
// noted. A full list of regions is found in the "Regions and Endpoints" | 1 | package aws
import (
"net/http"
"time"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/endpoints"
)
// UseServiceDefaultRetries instructs the config to use the service's own
// default number of retries. This will be the default action if
// Config.MaxRetries is nil also.
const UseServiceDefaultRetries = -1
// RequestRetryer is an alias for a type that implements the request.Retryer
// interface.
type RequestRetryer interface{}
// A Config provides service configuration for service clients. By default,
// all clients will use the defaults.DefaultConfig tructure.
//
// // Create Session with MaxRetry configuration to be shared by multiple
// // service clients.
// sess := session.Must(session.NewSession(&aws.Config{
// MaxRetries: aws.Int(3),
// }))
//
// // Create S3 service client with a specific Region.
// svc := s3.New(sess, &aws.Config{
// Region: aws.String("us-west-2"),
// })
type Config struct {
// Enables verbose error printing of all credential chain errors.
// Should be used when wanting to see all errors while attempting to
// retrieve credentials.
CredentialsChainVerboseErrors *bool
// The credentials object to use when signing requests. Defaults to a
// chain of credential providers to search for credentials in environment
// variables, shared credential file, and EC2 Instance Roles.
Credentials *credentials.Credentials
// An optional endpoint URL (hostname only or fully qualified URI)
// that overrides the default generated endpoint for a client. Set this
// to `""` to use the default generated endpoint.
//
// @note You must still provide a `Region` value when specifying an
// endpoint for a client.
Endpoint *string
// The resolver to use for looking up endpoints for AWS service clients
// to use based on region.
EndpointResolver endpoints.Resolver
// The region to send requests to. This parameter is required and must
// be configured globally or on a per-client basis unless otherwise
// noted. A full list of regions is found in the "Regions and Endpoints"
// document.
//
// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
// AWS Regions and Endpoints
Region *string
// Set this to `true` to disable SSL when sending requests. Defaults
// to `false`.
DisableSSL *bool
// The HTTP client to use when sending requests. Defaults to
// `http.DefaultClient`.
HTTPClient *http.Client
// An integer value representing the logging level. The default log level
// is zero (LogOff), which represents no logging. To enable logging set
// to a LogLevel Value.
LogLevel *LogLevelType
// The logger writer interface to write logging messages to. Defaults to
// standard out.
Logger Logger
// The maximum number of times that a request will be retried for failures.
// Defaults to -1, which defers the max retry setting to the service
// specific configuration.
MaxRetries *int
// Retryer guides how HTTP requests should be retried in case of
// recoverable failures.
//
// When nil or the value does not implement the request.Retryer interface,
// the request.DefaultRetryer will be used.
//
// When both Retryer and MaxRetries are non-nil, the former is used and
// the latter ignored.
//
// To set the Retryer field in a type-safe manner and with chaining, use
// the request.WithRetryer helper function:
//
// cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
//
Retryer RequestRetryer
// Disables semantic parameter validation, which validates input for
// missing required fields and/or other semantic request input errors.
DisableParamValidation *bool
// Disables the computation of request and response checksums, e.g.,
// CRC32 checksums in Amazon DynamoDB.
DisableComputeChecksums *bool
// Set this to `true` to force the request to use path-style addressing,
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
// will use virtual hosted bucket addressing when possible
// (`http://BUCKET.s3.amazonaws.com/KEY`).
//
// @note This configuration option is specific to the Amazon S3 service.
// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
// Amazon S3: Virtual Hosting of Buckets
S3ForcePathStyle *bool
// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
// header to PUT requests over 2MB of content. 100-Continue instructs the
// HTTP client not to send the body until the service responds with a
// `continue` status. This is useful to prevent sending the request body
// until after the request is authenticated, and validated.
//
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
//
// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
// `ExpectContinueTimeout` for information on adjusting the continue wait
// timeout. https://golang.org/pkg/net/http/#Transport
//
// You should use this flag to disble 100-Continue if you experience issues
// with proxies or third party S3 compatible services.
S3Disable100Continue *bool
// Set this to `true` to enable S3 Accelerate feature. For all operations
// compatible with S3 Accelerate will use the accelerate endpoint for
// requests. Requests not compatible will fall back to normal S3 requests.
//
// The bucket must be enable for accelerate to be used with S3 client with
// accelerate enabled. If the bucket is not enabled for accelerate an error
// will be returned. The bucket name must be DNS compatible to also work
// with accelerate.
S3UseAccelerate *bool
// Set this to `true` to disable the EC2Metadata client from overriding the
// default http.Client's Timeout. This is helpful if you do not want the
// EC2Metadata client to create a new http.Client. This options is only
// meaningful if you're not already using a custom HTTP client with the
// SDK. Enabled by default.
//
// Must be set and provided to the session.NewSession() in order to disable
// the EC2Metadata overriding the timeout for default credentials chain.
//
// Example:
// sess := session.Must(session.NewSession(aws.NewConfig()
// .WithEC2MetadataDiableTimeoutOverride(true)))
//
// svc := s3.New(sess)
//
EC2MetadataDisableTimeoutOverride *bool
// Instructs the endpiont to be generated for a service client to
// be the dual stack endpoint. The dual stack endpoint will support
// both IPv4 and IPv6 addressing.
//
// Setting this for a service which does not support dual stack will fail
// to make requets. It is not recommended to set this value on the session
// as it will apply to all service clients created with the session. Even
// services which don't support dual stack endpoints.
//
// If the Endpoint config value is also provided the UseDualStack flag
// will be ignored.
//
// Only supported with.
//
// sess := session.Must(session.NewSession())
//
// svc := s3.New(sess, &aws.Config{
// UseDualStack: aws.Bool(true),
// })
UseDualStack *bool
// SleepDelay is an override for the func the SDK will call when sleeping
// during the lifecycle of a request. Specifically this will be used for
// request delays. This value should only be used for testing. To adjust
// the delay of a request see the aws/client.DefaultRetryer and
// aws/request.Retryer.
//
// SleepDelay will prevent any Context from being used for canceling retry
// delay of an API operation. It is recommended to not use SleepDelay at all
// and specify a Retryer instead.
SleepDelay func(time.Duration)
// DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
// Will default to false. This would only be used for empty directory names in s3 requests.
//
// Example:
// sess := session.Must(session.NewSession(&aws.Config{
// DisableRestProtocolURICleaning: aws.Bool(true),
// }))
//
// svc := s3.New(sess)
// out, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String("bucketname"),
// Key: aws.String("//foo//bar//moo"),
// })
DisableRestProtocolURICleaning *bool
}
// NewConfig returns a new Config pointer that can be chained with builder
// methods to set multiple configuration values inline without using pointers.
//
//     // Create Session with MaxRetry configuration to be shared by multiple
//     // service clients.
//     sess := session.Must(session.NewSession(aws.NewConfig().
//         WithMaxRetries(3),
//     ))
//
//     // Create S3 service client with a specific Region.
//     svc := s3.New(sess, aws.NewConfig().
//         WithRegion("us-west-2"),
//     )
func NewConfig() *Config {
	// All fields are pointers, so the zero value means "unset".
	return new(Config)
}
// WithCredentialsChainVerboseErrors sets the CredentialsChainVerboseErrors
// field and returns the Config pointer so further With* calls can be chained.
func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
	c.CredentialsChainVerboseErrors = &verboseErrs
	return c
}

// WithCredentials sets the Credentials field and returns the Config pointer
// for chaining.
func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
	c.Credentials = creds
	return c
}

// WithEndpoint sets the Endpoint field and returns the Config pointer for
// chaining.
func (c *Config) WithEndpoint(endpoint string) *Config {
	c.Endpoint = &endpoint
	return c
}

// WithEndpointResolver sets the EndpointResolver field and returns the Config
// pointer for chaining.
func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
	c.EndpointResolver = resolver
	return c
}

// WithRegion sets the Region field and returns the Config pointer for
// chaining.
func (c *Config) WithRegion(region string) *Config {
	c.Region = &region
	return c
}

// WithDisableSSL sets the DisableSSL field and returns the Config pointer for
// chaining.
func (c *Config) WithDisableSSL(disable bool) *Config {
	c.DisableSSL = &disable
	return c
}

// WithHTTPClient sets the HTTPClient field and returns the Config pointer for
// chaining.
func (c *Config) WithHTTPClient(client *http.Client) *Config {
	c.HTTPClient = client
	return c
}

// WithMaxRetries sets the MaxRetries field and returns the Config pointer for
// chaining.
func (c *Config) WithMaxRetries(max int) *Config {
	c.MaxRetries = &max
	return c
}

// WithDisableParamValidation sets the DisableParamValidation field and
// returns the Config pointer for chaining.
func (c *Config) WithDisableParamValidation(disable bool) *Config {
	c.DisableParamValidation = &disable
	return c
}

// WithDisableComputeChecksums sets the DisableComputeChecksums field and
// returns the Config pointer for chaining.
func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
	c.DisableComputeChecksums = &disable
	return c
}

// WithLogLevel sets the LogLevel field and returns the Config pointer for
// chaining.
func (c *Config) WithLogLevel(level LogLevelType) *Config {
	c.LogLevel = &level
	return c
}

// WithLogger sets the Logger field and returns the Config pointer for
// chaining.
func (c *Config) WithLogger(logger Logger) *Config {
	c.Logger = logger
	return c
}

// WithS3ForcePathStyle sets the S3ForcePathStyle field and returns the Config
// pointer for chaining.
func (c *Config) WithS3ForcePathStyle(force bool) *Config {
	c.S3ForcePathStyle = &force
	return c
}

// WithS3Disable100Continue sets the S3Disable100Continue field and returns
// the Config pointer for chaining.
func (c *Config) WithS3Disable100Continue(disable bool) *Config {
	c.S3Disable100Continue = &disable
	return c
}

// WithS3UseAccelerate sets the S3UseAccelerate field and returns the Config
// pointer for chaining.
func (c *Config) WithS3UseAccelerate(enable bool) *Config {
	c.S3UseAccelerate = &enable
	return c
}

// WithUseDualStack sets the UseDualStack field and returns the Config pointer
// for chaining.
func (c *Config) WithUseDualStack(enable bool) *Config {
	c.UseDualStack = &enable
	return c
}

// WithEC2MetadataDisableTimeoutOverride sets the
// EC2MetadataDisableTimeoutOverride field and returns the Config pointer for
// chaining.
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
	c.EC2MetadataDisableTimeoutOverride = &enable
	return c
}

// WithSleepDelay overrides the function used to sleep while waiting for the
// next retry. Defaults to time.Sleep.
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
	c.SleepDelay = fn
	return c
}
// MergeIn merges the passed in configs into the existing config object,
// in order. Later configs take precedence over earlier ones; nil fields
// are ignored (see mergeInConfig).
func (c *Config) MergeIn(cfgs ...*Config) {
	for _, other := range cfgs {
		mergeInConfig(c, other)
	}
}
// mergeInConfig copies every non-nil field of other into dst. Fields left
// nil on other are skipped, so unset values on other never clobber values
// already present on dst. A nil other is a no-op.
func mergeInConfig(dst *Config, other *Config) {
	if other == nil {
		return
	}
	if other.CredentialsChainVerboseErrors != nil {
		dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
	}
	if other.Credentials != nil {
		dst.Credentials = other.Credentials
	}
	if other.Endpoint != nil {
		dst.Endpoint = other.Endpoint
	}
	if other.EndpointResolver != nil {
		dst.EndpointResolver = other.EndpointResolver
	}
	if other.Region != nil {
		dst.Region = other.Region
	}
	if other.DisableSSL != nil {
		dst.DisableSSL = other.DisableSSL
	}
	if other.HTTPClient != nil {
		dst.HTTPClient = other.HTTPClient
	}
	if other.LogLevel != nil {
		dst.LogLevel = other.LogLevel
	}
	if other.Logger != nil {
		dst.Logger = other.Logger
	}
	if other.MaxRetries != nil {
		dst.MaxRetries = other.MaxRetries
	}
	if other.Retryer != nil {
		dst.Retryer = other.Retryer
	}
	if other.DisableParamValidation != nil {
		dst.DisableParamValidation = other.DisableParamValidation
	}
	if other.DisableComputeChecksums != nil {
		dst.DisableComputeChecksums = other.DisableComputeChecksums
	}
	if other.S3ForcePathStyle != nil {
		dst.S3ForcePathStyle = other.S3ForcePathStyle
	}
	if other.S3Disable100Continue != nil {
		dst.S3Disable100Continue = other.S3Disable100Continue
	}
	if other.S3UseAccelerate != nil {
		dst.S3UseAccelerate = other.S3UseAccelerate
	}
	if other.UseDualStack != nil {
		dst.UseDualStack = other.UseDualStack
	}
	if other.EC2MetadataDisableTimeoutOverride != nil {
		dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
	}
	if other.SleepDelay != nil {
		dst.SleepDelay = other.SleepDelay
	}
	if other.DisableRestProtocolURICleaning != nil {
		dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
	}
}
// Copy will return a shallow copy of the Config object. If any additional
// configurations are provided they will be merged into the new config returned.
func (c *Config) Copy(cfgs ...*Config) *Config {
	// Start from the receiver's values, then layer each extra config on top;
	// MergeIn is variadic, so the extra configs can be applied in one call.
	dst := &Config{}
	dst.MergeIn(c)
	dst.MergeIn(cfgs...)
	return dst
}
| 1 | 8,705 | Adding a small blurb about why someone would want to enable this flag would help clarify what it is for. Such as something about when providing a custom retry handler and how`ShouldRetry` will be handled with and without the flag enabled. | aws-aws-sdk-go | go |
@@ -59,6 +59,6 @@ func Register(registry transport.Registry, service Service) {
proto := service.Protocol()
for method, h := range service.Handlers() {
handler := thriftHandler{Handler: h, Protocol: proto}
- registry.Register(procedureName(name, method), handler)
+ registry.Register("", procedureName(name, method), handler)
}
} | 1 | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package thrift
import (
"github.com/yarpc/yarpc-go/transport"
"github.com/thriftrw/thriftrw-go/protocol"
"github.com/thriftrw/thriftrw-go/wire"
)
// Handler represents a Thrift request handler.
type Handler interface {
	// Handle processes a single Thrift request body and returns the response
	// body, optional response metadata, and an error if the call failed.
	Handle(req *Request, body wire.Value) (wire.Value, *Response, error)
}
// HandlerFunc is a convenience adapter that allows a plain function to be
// used as a Handler.
type HandlerFunc func(*Request, wire.Value) (wire.Value, *Response, error)

// Handle forwards the request to the underlying function.
func (f HandlerFunc) Handle(req *Request, body wire.Value) (wire.Value, *Response, error) {
	return f(req, body)
}
// Service represents a Thrift service implementation.
type Service interface {
	// Name of the Thrift service.
	Name() string

	// Protocol to use for encoding requests and responses of this service.
	Protocol() protocol.Protocol

	// Handlers returns a map of method name to Handler for all methods of
	// this service.
	Handlers() map[string]Handler
}
// Register registers the handlers for the methods of the given service with the
// given Registry.
func Register(registry transport.Registry, service Service) {
	serviceName := service.Name()
	proto := service.Protocol()
	// Wrap each method handler so requests/responses are (de)serialized with
	// the service's protocol, and register it under "Service::method".
	for methodName, h := range service.Handlers() {
		wrapped := thriftHandler{Handler: h, Protocol: proto}
		registry.Register(procedureName(serviceName, methodName), wrapped)
	}
}
| 1 | 9,465 | assume the user will be able to register for a custom service name in future? | yarpc-yarpc-go | go |
@@ -27,7 +27,9 @@ using System.Threading.Tasks;
using JetBrains.Annotations;
using pwiz.Common.Collections;
using pwiz.Common.SystemUtil;
-using pwiz.Skyline.Util;
+using pwiz.Skyline.Util;
+
+
// ReSharper disable InconsistentlySynchronizedField
namespace pwiz.Skyline.Controls.Graphs | 1 | /*
* Original author: Rita Chupalov <ritach .at. uw.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2020 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using pwiz.Skyline.Model;
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using JetBrains.Annotations;
using pwiz.Common.Collections;
using pwiz.Common.SystemUtil;
using pwiz.Skyline.Util;
// ReSharper disable InconsistentlySynchronizedField
namespace pwiz.Skyline.Controls.Graphs
{
public class DetectionPlotData
{
private static DetectionDataCache _dataCache;
private Dictionary<DetectionsGraphController.TargetType, DataSet> _data = new Dictionary<DetectionsGraphController.TargetType, DataSet>();
public SrmDocument Document { get; private set; }
public float QValueCutoff { get; private set; }
public bool IsValid { get; }
public int ReplicateCount { get; private set; }
        // Returns the precomputed DataSet for the requested target type
        // (peptide or precursor). Both keys are populated by the constructor
        // when the data is valid.
        public DataSet GetTargetData(DetectionsGraphController.TargetType target)
        {
            return _data[target];
        }
public static DetectionDataCache GetDataCache()
{
if (_dataCache == null)
_dataCache = new DetectionDataCache();
return _dataCache;
}
public static void ReleaseDataCache()
{
_dataCache?.Dispose();
_dataCache = null;
}
public List<string> ReplicateNames { get; private set; }
public static DetectionPlotData INVALID = new DetectionPlotData(null, 0.001f);
public const int REPORTING_STEP = 3;
        /// <summary>
        /// Computes per-replicate q-value data for every precursor and peptide in the
        /// document. If the document/cutoff are unusable the constructor returns early
        /// and IsValid stays false. Cancellation also leaves the object invalid.
        /// </summary>
        /// <param name="document">Document to scan; must have measured results.</param>
        /// <param name="qValueCutoff">Detection cutoff; 0 and 1 are rejected as degenerate.</param>
        /// <param name="cancellationToken">Checked per precursor; cancellation aborts silently.</param>
        /// <param name="progressReport">Optional percent-complete callback (0-90 covers this scan;
        /// the DataSet constructors report 92-100).</param>
        public DetectionPlotData(SrmDocument document, float qValueCutoff,
            CancellationToken cancellationToken = default(CancellationToken), [CanBeNull] Action<int> progressReport = null)
        {
            if (document == null || qValueCutoff == 0 || qValueCutoff == 1 ||
                !document.Settings.HasResults) return;
            if (document.MoleculeTransitionGroupCount == 0 || document.PeptideCount == 0 ||
                document.MeasuredResults.Chromatograms.Count == 0)
                return;
            QValueCutoff = qValueCutoff;
            Document = document;
            var precursorData = new List<QData>(document.MoleculeTransitionGroupCount);
            var peptideData = new List<QData>(document.PeptideCount);
            ReplicateCount = document.MeasuredResults.Chromatograms.Count;
            ReplicateNames = (from chromatogram in document.MeasuredResults.Chromatograms
                select chromatogram.Name).ToList();
            var thisPeptideData = new List<List<float>>();
            var peptideCount = 0;
            var currentProgress = 0;
            // Progress is reported in REPORTING_STEP-percent increments up to 90%.
            var reportingStep = document.PeptideCount / (90/REPORTING_STEP);
            foreach (var peptide in document.Peptides)
            {
                thisPeptideData.Clear();
                //iterate over peptide's precursors
                foreach (var precursor in peptide.TransitionGroups)
                {
                    if (cancellationToken.IsCancellationRequested)
                        return;
                    if (precursor.IsDecoy) continue;
                    var qs = new List<float>(ReplicateCount);
                    //get q-values for precursor replicates; NaN marks a missing value
                    foreach (var i in Enumerable.Range(0, ReplicateCount))
                    {
                        var chromInfo = precursor.GetSafeChromInfo(i).FirstOrDefault(c => c.OptimizationStep == 0);
                        if (chromInfo != null && chromInfo.QValue.HasValue)
                            qs.Add(chromInfo.QValue.Value);
                        else
                            qs.Add(float.NaN);
                    }
                    precursorData.Add(new QData(precursor.Id, qs));
                    thisPeptideData.Add(qs);
                }
                if (thisPeptideData.Count > 0)
                {
                    // A peptide's q-value in a replicate is the best (minimum)
                    // q-value of any of its precursors in that replicate.
                    peptideData.Add(new QData(peptide.Id,
                        Enumerable.Range(0, ReplicateCount).Select(
                            i =>
                            {
                                var min = new Statistics(thisPeptideData.Select(lst => (double) lst[i])).Min();
                                return (float) min;
                            }).ToList()
                        ));
                }
                if(peptideCount++ == reportingStep * currentProgress)
                    progressReport?.Invoke(REPORTING_STEP * currentProgress++);
            }
            _data[DetectionsGraphController.TargetType.PRECURSOR] = new DataSet(precursorData, ReplicateCount, QValueCutoff);
            _data[DetectionsGraphController.TargetType.PEPTIDE] = new DataSet(peptideData, ReplicateCount, QValueCutoff);
            IsValid = true;
        }
private bool IsValidFor(SrmDocument document, float qValue)
{
return document != null && Document != null && IsValid &&
ReferenceEquals(document, Document) &&
qValue == QValueCutoff;
}
        /// <summary>
        /// Aggregated per-replicate detection statistics for one target type,
        /// derived from a list of per-target q-value series.
        /// </summary>
        public class DataSet
        {
            // Targets detected (q < cutoff) in each replicate.
            public ImmutableList<int> TargetsCount { get; private set; }
            // Targets detected in this or any earlier replicate (running union).
            public ImmutableList<int> TargetsCumulative { get; private set; }
            // Targets detected in this and every earlier replicate (running intersection).
            public ImmutableList<int> TargetsAll { get; private set; }
            // Median of the detected targets' q-values per replicate.
            public ImmutableList<float> QMedians { get; private set; }
            // Histogram[i] = number of targets detected in exactly (i+1) replicates.
            public ImmutableList<int> Histogram { get; private set; }
            public double MaxCount
            {
                get { return new Statistics(TargetsCumulative.Select(i => (double)i)).Max(); }
            }
            // Builds all statistics; progress is reported at fixed percentages
            // (92-100) continuing on from the DetectionPlotData constructor.
            public DataSet(List<QData> data, int replicateCount, float qValueCutoff,
                CancellationToken cancellationToken = default(CancellationToken), [CanBeNull] Action<int> progressReport = null)
            {
                TargetsCount = ImmutableList<int>.ValueOf(Enumerable.Range(0, replicateCount)
                    .Select(i => data.Count(t => t.QValues[i] < qValueCutoff)));
                CancelOrReport(92, cancellationToken, progressReport);
                QMedians = ImmutableList<float>.ValueOf(Enumerable.Range(0, replicateCount)
                    .Select(i =>
                    {
                        var qStats = new Statistics(
                            data.FindAll(t => t.QValues[i] < qValueCutoff)
                                .Select(t => (double)t.QValues[i]));
                        return (float)qStats.Median();
                    }));
                CancelOrReport(94, cancellationToken, progressReport);
                TargetsCumulative = ImmutableList<int>.ValueOf(Enumerable.Range(0, replicateCount)
                    .Select(i => data.Count(t => t.MinQValues[i] < qValueCutoff)));
                CancelOrReport(96, cancellationToken, progressReport);
                TargetsAll = ImmutableList<int>.ValueOf(Enumerable.Range(0, replicateCount)
                    .Select(i => data.Count(t => t.MaxQValues[i] < qValueCutoff)));
                CancelOrReport(98, cancellationToken, progressReport);
                var histogramPairs = data.Select(t => t.QValues.Count(f => f < qValueCutoff)) //Count replicates for each target
                    .GroupBy(f => f, c => 1,
                        (f, c) => new
                        {replicateNum = f, histCount = c.Sum()}).ToLookup((tuple)=> tuple.replicateNum); //Group targets by the number of replicates
                // NOTE(review): Range(1, replicateCount + 1) covers detection counts
                // 1..replicateCount+1, so targets detected in 0 replicates are
                // excluded and the last bucket (replicateCount+1) can never be
                // non-zero — looks like an off-by-one; confirm intended behavior.
                Histogram = ImmutableList<int>.ValueOf(Enumerable.Range(1, replicateCount + 1)
                    .Select(n => histogramPairs.Contains(n) ? histogramPairs[n].First().histCount : 0));
                CancelOrReport(100, cancellationToken, progressReport);
            }
            // Throws OperationCanceledException if cancellation was requested,
            // otherwise reports the given completion percentage (if a callback is set).
            private static void CancelOrReport(int percent,
                CancellationToken cancellationToken = default(CancellationToken), [CanBeNull] Action<int> progressReport = null)
            {
                if (cancellationToken.IsCancellationRequested)
                    throw new OperationCanceledException();
                progressReport?.Invoke(percent);
            }
            /// <summary>
            /// Returns count of targets detected in at least minRep replicates.
            /// (Histogram index i corresponds to i+1 replicates, hence minRep-1.)
            /// </summary>
            public int getCountForMinReplicates(int minRep)
            {
                if (minRep > Histogram.Count) return 0;
                return Enumerable.Range(Math.Max(minRep-1, 0), Histogram.Count - Math.Max(minRep - 1, 0)).Select(i => Histogram[i]).Sum();
            }
        }
        /// <summary>
        /// List of q-values across replicates for a single target (peptide or precursor).
        /// It is also equipped with lists of running mins and maxes for this target,
        /// used to compute cumulative ("detected so far") and intersection
        /// ("detected in all so far") statistics.
        /// </summary>
        public class QData
        {
            public QData(Identity target, IReadOnlyList<float> qValues)
            {
                Target = target;
                QValues = ImmutableList.ValueOf(qValues);
                // Calculate running mins and maxes while taking NaNs into account:
                // NaN entries inherit the previous running value, and a leading
                // run of NaNs stays NaN until the first real q-value appears.
                var mins = Enumerable.Repeat(float.NaN, qValues.Count).ToList();
                var maxes = Enumerable.Repeat(float.NaN, qValues.Count).ToList();
                if (!qValues.All(float.IsNaN))
                {
                    var runningNaN = true;
                    for (var i = 0; i < qValues.Count; i++)
                    {
                        //if this and all previous values are NaN
                        if (float.IsNaN(qValues[i]))
                        {
                            if (!runningNaN)
                            {
                                mins[i] = mins[i - 1];
                                maxes[i] = maxes[i - 1];
                            }
                        }
                        else
                        {
                            if (runningNaN)
                            {
                                // First real value seeds both running series.
                                mins[i] = maxes[i] = qValues[i];
                                runningNaN = false;
                            }
                            else
                            {
                                mins[i] = Math.Min(mins[i - 1], qValues[i]);
                                maxes[i] = Math.Max(maxes[i - 1], qValues[i]);
                            }
                        }
                    }
                }
                MinQValues = ImmutableList.ValueOf(mins);
                MaxQValues = ImmutableList.ValueOf(maxes);
            }
            public Identity Target { get; private set; }
            public ImmutableList<float> QValues { get; private set; }
            public ImmutableList<float> MinQValues { get; private set; }
            public ImmutableList<float> MaxQValues { get; private set; }
        }
public class DetectionDataCache : IDisposable
{
private class DataRequest
{
public float qValue;
}
public SrmDocument _document;
private ConcurrentQueue<DetectionPlotData> _datas;
private readonly StackWorker<DataRequest> _stackWorker;
private CancellationTokenSource _tokenSource;
private Action<DetectionPlotData> _callback;
private readonly object _statusLock = new object();
// This is max number of replicates that the cache will store before starting to purge
// the old datasets. Number of replicates is a good proxy for the amount of memory used
// by a dataset.
private const int CACHE_CAPACITY = 200;
public event Action<CacheStatus> StatusChange;
public event Action<int> ReportProgress;
public enum CacheStatus { idle, processing, error, canceled }
private CacheStatus _status;
public CacheStatus Status
{
get => _status;
set
{
_status = value;
StatusChange?.Invoke(_status);
}
}
//exposing for testing purposes
public ConcurrentQueue<DetectionPlotData> Datas => _datas;
            // Starts the single background worker that computes datasets and
            // initializes the (initially empty) cache queue.
            public DetectionDataCache()
            {
                _stackWorker = new StackWorker<DataRequest>(null, CacheData);
                //single worker thread because we do not need to parallelize the calculations,
                //only to offload them from the UI thread
                _stackWorker.RunAsync(1, @"DetectionsDataCache");
                _tokenSource = new CancellationTokenSource();
                _datas = new ConcurrentQueue<DetectionPlotData>();
                Status = CacheStatus.idle;
            }
            /// <summary>
            /// Tries to retrieve cached data for the document/q-value pair.
            /// Returns true with valid data on a cache hit. On a miss, returns
            /// false (data = INVALID) and schedules a background computation;
            /// callback is invoked later with the result. When the document has
            /// changed, the cache is purged asynchronously before the request
            /// is queued.
            /// </summary>
            public bool TryGet(SrmDocument doc, float qValue, Action<DetectionPlotData> callback, out DetectionPlotData data)
            {
                data = INVALID;
                if (IsDisposed) return false;
                var request = new DataRequest() { qValue = qValue};
                if (ReferenceEquals(doc, _document))
                {
                    data = Get(request) ?? INVALID;
                    if (data.IsValid)
                        return true;
                    // Cache miss: remember the callback and queue the computation.
                    _callback = callback;
                    _stackWorker.Add(request);
                }
                else
                {
                    // Document changed: cancel in-flight work and purge stale
                    // entries on a background task, then re-queue this request.
                    _document = doc;
                    new Task(() => CancelWorker(request, false)).Start();
                }
                return false;
            }
            // User-initiated cancellation: aborts the current computation on a
            // background task and leaves the cache in the 'canceled' state.
            public void Cancel()
            {
                if (IsDisposed) return;
                new Task(() => CancelWorker(null, true)).Start();
            }
            /// <summary>
            /// Cancels any in-flight computation, waits for the worker to finish,
            /// purges cache entries that belong to other documents, and optionally
            /// re-queues a new request once cancellation has completed.
            /// </summary>
            /// <param name="request">Request to queue after cancellation, or null.</param>
            /// <param name="userCancel">True if triggered by the user (sets the
            /// 'canceled' status) rather than by a document switch.</param>
            private void CancelWorker(DataRequest request, bool userCancel)
            {
                //signal cancel to other workers and wait
                _tokenSource.Cancel();
                var oldTokenSource = _tokenSource;
                _tokenSource = new CancellationTokenSource();
                lock (_statusLock) //Wait for the current worker to complete
                {
                    oldTokenSource.Dispose();
                    //purge the queue: keep only entries for the current document
                    var queueLength = _datas.Count;
                    for (var i = 0; i < queueLength; i++)
                    {
                        if(_datas.TryDequeue(out var dump))
                            if(ReferenceEquals(_document, dump.Document))
                                _datas.Enqueue(dump);
                    }
                    if(userCancel)
                        Status = CacheStatus.canceled;
                    else
                        Status = CacheStatus.idle;
                }
                //if provided, add the new request to the queue after cancellation is complete
                if (request != null)
                {
                    _stackWorker.Add(request);
                }
            }
private DetectionPlotData Get(DataRequest request)
{
return _datas.FirstOrDefault(d => d.IsValidFor(_document, request.qValue));
}
            //Worker thread method: computes a dataset for the request unless an
            //equivalent one is already cached. Runs under _statusLock so that
            //CancelWorker can wait for completion. On success the result is
            //enqueued (evicting the oldest entry when over capacity, using
            //total replicate count as a memory proxy) and _callback is invoked.
            //Any exception flips Status to 'error' and is rethrown.
            private void CacheData(DataRequest request, int index)
            {
                try
                {
                    lock (_statusLock)
                    {
                        //first make sure it hasn't been retrieved already
                        //and calculate the queue size
                        var currentSize = 0;
                        DetectionPlotData res = null;
                        foreach (var dat in _datas)
                        {
                            currentSize += dat.ReplicateCount;
                            if (dat.IsValidFor(_document, request.qValue)) res = dat;
                        }
                        if (res != null)
                            return;
                        Status = CacheStatus.processing;
                        res = new DetectionPlotData(_document, request.qValue, _tokenSource.Token, ReportProgress);
                        Status = CacheStatus.idle;
                        if (res.IsValid)
                        {
                            if (currentSize + res.ReplicateCount >= CACHE_CAPACITY) _datas.TryDequeue(out var dump);
                            _datas.Enqueue(res);
                            _callback.Invoke(res);
                        }
                    }
                }
                catch (Exception)
                {
                    Status = CacheStatus.error;
                    throw;
                }
            }
public bool IsDisposed { get; private set; }
            // Cancels any running computation and tears down the worker and
            // token source. Idempotent.
            public void Dispose()
            {
                // Will only be called from UI thread, so it's safe to not have a lock
                if (!IsDisposed)
                {
                    _tokenSource.Cancel();
                    _stackWorker.Dispose();
                    _tokenSource.Dispose();
                    IsDisposed = true;
                }
            }
}
}
}
| 1 | 13,590 | The extra two empty lines seem unnecessary. Please review your diffs more carefully. | ProteoWizard-pwiz | .cs |
@@ -28,7 +28,7 @@ type API interface {
LiveToken() TokenAPI
SideToken() TokenAPI
TestToken() TestTokenAPI
- OracleUSD() OracleAPI
+ OracleAPI
}
type ProfileRegistryAPI interface { | 1 | package blockchain
import (
"context"
"crypto/ecdsa"
"fmt"
"math/big"
"strings"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/noxiouz/zapctx/ctxlog"
"github.com/pkg/errors"
"github.com/sonm-io/core/blockchain/market"
marketAPI "github.com/sonm-io/core/blockchain/market/api"
pb "github.com/sonm-io/core/proto"
"go.uber.org/zap"
)
// API aggregates access to all SONM smart contracts: the market, blacklist,
// profile registry, event stream, the live/sidechain/test tokens and the
// USD price oracle.
type API interface {
	ProfileRegistry() ProfileRegistryAPI
	Events() EventsAPI
	Market() MarketAPI
	Blacklist() BlacklistAPI
	LiveToken() TokenAPI
	SideToken() TokenAPI
	TestToken() TestTokenAPI
	OracleUSD() OracleAPI
}
type ProfileRegistryAPI interface {
GetValidator(ctx context.Context, validatorID common.Address) (*pb.Validator, error)
GetCertificate(ctx context.Context, certificateID *big.Int) (*pb.Certificate, error)
}
type EventsAPI interface {
GetEvents(ctx context.Context, fromBlockInitial *big.Int) (chan *Event, error)
}
// MarketAPI provides access to the on-chain Market contract: deals, orders,
// billing and master/worker relationships. Methods returning a channel are
// asynchronous and deliver exactly one result.
type MarketAPI interface {
	// OpenDeal opens a deal between the given ask and bid orders.
	// NOTE: parameter renamed from the "bigID" typo to bidID, matching the
	// implementation; interface parameter names are documentation only.
	OpenDeal(ctx context.Context, key *ecdsa.PrivateKey, askID, bidID *big.Int) <-chan DealOrError
	// CloseDeal closes the deal, optionally blacklisting the counterparty.
	CloseDeal(ctx context.Context, key *ecdsa.PrivateKey, dealID *big.Int, blacklisted bool) <-chan error
	GetDealInfo(ctx context.Context, dealID *big.Int) (*pb.Deal, error)
	GetDealsAmount(ctx context.Context) (*big.Int, error)
	PlaceOrder(ctx context.Context, key *ecdsa.PrivateKey, order *pb.Order) <-chan OrderOrError
	CancelOrder(ctx context.Context, key *ecdsa.PrivateKey, id *big.Int) <-chan error
	GetOrderInfo(ctx context.Context, orderID *big.Int) (*pb.Order, error)
	GetOrdersAmount(ctx context.Context) (*big.Int, error)
	// Bill requests a payout for the given deal.
	Bill(ctx context.Context, key *ecdsa.PrivateKey, dealID *big.Int) <-chan error
	RegisterWorker(ctx context.Context, key *ecdsa.PrivateKey, master common.Address) <-chan error
	ConfirmWorker(ctx context.Context, key *ecdsa.PrivateKey, slave common.Address) <-chan error
	RemoveWorker(ctx context.Context, key *ecdsa.PrivateKey, master, slave common.Address) <-chan error
	GetMaster(ctx context.Context, slave common.Address) (common.Address, error)
	GetDealChangeRequestInfo(ctx context.Context, dealID *big.Int) (*pb.DealChangeRequest, error)
	GetNumBenchmarks(ctx context.Context) (uint64, error)
}
type BlacklistAPI interface {
Check(ctx context.Context, who, whom common.Address) (bool, error)
Add(ctx context.Context, key *ecdsa.PrivateKey, who, whom common.Address) (*types.Transaction, error)
Remove(ctx context.Context, key *ecdsa.PrivateKey, whom common.Address) error
AddMaster(ctx context.Context, key *ecdsa.PrivateKey, root common.Address) (*types.Transaction, error)
RemoveMaster(ctx context.Context, key *ecdsa.PrivateKey, root common.Address) (*types.Transaction, error)
SetMarketAddress(ctx context.Context, key *ecdsa.PrivateKey, market common.Address) (*types.Transaction, error)
}
// TokenAPI is a high-level interface over an ERC20-compatible token with full
// functionality. The standard is described at:
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-20-token-standard.md
type TokenAPI interface {
	// Approve - add allowance from caller to other contract to spend tokens
	Approve(ctx context.Context, key *ecdsa.PrivateKey, to string, amount *big.Int) (*types.Transaction, error)
	// Transfer token from caller
	Transfer(ctx context.Context, key *ecdsa.PrivateKey, to string, amount *big.Int) (*types.Transaction, error)
	// TransferFrom fallback function for contracts to transfer your allowance
	TransferFrom(ctx context.Context, key *ecdsa.PrivateKey, from string, to string, amount *big.Int) (*types.Transaction, error)
	// BalanceOf returns balance of given address
	BalanceOf(ctx context.Context, address string) (*big.Int, error)
	// AllowanceOf returns allowance of given address to spender account
	AllowanceOf(ctx context.Context, from string, to string) (*big.Int, error)
	// TotalSupply - total amount of emitted token
	TotalSupply(ctx context.Context) (*big.Int, error)
}
// TestTokenAPI provides test-only token faucet functionality.
type TestTokenAPI interface {
	// GetTokens - sends 100 SNMT tokens to the message caller.
	// This function was added for MVP purposes and will be removed later.
	GetTokens(ctx context.Context, key *ecdsa.PrivateKey) (*types.Transaction, error)
}
// OracleAPI manage price relation between some currency and SNM token
type OracleAPI interface {
// SetCurrentPrice sets current price relation between some currency and SONM token
SetCurrentPrice(ctx context.Context, key *ecdsa.PrivateKey, price *big.Int) (*types.Transaction, error)
// GetCurrentPrice returns current price relation between some currency and SONM token
GetCurrentPrice(ctx context.Context) (*big.Int, error)
}
// BasicAPI is the default implementation of API, bundling one client per
// contract. Construct it with NewAPI.
type BasicAPI struct {
	market          MarketAPI
	liveToken       TokenAPI
	sideToken       TokenAPI
	testToken       TestTokenAPI
	blacklist       BlacklistAPI
	profileRegistry ProfileRegistryAPI
	events          EventsAPI
	oracle          OracleAPI
}
// NewAPI constructs a BasicAPI wired to both the main chain (live/test
// tokens) and the sidechain (market, blacklist, profile registry, side
// token, events, oracle), applying any supplied options over the defaults.
// It fails fast on the first client or contract binding error.
func NewAPI(opts ...Option) (API, error) {
	defaults := defaultOptions()
	for _, o := range opts {
		o(defaults)
	}
	client, err := initEthClient(defaults.apiEndpoint)
	if err != nil {
		return nil, err
	}
	liveToken, err := NewStandardToken(client, market.SNMAddr(), defaults.gasPrice)
	if err != nil {
		return nil, err
	}
	testToken, err := NewTestToken(client, market.SNMAddr(), defaults.gasPrice)
	if err != nil {
		return nil, err
	}
	clientSidechain, err := initEthClient(defaults.apiSidechainEndpoint)
	if err != nil {
		return nil, err
	}
	customClientSidechain, err := initCustomEthClient(defaults.apiSidechainEndpoint)
	if err != nil {
		return nil, err
	}
	blacklist, err := NewBasicBlacklist(customClientSidechain, market.BlacklistAddr(), defaults.logParsePeriod,
		defaults.gasPriceSidechain, defaults.blockConfirmations)
	if err != nil {
		return nil, err
	}
	marketApi, err := NewBasicMarket(customClientSidechain, market.MarketAddr(), defaults.gasPriceSidechain, defaults.logParsePeriod, defaults.blockConfirmations)
	if err != nil {
		return nil, err
	}
	profileRegistry, err := NewProfileRegistry(clientSidechain, market.ProfileRegistryAddr(), defaults.gasPriceSidechain)
	if err != nil {
		return nil, err
	}
	sideToken, err := NewStandardToken(clientSidechain, market.SNMSidechainAddr(), defaults.gasPriceSidechain)
	if err != nil {
		return nil, err
	}
	// TODO: the events API derives its logger from context.Background();
	// consider accepting a caller-supplied context or logger instead.
	events, err := NewEventsAPI(clientSidechain, ctxlog.GetLogger(context.Background()))
	if err != nil {
		return nil, err
	}
	oracle, err := NewOracleUSDAPI(market.OracleUsdAddr(), clientSidechain, defaults.gasPriceSidechain)
	if err != nil {
		return nil, err
	}
	return &BasicAPI{
		market:          marketApi,
		blacklist:       blacklist,
		profileRegistry: profileRegistry,
		liveToken:       liveToken,
		sideToken:       sideToken,
		testToken:       testToken,
		events:          events,
		oracle:          oracle,
	}, nil
}
// Market returns the market contract API.
func (api *BasicAPI) Market() MarketAPI {
	return api.market
}

// LiveToken returns the main-chain SNM token API.
func (api *BasicAPI) LiveToken() TokenAPI {
	return api.liveToken
}

// SideToken returns the sidechain SNM token API.
func (api *BasicAPI) SideToken() TokenAPI {
	return api.sideToken
}

// TestToken returns the test token faucet API.
func (api *BasicAPI) TestToken() TestTokenAPI {
	return api.testToken
}

// Blacklist returns the blacklist contract API.
func (api *BasicAPI) Blacklist() BlacklistAPI {
	return api.blacklist
}

// ProfileRegistry returns the profile registry contract API.
func (api *BasicAPI) ProfileRegistry() ProfileRegistryAPI {
	return api.profileRegistry
}

// Events returns the contract event stream API.
func (api *BasicAPI) Events() EventsAPI {
	return api.events
}

// OracleUSD returns the USD price oracle API.
func (api *BasicAPI) OracleUSD() OracleAPI {
	return api.oracle
}
// BasicMarketAPI is the default MarketAPI implementation backed by the
// sidechain Market contract binding.
type BasicMarketAPI struct {
	client             CustomEthereumClient
	marketContract     *marketAPI.Market
	gasPrice           int64
	logParsePeriod     time.Duration
	blockConfirmations int64
}
// NewBasicMarket binds a MarketAPI implementation to the Market contract
// deployed at the given address.
func NewBasicMarket(client CustomEthereumClient, address common.Address, gasPrice int64, logParsePeriod time.Duration, blockConfirmations int64) (MarketAPI, error) {
	contract, err := marketAPI.NewMarket(address, client)
	if err != nil {
		return nil, err
	}
	api := &BasicMarketAPI{
		client:             client,
		marketContract:     contract,
		gasPrice:           gasPrice,
		logParsePeriod:     logParsePeriod,
		blockConfirmations: blockConfirmations,
	}
	return api, nil
}
// OpenDeal asynchronously opens a deal between the given ask and bid orders.
// Exactly one DealOrError is delivered on the returned (unbuffered) channel.
func (api *BasicMarketAPI) OpenDeal(ctx context.Context, key *ecdsa.PrivateKey, askID, bidID *big.Int) <-chan DealOrError {
	result := make(chan DealOrError)
	go api.openDeal(ctx, key, askID, bidID, result)
	return result
}
// openDeal submits the OpenDeal transaction, waits for its receipt, extracts
// the new deal ID from the DealOpened log, fetches the full deal info, and
// delivers exactly one DealOrError on ch. Any failure short-circuits with
// the error.
func (api *BasicMarketAPI) openDeal(ctx context.Context, key *ecdsa.PrivateKey, askID, bidID *big.Int, ch chan DealOrError) {
	opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
	tx, err := api.marketContract.OpenDeal(opts, askID, bidID)
	if err != nil {
		ch <- DealOrError{nil, err}
		return
	}
	receipt, err := WaitTransactionReceipt(ctx, api.client, api.blockConfirmations, api.logParsePeriod, tx)
	if err != nil {
		ch <- DealOrError{nil, err}
		return
	}
	logs, err := FindLogByTopic(receipt, market.DealOpenedTopic)
	if err != nil {
		ch <- DealOrError{nil, err}
		return
	}
	// The deal ID is the first indexed topic of the DealOpened event.
	id, err := extractBig(logs.Topics, 1)
	if err != nil {
		ch <- DealOrError{nil, err}
		return
	}
	deal, err := api.GetDealInfo(ctx, id)
	ch <- DealOrError{deal, err}
}
// CloseDeal asynchronously closes the given deal, optionally blacklisting
// the counterparty. A single error value (nil on success) is delivered on
// the returned (unbuffered) channel.
func (api *BasicMarketAPI) CloseDeal(ctx context.Context, key *ecdsa.PrivateKey, dealID *big.Int, blacklisted bool) <-chan error {
	result := make(chan error)
	go api.closeDeal(ctx, key, dealID, blacklisted, result)
	return result
}
func (api *BasicMarketAPI) closeDeal(ctx context.Context, key *ecdsa.PrivateKey, dealID *big.Int, blacklisted bool, ch chan error) {
opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
tx, err := api.marketContract.CloseDeal(opts, dealID, blacklisted)
if err != nil {
ch <- err
return
}
_, err = waitForTransactionResult(ctx, api.client, api.logParsePeriod, tx, market.DealUpdatedTopic)
if err != nil {
ch <- err
return
}
ch <- nil
}
// GetDealInfo fetches both the static deal info and the mutable deal params
// from the market contract and merges them into a single protobuf Deal.
// It returns an error when no deal with the given ID exists.
func (api *BasicMarketAPI) GetDealInfo(ctx context.Context, dealID *big.Int) (*pb.Deal, error) {
	deal1, err := api.marketContract.GetDealInfo(getCallOptions(ctx), dealID)
	if err != nil {
		return nil, err
	}
	// Both order references being zero is used as the "no such deal" marker.
	noAsk := deal1.AskID.Cmp(big.NewInt(0)) == 0
	noBid := deal1.BidID.Cmp(big.NewInt(0)) == 0
	if noAsk && noBid {
		return nil, fmt.Errorf("no deal with id = %s", dealID.String())
	}
	deal2, err := api.marketContract.GetDealParams(getCallOptions(ctx), dealID)
	if err != nil {
		return nil, err
	}
	benchmarks, err := pb.NewBenchmarks(deal1.Benchmarks)
	if err != nil {
		return nil, err
	}
	return &pb.Deal{
		Id:             pb.NewBigInt(dealID),
		Benchmarks:     benchmarks,
		SupplierID:     pb.NewEthAddress(deal1.SupplierID),
		ConsumerID:     pb.NewEthAddress(deal1.ConsumerID),
		MasterID:       pb.NewEthAddress(deal1.MasterID),
		AskID:          pb.NewBigInt(deal1.AskID),
		BidID:          pb.NewBigInt(deal1.BidID),
		Duration:       deal2.Duration.Uint64(),
		Price:          pb.NewBigInt(deal2.Price),
		StartTime:      &pb.Timestamp{Seconds: deal1.StartTime.Int64()},
		EndTime:        &pb.Timestamp{Seconds: deal2.EndTime.Int64()},
		Status:         pb.DealStatus(deal2.Status),
		BlockedBalance: pb.NewBigInt(deal2.BlockedBalance),
		TotalPayout:    pb.NewBigInt(deal2.TotalPayout),
		LastBillTS:     &pb.Timestamp{Seconds: deal2.LastBillTS.Int64()},
	}, nil
}
// GetDealsAmount returns the deal counter reported by the market contract.
func (api *BasicMarketAPI) GetDealsAmount(ctx context.Context) (*big.Int, error) {
	return api.marketContract.GetDealsAmount(getCallOptions(ctx))
}
// PlaceOrder asynchronously submits the given order to the market contract.
// The returned channel receives exactly one OrderOrError: the freshly placed
// order re-read from the chain, or the failure reason.
func (api *BasicMarketAPI) PlaceOrder(ctx context.Context, key *ecdsa.PrivateKey, order *pb.Order) <-chan OrderOrError {
	ch := make(chan OrderOrError, 0)
	go api.placeOrder(ctx, key, order, ch)
	return ch
}
// placeOrder submits the PlaceOrder transaction and reports the result on ch.
func (api *BasicMarketAPI) placeOrder(ctx context.Context, key *ecdsa.PrivateKey, order *pb.Order, ch chan OrderOrError) {
	opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
	// The contract expects netflags in their packed form and the tag as a
	// fixed 32-byte array, so convert from the protobuf representation first.
	fixedNetflags := pb.UintToNetflags(order.Netflags)
	var fixedTag [32]byte
	copy(fixedTag[:], order.Tag[:])
	tx, err := api.marketContract.PlaceOrder(opts,
		uint8(order.OrderType),
		order.CounterpartyID.Unwrap(),
		big.NewInt(int64(order.Duration)),
		order.Price.Unwrap(),
		fixedNetflags,
		uint8(order.IdentityLevel),
		common.HexToAddress(order.Blacklist),
		fixedTag,
		order.GetBenchmarks().ToArray(),
	)
	if err != nil {
		ch <- OrderOrError{nil, err}
		return
	}
	// Wait for enough block confirmations, then pull the assigned order ID out
	// of the OrderPlaced log (indexed topic 1).
	receipt, err := WaitTransactionReceipt(ctx, api.client, api.blockConfirmations, api.logParsePeriod, tx)
	if err != nil {
		ch <- OrderOrError{nil, err}
		return
	}
	logs, err := FindLogByTopic(receipt, market.OrderPlacedTopic)
	if err != nil {
		ch <- OrderOrError{nil, err}
		return
	}
	id, err := extractBig(logs.Topics, 1)
	if err != nil {
		ch <- OrderOrError{nil, err}
		return
	}
	orderInfo, err := api.GetOrderInfo(ctx, id)
	ch <- OrderOrError{orderInfo, err}
}
// CancelOrder asynchronously cancels the order with the given ID. The returned
// channel receives exactly one value: nil on success or the failure reason.
func (api *BasicMarketAPI) CancelOrder(ctx context.Context, key *ecdsa.PrivateKey, id *big.Int) <-chan error {
	result := make(chan error)
	go api.cancelOrder(ctx, key, id, result)
	return result
}
// cancelOrder submits the CancelOrder transaction, waits for the confirming
// OrderUpdated log and reports the outcome on result.
func (api *BasicMarketAPI) cancelOrder(ctx context.Context, key *ecdsa.PrivateKey, id *big.Int, result chan error) {
	txOpts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
	tx, err := api.marketContract.CancelOrder(txOpts, id)
	if err == nil {
		_, err = waitForTransactionResult(ctx, api.client, api.logParsePeriod, tx, market.OrderUpdatedTopic)
	}
	// err is nil on success, so a single send covers every path.
	result <- err
}
// GetOrderInfo fetches both the static order info and the mutable order params
// from the market contract and merges them into a single protobuf Order.
// It returns an error when no order with the given ID exists.
func (api *BasicMarketAPI) GetOrderInfo(ctx context.Context, orderID *big.Int) (*pb.Order, error) {
	order1, err := api.marketContract.GetOrderInfo(getCallOptions(ctx), orderID)
	if err != nil {
		return nil, err
	}
	// A zero author combined with the ANY order type marks a nonexistent order.
	noAuthor := order1.Author.Big().Cmp(big.NewInt(0)) == 0
	noType := pb.OrderType(order1.OrderType) == pb.OrderType_ANY
	if noAuthor && noType {
		return nil, fmt.Errorf("no order with id = %s", orderID.String())
	}
	order2, err := api.marketContract.GetOrderParams(getCallOptions(ctx), orderID)
	if err != nil {
		return nil, err
	}
	// Convert the packed on-chain representations back to protobuf form.
	netflags := pb.NetflagsToUint(order1.Netflags)
	benchmarks, err := pb.NewBenchmarks(order1.Benchmarks)
	if err != nil {
		return nil, err
	}
	return &pb.Order{
		Id:             pb.NewBigInt(orderID),
		DealID:         pb.NewBigInt(order2.DealID),
		OrderType:      pb.OrderType(order1.OrderType),
		OrderStatus:    pb.OrderStatus(order2.OrderStatus),
		AuthorID:       pb.NewEthAddress(order1.Author),
		CounterpartyID: pb.NewEthAddress(order1.Counterparty),
		Duration:       order1.Duration.Uint64(),
		Price:          pb.NewBigInt(order1.Price),
		Netflags:       netflags,
		IdentityLevel:  pb.IdentityLevel(order1.IdentityLevel),
		Blacklist:      order1.Blacklist.String(),
		Tag:            order1.Tag[:],
		Benchmarks:     benchmarks,
		FrozenSum:      pb.NewBigInt(order1.FrozenSum),
	}, nil
}
// GetOrdersAmount returns the order counter reported by the market contract.
func (api *BasicMarketAPI) GetOrdersAmount(ctx context.Context) (*big.Int, error) {
	return api.marketContract.GetOrdersAmount(getCallOptions(ctx))
}
// Bill asynchronously triggers billing for the given deal. The returned
// channel receives exactly one value: nil on success or the failure reason.
func (api *BasicMarketAPI) Bill(ctx context.Context, key *ecdsa.PrivateKey, dealID *big.Int) <-chan error {
	result := make(chan error)
	go api.bill(ctx, key, dealID, result)
	return result
}
// bill submits the Bill transaction, waits for the confirming Billed log and
// reports the outcome on result.
func (api *BasicMarketAPI) bill(ctx context.Context, key *ecdsa.PrivateKey, dealID *big.Int, result chan error) {
	txOpts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
	tx, err := api.marketContract.Bill(txOpts, dealID)
	if err == nil {
		_, err = waitForTransactionResult(ctx, api.client, api.logParsePeriod, tx, market.BilledTopic)
	}
	// err is nil on success, so a single send covers every path.
	result <- err
}
// RegisterWorker asynchronously announces the sender as a worker for the given
// master address. The returned channel receives exactly one value: nil on
// success or the failure reason.
func (api *BasicMarketAPI) RegisterWorker(ctx context.Context, key *ecdsa.PrivateKey, master common.Address) <-chan error {
	ch := make(chan error, 0)
	go api.registerWorker(ctx, key, master, ch)
	return ch
}
func (api *BasicMarketAPI) registerWorker(ctx context.Context, key *ecdsa.PrivateKey, master common.Address, ch chan error) {
	opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
	tx, err := api.marketContract.RegisterWorker(opts, master)
	api.finishWorkerTx(ctx, ch, tx, err, market.WorkerAnnouncedTopic)
}
// ConfirmWorker asynchronously confirms a previously announced worker (slave)
// for the sender. The returned channel receives exactly one value: nil on
// success or the failure reason.
func (api *BasicMarketAPI) ConfirmWorker(ctx context.Context, key *ecdsa.PrivateKey, slave common.Address) <-chan error {
	ch := make(chan error, 0)
	go api.confirmWorker(ctx, key, slave, ch)
	return ch
}
func (api *BasicMarketAPI) confirmWorker(ctx context.Context, key *ecdsa.PrivateKey, slave common.Address, ch chan error) {
	opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
	tx, err := api.marketContract.ConfirmWorker(opts, slave)
	api.finishWorkerTx(ctx, ch, tx, err, market.WorkerConfirmedTopic)
}
// RemoveWorker asynchronously removes the given slave from the given master.
// The returned channel receives exactly one value: nil on success or the
// failure reason.
func (api *BasicMarketAPI) RemoveWorker(ctx context.Context, key *ecdsa.PrivateKey, master, slave common.Address) <-chan error {
	ch := make(chan error, 0)
	go api.removeWorker(ctx, key, master, slave, ch)
	return ch
}
func (api *BasicMarketAPI) removeWorker(ctx context.Context, key *ecdsa.PrivateKey, master, slave common.Address, ch chan error) {
	opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
	tx, err := api.marketContract.RemoveWorker(opts, master, slave)
	api.finishWorkerTx(ctx, ch, tx, err, market.WorkerRemovedTopic)
}
// finishWorkerTx is the shared tail of the worker management transactions: it
// forwards a submission error immediately, otherwise waits for a confirming
// log with the given topic and reports the final outcome (nil on success).
func (api *BasicMarketAPI) finishWorkerTx(ctx context.Context, ch chan error, tx *types.Transaction, err error, topic common.Hash) {
	if err != nil {
		ch <- err
		return
	}
	if _, err := waitForTransactionResult(ctx, api.client, api.logParsePeriod, tx, topic); err != nil {
		ch <- err
		return
	}
	ch <- nil
}
// GetMaster returns the master address registered for the given slave worker.
func (api *BasicMarketAPI) GetMaster(ctx context.Context, slave common.Address) (common.Address, error) {
	return api.marketContract.GetMaster(getCallOptions(ctx), slave)
}
// GetDealChangeRequestInfo reads a deal change request from the market
// contract and converts it to its protobuf representation.
func (api *BasicMarketAPI) GetDealChangeRequestInfo(ctx context.Context, changeRequestID *big.Int) (*pb.DealChangeRequest, error) {
	changeRequest, err := api.marketContract.GetChangeRequestInfo(getCallOptions(ctx), changeRequestID)
	if err != nil {
		return nil, err
	}
	return &pb.DealChangeRequest{
		Id:          pb.NewBigInt(changeRequestID),
		DealID:      pb.NewBigInt(changeRequest.DealID),
		RequestType: pb.OrderType(changeRequest.RequestType),
		Duration:    changeRequest.Duration.Uint64(),
		Price:       pb.NewBigInt(changeRequest.Price),
		Status:      pb.ChangeRequestStatus(changeRequest.Status),
	}, nil
}
// GetNumBenchmarks returns the number of benchmark values the market contract
// expects, as a uint64. An error is returned if the on-chain value does not
// fit into a uint64.
func (api *BasicMarketAPI) GetNumBenchmarks(ctx context.Context) (uint64, error) {
	num, err := api.marketContract.GetBenchmarksQuantity(getCallOptions(ctx))
	if err != nil {
		return 0, err
	}
	if !num.IsUint64() {
		// The check above is IsUint64, so the error must name uint64 (the
		// previous message incorrectly said int64).
		return 0, errors.New("benchmarks quantity overflows uint64")
	}
	return num.Uint64(), nil
}
// ProfileRegistry talks to the profile registry smart contract.
type ProfileRegistry struct {
	client                  EthereumClientBackend
	profileRegistryContract *marketAPI.ProfileRegistry
	gasPrice                int64
}
// NewProfileRegistry binds a ProfileRegistryAPI implementation to the profile
// registry contract deployed at the given address.
func NewProfileRegistry(client EthereumClientBackend, address common.Address, gasPrice int64) (ProfileRegistryAPI, error) {
	profileRegistryContract, err := marketAPI.NewProfileRegistry(address, client)
	if err != nil {
		return nil, err
	}
	return &ProfileRegistry{
		client:                  client,
		profileRegistryContract: profileRegistryContract,
		gasPrice:                gasPrice,
	}, nil
}
// GetValidator reads the level of the given validator from the contract and
// wraps it into a protobuf Validator.
func (api *ProfileRegistry) GetValidator(ctx context.Context, validatorID common.Address) (*pb.Validator, error) {
	level, err := api.profileRegistryContract.GetValidatorLevel(getCallOptions(ctx), validatorID)
	if err != nil {
		return nil, err
	}
	return &pb.Validator{
		Id:    pb.NewEthAddress(validatorID),
		Level: uint64(level),
	}, nil
}
// GetCertificate reads a certificate record from the contract and wraps it
// into a protobuf Certificate.
func (api *ProfileRegistry) GetCertificate(ctx context.Context, certificateID *big.Int) (*pb.Certificate, error) {
	validatorID, ownerID, attribute, value, err := api.profileRegistryContract.GetCertificate(getCallOptions(ctx), certificateID)
	if err != nil {
		return nil, err
	}
	return &pb.Certificate{
		ValidatorID: pb.NewEthAddress(validatorID),
		OwnerID:     pb.NewEthAddress(ownerID),
		Attribute:   attribute.Uint64(),
		Value:       value,
	}, nil
}
// BasicBlacklistAPI talks to the blacklist smart contract on the sidechain.
type BasicBlacklistAPI struct {
	client             CustomEthereumClient
	blacklistContract  *marketAPI.Blacklist
	gasPrice           int64
	logParsePeriod     time.Duration
	blockConfirmations int64
}
// NewBasicBlacklist binds a BlacklistAPI implementation to the blacklist
// contract deployed at the given address.
func NewBasicBlacklist(client CustomEthereumClient, address common.Address, logParsePeriod time.Duration, gasPrice, blockConfirmations int64) (BlacklistAPI, error) {
	blacklistContract, err := marketAPI.NewBlacklist(address, client)
	if err != nil {
		return nil, err
	}
	return &BasicBlacklistAPI{
		client:             client,
		blacklistContract:  blacklistContract,
		gasPrice:           gasPrice,
		logParsePeriod:     logParsePeriod,
		blockConfirmations: blockConfirmations,
	}, nil
}
// Check runs the contract's blacklist check for the (who, whom) pair
// (presumably whether `who` has `whom` blacklisted -- confirm against the
// contract source).
func (api *BasicBlacklistAPI) Check(ctx context.Context, who, whom common.Address) (bool, error) {
	return api.blacklistContract.Check(getCallOptions(ctx), who, whom)
}
// Add submits the contract's Add transaction for the (who, whom) pair. Unlike
// Remove, it returns the raw transaction without waiting for confirmation.
func (api *BasicBlacklistAPI) Add(ctx context.Context, key *ecdsa.PrivateKey, who, whom common.Address) (*types.Transaction, error) {
	opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
	return api.blacklistContract.Add(opts, who, whom)
}
// Remove takes `whom` off the sender's blacklist and blocks until the removal
// is confirmed by a RemovedFromBlacklist log on chain.
func (api *BasicBlacklistAPI) Remove(ctx context.Context, key *ecdsa.PrivateKey, whom common.Address) error {
	opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
	tx, err := api.blacklistContract.Remove(opts, whom)
	if err != nil {
		return err
	}
	rec, err := WaitTransactionReceipt(ctx, api.client, api.blockConfirmations, api.logParsePeriod, tx)
	if err != nil {
		return err
	}
	if _, err := FindLogByTopic(rec, market.RemovedFromBlacklistTopic); err != nil {
		return err
	}
	return nil
}
// AddMaster submits a transaction adding `root` as a blacklist master; the raw
// transaction is returned without waiting for confirmation.
func (api *BasicBlacklistAPI) AddMaster(ctx context.Context, key *ecdsa.PrivateKey, root common.Address) (*types.Transaction, error) {
	opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
	return api.blacklistContract.AddMaster(opts, root)
}
// RemoveMaster submits a transaction removing `root` from the blacklist
// masters; the raw transaction is returned without waiting for confirmation.
func (api *BasicBlacklistAPI) RemoveMaster(ctx context.Context, key *ecdsa.PrivateKey, root common.Address) (*types.Transaction, error) {
	opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
	return api.blacklistContract.RemoveMaster(opts, root)
}
// SetMarketAddress submits a transaction pointing the blacklist contract at
// the given market contract address.
func (api *BasicBlacklistAPI) SetMarketAddress(ctx context.Context, key *ecdsa.PrivateKey, market common.Address) (*types.Transaction, error) {
	opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
	return api.blacklistContract.SetMarketAddress(opts, market)
}
// StandardTokenApi wraps the standard (ERC20-style) token contract.
type StandardTokenApi struct {
	client        EthereumClientBackend
	tokenContract *marketAPI.StandardToken
	gasPrice      int64
}
// NewStandardToken binds a TokenAPI implementation to the token contract
// deployed at the given address.
func NewStandardToken(client EthereumClientBackend, address common.Address, gasPrice int64) (TokenAPI, error) {
	tokenContract, err := marketAPI.NewStandardToken(address, client)
	if err != nil {
		return nil, err
	}
	return &StandardTokenApi{
		client:        client,
		tokenContract: tokenContract,
		gasPrice:      gasPrice,
	}, nil
}
// BalanceOf returns the token balance of the given hex-encoded address.
func (api *StandardTokenApi) BalanceOf(ctx context.Context, address string) (*big.Int, error) {
	return api.tokenContract.BalanceOf(getCallOptions(ctx), common.HexToAddress(address))
}
// AllowanceOf returns the token allowance granted by `from` to `to`.
func (api *StandardTokenApi) AllowanceOf(ctx context.Context, from string, to string) (*big.Int, error) {
	return api.tokenContract.Allowance(getCallOptions(ctx), common.HexToAddress(from), common.HexToAddress(to))
}
// Approve submits a transaction allowing `to` to spend up to `amount` of the
// sender's tokens. Note that token calls use defaultGasLimit rather than the
// sidechain gas limit.
func (api *StandardTokenApi) Approve(ctx context.Context, key *ecdsa.PrivateKey, to string, amount *big.Int) (*types.Transaction, error) {
	opts := getTxOpts(ctx, key, defaultGasLimit, api.gasPrice)
	return api.tokenContract.Approve(opts, common.HexToAddress(to), amount)
}
// Transfer submits a token transfer of `amount` from the sender to `to`.
func (api *StandardTokenApi) Transfer(ctx context.Context, key *ecdsa.PrivateKey, to string, amount *big.Int) (*types.Transaction, error) {
	opts := getTxOpts(ctx, key, defaultGasLimit, api.gasPrice)
	return api.tokenContract.Transfer(opts, common.HexToAddress(to), amount)
}
// TransferFrom submits a delegated transfer of `amount` from `from` to `to`.
func (api *StandardTokenApi) TransferFrom(ctx context.Context, key *ecdsa.PrivateKey, from string, to string, amount *big.Int) (*types.Transaction, error) {
	opts := getTxOpts(ctx, key, defaultGasLimit, api.gasPrice)
	return api.tokenContract.TransferFrom(opts, common.HexToAddress(from), common.HexToAddress(to), amount)
}
// TotalSupply returns the total token supply reported by the contract.
func (api *StandardTokenApi) TotalSupply(ctx context.Context) (*big.Int, error) {
	return api.tokenContract.TotalSupply(getCallOptions(ctx))
}
// TestTokenApi wraps the SNMT test token contract.
type TestTokenApi struct {
	client        EthereumClientBackend
	tokenContract *marketAPI.SNMTToken
	gasPrice      int64
}
// NewTestToken binds a TestTokenAPI implementation to the SNMT test token
// contract deployed at the given address.
func NewTestToken(client EthereumClientBackend, address common.Address, gasPrice int64) (TestTokenAPI, error) {
	tokenContract, err := marketAPI.NewSNMTToken(address, client)
	if err != nil {
		return nil, err
	}
	return &TestTokenApi{
		client:        client,
		tokenContract: tokenContract,
		gasPrice:      gasPrice,
	}, nil
}
// GetTokens submits the contract's GetTokens transaction (presumably a
// faucet-style grant of test tokens to the sender -- confirm against the
// contract source).
func (api *TestTokenApi) GetTokens(ctx context.Context, key *ecdsa.PrivateKey) (*types.Transaction, error) {
	opts := getTxOpts(ctx, key, defaultGasLimit, api.gasPrice)
	return api.tokenContract.GetTokens(opts)
}
// BasicEventsAPI reads contract logs from the chain and decodes them using the
// market and profile registry ABIs.
type BasicEventsAPI struct {
	client      EthereumClientBackend
	logger      *zap.Logger
	marketABI   abi.ABI
	profilesABI abi.ABI
}
// NewEventsAPI parses the market and profile registry ABIs once up front and
// returns an events client built on top of them.
func NewEventsAPI(client EthereumClientBackend, logger *zap.Logger) (EventsAPI, error) {
	marketABI, err := abi.JSON(strings.NewReader(marketAPI.MarketABI))
	if err != nil {
		return nil, err
	}
	profilesABI, err := abi.JSON(strings.NewReader(marketAPI.ProfileRegistryABI))
	if err != nil {
		return nil, err
	}
	return &BasicEventsAPI{
		client:      client,
		logger:      logger,
		marketABI:   marketABI,
		profilesABI: profilesABI,
	}, nil
}
// GetEvents starts a background poller that reads market, blacklist and
// profile registry contract logs from the chain, starting at fromBlockInitial,
// and publishes decoded events to the returned channel (buffered to 128
// entries). Failures are published as ErrorData events rather than closing the
// channel. The poller exits when ctx is cancelled; the channel is never closed.
func (api *BasicEventsAPI) GetEvents(ctx context.Context, fromBlockInitial *big.Int) (chan *Event, error) {
	var (
		topics     [][]common.Hash
		eventTopic = []common.Hash{
			market.DealOpenedTopic,
			market.DealUpdatedTopic,
			market.OrderPlacedTopic,
			market.OrderUpdatedTopic,
			market.DealChangeRequestSentTopic,
			market.DealChangeRequestUpdatedTopic,
			market.BilledTopic,
			market.WorkerAnnouncedTopic,
			// WorkerConfirmedTopic was previously listed twice; the duplicate
			// filter entry was redundant and has been removed.
			market.WorkerConfirmedTopic,
			market.WorkerRemovedTopic,
			market.AddedToBlacklistTopic,
			market.RemovedFromBlacklistTopic,
			market.ValidatorCreatedTopic,
			market.ValidatorDeletedTopic,
			market.CertificateCreatedTopic,
		}
		out = make(chan *Event, 128)
	)
	topics = append(topics, eventTopic)
	go func() {
		var (
			lastLogBlockNumber = fromBlockInitial.Uint64()
			fromBlock          = fromBlockInitial.Uint64()
			tk                 = time.NewTicker(time.Second)
		)
		// Release the ticker when the poller exits, otherwise it leaks.
		defer tk.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-tk.C:
				logs, err := api.client.FilterLogs(ctx, ethereum.FilterQuery{
					Topics:    topics,
					FromBlock: big.NewInt(0).SetUint64(fromBlock),
					Addresses: []common.Address{
						market.MarketAddr(),
						market.BlacklistAddr(),
						market.ProfileRegistryAddr(),
					},
				})
				if err != nil {
					out <- &Event{
						Data:        &ErrorData{Err: errors.Wrap(err, "failed to FilterLogs")},
						BlockNumber: fromBlock,
					}
					// Nothing was fetched; retry on the next tick instead of
					// falling through with a nil log slice.
					continue
				}
				numLogs := len(logs)
				if numLogs < 1 {
					api.logger.Info("no logs, skipping")
					continue
				}
				var eventTS uint64
				for _, log := range logs {
					// Skip logs from the last seen block.
					if log.BlockNumber == fromBlock {
						continue
					}
					// Update eventTS if we've got a new block.
					if lastLogBlockNumber != log.BlockNumber {
						lastLogBlockNumber = log.BlockNumber
						block, err := api.client.BlockByNumber(ctx, big.NewInt(0).SetUint64(lastLogBlockNumber))
						if err != nil {
							api.logger.Warn("failed to get event timestamp", zap.Error(err),
								zap.Uint64("blockNumber", lastLogBlockNumber))
						} else {
							eventTS = block.Time().Uint64()
						}
					}
					api.processLog(log, eventTS, out)
				}
				fromBlock = logs[numLogs-1].BlockNumber
			}
		}
	}()
	return out, nil
}
// processLog decodes a single contract log entry into a typed event and pushes
// it to out. Malformed or unknown entries are reported as ErrorData events
// instead of being dropped, so consumers can observe decoding failures.
func (api *BasicEventsAPI) processLog(log types.Log, eventTS uint64, out chan *Event) {
	// This should never happen, but it's ethereum, and things might happen.
	if len(log.Topics) < 1 {
		out <- &Event{
			Data:        &ErrorData{Err: errors.New("malformed log entry"), Topic: "unknown"},
			BlockNumber: log.BlockNumber,
		}
		return
	}
	sendErr := func(out chan *Event, err error, topic common.Hash) {
		out <- &Event{Data: &ErrorData{Err: err, Topic: topic.String()}, BlockNumber: log.BlockNumber, TS: eventTS}
	}
	sendData := func(data interface{}) {
		out <- &Event{Data: data, BlockNumber: log.BlockNumber, TS: eventTS}
	}
	// The first topic identifies the event type; the remaining topics carry
	// the indexed event arguments.
	var topic = log.Topics[0]
	switch topic {
	// Deal and order lifecycle events carry the entity ID in topic 1.
	case market.DealOpenedTopic:
		id, err := extractBig(log.Topics, 1)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		sendData(&DealOpenedData{ID: id})
	case market.DealUpdatedTopic:
		id, err := extractBig(log.Topics, 1)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		sendData(&DealUpdatedData{ID: id})
	case market.DealChangeRequestSentTopic:
		id, err := extractBig(log.Topics, 1)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		sendData(&DealChangeRequestSentData{ID: id})
	case market.DealChangeRequestUpdatedTopic:
		id, err := extractBig(log.Topics, 1)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		sendData(&DealChangeRequestUpdatedData{ID: id})
	case market.BilledTopic:
		// Billed carries its payload in the log data section, so it is
		// unpacked via the market ABI rather than from indexed topics.
		var billedData = &BilledData{}
		if err := api.marketABI.Unpack(billedData, "Billed", log.Data); err != nil {
			sendErr(out, err, topic)
			return
		}
		sendData(billedData)
	case market.OrderPlacedTopic:
		id, err := extractBig(log.Topics, 1)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		sendData(&OrderPlacedData{ID: id})
	case market.OrderUpdatedTopic:
		id, err := extractBig(log.Topics, 1)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		sendData(&OrderUpdatedData{ID: id})
	// Worker lifecycle events carry the slave and master addresses in
	// topics 1 and 2.
	case market.WorkerAnnouncedTopic:
		slaveID, err := extractAddress(log.Topics, 1)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		masterID, err := extractAddress(log.Topics, 2)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		sendData(&WorkerAnnouncedData{SlaveID: slaveID, MasterID: masterID})
	case market.WorkerConfirmedTopic:
		slaveID, err := extractAddress(log.Topics, 1)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		masterID, err := extractAddress(log.Topics, 2)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		sendData(&WorkerConfirmedData{SlaveID: slaveID, MasterID: masterID})
	case market.WorkerRemovedTopic:
		slaveID, err := extractAddress(log.Topics, 1)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		masterID, err := extractAddress(log.Topics, 2)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		sendData(&WorkerRemovedData{SlaveID: slaveID, MasterID: masterID})
	// Blacklist events carry the acting and affected addresses in
	// topics 1 and 2.
	case market.AddedToBlacklistTopic:
		adderID, err := extractAddress(log.Topics, 1)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		addeeID, err := extractAddress(log.Topics, 2)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		sendData(&AddedToBlacklistData{AdderID: adderID, AddeeID: addeeID})
	case market.RemovedFromBlacklistTopic:
		removerID, err := extractAddress(log.Topics, 1)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		removeeID, err := extractAddress(log.Topics, 2)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		sendData(&RemovedFromBlacklistData{RemoverID: removerID, RemoveeID: removeeID})
	case market.ValidatorCreatedTopic:
		id, err := extractAddress(log.Topics, 1)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		sendData(&ValidatorCreatedData{ID: id})
	case market.ValidatorDeletedTopic:
		id, err := extractAddress(log.Topics, 1)
		if err != nil {
			sendErr(out, err, topic)
			return
		}
		sendData(&ValidatorDeletedData{ID: id})
	case market.CertificateCreatedTopic:
		// The certificate ID lives in the data section and is decoded via the
		// profile registry ABI.
		var id = big.NewInt(0)
		if err := api.profilesABI.Unpack(&id, "CertificateCreated", log.Data); err != nil {
			sendErr(out, err, topic)
			return
		}
		sendData(&CertificateCreatedData{ID: id})
	default:
		// Report unknown topics instead of silently dropping them.
		out <- &Event{
			Data:        &ErrorData{Err: errors.New("unknown topic"), Topic: topic.String()},
			BlockNumber: log.BlockNumber,
		}
	}
}
// OracleUSDAPI talks to the USD price oracle smart contract.
type OracleUSDAPI struct {
	client         EthereumClientBackend
	oracleContract *marketAPI.OracleUSD
	gasPrice       int64
}
// NewOracleUSDAPI binds an OracleAPI implementation to the oracle contract
// deployed at the given address.
func NewOracleUSDAPI(address common.Address, client EthereumClientBackend, gasPrice int64) (OracleAPI, error) {
	oracleContract, err := marketAPI.NewOracleUSD(address, client)
	if err != nil {
		return nil, err
	}
	return &OracleUSDAPI{
		client:         client,
		oracleContract: oracleContract,
		gasPrice:       gasPrice,
	}, nil
}
// SetCurrentPrice submits a transaction updating the oracle's current price.
func (api *OracleUSDAPI) SetCurrentPrice(ctx context.Context, key *ecdsa.PrivateKey, price *big.Int) (*types.Transaction, error) {
	opts := getTxOpts(ctx, key, defaultGasLimitForSidechain, api.gasPrice)
	return api.oracleContract.SetCurrentPrice(opts, price)
}
// GetCurrentPrice reads the oracle's current price.
func (api *OracleUSDAPI) GetCurrentPrice(ctx context.Context) (*big.Int, error) {
	return api.oracleContract.GetCurrentPrice(getCallOptions(ctx))
}
| 1 | 7,002 | What the reason to embed the Oracle? | sonm-io-core | go |
@@ -53,6 +53,8 @@ Workshops::Application.routes.draw do
resources :purchases, only: :index
end
+ match 'pages/new-topics' => 'pages#show', id: 'new-topics'
+
match '/auth/:provider/callback', to: 'auth_callbacks#create'
match '/watch' => 'high_voltage/pages#show', as: :watch, id: 'watch' | 1 | Workshops::Application.routes.draw do
mount RailsAdmin::Engine => '/new_admin', :as => 'rails_admin'
root to: 'topics#index'
match '/pages/tmux' => redirect("/products/4-humans-present-tmux")
resource :session, controller: 'sessions'
resources :sections, only: [:show] do
resources :registrations, only: [:index, :new, :create]
resources :redemptions, only: [:new]
end
resources :courses, only: [:index, :show] do
resources :follow_ups, only: [:create]
end
resources :products, only: [:show] do
resources :redemptions, only: [:new]
resources :purchases, only: [:new, :create, :show] do
resources :videos, only: [:show]
member do
get 'paypal'
get 'watch'
end
end
end
resources :payments, only: [:create]
resource :shopify, controller: 'shopify' do
member do
post 'order_paid'
end
end
resources :topics, only: :index
resources :topics, only: :show, as: :full_topic
match '/admin' => 'admin/courses#index', as: :admin
namespace :admin do
resources :courses do
resource :position
resources :sections
resources :follow_ups
resources :questions, only: [:destroy]
end
resources :coupons
resources :audiences
resources :sections do
resources :registrations
end
resources :teachers, except: :destroy
resources :products, except: :destroy
resources :purchases, only: :index
end
match '/auth/:provider/callback', to: 'auth_callbacks#create'
match '/watch' => 'high_voltage/pages#show', as: :watch, id: 'watch'
match '/directions' => "high_voltage/pages#show", as: :directions, id: "directions"
match '/group-training' => "high_voltage/pages#show", as: :group_training, id: "group-training"
match '/humans-present/oss' => "high_voltage/pages#show", as: :humans_present_oss, id: "humans-present-oss"
match '/backbone-js-on-rails' => redirect("/products/1-backbone-js-on-rails")
match '/rubyist-booster-shot' => "high_voltage/pages#show", as: :rubyist_booster_shot, id: "rubyist-booster-shot"
match '/my_account' => 'users#update', as: 'my_account', via: :put
match '/my_account' => 'users#edit', as: 'my_account'
match '/sign_up' => 'users#new', as: 'sign_up'
match '/sign_in' => 'sessions#new', as: 'sign_in'
mount Split::Dashboard, at: 'split'
get ':id' => 'topics#show', as: :topic, :constraints => { format: 'html' }
end
| 1 | 6,423 | Shouldn't this happen automatically with High Voltage? | thoughtbot-upcase | rb |
@@ -505,7 +505,7 @@ public class InitCodeTransformer {
InitValue initValue = initValueConfig.getResourceNameBindingValues().get(entityName);
switch (initValue.getType()) {
case Variable:
- entityValue = context.getNamer().localVarName(Name.from(initValue.getValue()));
+ entityValue = context.getNamer().varReference(Name.from(initValue.getValue()));
break;
case Random:
entityValue = context.getNamer().injectRandomStringGeneratorCode(initValue.getValue()); | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer;
import com.google.api.codegen.config.FieldConfig;
import com.google.api.codegen.config.ResourceNameConfig;
import com.google.api.codegen.config.ResourceNameOneofConfig;
import com.google.api.codegen.config.ResourceNameType;
import com.google.api.codegen.config.SingleResourceNameConfig;
import com.google.api.codegen.metacode.InitCodeContext;
import com.google.api.codegen.metacode.InitCodeContext.InitCodeOutputType;
import com.google.api.codegen.metacode.InitCodeLineType;
import com.google.api.codegen.metacode.InitCodeNode;
import com.google.api.codegen.metacode.InitValue;
import com.google.api.codegen.metacode.InitValueConfig;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.util.SymbolTable;
import com.google.api.codegen.util.testing.TestValueGenerator;
import com.google.api.codegen.viewmodel.FieldSettingView;
import com.google.api.codegen.viewmodel.FormattedInitValueView;
import com.google.api.codegen.viewmodel.InitCodeLineView;
import com.google.api.codegen.viewmodel.InitCodeView;
import com.google.api.codegen.viewmodel.InitValueView;
import com.google.api.codegen.viewmodel.ListInitCodeLineView;
import com.google.api.codegen.viewmodel.MapEntryView;
import com.google.api.codegen.viewmodel.MapInitCodeLineView;
import com.google.api.codegen.viewmodel.OneofConfigView;
import com.google.api.codegen.viewmodel.RepeatedResourceNameInitValueView;
import com.google.api.codegen.viewmodel.ResourceNameInitValueView;
import com.google.api.codegen.viewmodel.ResourceNameOneofInitValueView;
import com.google.api.codegen.viewmodel.SimpleInitCodeLineView;
import com.google.api.codegen.viewmodel.SimpleInitValueView;
import com.google.api.codegen.viewmodel.StructureInitCodeLineView;
import com.google.api.codegen.viewmodel.testing.ClientTestAssertView;
import com.google.api.tools.framework.model.TypeRef;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* InitCodeTransformer generates initialization code for a given method and then transforms it to a
* view object which can be rendered by a template engine.
*/
public class InitCodeTransformer {
  private final ImportSectionTransformer importSectionTransformer;
  /** Creates a transformer that renders import sections with the standard transformer. */
  public InitCodeTransformer() {
    this(new StandardImportSectionTransformer());
  }
  /** Creates a transformer that renders import sections with the given transformer. */
  public InitCodeTransformer(ImportSectionTransformer importSectionTransformer) {
    this.importSectionTransformer = importSectionTransformer;
  }
/**
* Generates initialization code from the given GapicMethodContext and InitCodeContext objects.
*/
public InitCodeView generateInitCode(
GapicMethodContext methodContext, InitCodeContext initCodeContext) {
InitCodeNode rootNode = InitCodeNode.createTree(initCodeContext);
if (initCodeContext.outputType() == InitCodeOutputType.FieldList) {
return buildInitCodeViewFlattened(methodContext, rootNode);
} else {
return buildInitCodeViewRequestObject(methodContext, rootNode);
}
}
  /**
   * Builds an InitCodeContext for initializing the request object of the method in the given
   * context.
   *
   * @param context the method context whose input type is being initialized
   * @param symbolTable symbol table used to generate unique variable names
   * @param fieldConfigs the field configurations to initialize
   * @param outputType whether init code is rendered as a field list or a request object
   * @param valueGenerator generator used to produce test values
   */
  public InitCodeContext createRequestInitCodeContext(
      GapicMethodContext context,
      SymbolTable symbolTable,
      Iterable<FieldConfig> fieldConfigs,
      InitCodeOutputType outputType,
      TestValueGenerator valueGenerator) {
    return InitCodeContext.newBuilder()
        .initObjectType(context.getMethod().getInputType())
        .symbolTable(symbolTable)
        .suggestedName(Name.from("request"))
        .initFieldConfigStrings(context.getMethodConfig().getSampleCodeInitFields())
        .initValueConfigMap(InitCodeTransformer.createCollectionMap(context))
        .initFields(FieldConfig.toFieldIterable(fieldConfigs))
        .fieldConfigMap(FieldConfig.toFieldConfigMap(fieldConfigs))
        .outputType(outputType)
        .valueGenerator(valueGenerator)
        .build();
  }
  /** Generates assert views for the test of the tested method and its fields. */
  public List<ClientTestAssertView> generateRequestAssertViews(
      GapicMethodContext methodContext, InitCodeContext initContext) {
    // Rebuild the init tree for the request so the expected values line up
    // with the generated initialization code.
    InitCodeNode rootNode =
        InitCodeNode.createTree(
            InitCodeContext.newBuilder()
                .initObjectType(methodContext.getMethod().getInputType())
                .initFields(initContext.initFields())
                .initValueConfigMap(createCollectionMap(methodContext))
                .suggestedName(Name.from("request"))
                .fieldConfigMap(initContext.fieldConfigMap())
                .build());
    List<ClientTestAssertView> assertViews = new ArrayList<>();
    SurfaceNamer namer = methodContext.getNamer();
    // Add request fields checking
    for (InitCodeNode fieldItemTree : rootNode.getChildren().values()) {
      FieldConfig fieldConfig = fieldItemTree.getFieldConfig();
      String getterMethod =
          namer.getFieldGetFunctionName(methodContext.getFeatureConfig(), fieldConfig);
      String expectedValueIdentifier = getVariableName(methodContext, fieldItemTree);
      String expectedTransformFunction = null;
      String actualTransformFunction = null;
      // Resource-name-typed fields may need conversion functions so that the
      // expected and actual values are compared in the same representation.
      if (methodContext.getFeatureConfig().useResourceNameFormatOption(fieldConfig)) {
        if (fieldConfig.requiresParamTransformationFromAny()) {
          expectedTransformFunction = namer.getToStringMethod();
          actualTransformFunction = namer.getToStringMethod();
        } else if (fieldConfig.requiresParamTransformation()) {
          expectedTransformFunction =
              namer.getResourceOneofCreateMethod(methodContext.getTypeTable(), fieldConfig);
        }
      }
      // Maps are also "repeated" in protobuf, so exclude them explicitly.
      boolean isArray =
          fieldConfig.getField().getType().isRepeated()
              && !fieldConfig.getField().getType().isMap();
      String enumTypeName = null;
      TypeRef fieldType = fieldItemTree.getType();
      if (fieldType.isEnum() && !fieldType.isRepeated()) {
        enumTypeName = methodContext.getTypeTable().getNicknameFor(fieldType);
      }
      String messageTypeName = null;
      if (fieldType.isMessage()) {
        messageTypeName = methodContext.getTypeTable().getFullNameFor(fieldType.getMessageType());
      }
      assertViews.add(
          createAssertView(
              expectedValueIdentifier,
              expectedTransformFunction,
              actualTransformFunction,
              isArray,
              getterMethod,
              enumTypeName,
              messageTypeName));
    }
    return assertViews;
  }
  /**
   * A utility method which creates the InitValueConfig map that contains the collection config
   * data, keyed by the field name patterns configured on the method.
   */
  public static ImmutableMap<String, InitValueConfig> createCollectionMap(
      GapicMethodContext context) {
    ImmutableMap.Builder<String, InitValueConfig> mapBuilder = ImmutableMap.builder();
    Map<String, String> fieldNamePatterns = context.getMethodConfig().getFieldNamePatterns();
    for (Map.Entry<String, String> fieldNamePattern : fieldNamePatterns.entrySet()) {
      // Each pattern value names a single resource name config; wrap it in an
      // InitValueConfig together with the API wrapper class name.
      SingleResourceNameConfig resourceNameConfig =
          context.getSingleResourceNameConfig(fieldNamePattern.getValue());
      String apiWrapperClassName =
          context.getNamer().getApiWrapperClassName(context.getInterfaceConfig());
      InitValueConfig initValueConfig =
          InitValueConfig.create(apiWrapperClassName, resourceNameConfig);
      mapBuilder.put(fieldNamePattern.getKey(), initValueConfig);
    }
    return mapBuilder.build();
  }
private ClientTestAssertView createAssertView(
String expected,
String expectedTransformFunction,
String actualTransformFunction,
boolean isArray,
String actual,
String enumTypeName,
String messageTypeName) {
return ClientTestAssertView.newBuilder()
.expectedValueIdentifier(expected)
.isArray(isArray)
.expectedValueTransformFunction(expectedTransformFunction)
.actualValueTransformFunction(actualTransformFunction)
.actualValueGetter(actual)
.enumTypeName(enumTypeName)
.messageTypeName(messageTypeName)
.build();
}
private InitCodeView buildInitCodeViewFlattened(GapicMethodContext context, InitCodeNode root) {
List<InitCodeNode> orderedItems = root.listInInitializationOrder();
List<InitCodeNode> argItems = new ArrayList<>(root.getChildren().values());
//Remove the request object for flattened method
orderedItems.remove(orderedItems.size() - 1);
return buildInitCodeView(context, orderedItems, argItems);
}
private InitCodeView buildInitCodeViewRequestObject(
GapicMethodContext context, InitCodeNode root) {
List<InitCodeNode> orderedItems = root.listInInitializationOrder();
List<InitCodeNode> argItems = Lists.newArrayList(root);
return buildInitCodeView(context, orderedItems, argItems);
}
  /**
   * Assembles the {@link InitCodeView} for the given initialization nodes.
   *
   * @param context the method context supplying the namer and type table
   * @param orderedItems all init nodes, in the order their init lines must be emitted
   * @param argItems the nodes that become top-level arguments of the generated sample call
   */
  private InitCodeView buildInitCodeView(
      GapicMethodContext context,
      Iterable<InitCodeNode> orderedItems,
      Iterable<InitCodeNode> argItems) {
    ModelTypeTable typeTable = context.getTypeTable();
    SurfaceNamer namer = context.getNamer();
    // Initialize the type table with the apiClassName since each sample will be using the
    // apiClass.
    typeTable.getAndSaveNicknameFor(
        namer.getFullyQualifiedApiWrapperClassName(context.getInterfaceConfig()));
    return InitCodeView.newBuilder()
        .lines(generateSurfaceInitCodeLines(context, orderedItems))
        .topLevelLines(generateSurfaceInitCodeLines(context, argItems))
        .fieldSettings(getFieldSettings(context, argItems))
        .importSection(importSectionTransformer.generateImportSection(context, orderedItems))
        .versionIndexFileImportName(namer.getVersionIndexFileImportName())
        .topLevelIndexFileImportName(namer.getTopLevelIndexFileImportName())
        .apiFileName(namer.getServiceFileName(context.getInterfaceConfig()))
        .build();
  }
private List<InitCodeLineView> generateSurfaceInitCodeLines(
GapicMethodContext context, Iterable<InitCodeNode> specItemNode) {
List<InitCodeLineView> surfaceLines = new ArrayList<>();
for (InitCodeNode item : specItemNode) {
surfaceLines.add(generateSurfaceInitCodeLine(context, item));
}
return surfaceLines;
}
private InitCodeLineView generateSurfaceInitCodeLine(
GapicMethodContext context, InitCodeNode specItemNode) {
switch (specItemNode.getLineType()) {
case StructureInitLine:
return generateStructureInitCodeLine(context, specItemNode);
case ListInitLine:
return generateListInitCodeLine(context, specItemNode);
case SimpleInitLine:
return generateSimpleInitCodeLine(context, specItemNode);
case MapInitLine:
return generateMapInitCodeLine(context, specItemNode);
default:
throw new RuntimeException("unhandled line type: " + specItemNode.getLineType());
}
}
  /**
   * Renders an init line that assigns a single simple value: a resource name type when the
   * resource-name format option is enabled, otherwise the field's own type nickname.
   */
  private InitCodeLineView generateSimpleInitCodeLine(
      GapicMethodContext context, InitCodeNode item) {
    SimpleInitCodeLineView.Builder surfaceLine = SimpleInitCodeLineView.newBuilder();
    FieldConfig fieldConfig = item.getFieldConfig();
    SurfaceNamer namer = context.getNamer();
    ModelTypeTable typeTable = context.getTypeTable();
    surfaceLine.lineType(InitCodeLineType.SimpleInitLine);
    if (context.getFeatureConfig().useResourceNameFormatOption(fieldConfig)) {
      if (!context.isFlattenedMethodContext()) {
        // In a non-flattened context, we always use the resource name type set on the message
        // instead of set on the flattened method
        fieldConfig = fieldConfig.getMessageFieldConfig();
      }
      // Repeated fields use the container resource type; scalar fields use the element type.
      if (item.getType().isRepeated()) {
        surfaceLine.typeName(namer.getAndSaveResourceTypeName(typeTable, fieldConfig));
      } else {
        surfaceLine.typeName(namer.getAndSaveElementResourceTypeName(typeTable, fieldConfig));
      }
    } else {
      surfaceLine.typeName(typeTable.getAndSaveNicknameFor(item.getType()));
    }
    surfaceLine.identifier(getVariableName(context, item));
    surfaceLine.initValue(getInitValue(context, item));
    return surfaceLine.build();
  }
private InitCodeLineView generateStructureInitCodeLine(
GapicMethodContext context, InitCodeNode item) {
StructureInitCodeLineView.Builder surfaceLine = StructureInitCodeLineView.newBuilder();
SurfaceNamer namer = context.getNamer();
ModelTypeTable typeTable = context.getTypeTable();
surfaceLine.lineType(InitCodeLineType.StructureInitLine);
surfaceLine.identifier(namer.localVarName(item.getIdentifier()));
String typeName = typeTable.getAndSaveNicknameFor(item.getType());
surfaceLine.typeName(typeName);
surfaceLine.fullyQualifiedTypeName(typeTable.getFullNameFor(item.getType()));
surfaceLine.typeConstructor(namer.getTypeConstructor(typeName));
surfaceLine.fieldSettings(getFieldSettings(context, item.getChildren().values()));
return surfaceLine.build();
}
private InitCodeLineView generateListInitCodeLine(GapicMethodContext context, InitCodeNode item) {
ListInitCodeLineView.Builder surfaceLine = ListInitCodeLineView.newBuilder();
FieldConfig fieldConfig = item.getFieldConfig();
SurfaceNamer namer = context.getNamer();
ModelTypeTable typeTable = context.getTypeTable();
surfaceLine.lineType(InitCodeLineType.ListInitLine);
surfaceLine.identifier(namer.localVarName(item.getIdentifier()));
if (context.getFeatureConfig().useResourceNameFormatOption(fieldConfig)) {
surfaceLine.elementTypeName(namer.getAndSaveElementResourceTypeName(typeTable, fieldConfig));
} else {
surfaceLine.elementTypeName(
typeTable.getAndSaveNicknameForElementType(item.getType().makeOptional()));
}
List<String> entries = new ArrayList<>();
List<InitCodeLineView> elements = new ArrayList<>();
for (InitCodeNode child : item.getChildren().values()) {
entries.add(namer.localVarName(child.getIdentifier()));
elements.add(generateSurfaceInitCodeLine(context, child));
}
surfaceLine.elementIdentifiers(entries);
surfaceLine.elements(elements);
return surfaceLine.build();
}
private InitCodeLineView generateMapInitCodeLine(GapicMethodContext context, InitCodeNode item) {
MapInitCodeLineView.Builder surfaceLine = MapInitCodeLineView.newBuilder();
SurfaceNamer namer = context.getNamer();
ModelTypeTable typeTable = context.getTypeTable();
surfaceLine.lineType(InitCodeLineType.MapInitLine);
surfaceLine.identifier(namer.localVarName(item.getIdentifier()));
surfaceLine.keyTypeName(
typeTable.getAndSaveNicknameFor(item.getType().getMapKeyField().getType()));
surfaceLine.valueTypeName(
typeTable.getAndSaveNicknameFor(item.getType().getMapValueField().getType()));
List<MapEntryView> entries = new ArrayList<>();
for (Map.Entry<String, InitCodeNode> entry : item.getChildren().entrySet()) {
MapEntryView.Builder mapEntry = MapEntryView.newBuilder();
mapEntry.key(
typeTable.renderPrimitiveValue(
item.getType().getMapKeyField().getType(), entry.getKey()));
mapEntry.valueString(context.getNamer().localVarName(entry.getValue().getIdentifier()));
mapEntry.value(generateSurfaceInitCodeLine(context, entry.getValue()));
entries.add(mapEntry.build());
}
surfaceLine.initEntries(entries);
return surfaceLine.build();
}
  /**
   * Computes the init value view for a single node.
   *
   * <p>Three top-level cases: (1) resource-name-formatted fields produce one of the
   * resource-name init value views, keyed on the resource name type; (2) non-repeated fields
   * with a formatting config produce a formatted-string value (or a stringified resource name
   * when format functions are disabled); (3) everything else gets a simple value from the init
   * config, or the type's zero value.
   */
  private InitValueView getInitValue(GapicMethodContext context, InitCodeNode item) {
    SurfaceNamer namer = context.getNamer();
    ModelTypeTable typeTable = context.getTypeTable();
    InitValueConfig initValueConfig = item.getInitValueConfig();
    FieldConfig fieldConfig = item.getFieldConfig();
    if (context.getFeatureConfig().useResourceNameFormatOption(fieldConfig)) {
      if (!context.isFlattenedMethodContext()) {
        ResourceNameConfig messageResNameConfig = fieldConfig.getMessageResourceNameConfig();
        if (messageResNameConfig == null
            || messageResNameConfig.getResourceNameType() != ResourceNameType.ANY) {
          // In a non-flattened context, we always use the resource name type set on the message
          // instead of set on the flattened method, unless the resource name type on message
          // is ANY.
          fieldConfig = fieldConfig.getMessageFieldConfig();
        }
      }
      // Repeated resource-name fields get their own view and bypass the type switch below.
      if (item.getType().isRepeated()) {
        return RepeatedResourceNameInitValueView.newBuilder()
            .resourceTypeName(
                namer.getAndSaveElementResourceTypeName(context.getTypeTable(), fieldConfig))
            .build();
      }
      SingleResourceNameConfig singleResourceNameConfig;
      switch (fieldConfig.getResourceNameType()) {
        case ANY:
          // ANY: pick the first configured single resource name as a representative.
          // TODO(michaelbausor): handle case where there are no other resource names at all...
          singleResourceNameConfig =
              Iterables.get(context.getProductConfig().getSingleResourceNameConfigs(), 0);
          FieldConfig anyResourceNameFieldConfig =
              fieldConfig.withResourceNameConfig(singleResourceNameConfig);
          return createResourceNameInitValueView(context, anyResourceNameFieldConfig, item).build();
        case FIXED:
          throw new UnsupportedOperationException("entity name invalid");
        case ONEOF:
          // ONEOF: wrap a view for the oneof's first single resource name variant.
          ResourceNameOneofConfig oneofConfig =
              (ResourceNameOneofConfig) fieldConfig.getResourceNameConfig();
          singleResourceNameConfig = Iterables.get(oneofConfig.getSingleResourceNameConfigs(), 0);
          FieldConfig singleResourceNameFieldConfig =
              fieldConfig.withResourceNameConfig(singleResourceNameConfig);
          ResourceNameInitValueView initView =
              createResourceNameInitValueView(context, singleResourceNameFieldConfig, item).build();
          return ResourceNameOneofInitValueView.newBuilder()
              .resourceOneofTypeName(
                  namer.getAndSaveElementResourceTypeName(typeTable, fieldConfig))
              .specificResourceNameView(initView)
              .build();
        case SINGLE:
          return createResourceNameInitValueView(context, fieldConfig, item).build();
        case NONE:
        default:
          throw new UnsupportedOperationException("unexpected entity name type");
      }
    } else if (initValueConfig.hasFormattingConfig() && !item.getType().isRepeated()) {
      if (context.getFeatureConfig().enableStringFormatFunctions()) {
        // Emit a call to the generated formatNameX() helper with one arg per template var.
        FormattedInitValueView.Builder initValue = FormattedInitValueView.newBuilder();
        initValue.apiWrapperName(
            context.getNamer().getApiWrapperClassName(context.getInterfaceConfig()));
        initValue.fullyQualifiedApiWrapperName(
            context.getNamer().getFullyQualifiedApiWrapperClassName(context.getInterfaceConfig()));
        initValue.formatFunctionName(
            context
                .getNamer()
                .getFormatFunctionName(
                    context.getInterface(), initValueConfig.getSingleResourceNameConfig()));
        List<String> varList =
            Lists.newArrayList(
                initValueConfig.getSingleResourceNameConfig().getNameTemplate().vars());
        initValue.formatArgs(getFormatFunctionArgs(context, varList, initValueConfig));
        return initValue.build();
      } else {
        // Format functions are disabled: build a resource name and convert it to a string.
        return createResourceNameInitValueView(context, fieldConfig, item)
            .convertToString(true)
            .build();
      }
    } else {
      SimpleInitValueView.Builder initValue = SimpleInitValueView.newBuilder();
      if (initValueConfig.hasSimpleInitialValue()) {
        String value = initValueConfig.getInitialValue().getValue();
        switch (initValueConfig.getInitialValue().getType()) {
          case Literal:
            if (item.getType().isEnum()) {
              value = context.getTypeTable().getEnumValue(item.getType(), value);
            } else {
              value = context.getTypeTable().renderPrimitiveValue(item.getType(), value);
            }
            break;
          case Random:
            // Inject code that generates a random string at sample runtime.
            value = context.getNamer().injectRandomStringGeneratorCode(value);
            break;
          case Variable:
            value = context.getNamer().localVarName(Name.from(value));
            break;
          default:
            throw new IllegalArgumentException("Unhandled init value type");
        }
        initValue.initialValue(value);
      } else {
        // No configured value: fall back to the type's zero value.
        initValue.initialValue(
            context.getTypeTable().getSnippetZeroValueAndSaveNicknameFor(item.getType()));
        initValue.isRepeated(item.getType().isRepeated());
      }
      return initValue.build();
    }
  }
private ResourceNameInitValueView.Builder createResourceNameInitValueView(
GapicMethodContext context, FieldConfig fieldConfig, InitCodeNode item) {
String resourceName =
context.getNamer().getAndSaveElementResourceTypeName(context.getTypeTable(), fieldConfig);
SingleResourceNameConfig singleResourceNameConfig =
(SingleResourceNameConfig) fieldConfig.getResourceNameConfig();
List<String> varList = Lists.newArrayList(singleResourceNameConfig.getNameTemplate().vars());
return ResourceNameInitValueView.newBuilder()
.resourceTypeName(resourceName)
.formatArgs(getFormatFunctionArgs(context, varList, item.getInitValueConfig()));
}
private static List<String> getFormatFunctionArgs(
GapicMethodContext context, List<String> varList, InitValueConfig initValueConfig) {
List<String> formatFunctionArgs = new ArrayList<>();
for (String entityName : varList) {
String entityValue =
context.getNamer().quoted("[" + Name.from(entityName).toUpperUnderscore() + "]");
if (initValueConfig.hasFormattingConfigInitialValues()
&& initValueConfig.getResourceNameBindingValues().containsKey(entityName)) {
InitValue initValue = initValueConfig.getResourceNameBindingValues().get(entityName);
switch (initValue.getType()) {
case Variable:
entityValue = context.getNamer().localVarName(Name.from(initValue.getValue()));
break;
case Random:
entityValue = context.getNamer().injectRandomStringGeneratorCode(initValue.getValue());
break;
case Literal:
entityValue = initValue.getValue();
break;
default:
throw new IllegalArgumentException("Unhandled init value type");
}
}
formatFunctionArgs.add(entityValue);
}
return formatFunctionArgs;
}
private List<FieldSettingView> getFieldSettings(
GapicMethodContext context, Iterable<InitCodeNode> childItems) {
SurfaceNamer namer = context.getNamer();
List<FieldSettingView> allSettings = new ArrayList<>();
for (InitCodeNode item : childItems) {
FieldSettingView.Builder fieldSetting = FieldSettingView.newBuilder();
FieldConfig fieldConfig = item.getFieldConfig();
if (context.getFeatureConfig().useResourceNameFormatOption(fieldConfig)) {
fieldSetting.fieldSetFunction(
namer.getResourceNameFieldSetFunctionName(fieldConfig.getMessageFieldConfig()));
} else {
fieldSetting.fieldSetFunction(
namer.getFieldSetFunctionName(item.getType(), Name.from(item.getKey())));
}
fieldSetting.fieldAddFunction(
namer.getFieldAddFunctionName(item.getType(), Name.from(item.getKey())));
fieldSetting.fieldGetFunction(
namer.getFieldGetFunctionName(item.getType(), Name.from(item.getKey())));
fieldSetting.identifier(getVariableName(context, item));
fieldSetting.initCodeLine(generateSurfaceInitCodeLine(context, item));
fieldSetting.fieldName(context.getNamer().publicFieldName(Name.from(item.getKey())));
fieldSetting.isMap(item.getType().isMap());
fieldSetting.isArray(!item.getType().isMap() && item.getType().isRepeated());
fieldSetting.elementTypeName(context.getTypeTable().getFullNameFor(item.getType()));
if (item.getOneofConfig() != null) {
fieldSetting.oneofConfig(
OneofConfigView.newBuilder()
.groupName(namer.publicFieldName(item.getOneofConfig().groupName()))
.variantType(namer.getOneofVariantTypeName(item.getOneofConfig()))
.build());
}
allSettings.add(fieldSetting.build());
}
return allSettings;
}
private static String getVariableName(GapicMethodContext context, InitCodeNode item) {
if (!context.getFeatureConfig().useResourceNameFormatOption(item.getFieldConfig())
&& item.getInitValueConfig().hasFormattingConfig()) {
return context.getNamer().getFormattedVariableName(item.getIdentifier());
}
return context.getNamer().localVarName(item.getIdentifier());
}
}
| 1 | 23,957 | This is changed because we need `projectId` to be printed as `$projectId` in PHP. | googleapis-gapic-generator | java |
@@ -1,7 +1,7 @@
<div class="row">
<div class="col-md-12">
<h1>
- <%= _('Template History') %>
+ <%= templates.first.customization_of.present? ? _('Template Customisation History') : _('Template History') %>
<div class="pull-right">
<%= link_to _('View all templates'), referrer, class: "btn btn-primary" %>
</div> | 1 | <div class="row">
<%# Page header: title plus a button linking back to the caller's template list. %>
<div class="row">
  <div class="col-md-12">
    <h1>
      <%= _('Template History') %>
      <div class="pull-right">
        <%= link_to _('View all templates'), referrer, class: "btn btn-primary" %>
      </div>
    </h1>
    <p><%= raw _('Here you can view previously published versions of your template. These can no longer be modified.')%></p>
  </div>
</div>
<%# Paginated list of previously published template versions, newest version first. %>
<div class="row">
  <div class="col-md-12">
    <!-- List of own templates -->
    <% if templates.length > 0 %>
      <%= paginable_renderise(
        partial: '/paginable/templates/history',
        controller: 'paginable/templates',
        action: 'history',
        path_params: { id: template.id },
        query_params: { sort_field: :version, sort_direction: :desc },
        scope: templates,
        locals: { current: current }) %>
    <% else %>
      <p><%= _('This template is new and does not yet have any publication history.') %></p>
    <% end %>
  </div>
</div>
@@ -117,7 +117,8 @@ private
# Some k/v's are wikipedia=http://en.wikipedia.org/wiki/Full%20URL
return nil if value =~ /^https?:\/\//
- if key == "wikipedia"
+ # match wikipedia or xxxxx:wikipedia
+ if key =~ /^(?:.*:)*wikipedia$/
# This regex should match Wikipedia language codes, everything
# from de to zh-classical
if value =~ /^([a-z-]{2,12}):(.+)$/i | 1 | module BrowseHelper
def printable_name(object, version=false)
if object.id.is_a?(Array)
id = object.id[0]
else
id = object.id
end
name = t 'printable_name.with_id', :id => id.to_s
if version
name = t 'printable_name.with_version', :id => name, :version => object.version.to_s
end
# don't look at object tags if redacted, so as to avoid giving
# away redacted version tag information.
unless object.redacted?
locale = I18n.locale.to_s
while locale =~ /-[^-]+/ and not object.tags.include? "name:#{I18n.locale}"
locale = locale.sub(/-[^-]+/, "")
end
if object.tags.include? "name:#{locale}"
name = t 'printable_name.with_name_html', :name => content_tag(:bdi, object.tags["name:#{locale}"].to_s ), :id => content_tag(:bdi, name)
elsif object.tags.include? 'name'
name = t 'printable_name.with_name_html', :name => content_tag(:bdi, object.tags['name'].to_s ), :id => content_tag(:bdi, name)
end
end
name
end
def link_class(type, object)
classes = [ type ]
if object.redacted?
classes << "deleted"
else
classes += icon_tags(object).flatten.map { |t| h(t) }
classes << "deleted" unless object.visible?
end
classes.join(" ")
end
def link_title(object)
if object.redacted?
""
else
h(icon_tags(object).map { |k,v| k + '=' + v }.to_sentence)
end
end
def format_key(key)
if url = wiki_link("key", key)
link_to h(key), url, :title => t('browse.tag_details.wiki_link.key', :key => key)
else
h(key)
end
end
def format_value(key, value)
if wp = wikipedia_link(key, value)
link_to h(wp[:title]), wp[:url], :title => t('browse.tag_details.wikipedia_link', :page => wp[:title])
elsif wdt = wikidata_link(key, value)
link_to h(wdt[:title]), wdt[:url], :title => t('browse.tag_details.wikidata_link', :page => wdt[:title])
elsif url = wiki_link("tag", "#{key}=#{value}")
link_to h(value), url, :title => t('browse.tag_details.wiki_link.tag', :key => key, :value => value)
elsif url = telephone_link(key, value)
link_to h(value), url, :title => t('browse.tag_details.telephone_link', :phone_number => value)
else
linkify h(value)
end
end
def type_and_paginated_count(type, pages)
if pages.page_count == 1
t "browse.changeset.#{type}",
:count => pages.item_count
else
t "browse.changeset.#{type}_paginated",
:x => pages.current_page.first_item,
:y => pages.current_page.last_item,
:count => pages.item_count
end
end
private
ICON_TAGS = [
"aeroway", "amenity", "barrier", "building", "highway", "historic", "landuse",
"leisure", "man_made", "natural", "railway", "shop", "tourism", "waterway"
]
def icon_tags(object)
object.tags.find_all { |k,v| ICON_TAGS.include? k }.sort
end
def wiki_link(type, lookup)
locale = I18n.locale.to_s
# update-wiki-pages does s/ /_/g on keys before saving them, we
# have to replace spaces with underscore so we'll link
# e.g. `source=Isle of Man Government aerial imagery (2001)' to
# the correct page.
lookup_us = lookup.tr(" ", "_")
if page = WIKI_PAGES[locale][type][lookup_us] rescue nil
url = "http://wiki.openstreetmap.org/wiki/#{page}?uselang=#{locale}"
elsif page = WIKI_PAGES["en"][type][lookup_us] rescue nil
url = "http://wiki.openstreetmap.org/wiki/#{page}?uselang=#{locale}"
end
return url
end
def wikipedia_link(key, value)
# Some k/v's are wikipedia=http://en.wikipedia.org/wiki/Full%20URL
return nil if value =~ /^https?:\/\//
if key == "wikipedia"
# This regex should match Wikipedia language codes, everything
# from de to zh-classical
if value =~ /^([a-z-]{2,12}):(.+)$/i
# Value is <lang>:<title> so split it up
# Note that value is always left as-is, see: https://trac.openstreetmap.org/ticket/4315
lang = $1
else
# Value is <title> so default to English Wikipedia
lang = 'en'
end
elsif key =~ /^wikipedia:(\S+)$/
# Language is in the key, so assume value is the title
lang = $1
else
# Not a wikipedia key!
return nil
end
if value =~ /^([^#]*)(#.*)/ then
# Contains a reference to a section of the wikipedia article
# Must break it up to correctly build the url
value = $1
section = $2
else
section = ""
end
return {
:url => "http://#{lang}.wikipedia.org/wiki/#{value}?uselang=#{I18n.locale}#{section}",
:title => value + section
}
end
def wikidata_link(key, value)
if key == "wikidata" and value =~ /^[Qq][1-9][0-9]*$/
return {
:url => "//www.wikidata.org/wiki/#{value}?uselang=#{I18n.locale}",
:title => value
}
end
return nil
end
def telephone_link(key, value)
# does it look like a phone number? eg "+1 (234) 567-8901 " ?
return nil unless value =~ /^\s*\+[\d\s\(\)\/\.-]{6,25}\s*$/
# remove all whitespace instead of encoding it http://tools.ietf.org/html/rfc3966#section-5.1.1
# "+1 (234) 567-8901 " -> "+1(234)567-8901"
valueNoWhitespace = value.gsub(/\s+/, '')
return "tel:#{valueNoWhitespace}"
end
end
| 1 | 9,401 | Here (and on line 158 as well), would it be more efficient to use the String end_with() method? Not tested | openstreetmap-openstreetmap-website | rb |
@@ -505,7 +505,13 @@ class RAMHandler(logging.Handler):
if record.levelno >= minlevel:
lines.append(fmt(record))
return '\n'.join(lines)
-
+
+ def change_log_capacity(self, capacity):
+ """
+ change log capacity according to user specifcation
+ """
+ ram_handler = RAMHandler(capacity=capacity)
+ self._data = collections.deque(self._data, maxlen=capacity)
class ColoredFormatter(logging.Formatter):
| 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Loggers and utilities related to logging."""
import os
import sys
import html as pyhtml
import logging
import contextlib
import collections
import faulthandler
import traceback
import warnings
import json
import inspect
from PyQt5 import QtCore
# Optional imports
try:
import colorama
except ImportError:
colorama = None
_log_inited = False
COLORS = ['black', 'red', 'green', 'yellow', 'blue', 'purple', 'cyan', 'white']
COLOR_ESCAPES = {color: '\033[{}m'.format(i)
for i, color in enumerate(COLORS, start=30)}
RESET_ESCAPE = '\033[0m'
# Log formats to use.
SIMPLE_FMT = ('{green}{asctime:8}{reset} {log_color}{levelname}{reset}: '
'{message}')
EXTENDED_FMT = ('{green}{asctime:8}{reset} '
'{log_color}{levelname:8}{reset} '
'{cyan}{name:10} {module}:{funcName}:{lineno}{reset} '
'{log_color}{message}{reset}')
EXTENDED_FMT_HTML = (
'<tr>'
'<td><pre>%(green)s%(asctime)-8s%(reset)s</pre></td>'
'<td><pre>%(log_color)s%(levelname)-8s%(reset)s</pre></td>'
'<td></pre>%(cyan)s%(name)-10s</pre></td>'
'<td><pre>%(cyan)s%(module)s:%(funcName)s:%(lineno)s%(reset)s</pre></td>'
'<td><pre>%(log_color)s%(message)s%(reset)s</pre></td>'
'</tr>'
)
DATEFMT = '%H:%M:%S'
LOG_COLORS = {
'VDEBUG': 'white',
'DEBUG': 'white',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
# We first monkey-patch logging to support our VDEBUG level before getting the
# loggers. Based on http://stackoverflow.com/a/13638084
VDEBUG_LEVEL = 9
logging.addLevelName(VDEBUG_LEVEL, 'VDEBUG')
logging.VDEBUG = VDEBUG_LEVEL
LOG_LEVELS = {
'VDEBUG': logging.VDEBUG,
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL,
}
def vdebug(self, msg, *args, **kwargs):
    """Log with a VDEBUG level.

    VDEBUG is used when a debug message is rather verbose, and probably of
    little use to the end user or for post-mortem debugging, i.e. the content
    probably won't change unless the code changes.
    """
    if not self.isEnabledFor(VDEBUG_LEVEL):
        return
    # pylint: disable=protected-access
    self._log(VDEBUG_LEVEL, msg, args, **kwargs)

logging.Logger.vdebug = vdebug
# The different loggers used.
statusbar = logging.getLogger('statusbar')
completion = logging.getLogger('completion')
destroy = logging.getLogger('destroy')
modes = logging.getLogger('modes')
webview = logging.getLogger('webview')
mouse = logging.getLogger('mouse')
misc = logging.getLogger('misc')
url = logging.getLogger('url')
procs = logging.getLogger('procs')
commands = logging.getLogger('commands')
init = logging.getLogger('init')
signals = logging.getLogger('signals')
hints = logging.getLogger('hints')
keyboard = logging.getLogger('keyboard')
downloads = logging.getLogger('downloads')
js = logging.getLogger('js') # Javascript console messages
qt = logging.getLogger('qt') # Warnings produced by Qt
rfc6266 = logging.getLogger('rfc6266')
ipc = logging.getLogger('ipc')
shlexer = logging.getLogger('shlexer')
save = logging.getLogger('save')
message = logging.getLogger('message')
config = logging.getLogger('config')
sessions = logging.getLogger('sessions')
ram_handler = None
def stub(suffix=''):
    """Show a STUB: message for the calling function."""
    caller = inspect.stack()[1][3]
    if suffix:
        misc.warning("STUB: {} ({})".format(caller, suffix))
    else:
        misc.warning("STUB: {}".format(caller))
class CriticalQtWarning(Exception):
    """Exception raised when there's a critical Qt warning."""
    # NOTE(review): the raise site is outside this chunk; presumably raised
    # from qt_message_handler - confirm.
def init_log(args):
    """Init loggers based on the argparse namespace passed.

    Uses args.loglevel, args.debug, args.color, args.force_color,
    args.json_logging, args.loglines and args.logfilter. Installs the
    console/RAM handlers on the root logger, redirects Python warnings and
    Qt messages into logging, and marks the module as initialized.
    """
    level = args.loglevel.upper()
    try:
        numeric_level = getattr(logging, level)
    except AttributeError:
        raise ValueError("Invalid log level: {}".format(args.loglevel))
    # --debug forces at least DEBUG verbosity regardless of --loglevel.
    if numeric_level > logging.DEBUG and args.debug:
        numeric_level = logging.DEBUG
    console, ram = _init_handlers(numeric_level, args.color, args.force_color,
                                  args.json_logging, args.loglines)
    root = logging.getLogger()
    if console is not None:
        # The filter only applies to the console; the RAM log keeps everything.
        if args.logfilter is not None:
            console.addFilter(LogFilter(args.logfilter.split(',')))
        root.addHandler(console)
    if ram is not None:
        root.addHandler(ram)
    # Let the handlers, not the root logger, decide what gets dropped.
    root.setLevel(logging.NOTSET)
    logging.captureWarnings(True)
    _init_py_warnings()
    QtCore.qInstallMessageHandler(qt_message_handler)
    global _log_inited
    _log_inited = True
def _init_py_warnings():
    """Initialize Python warning handling.

    Shows each warning once per distinct location, except for pdb's
    ResourceWarnings which are silenced entirely.
    """
    warnings.simplefilter('default')
    # filterwarnings() prepends its filter, so calling it *after* simplefilter()
    # puts this 'ignore' entry in front of the 'default' one - order matters.
    warnings.filterwarnings('ignore', module='pdb', category=ResourceWarning)
@contextlib.contextmanager
def disable_qt_msghandler():
    """Contextmanager which temporarily disables the Qt message handler.

    The previous handler is restored even if the body raises.
    """
    # qInstallMessageHandler returns the previously installed handler.
    old_handler = QtCore.qInstallMessageHandler(None)
    try:
        yield
    finally:
        QtCore.qInstallMessageHandler(old_handler)
@contextlib.contextmanager
def ignore_py_warnings(**kwargs):
    """Contextmanager to temporarily disable certain Python warnings.

    Args:
        **kwargs: Passed on to warnings.filterwarnings().
    """
    warnings.filterwarnings('ignore', **kwargs)
    yield
    # Re-install the standard filters afterwards, but only once logging has
    # been fully initialized (init_log() sets _log_inited).
    if _log_inited:
        _init_py_warnings()
def _init_handlers(level, color, force_color, json_logging, ram_capacity):
    """Init log handlers.

    Args:
        level: The numeric logging level.
        color: Whether to use color if available.
        force_color: Force colored output.
        json_logging: Output log lines in JSON (this disables all colors).
        ram_capacity: Maximum number of log records kept in RAM;
                      0 disables the RAM handler entirely.

    Return:
        A (console_handler, ram_handler) tuple; either element may be None
        (no stderr available / capacity of 0, respectively).
    """
    global ram_handler
    console_fmt, ram_fmt, html_fmt, use_colorama = _init_formatters(
        level, color, force_color, json_logging)
    if sys.stderr is None:
        # No stderr available (e.g. GUI-only builds) - skip console logging.
        console_handler = None
    else:
        strip = False if force_color else None
        if use_colorama:
            # Translate ANSI escapes into Win32 console calls.
            stream = colorama.AnsiToWin32(sys.stderr, strip=strip)
        else:
            stream = sys.stderr
        console_handler = logging.StreamHandler(stream)
        console_handler.setLevel(level)
        console_handler.setFormatter(console_fmt)
    if ram_capacity == 0:
        ram_handler = None
    else:
        # The RAM handler records everything, regardless of the console level.
        ram_handler = RAMHandler(capacity=ram_capacity)
        ram_handler.setLevel(logging.NOTSET)
        ram_handler.setFormatter(ram_fmt)
        ram_handler.html_formatter = html_fmt
    return console_handler, ram_handler
def _init_formatters(level, color, force_color, json_logging):
    """Init log formatters.

    Args:
        level: The numeric logging level.
        color: Whether to use color if available.
        force_color: Force colored output.
        json_logging: Format lines as JSON (disables all color).

    Return:
        A (console_formatter, ram_formatter, html_formatter, use_colorama)
        tuple.
        console_formatter/ram_formatter/html_formatter: logging.Formatter
                                                        instances (or None
                                                        for the console one).
        use_colorama: Whether to use colorama.
    """
    # Verbose format only when the console will actually show debug output.
    console_fmt = EXTENDED_FMT if level <= logging.DEBUG else SIMPLE_FMT
    ram_formatter = ColoredFormatter(EXTENDED_FMT, DATEFMT, '{',
                                     use_colors=False)
    html_formatter = HTMLFormatter(EXTENDED_FMT_HTML, DATEFMT,
                                   log_colors=LOG_COLORS)
    if sys.stderr is None:
        return None, ram_formatter, html_formatter, False
    if json_logging:
        console_formatter = JSONFormatter()
        return console_formatter, ram_formatter, html_formatter, False
    use_colorama = False
    color_supported = os.name == 'posix' or colorama
    if color_supported and (sys.stderr.isatty() or force_color) and color:
        use_colors = True
        if colorama and os.name != 'posix':
            # Non-POSIX terminals need colorama to translate ANSI escapes.
            use_colorama = True
    else:
        use_colors = False
    console_formatter = ColoredFormatter(console_fmt, DATEFMT, '{',
                                         use_colors=use_colors)
    return console_formatter, ram_formatter, html_formatter, use_colorama
def qt_message_handler(msg_type, context, msg):
    """Qt message handler to redirect qWarning etc. to the logging system.

    Intended to be installed via qInstallMessageHandler; it maps Qt's
    message levels to Python logging levels, demotes some well-known
    harmless messages to debug, and escalates a few to exceptions.

    Args:
        QtMsgType msg_type: The level of the message.
        QMessageLogContext context: The source code location of the message.
        msg: The message text.
    """
    # Mapping from Qt logging levels to the matching logging module levels.
    # Note we map critical to ERROR as it's actually "just" an error, and fatal
    # to critical.
    qt_to_logging = {
        QtCore.QtDebugMsg: logging.DEBUG,
        QtCore.QtWarningMsg: logging.WARNING,
        QtCore.QtCriticalMsg: logging.ERROR,
        QtCore.QtFatalMsg: logging.CRITICAL,
    }
    try:
        # pylint: disable=no-member,useless-suppression
        qt_to_logging[QtCore.QtInfoMsg] = logging.INFO
    except AttributeError:
        # Qt < 5.5
        pass
    # Change levels of some well-known messages to debug so they don't get
    # shown to the user.
    #
    # If a message starts with any text in suppressed_msgs, it's not logged as
    # error.
    suppressed_msgs = [
        # PNGs in Qt with broken color profile
        # https://bugreports.qt.io/browse/QTBUG-39788
        'libpng warning: iCCP: Not recognizing known sRGB profile that has '
        'been edited', # flake8: disable=E131
        'libpng warning: iCCP: known incorrect sRGB profile',
        # Hopefully harmless warning
        'OpenType support missing for script ',
        # Error if a QNetworkReply gets two different errors set. Harmless Qt
        # bug on some pages.
        # https://bugreports.qt.io/browse/QTBUG-30298
        'QNetworkReplyImplPrivate::error: Internal problem, this method must '
        'only be called once.',
        # Sometimes indicates missing text, but most of the time harmless
        'load glyph failed ',
        # Harmless, see https://bugreports.qt.io/browse/QTBUG-42479
        'content-type missing in HTTP POST, defaulting to '
        'application/x-www-form-urlencoded. '
        'Use QNetworkRequest::setHeader() to fix this problem.',
        # https://bugreports.qt.io/browse/QTBUG-43118
        'Using blocking call!',
        # Hopefully harmless
        '"Method "GetAll" with signature "s" on interface '
        '"org.freedesktop.DBus.Properties" doesn\'t exist',
        '"Method \\"GetAll\\" with signature \\"s\\" on interface '
        '\\"org.freedesktop.DBus.Properties\\" doesn\'t exist\\n"',
        'WOFF support requires QtWebKit to be built with zlib support.',
        # Weird Enlightment/GTK X extensions
        'QXcbWindow: Unhandled client message: "_E_',
        'QXcbWindow: Unhandled client message: "_ECORE_',
        'QXcbWindow: Unhandled client message: "_GTK_',
        # Happens on AppVeyor CI
        'SetProcessDpiAwareness failed:',
        # https://bugreports.qt.io/browse/QTBUG-49174
        'QObject::connect: Cannot connect (null)::stateChanged('
        'QNetworkSession::State) to '
        'QNetworkReplyHttpImpl::_q_networkSessionStateChanged('
        'QNetworkSession::State)',
        # https://bugreports.qt.io/browse/QTBUG-53989
        "Image of format '' blocked because it is not considered safe. If you "
        "are sure it is safe to do so, you can white-list the format by "
        "setting the environment variable QTWEBKIT_IMAGEFORMAT_WHITELIST=",
        # Installing Qt from the installer may cause it looking for SSL3 which
        # may not be available on the system
        "QSslSocket: cannot resolve SSLv3_client_method",
        "QSslSocket: cannot resolve SSLv3_server_method",
    ]
    if sys.platform == 'darwin':
        suppressed_msgs += [
            'libpng warning: iCCP: known incorrect sRGB profile',
            # https://bugreports.qt.io/browse/QTBUG-47154
            'virtual void QSslSocketBackendPrivate::transmit() SSLRead failed '
            'with: -9805', # flake8: disable=E131
        ]
    # Messages which will trigger an exception immediately
    critical_msgs = [
        'Could not parse stylesheet of object',
    ]
    if any(msg.strip().startswith(pattern) for pattern in critical_msgs):
        # For some reason, the stack gets lost when raising here...
        logger = logging.getLogger('misc')
        logger.error("Got critical Qt warning!", stack_info=True)
        raise CriticalQtWarning(msg)
    elif any(msg.strip().startswith(pattern) for pattern in suppressed_msgs):
        level = logging.DEBUG
    else:
        level = qt_to_logging[msg_type]
    # C++ function signatures contain ':', so quote them for readability.
    if context.function is None:
        func = 'none'
    elif ':' in context.function:
        func = '"{}"'.format(context.function)
    else:
        func = context.function
    # Use a per-category logger name so messages can be filtered per category.
    if context.category is None or context.category == 'default':
        name = 'qt'
    else:
        name = 'qt-' + context.category
    if msg.splitlines()[0] == ('This application failed to start because it '
                               'could not find or load the Qt platform plugin '
                               '"xcb".'):
        # Handle this message specially.
        msg += ("\n\nOn Archlinux, this should fix the problem:\n"
                " pacman -S libxkbcommon-x11")
        faulthandler.disable()
    # Record the Qt-side source location (file/line/function) in the log
    # record instead of this handler's location.
    stack = ''.join(traceback.format_stack())
    record = qt.makeRecord(name, level, context.file, context.line, msg, None,
                           None, func, sinfo=stack)
    qt.handle(record)
@contextlib.contextmanager
def hide_qt_warning(pattern, logger='qt'):
    """Temporarily suppress Qt warnings starting with the given pattern.

    Args:
        pattern: The prefix of warning messages to hide.
        logger: The name of the logger to attach the filter to.
    """
    warning_filter = QtWarningFilter(pattern)
    log_obj = logging.getLogger(logger)
    log_obj.addFilter(warning_filter)
    try:
        yield
    finally:
        # Always restore the logger, even if the body raised.
        log_obj.removeFilter(warning_filter)
class QtWarningFilter(logging.Filter):

    """Filter which drops Qt warnings starting with a given prefix.

    Attributes:
        _pattern: The prefix of messages to suppress.
    """

    def __init__(self, pattern):
        super().__init__()
        self._pattern = pattern

    def filter(self, record):
        """Return True if the given record should be logged."""
        return not record.msg.strip().startswith(self._pattern)
class LogFilter(logging.Filter):

    """Filter debug log records based on a list of logger names.

    The stdlib Filter only supports a single name to show - this supports
    a list of names instead.

    Attributes:
        _names: The logger names to let through, or None for all.
    """

    def __init__(self, names):
        super().__init__()
        self._names = names

    def filter(self, record):
        """Return True if the given record should be logged."""
        if self._names is None:
            return True
        if record.levelno > logging.DEBUG:
            # Only debug messages get filtered at all.
            return True
        for name in self._names:
            if record.name == name:
                return True
            # Also accept child loggers ("name.child") of a listed name.
            if (record.name.startswith(name) and
                    record.name[len(name)] == '.'):
                return True
        return False
class RAMHandler(logging.Handler):

    """Logging handler which buffers records in a deque in memory.

    Loosely based on logging.BufferingHandler, which is unsuitable
    because it uses a simple list rather than a (bounded) deque.

    Attributes:
        _data: The deque holding the buffered records.
        html_formatter: The formatter used for HTML dumps, set externally.
    """

    def __init__(self, capacity):
        super().__init__()
        self.html_formatter = None
        # A capacity of -1 means "unbounded".
        maxlen = None if capacity == -1 else capacity
        self._data = collections.deque(maxlen=maxlen)

    def emit(self, record):
        # VDEBUG records (below DEBUG) never get kept in RAM.
        if record.levelno >= logging.DEBUG:
            self._data.append(record)

    def dump_log(self, html=False, level='vdebug'):
        """Dump the complete formatted log data as string.

        FIXME: We should do all the HTML formatter via jinja2.
        (probably obsolete when moving to a widget for logging,
        https://github.com/The-Compiler/qutebrowser/issues/34
        """
        minlevel = LOG_LEVELS.get(level.upper(), VDEBUG_LEVEL)
        fmt = self.html_formatter.format if html else self.format
        # Snapshot the deque under the handler lock so formatting can
        # happen without holding it.
        self.acquire()
        try:
            records = list(self._data)
        finally:
            self.release()
        return '\n'.join(fmt(record) for record in records
                         if record.levelno >= minlevel)
class ColoredFormatter(logging.Formatter):

    """Logging formatter which optionally adds terminal color escapes.

    Attributes:
        _use_colors: Whether colored output is enabled.
    """

    def __init__(self, fmt, datefmt, style, *, use_colors):
        super().__init__(fmt, datefmt, style)
        self._use_colors = use_colors

    def format(self, record):
        if self._use_colors:
            color_dict = dict(COLOR_ESCAPES)
            color_dict['reset'] = RESET_ESCAPE
            # Pick the escape matching this record's level.
            color_dict['log_color'] = COLOR_ESCAPES[LOG_COLORS[record.levelname]]
        else:
            # Same keys, but all placeholders expand to nothing.
            color_dict = dict.fromkeys(COLOR_ESCAPES, '')
            color_dict['reset'] = ''
            color_dict['log_color'] = ''
        record.__dict__.update(color_dict)
        return super().format(record)
class HTMLFormatter(logging.Formatter):

    """Formatter for HTML-colored log messages.

    Attributes:
        _log_colors: The colors to use for logging levels.
        _colordict: The colordict passed to the logger.
    """

    def __init__(self, fmt, datefmt, log_colors):
        """Constructor.

        Args:
            fmt: The format string to use.
            datefmt: The date format to use.
            log_colors: The colors to use for logging levels.
        """
        super().__init__(fmt, datefmt)
        self._log_colors = log_colors
        self._colordict = {}
        # We could solve this nicer by using CSS, but for this simple case this
        # works.
        for color in COLORS:
            self._colordict[color] = '<font color="{}">'.format(color)
        self._colordict['reset'] = '</font>'

    def format(self, record):
        # Make the color placeholders in the format string expand to
        # <font> tags.
        record.__dict__.update(self._colordict)
        if record.levelname in self._log_colors:
            color = self._log_colors[record.levelname]
            record.log_color = self._colordict[color]
        else:
            record.log_color = ''
        # HTML-escape all string fields so log contents can't inject markup.
        for field in ['msg', 'filename', 'funcName', 'levelname', 'module',
                      'name', 'pathname', 'processName', 'threadName']:
            data = str(getattr(record, field))
            setattr(record, field, pyhtml.escape(data))
        msg = super().format(record)
        # Make sure the <font> tags stay balanced in the output.
        if not msg.endswith(self._colordict['reset']):
            msg += self._colordict['reset']
        return msg

    def formatTime(self, record, datefmt=None):
        out = super().formatTime(record, datefmt)
        return pyhtml.escape(out)
class JSONFormatter(logging.Formatter):

    """Formatter which emits each log record as one JSON object."""

    def format(self, record):
        fields = ['created', 'levelname', 'name', 'module', 'funcName',
                  'lineno', 'levelno']
        obj = {field: getattr(record, field) for field in fields}
        obj['message'] = record.getMessage()
        if record.exc_info is not None:
            obj['traceback'] = super().formatException(record.exc_info)
        return json.dumps(obj)
| 1 | 15,572 | Why are you creating a new (second) `RAMHandler` here? Is this just an oversight from deleting the previous code? | qutebrowser-qutebrowser | py |
@@ -25,7 +25,11 @@ void CreateEdgeIndexProcessor::process(const cpp2::CreateEdgeIndexReq& req) {
auto ret = getEdgeIndexID(space, indexName);
if (ret.ok()) {
LOG(ERROR) << "Create Edge Index Failed: " << indexName << " have existed";
- resp_.set_code(cpp2::ErrorCode::E_EXISTED);
+ if (req.get_if_not_exists()) {
+ resp_.set_code(cpp2::ErrorCode::SUCCEEDED);
+ } else {
+ resp_.set_code(cpp2::ErrorCode::E_EXISTED);
+ }
onFinished();
return;
} | 1 | /* Copyright (c) 2019 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "meta/processors/indexMan/CreateEdgeIndexProcessor.h"
namespace nebula {
namespace meta {
// Handles a CreateEdgeIndexReq: validates the requested fields against the
// latest edge schemas, allocates an index ID, and persists the index
// name -> ID and ID -> definition mappings.
//
// Fix: honor the request's if_not_exists flag.  "CREATE EDGE INDEX IF NOT
// EXISTS" on an existing index must report SUCCEEDED instead of E_EXISTED.
void CreateEdgeIndexProcessor::process(const cpp2::CreateEdgeIndexReq& req) {
auto space = req.get_space_id();
CHECK_SPACE_ID_AND_RETURN(space);
const auto &indexName = req.get_index_name();
const auto &properties = req.get_properties();
// An index over zero fields is meaningless; reject it up front.
if (properties.get_fields().empty()) {
LOG(ERROR) << "Edge's Field should not empty";
resp_.set_code(cpp2::ErrorCode::E_INVALID_PARM);
onFinished();
return;
}
folly::SharedMutex::WriteHolder wHolder(LockUtils::edgeIndexLock());
auto ret = getEdgeIndexID(space, indexName);
if (ret.ok()) {
// The index already exists.  With IF NOT EXISTS this is a success;
// otherwise it's an error.
if (req.get_if_not_exists()) {
resp_.set_code(cpp2::ErrorCode::SUCCEEDED);
} else {
LOG(ERROR) << "Create Edge Index Failed: " << indexName << " have existed";
resp_.set_code(cpp2::ErrorCode::E_EXISTED);
}
onFinished();
return;
}
// Resolve each requested (edge, fields) pair against the latest schema.
std::map<std::string, std::vector<nebula::cpp2::ColumnDef>> edgeColumns;
for (auto const &element : properties.get_fields()) {
auto edgeName = element.first;
auto edgeType = getEdgeType(space, edgeName);
if (!edgeType.ok()) {
LOG(ERROR) << "Create Edge Index Failed: " << edgeName << " not exist";
resp_.set_code(cpp2::ErrorCode::E_NOT_FOUND);
onFinished();
return;
}
auto fieldsResult = getLatestEdgeFields(space, edgeName);
if (!fieldsResult.ok()) {
LOG(ERROR) << "Get Latest Edge Property Name Failed";
resp_.set_code(cpp2::ErrorCode::E_NOT_FOUND);
onFinished();
return;
}
auto fields = fieldsResult.value();
std::vector<nebula::cpp2::ColumnDef> columns;
for (auto &field : element.second) {
// Every indexed field must exist on the edge schema.
auto iter = std::find_if(std::begin(fields), std::end(fields),
[field](const auto& pair) {
return field == pair.first;
});
if (iter == fields.end()) {
LOG(ERROR) << "Field " << field << " not found in Edge " << edgeName;
resp_.set_code(cpp2::ErrorCode::E_NOT_FOUND);
onFinished();
return;
} else {
auto type = fields[field];
nebula::cpp2::ColumnDef column;
column.set_name(std::move(field));
column.set_type(std::move(type));
columns.emplace_back(std::move(column));
}
}
edgeColumns.emplace(edgeName, std::move(columns));
}
nebula::meta::cpp2::IndexFields indexFields;
indexFields.set_fields(std::move(edgeColumns));
// Allocate a new index ID and write both mappings in one batch.
std::vector<kvstore::KV> data;
auto edgeIndexRet = autoIncrementId();
if (!nebula::ok(edgeIndexRet)) {
LOG(ERROR) << "Create edge index failed: Get edge index ID failed";
resp_.set_code(nebula::error(edgeIndexRet));
onFinished();
return;
}
auto edgeIndex = nebula::value(edgeIndexRet);
data.emplace_back(MetaServiceUtils::indexEdgeIndexKey(space, indexName),
std::string(reinterpret_cast<const char*>(&edgeIndex), sizeof(EdgeIndexID)));
data.emplace_back(MetaServiceUtils::edgeIndexKey(space, edgeIndex),
MetaServiceUtils::edgeIndexVal(indexName, indexFields));
LOG(INFO) << "Create Edge Index " << indexName << ", edgeIndex " << edgeIndex;
resp_.set_id(to(edgeIndex, EntryType::EDGE_INDEX));
doPut(std::move(data));
}
} // namespace meta
} // namespace nebula
| 1 | 26,294 | Move the line 27 to line 31 is better? | vesoft-inc-nebula | cpp |
@@ -4704,6 +4704,11 @@ func (fbo *folderBranchOps) applyMDUpdatesLocked(ctx context.Context,
return err
}
}
+ if rmd.IsRekeySet() {
+ fbo.rekeyFSM.Event(NewRekeyRequestEvent())
+ } else {
+ fbo.rekeyFSM.Event(NewRekeyNotNeededEvent())
+ }
appliedRevs = append(appliedRevs, rmd)
}
if len(appliedRevs) > 0 { | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"os"
"reflect"
"strings"
"sync"
"time"
"github.com/keybase/backoff"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-framed-msgpack-rpc/rpc"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfsmd"
"github.com/keybase/kbfs/kbfssync"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// mdReadType indicates whether a read needs identifies.
type mdReadType int
const (
// A read request that doesn't need an identify to be
// performed.
mdReadNoIdentify mdReadType = iota
// A read request that needs an identify to be performed (if
// it hasn't been already).
mdReadNeedIdentify
)
// mdUpdateType indicates update type.
type mdUpdateType int
const (
mdWrite mdUpdateType = iota
// A rekey request. Doesn't need an identify to be performed, as
// a rekey does its own (finer-grained) identifies.
mdRekey
)
type branchType int
const (
standard branchType = iota // an online, read-write branch
archive // an online, read-only branch
offline // an offline, read-write branch
archiveOffline // an offline, read-only branch
)
// Constants used in this file. TODO: Make these configurable?
const (
// MaxBlockSizeBytesDefault is the default maximum block size for KBFS.
// 512K blocks by default, block changes embedded max == 8K.
// Block size was chosen somewhat arbitrarily by trying to
// minimize the overall size of the history written by a user when
// appending 1KB writes to a file, up to a 1GB total file. Here
// is the output of a simple script that approximates that
// calculation:
//
// Total history size for 0065536-byte blocks: 1134341128192 bytes
// Total history size for 0131072-byte blocks: 618945052672 bytes
// Total history size for 0262144-byte blocks: 412786622464 bytes
// Total history size for 0524288-byte blocks: 412786622464 bytes
// Total history size for 1048576-byte blocks: 618945052672 bytes
// Total history size for 2097152-byte blocks: 1134341128192 bytes
// Total history size for 4194304-byte blocks: 2216672886784 bytes
MaxBlockSizeBytesDefault = 512 << 10
// Maximum number of blocks that can be sent in parallel
maxParallelBlockPuts = 100
// Maximum number of blocks that can be fetched in parallel
maxParallelBlockGets = 10
// Max response size for a single DynamoDB query is 1MB.
maxMDsAtATime = 10
// Cap the number of times we retry after a recoverable error
maxRetriesOnRecoverableErrors = 10
// When the number of dirty bytes exceeds this level, force a sync.
dirtyBytesThreshold = maxParallelBlockPuts * MaxBlockSizeBytesDefault
// The timeout for any background task.
backgroundTaskTimeout = 1 * time.Minute
// If it's been more than this long since our last update, check
// the current head before downloading all of the new revisions.
fastForwardTimeThresh = 15 * time.Minute
// If there are more than this many new revisions, fast forward
// rather than downloading them all.
fastForwardRevThresh = 50
)
type fboMutexLevel mutexLevel
const (
fboMDWriter fboMutexLevel = 1
fboHead fboMutexLevel = 2
fboBlock fboMutexLevel = 3
)
// String returns a human-readable name for this mutex level, or a
// diagnostic string for unknown values.
func (o fboMutexLevel) String() string {
	names := map[fboMutexLevel]string{
		fboMDWriter: "mdWriterLock",
		fboHead:     "headLock",
		fboBlock:    "blockLock",
	}
	if name, ok := names[o]; ok {
		return name
	}
	return fmt.Sprintf("Invalid fboMutexLevel %d", int(o))
}
// fboMutexLevelToString adapts fboMutexLevel.String to the generic
// mutexLevel-naming signature expected by makeLevelState.
func fboMutexLevelToString(o mutexLevel) string {
	return (fboMutexLevel(o)).String()
}
// Rules for working with lockState in FBO:
//
// - Every "execution flow" (i.e., program flow that happens
// sequentially) needs its own lockState object. This usually means
// that each "public" FBO method does:
//
// lState := makeFBOLockState()
//
// near the top.
//
// - Plumb lState through to all functions that hold any of the
// relevant locks, or are called under those locks.
//
// This way, violations of the lock hierarchy will be detected at
// runtime.
// makeFBOLockState returns a fresh lockState for a single execution flow,
// using fboMutexLevel names in lock-hierarchy violation messages.
func makeFBOLockState() *lockState {
	return makeLevelState(fboMutexLevelToString)
}
// blockLock is just like a sync.RWMutex, but with an extra operation
// (DoRUnlockedIfPossible).
type blockLock struct {
leveledRWMutex
locked bool
}
// Lock write-locks the underlying mutex and marks the lock as
// write-held, so DoRUnlockedIfPossible knows not to drop a read lock.
func (bl *blockLock) Lock(lState *lockState) {
	bl.leveledRWMutex.Lock(lState)
	bl.locked = true
}
// Unlock clears the write-held marker and releases the underlying
// write lock.
func (bl *blockLock) Unlock(lState *lockState) {
	bl.locked = false
	bl.leveledRWMutex.Unlock(lState)
}
// DoRUnlockedIfPossible must be called when r- or w-locked. If
// r-locked, r-unlocks, runs the given function, and r-locks after
// it's done. Otherwise, just runs the given function.
func (bl *blockLock) DoRUnlockedIfPossible(lState *lockState, f func(*lockState)) {
	// bl.locked is only true while write-locked (see Lock/Unlock), so a
	// false value here means we hold the read lock and can drop it.
	if !bl.locked {
		bl.RUnlock(lState)
		defer bl.RLock(lState)
	}
	f(lState)
}
// headTrustStatus marks whether the head is from a trusted or
// untrusted source. When rekeying we get the head MD by folder id
// and do not check the tlf handle
type headTrustStatus int
const (
headUntrusted headTrustStatus = iota
headTrusted
)
type cachedDirOp struct {
dirOp op
nodes []Node
}
// folderBranchOps implements the KBFSOps interface for a specific
// branch of a specific folder. It is go-routine safe for operations
// within the folder.
//
// We use locks to protect against multiple goroutines accessing the
// same folder-branch. The goal with our locking strategy is maximize
// concurrent access whenever possible. See design/state_machine.md
// for more details. There are three important locks:
//
// 1) mdWriterLock: Any "remote-sync" operation (one which modifies the
// folder's metadata) must take this lock during the entirety of
// its operation, to avoid forking the MD.
//
// 2) headLock: This is a read/write mutex. It must be taken for
// reading before accessing any part of the current head MD. It
// should be taken for the shortest time possible -- that means in
// general that it should be taken, and the MD copied to a
// goroutine-local variable, and then it can be released.
// Remote-sync operations should take it for writing after pushing
// all of the blocks and MD to the KBFS servers (i.e., all network
// accesses), and then hold it until after all notifications have
// been fired, to ensure that no concurrent "local" operations ever
// see inconsistent state locally.
//
// 3) blockLock: This too is a read/write mutex. It must be taken for
// reading before accessing any blocks in the block cache that
// belong to this folder/branch. This includes checking their
// dirty status. It should be taken for the shortest time possible
// -- that means in general it should be taken, and then the blocks
// that will be modified should be copied to local variables in the
// goroutine, and then it should be released. The blocks should
// then be modified locally, and then readied and pushed out
// remotely. Only after the blocks have been pushed to the server
// should a remote-sync operation take the lock again (this time
// for writing) and put/finalize the blocks. Write and Truncate
// should take blockLock for their entire lifetime, since they
// don't involve writes over the network. Furthermore, if a block
// is not in the cache and needs to be fetched, we should release
// the mutex before doing the network operation, and lock it again
// before writing the block back to the cache.
//
// We want to allow writes and truncates to a file that's currently
// being sync'd, like any good networked file system. The tricky part
// is making sure the changes can both: a) be read while the sync is
// happening, and b) be applied to the new file path after the sync is
// done.
//
// For now, we just do the dumb, brute force thing for now: if a block
// is currently being sync'd, it copies the block and puts it back
// into the cache as modified. Then, when the sync finishes, it
// throws away the modified blocks and re-applies the change to the
// new file path (which might have a completely different set of
// blocks, so we can't just reuse the blocks that were modified during
// the sync.)
type folderBranchOps struct {
config Config
folderBranch FolderBranch
bid BranchID // protected by mdWriterLock
bType branchType
observers *observerList
// these locks, when locked concurrently by the same goroutine,
// should only be taken in the following order to avoid deadlock:
mdWriterLock leveledMutex // taken by any method making MD modifications
dirOps []cachedDirOp
// protects access to head, headStatus, latestMergedRevision,
// and hasBeenCleared.
headLock leveledRWMutex
head ImmutableRootMetadata
headStatus headTrustStatus
// latestMergedRevision tracks the latest heard merged revision on server
latestMergedRevision kbfsmd.Revision
// Has this folder ever been cleared?
hasBeenCleared bool
blocks folderBlockOps
prepper folderUpdatePrepper
// nodeCache itself is goroutine-safe, but this object's use
// of it has special requirements:
//
// - Reads can call PathFromNode() unlocked, since there are
// no guarantees with concurrent reads.
//
// - Operations that takes mdWriterLock always needs the
// most up-to-date paths, so those must call
// PathFromNode() under mdWriterLock.
//
// - Block write operations (write/truncate/sync) need to
// coordinate. Specifically, sync must make sure that
// blocks referenced in a path (including all of the child
// blocks) must exist in the cache during calls to
// PathFromNode from write/truncate. This means that sync
// must modify dirty file blocks only under blockLock, and
// write/truncate must call PathFromNode() under
// blockLock.
//
// Furthermore, calls to UpdatePointer() must happen
// before the copy-on-write mode induced by Sync() is
// finished.
nodeCache NodeCache
// Whether we've identified this TLF or not.
identifyLock sync.Mutex
identifyDone bool
identifyTime time.Time
// The current status summary for this folder
status *folderBranchStatusKeeper
// How to log
log traceLogger
deferLog traceLogger
// Closed on shutdown
shutdownChan chan struct{}
// Can be used to turn off notifications for a while (e.g., for testing)
updatePauseChan chan (<-chan struct{})
cancelUpdatesLock sync.Mutex
// Cancels the goroutine currently waiting on TLF MD updates.
cancelUpdates context.CancelFunc
// After a shutdown, this channel will be closed when the register
// goroutine completes.
updateDoneChan chan struct{}
// forceSyncChan is read from by the background sync process
// to know when it should sync immediately.
forceSyncChan <-chan struct{}
// syncNeededChan is signalled when a buffered write happens, and
// lets the background syncer wait rather than waking up all the
// time.
syncNeededChan chan struct{}
// How to resolve conflicts
cr *ConflictResolver
// Helper class for archiving and cleaning up the blocks for this TLF
fbm *folderBlockManager
rekeyFSM RekeyFSM
editHistory *TlfEditHistory
branchChanges kbfssync.RepeatedWaitGroup
mdFlushes kbfssync.RepeatedWaitGroup
forcedFastForwards kbfssync.RepeatedWaitGroup
muLastGetHead sync.Mutex
// We record a timestamp everytime getHead or getTrustedHead is called, and
// use this as a heuristic for whether user is actively using KBFS. If user
// has been generating KBFS activities recently, it makes sense to try to
// reconnect as soon as possible in case of a deployment causes
// disconnection.
lastGetHead time.Time
}
var _ KBFSOps = (*folderBranchOps)(nil)
var _ fbmHelper = (*folderBranchOps)(nil)
// newFolderBranchOps constructs a new folderBranchOps object for the
// given folder-branch, wiring up its block ops, conflict resolver,
// block manager, edit history and rekey FSM.
func newFolderBranchOps(config Config, fb FolderBranch,
	bType branchType) *folderBranchOps {
	var nodeCache NodeCache
	if config.Mode() == InitMinimal {
		// If we're in minimal mode, let the node cache remain nil to
		// ensure that the user doesn't try any data reads or writes.
	} else {
		nodeCache = newNodeCacheStandard(fb)
	}
	// make logger
	branchSuffix := ""
	if fb.Branch != MasterBranch {
		branchSuffix = " " + string(fb.Branch)
	}
	tlfStringFull := fb.Tlf.String()
	// Shorten the TLF ID for the module name. 8 characters should be
	// unique enough for a local node.
	log := config.MakeLogger(fmt.Sprintf("FBO %s%s", tlfStringFull[:8],
		branchSuffix))
	// But print it out once in full, just in case.
	log.CInfof(nil, "Created new folder-branch for %s", tlfStringFull)
	observers := newObserverList()
	// The locks below follow the hierarchy documented on folderBranchOps.
	mdWriterLock := makeLeveledMutex(mutexLevel(fboMDWriter), &sync.Mutex{})
	headLock := makeLeveledRWMutex(mutexLevel(fboHead), &sync.RWMutex{})
	blockLockMu := makeLeveledRWMutex(mutexLevel(fboBlock), &sync.RWMutex{})
	forceSyncChan := make(chan struct{})
	fbo := &folderBranchOps{
		config:       config,
		folderBranch: fb,
		bid:          BranchID{},
		bType:        bType,
		observers:    observers,
		status:       newFolderBranchStatusKeeper(config, nodeCache),
		mdWriterLock: mdWriterLock,
		headLock:     headLock,
		blocks: folderBlockOps{
			config:        config,
			log:           log,
			folderBranch:  fb,
			observers:     observers,
			forceSyncChan: forceSyncChan,
			blockLock: blockLock{
				leveledRWMutex: blockLockMu,
			},
			dirtyFiles: make(map[BlockPointer]*dirtyFile),
			deferred:   make(map[BlockRef]deferredState),
			unrefCache: make(map[BlockRef]*syncInfo),
			deCache:    make(map[BlockRef]deCacheEntry),
			nodeCache:  nodeCache,
		},
		nodeCache:       nodeCache,
		log:             traceLogger{log},
		deferLog:        traceLogger{log.CloneWithAddedDepth(1)},
		shutdownChan:    make(chan struct{}),
		updatePauseChan: make(chan (<-chan struct{})),
		forceSyncChan:   forceSyncChan,
		syncNeededChan:  make(chan struct{}, 1),
	}
	fbo.prepper = folderUpdatePrepper{
		config:       config,
		folderBranch: fb,
		blocks:       &fbo.blocks,
		log:          log,
	}
	fbo.cr = NewConflictResolver(config, fbo)
	fbo.fbm = newFolderBlockManager(config, fb, fbo)
	fbo.editHistory = NewTlfEditHistory(config, fbo, log)
	fbo.rekeyFSM = NewRekeyFSM(fbo)
	if config.DoBackgroundFlushes() {
		// Background flusher runs until shutdownChan is closed.
		go fbo.backgroundFlusher()
	}
	return fbo
}
// markForReIdentifyIfNeeded checks whether this tlf is identified and mark
// it for lazy reidentification if it exceeds time limits.
func (fbo *folderBranchOps) markForReIdentifyIfNeeded(now time.Time, maxValid time.Duration) {
	fbo.identifyLock.Lock()
	defer fbo.identifyLock.Unlock()
	// Expire the identify if it appears to be from the future (clock
	// went backwards) or is older than maxValid.
	if fbo.identifyDone && (now.Before(fbo.identifyTime) || fbo.identifyTime.Add(maxValid).Before(now)) {
		fbo.log.CDebugf(nil, "Expiring identify from %v", fbo.identifyTime)
		fbo.identifyDone = false
	}
}
// Shutdown safely shuts down any background goroutines that may have
// been launched by folderBranchOps.  If CheckStateOnShutdown is
// enabled and the folder is clean and unstaged, it also syncs from the
// server and verifies the merged state before shutting down.
func (fbo *folderBranchOps) Shutdown(ctx context.Context) error {
	if fbo.config.CheckStateOnShutdown() {
		lState := makeFBOLockState()
		if fbo.blocks.GetState(lState) == dirtyState {
			fbo.log.CDebugf(ctx, "Skipping state-checking due to dirty state")
		} else if !fbo.isMasterBranch(lState) {
			fbo.log.CDebugf(ctx, "Skipping state-checking due to being staged")
		} else {
			// Make sure we're up to date first
			if err := fbo.SyncFromServerForTesting(ctx, fbo.folderBranch); err != nil {
				return err
			}
			// Check the state for consistency before shutting down.
			sc := NewStateChecker(fbo.config)
			if err := sc.CheckMergedState(ctx, fbo.id()); err != nil {
				return err
			}
		}
	}
	// Closing shutdownChan signals all background goroutines to exit.
	close(fbo.shutdownChan)
	fbo.cr.Shutdown()
	fbo.fbm.shutdown()
	fbo.editHistory.Shutdown()
	fbo.rekeyFSM.Shutdown()
	// Wait for the update goroutine to finish, so that we don't have
	// any races with logging during test reporting.
	if fbo.updateDoneChan != nil {
		<-fbo.updateDoneChan
	}
	return nil
}
// id returns the TLF ID of the folder this object operates on.
func (fbo *folderBranchOps) id() tlf.ID {
	return fbo.folderBranch.Tlf
}
// branch returns the branch name this object operates on.
func (fbo *folderBranchOps) branch() BranchName {
	return fbo.folderBranch.Branch
}
// GetFavorites implements the KBFSOps interface; favorites are handled
// above the per-folder level, so this always returns an error.
func (fbo *folderBranchOps) GetFavorites(ctx context.Context) (
	[]Favorite, error) {
	return nil, errors.New("GetFavorites is not supported by folderBranchOps")
}
// RefreshCachedFavorites implements the KBFSOps interface; favorites
// are handled above the per-folder level, so this is a no-op here.
func (fbo *folderBranchOps) RefreshCachedFavorites(ctx context.Context) {
	// no-op
}
// DeleteFavorite implements the KBFSOps interface; favorites are
// handled above the per-folder level, so this always returns an error.
func (fbo *folderBranchOps) DeleteFavorite(ctx context.Context,
	fav Favorite) error {
	return errors.New("DeleteFavorite is not supported by folderBranchOps")
}
// AddFavorite implements the KBFSOps interface; favorites are handled
// above the per-folder level, so this always returns an error.
func (fbo *folderBranchOps) AddFavorite(ctx context.Context,
	fav Favorite) error {
	return errors.New("AddFavorite is not supported by folderBranchOps")
}
// addToFavorites adds this folder to the given favorites list, using
// the handle from the current trusted head.  Fails if no trusted head
// has been set yet.
func (fbo *folderBranchOps) addToFavorites(ctx context.Context,
	favorites *Favorites, created bool) (err error) {
	lState := makeFBOLockState()
	head := fbo.getTrustedHead(lState)
	if head == (ImmutableRootMetadata{}) {
		return OpsCantHandleFavorite{"Can't add a favorite without a handle"}
	}
	return fbo.addToFavoritesByHandle(ctx, favorites, head.GetTlfHandle(), created)
}
// addToFavoritesByHandle asynchronously adds the folder with the given
// handle to the favorites list.  It's a silent no-op when not logged in.
func (fbo *folderBranchOps) addToFavoritesByHandle(ctx context.Context,
	favorites *Favorites, handle *TlfHandle, created bool) (err error) {
	if _, err := fbo.config.KBPKI().GetCurrentSession(ctx); err != nil {
		// Can't favorite while not logged in
		return nil
	}
	favorites.AddAsync(ctx, handle.toFavToAdd(created))
	return nil
}
// deleteFromFavorites removes this folder from the given favorites
// list, using the handle from the current trusted head.  It's a silent
// no-op when not logged in.
func (fbo *folderBranchOps) deleteFromFavorites(ctx context.Context,
	favorites *Favorites) error {
	if _, err := fbo.config.KBPKI().GetCurrentSession(ctx); err != nil {
		// Can't unfavorite while not logged in
		return nil
	}
	lState := makeFBOLockState()
	head := fbo.getTrustedHead(lState)
	if head == (ImmutableRootMetadata{}) {
		// This can happen when identifies fail and the head is never set.
		return OpsCantHandleFavorite{"Can't delete a favorite without a handle"}
	}
	h := head.GetTlfHandle()
	return favorites.Delete(ctx, h.ToFavorite())
}
// doFavoritesOp dispatches the given favorites operation, preferring
// the explicitly-provided handle (when non-nil) over the current head's
// handle for add operations.
func (fbo *folderBranchOps) doFavoritesOp(ctx context.Context,
	favs *Favorites, fop FavoritesOp, handle *TlfHandle) error {
	switch fop {
	case FavoritesOpNoChange:
		return nil
	case FavoritesOpAdd, FavoritesOpAddNewlyCreated:
		created := fop == FavoritesOpAddNewlyCreated
		if handle != nil {
			return fbo.addToFavoritesByHandle(ctx, favs, handle, created)
		}
		return fbo.addToFavorites(ctx, favs, created)
	case FavoritesOpRemove:
		return fbo.deleteFromFavorites(ctx, favs)
	default:
		return InvalidFavoritesOpError{}
	}
}
// updateLastGetHeadTimestamp records the current time as the last head
// access, used as a heuristic for recent KBFS activity (see the comment
// on the lastGetHead field).
func (fbo *folderBranchOps) updateLastGetHeadTimestamp() {
	fbo.muLastGetHead.Lock()
	defer fbo.muLastGetHead.Unlock()
	fbo.lastGetHead = fbo.config.Clock().Now()
}
// getTrustedHead should not be called outside of folder_branch_ops.go.
// Returns ImmutableRootMetadata{} when the head is not trusted.
// See the comment on headTrustStatus for more information.
func (fbo *folderBranchOps) getTrustedHead(lState *lockState) ImmutableRootMetadata {
	fbo.headLock.RLock(lState)
	defer fbo.headLock.RUnlock(lState)
	if fbo.headStatus == headUntrusted {
		return ImmutableRootMetadata{}
	}
	// This triggers any mdserver backoff timer to fast forward. In case of a
	// deployment, this causes KBFS client to try to reconnect to mdserver
	// immediately rather than waiting until the random backoff timer is up.
	// Note that this doesn't necessarily guarantee that the fbo handler that
	// called this method would get latest MD.
	fbo.config.MDServer().FastForwardBackoff()
	fbo.updateLastGetHeadTimestamp()
	return fbo.head
}
// getHead should not be called outside of folder_branch_ops.go.
// Unlike getTrustedHead, it also reports the head's trust status.
func (fbo *folderBranchOps) getHead(lState *lockState) (
	ImmutableRootMetadata, headTrustStatus) {
	fbo.headLock.RLock(lState)
	defer fbo.headLock.RUnlock(lState)
	// See getTrustedHead for why we fast-forward the backoff here.
	fbo.config.MDServer().FastForwardBackoff()
	fbo.updateLastGetHeadTimestamp()
	head, status := fbo.head, fbo.headStatus
	return head, status
}
// isMasterBranch reports whether this FBO is on the master (merged)
// branch. It must not be called with mdWriterLock already held.
func (fbo *folderBranchOps) isMasterBranch(lState *lockState) bool {
	fbo.mdWriterLock.Lock(lState)
	defer fbo.mdWriterLock.Unlock(lState)
	return fbo.isMasterBranchLocked(lState)
}
// isMasterBranchLocked is like isMasterBranch, but requires the
// caller to already hold mdWriterLock.
func (fbo *folderBranchOps) isMasterBranchLocked(lState *lockState) bool {
	fbo.mdWriterLock.AssertLocked(lState)
	onMaster := fbo.bid == NullBranchID
	return onMaster
}
// setBranchIDLocked switches this FBO to the given branch ID,
// starting a fresh conflict-resolution branch whenever the ID
// actually changes. mdWriterLock must be held.
func (fbo *folderBranchOps) setBranchIDLocked(lState *lockState, bid BranchID) {
	fbo.mdWriterLock.AssertLocked(lState)
	changed := fbo.bid != bid
	if changed {
		fbo.cr.BeginNewBranch()
	}
	fbo.bid = bid
	if bid == NullBranchID {
		// Back on master: clear any conflict-resolution summary.
		fbo.status.setCRSummary(nil, nil)
	}
}
// errNoFlushedRevisions indicates that no MD revisions have been
// flushed to the server yet for this TLF.
var errNoFlushedRevisions = errors.New("No flushed MDs yet")

// errNoMergedRevWhileStaged indicates the most recent merged revision
// cannot be determined because the journal is on an unmerged branch.
var errNoMergedRevWhileStaged = errors.New(
	"Cannot find most recent merged revision while staged")
// getJournalPredecessorRevision returns the revision that precedes
// the current journal head if journaling enabled and there are
// unflushed MD updates; otherwise it returns
// kbfsmd.RevisionUninitialized. If there aren't any flushed MD
// revisions, it returns errNoFlushedRevisions.
func (fbo *folderBranchOps) getJournalPredecessorRevision(ctx context.Context) (
	kbfsmd.Revision, error) {
	jServer, err := GetJournalServer(fbo.config)
	if err != nil {
		// Journaling is disabled entirely.
		return kbfsmd.RevisionUninitialized, nil
	}
	jStatus, err := jServer.JournalStatus(fbo.id())
	if err != nil {
		// Journaling is disabled for this TLF, so use the local head.
		// TODO: JournalStatus could return other errors (likely
		// file/disk corruption) that indicate a real problem, so it
		// might be nice to type those errors so we can distinguish
		// them.
		return kbfsmd.RevisionUninitialized, nil
	}
	if jStatus.BranchID != NullBranchID.String() {
		// The journal is staged on a conflict branch, so there's no
		// well-defined most-recent-merged revision to report.
		return kbfsmd.RevisionUninitialized, errNoMergedRevWhileStaged
	}
	if jStatus.RevisionStart == kbfsmd.RevisionUninitialized {
		// The journal is empty, so the local head must be the most recent.
		return kbfsmd.RevisionUninitialized, nil
	} else if jStatus.RevisionStart == kbfsmd.RevisionInitial {
		// Nothing has been flushed to the servers yet, so don't
		// return anything.
		return kbfsmd.RevisionUninitialized, errNoFlushedRevisions
	}
	// RevisionStart is the first unflushed revision, so its immediate
	// predecessor is the last one flushed to the server.
	return jStatus.RevisionStart - 1, nil
}
// validateHeadLocked validates an untrusted head and sets it as trusted.
// see headTrustedState comment for more information.
func (fbo *folderBranchOps) validateHeadLocked(
	ctx context.Context, lState *lockState, md ImmutableRootMetadata) error {
	fbo.headLock.AssertLocked(lState)

	// The fetched MD is only used to cross-check the current head's
	// TLF ID; it is discarded afterwards.
	headID, fetchedID := fbo.head.TlfID(), md.TlfID()
	if headID != fetchedID {
		fbo.log.CCriticalf(ctx, "Fake untrusted TLF encountered %v %v %v %v", fbo.head.TlfID(), md.TlfID(), fbo.head.mdID, md.mdID)
		return MDTlfIDMismatch{headID, fetchedID}
	}

	// The check passed, so promote the existing head to trusted.
	fbo.headStatus = headTrusted
	return nil
}
// setHeadLocked installs md as the new head and updates all derived
// state: branch ID, latest-merged-revision tracking, status, update
// registration, and readability notifications. Both mdWriterLock and
// headLock must be held. headStatus says whether md came from a
// trusted source (see headTrustStatus).
func (fbo *folderBranchOps) setHeadLocked(
	ctx context.Context, lState *lockState,
	md ImmutableRootMetadata, headStatus headTrustStatus) error {
	fbo.mdWriterLock.AssertLocked(lState)
	fbo.headLock.AssertLocked(lState)
	isFirstHead := fbo.head == ImmutableRootMetadata{}
	wasReadable := false
	if !isFirstHead {
		// An untrusted head may only be installed as the very first
		// head; replacing an existing head with one is a bug.
		if headStatus == headUntrusted {
			panic("setHeadLocked: Trying to set an untrusted head over an existing head")
		}
		wasReadable = fbo.head.IsReadable()
		if fbo.headStatus == headUntrusted {
			err := fbo.validateHeadLocked(ctx, lState, md)
			if err != nil {
				return err
			}
			// Same MD as the (now-validated) head: nothing to do.
			if fbo.head.mdID == md.mdID {
				return nil
			}
		}
		// Re-setting an identical, already-trusted head indicates a
		// logic error somewhere upstream.
		if fbo.head.mdID == md.mdID {
			panic(errors.Errorf("Re-putting the same MD: %s", md.mdID))
		}
	}
	fbo.log.CDebugf(ctx, "Setting head revision to %d", md.Revision())
	// If this is the first time the MD is being set, and we are
	// operating on unmerged data, initialize the state properly and
	// kick off conflict resolution.
	if isFirstHead && md.MergedStatus() == Unmerged {
		fbo.setBranchIDLocked(lState, md.BID())
		// Use uninitialized for the merged branch; the unmerged
		// revision is enough to trigger conflict resolution.
		fbo.cr.Resolve(ctx, md.Revision(), kbfsmd.RevisionUninitialized)
	} else if md.MergedStatus() == Merged {
		journalEnabled := TLFJournalEnabled(fbo.config, fbo.id())
		if journalEnabled {
			if isFirstHead {
				// If journaling is on, and this is the first head
				// we're setting, we have to make sure we use the
				// server's notion of the latest MD, not the one
				// potentially coming from our journal. If there are
				// no flushed revisions, it's not a hard error, and we
				// just leave the latest merged revision
				// uninitialized.
				journalPred, err := fbo.getJournalPredecessorRevision(ctx)
				switch err {
				case nil:
					// journalPred will be
					// kbfsmd.RevisionUninitialized when the journal
					// is empty.
					if journalPred >= kbfsmd.RevisionInitial {
						fbo.setLatestMergedRevisionLocked(
							ctx, lState, journalPred, false)
					} else {
						fbo.setLatestMergedRevisionLocked(ctx, lState,
							md.Revision(), false)
					}
				case errNoFlushedRevisions:
					// The server has no revisions, so leave the
					// latest merged revision uninitialized.
				default:
					return err
				}
			} else {
				// If this isn't the first head, then this is either
				// an update from the server, or an update just
				// written by the client. But since journaling is on,
				// then latter case will be handled by onMDFlush when
				// the update is properly flushed to the server. So
				// ignore updates written by this device.
				session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
				if err != nil {
					return err
				}
				if session.VerifyingKey != md.LastModifyingWriterVerifyingKey() {
					fbo.setLatestMergedRevisionLocked(
						ctx, lState, md.Revision(), false)
				}
			}
		} else {
			// This is a merged revision, and journaling is disabled,
			// so it's definitely the latest revision on the server as
			// well.
			fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false)
		}
	}
	// Make sure that any unembedded block changes have been swapped
	// back in.
	if fbo.config.Mode() == InitDefault &&
		md.data.Changes.Info.BlockPointer != zeroPtr &&
		len(md.data.Changes.Ops) == 0 {
		return errors.New("Must swap in block changes before setting head")
	}
	fbo.head = md
	if isFirstHead && headStatus == headTrusted {
		fbo.headStatus = headTrusted
	}
	fbo.status.setRootMetadata(md)
	if isFirstHead {
		// Start registering for updates right away, using this MD
		// as a starting point. For now only the master branch can
		// get updates
		if fbo.branch() == MasterBranch {
			fbo.updateDoneChan = make(chan struct{})
			go fbo.registerAndWaitForUpdates()
		}
	}
	if !wasReadable && md.IsReadable() {
		// Let any listeners know that this folder is now readable,
		// which may indicate that a rekey successfully took place.
		fbo.config.Reporter().Notify(ctx, mdReadSuccessNotification(
			md.GetTlfHandle(), md.TlfID().Type() == tlf.Public))
	}
	return nil
}
// setInitialHeadUntrustedLocked is for when the given RootMetadata
// was fetched not due to a user action, i.e. via a Rekey
// notification, and we don't have a TLF name to check against.
func (fbo *folderBranchOps) setInitialHeadUntrustedLocked(ctx context.Context,
	lState *lockState, md ImmutableRootMetadata) error {
	fbo.mdWriterLock.AssertLocked(lState)
	fbo.headLock.AssertLocked(lState)

	// This path may only set the very first head.
	if head := fbo.head; head != (ImmutableRootMetadata{}) {
		return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked")
	}
	return fbo.setHeadLocked(ctx, lState, md, headUntrusted)
}
// setNewInitialHeadLocked is for when we're creating a brand-new TLF.
// This is trusted.
func (fbo *folderBranchOps) setNewInitialHeadLocked(ctx context.Context,
	lState *lockState, md ImmutableRootMetadata) error {
	fbo.mdWriterLock.AssertLocked(lState)
	fbo.headLock.AssertLocked(lState)

	// A brand-new TLF must have no head yet and must start at the
	// initial revision.
	if head := fbo.head; head != (ImmutableRootMetadata{}) {
		return errors.New("Unexpected non-nil head in setNewInitialHeadLocked")
	}
	if rev := md.Revision(); rev != kbfsmd.RevisionInitial {
		return errors.Errorf("setNewInitialHeadLocked unexpectedly called with revision %d", rev)
	}
	return fbo.setHeadLocked(ctx, lState, md, headTrusted)
}
// setInitialHeadTrustedLocked is for when the given RootMetadata
// was fetched due to a user action, and will be checked against the
// TLF name.
func (fbo *folderBranchOps) setInitialHeadTrustedLocked(ctx context.Context,
	lState *lockState, md ImmutableRootMetadata) error {
	fbo.mdWriterLock.AssertLocked(lState)
	fbo.headLock.AssertLocked(lState)
	if fbo.head != (ImmutableRootMetadata{}) {
		// Fix: this message previously named setInitialHeadUntrustedLocked
		// (a copy-paste slip), which made failures here look like they
		// came from the untrusted code path.
		return errors.New("Unexpected non-nil head in setInitialHeadTrustedLocked")
	}
	return fbo.setHeadLocked(ctx, lState, md, headTrusted)
}
// setHeadSuccessorLocked is for when we're applying updates from the
// server or when we're applying new updates we created ourselves.
// rebased indicates the update came from a rebase, in which case the
// usual successor check against the current head is skipped.
func (fbo *folderBranchOps) setHeadSuccessorLocked(ctx context.Context,
	lState *lockState, md ImmutableRootMetadata, rebased bool) error {
	fbo.mdWriterLock.AssertLocked(lState)
	fbo.headLock.AssertLocked(lState)
	if fbo.head == (ImmutableRootMetadata{}) {
		// This can happen in tests via SyncFromServerForTesting().
		return fbo.setInitialHeadTrustedLocked(ctx, lState, md)
	}
	if !rebased {
		err := fbo.head.CheckValidSuccessor(fbo.head.mdID, md.ReadOnly())
		if err != nil {
			return err
		}
	}
	oldHandle := fbo.head.GetTlfHandle()
	newHandle := md.GetTlfHandle()
	// Newer handles should be equal or more resolved over time.
	//
	// TODO: In some cases, they shouldn't, e.g. if we're on an
	// unmerged branch. Add checks for this.
	resolvesTo, partialResolvedOldHandle, err :=
		oldHandle.ResolvesTo(
			ctx, fbo.config.Codec(), fbo.config.KBPKI(),
			*newHandle)
	if err != nil {
		return err
	}
	oldName := oldHandle.GetCanonicalName()
	newName := newHandle.GetCanonicalName()
	// Reject heads whose handle is not a valid resolution of the
	// current one (i.e., assertions resolved differently).
	if !resolvesTo {
		return IncompatibleHandleError{
			oldName,
			partialResolvedOldHandle.GetCanonicalName(),
			newName,
		}
	}
	err = fbo.setHeadLocked(ctx, lState, md, headTrusted)
	if err != nil {
		return err
	}
	if oldName != newName {
		fbo.log.CDebugf(ctx, "Handle changed (%s -> %s)",
			oldName, newName)
		// If the handle has changed, send out a notification.
		fbo.observers.tlfHandleChange(ctx, fbo.head.GetTlfHandle())
		// Also the folder should be re-identified given the
		// newly-resolved assertions.
		func() {
			fbo.identifyLock.Lock()
			defer fbo.identifyLock.Unlock()
			fbo.identifyDone = false
		}()
	}
	return nil
}
// setHeadPredecessorLocked is for when we're unstaging updates.
// md must be an earlier revision of which the current (unmerged) head
// is a valid successor.
func (fbo *folderBranchOps) setHeadPredecessorLocked(ctx context.Context,
	lState *lockState, md ImmutableRootMetadata) error {
	fbo.mdWriterLock.AssertLocked(lState)
	fbo.headLock.AssertLocked(lState)
	if fbo.head == (ImmutableRootMetadata{}) {
		return errors.New("Unexpected nil head in setHeadPredecessorLocked")
	}
	// There must be at least one revision before the head to roll
	// back to.
	if fbo.head.Revision() <= kbfsmd.RevisionInitial {
		return errors.Errorf("setHeadPredecessorLocked unexpectedly called with revision %d", fbo.head.Revision())
	}
	// Unstaging only makes sense when the current head is unmerged.
	if fbo.head.MergedStatus() != Unmerged {
		return errors.New("Unexpected merged head in setHeadPredecessorLocked")
	}
	// Verify the current head really is a descendant of md.
	err := md.CheckValidSuccessor(md.mdID, fbo.head.ReadOnly())
	if err != nil {
		return err
	}
	oldHandle := fbo.head.GetTlfHandle()
	newHandle := md.GetTlfHandle()
	// The two handles must be the same, since no rekeying is done
	// while unmerged.
	eq, err := oldHandle.Equals(fbo.config.Codec(), *newHandle)
	if err != nil {
		return err
	}
	if !eq {
		return errors.Errorf(
			"head handle %v unexpectedly not equal to new handle = %v",
			oldHandle, newHandle)
	}
	return fbo.setHeadLocked(ctx, lState, md, headTrusted)
}
// setHeadConflictResolvedLocked is for when we're setting the merged
// update with resolved conflicts.
func (fbo *folderBranchOps) setHeadConflictResolvedLocked(ctx context.Context,
	lState *lockState, md ImmutableRootMetadata) error {
	fbo.mdWriterLock.AssertLocked(lState)
	fbo.headLock.AssertLocked(lState)

	// Sanity checks: the current head must still be unmerged, and the
	// incoming resolution must itself be merged.
	if fbo.head.MergedStatus() != Unmerged {
		return errors.New("Unexpected merged head in setHeadConflictResolvedLocked")
	}
	if md.MergedStatus() != Merged {
		return errors.New("Unexpected unmerged update in setHeadConflictResolvedLocked")
	}

	return fbo.setHeadLocked(ctx, lState, md, headTrusted)
}
// identifyOnce runs identifies on the TLF's handle, caching success so
// later calls are no-ops (unless the identify behavior in ctx demands
// a re-run). Failures are not cached, so the next caller retries.
func (fbo *folderBranchOps) identifyOnce(
	ctx context.Context, md ReadOnlyRootMetadata) error {
	fbo.identifyLock.Lock()
	defer fbo.identifyLock.Unlock()
	ei := getExtendedIdentify(ctx)
	if fbo.identifyDone && !ei.behavior.AlwaysRunIdentify() {
		// TODO: provide a way for the service to break this cache when identify
		// state changes on a TLF. For now, we do it this way to make chat work.
		return nil
	}
	h := md.GetTlfHandle()
	fbo.log.CDebugf(ctx, "Running identifies on %s", h.GetCanonicalPath())
	kbpki := fbo.config.KBPKI()
	err := identifyHandle(ctx, kbpki, kbpki, h)
	if err != nil {
		fbo.log.CDebugf(ctx, "Identify finished with error: %v", err)
		// For now, if the identify fails, let the
		// next function to hit this code path retry.
		return err
	}
	// Only mark identify as done when it fully succeeded: broken-track
	// warnings and chat-skip runs don't populate the cache.
	if ei.behavior.WarningInsteadOfErrorOnBrokenTracks() &&
		len(ei.getTlfBreakAndClose().Breaks) > 0 {
		fbo.log.CDebugf(ctx,
			"Identify finished with no error but broken proof warnings")
	} else if ei.behavior == keybase1.TLFIdentifyBehavior_CHAT_SKIP {
		fbo.log.CDebugf(ctx, "Identify skipped")
	} else {
		fbo.log.CDebugf(ctx, "Identify finished successfully")
		fbo.identifyDone = true
		fbo.identifyTime = fbo.config.Clock().Now()
	}
	return nil
}
// getMDForReadLocked returns an existing md for a read
// operation. Note that mds will not be fetched here.
func (fbo *folderBranchOps) getMDForReadLocked(
	ctx context.Context, lState *lockState, rtype mdReadType) (
	md ImmutableRootMetadata, err error) {
	switch rtype {
	case mdReadNeedIdentify, mdReadNoIdentify:
		// Acceptable read types.
	default:
		panic("Invalid rtype in getMDLockedForRead")
	}

	md = fbo.getTrustedHead(lState)
	if md == (ImmutableRootMetadata{}) {
		// No trusted head yet; the caller must retry via a write path.
		return ImmutableRootMetadata{}, MDWriteNeededInRequest{}
	}
	if rtype == mdReadNoIdentify {
		return md, nil
	}
	return md, fbo.identifyOnce(ctx, md.ReadOnly())
}
// getMDForWriteOrRekeyLocked can fetch MDs, identify them and
// contains the fancy logic. For reading use getMDLockedForRead.
// Here we actually can fetch things from the server.
// rekeys are untrusted.
func (fbo *folderBranchOps) getMDForWriteOrRekeyLocked(
	ctx context.Context, lState *lockState, mdType mdUpdateType) (
	md ImmutableRootMetadata, err error) {
	defer func() {
		if err != nil || mdType == mdRekey {
			return
		}
		err = fbo.identifyOnce(ctx, md.ReadOnly())
	}()
	md = fbo.getTrustedHead(lState)
	if md != (ImmutableRootMetadata{}) {
		return md, nil
	}
	// MDs coming from from rekey notifications are marked untrusted.
	//
	// TODO: Make tests not take this code path.
	fbo.mdWriterLock.AssertLocked(lState)
	// Not in cache, fetch from server and add to cache. First, see
	// if this device has any unmerged commits -- take the latest one.
	mdops := fbo.config.MDOps()
	// get the head of the unmerged branch for this device (if any)
	md, err = mdops.GetUnmergedForTLF(ctx, fbo.id(), NullBranchID)
	if err != nil {
		return ImmutableRootMetadata{}, err
	}
	mergedMD, err := mdops.GetForTLF(ctx, fbo.id())
	if err != nil {
		return ImmutableRootMetadata{}, err
	}
	if mergedMD == (ImmutableRootMetadata{}) {
		return ImmutableRootMetadata{},
			errors.WithStack(NoMergedMDError{fbo.id()})
	}
	if md == (ImmutableRootMetadata{}) {
		// There are no unmerged MDs for this device, so just use the current head.
		md = mergedMD
	} else {
		func() {
			fbo.headLock.Lock(lState)
			defer fbo.headLock.Unlock(lState)
			// We don't need to do this for merged head
			// because the setHeadLocked() already does
			// that anyway.
			fbo.setLatestMergedRevisionLocked(ctx, lState, mergedMD.Revision(), false)
		}()
	}
	// Reject MDs that should be readable (or should have a root dir)
	// but couldn't be decrypted.
	if md.data.Dir.Type != Dir && (!md.IsInitialized() || md.IsReadable()) {
		return ImmutableRootMetadata{}, errors.Errorf("Got undecryptable RMD for %s: initialized=%t, readable=%t", fbo.id(), md.IsInitialized(), md.IsReadable())
	}
	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	headStatus := headTrusted
	if mdType == mdRekey {
		// If we already have a head (that has been filled after the initial
		// check, but before we acquired the lock), then just return it.
		if fbo.head != (ImmutableRootMetadata{}) {
			return fbo.head, nil
		}
		headStatus = headUntrusted
	}
	err = fbo.setHeadLocked(ctx, lState, md, headStatus)
	if err != nil {
		return ImmutableRootMetadata{}, err
	}
	return md, nil
}
// getMDForReadHelper fetches the MD for a read (per rtype) and then
// enforces read access for non-public TLFs.
func (fbo *folderBranchOps) getMDForReadHelper(
	ctx context.Context, lState *lockState, rtype mdReadType) (ImmutableRootMetadata, error) {
	md, err := fbo.getMDForReadLocked(ctx, lState, rtype)
	if err != nil {
		return ImmutableRootMetadata{}, err
	}
	if md.TlfID().Type() == tlf.Public {
		// Everyone may read public TLFs; no access check needed.
		return md, nil
	}
	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return ImmutableRootMetadata{}, err
	}
	isReader, err := md.IsReader(ctx, fbo.config.KBPKI(), session.UID)
	if err != nil {
		return ImmutableRootMetadata{}, err
	}
	if !isReader {
		return ImmutableRootMetadata{}, NewReadAccessError(
			md.GetTlfHandle(), session.Name, md.GetTlfHandle().GetCanonicalPath())
	}
	return md, nil
}
// getMostRecentFullyMergedMD is a helper method that returns the most
// recent merged MD that has been flushed to the server. This could
// be different from the current local head if journaling is on. If
// the journal is on a branch, it returns an error.
func (fbo *folderBranchOps) getMostRecentFullyMergedMD(ctx context.Context) (
	ImmutableRootMetadata, error) {
	mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
	if err != nil {
		return ImmutableRootMetadata{}, err
	}

	if mergedRev == kbfsmd.RevisionUninitialized {
		// Nothing is waiting in the journal, so the local head is
		// already the most recent fully-merged revision.
		return fbo.getMDForReadHelper(
			ctx, makeFBOLockState(), mdReadNoIdentify)
	}

	// Otherwise fetch exactly the predecessor revision from the server.
	rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
		mergedRev, Merged)
	if err != nil {
		return ImmutableRootMetadata{}, err
	}
	fbo.log.CDebugf(ctx, "Most recent fully merged revision is %d", mergedRev)
	return rmd, nil
}
// getMDForReadNoIdentify returns the current MD for a read without
// running identifies on the handle.
func (fbo *folderBranchOps) getMDForReadNoIdentify(
	ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
	return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
}
// getMDForReadNeedIdentify returns the current MD for a read, running
// identifies on the handle first if they haven't been done yet.
func (fbo *folderBranchOps) getMDForReadNeedIdentify(
	ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
	return fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify)
}
// getMDForReadNeedIdentifyOnMaybeFirstAccess should be called by a
// code path (like chat) that might be accessing this folder for the
// first time. Other folderBranchOps methods like Lookup which know
// the folder has already been accessed at least once (to get the root
// node, for example) do not need to call this. Unlike other getMD
// calls, this one may return a nil ImmutableRootMetadata along with a
// nil error, to indicate that there isn't any MD for this TLF yet and
// one must be created by the caller.
func (fbo *folderBranchOps) getMDForReadNeedIdentifyOnMaybeFirstAccess(
	ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
	md, err := fbo.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
	if _, ok := err.(MDWriteNeededInRequest); ok {
		// No local head yet; take the write path, which may fetch
		// from the server.
		fbo.mdWriterLock.Lock(lState)
		defer fbo.mdWriterLock.Unlock(lState)
		md, err = fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdWrite)
	}
	if _, noMD := errors.Cause(err).(NoMergedMDError); noMD {
		// No MD exists yet for this TLF; signal the caller to create one.
		return ImmutableRootMetadata{}, nil
	}
	if err != nil {
		return ImmutableRootMetadata{}, err
	}
	if md.TlfID().Type() != tlf.Public {
		session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
		if err != nil {
			return ImmutableRootMetadata{}, err
		}
		isReader, err := md.IsReader(ctx, fbo.config.KBPKI(), session.UID)
		// Fix: the error from IsReader was previously dropped, which
		// could misreport a lookup failure as a read-access error.
		if err != nil {
			return ImmutableRootMetadata{}, err
		}
		if !isReader {
			return ImmutableRootMetadata{}, NewReadAccessError(
				md.GetTlfHandle(), session.Name, md.GetTlfHandle().GetCanonicalPath())
		}
	}
	return md, nil
}
// getMDForWriteLockedForFilename returns the current MD for a write,
// checking that the current user may write to the TLF. filename is
// used only for error reporting. mdWriterLock must be held.
func (fbo *folderBranchOps) getMDForWriteLockedForFilename(
	ctx context.Context, lState *lockState, filename string) (
	ImmutableRootMetadata, error) {
	fbo.mdWriterLock.AssertLocked(lState)

	md, err := fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdWrite)
	if err != nil {
		return ImmutableRootMetadata{}, err
	}
	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return ImmutableRootMetadata{}, err
	}

	// Enforce write access before handing the MD back.
	switch isWriter, err := md.IsWriter(
		ctx, fbo.config.KBPKI(), session.UID); {
	case err != nil:
		return ImmutableRootMetadata{}, err
	case !isWriter:
		return ImmutableRootMetadata{}, NewWriteAccessError(
			md.GetTlfHandle(), session.Name, filename)
	}
	return md, nil
}
// getSuccessorMDForWriteLockedForFilename returns a new successor of
// the current head to hold the coming writes, after verifying write
// access (filename is used only for error reporting). mdWriterLock
// must be held.
func (fbo *folderBranchOps) getSuccessorMDForWriteLockedForFilename(
	ctx context.Context, lState *lockState, filename string) (
	*RootMetadata, error) {
	fbo.mdWriterLock.AssertLocked(lState)
	md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, filename)
	if err != nil {
		return nil, err
	}
	// Make a new successor of the current MD to hold the coming
	// writes. The caller must pass this into `finalizeMDWriteLocked`
	// or the changes will be lost.
	return md.MakeSuccessor(ctx, fbo.config.MetadataVersion(),
		fbo.config.Codec(), fbo.config.Crypto(),
		fbo.config.KeyManager(), fbo.config.KBPKI(), fbo.config.KBPKI(),
		md.mdID, true)
}
// getSuccessorMDForWriteLocked returns a new RootMetadata object with
// an incremented version number for modification. If the returned
// object is put to the MDServer (via MDOps), mdWriterLock must be
// held until then. (See comments for mdWriterLock above.)
func (fbo *folderBranchOps) getSuccessorMDForWriteLocked(
	ctx context.Context, lState *lockState) (*RootMetadata, error) {
	// Delegate with an empty filename, since no specific file is to blame.
	return fbo.getSuccessorMDForWriteLockedForFilename(ctx, lState, "")
}
// getMDForRekeyWriteLocked returns a successor MD suitable for a
// rekey, along with the previous writer's verifying key and whether
// the rekey bit was already set. The caller must be at least a
// reader; readers may only touch the reader-side key bundle (writer
// metadata must remain a copy). mdWriterLock must be held.
func (fbo *folderBranchOps) getMDForRekeyWriteLocked(
	ctx context.Context, lState *lockState) (
	rmd *RootMetadata, lastWriterVerifyingKey kbfscrypto.VerifyingKey,
	wasRekeySet bool, err error) {
	fbo.mdWriterLock.AssertLocked(lState)
	md, err := fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdRekey)
	if err != nil {
		return nil, kbfscrypto.VerifyingKey{}, false, err
	}
	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return nil, kbfscrypto.VerifyingKey{}, false, err
	}
	handle := md.GetTlfHandle()
	// must be a reader or writer (it checks both.)
	if !handle.IsReader(session.UID) {
		return nil, kbfscrypto.VerifyingKey{}, false,
			NewRekeyPermissionError(md.GetTlfHandle(), session.Name)
	}
	newMd, err := md.MakeSuccessor(ctx, fbo.config.MetadataVersion(),
		fbo.config.Codec(), fbo.config.Crypto(),
		fbo.config.KeyManager(), fbo.config.KBPKI(), fbo.config.KBPKI(),
		md.mdID, handle.IsWriter(session.UID))
	if err != nil {
		return nil, kbfscrypto.VerifyingKey{}, false, err
	}
	// readers shouldn't modify writer metadata
	if !handle.IsWriter(session.UID) && !newMd.IsWriterMetadataCopiedSet() {
		return nil, kbfscrypto.VerifyingKey{}, false,
			NewRekeyPermissionError(handle, session.Name)
	}
	return newMd, md.LastModifyingWriterVerifyingKey(), md.IsRekeySet(), nil
}
// nowUnixNano returns the current time from the configured clock, in
// nanoseconds since the Unix epoch.
func (fbo *folderBranchOps) nowUnixNano() int64 {
	return fbo.config.Clock().Now().UnixNano()
}
// maybeUnembedAndPutBlocks moves md's block changes into separate
// blocks and puts them to the block server, when the block splitter
// says the changes are too big to stay embedded. Returns nil when no
// unembedding was needed. On error, any blocks already put are
// scheduled for cleanup.
func (fbo *folderBranchOps) maybeUnembedAndPutBlocks(ctx context.Context,
	md *RootMetadata) (*blockPutState, error) {
	if fbo.config.BlockSplitter().ShouldEmbedBlockChanges(&md.data.Changes) {
		// Small enough to stay embedded; nothing to do.
		return nil, nil
	}
	chargedTo, err := chargedToForTLF(
		ctx, fbo.config.KBPKI(), md.GetTlfHandle())
	if err != nil {
		return nil, err
	}
	bps := newBlockPutState(1)
	err = fbo.prepper.unembedBlockChanges(
		ctx, bps, md, &md.data.Changes, chargedTo)
	if err != nil {
		return nil, err
	}
	// If the puts below fail, make sure the just-created blocks get
	// deleted rather than leaking on the server.
	defer func() {
		if err != nil {
			fbo.fbm.cleanUpBlockState(md.ReadOnly(), bps, blockDeleteOnMDFail)
		}
	}()
	ptrsToDelete, err := doBlockPuts(ctx, fbo.config.BlockServer(),
		fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, fbo.deferLog, md.TlfID(),
		md.GetTlfHandle().GetCanonicalName(), *bps)
	if err != nil {
		return nil, err
	}
	if len(ptrsToDelete) > 0 {
		return nil, errors.Errorf("Unexpected pointers to delete after "+
			"unembedding block changes in gc op: %v", ptrsToDelete)
	}
	return bps, nil
}
// ResetRootBlock creates a new empty dir block and sets the given
// metadata's root block to it. It returns the new block, its info,
// and its ready (encoded/encrypted) data; the caller is responsible
// for actually putting the block to the server.
func ResetRootBlock(ctx context.Context, config Config,
	rmd *RootMetadata) (Block, BlockInfo, ReadyBlockData, error) {
	newDblock := NewDirBlock()
	chargedTo, err := chargedToForTLF(ctx, config.KBPKI(), rmd.GetTlfHandle())
	if err != nil {
		return nil, BlockInfo{}, ReadyBlockData{}, err
	}
	info, plainSize, readyBlockData, err :=
		ReadyBlock(ctx, config.BlockCache(), config.BlockOps(),
			config.Crypto(), rmd.ReadOnly(), newDblock, chargedTo,
			keybase1.BlockType_DATA)
	if err != nil {
		return nil, BlockInfo{}, ReadyBlockData{}, err
	}
	// The new root dir starts with identical create and modify times.
	now := config.Clock().Now().UnixNano()
	rmd.data.Dir = DirEntry{
		BlockInfo: info,
		EntryInfo: EntryInfo{
			Type:  Dir,
			Size:  uint64(plainSize),
			Mtime: now,
			Ctime: now,
		},
	}
	prevDiskUsage := rmd.DiskUsage()
	rmd.SetDiskUsage(0)
	// Redundant, since this is called only for brand-new or
	// successor RMDs, but leave in to be defensive.
	rmd.ClearBlockChanges()
	co := newCreateOpForRootDir()
	rmd.AddOp(co)
	rmd.AddRefBlock(rmd.data.Dir.BlockInfo)
	// Set unref bytes to the previous disk usage, so that the
	// accounting works out.
	rmd.AddUnrefBytes(prevDiskUsage)
	return newDblock, info, readyBlockData, nil
}
// initMDLocked initializes a brand-new TLF: it verifies write access,
// establishes the first key generation (rekeying for private TLFs,
// fetching team keys for team TLFs), creates and puts the empty root
// dir block, writes the initial MD to the server, and installs it as
// the head. mdWriterLock must be held.
func (fbo *folderBranchOps) initMDLocked(
	ctx context.Context, lState *lockState, md *RootMetadata) error {
	fbo.mdWriterLock.AssertLocked(lState)
	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return err
	}
	handle := md.GetTlfHandle()
	// make sure we're a writer before rekeying or putting any blocks.
	isWriter, err := md.IsWriter(ctx, fbo.config.KBPKI(), session.UID)
	if err != nil {
		return err
	}
	if !isWriter {
		return NewWriteAccessError(
			handle, session.Name, handle.GetCanonicalPath())
	}
	// Establish the first key generation, depending on TLF type.
	var expectedKeyGen KeyGen
	var tlfCryptKey *kbfscrypto.TLFCryptKey
	switch md.TlfID().Type() {
	case tlf.Public:
		expectedKeyGen = PublicKeyGen
	case tlf.Private:
		var rekeyDone bool
		// create a new set of keys for this metadata
		rekeyDone, tlfCryptKey, err = fbo.config.KeyManager().Rekey(ctx, md, false)
		if err != nil {
			return err
		}
		if !rekeyDone {
			return errors.Errorf("Initial rekey unexpectedly not done for "+
				"private TLF %v", md.TlfID())
		}
		expectedKeyGen = FirstValidKeyGen
	case tlf.SingleTeam:
		// Teams get their crypt key from the service, no need to
		// rekey in KBFS.
		tid, err := handle.FirstResolvedWriter().AsTeam()
		if err != nil {
			return err
		}
		keys, keyGen, err := fbo.config.KBPKI().GetTeamTLFCryptKeys(ctx, tid)
		if err != nil {
			return err
		}
		if keyGen < FirstValidKeyGen {
			return errors.WithStack(
				InvalidKeyGenerationError{md.TlfID(), keyGen})
		}
		expectedKeyGen = keyGen
		md.bareMd.SetLatestKeyGenerationForTeamTLF(keyGen)
		key, ok := keys[keyGen]
		if !ok {
			return errors.WithStack(
				InvalidKeyGenerationError{md.TlfID(), keyGen})
		}
		tlfCryptKey = &key
	}
	keyGen := md.LatestKeyGeneration()
	if keyGen != expectedKeyGen {
		return InvalidKeyGenerationError{md.TlfID(), keyGen}
	}
	// create a dblock since one doesn't exist yet
	newDblock, info, readyBlockData, err := ResetRootBlock(ctx, fbo.config, md)
	if err != nil {
		return err
	}
	// Some other thread got here first, so give up and let it go
	// before we push anything to the servers.
	if h, _ := fbo.getHead(lState); h != (ImmutableRootMetadata{}) {
		fbo.log.CDebugf(ctx, "Head was already set, aborting")
		return nil
	}
	if err = PutBlockCheckLimitErrs(ctx, fbo.config.BlockServer(),
		fbo.config.Reporter(), md.TlfID(), info.BlockPointer, readyBlockData,
		md.GetTlfHandle().GetCanonicalName()); err != nil {
		return err
	}
	if err = fbo.config.BlockCache().Put(
		info.BlockPointer, fbo.id(), newDblock, TransientEntry); err != nil {
		return err
	}
	bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
	if err != nil {
		return err
	}
	err = fbo.finalizeBlocks(bps)
	if err != nil {
		return err
	}
	// Finally, write out the new metadata. TODO: if journaling is
	// enabled, we should bypass it here, so we don't have to worry
	// about delayed conflicts (since this is essentially a rekey, and
	// we always bypass the journal for rekeys). The caller will have
	// to intelligently deal with a conflict.
	irmd, err := fbo.config.MDOps().Put(ctx, md, session.VerifyingKey)
	if err != nil {
		return err
	}
	md.loadCachedBlockChanges(ctx, bps, fbo.log)
	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	if fbo.head != (ImmutableRootMetadata{}) {
		return errors.Errorf(
			"%v: Unexpected MD ID during new MD initialization: %v",
			md.TlfID(), fbo.head.mdID)
	}
	// Fix: the error return here was previously dropped (the call's
	// result was not assigned to err), so failures to set the head
	// were silently ignored.
	err = fbo.setNewInitialHeadLocked(ctx, lState, irmd)
	if err != nil {
		return err
	}
	// cache any new TLF crypt key
	if tlfCryptKey != nil {
		err = fbo.config.KeyCache().PutTLFCryptKey(
			md.TlfID(), keyGen, *tlfCryptKey)
		if err != nil {
			return err
		}
	}
	return nil
}
// GetTLFCryptKeys implements the KBFSOps interface for
// folderBranchOps. It is intentionally unsupported at this level.
func (fbo *folderBranchOps) GetTLFCryptKeys(ctx context.Context,
	h *TlfHandle) (keys []kbfscrypto.TLFCryptKey, id tlf.ID, err error) {
	return nil, tlf.ID{}, errors.New("GetTLFCryptKeys is not supported by folderBranchOps")
}
// GetTLFID implements the KBFSOps interface for folderBranchOps. It
// is intentionally unsupported at this level.
func (fbo *folderBranchOps) GetTLFID(ctx context.Context, h *TlfHandle) (tlf.ID, error) {
	return tlf.ID{}, errors.New("GetTLFID is not supported by folderBranchOps")
}
// GetOrCreateRootNode implements the KBFSOps interface for
// folderBranchOps. It is intentionally unsupported at this level.
func (fbo *folderBranchOps) GetOrCreateRootNode(
	ctx context.Context, h *TlfHandle, branch BranchName) (
	node Node, ei EntryInfo, err error) {
	return nil, EntryInfo{}, errors.New("GetOrCreateRootNode is not supported by folderBranchOps")
}
// GetRootNode implements the KBFSOps interface for folderBranchOps.
// It is intentionally unsupported at this level.
func (fbo *folderBranchOps) GetRootNode(
	ctx context.Context, h *TlfHandle, branch BranchName) (
	node Node, ei EntryInfo, err error) {
	return nil, EntryInfo{}, errors.New("GetRootNode is not supported by folderBranchOps")
}
// checkNode verifies that the given node belongs to this FBO's
// folder/branch pair.
func (fbo *folderBranchOps) checkNode(node Node) error {
	if fb := node.GetFolderBranch(); fb != fbo.folderBranch {
		return WrongOpsError{fbo.folderBranch, fb}
	}
	return nil
}
// SetInitialHeadFromServer sets the head to the given
// ImmutableRootMetadata, which must be retrieved from the MD server.
func (fbo *folderBranchOps) SetInitialHeadFromServer(
	ctx context.Context, md ImmutableRootMetadata) (err error) {
	fbo.log.CDebugf(ctx, "SetInitialHeadFromServer, revision=%d (%s)",
		md.Revision(), md.MergedStatus())
	defer func() {
		fbo.deferLog.CDebugf(ctx,
			"SetInitialHeadFromServer, revision=%d (%s) done: %+v",
			md.Revision(), md.MergedStatus(), err)
	}()
	if md.IsReadable() && fbo.config.Mode() != InitMinimal {
		// We will prefetch this as on-demand so that it triggers downstream
		// prefetches.
		fbo.config.BlockOps().Prefetcher().PrefetchBlock(
			&DirBlock{}, md.data.Dir.BlockPointer, md,
			defaultOnDemandRequestPriority)
	} else {
		fbo.log.CDebugf(ctx,
			"Setting an unreadable head with revision=%d", md.Revision())
	}
	// Return early if the head is already set. This avoids taking
	// mdWriterLock for no reason, and it also avoids any side effects
	// (e.g., calling `identifyOnce` and downloading the merged
	// head) if head is already set.
	lState := makeFBOLockState()
	head, headStatus := fbo.getHead(lState)
	if headStatus == headTrusted && head != (ImmutableRootMetadata{}) && head.mdID == md.mdID {
		fbo.log.CDebugf(ctx, "Head MD already set to revision %d (%s), no "+
			"need to set initial head again", md.Revision(), md.MergedStatus())
		return nil
	}
	return runUnlessCanceled(ctx, func() error {
		fb := FolderBranch{md.TlfID(), MasterBranch}
		if fb != fbo.folderBranch {
			return WrongOpsError{fbo.folderBranch, fb}
		}
		// Always identify first when trying to initialize the folder,
		// even if we turn out not to be a writer. (We can't rely on
		// the identifyOnce call in getMDLocked, because that isn't
		// called from the initialization code path when the local
		// user is not a valid writer.) Also, we want to make sure we
		// fail before we set the head, otherwise future calls will
		// succeed incorrectly.
		err = fbo.identifyOnce(ctx, md.ReadOnly())
		if err != nil {
			return err
		}
		lState := makeFBOLockState()
		fbo.mdWriterLock.Lock(lState)
		defer fbo.mdWriterLock.Unlock(lState)
		if md.MergedStatus() == Unmerged {
			// The given head is on a branch; record the server's
			// merged head as the latest merged revision first.
			mdops := fbo.config.MDOps()
			mergedMD, err := mdops.GetForTLF(ctx, fbo.id())
			if err != nil {
				return err
			}
			func() {
				fbo.headLock.Lock(lState)
				defer fbo.headLock.Unlock(lState)
				fbo.setLatestMergedRevisionLocked(ctx, lState,
					mergedMD.Revision(), false)
			}()
		}
		fbo.headLock.Lock(lState)
		defer fbo.headLock.Unlock(lState)
		// Only update the head the first time; later it will be
		// updated either directly via writes or through the
		// background update processor.
		if fbo.head == (ImmutableRootMetadata{}) {
			err = fbo.setInitialHeadTrustedLocked(ctx, lState, md)
			if err != nil {
				return err
			}
		} else if headStatus == headUntrusted {
			// A head exists but was never validated; validate it now
			// against the server-fetched MD.
			err = fbo.validateHeadLocked(ctx, lState, md)
			if err != nil {
				return err
			}
		}
		return nil
	})
}
// SetInitialHeadToNew creates a brand-new ImmutableRootMetadata
// object for the given TLF ID and handle, and sets the head to that.
// This is trusted.  It fails if the TLF/branch doesn't match this
// folderBranchOps, or if identification of the participants fails.
func (fbo *folderBranchOps) SetInitialHeadToNew(
	ctx context.Context, id tlf.ID, handle *TlfHandle) (err error) {
	fbo.log.CDebugf(ctx, "SetInitialHeadToNew %s", id)
	defer func() {
		fbo.deferLog.CDebugf(ctx, "SetInitialHeadToNew %s done: %+v",
			id, err)
	}()

	rmd, err := makeInitialRootMetadata(
		fbo.config.MetadataVersion(), id, handle)
	if err != nil {
		return err
	}

	return runUnlessCanceled(ctx, func() error {
		fb := FolderBranch{rmd.TlfID(), MasterBranch}
		if fb != fbo.folderBranch {
			return WrongOpsError{fbo.folderBranch, fb}
		}

		// Always identify first when trying to initialize the folder,
		// even if we turn out not to be a writer.  (We can't rely on
		// the identifyOnce call in getMDLocked, because that isn't
		// called from the initialization code path when the local
		// user is not a valid writer.)  Also, we want to make sure we
		// fail before we set the head, otherwise future calls will
		// succeed incorrectly.
		err = fbo.identifyOnce(ctx, rmd.ReadOnly())
		if err != nil {
			return err
		}

		lState := makeFBOLockState()

		fbo.mdWriterLock.Lock(lState)
		defer fbo.mdWriterLock.Unlock(lState)
		return fbo.initMDLocked(ctx, lState, rmd)
	})
}
// getNodeIDStr returns a human-readable identifier string for the
// given node, tolerating a nil node (used for log messages).
func getNodeIDStr(n Node) string {
	if n != nil {
		return fmt.Sprintf("NodeID(%v)", n.GetID())
	}
	return "NodeID(nil)"
}
// getRootNode returns (creating if necessary) the Node for the root
// directory of this folder/branch, along with its entry info and the
// TLF handle.  It may trigger an MD write if one is needed to read
// the current state, and it fails for unkeyed clients.
func (fbo *folderBranchOps) getRootNode(ctx context.Context) (
	node Node, ei EntryInfo, handle *TlfHandle, err error) {
	fbo.log.CDebugf(ctx, "getRootNode")
	defer func() {
		fbo.deferLog.CDebugf(ctx, "getRootNode done: %s %+v",
			getNodeIDStr(node), err)
	}()

	lState := makeFBOLockState()

	var md ImmutableRootMetadata
	md, err = fbo.getMDForReadLocked(ctx, lState, mdReadNoIdentify)
	if _, ok := err.(MDWriteNeededInRequest); ok {
		// A write (e.g., a rekey) is needed before we can read; retry
		// under the writer lock.
		func() {
			fbo.mdWriterLock.Lock(lState)
			defer fbo.mdWriterLock.Unlock(lState)
			md, err = fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdWrite)
		}()
	}
	if err != nil {
		return nil, EntryInfo{}, nil, err
	}

	// we may be an unkeyed client
	if err := isReadableOrError(ctx, fbo.config.KBPKI(), md.ReadOnly()); err != nil {
		return nil, EntryInfo{}, nil, err
	}

	handle = md.GetTlfHandle()
	node, err = fbo.nodeCache.GetOrCreate(md.data.Dir.BlockPointer,
		string(handle.GetCanonicalName()), nil)
	if err != nil {
		return nil, EntryInfo{}, nil, err
	}

	return node, md.Data().Dir.EntryInfo, handle, nil
}
type makeNewBlock func() Block
// pathFromNodeHelper() resolves a node to its current path via the
// node cache; it shouldn't be called except by the helper functions
// below.
func (fbo *folderBranchOps) pathFromNodeHelper(n Node) (path, error) {
	p := fbo.nodeCache.PathFromNode(n)
	if p.isValid() {
		return p, nil
	}
	return path{}, InvalidPathError{p}
}
// Helper functions to clarify uses of pathFromNodeHelper() (see
// nodeCache comments).

// pathFromNodeForRead resolves a node to a path for read-only use;
// no lock is required.
func (fbo *folderBranchOps) pathFromNodeForRead(n Node) (path, error) {
	return fbo.pathFromNodeHelper(n)
}
// pathFromNodeForMDWriteLocked resolves a node to a path for use in
// an MD write; the caller must hold mdWriterLock.
func (fbo *folderBranchOps) pathFromNodeForMDWriteLocked(
	lState *lockState, n Node) (path, error) {
	fbo.mdWriterLock.AssertLocked(lState)
	return fbo.pathFromNodeHelper(n)
}
// GetDirChildren returns a map from each child name of the given
// directory to its EntryInfo, including any locally-dirty entries.
// For an unlinked directory it returns an empty (nil) map rather
// than an error.
func (fbo *folderBranchOps) GetDirChildren(ctx context.Context, dir Node) (
	children map[string]EntryInfo, err error) {
	fbo.log.CDebugf(ctx, "GetDirChildren %s", getNodeIDStr(dir))
	defer func() {
		fbo.deferLog.CDebugf(ctx, "GetDirChildren %s done: %+v",
			getNodeIDStr(dir), err)
	}()

	err = fbo.checkNode(dir)
	if err != nil {
		return nil, err
	}

	err = runUnlessCanceled(ctx, func() error {
		var err error
		lState := makeFBOLockState()

		dirPath, err := fbo.pathFromNodeForRead(dir)
		if err != nil {
			return err
		}

		if fbo.nodeCache.IsUnlinked(dir) {
			fbo.log.CDebugf(ctx, "Returning an empty children set for "+
				"unlinked directory %v", dirPath.tailPointer())
			return nil
		}

		md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
		if err != nil {
			return err
		}

		children, err = fbo.blocks.GetDirtyDirChildren(
			ctx, lState, md.ReadOnly(), dirPath)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return children, nil
}
// Lookup returns the Node and EntryInfo for the child of `dir` with
// the given name.  Lookups inside an unlinked directory fail with
// NoSuchNameError.
func (fbo *folderBranchOps) Lookup(ctx context.Context, dir Node, name string) (
	node Node, ei EntryInfo, err error) {
	fbo.log.CDebugf(ctx, "Lookup %s %s", getNodeIDStr(dir), name)
	defer func() {
		fbo.deferLog.CDebugf(ctx, "Lookup %s %s done: %v %+v",
			getNodeIDStr(dir), name, getNodeIDStr(node), err)
	}()

	err = fbo.checkNode(dir)
	if err != nil {
		return nil, EntryInfo{}, err
	}

	var de DirEntry
	err = runUnlessCanceled(ctx, func() error {
		if fbo.nodeCache.IsUnlinked(dir) {
			fbo.log.CDebugf(ctx, "Refusing a lookup for unlinked directory %v",
				fbo.nodeCache.PathFromNode(dir).tailPointer())
			return NoSuchNameError{name}
		}

		lState := makeFBOLockState()
		md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
		if err != nil {
			return err
		}

		node, de, err = fbo.blocks.Lookup(ctx, lState, md.ReadOnly(), dir, name)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return nil, EntryInfo{}, err
	}
	return node, de.EntryInfo, nil
}
// statEntry is like Stat, but it returns a DirEntry. This is used by
// tests.  For the TLF root (no valid parent) it returns the root dir
// entry (with dirty updates applied) without requiring an identify.
func (fbo *folderBranchOps) statEntry(ctx context.Context, node Node) (
	de DirEntry, err error) {
	err = fbo.checkNode(node)
	if err != nil {
		return DirEntry{}, err
	}

	lState := makeFBOLockState()

	nodePath, err := fbo.pathFromNodeForRead(node)
	if err != nil {
		return DirEntry{}, err
	}

	var md ImmutableRootMetadata
	if nodePath.hasValidParent() {
		md, err = fbo.getMDForReadNeedIdentify(ctx, lState)
	} else {
		// If nodePath has no valid parent, it's just the TLF
		// root, so we don't need an identify in this case.
		md, err = fbo.getMDForReadNoIdentify(ctx, lState)
	}
	if err != nil {
		return DirEntry{}, err
	}

	if nodePath.hasValidParent() {
		de, err = fbo.blocks.GetDirtyEntryEvenIfDeleted(
			ctx, lState, md.ReadOnly(), nodePath)
		if err != nil {
			return DirEntry{}, err
		}

	} else {
		// nodePath is just the root.
		de = md.data.Dir
		de = fbo.blocks.UpdateDirtyEntry(ctx, lState, de)
	}

	return de, nil
}
var zeroPtr BlockPointer
// blockState tracks a single block that is about to be put to the
// server.
type blockState struct {
	blockPtr       BlockPointer  // pointer the block will be put under
	block          Block         // the block data itself
	readyBlockData ReadyBlockData // encoded/encrypted form to upload
	syncedCb       func() error  // optional callback invoked after the put
	oldPtr         BlockPointer  // pre-readied pointer, if saved via saveOldPtr
}
// Stat returns the EntryInfo for the given node, reflecting any
// locally-dirty state.
func (fbo *folderBranchOps) Stat(ctx context.Context, node Node) (
	ei EntryInfo, err error) {
	fbo.log.CDebugf(ctx, "Stat %s", getNodeIDStr(node))
	defer func() {
		fbo.deferLog.CDebugf(ctx, "Stat %s done: %+v",
			getNodeIDStr(node), err)
	}()

	var de DirEntry
	err = runUnlessCanceled(ctx, func() error {
		de, err = fbo.statEntry(ctx, node)
		return err
	})
	if err != nil {
		return EntryInfo{}, err
	}
	return de.EntryInfo, nil
}
// GetNodeMetadata returns block metadata for the given node,
// including the (unverified) username of its last writer.  If the
// entry has no Writer recorded, the Creator is used instead.
func (fbo *folderBranchOps) GetNodeMetadata(ctx context.Context, node Node) (
	ei NodeMetadata, err error) {
	fbo.log.CDebugf(ctx, "GetNodeMetadata %s", getNodeIDStr(node))
	defer func() {
		fbo.deferLog.CDebugf(ctx, "GetNodeMetadata %s done: %+v",
			getNodeIDStr(node), err)
	}()

	var de DirEntry
	err = runUnlessCanceled(ctx, func() error {
		de, err = fbo.statEntry(ctx, node)
		return err
	})
	var res NodeMetadata
	if err != nil {
		return res, err
	}
	res.BlockInfo = de.BlockInfo
	id := de.Writer
	if id == keybase1.UserOrTeamID("") {
		// Fall back to the creator when no last writer is recorded.
		id = de.Creator
	}
	res.LastWriterUnverified, err =
		fbo.config.KBPKI().GetNormalizedUsername(ctx, id)
	if err != nil {
		return res, err
	}
	return res, nil
}
// blockPutState is an internal structure to track data when putting blocks
type blockPutState struct {
	// blockStates holds one entry per block to put, in order.
	blockStates []blockState
}
// newBlockPutState returns an empty blockPutState with capacity for
// `length` block states.
func newBlockPutState(length int) *blockPutState {
	return &blockPutState{
		blockStates: make([]blockState, 0, length),
	}
}
// addNewBlock tracks a new block that will be put.  If syncedCb is
// non-nil, it will be called whenever the put for that block is
// complete (whether or not the put resulted in an error).  Currently
// it will not be called if the block is never put (due to an earlier
// error).
func (bps *blockPutState) addNewBlock(
	blockPtr BlockPointer, block Block,
	readyBlockData ReadyBlockData, syncedCb func() error) {
	bs := blockState{
		blockPtr:       blockPtr,
		block:          block,
		readyBlockData: readyBlockData,
		syncedCb:       syncedCb,
		oldPtr:         zeroPtr,
	}
	bps.blockStates = append(bps.blockStates, bs)
}
// saveOldPtr stores the given BlockPointer as the old (pre-readied)
// pointer for the most recently-added blockState.
func (bps *blockPutState) saveOldPtr(oldPtr BlockPointer) {
	last := len(bps.blockStates) - 1
	bps.blockStates[last].oldPtr = oldPtr
}
// mergeOtherBps appends all of `other`'s block states to this one.
func (bps *blockPutState) mergeOtherBps(other *blockPutState) {
	bps.blockStates = append(bps.blockStates, other.blockStates...)
}
// removeOtherBps removes from this blockPutState every block whose
// pointer also appears in `other`.
func (bps *blockPutState) removeOtherBps(other *blockPutState) {
	if len(other.blockStates) == 0 {
		return
	}

	// Build a set of the pointers to drop.
	toRemove := make(map[BlockPointer]bool, len(other.blockStates))
	for _, otherBS := range other.blockStates {
		toRemove[otherBS.blockPtr] = true
	}

	// Assume that `other` is a subset of `bps` when initializing the
	// slice length.
	capacity := len(bps.blockStates) - len(other.blockStates)
	if capacity <= 0 {
		capacity = 1
	}

	// Keep only the blocks that don't appear in `other`.
	kept := make([]blockState, 0, capacity)
	for _, bs := range bps.blockStates {
		if !toRemove[bs.blockPtr] {
			kept = append(kept, bs)
		}
	}
	bps.blockStates = kept
}
// DeepCopy returns a new blockPutState with an independent copy of
// the blockStates slice (the contained blockState values are copied
// by value).
func (bps *blockPutState) DeepCopy() *blockPutState {
	statesCopy := make([]blockState, len(bps.blockStates))
	copy(statesCopy, bps.blockStates)
	return &blockPutState{blockStates: statesCopy}
}
type localBcache map[BlockPointer]*DirBlock
// Returns whether the given error is one that shouldn't block the
// removal of a file or directory.
//
// TODO: Consider other errors recoverable, e.g. ones that arise from
// present but corrupted blocks?
func isRecoverableBlockErrorForRemoval(err error) bool {
	// Currently identical to the generic recoverable-block check.
	return isRecoverableBlockError(err)
}
func isRetriableError(err error, retries int) bool {
_, isExclOnUnmergedError := err.(ExclOnUnmergedError)
_, isUnmergedSelfConflictError := err.(UnmergedSelfConflictError)
recoverable := isExclOnUnmergedError || isUnmergedSelfConflictError ||
isRecoverableBlockError(err)
return recoverable && retries < maxRetriesOnRecoverableErrors
}
// finalizeBlocks moves the newly-created blocks in the given put
// state into the transient block cache.  Blocks that merely incref'd
// an existing pointer are skipped.  A nil bps is a no-op.
func (fbo *folderBranchOps) finalizeBlocks(bps *blockPutState) error {
	if bps == nil {
		return nil
	}
	bcache := fbo.config.BlockCache()
	for _, blockState := range bps.blockStates {
		newPtr := blockState.blockPtr
		// only cache this block if we made a brand new block, not if
		// we just incref'd some other block.
		if !newPtr.IsFirstRef() {
			continue
		}
		if err := bcache.Put(newPtr, fbo.id(), blockState.block,
			TransientEntry); err != nil {
			return err
		}
	}
	return nil
}
// Returns true if the passed error indicates a revision conflict.
// A nil error is not a conflict.
func isRevisionConflict(err error) bool {
	switch err.(type) {
	case kbfsmd.ServerErrorConflictRevision,
		kbfsmd.ServerErrorConflictPrevRoot,
		kbfsmd.ServerErrorConflictDiskUsage,
		kbfsmd.ServerErrorConditionFailed,
		kbfsmd.ServerErrorConflictFolderMapping,
		MDJournalConflictError:
		return true
	default:
		return false
	}
}
// finalizeMDWriteLocked commits the given MD (and its associated
// blocks) to the server: it puts the MD either merged or unmerged as
// appropriate, updates the local head, kicks off conflict resolution
// if we ended up on a branch, and finally invokes notifyFn (if
// non-nil) with the new immutable MD.  The caller must hold
// mdWriterLock.  `excl` indicates whether this write stems from an
// exclusive create, in which case an unmerged put is never allowed.
func (fbo *folderBranchOps) finalizeMDWriteLocked(ctx context.Context,
	lState *lockState, md *RootMetadata, bps *blockPutState, excl Excl,
	notifyFn func(ImmutableRootMetadata) error) (
	err error) {
	fbo.mdWriterLock.AssertLocked(lState)

	// finally, write out the new metadata
	mdops := fbo.config.MDOps()

	doUnmergedPut := true
	mergedRev := kbfsmd.RevisionUninitialized

	oldPrevRoot := md.PrevRoot()

	var irmd ImmutableRootMetadata

	// This puts on a delay on any cancellations arriving to ctx. It is intended
	// to work sort of like a critical section, except that there isn't an
	// explicit call to exit the critical section. The cancellation, if any, is
	// triggered after a timeout (i.e.
	// fbo.config.DelayedCancellationGracePeriod()).
	//
	// The purpose of trying to avoid cancellation once we start MD write is to
	// avoid having an unpredictable perceived MD state. That is, when
	// runUnlessCanceled returns Canceled on cancellation, application receives
	// an EINTR, and would assume the operation didn't succeed. But the MD write
	// continues, and there's a chance the write will succeed, meaning the
	// operation succeeds. This contradicts with the application's perception
	// through error code and can lead to horrible situations. An easily caught
	// situation is when application calls Create with O_EXCL set, gets an EINTR
	// while MD write succeeds, retries and gets an EEXIST error. If users hit
	// Ctrl-C, this might not be a big deal. However, it also happens for other
	// interrupts.  For applications that use signals to communicate, e.g.
	// SIGALRM and SIGUSR1, this can happen pretty often, which renders broken.
	if err = EnableDelayedCancellationWithGracePeriod(
		ctx, fbo.config.DelayedCancellationGracePeriod()); err != nil {
		return err
	}
	// we don't explicitly clean up (by using a defer) CancellationDelayer here
	// because sometimes fuse makes another call using the same ctx.  For example, in
	// fuse's Create call handler, a dir.Create is followed by an Attr call. If
	// we do a deferred cleanup here, if an interrupt has been received, it can
	// cause ctx to be canceled before Attr call finishes, which causes FUSE to
	// return EINTR for the Create request. But at this point, the request may
	// have already succeeded. Returning EINTR makes application thinks the file
	// is not created successfully.

	err = fbo.finalizeBlocks(bps)
	if err != nil {
		return err
	}

	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return err
	}

	if fbo.isMasterBranchLocked(lState) {
		// only do a normal Put if we're not already staged.
		irmd, err = mdops.Put(ctx, md, session.VerifyingKey)

		if doUnmergedPut = isRevisionConflict(err); doUnmergedPut {
			fbo.log.CDebugf(ctx, "Conflict: %v", err)
			mergedRev = md.Revision()

			if excl == WithExcl {
				// If this was caused by an exclusive create, we shouldn't do an
				// UnmergedPut, but rather try to get newest update from server, and
				// retry afterwards.
				err = fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdatesLocked)
				if err != nil {
					return err
				}
				return ExclOnUnmergedError{}
			}
		} else if err != nil {
			return err
		}
	} else if excl == WithExcl {
		return ExclOnUnmergedError{}
	}

	doResolve := false
	resolveMergedRev := mergedRev
	if doUnmergedPut {
		// We're out of date, and this is not an exclusive write, so put it as an
		// unmerged MD.
		irmd, err = mdops.PutUnmerged(ctx, md, session.VerifyingKey)
		if isRevisionConflict(err) {
			// Self-conflicts are retried in `doMDWriteWithRetry`.
			return UnmergedSelfConflictError{err}
		} else if err != nil {
			// If a PutUnmerged fails, we are in a bad situation: if
			// we fail, but the put succeeded, then dirty data will
			// remain cached locally and will be re-tried
			// (non-idempotently) on the next sync call.  This should
			// be a very rare situation when journaling is enabled, so
			// instead let's pretend it succeeded so that the cached
			// data is cleared and the nodeCache is updated.  If we're
			// wrong, and the update didn't make it to the server,
			// then the next call will get an
			// UnmergedSelfConflictError but fail to find any new
			// updates and fail the operation, but things will get
			// fixed up once conflict resolution finally completes.
			//
			// TODO: how confused will the kernel cache get if the
			// pointers are updated but the file system operation
			// still gets an error returned by the wrapper function
			// that calls us (in the event of a user cancellation)?
			fbo.log.CInfof(ctx, "Ignoring a PutUnmerged error: %+v", err)
			err = encryptMDPrivateData(
				ctx, fbo.config.Codec(), fbo.config.Crypto(),
				fbo.config.Crypto(), fbo.config.KeyManager(), session.UID, md)
			if err != nil {
				return err
			}
			mdID, err := kbfsmd.MakeID(fbo.config.Codec(), md.bareMd)
			if err != nil {
				return err
			}
			irmd = MakeImmutableRootMetadata(
				md, session.VerifyingKey, mdID, fbo.config.Clock().Now())
			err = fbo.config.MDCache().Put(irmd)
			if err != nil {
				return err
			}
		}
		bid := md.BID()
		fbo.setBranchIDLocked(lState, bid)
		doResolve = true
	} else {
		fbo.setBranchIDLocked(lState, NullBranchID)

		if md.IsRekeySet() && !md.IsWriterMetadataCopiedSet() {
			// Queue this folder for rekey if the bit was set and it's not a copy.
			// This is for the case where we're coming out of conflict resolution.
			// So why don't we do this in finalizeResolution? Well, we do but we don't
			// want to block on a rekey so we queue it. Because of that it may fail
			// due to a conflict with some subsequent write. By also handling it here
			// we'll always retry if we notice we haven't been successful in clearing
			// the bit yet. Note that I haven't actually seen this happen but it seems
			// theoretically possible.
			defer fbo.config.RekeyQueue().Enqueue(md.TlfID())
		}
	}

	md.loadCachedBlockChanges(ctx, bps, fbo.log)

	rebased := (oldPrevRoot != md.PrevRoot())
	if rebased {
		bid := md.BID()
		fbo.setBranchIDLocked(lState, bid)
		doResolve = true
		resolveMergedRev = kbfsmd.RevisionUninitialized
	}

	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
	if err != nil {
		return err
	}

	// Archive the old, unref'd blocks if journaling is off.
	if !TLFJournalEnabled(fbo.config, fbo.id()) {
		fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly())
	}

	// Call Resolve() after the head is set, to make sure it fetches
	// the correct unmerged MD range during resolution.
	if doResolve {
		fbo.cr.Resolve(ctx, md.Revision(), resolveMergedRev)
	}

	if notifyFn != nil {
		err := notifyFn(irmd)
		if err != nil {
			return err
		}
	}

	return nil
}
// waitForJournalLocked blocks until this TLF's journal is fully
// flushed, and then verifies that the flush actually succeeded (no
// remaining MD revisions and no flush error).  It is a no-op if
// journaling is disabled for this TLF.  The caller must hold
// mdWriterLock.
func (fbo *folderBranchOps) waitForJournalLocked(ctx context.Context,
	lState *lockState, jServer *JournalServer) error {
	fbo.mdWriterLock.AssertLocked(lState)

	if !TLFJournalEnabled(fbo.config, fbo.id()) {
		// Nothing to do.
		return nil
	}

	if err := jServer.Wait(ctx, fbo.id()); err != nil {
		return err
	}

	// Make sure everything flushed successfully, since we're holding
	// the writer lock, no other revisions could have snuck in.
	jStatus, err := jServer.JournalStatus(fbo.id())
	if err != nil {
		return err
	}
	if jStatus.RevisionEnd != kbfsmd.RevisionUninitialized {
		return errors.Errorf("Couldn't flush all MD revisions; current "+
			"revision end for the journal is %d", jStatus.RevisionEnd)
	}
	if jStatus.LastFlushErr != "" {
		return errors.Errorf("Couldn't flush the journal: %s",
			jStatus.LastFlushErr)
	}

	return nil
}
// finalizeMDRekeyWriteLocked writes out a rekey MD directly to the
// server (bypassing the journal, which is flushed first), updates
// the local head, and on a revision conflict re-queues the rekey.
// The caller must hold mdWriterLock.
// `lastWriterVerifyingKey` is used to sign the MD when the writer
// metadata is a copy (i.e., this is a reader rekey).
func (fbo *folderBranchOps) finalizeMDRekeyWriteLocked(ctx context.Context,
	lState *lockState, md *RootMetadata,
	lastWriterVerifyingKey kbfscrypto.VerifyingKey) (err error) {
	fbo.mdWriterLock.AssertLocked(lState)

	oldPrevRoot := md.PrevRoot()

	// Write out the new metadata.  If journaling is enabled, we don't
	// want the rekey to hit the journal and possibly end up on a
	// conflict branch, so wait for the journal to flush and then push
	// straight to the server.  TODO: we're holding the writer lock
	// while flushing the journal here (just like for exclusive
	// writes), which may end up blocking incoming writes for a long
	// time.  Rekeys are pretty rare, but if this becomes an issue
	// maybe we should consider letting these hit the journal and
	// scrubbing them when converting it to a branch.
	mdOps := fbo.config.MDOps()
	if jServer, err := GetJournalServer(fbo.config); err == nil {
		if err = fbo.waitForJournalLocked(ctx, lState, jServer); err != nil {
			return err
		}
		mdOps = jServer.delegateMDOps
	}

	var key kbfscrypto.VerifyingKey
	if md.IsWriterMetadataCopiedSet() {
		key = lastWriterVerifyingKey
	} else {
		var err error
		session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
		if err != nil {
			return err
		}
		key = session.VerifyingKey
	}

	irmd, err := mdOps.Put(ctx, md, key)
	isConflict := isRevisionConflict(err)
	if err != nil && !isConflict {
		return err
	}
	if isConflict {
		// Drop this block. We've probably collided with someone also
		// trying to rekey the same folder but that's not necessarily
		// the case. We'll queue another rekey just in case. It should
		// be safe as it's idempotent. We don't want any rekeys present
		// in unmerged history or that will just make a mess.
		fbo.config.RekeyQueue().Enqueue(md.TlfID())
		return RekeyConflictError{err}
	}

	fbo.setBranchIDLocked(lState, NullBranchID)

	rebased := (oldPrevRoot != md.PrevRoot())
	if rebased {
		bid := md.BID()
		fbo.setBranchIDLocked(lState, bid)
		fbo.cr.Resolve(ctx, md.Revision(), kbfsmd.RevisionUninitialized)
	}

	md.loadCachedBlockChanges(ctx, nil, fbo.log)

	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
	if err != nil {
		return err
	}

	// Explicitly set the latest merged revision, since if journaling
	// is on, `setHeadLocked` will not do it for us (even though
	// rekeys bypass the journal).
	fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false)
	return nil
}
// finalizeGCOp commits a garbage-collection op to the server as a
// new merged MD revision.  Unlike regular writes, a revision
// conflict here is simply returned as an error (GC will retry next
// period) rather than going unmerged.
func (fbo *folderBranchOps) finalizeGCOp(ctx context.Context, gco *GCOp) (
	err error) {
	lState := makeFBOLockState()
	// Lock the folder so we can get an internally-consistent MD
	// revision number.
	fbo.mdWriterLock.Lock(lState)
	defer fbo.mdWriterLock.Unlock(lState)

	md, err := fbo.getSuccessorMDForWriteLocked(ctx, lState)
	if err != nil {
		return err
	}

	if md.MergedStatus() == Unmerged {
		return UnexpectedUnmergedPutError{}
	}

	md.AddOp(gco)
	// TODO: if the revision number of this new commit is sequential
	// with `LatestRev`, we can probably change this to
	// `gco.LatestRev+1`.
	md.SetLastGCRevision(gco.LatestRev)

	bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
	if err != nil {
		return err
	}
	oldPrevRoot := md.PrevRoot()

	err = fbo.finalizeBlocks(bps)
	if err != nil {
		return err
	}

	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return err
	}

	// finally, write out the new metadata
	irmd, err := fbo.config.MDOps().Put(ctx, md, session.VerifyingKey)
	if err != nil {
		// Don't allow garbage collection to put us into a conflicting
		// state; just wait for the next period.
		return err
	}

	fbo.setBranchIDLocked(lState, NullBranchID)
	md.loadCachedBlockChanges(ctx, bps, fbo.log)

	rebased := (oldPrevRoot != md.PrevRoot())
	if rebased {
		bid := md.BID()
		fbo.setBranchIDLocked(lState, bid)
		fbo.cr.Resolve(ctx, md.Revision(), kbfsmd.RevisionUninitialized)
	}

	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
	if err != nil {
		return err
	}

	return fbo.notifyBatchLocked(ctx, lState, irmd)
}
// checkDisallowedPrefixes returns a DisallowedPrefixError if the
// given name starts with any reserved prefix, and nil otherwise.
func checkDisallowedPrefixes(name string) error {
	for _, disallowed := range disallowedPrefixes {
		if !strings.HasPrefix(name, disallowed) {
			continue
		}
		return DisallowedPrefixError{name, disallowed}
	}
	return nil
}
// checkNewDirSize returns a DirTooBigError if adding an entry named
// `newName` to the given directory would push its size past the
// configured maximum.  The current size comes from the (possibly
// dirty) dir entry, or from the root MD when dirPath is the TLF root.
func (fbo *folderBranchOps) checkNewDirSize(ctx context.Context,
	lState *lockState, md ReadOnlyRootMetadata,
	dirPath path, newName string) error {
	// Check that the directory isn't past capacity already.
	var currSize uint64
	if dirPath.hasValidParent() {
		de, err := fbo.blocks.GetDirtyEntry(ctx, lState, md, dirPath)
		if err != nil {
			return err
		}
		currSize = de.Size
	} else {
		// dirPath is just the root.
		currSize = md.data.Dir.Size
	}
	// Just an approximation since it doesn't include the size of the
	// directory entry itself, but that's ok -- at worst it'll be an
	// off-by-one-entry error, and since there's a maximum name length
	// we can't get in too much trouble.
	if currSize+uint64(len(newName)) > fbo.config.MaxDirBytes() {
		return DirTooBigError{dirPath, currSize + uint64(len(newName)),
			fbo.config.MaxDirBytes()}
	}
	return nil
}
// PathType returns the path type (public, private, or single-team)
// corresponding to this folder's TLF type.  It panics on an unknown
// TLF type.
func (fbo *folderBranchOps) PathType() PathType {
	switch fbo.folderBranch.Tlf.Type() {
	case tlf.Public:
		return PublicPathType
	case tlf.Private:
		return PrivatePathType
	case tlf.SingleTeam:
		return SingleTeamPathType
	default:
		panic(fmt.Sprintf("Unknown TLF type: %s", fbo.folderBranch.Tlf.Type()))
	}
}
// canonicalPath returns full canonical path for dir node and name.
func (fbo *folderBranchOps) canonicalPath(ctx context.Context, dir Node, name string) (string, error) {
	dirPath, err := fbo.pathFromNodeForRead(dir)
	if err != nil {
		return "", err
	}
	return BuildCanonicalPath(fbo.PathType(), dirPath.String(), name), nil
}
// signalWrite non-blockingly signals the background sync goroutine
// that a write happened, and cancels any in-progress conflict
// resolution.
func (fbo *folderBranchOps) signalWrite() {
	select {
	case fbo.syncNeededChan <- struct{}{}:
	// Non-blocking send: if a signal is already pending, one is enough.
	default:
	}
	// A local write always means any ongoing CR should be canceled,
	// because the set of unmerged writes has changed.
	fbo.cr.ForceCancel()
}
// syncDirUpdateOrSignal either syncs the directory update
// immediately (when the background flush batch size is 1) or just
// signals the background flusher to pick it up later.
func (fbo *folderBranchOps) syncDirUpdateOrSignal(
	ctx context.Context, lState *lockState) error {
	if fbo.config.BGFlushDirOpBatchSize() == 1 {
		return fbo.syncAllLocked(ctx, lState, NoExcl)
	}
	fbo.signalWrite()
	return nil
}
// checkForUnlinkedDir returns an error if the given directory has
// been unlinked, since operations within it cannot be synced.
func (fbo *folderBranchOps) checkForUnlinkedDir(dir Node) error {
	// Disallow directory operations within an unlinked directory.
	// Shells don't seem to allow it, and it will just pollute the dir
	// entry cache with unsyncable entries.
	if fbo.nodeCache.IsUnlinked(dir) {
		dirPath := fbo.nodeCache.PathFromNode(dir).String()
		return errors.WithStack(UnsupportedOpInUnlinkedDirError{dirPath})
	}
	return nil
}
// createEntryLocked creates a new file or directory entry named
// `name` under `dir`, caching the new block and dir-entry update
// locally and either syncing immediately (for exclusive creates) or
// signaling the background flusher.  On any error, all cached state
// for the new entry is rolled back.  The caller must hold
// mdWriterLock.  entryType must not by Sym.
func (fbo *folderBranchOps) createEntryLocked(
	ctx context.Context, lState *lockState, dir Node, name string,
	entryType EntryType, excl Excl) (childNode Node, de DirEntry, err error) {
	fbo.mdWriterLock.AssertLocked(lState)

	if err := checkDisallowedPrefixes(name); err != nil {
		return nil, DirEntry{}, err
	}

	if uint32(len(name)) > fbo.config.MaxNameBytes() {
		return nil, DirEntry{},
			NameTooLongError{name, fbo.config.MaxNameBytes()}
	}

	if err := fbo.checkForUnlinkedDir(dir); err != nil {
		return nil, DirEntry{}, err
	}

	filename, err := fbo.canonicalPath(ctx, dir, name)
	if err != nil {
		return nil, DirEntry{}, err
	}

	// Verify we have permission to write (but don't make a successor yet).
	md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, filename)
	if err != nil {
		return nil, DirEntry{}, err
	}

	dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
	if err != nil {
		return nil, DirEntry{}, err
	}

	// We're not going to modify this copy of the dirblock, so just
	// fetch it for reading.
	dblock, err := fbo.blocks.GetDirtyDir(
		ctx, lState, md.ReadOnly(), dirPath, blockRead)
	if err != nil {
		return nil, DirEntry{}, err
	}

	// does name already exist?
	if _, ok := dblock.Children[name]; ok {
		return nil, DirEntry{}, NameExistsError{name}
	}

	if err := fbo.checkNewDirSize(
		ctx, lState, md.ReadOnly(), dirPath, name); err != nil {
		return nil, DirEntry{}, err
	}

	parentPtr := dirPath.tailPointer()
	co, err := newCreateOp(name, parentPtr, entryType)
	if err != nil {
		return nil, DirEntry{}, err
	}
	co.setFinalPath(dirPath)
	// create new data block
	var newBlock Block
	if entryType == Dir {
		newBlock = &DirBlock{
			Children: make(map[string]DirEntry),
		}
	} else {
		newBlock = &FileBlock{}
	}

	// Cache update and operations until batch happens.  Make a new
	// temporary ID and directory entry.
	newID, err := fbo.config.cryptoPure().MakeTemporaryBlockID()
	if err != nil {
		return nil, DirEntry{}, err
	}

	chargedTo, err := chargedToForTLF(
		ctx, fbo.config.KBPKI(), md.GetTlfHandle())
	if err != nil {
		return nil, DirEntry{}, err
	}

	newPtr := BlockPointer{
		ID:         newID,
		KeyGen:     md.LatestKeyGeneration(),
		DataVer:    fbo.config.DataVersion(),
		DirectType: DirectBlock,
		Context: kbfsblock.MakeFirstContext(
			chargedTo, keybase1.BlockType_DATA),
	}
	co.AddRefBlock(newPtr)
	co.AddSelfUpdate(parentPtr)

	node, err := fbo.nodeCache.GetOrCreate(newPtr, name, dir)
	if err != nil {
		return nil, DirEntry{}, err
	}

	err = fbo.config.DirtyBlockCache().Put(
		fbo.id(), newPtr, fbo.branch(), newBlock)
	if err != nil {
		return nil, DirEntry{}, err
	}

	now := fbo.nowUnixNano()
	de = DirEntry{
		BlockInfo: BlockInfo{
			BlockPointer: newPtr,
			EncodedSize:  0,
		},
		EntryInfo: EntryInfo{
			Type:  entryType,
			Size:  0,
			Mtime: now,
			Ctime: now,
		},
	}

	// Record the new dir entry and op locally; cleanupFn undoes all
	// of this if anything below fails.
	dirCacheUndoFn := fbo.blocks.AddDirEntryInCache(lState, dirPath, name, de)
	fbo.dirOps = append(fbo.dirOps, cachedDirOp{co, []Node{dir, node}})
	added := fbo.status.addDirtyNode(dir)

	cleanupFn := func() {
		if added {
			fbo.status.rmDirtyNode(dir)
		}
		fbo.dirOps = fbo.dirOps[:len(fbo.dirOps)-1]
		if dirCacheUndoFn != nil {
			dirCacheUndoFn(lState)
		}
		// Delete should never fail.
		_ = fbo.config.DirtyBlockCache().Delete(fbo.id(), newPtr, fbo.branch())
	}
	defer func() {
		if err != nil && cleanupFn != nil {
			cleanupFn()
		}
	}()

	if entryType != Dir {
		// Dirty the file with a zero-byte write, to ensure the new
		// block is synced in SyncAll.  TODO: remove this if we ever
		// embed 0-byte files in the directory entry itself.
		err = fbo.blocks.Write(
			ctx, lState, md.ReadOnly(), node, []byte{}, 0)
		if err != nil {
			return nil, DirEntry{}, err
		}
		oldCleanupFn := cleanupFn
		cleanupFn = func() {
			fbo.blocks.ClearCacheInfo(lState, fbo.nodeCache.PathFromNode(node))
			oldCleanupFn()
		}
	}

	// It's safe to notify before we've synced, since it is only
	// sending invalidation notifications.  At worst the upper layer
	// will just have to refresh its cache needlessly.
	err = fbo.notifyOneOp(ctx, lState, co, md.ReadOnly(), false)
	if err != nil {
		return nil, DirEntry{}, err
	}

	if excl == WithExcl {
		// Sync this change to the server.
		err := fbo.syncAllLocked(ctx, lState, WithExcl)
		_, isNoUpdatesWhileDirty := errors.Cause(err).(NoUpdatesWhileDirtyError)
		if isNoUpdatesWhileDirty {
			// If an exclusive write hits a conflict, it will try to
			// update, but won't be able to because of the dirty
			// directory entries.  We need to clean up the dirty
			// entries here first before trying to apply the updates
			// again.  By returning `ExclOnUnmergedError` below, we
			// force the caller to retry the whole operation again.
			fbo.log.CDebugf(ctx, "Clearing dirty entry before applying new "+
				"updates for exclusive write")
			cleanupFn()
			cleanupFn = nil

			// Sync anything else that might be buffered (non-exclusively).
			err = fbo.syncAllLocked(ctx, lState, NoExcl)
			if err != nil {
				return nil, DirEntry{}, err
			}

			// Now we should be in a clean state, so this should work.
			err = fbo.getAndApplyMDUpdates(
				ctx, lState, fbo.applyMDUpdatesLocked)
			if err != nil {
				return nil, DirEntry{}, err
			}
			return nil, DirEntry{}, ExclOnUnmergedError{}
		} else if err != nil {
			return nil, DirEntry{}, err
		}
	} else {
		err = fbo.syncDirUpdateOrSignal(ctx, lState)
		if err != nil {
			return nil, DirEntry{}, err
		}
	}

	return node, de, nil
}
// maybeWaitForSquash blocks (with a bounded deadline) until conflict
// resolution finishes, but only when the given branch is a pending
// local squash.  This is a best-effort optimization to keep pending
// squashes from growing too large.
func (fbo *folderBranchOps) maybeWaitForSquash(
	ctx context.Context, bid BranchID) {
	if bid != PendingLocalSquashBranchID {
		return
	}

	fbo.log.CDebugf(ctx, "Blocking until squash finishes")
	// Limit the time we wait to just under the ctx deadline if there
	// is one, or 10s if there isn't.
	deadline, ok := ctx.Deadline()
	if ok {
		deadline = deadline.Add(-1 * time.Second)
	} else {
		// Can't use config.Clock() since context doesn't respect it.
		deadline = time.Now().Add(10 * time.Second)
	}
	ctx, cancel := context.WithDeadline(ctx, deadline)
	defer cancel()
	// Wait for CR to finish.  Note that if the user is issuing
	// concurrent writes, the current CR could be canceled, and when
	// the call belows returns, the branch still won't be squashed.
	// That's ok, this is just an optimization.
	err := fbo.cr.Wait(ctx)
	if err != nil {
		fbo.log.CDebugf(ctx, "Error while waiting for CR: %+v", err)
	}
}
// doMDWriteWithRetry runs `fn` under mdWriterLock, retrying on
// retriable errors (up to maxRetriesOnRecoverableErrors, per
// isRetriableError).  Between retries it releases the lock; for
// exclusive-create conflicts it waits for CR, and for unmerged
// self-conflicts it force-syncs the newest unmerged head first.
func (fbo *folderBranchOps) doMDWriteWithRetry(ctx context.Context,
	lState *lockState, fn func(lState *lockState) error) error {
	doUnlock := false
	defer func() {
		if doUnlock {
			bid := fbo.bid
			fbo.mdWriterLock.Unlock(lState)
			// Don't let a pending squash get too big.
			fbo.maybeWaitForSquash(ctx, bid)
		}
	}()

	for i := 0; ; i++ {
		fbo.mdWriterLock.Lock(lState)
		doUnlock = true

		// Make sure we haven't been canceled before doing anything
		// too serious.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		err := fn(lState)
		if isRetriableError(err, i) {
			fbo.log.CDebugf(ctx, "Trying again after retriable error: %v", err)
			// Release the lock to give someone else a chance
			doUnlock = false
			fbo.mdWriterLock.Unlock(lState)
			if _, ok := err.(ExclOnUnmergedError); ok {
				if err = fbo.cr.Wait(ctx); err != nil {
					return err
				}
			} else if _, ok := err.(UnmergedSelfConflictError); ok {
				// We can only get here if we are already on an
				// unmerged branch and an errored PutUnmerged did make
				// it to the mdserver.  Let's force sync, with a fresh
				// context so the observer doesn't ignore the updates
				// (but tie the cancels together).
				newCtx := fbo.ctxWithFBOID(context.Background())
				newCtx, cancel := context.WithCancel(newCtx)
				defer cancel()
				go func() {
					select {
					case <-ctx.Done():
						cancel()
					case <-newCtx.Done():
					}
				}()
				fbo.log.CDebugf(ctx, "Got a revision conflict while unmerged "+
					"(%v); forcing a sync", err)
				err = fbo.getAndApplyNewestUnmergedHead(newCtx, lState)
				if err != nil {
					// TODO: we might be stuck at this point if we're
					// ahead of the unmerged branch on the server, in
					// which case we might want to just abandon any
					// cached updates and force a sync to the head.
					return err
				}
				cancel()
			}
			continue
		} else if err != nil {
			return err
		}
		return nil
	}
}
// doMDWriteWithRetryUnlessCanceled runs `fn` through the standard MD
// write/retry machinery with a fresh lock state, returning early if
// `ctx` is canceled.
func (fbo *folderBranchOps) doMDWriteWithRetryUnlessCanceled(
	ctx context.Context, fn func(lState *lockState) error) error {
	runner := func() error {
		return fbo.doMDWriteWithRetry(ctx, makeFBOLockState(), fn)
	}
	return runUnlessCanceled(ctx, runner)
}
// CreateDir creates a new subdirectory `path` under `dir`, retrying
// the underlying MD write as needed.
func (fbo *folderBranchOps) CreateDir(
	ctx context.Context, dir Node, path string) (
	n Node, ei EntryInfo, err error) {
	fbo.log.CDebugf(ctx, "CreateDir %s %s", getNodeIDStr(dir), path)
	defer func() {
		fbo.deferLog.CDebugf(ctx, "CreateDir %s %s done: %v %+v",
			getNodeIDStr(dir), path, getNodeIDStr(n), err)
	}()

	if err = fbo.checkNode(dir); err != nil {
		return nil, EntryInfo{}, err
	}

	// Stash results in locals rather than assigning to the named
	// returns inside the closure, to avoid a race if the Create is
	// canceled.
	var newNode Node
	var newEntryInfo EntryInfo
	err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
		func(lState *lockState) error {
			node, de, err :=
				fbo.createEntryLocked(ctx, lState, dir, path, Dir, NoExcl)
			newNode = node
			newEntryInfo = de.EntryInfo
			return err
		})
	if err != nil {
		return nil, EntryInfo{}, err
	}
	return newNode, newEntryInfo, nil
}
// CreateFile creates a new file `path` under `dir`.  `isExec` selects
// an executable entry type; `excl` requests exclusive-create
// semantics (downgraded when journaling is enabled, since an
// exclusive create may land on a conflict branch).
func (fbo *folderBranchOps) CreateFile(
	ctx context.Context, dir Node, path string, isExec bool, excl Excl) (
	n Node, ei EntryInfo, err error) {
	fbo.log.CDebugf(ctx, "CreateFile %s %s isExec=%v Excl=%s",
		getNodeIDStr(dir), path, isExec, excl)
	defer func() {
		fbo.deferLog.CDebugf(ctx,
			"CreateFile %s %s isExec=%v Excl=%s done: %v %+v",
			getNodeIDStr(dir), path, isExec, excl,
			getNodeIDStr(n), err)
	}()

	if err = fbo.checkNode(dir); err != nil {
		return nil, EntryInfo{}, err
	}

	// Default to a plain file; upgrade to Exec when requested.
	entryType := File
	if isExec {
		entryType = Exec
	}

	// If journaling is turned on, an exclusive create may end up on a
	// conflict branch.
	if excl == WithExcl && TLFJournalEnabled(fbo.config, fbo.id()) {
		fbo.log.CDebugf(ctx, "Exclusive create status is being discarded.")
		excl = NoExcl
	}

	if excl == WithExcl {
		// Exclusive creates must wait for conflict resolution first.
		if err = fbo.cr.Wait(ctx); err != nil {
			return nil, EntryInfo{}, err
		}
	}

	// Stash results in locals rather than assigning to the named
	// returns inside the closure, to avoid a race if the Create is
	// canceled.
	var newNode Node
	var newEntryInfo EntryInfo
	err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
		func(lState *lockState) error {
			node, de, err :=
				fbo.createEntryLocked(ctx, lState, dir, path, entryType, excl)
			newNode = node
			newEntryInfo = de.EntryInfo
			return err
		})
	if err != nil {
		return nil, EntryInfo{}, err
	}
	return newNode, newEntryInfo, nil
}
// notifyAndSyncOrSignal caches an op in memory and dirties the
// relevant node, and then sends a notification for it.  If batching
// is on, it signals the write; otherwise it syncs the change.  It
// should only be called as the final instruction that can fail in a
// method.
//
// `undoFn`, if non-nil, is invoked to roll back the caller's dir
// cache change when a later step fails; the op and dirty-node
// bookkeeping added here are likewise rolled back on failure.
func (fbo *folderBranchOps) notifyAndSyncOrSignal(
	ctx context.Context, lState *lockState, undoFn dirCacheUndoFn,
	nodesToDirty []Node, op op, md ReadOnlyRootMetadata) (err error) {
	fbo.dirOps = append(fbo.dirOps, cachedDirOp{op, nodesToDirty})
	// Track only the nodes this call newly marked dirty, so the
	// failure path below doesn't un-dirty nodes dirtied elsewhere.
	var addedNodes []Node
	for _, n := range nodesToDirty {
		added := fbo.status.addDirtyNode(n)
		if added {
			addedNodes = append(addedNodes, n)
		}
	}
	defer func() {
		if err != nil {
			// Undo everything this call did, in reverse order.
			for _, n := range addedNodes {
				fbo.status.rmDirtyNode(n)
			}
			fbo.dirOps = fbo.dirOps[:len(fbo.dirOps)-1]
			if undoFn != nil {
				undoFn(lState)
			}
		}
	}()

	// It's safe to notify before we've synced, since it is only
	// sending invalidation notifications.  At worst the upper layer
	// will just have to refresh its cache needlessly.
	err = fbo.notifyOneOp(ctx, lState, op, md, false)
	if err != nil {
		return err
	}

	return fbo.syncDirUpdateOrSignal(ctx, lState)
}
// createLinkLocked creates a symlink entry named `fromName` in `dir`
// whose target is the literal string `toPath`.  It validates the
// name, caches the new dir entry locally, and signals/syncs the
// change; no new blocks are created since a symlink lives entirely in
// its directory entry.  Must be called with mdWriterLock held.
func (fbo *folderBranchOps) createLinkLocked(
	ctx context.Context, lState *lockState, dir Node, fromName string,
	toPath string) (DirEntry, error) {
	fbo.mdWriterLock.AssertLocked(lState)

	if err := checkDisallowedPrefixes(fromName); err != nil {
		return DirEntry{}, err
	}

	if uint32(len(fromName)) > fbo.config.MaxNameBytes() {
		return DirEntry{},
			NameTooLongError{fromName, fbo.config.MaxNameBytes()}
	}

	if err := fbo.checkForUnlinkedDir(dir); err != nil {
		return DirEntry{}, err
	}

	// Verify we have permission to write (but don't make a successor yet).
	md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
	if err != nil {
		return DirEntry{}, err
	}

	dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
	if err != nil {
		return DirEntry{}, err
	}

	// We're not going to modify this copy of the dirblock, so just
	// fetch it for reading.
	dblock, err := fbo.blocks.GetDirtyDir(
		ctx, lState, md.ReadOnly(), dirPath, blockRead)
	if err != nil {
		return DirEntry{}, err
	}

	// TODO: validate inputs

	// does name already exist?
	if _, ok := dblock.Children[fromName]; ok {
		return DirEntry{}, NameExistsError{fromName}
	}

	if err := fbo.checkNewDirSize(ctx, lState, md.ReadOnly(),
		dirPath, fromName); err != nil {
		return DirEntry{}, err
	}

	parentPtr := dirPath.tailPointer()
	co, err := newCreateOp(fromName, parentPtr, Sym)
	if err != nil {
		return DirEntry{}, err
	}
	co.setFinalPath(dirPath)
	co.AddSelfUpdate(parentPtr)

	// Nothing below here can fail, so no need to clean up the dir
	// entry cache on a failure.  If this ever panics, we need to add
	// cleanup code.

	// Create a direntry for the link, and then sync.  The entry's
	// size is the length of the target path, matching symlink
	// conventions.
	now := fbo.nowUnixNano()
	de := DirEntry{
		EntryInfo: EntryInfo{
			Type:    Sym,
			Size:    uint64(len(toPath)),
			SymPath: toPath,
			Mtime:   now,
			Ctime:   now,
		},
	}

	dirCacheUndoFn := fbo.blocks.AddDirEntryInCache(
		lState, dirPath, fromName, de)
	err = fbo.notifyAndSyncOrSignal(
		ctx, lState, dirCacheUndoFn, []Node{dir}, co, md.ReadOnly())
	if err != nil {
		return DirEntry{}, err
	}
	return de, nil
}
// CreateLink creates a symlink named `fromName` in `dir` pointing at
// `toPath`, retrying the underlying MD write as needed.
func (fbo *folderBranchOps) CreateLink(
	ctx context.Context, dir Node, fromName string, toPath string) (
	ei EntryInfo, err error) {
	fbo.log.CDebugf(ctx, "CreateLink %s %s -> %s",
		getNodeIDStr(dir), fromName, toPath)
	defer func() {
		fbo.deferLog.CDebugf(ctx, "CreateLink %s %s -> %s done: %+v",
			getNodeIDStr(dir), fromName, toPath, err)
	}()

	if err = fbo.checkNode(dir); err != nil {
		return EntryInfo{}, err
	}

	// Stash the result in a local rather than assigning to the named
	// return inside the closure, to avoid a race if the Create is
	// canceled.
	var linkInfo EntryInfo
	err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
		func(lState *lockState) error {
			de, err := fbo.createLinkLocked(ctx, lState, dir, fromName, toPath)
			linkInfo = de.EntryInfo
			return err
		})
	if err != nil {
		return EntryInfo{}, err
	}
	return linkInfo, nil
}
// unrefEntryLocked adds unref entries to `ro` for all blocks backing
// the directory entry `de` (named `name` under `dir`): the entry's
// own block plus, for files, all of its indirect child blocks.
// Blocks that a still-buffered dir op referenced since the last sync
// are simply dropped from that op instead of being unreferenced.
// Symlinks have no blocks, so they are a no-op.  Must be called with
// mdWriterLock held.
func (fbo *folderBranchOps) unrefEntryLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ro op, dir path, de DirEntry,
	name string) error {
	fbo.mdWriterLock.AssertLocked(lState)
	if de.Type == Sym {
		return nil
	}

	unrefsToAdd := make(map[BlockPointer]bool)
	fbo.prepper.cacheBlockInfos([]BlockInfo{de.BlockInfo})
	unrefsToAdd[de.BlockPointer] = true
	// construct a path for the child so we can unlink with it.
	childPath := dir.ChildPath(name, de.BlockPointer)

	// If this is an indirect block, we need to delete all of its
	// children as well.  NOTE: non-empty directories can't be
	// removed, so no need to check for indirect directory blocks
	// here.
	if de.Type == File || de.Type == Exec {
		blockInfos, err := fbo.blocks.GetIndirectFileBlockInfos(
			ctx, lState, kmd, childPath)
		if isRecoverableBlockErrorForRemoval(err) {
			// Best-effort: proceed with whatever block infos we got.
			msg := fmt.Sprintf("Recoverable block error encountered for unrefEntry(%v); continuing", childPath)
			fbo.log.CWarningf(ctx, "%s", msg)
			fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err)
		} else if err != nil {
			return err
		}
		fbo.prepper.cacheBlockInfos(blockInfos)
		for _, blockInfo := range blockInfos {
			unrefsToAdd[blockInfo.BlockPointer] = true
		}
	}

	// Any referenced blocks that were unreferenced since the last
	// sync can just be forgotten about.  Note that any updated
	// pointers that are unreferenced will be fixed up during syncing.
	for _, dirOp := range fbo.dirOps {
		// Iterate backwards so DelRefBlock doesn't disturb the
		// indices we have yet to visit.
		for i := len(dirOp.dirOp.Refs()) - 1; i >= 0; i-- {
			ref := dirOp.dirOp.Refs()[i]
			if _, ok := unrefsToAdd[ref]; ok {
				dirOp.dirOp.DelRefBlock(ref)
				delete(unrefsToAdd, ref)
			}
		}
	}
	for unref := range unrefsToAdd {
		ro.AddUnrefBlock(unref)
	}

	return nil
}
// removeEntryLocked removes the entry `name` from the directory at
// `dirPath` (whose node is `dir`): it unreferences the entry's
// blocks, drops the entry from the cached dir block, and
// signals/syncs an rm op.  Must be called with mdWriterLock held.
func (fbo *folderBranchOps) removeEntryLocked(ctx context.Context,
	lState *lockState, md ReadOnlyRootMetadata, dir Node, dirPath path,
	name string) error {
	fbo.mdWriterLock.AssertLocked(lState)

	if err := fbo.checkForUnlinkedDir(dir); err != nil {
		return err
	}

	// We're not going to modify this copy of the dirblock, so just
	// fetch it for reading.
	pblock, err := fbo.blocks.GetDirtyDir(ctx, lState, md, dirPath, blockRead)
	if err != nil {
		return err
	}

	// make sure the entry exists
	de, ok := pblock.Children[name]
	if !ok {
		return NoSuchNameError{name}
	}

	parentPtr := dirPath.tailPointer()
	ro, err := newRmOp(name, parentPtr)
	if err != nil {
		return err
	}
	ro.setFinalPath(dirPath)
	ro.AddSelfUpdate(parentPtr)
	err = fbo.unrefEntryLocked(ctx, lState, md, ro, dirPath, de, name)
	if err != nil {
		return err
	}

	dirCacheUndoFn := fbo.blocks.RemoveDirEntryInCache(
		lState, dirPath, name, de)
	if de.Type == Dir {
		removedNode := fbo.nodeCache.Get(de.BlockPointer.Ref())
		if removedNode != nil {
			// If it was a dirty directory, the removed node no longer
			// counts as dirty (it will never be sync'd).  Note that
			// removed files will still be synced since any data
			// written to them via a handle stays in memory until the
			// sync actually happens.
			removed := fbo.status.rmDirtyNode(removedNode)
			if removed {
				// Wrap the undo function so a failed removal also
				// restores the node's dirty status.
				oldUndoFn := dirCacheUndoFn
				dirCacheUndoFn = func(lState *lockState) {
					oldUndoFn(lState)
					fbo.status.addDirtyNode(removedNode)
				}
			}
		}
	}
	return fbo.notifyAndSyncOrSignal(
		ctx, lState, dirCacheUndoFn, []Node{dir}, ro, md.ReadOnly())
}
// removeDirLocked removes the subdirectory `dirName` from `dir`,
// after verifying that the target directory is empty.  A recoverable
// block error while fetching the child is tolerated (the removal
// proceeds best-effort), matching unrefEntryLocked.  Must be called
// with mdWriterLock held.
func (fbo *folderBranchOps) removeDirLocked(ctx context.Context,
	lState *lockState, dir Node, dirName string) (err error) {
	fbo.mdWriterLock.AssertLocked(lState)

	// Verify we have permission to write (but don't make a successor yet).
	md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
	if err != nil {
		return err
	}

	dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
	if err != nil {
		return err
	}

	pblock, err := fbo.blocks.GetDirtyDir(
		ctx, lState, md.ReadOnly(), dirPath, blockRead)
	if err != nil {
		// Bug fix: this error was previously unchecked, so a failed
		// fetch could lead to a nil-pointer dereference on `pblock`
		// below.
		return err
	}
	de, ok := pblock.Children[dirName]
	if !ok {
		return NoSuchNameError{dirName}
	}

	// construct a path for the child so we can check for an empty dir
	childPath := dirPath.ChildPath(dirName, de.BlockPointer)

	childBlock, err := fbo.blocks.GetDirtyDir(
		ctx, lState, md.ReadOnly(), childPath, blockRead)
	if isRecoverableBlockErrorForRemoval(err) {
		msg := fmt.Sprintf("Recoverable block error encountered for removeDirLocked(%v); continuing", childPath)
		fbo.log.CWarningf(ctx, "%s", msg)
		fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err)
	} else if err != nil {
		return err
	} else if len(childBlock.Children) > 0 {
		return DirNotEmptyError{dirName}
	}

	return fbo.removeEntryLocked(
		ctx, lState, md.ReadOnly(), dir, dirPath, dirName)
}
// RemoveDir removes the (empty) subdirectory `dirName` from `dir`,
// retrying the underlying MD write as needed.
func (fbo *folderBranchOps) RemoveDir(
	ctx context.Context, dir Node, dirName string) (err error) {
	fbo.log.CDebugf(ctx, "RemoveDir %s %s", getNodeIDStr(dir), dirName)
	defer func() {
		fbo.deferLog.CDebugf(ctx, "RemoveDir %s %s done: %+v",
			getNodeIDStr(dir), dirName, err)
	}()

	if err = fbo.checkNode(dir); err != nil {
		return err
	}

	removeLocked := func(lState *lockState) error {
		return fbo.removeDirLocked(ctx, lState, dir, dirName)
	}
	return fbo.doMDWriteWithRetryUnlessCanceled(ctx, removeLocked)
}
// RemoveEntry removes the entry `name` (file or symlink) from `dir`,
// retrying the underlying MD write as needed.
func (fbo *folderBranchOps) RemoveEntry(ctx context.Context, dir Node,
	name string) (err error) {
	fbo.log.CDebugf(ctx, "RemoveEntry %s %s", getNodeIDStr(dir), name)
	defer func() {
		fbo.deferLog.CDebugf(ctx, "RemoveEntry %s %s done: %+v",
			getNodeIDStr(dir), name, err)
	}()

	if err = fbo.checkNode(dir); err != nil {
		return err
	}

	removeLocked := func(lState *lockState) error {
		// Verify we have permission to write (but no need to make
		// a successor yet).
		md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
		if err != nil {
			return err
		}

		dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
		if err != nil {
			return err
		}

		return fbo.removeEntryLocked(
			ctx, lState, md.ReadOnly(), dir, dirPath, name)
	}
	return fbo.doMDWriteWithRetryUnlessCanceled(ctx, removeLocked)
}
// renameLocked moves the entry `oldName` under `oldParent` to
// `newName` under `newParent`.  If the destination name already
// exists it may only be replaced by an entry of the same kind
// (dir-over-dir requires the replaced dir to be empty), and the
// replaced entry's blocks are unreferenced.  Must be called with
// mdWriterLock held.
func (fbo *folderBranchOps) renameLocked(
	ctx context.Context, lState *lockState, oldParent Node, oldName string,
	newParent Node, newName string) (err error) {
	fbo.mdWriterLock.AssertLocked(lState)

	if err := fbo.checkForUnlinkedDir(oldParent); err != nil {
		return err
	}
	if err := fbo.checkForUnlinkedDir(newParent); err != nil {
		return err
	}

	oldParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, oldParent)
	if err != nil {
		return err
	}
	newParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, newParent)
	if err != nil {
		return err
	}

	// Verify we have permission to write (but no need to make a
	// successor yet).
	md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
	if err != nil {
		return err
	}

	_, newPBlock, newDe, ro, err := fbo.blocks.PrepRename(
		ctx, lState, md.ReadOnly(), oldParentPath, oldName, newParentPath,
		newName)
	if err != nil {
		return err
	}

	// does name exist?
	replacedDe, ok := newPBlock.Children[newName]
	if ok {
		// Usually higher-level programs check these, but just in case.
		if replacedDe.Type == Dir && newDe.Type != Dir {
			return NotDirError{newParentPath.ChildPathNoPtr(newName)}
		} else if replacedDe.Type != Dir && newDe.Type == Dir {
			return NotFileError{newParentPath.ChildPathNoPtr(newName)}
		}

		if replacedDe.Type == Dir {
			// The directory must be empty.
			oldTargetDir, err := fbo.blocks.GetDirBlockForReading(ctx, lState,
				md.ReadOnly(), replacedDe.BlockPointer, newParentPath.Branch,
				newParentPath.ChildPathNoPtr(newName))
			if err != nil {
				return err
			}
			if len(oldTargetDir.Children) != 0 {
				fbo.log.CWarningf(ctx, "Renaming over a non-empty directory "+
					" (%s/%s) not allowed.", newParentPath, newName)
				return DirNotEmptyError{newName}
			}
		}

		// Delete the old block pointed to by this direntry.
		err := fbo.unrefEntryLocked(
			ctx, lState, md.ReadOnly(), ro, newParentPath, replacedDe, newName)
		if err != nil {
			return err
		}
	}

	// Only the ctime changes on the directory entry itself.
	newDe.Ctime = fbo.nowUnixNano()

	dirCacheUndoFn, err := fbo.blocks.RenameDirEntryInCache(
		lState, oldParentPath, oldName, newParentPath, newName, newDe,
		replacedDe)
	if err != nil {
		return err
	}

	// Dirty the new parent too, but only when it's a different node
	// than the old parent (avoid double-counting a same-dir rename).
	nodesToDirty := []Node{oldParent}
	if oldParent.GetID() != newParent.GetID() {
		nodesToDirty = append(nodesToDirty, newParent)
	}
	return fbo.notifyAndSyncOrSignal(
		ctx, lState, dirCacheUndoFn, nodesToDirty, ro, md.ReadOnly())
}
// Rename moves oldParent/oldName to newParent/newName within a single
// top-level folder, retrying the underlying MD write as needed.
func (fbo *folderBranchOps) Rename(
	ctx context.Context, oldParent Node, oldName string, newParent Node,
	newName string) (err error) {
	fbo.log.CDebugf(ctx, "Rename %s/%s -> %s/%s", getNodeIDStr(oldParent),
		oldName, getNodeIDStr(newParent), newName)
	defer func() {
		fbo.deferLog.CDebugf(ctx, "Rename %s/%s -> %s/%s done: %+v",
			getNodeIDStr(oldParent), oldName,
			getNodeIDStr(newParent), newName, err)
	}()

	if err = fbo.checkNode(newParent); err != nil {
		return err
	}

	renameFn := func(lState *lockState) error {
		// only works for paths within the same topdir
		if oldParent.GetFolderBranch() != newParent.GetFolderBranch() {
			return RenameAcrossDirsError{}
		}
		return fbo.renameLocked(ctx, lState, oldParent, oldName,
			newParent, newName)
	}
	return fbo.doMDWriteWithRetryUnlessCanceled(ctx, renameFn)
}
// Read reads up to len(dest) bytes from `file` starting at offset
// `off`, returning the number of bytes actually read.  Paths
// containing a `.git` component get delayed cancellation enabled as a
// workaround for git mishandling EINTR.
func (fbo *folderBranchOps) Read(
	ctx context.Context, file Node, dest []byte, off int64) (
	n int64, err error) {
	fbo.log.CDebugf(ctx, "Read %s %d %d", getNodeIDStr(file),
		len(dest), off)
	defer func() {
		fbo.deferLog.CDebugf(ctx, "Read %s %d %d done: %+v",
			getNodeIDStr(file), len(dest), off, err)
	}()

	err = fbo.checkNode(file)
	if err != nil {
		return 0, err
	}

	// Scope block: filePath is only needed for the git check below.
	{
		filePath, err := fbo.pathFromNodeForRead(file)
		if err != nil {
			return 0, err
		}

		// It seems git isn't handling EINTR from some of its read calls (likely
		// fread), which causes it to get corrupted data (which leads to coredumps
		// later) when a read system call on pack files gets interrupted. This
		// enables delayed cancellation for Read if the file path contains `.git`.
		//
		// TODO: get a patch in git, wait for sufficiently long time for people to
		// upgrade, and remove this.

		// allow turning this feature off by env var to make life easier when we
		// try to fix git.
		if _, isSet := os.LookupEnv("KBFS_DISABLE_GIT_SPECIAL_CASE"); !isSet {
			for _, n := range filePath.path {
				if n.Name == ".git" {
					EnableDelayedCancellationWithGracePeriod(ctx, fbo.config.DelayedCancellationGracePeriod())
					break
				}
			}
		}
	}

	// Don't let the goroutine below write directly to the return
	// variable, since if the context is canceled the goroutine might
	// outlast this function call, and end up in a read/write race
	// with the caller.
	var bytesRead int64
	err = runUnlessCanceled(ctx, func() error {
		lState := makeFBOLockState()

		// verify we have permission to read
		md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
		if err != nil {
			return err
		}

		// Read using the `file` Node, not `filePath`, since the path
		// could change until we take `blockLock` for reading.
		bytesRead, err = fbo.blocks.Read(
			ctx, lState, md.ReadOnly(), file, dest, off)
		return err
	})
	if err != nil {
		return 0, err
	}
	return bytesRead, nil
}
// Write buffers `data` into `file` at offset `off`, marks the node
// dirty, and signals the background sync machinery.
func (fbo *folderBranchOps) Write(
	ctx context.Context, file Node, data []byte, off int64) (err error) {
	fbo.log.CDebugf(ctx, "Write %s %d %d", getNodeIDStr(file),
		len(data), off)
	defer func() {
		fbo.deferLog.CDebugf(ctx, "Write %s %d %d done: %+v",
			getNodeIDStr(file), len(data), off, err)
	}()

	if err = fbo.checkNode(file); err != nil {
		return err
	}

	doWrite := func() error {
		lState := makeFBOLockState()

		// The MD is only read here; unref changes are tracked on the
		// side and folded into the MD at sync time.
		md, err := fbo.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
		if err != nil {
			return err
		}

		if err := fbo.blocks.Write(
			ctx, lState, md.ReadOnly(), file, data, off); err != nil {
			return err
		}

		fbo.status.addDirtyNode(file)
		fbo.signalWrite()
		return nil
	}
	return runUnlessCanceled(ctx, doWrite)
}
// Truncate resizes `file` to `size` bytes, marks the node dirty, and
// signals the background sync machinery.
func (fbo *folderBranchOps) Truncate(
	ctx context.Context, file Node, size uint64) (err error) {
	fbo.log.CDebugf(ctx, "Truncate %s %d", getNodeIDStr(file), size)
	defer func() {
		fbo.deferLog.CDebugf(ctx, "Truncate %s %d done: %+v",
			getNodeIDStr(file), size, err)
	}()

	if err = fbo.checkNode(file); err != nil {
		return err
	}

	doTruncate := func() error {
		lState := makeFBOLockState()

		// The MD is only read here; unref changes are tracked on the
		// side and folded into the MD at sync time.
		md, err := fbo.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
		if err != nil {
			return err
		}

		if err := fbo.blocks.Truncate(
			ctx, lState, md.ReadOnly(), file, size); err != nil {
			return err
		}

		fbo.status.addDirtyNode(file)
		fbo.signalWrite()
		return nil
	}
	return runUnlessCanceled(ctx, doTruncate)
}
// setExLocked toggles the executable bit on `file` by switching its
// entry type between File and Exec.  Symlinks and directories are
// ignored (matching ext4), as are no-op changes (a deliberate POSIX
// deviation to keep permissions-preserving rsyncs fast).  Must be
// called with mdWriterLock held.
func (fbo *folderBranchOps) setExLocked(
	ctx context.Context, lState *lockState, file Node, ex bool) (err error) {
	fbo.mdWriterLock.AssertLocked(lState)

	filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
	if err != nil {
		return err
	}

	// Verify we have permission to write (no need to make a successor yet).
	md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
	if err != nil {
		// Naked return: `err` is a named result and already set.
		return
	}

	de, err := fbo.blocks.GetDirtyEntryEvenIfDeleted(
		ctx, lState, md.ReadOnly(), filePath)
	if err != nil {
		return err
	}

	// If the file is a symlink, do nothing (to match ext4
	// behavior).
	if de.Type == Sym || de.Type == Dir {
		fbo.log.CDebugf(ctx, "Ignoring setex on type %s", de.Type)
		return nil
	}

	if ex && (de.Type == File) {
		de.Type = Exec
	} else if !ex && (de.Type == Exec) {
		de.Type = File
	} else {
		// Treating this as a no-op, without updating the ctime, is a
		// POSIX violation, but it's an important optimization to keep
		// permissions-preserving rsyncs fast.
		fbo.log.CDebugf(ctx, "Ignoring no-op setex")
		return nil
	}

	// A type change counts as a metadata change, so bump ctime.
	de.Ctime = fbo.nowUnixNano()

	parentPtr := filePath.parentPath().tailPointer()
	sao, err := newSetAttrOp(filePath.tailName(), parentPtr,
		exAttr, filePath.tailPointer())
	if err != nil {
		return err
	}
	sao.AddSelfUpdate(parentPtr)

	// If the node has been unlinked, we can safely ignore this setex.
	if fbo.nodeCache.IsUnlinked(file) {
		fbo.log.CDebugf(ctx, "Skipping setex for a removed file %v",
			filePath.tailPointer())
		fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile(
			ctx, lState, sao, de)
		return nil
	}

	sao.setFinalPath(filePath)

	dirCacheUndoFn := fbo.blocks.SetAttrInDirEntryInCache(
		lState, filePath, de, sao.Attr)
	return fbo.notifyAndSyncOrSignal(
		ctx, lState, dirCacheUndoFn, []Node{file}, sao, md.ReadOnly())
}
// SetEx sets or clears the executable bit on `file`, retrying the
// underlying MD write as needed.
func (fbo *folderBranchOps) SetEx(
	ctx context.Context, file Node, ex bool) (err error) {
	fbo.log.CDebugf(ctx, "SetEx %s %t", getNodeIDStr(file), ex)
	defer func() {
		fbo.deferLog.CDebugf(ctx, "SetEx %s %t done: %+v",
			getNodeIDStr(file), ex, err)
	}()

	if err = fbo.checkNode(file); err != nil {
		return err
	}

	setExFn := func(lState *lockState) error {
		return fbo.setExLocked(ctx, lState, file, ex)
	}
	return fbo.doMDWriteWithRetryUnlessCanceled(ctx, setExFn)
}
// setMtimeLocked sets the modification time on `file` to `mtime`
// (which must be non-nil; the caller checks).  Ctime is bumped too,
// since changing mtime is itself a metadata change.  Must be called
// with mdWriterLock held.
func (fbo *folderBranchOps) setMtimeLocked(
	ctx context.Context, lState *lockState, file Node,
	mtime *time.Time) error {
	fbo.mdWriterLock.AssertLocked(lState)

	filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
	if err != nil {
		return err
	}

	// Verify we have permission to write (no need to make a successor yet).
	md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
	if err != nil {
		return err
	}

	de, err := fbo.blocks.GetDirtyEntryEvenIfDeleted(
		ctx, lState, md.ReadOnly(), filePath)
	if err != nil {
		return err
	}
	de.Mtime = mtime.UnixNano()
	// setting the mtime counts as changing the file MD, so must set ctime too
	de.Ctime = fbo.nowUnixNano()

	parentPtr := filePath.parentPath().tailPointer()
	sao, err := newSetAttrOp(filePath.tailName(), parentPtr,
		mtimeAttr, filePath.tailPointer())
	if err != nil {
		return err
	}
	sao.AddSelfUpdate(parentPtr)

	// If the node has been unlinked, we can safely ignore this
	// setmtime.
	if fbo.nodeCache.IsUnlinked(file) {
		fbo.log.CDebugf(ctx, "Skipping setmtime for a removed file %v",
			filePath.tailPointer())
		fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile(
			ctx, lState, sao, de)
		return nil
	}

	sao.setFinalPath(filePath)

	dirCacheUndoFn := fbo.blocks.SetAttrInDirEntryInCache(
		lState, filePath, de, sao.Attr)
	return fbo.notifyAndSyncOrSignal(
		ctx, lState, dirCacheUndoFn, []Node{file}, sao, md.ReadOnly())
}
// SetMtime sets the modification time on `file`, retrying the
// underlying MD write as needed.  A nil mtime is silently ignored.
func (fbo *folderBranchOps) SetMtime(
	ctx context.Context, file Node, mtime *time.Time) (err error) {
	fbo.log.CDebugf(ctx, "SetMtime %s %v", getNodeIDStr(file), mtime)
	defer func() {
		fbo.deferLog.CDebugf(ctx, "SetMtime %s %v done: %+v",
			getNodeIDStr(file), mtime, err)
	}()

	if mtime == nil {
		// Can happen on some OSes (e.g. OSX) when trying to set the atime only
		return nil
	}

	if err = fbo.checkNode(file); err != nil {
		return err
	}

	setMtimeFn := func(lState *lockState) error {
		return fbo.setMtimeLocked(ctx, lState, file, mtime)
	}
	return fbo.doMDWriteWithRetryUnlessCanceled(ctx, setMtimeFn)
}
type cleanupFn func(context.Context, *lockState, []BlockPointer, error)
// startSyncLocked readies the blocks and other state needed to sync a
// single file.  It returns:
//
// * `doSync`: Whether or not the sync should actually happen.
// * `stillDirty`: Whether the file should still be considered dirty when
//   this function returns.  (That is, if `doSync` is false, and `stillDirty`
//   is true, then the file has outstanding changes but the sync was vetoed for
//   some other reason.)
// * `fblock`: the root file block for the file being sync'd.
// * `lbc`: A local block cache consisting of a dirtied version of the parent
//   directory for this file.
// * `bps`: All the blocks that need to be put to the server.
// * `syncState`: Must be passed to the `FinishSyncLocked` call after the
//   update completes.
// * `cleanupFn`: A function that, if non-nil, must be called after the sync
//   is done.  `cleanupFn` should be passed the set of bad blocks that couldn't
//   be sync'd (if any), and the error.
// * `err`: The best, greatest return value, everyone says it's absolutely
//   stunning.
//
// Must be called with mdWriterLock held.
func (fbo *folderBranchOps) startSyncLocked(ctx context.Context,
	lState *lockState, md *RootMetadata, node Node, file path) (
	doSync, stillDirty bool, fblock *FileBlock, lbc localBcache,
	bps *blockPutState, syncState fileSyncState,
	cleanup cleanupFn, err error) {
	fbo.mdWriterLock.AssertLocked(lState)

	// if the cache for this file isn't dirty, we're done
	if !fbo.blocks.IsDirty(lState, file) {
		return false, false, nil, nil, nil, fileSyncState{}, nil, nil
	}

	// If the MD doesn't match the MD expected by the path, that
	// implies we are using a cached path, which implies the node has
	// been unlinked.  In that case, we can safely ignore this sync.
	if fbo.nodeCache.IsUnlinked(node) {
		fbo.log.CDebugf(ctx, "Skipping sync for a removed file %v",
			file.tailPointer())
		// Removing the cached info here is a little sketchy,
		// since there's no guarantee that this sync comes
		// from closing the file, and we still want to serve
		// stat calls accurately if the user still has an open
		// handle to this file.
		//
		// Note in particular that if a file just had a dirty
		// directory entry cached (due to an attribute change on a
		// removed file, for example), this will clear that attribute
		// change.  If there's still an open file handle, the user
		// won't be able to see the change anymore.
		//
		// TODO: Hook this in with the node cache GC logic to be
		// perfectly accurate (but at the same time, we'd then have to
		// fix up the intentional panic in the background flusher to
		// be more tolerant of long-lived dirty, removed files).
		err := fbo.blocks.ClearCacheInfo(lState, file)
		if err != nil {
			return false, false, nil, nil, nil, fileSyncState{}, nil, err
		}
		fbo.status.rmDirtyNode(node)
		return false, true, nil, nil, nil, fileSyncState{}, nil, nil
	}

	if file.isValidForNotification() {
		// notify the daemon that a write is being performed
		fbo.config.Reporter().Notify(ctx, writeNotification(file, false))
		defer fbo.config.Reporter().Notify(ctx, writeNotification(file, true))
	}

	fblock, bps, lbc, syncState, err =
		fbo.blocks.StartSync(ctx, lState, md, file)
	// Build the cleanup closure even on error, so the caller can
	// release the per-file sync state in either case.
	cleanup = func(ctx context.Context, lState *lockState,
		blocksToRemove []BlockPointer, err error) {
		fbo.blocks.CleanupSyncState(
			ctx, lState, md.ReadOnly(), file, blocksToRemove, syncState, err)
	}
	if err != nil {
		return false, true, nil, nil, nil, fileSyncState{}, cleanup, err
	}

	return true, true, fblock, lbc, bps, syncState, cleanup, nil
}
// addSelfUpdatesAndParent records a self-update on `op` for the final
// pointer of `p`, and marks every ancestor pointer along the path as
// needing a chain during sync prep.
func addSelfUpdatesAndParent(
	p path, op op, parentsToAddChainsFor map[BlockPointer]bool) {
	lastIdx := len(p.path) - 1
	for i, pn := range p.path {
		if i < lastIdx {
			parentsToAddChainsFor[pn.BlockPointer] = true
		} else {
			op.AddSelfUpdate(pn.BlockPointer)
		}
	}
}
func (fbo *folderBranchOps) syncAllLocked(
ctx context.Context, lState *lockState, excl Excl) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
dirtyFiles := fbo.blocks.GetDirtyFileBlockRefs(lState)
dirtyDirs := fbo.blocks.GetDirtyDirBlockRefs(lState)
if len(dirtyFiles) == 0 && len(dirtyDirs) == 0 {
return nil
}
ctx = fbo.config.MaybeStartTrace(ctx, "FBO.SyncAll",
fmt.Sprintf("%d files, %d dirs", len(dirtyFiles), len(dirtyDirs)))
defer func() { fbo.config.MaybeFinishTrace(ctx, err) }()
// Verify we have permission to write. We do this after the dirty
// check because otherwise readers who call syncAll would get an
// error.
md, err := fbo.getSuccessorMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
bps := newBlockPutState(0)
resolvedPaths := make(map[BlockPointer]path)
lbc := make(localBcache)
var cleanups []func(context.Context, *lockState, error)
defer func() {
for _, cf := range cleanups {
cf(ctx, lState, err)
}
}()
fbo.log.LazyTrace(ctx, "Syncing %d dir(s)", len(dirtyDirs))
// First prep all the directories.
fbo.log.CDebugf(ctx, "Syncing %d dir(s)", len(dirtyDirs))
for _, ref := range dirtyDirs {
node := fbo.nodeCache.Get(ref)
if node == nil {
continue
}
dir := fbo.nodeCache.PathFromNode(node)
dblock, err := fbo.blocks.GetDirtyDir(ctx, lState, md, dir, blockWrite)
if err != nil {
return err
}
lbc[dir.tailPointer()] = dblock
if !fbo.nodeCache.IsUnlinked(node) {
resolvedPaths[dir.tailPointer()] = dir
}
// On a successful sync, clean up the cached entries and the
// dirty blocks.
cleanups = append(cleanups,
func(ctx context.Context, lState *lockState, err error) {
if err != nil {
return
}
fbo.blocks.ClearCachedDirEntry(lState, dir)
fbo.status.rmDirtyNode(node)
})
}
defer func() {
// If the sync is successful, we can clear out all buffered
// directory operations.
if err == nil {
fbo.dirOps = nil
}
}()
fbo.log.LazyTrace(ctx, "Processing %d op(s)", len(fbo.dirOps))
newBlocks := make(map[BlockPointer]bool)
fileBlocks := make(fileBlockMap)
parentsToAddChainsFor := make(map[BlockPointer]bool)
for _, dop := range fbo.dirOps {
// Copy the op before modifying it, in case there's an error
// and we have to retry with the original ops.
newOp := dop.dirOp.deepCopy()
md.AddOp(newOp)
// Add "updates" for all the op updates, and make chains for
// the rest of the parent directories, so they're treated like
// updates during the prepping.
for _, n := range dop.nodes {
p := fbo.nodeCache.PathFromNode(n)
if _, ok := newOp.(*setAttrOp); ok {
// For a setattr, the node is the file, but that
// doesn't get updated, so use the current parent
// node.
p = *p.parentPath()
}
addSelfUpdatesAndParent(p, newOp, parentsToAddChainsFor)
}
var ref BlockRef
switch realOp := newOp.(type) {
case *createOp:
if realOp.Type == Sym {
continue
}
// New files and directories explicitly need
// pointer-updating, because the sync process will turn
// them into simple refs and will forget about the local,
// temporary ID.
newNode := dop.nodes[1]
newPath := fbo.nodeCache.PathFromNode(newNode)
newPointer := newPath.tailPointer()
newBlocks[newPointer] = true
if realOp.Type != Dir {
continue
}
dblock, ok := lbc[newPointer]
if !ok {
// New directories that aren't otherwise dirty need to
// be added to both the `lbc` and `resolvedPaths` so
// they are properly synced.
dblock, err = fbo.blocks.GetDirtyDir(
ctx, lState, md, newPath, blockWrite)
if err != nil {
return err
}
lbc[newPointer] = dblock
if !fbo.nodeCache.IsUnlinked(newNode) {
resolvedPaths[newPointer] = newPath
}
}
if len(dblock.Children) > 0 {
continue
}
// If the directory is empty, we need to explicitly clean
// up its entry after syncing.
ref = newPath.tailRef()
case *renameOp:
ref = realOp.Renamed.Ref()
case *setAttrOp:
ref = realOp.File.Ref()
default:
continue
}
// For create, rename and setattr ops, the target will have a
// dirty entry, but may not have any outstanding operations on
// it, so it needs to be cleaned up manually.
defer func() {
if err != nil {
return
}
wasCleared := fbo.blocks.ClearCachedRef(lState, ref)
if wasCleared {
node := fbo.nodeCache.Get(ref)
if node != nil {
fbo.status.rmDirtyNode(node)
}
}
}()
}
var blocksToRemove []BlockPointer
// TODO: find a way to avoid so many dynamic closure dispatches.
var afterUpdateFns []func() error
afterUpdateFns = append(afterUpdateFns, func() error {
// Any new files or directories need their pointers explicitly
// updated, because the sync will be treating them as a new
// ref, and not an update.
for _, bs := range bps.blockStates {
if newBlocks[bs.oldPtr] {
fbo.blocks.updatePointer(
md.ReadOnly(), bs.oldPtr, bs.blockPtr, false)
}
}
return nil
})
fbo.log.LazyTrace(ctx, "Syncing %d file(s)", len(dirtyFiles))
fbo.log.CDebugf(ctx, "Syncing %d file(s)", len(dirtyFiles))
fileSyncBlocks := newBlockPutState(1)
for _, ref := range dirtyFiles {
node := fbo.nodeCache.Get(ref)
if node == nil {
continue
}
file := fbo.nodeCache.PathFromNode(node)
fbo.log.CDebugf(ctx, "Syncing file %v (%s)", ref, file)
// Start the sync for this dirty file.
doSync, stillDirty, fblock, newLbc, newBps, syncState, cleanup, err :=
fbo.startSyncLocked(ctx, lState, md, node, file)
if cleanup != nil {
// Note: This passes the same `blocksToRemove` into each
// cleanup function. That's ok, as only the ones
// pertaining to a particular syncing file will be acted
// on.
cleanups = append(cleanups,
func(ctx context.Context, lState *lockState, err error) {
cleanup(ctx, lState, blocksToRemove, err)
})
}
if err != nil {
return err
}
if !doSync {
if !stillDirty {
fbo.status.rmDirtyNode(node)
}
continue
}
// Merge the per-file sync info into the batch sync info.
bps.mergeOtherBps(newBps)
fileSyncBlocks.mergeOtherBps(newBps)
resolvedPaths[file.tailPointer()] = file
parent := file.parentPath().tailPointer()
if _, ok := fileBlocks[parent]; !ok {
fileBlocks[parent] = make(map[string]*FileBlock)
}
fileBlocks[parent][file.tailName()] = fblock
// Collect its `afterUpdateFn` along with all the others, so
// they all get invoked under the same lock, to avoid any
// weird races.
afterUpdateFns = append(afterUpdateFns, func() error {
// This will be called after the node cache is updated, so
// this newPath will be correct.
newPath := fbo.nodeCache.PathFromNode(node)
stillDirty, err := fbo.blocks.FinishSyncLocked(
ctx, lState, file, newPath, md.ReadOnly(), syncState, fbo.fbm)
if !stillDirty {
fbo.status.rmDirtyNode(node)
}
return err
})
// Add an "update" for all the parent directory updates, and
// make a chain for the file itself, so they're treated like
// updates during the prepping.
lastOp := md.Data().Changes.Ops[len(md.Data().Changes.Ops)-1]
addSelfUpdatesAndParent(file, lastOp, parentsToAddChainsFor)
// Update the combined local block cache with this file's
// dirty entry.
parentPtr := file.parentPath().tailPointer()
if _, ok := lbc[parentPtr]; ok {
lbc[parentPtr].Children[file.tailName()] =
newLbc[parentPtr].Children[file.tailName()]
} else {
lbc[parentPtr] = newLbc[parentPtr]
}
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
tempIRMD := ImmutableRootMetadata{
ReadOnlyRootMetadata: md.ReadOnly(),
lastWriterVerifyingKey: session.VerifyingKey,
}
fbo.log.LazyTrace(ctx, "Prepping update")
// Create a set of chains for this batch, a succinct summary of
// the file and directory blocks that need to change during this
// sync.
syncChains, err := newCRChains(
ctx, fbo.config.Codec(), []chainMetadata{tempIRMD}, &fbo.blocks, false)
if err != nil {
return err
}
for ptr := range parentsToAddChainsFor {
syncChains.addNoopChain(ptr)
}
// All originals never made it to the server, so don't unmerged
// them.
syncChains.doNotUnrefPointers = syncChains.createdOriginals
head, _ := fbo.getHead(lState)
dummyHeadChains := newCRChainsEmpty()
dummyHeadChains.mostRecentChainMDInfo = mostRecentChainMetadataInfo{
head, head.Data().Dir.BlockInfo}
// Squash the batch of updates together into a set of blocks and
// ready `md` for putting to the server.
md.AddOp(newResolutionOp())
_, newBps, blocksToDelete, err := fbo.prepper.prepUpdateForPaths(
ctx, lState, md, syncChains, dummyHeadChains, tempIRMD, head,
resolvedPaths, lbc, fileBlocks, fbo.config.DirtyBlockCache(),
prepFolderDontCopyIndirectFileBlocks)
if err != nil {
return err
}
if len(blocksToDelete) > 0 {
return errors.Errorf("Unexpectedly found unflushed blocks to delete "+
"during syncAllLocked: %v", blocksToDelete)
}
bps.mergeOtherBps(newBps)
defer func() {
if err != nil {
// Remove any blocks that are covered by file syncs --
// those might get reused upon sync retry. All other
// blocks are fair game for cleanup though.
bps.removeOtherBps(fileSyncBlocks)
fbo.fbm.cleanUpBlockState(md.ReadOnly(), bps, blockDeleteOnMDFail)
}
}()
// Put all the blocks.
blocksToRemove, err = doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, fbo.deferLog, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return err
}
// Call this under the same blockLock as when the pointers are
// updated, so there's never any point in time where a read or
// write might slip in after the pointers are updated, but before
// the deferred writes are re-applied.
afterUpdateFn := func() error {
var errs []error
for _, auf := range afterUpdateFns {
err := auf()
if err != nil {
errs = append(errs, err)
}
}
if len(errs) == 1 {
return errs[0]
} else if len(errs) > 1 {
return errors.Errorf("Got errors %+v", errs)
}
return nil
}
return fbo.finalizeMDWriteLocked(ctx, lState, md, bps, excl,
func(md ImmutableRootMetadata) error {
// Just update the pointers using the resolutionOp, all
// the ops have already been notified.
err = fbo.blocks.UpdatePointers(
md, lState, md.data.Changes.Ops[0], false, afterUpdateFn)
if err != nil {
return err
}
fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{md})
return nil
})
}
// syncAllUnlocked acquires mdWriterLock and then syncs all dirty
// state for this folder via syncAllLocked, without requesting
// exclusivity (NoExcl).
func (fbo *folderBranchOps) syncAllUnlocked(
	ctx context.Context, lState *lockState) error {
	fbo.mdWriterLock.Lock(lState)
	defer fbo.mdWriterLock.Unlock(lState)
	return fbo.syncAllLocked(ctx, lState, NoExcl)
}
// SyncAll implements the KBFSOps interface for folderBranchOps.
// It flushes all outstanding dirty files and directories for the
// given folder/branch in one batched MD write, retrying on
// recoverable write conflicts unless the context is canceled.
func (fbo *folderBranchOps) SyncAll(
	ctx context.Context, folderBranch FolderBranch) (err error) {
	fbo.log.CDebugf(ctx, "SyncAll")
	defer func() { fbo.deferLog.CDebugf(ctx, "SyncAll done: %+v", err) }()
	// Reject calls aimed at a folder/branch other than ours.
	if folderBranch != fbo.folderBranch {
		return WrongOpsError{fbo.folderBranch, folderBranch}
	}
	return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
		func(lState *lockState) error {
			return fbo.syncAllLocked(ctx, lState, NoExcl)
		})
}
// FolderStatus returns the current status of this folder/branch,
// along with a channel that is signaled on the next status change.
// Returns WrongOpsError if called for a different folder/branch.
func (fbo *folderBranchOps) FolderStatus(
	ctx context.Context, folderBranch FolderBranch) (
	fbs FolderBranchStatus, updateChan <-chan StatusUpdate, err error) {
	fbo.log.CDebugf(ctx, "Status")
	defer func() { fbo.deferLog.CDebugf(ctx, "Status done: %+v", err) }()
	if folderBranch != fbo.folderBranch {
		return FolderBranchStatus{}, nil,
			WrongOpsError{fbo.folderBranch, folderBranch}
	}
	return fbo.status.getStatus(ctx, &fbo.blocks)
}
// Status implements the KBFSOps interface for folderBranchOps.
// Global KBFS status is not available at the per-folder level, so
// this always fails with InvalidOpError.
func (fbo *folderBranchOps) Status(
	ctx context.Context) (
	fbs KBFSStatus, updateChan <-chan StatusUpdate, err error) {
	return KBFSStatus{}, nil, InvalidOpError{}
}
// RegisterForChanges registers a single Observer to receive
// notifications about this folder/branch.  Always returns nil.
func (fbo *folderBranchOps) RegisterForChanges(obs Observer) error {
	// It's the caller's responsibility to make sure
	// RegisterForChanges isn't called twice for the same Observer
	fbo.observers.add(obs)
	return nil
}
// UnregisterFromChanges stops an Observer from getting notifications
// about the folder/branch.  Always returns nil.
func (fbo *folderBranchOps) UnregisterFromChanges(obs Observer) error {
	fbo.observers.remove(obs)
	return nil
}
// notifyBatchLocked sends out a notification for all the ops in md,
// then records md in the local edit history.  It stops at the first
// op whose notification fails.
//
// Caller must hold fbo.headLock.
func (fbo *folderBranchOps) notifyBatchLocked(
	ctx context.Context, lState *lockState, md ImmutableRootMetadata) error {
	fbo.headLock.AssertLocked(lState)
	for _, op := range md.data.Changes.Ops {
		// shouldPrefetch=false: batch notifications don't trigger
		// block prefetching.
		err := fbo.notifyOneOpLocked(ctx, lState, op, md.ReadOnly(), false)
		if err != nil {
			return err
		}
	}
	fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{md})
	return nil
}
// searchForNode attempts to reconstruct the path to the given
// blockPointer, consulting only the block updates that happened as
// part of a single MD update operation, and returns the matching
// Node.  Returns NodeNotFoundError if no node corresponds to ptr.
func (fbo *folderBranchOps) searchForNode(ctx context.Context,
	ptr BlockPointer, md ReadOnlyRootMetadata) (Node, error) {
	// Collect every pointer introduced by this MD update; only
	// those are candidates worth searching.
	freshPtrs := make(map[BlockPointer]bool)
	for _, op := range md.data.Changes.Ops {
		for _, ref := range op.Refs() {
			freshPtrs[ref] = true
		}
		for _, update := range op.allUpdates() {
			freshPtrs[update.Ref] = true
		}
	}
	nodeMap, _, err := fbo.blocks.SearchForNodes(ctx, fbo.nodeCache,
		[]BlockPointer{ptr}, freshPtrs, md, md.data.Dir.BlockPointer)
	if err != nil {
		return nil, err
	}
	if node, ok := nodeMap[ptr]; ok {
		return node, nil
	}
	return nil, NodeNotFoundError{ptr}
}
// getUnlinkPathBeforeUpdatingPointers figures out, for an rmOp or a
// renameOp, the path and dir entry of the child that is about to be
// unlinked, using the block pointers as they were *before* the
// pointer updates from op are applied.  toUnlink reports whether an
// unlink should actually happen; for other op types it is false.
//
// Must be called before fbo.blocks.UpdatePointers for this op, so
// that nodeCache.Unlink can capture the correct cached path.
func (fbo *folderBranchOps) getUnlinkPathBeforeUpdatingPointers(
	ctx context.Context, lState *lockState, md ReadOnlyRootMetadata, op op) (
	unlinkPath path, unlinkDe DirEntry, toUnlink bool, err error) {
	if len(md.data.Changes.Ops) == 0 {
		return path{}, DirEntry{}, false, errors.New("md needs at least one op")
	}
	var node Node
	var childName string
	// requireResFix is set when the op's Ref and Unref pointers are
	// identical, which only a resolutionOp fixup can legitimize.
	requireResFix := false
	switch realOp := op.(type) {
	case *rmOp:
		if realOp.Dir.Ref == realOp.Dir.Unref {
			requireResFix = true
		}
		node = fbo.nodeCache.Get(realOp.Dir.Unref.Ref())
		childName = realOp.OldName
	case *renameOp:
		if realOp.NewDir.Unref != zeroPtr {
			// moving to a new dir
			if realOp.NewDir.Ref == realOp.NewDir.Unref {
				requireResFix = true
			}
			node = fbo.nodeCache.Get(realOp.NewDir.Unref.Ref())
		} else {
			// moving to the same dir
			if realOp.OldDir.Ref == realOp.OldDir.Unref {
				requireResFix = true
			}
			node = fbo.nodeCache.Get(realOp.OldDir.Unref.Ref())
		}
		childName = realOp.NewName
	}
	if node == nil {
		// Parent directory isn't cached, so nothing to unlink.
		return path{}, DirEntry{}, false, nil
	}
	p, err := fbo.pathFromNodeForRead(node)
	if err != nil {
		return path{}, DirEntry{}, false, err
	}
	// If the first op in this MD update is a resolutionOp, we need to
	// inspect it to look for the *real* original pointer for this
	// node.
	if resOp, ok := md.data.Changes.Ops[0].(*resolutionOp); ok {
		for _, update := range resOp.allUpdates() {
			if update.Ref == p.tailPointer() {
				fbo.log.CDebugf(ctx,
					"Backing up ptr %v in op %s to original pointer %v",
					p.tailPointer(), op, update.Unref)
				p.path[len(p.path)-1].BlockPointer = update.Unref
				requireResFix = false
				break
			}
		}
	}
	if requireResFix {
		// If we didn't fix up the pointer using a resolutionOp, the
		// directory was likely created during this md update, and so
		// no unlinking is needed.
		fbo.log.CDebugf(ctx,
			"Ignoring unlink when resolutionOp never fixed up %v",
			p.tailPointer())
		return path{}, DirEntry{}, false, nil
	}
	// If the original (clean) parent block is already GC'd from the
	// server, this might not work, but hopefully we'd be
	// fast-forwarding in that case anyway.
	dblock, err := fbo.blocks.GetDir(ctx, lState, md, p, blockRead)
	if err != nil {
		// Treat a missing parent block as "nothing to unlink" rather
		// than an error.
		fbo.log.CDebugf(ctx, "Couldn't get the dir entry for %s in %v: %+v",
			childName, p.tailPointer(), err)
		return path{}, DirEntry{}, false, nil
	}
	de, ok := dblock.Children[childName]
	if !ok {
		return path{}, DirEntry{}, false, nil
	}
	childPath := p.ChildPath(childName, de.BlockPointer)
	return childPath, de, true, nil
}
// notifyOneOpLocked applies a single op's effects to the local node
// cache (pointer updates, unlinks, moves), and sends the resulting
// NodeChange notifications to registered observers.  It is a no-op
// in minimal-init mode.
//
// Callers must hold both fbo.mdWriterLock and fbo.headLock.
// shouldPrefetch is passed through to the pointer update.
func (fbo *folderBranchOps) notifyOneOpLocked(ctx context.Context,
	lState *lockState, op op, md ReadOnlyRootMetadata,
	shouldPrefetch bool) error {
	fbo.mdWriterLock.AssertLocked(lState)
	fbo.headLock.AssertLocked(lState)
	if fbo.config.Mode() == InitMinimal {
		// There is no node cache in minimal mode, so there's nothing
		// to update.
		return nil
	}
	// We need to get unlinkPath before calling UpdatePointers so that
	// nodeCache.Unlink can properly update cachedPath.
	unlinkPath, unlinkDe, toUnlink, err :=
		fbo.getUnlinkPathBeforeUpdatingPointers(ctx, lState, md.ReadOnly(), op)
	if err != nil {
		return err
	}
	err = fbo.blocks.UpdatePointers(md, lState, op, shouldPrefetch, nil)
	if err != nil {
		return err
	}
	// changes accumulates the observer notifications for this op.
	var changes []NodeChange
	switch realOp := op.(type) {
	default:
		fbo.log.CDebugf(ctx, "Unknown op: %s", op)
		return nil
	case *createOp:
		node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
		if node == nil {
			return nil // Nothing to do.
		}
		fbo.log.CDebugf(ctx, "notifyOneOp: create %s in node %s",
			realOp.NewName, getNodeIDStr(node))
		changes = append(changes, NodeChange{
			Node:       node,
			DirUpdated: []string{realOp.NewName},
		})
	case *rmOp:
		node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
		if node == nil {
			return nil // Nothing to do.
		}
		fbo.log.CDebugf(ctx, "notifyOneOp: remove %s in node %s",
			realOp.OldName, getNodeIDStr(node))
		changes = append(changes, NodeChange{
			Node:       node,
			DirUpdated: []string{realOp.OldName},
		})
		// If this node exists, then the child node might exist too,
		// and we need to unlink it in the node cache.
		if toUnlink {
			_ = fbo.nodeCache.Unlink(unlinkDe.Ref(), unlinkPath, unlinkDe)
		}
	case *renameOp:
		oldNode := fbo.nodeCache.Get(realOp.OldDir.Ref.Ref())
		if oldNode != nil {
			changes = append(changes, NodeChange{
				Node:       oldNode,
				DirUpdated: []string{realOp.OldName},
			})
		}
		var newNode Node
		if realOp.NewDir.Ref != zeroPtr {
			// Cross-directory rename: notify the destination dir too.
			newNode = fbo.nodeCache.Get(realOp.NewDir.Ref.Ref())
			if newNode != nil {
				changes = append(changes, NodeChange{
					Node:       newNode,
					DirUpdated: []string{realOp.NewName},
				})
			}
		} else {
			// Same-directory rename.
			newNode = oldNode
			if oldNode != nil {
				// Add another name to the existing NodeChange.
				changes[len(changes)-1].DirUpdated =
					append(changes[len(changes)-1].DirUpdated, realOp.NewName)
			}
		}
		if oldNode != nil {
			fbo.log.CDebugf(ctx, "notifyOneOp: rename %v from %s/%s to %s/%s",
				realOp.Renamed, realOp.OldName, getNodeIDStr(oldNode),
				realOp.NewName, getNodeIDStr(newNode))
			if newNode == nil {
				if childNode :=
					fbo.nodeCache.Get(realOp.Renamed.Ref()); childNode != nil {
					// if the childNode exists, we still have to update
					// its path to go through the new node.  That means
					// creating nodes for all the intervening paths.
					// Unfortunately we don't have enough information to
					// know what the newPath is; we have to guess it from
					// the updates.
					var err error
					newNode, err =
						fbo.searchForNode(ctx, realOp.NewDir.Ref, md)
					if newNode == nil {
						fbo.log.CErrorf(ctx, "Couldn't find the new node: %v",
							err)
					}
				}
			}
			if newNode != nil {
				if toUnlink {
					// Unlink any existing entry at the destination
					// name before moving the renamed node there.
					_ = fbo.nodeCache.Unlink(
						unlinkDe.Ref(), unlinkPath, unlinkDe)
				}
				_, err := fbo.nodeCache.Move(
					realOp.Renamed.Ref(), newNode, realOp.NewName)
				if err != nil {
					return err
				}
			}
		}
	case *syncOp:
		node := fbo.nodeCache.Get(realOp.File.Ref.Ref())
		if node == nil {
			return nil // Nothing to do.
		}
		fbo.log.CDebugf(ctx, "notifyOneOp: sync %d writes in node %s",
			len(realOp.Writes), getNodeIDStr(node))
		changes = append(changes, NodeChange{
			Node:        node,
			FileUpdated: realOp.Writes,
		})
	case *setAttrOp:
		node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
		if node == nil {
			return nil // Nothing to do.
		}
		fbo.log.CDebugf(ctx, "notifyOneOp: setAttr %s for file %s in node %s",
			realOp.Attr, realOp.Name, getNodeIDStr(node))
		p, err := fbo.pathFromNodeForRead(node)
		if err != nil {
			return err
		}
		childNode, err := fbo.blocks.UpdateCachedEntryAttributes(
			ctx, lState, md, p, realOp)
		if err != nil {
			return err
		}
		if childNode == nil {
			return nil // Nothing to do.
		}
		changes = append(changes, NodeChange{
			Node: childNode,
		})
	case *GCOp:
		// Unreferenced blocks in a GCOp mean that we shouldn't cache
		// them anymore
		fbo.log.CDebugf(ctx, "notifyOneOp: GCOp with latest rev %d and %d unref'd blocks", realOp.LatestRev, len(realOp.Unrefs()))
		bcache := fbo.config.BlockCache()
		idsToDelete := make([]kbfsblock.ID, 0, len(realOp.Unrefs()))
		for _, ptr := range realOp.Unrefs() {
			idsToDelete = append(idsToDelete, ptr.ID)
			if err := bcache.DeleteTransient(ptr, fbo.id()); err != nil {
				// Best-effort cleanup; log and continue.
				fbo.log.CDebugf(ctx,
					"Couldn't delete transient entry for %v: %v", ptr, err)
			}
		}
		diskCache := fbo.config.DiskBlockCache()
		if diskCache != nil {
			// Disk deletion is done asynchronously, off this lock path.
			go diskCache.Delete(ctx, idsToDelete)
		}
	case *resolutionOp:
		// If there are any unrefs of blocks that have a node, this is an
		// implied rmOp (see KBFS-1424).
		reverseUpdates := make(map[BlockPointer]BlockPointer)
		for _, unref := range op.Unrefs() {
			node := fbo.nodeCache.Get(unref.Ref())
			if node == nil {
				// TODO: even if we don't have the node that was
				// unreferenced, we might have its parent, and that
				// parent might need an invalidation.
				continue
			}
			// If there is a node, unlink and invalidate.
			p, err := fbo.pathFromNodeForRead(node)
			if err != nil {
				fbo.log.CErrorf(ctx, "Couldn't get path: %v", err)
				continue
			}
			if !p.hasValidParent() {
				fbo.log.CErrorf(ctx, "Removed node %s has no parent", p)
				continue
			}
			parentPath := p.parentPath()
			parentNode := fbo.nodeCache.Get(parentPath.tailRef())
			if parentNode != nil {
				changes = append(changes, NodeChange{
					Node:       parentNode,
					DirUpdated: []string{p.tailName()},
				})
			}
			fbo.log.CDebugf(ctx, "resolutionOp: remove %s, node %s",
				p.tailPointer(), getNodeIDStr(node))
			// Revert the path back to the original BlockPointers,
			// before the updates were applied.
			if len(reverseUpdates) == 0 {
				// Lazily build the Ref->Unref reversal map on first use.
				for _, update := range op.allUpdates() {
					reverseUpdates[update.Ref] = update.Unref
				}
			}
			for i, pNode := range p.path {
				if oldPtr, ok := reverseUpdates[pNode.BlockPointer]; ok {
					p.path[i].BlockPointer = oldPtr
				}
			}
			de, err := fbo.blocks.GetDirtyEntry(ctx, lState, md, p)
			if err != nil {
				// Proceed with unlinking even if the entry lookup fails.
				fbo.log.CDebugf(ctx,
					"Couldn't get the dir entry for %s/%v: %+v",
					p, p.tailPointer(), err)
			}
			_ = fbo.nodeCache.Unlink(p.tailRef(), p, de)
		}
		if len(changes) == 0 {
			return nil
		}
	}
	fbo.observers.batchChanges(ctx, changes)
	return nil
}
// notifyOneOp is the locking wrapper for notifyOneOpLocked: it takes
// headLock and then applies the op's local notifications.  Callers
// must already hold mdWriterLock (asserted inside the locked variant).
func (fbo *folderBranchOps) notifyOneOp(ctx context.Context,
	lState *lockState, op op, md ReadOnlyRootMetadata,
	shouldPrefetch bool) error {
	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	return fbo.notifyOneOpLocked(ctx, lState, op, md, shouldPrefetch)
}
// getCurrMDRevisionLocked returns the revision of the current head,
// or kbfsmd.RevisionUninitialized if no head has been set yet.
//
// Caller must hold fbo.headLock (read or write).
func (fbo *folderBranchOps) getCurrMDRevisionLocked(lState *lockState) kbfsmd.Revision {
	fbo.headLock.AssertAnyLocked(lState)
	if fbo.head != (ImmutableRootMetadata{}) {
		return fbo.head.Revision()
	}
	return kbfsmd.RevisionUninitialized
}
// getCurrMDRevision is the read-locking wrapper around
// getCurrMDRevisionLocked.
func (fbo *folderBranchOps) getCurrMDRevision(
	lState *lockState) kbfsmd.Revision {
	fbo.headLock.RLock(lState)
	defer fbo.headLock.RUnlock(lState)
	return fbo.getCurrMDRevisionLocked(lState)
}
// applyMDUpdatesFunc is the shared signature of applyMDUpdates and
// applyMDUpdatesLocked, letting callers pick the locking variant.
type applyMDUpdatesFunc func(context.Context, *lockState, []ImmutableRootMetadata) error
// applyMDUpdatesLocked applies a batch of fetched merged MD updates
// to the local head, notifying observers for each op.  Updates are
// skipped while the journal holds unflushed MDs, and an UnmergedError
// is returned (after kicking off conflict resolution) when we're on
// a staged branch.
//
// Caller must hold fbo.mdWriterLock.
func (fbo *folderBranchOps) applyMDUpdatesLocked(ctx context.Context,
	lState *lockState, rmds []ImmutableRootMetadata) error {
	fbo.mdWriterLock.AssertLocked(lState)
	// If there's anything in the journal, don't apply these MDs.
	// Wait for CR to happen.
	if fbo.isMasterBranchLocked(lState) {
		mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
		if err != nil {
			return err
		}
		if mergedRev != kbfsmd.RevisionUninitialized {
			if len(rmds) > 0 {
				// We should update our view of the merged master though,
				// to avoid re-registering for the same updates again.
				func() {
					fbo.headLock.Lock(lState)
					defer fbo.headLock.Unlock(lState)
					fbo.setLatestMergedRevisionLocked(
						ctx, lState, rmds[len(rmds)-1].Revision(), false)
				}()
			}
			fbo.log.CDebugf(ctx,
				"Ignoring fetched revisions while MDs are in journal")
			return nil
		}
	}
	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	// if we have staged changes, ignore all updates until conflict
	// resolution kicks in.  TODO: cache these for future use.
	if !fbo.isMasterBranchLocked(lState) {
		if len(rmds) > 0 {
			latestMerged := rmds[len(rmds)-1]
			// If we're running a journal, don't trust our own updates
			// here because they might have come from our own journal
			// before the conflict was detected.  Assume we'll hear
			// about the conflict via callbacks from the journal.
			if TLFJournalEnabled(fbo.config, fbo.id()) {
				session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
				if err != nil {
					return err
				}
				if session.VerifyingKey == latestMerged.LastModifyingWriterVerifyingKey() {
					return UnmergedError{}
				}
			}
			// setHeadLocked takes care of merged case
			fbo.setLatestMergedRevisionLocked(
				ctx, lState, latestMerged.Revision(), false)
			unmergedRev := kbfsmd.RevisionUninitialized
			if fbo.head != (ImmutableRootMetadata{}) {
				unmergedRev = fbo.head.Revision()
			}
			// Kick off conflict resolution between our unmerged head
			// and the newly-seen merged head.
			fbo.cr.Resolve(ctx, unmergedRev, latestMerged.Revision())
		}
		return UnmergedError{}
	}
	// Don't allow updates while we're in the dirty state; the next
	// sync will put us into an unmerged state anyway and we'll
	// require conflict resolution.
	if fbo.blocks.GetState(lState) != cleanState {
		return errors.WithStack(NoUpdatesWhileDirtyError{})
	}
	appliedRevs := make([]ImmutableRootMetadata, 0, len(rmds))
	for _, rmd := range rmds {
		// check that we're applying the expected MD revision
		if rmd.Revision() <= fbo.getCurrMDRevisionLocked(lState) {
			// Already caught up!
			continue
		}
		if err := isReadableOrError(ctx, fbo.config.KBPKI(), rmd.ReadOnly()); err != nil {
			return err
		}
		err := fbo.setHeadSuccessorLocked(ctx, lState, rmd, false)
		if err != nil {
			return err
		}
		// No new operations in these.
		if rmd.IsWriterMetadataCopiedSet() {
			continue
		}
		for _, op := range rmd.data.Changes.Ops {
			// shouldPrefetch=true: warm the block cache for new data.
			err := fbo.notifyOneOpLocked(ctx, lState, op, rmd.ReadOnly(), true)
			if err != nil {
				return err
			}
		}
		appliedRevs = append(appliedRevs, rmd)
	}
	if len(appliedRevs) > 0 {
		fbo.editHistory.UpdateHistory(ctx, appliedRevs)
	}
	return nil
}
// undoMDUpdatesLocked rolls the local head backwards through the
// given MDs: it walks rmds in reverse, sets the head to each
// revision's predecessor, and replays each op inverted so local
// notifications and cached pointers are undone.
//
// Caller must hold fbo.mdWriterLock; headLock is taken here.
func (fbo *folderBranchOps) undoMDUpdatesLocked(ctx context.Context,
	lState *lockState, rmds []ImmutableRootMetadata) error {
	fbo.mdWriterLock.AssertLocked(lState)
	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	// Don't allow updates while we're in the dirty state; the next
	// sync will put us into an unmerged state anyway and we'll
	// require conflict resolution.
	if fbo.blocks.GetState(lState) != cleanState {
		return NotPermittedWhileDirtyError{}
	}
	// go backwards through the updates
	for i := len(rmds) - 1; i >= 0; i-- {
		rmd := rmds[i]
		// on undo, it's ok to re-apply the current revision since you
		// need to invert all of its ops.
		//
		// This duplicates a check in
		// fbo.setHeadPredecessorLocked.  TODO: Remove this
		// duplication.
		if rmd.Revision() != fbo.getCurrMDRevisionLocked(lState) &&
			rmd.Revision() != fbo.getCurrMDRevisionLocked(lState)-1 {
			return MDUpdateInvertError{rmd.Revision(),
				fbo.getCurrMDRevisionLocked(lState)}
		}
		// TODO: Check that the revisions are equal only for
		// the first iteration.
		if rmd.Revision() < fbo.getCurrMDRevisionLocked(lState) {
			err := fbo.setHeadPredecessorLocked(ctx, lState, rmd)
			if err != nil {
				return err
			}
		}
		// iterate the ops in reverse and invert each one
		ops := rmd.data.Changes.Ops
		for j := len(ops) - 1; j >= 0; j-- {
			io, err := invertOpForLocalNotifications(ops[j])
			if err != nil {
				// A failed inversion is logged and skipped rather than
				// aborting the whole undo.
				fbo.log.CWarningf(ctx,
					"got error %v when invert op %v; "+
						"skipping. Open file handles "+
						"may now be in an invalid "+
						"state, which can be fixed by "+
						"either closing them all or "+
						"restarting KBFS.",
					err, ops[j])
				continue
			}
			err = fbo.notifyOneOpLocked(ctx, lState, io, rmd.ReadOnly(), false)
			if err != nil {
				return err
			}
		}
	}
	// TODO: update the edit history?
	return nil
}
// applyMDUpdates is the locking wrapper around applyMDUpdatesLocked.
func (fbo *folderBranchOps) applyMDUpdates(ctx context.Context,
	lState *lockState, rmds []ImmutableRootMetadata) error {
	fbo.mdWriterLock.Lock(lState)
	defer fbo.mdWriterLock.Unlock(lState)
	return fbo.applyMDUpdatesLocked(ctx, lState, rmds)
}
// getLatestMergedRevision returns the highest merged revision this
// folder has observed, under a headLock read lock.
func (fbo *folderBranchOps) getLatestMergedRevision(lState *lockState) kbfsmd.Revision {
	fbo.headLock.RLock(lState)
	defer fbo.headLock.RUnlock(lState)
	return fbo.latestMergedRevision
}
// setLatestMergedRevisionLocked records rev as the latest known
// merged revision.  The value only moves forward unless allowBackward
// is set; rev must not be kbfsmd.RevisionUninitialized.
//
// Caller must hold fbo.headLock.
func (fbo *folderBranchOps) setLatestMergedRevisionLocked(ctx context.Context, lState *lockState, rev kbfsmd.Revision, allowBackward bool) {
	fbo.headLock.AssertLocked(lState)
	if rev == kbfsmd.RevisionUninitialized {
		panic("Cannot set latest merged revision to an uninitialized value")
	}
	if fbo.latestMergedRevision < rev || allowBackward {
		fbo.latestMergedRevision = rev
		fbo.log.CDebugf(ctx, "Updated latestMergedRevision to %d.", rev)
	} else {
		fbo.log.CDebugf(ctx, "Local latestMergedRevision (%d) is higher than "+
			"the new revision (%d); won't update.", fbo.latestMergedRevision, rev)
	}
}
// getAndApplyMDUpdates fetches every merged MD revision newer than
// the latest one we've seen, then hands the batch to applyFunc.
// Assumes all necessary locking is either already done by the
// caller, or is done inside applyFunc.
func (fbo *folderBranchOps) getAndApplyMDUpdates(ctx context.Context,
	lState *lockState, applyFunc applyMDUpdatesFunc) error {
	// Fetch all MD revisions newer than our current merged head.
	start := fbo.getLatestMergedRevision(lState) + 1
	rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(), start)
	if err != nil {
		return err
	}
	return applyFunc(ctx, lState, rmds)
}
// getAndApplyNewestUnmergedHead fetches the latest unmerged revision
// for our current branch and applies it as the head's successor,
// sending notifications and caching the new MD.  If the branch has
// switched by the time the fetch completes, it returns nil so the
// caller can simply retry.
func (fbo *folderBranchOps) getAndApplyNewestUnmergedHead(ctx context.Context,
	lState *lockState) error {
	fbo.log.CDebugf(ctx, "Fetching the newest unmerged head")
	// Snapshot the branch ID under mdWriterLock, then release it for
	// the (possibly slow) network fetch.
	bid := func() BranchID {
		fbo.mdWriterLock.Lock(lState)
		defer fbo.mdWriterLock.Unlock(lState)
		return fbo.bid
	}()
	// We can only ever be at most one revision behind, so fetch the
	// latest unmerged revision and apply it as a successor.
	md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), bid)
	if err != nil {
		return err
	}
	if md == (ImmutableRootMetadata{}) {
		// There is no unmerged revision, oops!
		return errors.New("Couldn't find an unmerged head")
	}
	fbo.mdWriterLock.Lock(lState)
	defer fbo.mdWriterLock.Unlock(lState)
	if fbo.bid != bid {
		// The branches switched (apparently CR completed), so just
		// try again.
		fbo.log.CDebugf(ctx, "Branches switched while fetching unmerged head")
		return nil
	}
	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	if err := fbo.setHeadSuccessorLocked(ctx, lState, md, false); err != nil {
		return err
	}
	if err := fbo.notifyBatchLocked(ctx, lState, md); err != nil {
		return err
	}
	if err := fbo.config.MDCache().Put(md); err != nil {
		return err
	}
	return nil
}
// getUnmergedMDUpdates returns a slice of the unmerged MDs on this
// TLF's current unmerged branch, between the merge point for the
// branch and the current head.  The returned MDs are the same
// instances that are stored in the MD cache, so they should be
// modified with care.
func (fbo *folderBranchOps) getUnmergedMDUpdates(
	ctx context.Context, lState *lockState) (
	kbfsmd.Revision, []ImmutableRootMetadata, error) {
	// acquire mdWriterLock to read the current branch ID.
	bid := func() BranchID {
		fbo.mdWriterLock.Lock(lState)
		defer fbo.mdWriterLock.Unlock(lState)
		return fbo.bid
	}()
	return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
		bid, fbo.getCurrMDRevision(lState))
}
// getUnmergedMDUpdatesLocked is the variant of getUnmergedMDUpdates
// for callers that already hold mdWriterLock.
func (fbo *folderBranchOps) getUnmergedMDUpdatesLocked(
	ctx context.Context, lState *lockState) (
	kbfsmd.Revision, []ImmutableRootMetadata, error) {
	fbo.mdWriterLock.AssertLocked(lState)
	return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
		fbo.bid, fbo.getCurrMDRevision(lState))
}
// undoUnmergedMDUpdatesLocked undoes all unmerged MD updates back to
// the branch point, resets the head to the merged revision at that
// point, and returns a list of block pointers that were created
// during the staged era (refs plus updated refs from every undone
// op), so callers can unreference them.
//
// Caller must hold fbo.mdWriterLock.
func (fbo *folderBranchOps) undoUnmergedMDUpdatesLocked(
	ctx context.Context, lState *lockState) ([]BlockPointer, error) {
	fbo.mdWriterLock.AssertLocked(lState)
	currHead, unmergedRmds, err := fbo.getUnmergedMDUpdatesLocked(ctx, lState)
	if err != nil {
		return nil, err
	}
	err = fbo.undoMDUpdatesLocked(ctx, lState, unmergedRmds)
	if err != nil {
		return nil, err
	}
	// We have arrived at the branch point.  The new root is
	// the previous revision from the current head.  Find it
	// and apply.  TODO: somehow fake the current head into
	// being currHead-1, so that future calls to
	// applyMDUpdates will fetch this along with the rest of
	// the updates.
	fbo.setBranchIDLocked(lState, NullBranchID)
	rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
		currHead, Merged)
	if err != nil {
		return nil, err
	}
	err = func() error {
		fbo.headLock.Lock(lState)
		defer fbo.headLock.Unlock(lState)
		err = fbo.setHeadPredecessorLocked(ctx, lState, rmd)
		if err != nil {
			return err
		}
		// allowBackward=true: the merged revision may be lower than
		// what we recorded while staged.
		fbo.setLatestMergedRevisionLocked(ctx, lState, rmd.Revision(), true)
		return nil
	}()
	if err != nil {
		return nil, err
	}
	// Return all new refs
	var unmergedPtrs []BlockPointer
	for _, rmd := range unmergedRmds {
		for _, op := range rmd.data.Changes.Ops {
			for _, ptr := range op.Refs() {
				if ptr != zeroPtr {
					unmergedPtrs = append(unmergedPtrs, ptr)
				}
			}
			for _, update := range op.allUpdates() {
				if update.Ref != zeroPtr {
					unmergedPtrs = append(unmergedPtrs, update.Ref)
				}
			}
		}
	}
	return unmergedPtrs, nil
}
// unstageLocked abandons the current staged branch: it undoes all
// unmerged updates, prunes the branch on the server, fast-forwards
// to the latest merged state, and writes a resolutionOp that unrefs
// every block created during the staged era.
//
// Caller must hold fbo.mdWriterLock.
func (fbo *folderBranchOps) unstageLocked(ctx context.Context,
	lState *lockState) error {
	fbo.mdWriterLock.AssertLocked(lState)
	// fetch all of my unstaged updates, and undo them one at a time
	bid, wasMasterBranch := fbo.bid, fbo.isMasterBranchLocked(lState)
	unmergedPtrs, err := fbo.undoUnmergedMDUpdatesLocked(ctx, lState)
	if err != nil {
		return err
	}
	// let the server know we no longer have need
	if !wasMasterBranch {
		err = fbo.config.MDOps().PruneBranch(ctx, fbo.id(), bid)
		if err != nil {
			return err
		}
	}
	// now go forward in time, if possible
	err = fbo.getAndApplyMDUpdates(ctx, lState,
		fbo.applyMDUpdatesLocked)
	if err != nil {
		return err
	}
	md, err := fbo.getSuccessorMDForWriteLocked(ctx, lState)
	if err != nil {
		return err
	}
	// Finally, create a resolutionOp with the newly-unref'd pointers.
	resOp := newResolutionOp()
	for _, ptr := range unmergedPtrs {
		resOp.AddUnrefBlock(ptr)
	}
	md.AddOp(resOp)
	bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
	if err != nil {
		return err
	}
	return fbo.finalizeMDWriteLocked(ctx, lState, md, bps, NoExcl,
		func(md ImmutableRootMetadata) error {
			return fbo.notifyBatchLocked(ctx, lState, md)
		})
}
// UnstageForTesting abandons the current staged branch for this
// folder, if any.  It is a no-op on the master branch, and refuses
// to run while there is dirty local state.
//
// TODO: remove once we have automatic conflict resolution
func (fbo *folderBranchOps) UnstageForTesting(
	ctx context.Context, folderBranch FolderBranch) (err error) {
	fbo.log.CDebugf(ctx, "UnstageForTesting")
	defer func() {
		fbo.deferLog.CDebugf(ctx, "UnstageForTesting done: %+v", err)
	}()
	if folderBranch != fbo.folderBranch {
		return WrongOpsError{fbo.folderBranch, folderBranch}
	}
	return runUnlessCanceled(ctx, func() error {
		lState := makeFBOLockState()
		if fbo.isMasterBranch(lState) {
			// no-op
			return nil
		}
		if fbo.blocks.GetState(lState) != cleanState {
			return NotPermittedWhileDirtyError{}
		}
		// launch unstaging in a new goroutine, because we don't want to
		// use the provided context because upper layers might ignore our
		// notifications if we do.  But we still want to wait for the
		// context to cancel.
		c := make(chan error, 1)
		freshCtx, cancel := fbo.newCtxWithFBOID()
		defer cancel()
		fbo.log.CDebugf(freshCtx, "Launching new context for UnstageForTesting")
		go func() {
			lState := makeFBOLockState()
			// NOTE(review): the retry wrapper gets the caller's ctx
			// while the inner unstage gets freshCtx — looks
			// intentional (retry stops on caller cancel), but
			// confirm this mix is deliberate.
			c <- fbo.doMDWriteWithRetry(ctx, lState,
				func(lState *lockState) error {
					return fbo.unstageLocked(freshCtx, lState)
				})
		}()
		select {
		case err := <-c:
			return err
		case <-ctx.Done():
			return ctx.Err()
		}
	})
}
// rekeyLocked attempts to rekey this folder so that all current
// devices of the TLF's users have access to the latest TLF crypt key,
// clearing the rekey bit on success.  If promptPaper is set, the key
// manager may prompt the user for unlocked paper keys.  The returned
// RekeyResult reports whether any rekey actually happened, and whether
// a paper-key prompt is still required to finish keying this device.
//
// mdWriterLock must be taken by the caller.
func (fbo *folderBranchOps) rekeyLocked(ctx context.Context,
	lState *lockState, promptPaper bool) (res RekeyResult, err error) {
	fbo.log.CDebugf(ctx, "rekeyLocked")
	defer func() {
		fbo.deferLog.CDebugf(ctx, "rekeyLocked done: %+v %+v", res, err)
	}()

	fbo.mdWriterLock.AssertLocked(lState)

	// Rekey can't run concurrently with conflict resolution.
	if !fbo.isMasterBranchLocked(lState) {
		return RekeyResult{}, errors.New("can't rekey while staged")
	}

	// untrusted head is ok here.
	head, _ := fbo.getHead(lState)
	if head != (ImmutableRootMetadata{}) {
		// If we already have a cached revision, make sure we're
		// up-to-date with the latest revision before inspecting the
		// metadata, since Rekey doesn't let us go into CR mode, and
		// we don't actually get folder update notifications when the
		// rekey bit is set, just a "folder needs rekey" update.
		if err := fbo.getAndApplyMDUpdates(
			ctx, lState, fbo.applyMDUpdatesLocked); err != nil {
			// An "already at current revision" mismatch is benign;
			// any other mismatch or error is fatal for the rekey.
			if applyErr, ok := err.(MDRevisionMismatch); !ok ||
				applyErr.rev != applyErr.curr {
				return RekeyResult{}, err
			}
		}
	}

	md, lastWriterVerifyingKey, rekeyWasSet, err :=
		fbo.getMDForRekeyWriteLocked(ctx, lState)
	if err != nil {
		return RekeyResult{}, err
	}

	currKeyGen := md.LatestKeyGeneration()
	rekeyDone, tlfCryptKey, err := fbo.config.KeyManager().
		Rekey(ctx, md, promptPaper)

	// Classify the KeyManager result; some "errors" just mean more
	// work remains for another device/user.
	stillNeedsRekey := false
	switch err.(type) {
	case nil:
		// TODO: implement a "forced" option that rekeys even when the
		// devices haven't changed?
		if !rekeyDone {
			fbo.log.CDebugf(ctx, "No rekey necessary")
			return RekeyResult{
				DidRekey:      false,
				NeedsPaperKey: false,
			}, nil
		}
		// Clear the rekey bit if any.
		md.clearRekeyBit()
		session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
		if err != nil {
			return RekeyResult{}, err
		}
		// Readers can't clear the last revision, because:
		// 1) They don't have access to the writer metadata, so can't clear the
		//    block changes.
		// 2) Readers need the MetadataFlagWriterMetadataCopied bit set for
		//    MDServer to authorize the write.
		// Without this check, MDServer returns an Unauthorized error.
		if md.GetTlfHandle().IsWriter(session.UID) {
			md.clearLastRevision()
		}

	case RekeyIncompleteError:
		if !rekeyDone && rekeyWasSet {
			// The rekey bit was already set, and there's nothing else
			// we can to do, so don't put any new revisions.
			fbo.log.CDebugf(ctx, "No further rekey possible by this user.")
			return RekeyResult{
				DidRekey:      false,
				NeedsPaperKey: false,
			}, nil
		}
		// Rekey incomplete, fallthrough without early exit, to ensure
		// we write the metadata with any potential changes
		fbo.log.CDebugf(ctx,
			"Rekeyed reader devices, but still need writer rekey")

	case NeedOtherRekeyError, NeedSelfRekeyError:
		stillNeedsRekey = true

	default:
		if err == context.DeadlineExceeded {
			fbo.log.CDebugf(ctx, "Paper key prompt timed out")
			// Reschedule the prompt in the timeout case.
			stillNeedsRekey = true
		} else {
			return RekeyResult{}, err
		}
	}

	if stillNeedsRekey {
		fbo.log.CDebugf(ctx, "Device doesn't have access to rekey")
		// If we didn't have read access, then we don't have any
		// unlocked paper keys.  Wait for some time, and then if we
		// still aren't rekeyed, try again but this time prompt the
		// user for any known paper keys.  We do this even if the
		// rekey bit is already set, since we may have restarted since
		// the previous rekey attempt, before prompting for the paper
		// key.  Only schedule this as a one-time event, since direct
		// folder accesses from the user will also cause a
		// rekeyWithPrompt.

		if rekeyWasSet {
			// Devices not yet keyed shouldn't set the rekey bit again
			fbo.log.CDebugf(ctx, "Rekey bit already set")
			return RekeyResult{
				DidRekey:      rekeyDone,
				NeedsPaperKey: true,
			}, nil
		}
		// This device hasn't been keyed yet, fall through to set the rekey bit
	}

	// add an empty operation to satisfy assumptions elsewhere
	md.AddOp(newRekeyOp())

	// we still let readers push a new md block that we validate against reader
	// permissions
	err = fbo.finalizeMDRekeyWriteLocked(
		ctx, lState, md, lastWriterVerifyingKey)
	if err != nil {
		return RekeyResult{
			DidRekey:      rekeyDone,
			NeedsPaperKey: stillNeedsRekey,
		}, err
	}

	// cache any new TLF crypt key
	if tlfCryptKey != nil {
		keyGen := md.LatestKeyGeneration()
		err = fbo.config.KeyCache().PutTLFCryptKey(md.TlfID(), keyGen, *tlfCryptKey)
		if err != nil {
			return RekeyResult{
				DidRekey:      rekeyDone,
				NeedsPaperKey: stillNeedsRekey,
			}, err
		}
	}

	// send rekey finish notification
	handle := md.GetTlfHandle()
	if currKeyGen >= FirstValidKeyGen {
		fbo.config.Reporter().Notify(ctx,
			rekeyNotification(ctx, fbo.config, handle, true))
	}
	return RekeyResult{
		DidRekey:      rekeyDone,
		NeedsPaperKey: stillNeedsRekey,
	}, nil
}
// RequestRekey asks the rekey state machine to schedule a rekey of
// the given TLF.  Only the master branch of this FBO's own folder may
// be requested here.
func (fbo *folderBranchOps) RequestRekey(_ context.Context, tlf tlf.ID) {
	requested := FolderBranch{tlf, MasterBranch}
	if requested != fbo.folderBranch {
		// TODO: log instead of panic?
		panic(WrongOpsError{fbo.folderBranch, requested})
	}
	fbo.rekeyFSM.Event(NewRekeyRequestEvent())
}
// SyncFromServerForTesting blocks until the local view of this folder
// is fully up-to-date with the server: all outstanding writes are
// synced, the local journal and MD flushes are drained, any conflict
// resolution completes and takes us back to the master branch, all
// merged MD updates are applied, and asynchronous block archiving,
// deletion, quota reclamation, and edit-history work finishes.
// Intended for tests that need a quiescent, fully-merged TLF.
func (fbo *folderBranchOps) SyncFromServerForTesting(
	ctx context.Context, folderBranch FolderBranch) (err error) {
	fbo.log.CDebugf(ctx, "SyncFromServerForTesting")
	defer func() {
		fbo.deferLog.CDebugf(ctx,
			"SyncFromServerForTesting done: %+v", err)
	}()

	if folderBranch != fbo.folderBranch {
		return WrongOpsError{fbo.folderBranch, folderBranch}
	}

	lState := makeFBOLockState()

	// Make sure everything outstanding syncs to disk at least.
	if err := fbo.syncAllUnlocked(ctx, lState); err != nil {
		return err
	}

	// A journal flush before CR, if needed.
	if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(),
		fbo.log); err != nil {
		return err
	}

	if err := fbo.mdFlushes.Wait(ctx); err != nil {
		return err
	}

	if err := fbo.branchChanges.Wait(ctx); err != nil {
		return err
	}

	// Loop until we're fully updated on the master branch.
	for {
		if !fbo.isMasterBranch(lState) {
			if err := fbo.cr.Wait(ctx); err != nil {
				return err
			}
			// If we are still staged after the wait, then we have a problem.
			if !fbo.isMasterBranch(lState) {
				return errors.Errorf("Conflict resolution didn't take us out " +
					"of staging.")
			}
		}

		dirtyFiles := fbo.blocks.GetDirtyFileBlockRefs(lState)
		if len(dirtyFiles) > 0 {
			for _, ref := range dirtyFiles {
				fbo.log.CDebugf(ctx, "DeCache entry left: %v", ref)
			}
			return errors.New("can't sync from server while dirty")
		}

		// A journal flush after CR, if needed.
		if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(),
			fbo.log); err != nil {
			return err
		}

		if err := fbo.mdFlushes.Wait(ctx); err != nil {
			return err
		}

		if err := fbo.branchChanges.Wait(ctx); err != nil {
			return err
		}

		if err := fbo.getAndApplyMDUpdates(
			ctx, lState, fbo.applyMDUpdates); err != nil {
			if applyErr, ok := err.(MDRevisionMismatch); ok {
				if applyErr.rev == applyErr.curr {
					fbo.log.CDebugf(ctx, "Already up-to-date with server")
					return nil
				}
			}
			// An unmerged/staged state means CR was kicked off by the
			// update; loop around and wait for it to finish.
			if _, isUnmerged := err.(UnmergedError); isUnmerged {
				continue
			} else if err == errNoMergedRevWhileStaged {
				continue
			}
			return err
		}
		break
	}

	// Wait for all the asynchronous block archiving and quota
	// reclamation to hit the block server.
	if err := fbo.fbm.waitForArchives(ctx); err != nil {
		return err
	}
	if err := fbo.fbm.waitForDeletingBlocks(ctx); err != nil {
		return err
	}
	if err := fbo.editHistory.Wait(ctx); err != nil {
		return err
	}
	if err := fbo.fbm.waitForQuotaReclamations(ctx); err != nil {
		return err
	}

	// A second journal flush if needed, to clear out any
	// archive/remove calls caused by the above operations.
	return WaitForTLFJournal(ctx, fbo.config, fbo.id(), fbo.log)
}
// CtxFBOTagKey is the type used for unique context tags within
// folderBranchOps.  A dedicated type (rather than a bare int or
// string) avoids collisions with context keys from other packages.
type CtxFBOTagKey int

const (
	// CtxFBOIDKey is the type of the tag for unique operation IDs
	// within folderBranchOps.
	CtxFBOIDKey CtxFBOTagKey = iota
)

// CtxFBOOpID is the display name for the unique operation
// folderBranchOps ID tag.
const CtxFBOOpID = "FBOID"
// ctxWithFBOID returns a child of ctx tagged with a fresh random
// operation ID for log correlation.
func (fbo *folderBranchOps) ctxWithFBOID(ctx context.Context) context.Context {
	tagged := ctxWithRandomIDReplayable(ctx, CtxFBOIDKey, CtxFBOOpID, fbo.log)
	return tagged
}
// newCtxWithFBOID builds a fresh background context for an internal
// FBO goroutine: tagged with an operation ID, cancelable, and wrapped
// in a cancellation delayer.  The caller must invoke the returned
// cancel function when done.
func (fbo *folderBranchOps) newCtxWithFBOID() (context.Context, context.CancelFunc) {
	// ctxWithFBOID already attaches the tag replayably (via
	// ctxWithRandomIDReplayable), so NewContextReplayable isn't
	// needed here.
	tagged := fbo.ctxWithFBOID(context.Background())
	cancelable, cancel := context.WithCancel(tagged)
	delayed, err := NewContextWithCancellationDelayer(cancelable)
	if err != nil {
		panic(err)
	}
	return delayed, cancel
}
// runUnlessShutdown runs fn with a context that's canceled on
// shutdown.  If the FBO shuts down before fn finishes, it returns
// ShutdownHappenedError instead of fn's result.
func (fbo *folderBranchOps) runUnlessShutdown(fn func(ctx context.Context) error) error {
	ctx, cancel := fbo.newCtxWithFBOID()
	defer cancel()
	done := make(chan error, 1)
	go func() {
		done <- fn(ctx)
	}()

	select {
	case err := <-done:
		return err
	case <-fbo.shutdownChan:
		return ShutdownHappenedError{}
	}
}
// doFastForwardLocked jumps the local view of the folder straight to
// currHead without applying any of the intermediate MD revisions: all
// cached nodes are re-pointed at the new head's blocks, observers are
// notified of the affected nodes, and the edit history is reset since
// it can no longer be extended incrementally.
//
// Both mdWriterLock and headLock must be taken by the caller.
func (fbo *folderBranchOps) doFastForwardLocked(ctx context.Context,
	lState *lockState, currHead ImmutableRootMetadata) error {
	fbo.mdWriterLock.AssertLocked(lState)
	fbo.headLock.AssertLocked(lState)

	fbo.log.CDebugf(ctx, "Fast-forwarding from rev %d to rev %d",
		fbo.latestMergedRevision, currHead.Revision())
	changes, err := fbo.blocks.FastForwardAllNodes(
		ctx, lState, currHead.ReadOnly())
	if err != nil {
		return err
	}

	err = fbo.setHeadSuccessorLocked(ctx, lState, currHead, true /*rebase*/)
	if err != nil {
		return err
	}

	// Invalidate all the affected nodes.
	if len(changes) > 0 {
		fbo.observers.batchChanges(ctx, changes)
	}

	// Reset the edit history.  TODO: notify any listeners that we've
	// done this.
	fbo.editHistory.Shutdown()
	fbo.editHistory = NewTlfEditHistory(fbo.config, fbo, fbo.log)
	return nil
}
// maybeFastForward fast-forwards the local head to the server's
// current head revision, but only when it's worthwhile and safe:
// enough time has passed since the last update (fastForwardTimeThresh),
// we're on the master branch, the local journal is fully flushed, and
// the server head is far enough ahead (fastForwardRevThresh) that
// fetching every intermediate revision would be wasteful.  Returns
// true iff a fast-forward was actually performed.
func (fbo *folderBranchOps) maybeFastForward(ctx context.Context,
	lState *lockState, lastUpdate time.Time, currUpdate time.Time) (
	fastForwardDone bool, err error) {
	// Has it been long enough to try fast-forwarding?
	if currUpdate.Before(lastUpdate.Add(fastForwardTimeThresh)) ||
		!fbo.isMasterBranch(lState) {
		return false, nil
	}

	fbo.log.CDebugf(ctx, "Checking head for possible "+
		"fast-forwarding (last update time=%s)", lastUpdate)
	currHead, err := fbo.config.MDOps().GetForTLF(ctx, fbo.id())
	if err != nil {
		return false, err
	}
	fbo.log.CDebugf(ctx, "Current head is revision %d", currHead.Revision())

	fbo.mdWriterLock.Lock(lState)
	defer fbo.mdWriterLock.Unlock(lState)
	// If the journal has anything in it, don't fast-forward since we
	// haven't finished flushing yet.  If there was really a remote
	// update on the server, we'll end up in CR eventually.
	mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
	if err != nil {
		return false, err
	}
	if mergedRev != kbfsmd.RevisionUninitialized {
		return false, nil
	}

	// Re-check the branch state now that we hold the writer lock; CR
	// may have started since the unlocked check above.
	if !fbo.isMasterBranchLocked(lState) {
		// Don't update if we're staged.
		return false, nil
	}

	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	if currHead.Revision() < fbo.latestMergedRevision+fastForwardRevThresh {
		// Might as well fetch all the revisions.
		return false, nil
	}

	err = fbo.doFastForwardLocked(ctx, lState, currHead)
	if err != nil {
		return false, err
	}
	return true, nil
}
// locallyFinalizeTLF marks the local view of this TLF as finalized
// without contacting the server.  It builds a fake finalized copy of
// the current head (with a placeholder finalization extension and no
// valid signature) and swaps it in as the new head, which triggers a
// handle-change notification so future accesses under the old name
// are redirected.  Any failure is logged and the head is left alone.
func (fbo *folderBranchOps) locallyFinalizeTLF(ctx context.Context) {
	lState := makeFBOLockState()
	fbo.mdWriterLock.Lock(lState)
	defer fbo.mdWriterLock.Unlock(lState)
	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)

	if fbo.head == (ImmutableRootMetadata{}) {
		// No head to finalize.
		return
	}

	// It's safe to give this a finalized number of 1 and a fake user
	// name.  The whole point here is to move the old finalized TLF
	// name away to a new name, where the user won't be able to access
	// it anymore, and if there's a conflict with a previously-moved
	// TLF that shouldn't matter.
	now := fbo.config.Clock().Now()
	finalizedInfo, err := tlf.NewHandleExtension(
		tlf.HandleExtensionFinalized, 1, libkb.NormalizedUsername("<unknown>"),
		now)
	if err != nil {
		fbo.log.CErrorf(ctx, "Couldn't make finalized info: %+v", err)
		return
	}

	fakeSignedHead := &RootMetadataSigned{MD: fbo.head.bareMd}
	finalRmd, err := fakeSignedHead.MakeFinalCopy(
		fbo.config.Codec(), now, finalizedInfo)
	if err != nil {
		fbo.log.CErrorf(ctx, "Couldn't finalize MD: %+v", err)
		return
	}

	// Construct the data needed to fake a new head.
	mdID, err := kbfsmd.MakeID(fbo.config.Codec(), finalRmd.MD)
	if err != nil {
		fbo.log.CErrorf(ctx, "Couldn't get finalized MD ID: %+v", err)
		return
	}
	bareHandle, err := finalRmd.MD.MakeBareTlfHandle(fbo.head.Extra())
	if err != nil {
		fbo.log.CErrorf(ctx, "Couldn't get finalized bare handle: %+v", err)
		return
	}
	handle, err := MakeTlfHandle(ctx, bareHandle, fbo.config.KBPKI())
	if err != nil {
		fbo.log.CErrorf(ctx, "Couldn't get finalized handle: %+v", err)
		return
	}
	finalBrmd, ok := finalRmd.MD.(MutableBareRootMetadata)
	if !ok {
		fbo.log.CErrorf(ctx, "Couldn't get finalized mutable bare MD: %+v", err)
		return
	}

	// We don't have a way to sign this with a valid key (and we might
	// be logged out anyway), so just directly make the md immutable.
	finalIrmd := ImmutableRootMetadata{
		ReadOnlyRootMetadata: makeRootMetadata(
			finalBrmd, fbo.head.Extra(), handle).ReadOnly(),
		mdID: mdID,
	}

	// This will trigger the handle change notification to observers.
	err = fbo.setHeadSuccessorLocked(ctx, lState, finalIrmd, false)
	if err != nil {
		fbo.log.CErrorf(ctx, "Couldn't set finalized MD: %+v", err)
		return
	}
}
// registerAndWaitForUpdates is the long-running update loop for this
// folder: it registers with the MD server for update notifications
// and processes them as they arrive, re-registering with exponential
// backoff on transient failures.  It returns only on shutdown, or on
// a permanent error (a metadata version we can't read, or a finalized
// TLF we can no longer access), and closes fbo.updateDoneChan on
// exit.
func (fbo *folderBranchOps) registerAndWaitForUpdates() {
	defer close(fbo.updateDoneChan)
	childDone := make(chan struct{})
	var lastUpdate time.Time
	err := fbo.runUnlessShutdown(func(ctx context.Context) error {
		defer close(childDone)
		// If we fail to register for or process updates, try again
		// with an exponential backoff, so we don't overwhelm the
		// server or ourselves with too many attempts in a hopeless
		// situation.
		expBackoff := backoff.NewExponentialBackOff()
		// Never give up hope until we shut down
		expBackoff.MaxElapsedTime = 0
		// Register and wait in a loop unless we hit an unrecoverable error
		fbo.cancelUpdatesLock.Lock()
		if fbo.cancelUpdates != nil {
			// It should be impossible to get here without having
			// already called the cancel function, but just in case
			// call it here again.
			fbo.cancelUpdates()
		}
		ctx, fbo.cancelUpdates = context.WithCancel(ctx)
		fbo.cancelUpdatesLock.Unlock()
		for {
			err := backoff.RetryNotifyWithContext(ctx, func() error {
				// Replace the FBOID one with a fresh id for every attempt
				newCtx := fbo.ctxWithFBOID(ctx)
				updateChan, err := fbo.registerForUpdates(newCtx)
				if err != nil {
					select {
					case <-ctx.Done():
						// Shortcut the retry, we're done.
						return nil
					default:
						return err
					}
				}

				currUpdate, err := fbo.waitForAndProcessUpdates(
					newCtx, lastUpdate, updateChan)
				switch errors.Cause(err).(type) {
				case UnmergedError:
					// skip the back-off timer and continue directly to next
					// registerForUpdates
					return nil
				case NewMetadataVersionError:
					fbo.log.CDebugf(ctx, "Abandoning updates since we can't "+
						"read the newest metadata: %+v", err)
					fbo.status.setPermErr(err)
					// No need to lock here, since `cancelUpdates` is
					// only set within this same goroutine.
					fbo.cancelUpdates()
					return context.Canceled
				case kbfsmd.ServerErrorCannotReadFinalizedTLF:
					fbo.log.CDebugf(ctx, "Abandoning updates since we can't "+
						"read the finalized metadata for this TLF: %+v", err)
					fbo.status.setPermErr(err)

					// Locally finalize the TLF so new accesses
					// through to the old folder name will find the
					// new folder.
					fbo.locallyFinalizeTLF(newCtx)

					// No need to lock here, since `cancelUpdates` is
					// only set within this same goroutine.
					fbo.cancelUpdates()
					return context.Canceled
				}

				select {
				case <-ctx.Done():
					// Shortcut the retry, we're done.
					return nil
				default:
					// Only advance lastUpdate on success, so the
					// fast-forward heuristic sees the true gap.
					if err == nil {
						lastUpdate = currUpdate
					}
					return err
				}
			},
				expBackoff,
				func(err error, nextTime time.Duration) {
					fbo.log.CDebugf(ctx,
						"Retrying registerForUpdates in %s due to err: %v",
						nextTime, err)
				})
			if err != nil {
				return err
			}
		}
	})

	if err != nil && err != context.Canceled {
		fbo.log.CWarningf(context.Background(),
			"registerAndWaitForUpdates failed unexpectedly with an error: %v",
			err)
	}
	<-childDone
}
// registerForUpdatesShouldFireNow reports whether the head was
// fetched recently enough that the server should deliver any pending
// update notification immediately rather than lazily.
func (fbo *folderBranchOps) registerForUpdatesShouldFireNow() bool {
	fbo.muLastGetHead.Lock()
	defer fbo.muLastGetHead.Unlock()
	sinceLastGetHead := fbo.config.Clock().Now().Sub(fbo.lastGetHead)
	return sinceLastGetHead < registerForUpdatesFireNowThreshold
}
// registerForUpdates registers this folder with the MD server for
// update notifications past the latest merged revision we know about,
// returning the server's notification channel.
func (fbo *folderBranchOps) registerForUpdates(ctx context.Context) (
	updateChan <-chan error, err error) {
	lState := makeFBOLockState()
	currRev := fbo.getLatestMergedRevision(lState)

	fireNow := fbo.registerForUpdatesShouldFireNow()
	if fireNow {
		ctx = rpc.WithFireNow(ctx)
	}

	fbo.log.CDebugf(ctx,
		"Registering for updates (curr rev = %d, fire now = %v)",
		currRev, fireNow)
	defer func() {
		fbo.deferLog.CDebugf(ctx,
			"Registering for updates (curr rev = %d, fire now = %v) done: %+v",
			currRev, fireNow, err)
	}()
	// RegisterForUpdate will itself retry on connectivity issues
	return fbo.config.MDServer().RegisterForUpdate(ctx, fbo.id(), currRev)
}
// waitForAndProcessUpdates blocks on updateChan until the server
// signals that new metadata is available, then either fast-forwards
// (when we're far enough behind) or fetches and applies the new
// updates.  It honors pause/unpause requests on fbo.updatePauseChan.
// Returns the local time at which the successfully-processed update
// arrived, for use as the caller's next lastUpdate.
func (fbo *folderBranchOps) waitForAndProcessUpdates(
	ctx context.Context, lastUpdate time.Time,
	updateChan <-chan error) (currUpdate time.Time, err error) {
	// successful registration; now, wait for an update or a shutdown
	fbo.log.CDebugf(ctx, "Waiting for updates")
	defer func() {
		fbo.deferLog.CDebugf(ctx, "Waiting for updates done: %+v", err)
	}()

	lState := makeFBOLockState()

	for {
		select {
		case err := <-updateChan:
			fbo.log.CDebugf(ctx, "Got an update: %v", err)
			if err != nil {
				return time.Time{}, err
			}
			// Getting and applying the updates requires holding
			// locks, so make sure it doesn't take too long.
			ctx, cancel := context.WithTimeout(ctx, backgroundTaskTimeout)
			defer cancel()

			currUpdate := fbo.config.Clock().Now()
			ffDone, err :=
				fbo.maybeFastForward(ctx, lState, lastUpdate, currUpdate)
			if err != nil {
				return time.Time{}, err
			}
			if ffDone {
				return currUpdate, nil
			}

			err = fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdates)
			if err != nil {
				fbo.log.CDebugf(ctx, "Got an error while applying "+
					"updates: %v", err)
				return time.Time{}, err
			}
			return currUpdate, nil
		case unpause := <-fbo.updatePauseChan:
			fbo.log.CInfof(ctx, "Updates paused")
			// wait to be unpaused
			select {
			case <-unpause:
				fbo.log.CInfof(ctx, "Updates unpaused")
			case <-ctx.Done():
				return time.Time{}, ctx.Err()
			}
		case <-ctx.Done():
			return time.Time{}, ctx.Err()
		}
	}
}
// getCachedDirOpsCount returns the number of directory operations
// currently batched up for a background flush, under mdWriterLock.
func (fbo *folderBranchOps) getCachedDirOpsCount(lState *lockState) int {
	fbo.mdWriterLock.Lock(lState)
	count := len(fbo.dirOps)
	fbo.mdWriterLock.Unlock(lState)
	return count
}
// backgroundFlusher is a long-running goroutine that periodically
// syncs dirty files and batched directory operations to the server.
// It skips the wait (syncing immediately) when the dirty-block cache
// is over its buffer limit or the dir-op batch is full, and it tracks
// whether successive iterations keep seeing the same set of dirty
// files so it can warn about syncs that aren't making progress.
// Exits on shutdown.
func (fbo *folderBranchOps) backgroundFlusher() {
	lState := makeFBOLockState()
	var prevDirtyFileMap map[BlockRef]bool
	sameDirtyFileCount := 0
	for {
		doSelect := true
		if fbo.blocks.GetState(lState) == dirtyState &&
			fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) &&
			sameDirtyFileCount < 10 {
			// We have dirty files, and the system has a full buffer,
			// so don't bother waiting for a signal, just get right to
			// the main attraction.
			doSelect = false
		} else if fbo.getCachedDirOpsCount(lState) >=
			fbo.config.BGFlushDirOpBatchSize() {
			doSelect = false
		}

		if doSelect {
			// Wait until we really have a write waiting.
			doWait := true
			select {
			case <-fbo.syncNeededChan:
				if fbo.getCachedDirOpsCount(lState) >=
					fbo.config.BGFlushDirOpBatchSize() {
					doWait = false
				}
			case <-fbo.forceSyncChan:
				doWait = false
			case <-fbo.shutdownChan:
				return
			}

			if doWait {
				timer := time.NewTimer(fbo.config.BGFlushPeriod())
				// Loop until either a tick's worth of time passes,
				// the batch size of directory ops is full, a sync is
				// forced, or a shutdown happens.
			loop:
				for {
					select {
					case <-timer.C:
						break loop
					case <-fbo.syncNeededChan:
						if fbo.getCachedDirOpsCount(lState) >=
							fbo.config.BGFlushDirOpBatchSize() {
							break loop
						}
					case <-fbo.forceSyncChan:
						break loop
					case <-fbo.shutdownChan:
						return
					}
				}
			}
		}

		dirtyFiles := fbo.blocks.GetDirtyFileBlockRefs(lState)
		dirOpsCount := fbo.getCachedDirOpsCount(lState)
		if len(dirtyFiles) == 0 && dirOpsCount == 0 {
			sameDirtyFileCount = 0
			continue
		}

		// Make sure we are making some progress
		currDirtyFileMap := make(map[BlockRef]bool)
		for _, ref := range dirtyFiles {
			currDirtyFileMap[ref] = true
		}
		if reflect.DeepEqual(currDirtyFileMap, prevDirtyFileMap) {
			sameDirtyFileCount++
		} else {
			sameDirtyFileCount = 0
		}
		prevDirtyFileMap = currDirtyFileMap

		// Best-effort: any sync error is logged inside, so the
		// returned error is always nil and can be ignored.
		fbo.runUnlessShutdown(func(ctx context.Context) (err error) {
			// Denote that these are coming from a background
			// goroutine, not directly from any user.
			ctx = NewContextReplayable(ctx,
				func(ctx context.Context) context.Context {
					return context.WithValue(ctx, CtxBackgroundSyncKey, "1")
				})

			fbo.log.CDebugf(ctx, "Background sync triggered: %d dirty files, "+
				"%d dir ops in batch", len(dirtyFiles), dirOpsCount)

			if sameDirtyFileCount >= 100 {
				// If the local journal is full, we might not be able to
				// make progress until more data is flushed to the
				// servers, so just warn here rather than just an outright
				// panic.
				fbo.log.CWarningf(ctx, "Making no Sync progress on dirty "+
					"files after %d attempts: %v", sameDirtyFileCount,
					dirtyFiles)
			}

			// Just in case network access or a bug gets stuck for a
			// long time, time out the sync eventually.
			longCtx, longCancel :=
				context.WithTimeout(ctx, backgroundTaskTimeout)
			defer longCancel()
			err = fbo.SyncAll(longCtx, fbo.folderBranch)
			if err != nil {
				// Just log the warning and keep trying to
				// sync the rest of the dirty files.
				fbo.log.CWarningf(ctx, "Couldn't sync all: %+v", err)
			}
			return nil
		})
	}
}
// blockUnmergedWrites stops any new MD-modifying operations from
// starting by acquiring mdWriterLock on behalf of the caller; the
// lock is held until unblockUnmergedWrites is called with the same
// lock state.
func (fbo *folderBranchOps) blockUnmergedWrites(lState *lockState) {
	fbo.mdWriterLock.Lock(lState)
}

// unblockUnmergedWrites releases the lock taken by
// blockUnmergedWrites, allowing writes to proceed again.
func (fbo *folderBranchOps) unblockUnmergedWrites(lState *lockState) {
	fbo.mdWriterLock.Unlock(lState)
}
// finalizeResolutionLocked caches the resolution's blocks, writes the
// resolved MD to the merged branch via ResolveBranch, moves the head
// back onto the master branch, and sends local notifications for each
// fixed-up merged op in newOps.  A revision conflict during the put
// means someone raced us and CR must be restarted by the caller.
//
// mdWriterLock must be taken by the caller.
func (fbo *folderBranchOps) finalizeResolutionLocked(ctx context.Context,
	lState *lockState, md *RootMetadata, bps *blockPutState,
	newOps []op, blocksToDelete []kbfsblock.ID) error {
	fbo.mdWriterLock.AssertLocked(lState)

	// Put the blocks into the cache so that, even if we fail below,
	// future attempts may reuse the blocks.
	err := fbo.finalizeBlocks(bps)
	if err != nil {
		return err
	}

	// Last chance to get pre-empted.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return err
	}
	irmd, err := fbo.config.MDOps().ResolveBranch(ctx, fbo.id(), fbo.bid,
		blocksToDelete, md, session.VerifyingKey)
	doUnmergedPut := isRevisionConflict(err)
	if doUnmergedPut {
		fbo.log.CDebugf(ctx, "Got a conflict after resolution; aborting CR")
		return err
	}
	if err != nil {
		return err
	}

	// Queue a rekey if the bit was set.
	if md.IsRekeySet() {
		defer fbo.config.RekeyQueue().Enqueue(md.TlfID())
	}

	md.loadCachedBlockChanges(ctx, bps, fbo.log)

	// Set the head to the new MD.
	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	err = fbo.setHeadConflictResolvedLocked(ctx, lState, irmd)
	if err != nil {
		fbo.log.CWarningf(ctx, "Couldn't set local MD head after a "+
			"successful put: %v", err)
		return err
	}
	fbo.setBranchIDLocked(lState, NullBranchID)

	// Archive the old, unref'd blocks if journaling is off.
	if !TLFJournalEnabled(fbo.config, fbo.id()) {
		fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly())
	}

	// Notify using a copy of the MD that carries the fixed-up merged
	// ops instead of the resolution's own ops.
	mdCopyWithLocalOps, err := md.deepCopy(fbo.config.Codec())
	if err != nil {
		return err
	}
	mdCopyWithLocalOps.data.Changes.Ops = newOps

	// notifyOneOp for every fixed-up merged op.
	for _, op := range newOps {
		err := fbo.notifyOneOpLocked(
			ctx, lState, op, mdCopyWithLocalOps.ReadOnly(), false)
		if err != nil {
			return err
		}
	}
	fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{irmd})
	return nil
}
// finalizeResolution caches all the blocks, and writes the new MD to
// the merged branch, failing if there is a conflict.  It also sends
// out the given newOps notifications locally.  This is used for
// completing conflict resolution.  It takes mdWriterLock for the
// duration and delegates to finalizeResolutionLocked.
func (fbo *folderBranchOps) finalizeResolution(ctx context.Context,
	lState *lockState, md *RootMetadata, bps *blockPutState,
	newOps []op, blocksToDelete []kbfsblock.ID) error {
	// Take the writer lock.
	fbo.mdWriterLock.Lock(lState)
	defer fbo.mdWriterLock.Unlock(lState)
	return fbo.finalizeResolutionLocked(
		ctx, lState, md, bps, newOps, blocksToDelete)
}
// unstageAfterFailedResolution abandons the local unmerged branch
// after conflict resolution has failed, discarding the staged local
// changes in favor of the merged server view.  Takes mdWriterLock.
func (fbo *folderBranchOps) unstageAfterFailedResolution(ctx context.Context,
	lState *lockState) error {
	// Take the writer lock.
	fbo.mdWriterLock.Lock(lState)
	defer fbo.mdWriterLock.Unlock(lState)

	// Last chance to get pre-empted.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	fbo.log.CWarningf(ctx, "Unstaging branch %s after a resolution failure",
		fbo.bid)
	return fbo.unstageLocked(ctx, lState)
}
// handleTLFBranchChange reacts to a journal-initiated branch change
// for this TLF: it switches the FBO onto the given unmerged branch,
// sets the head to the branch's unmerged MD, and kicks off conflict
// resolution.  Stale notifications (e.g. when CR already completed
// before we took the lock) are logged and ignored.
//
// Fix: the stale-branch-change log line previously used the %d verb
// for newBID; a BranchID is logged with %s everywhere else in this
// file, so %d produced garbage output.
func (fbo *folderBranchOps) handleTLFBranchChange(ctx context.Context,
	newBID BranchID) {
	lState := makeFBOLockState()

	fbo.mdWriterLock.Lock(lState)
	defer fbo.mdWriterLock.Unlock(lState)

	fbo.log.CDebugf(ctx, "Journal branch change: %s", newBID)

	if !fbo.isMasterBranchLocked(lState) {
		if fbo.bid == newBID {
			fbo.log.CDebugf(ctx, "Already on branch %s", newBID)
			return
		}
		panic(fmt.Sprintf("Cannot switch to branch %s while on branch %s",
			newBID, fbo.bid))
	}

	md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), newBID)
	if err != nil {
		fbo.log.CWarningf(ctx,
			"No unmerged head on journal branch change (bid=%s)", newBID)
		return
	}

	if md == (ImmutableRootMetadata{}) || md.MergedStatus() != Unmerged ||
		md.BID() != newBID {
		// This can happen if CR got kicked off in some other way and
		// completed before we took the lock to process this
		// notification.
		fbo.log.CDebugf(ctx, "Ignoring stale branch change: md=%v, newBID=%s",
			md, newBID)
		return
	}

	// Everything we thought we knew about quota reclamation is now
	// called into question.
	fbo.fbm.clearLastQRData()

	// Kick off conflict resolution and set the head to the correct branch.
	fbo.setBranchIDLocked(lState, newBID)
	fbo.cr.Resolve(ctx, md.Revision(), kbfsmd.RevisionUninitialized)

	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	err = fbo.setHeadSuccessorLocked(ctx, lState, md, true /*rebased*/)
	if err != nil {
		fbo.log.CWarningf(ctx,
			"Could not set head on journal branch change: %v", err)
		return
	}
}
// onTLFBranchChange is the journal's callback for when this TLF's
// journal moves onto a local unmerged branch.  The real work happens
// asynchronously in handleTLFBranchChange; the branchChanges wait
// group tracks the outstanding goroutine so waiters can drain it.
func (fbo *folderBranchOps) onTLFBranchChange(newBID BranchID) {
	fbo.branchChanges.Add(1)

	go func() {
		defer fbo.branchChanges.Done()
		ctx, cancelFunc := fbo.newCtxWithFBOID()
		defer cancelFunc()

		// This only happens on a `PruneBranch` call, in which case we
		// would have already updated fbo's local view of the branch/head.
		if newBID == NullBranchID {
			fbo.log.CDebugf(ctx, "Ignoring branch change back to master")
			return
		}

		fbo.handleTLFBranchChange(ctx, newBID)
	}()
}
// handleMDFlush records that merged revision rev has been flushed
// from the local journal to the server: it advances the
// latest-known-merged revision, then fetches the flushed revision and
// archives its unreferenced blocks if the revision is archivable.
func (fbo *folderBranchOps) handleMDFlush(ctx context.Context, bid BranchID,
	rev kbfsmd.Revision) {
	fbo.log.CDebugf(ctx, "Considering archiving references for flushed MD revision %d", rev)

	lState := makeFBOLockState()
	func() {
		fbo.headLock.Lock(lState)
		defer fbo.headLock.Unlock(lState)
		fbo.setLatestMergedRevisionLocked(ctx, lState, rev, false)
	}()

	// Get that revision.
	rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
		rev, Merged)
	if err != nil {
		fbo.log.CWarningf(ctx, "Couldn't get revision %d for archiving: %v",
			rev, err)
		return
	}

	if err := isArchivableMDOrError(rmd.ReadOnly()); err != nil {
		fbo.log.CDebugf(
			ctx, "Skipping archiving references for flushed MD revision %d: %s", rev, err)
		return
	}

	fbo.fbm.archiveUnrefBlocks(rmd.ReadOnly())
}
// onMDFlush is the journal's callback for when an MD revision has
// been flushed to the server.  Flushes on unmerged branches are
// ignored; merged flushes are handled asynchronously by
// handleMDFlush, tracked via the mdFlushes wait group.
func (fbo *folderBranchOps) onMDFlush(bid BranchID, rev kbfsmd.Revision) {
	fbo.mdFlushes.Add(1)

	go func() {
		defer fbo.mdFlushes.Done()
		ctx, cancelFunc := fbo.newCtxWithFBOID()
		defer cancelFunc()

		if bid != NullBranchID {
			fbo.log.CDebugf(ctx, "Ignoring MD flush on branch %v for "+
				"revision %d", bid, rev)
			return
		}

		fbo.handleMDFlush(ctx, bid, rev)
	}()
}
// GetUpdateHistory implements the KBFSOps interface for
// folderBranchOps.  It fetches every merged MD revision for this
// folder from the server and summarizes each revision's writer, date,
// disk usage, and ops (including block refs/unrefs and pointer
// updates).  Writer usernames are resolved once per UID and cached
// for the duration of the call.
func (fbo *folderBranchOps) GetUpdateHistory(ctx context.Context,
	folderBranch FolderBranch) (history TLFUpdateHistory, err error) {
	fbo.log.CDebugf(ctx, "GetUpdateHistory")
	defer func() {
		fbo.deferLog.CDebugf(ctx, "GetUpdateHistory done: %+v", err)
	}()

	if folderBranch != fbo.folderBranch {
		return TLFUpdateHistory{}, WrongOpsError{fbo.folderBranch, folderBranch}
	}

	rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(),
		kbfsmd.RevisionInitial)
	if err != nil {
		return TLFUpdateHistory{}, err
	}

	// The latest revision determines the folder's current ID and name.
	if len(rmds) > 0 {
		rmd := rmds[len(rmds)-1]
		history.ID = rmd.TlfID().String()
		history.Name = rmd.GetTlfHandle().GetCanonicalPath()
	}
	history.Updates = make([]UpdateSummary, 0, len(rmds))
	// Cache of UID -> username, to avoid repeated KBPKI lookups.
	writerNames := make(map[keybase1.UID]string)
	for _, rmd := range rmds {
		writer, ok := writerNames[rmd.LastModifyingWriter()]
		if !ok {
			name, err := fbo.config.KBPKI().GetNormalizedUsername(
				ctx, rmd.LastModifyingWriter().AsUserOrTeam())
			if err != nil {
				return TLFUpdateHistory{}, err
			}
			writer = string(name)
			writerNames[rmd.LastModifyingWriter()] = writer
		}
		updateSummary := UpdateSummary{
			Revision:  rmd.Revision(),
			Date:      time.Unix(0, rmd.data.Dir.Mtime),
			Writer:    writer,
			LiveBytes: rmd.DiskUsage(),
			Ops:       make([]OpSummary, 0, len(rmd.data.Changes.Ops)),
		}
		for _, op := range rmd.data.Changes.Ops {
			opSummary := OpSummary{
				Op:      op.String(),
				Refs:    make([]string, 0, len(op.Refs())),
				Unrefs:  make([]string, 0, len(op.Unrefs())),
				Updates: make(map[string]string),
			}
			for _, ptr := range op.Refs() {
				opSummary.Refs = append(opSummary.Refs, ptr.String())
			}
			for _, ptr := range op.Unrefs() {
				opSummary.Unrefs = append(opSummary.Unrefs, ptr.String())
			}
			for _, update := range op.allUpdates() {
				opSummary.Updates[update.Unref.String()] = update.Ref.String()
			}
			updateSummary.Ops = append(updateSummary.Ops, opSummary)
		}
		history.Updates = append(history.Updates, updateSummary)
	}
	return history, nil
}
// GetEditHistory implements the KBFSOps interface for
// folderBranchOps.  It returns the complete per-writer edit history
// for this folder, based on the current head (which requires an
// identify of the handle's users).
func (fbo *folderBranchOps) GetEditHistory(ctx context.Context,
	folderBranch FolderBranch) (edits TlfWriterEdits, err error) {
	fbo.log.CDebugf(ctx, "GetEditHistory")
	defer func() {
		fbo.deferLog.CDebugf(ctx, "GetEditHistory done: %+v", err)
	}()

	if folderBranch != fbo.folderBranch {
		return nil, WrongOpsError{fbo.folderBranch, folderBranch}
	}

	lState := makeFBOLockState()
	head, err := fbo.getMDForReadNeedIdentify(ctx, lState)
	if err != nil {
		return nil, err
	}

	return fbo.editHistory.GetComplete(ctx, head)
}
// PushStatusChange forces a new status be fetched by status listeners.
// Delegates to the top-level KBFSOps implementation.
func (fbo *folderBranchOps) PushStatusChange() {
	fbo.config.KBFSOps().PushStatusChange()
}
// ClearPrivateFolderMD implements the KBFSOps interface for
// folderBranchOps.  It drops all locally-cached MD state for a
// non-public folder, first cancelling the registered update goroutine
// (a fresh one is launched the next time a head is set) and
// unregistering from the MD server.  Public folders are left
// untouched.
func (fbo *folderBranchOps) ClearPrivateFolderMD(ctx context.Context) {
	if fbo.folderBranch.Tlf.Type() == tlf.Public {
		return
	}

	lState := makeFBOLockState()
	fbo.mdWriterLock.Lock(lState)
	defer fbo.mdWriterLock.Unlock(lState)
	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)

	if fbo.head == (ImmutableRootMetadata{}) {
		// Nothing to clear.
		return
	}

	fbo.log.CDebugf(ctx, "Clearing folder MD")

	// First cancel the background goroutine that's registered for
	// updates, because the next time we set the head in this FBO
	// we'll launch another one.
	fbo.cancelUpdatesLock.Lock()
	defer fbo.cancelUpdatesLock.Unlock()
	if fbo.cancelUpdates != nil {
		fbo.cancelUpdates()
		// Wait for the updater goroutine to actually exit before
		// clearing state, so it can't race with us.
		select {
		case <-fbo.updateDoneChan:
		case <-ctx.Done():
			fbo.log.CDebugf(
				ctx, "Context canceled before updater was canceled")
			return
		}
		fbo.config.MDServer().CancelRegistration(ctx, fbo.id())
	}

	fbo.head = ImmutableRootMetadata{}
	fbo.headStatus = headUntrusted
	fbo.latestMergedRevision = kbfsmd.RevisionUninitialized
	fbo.hasBeenCleared = true
}
// ForceFastForward implements the KBFSOps interface for
// folderBranchOps.  If this folder's MD was previously cleared (via
// ClearPrivateFolderMD), it asynchronously fetches the current head
// from the server and fast-forwards the local state to it.  The
// forcedFastForwards wait group tracks the outstanding goroutine.
//
// Fix: the "already up-to-date" race check inside the goroutine used
// to log `err`, which is provably nil at that point (any non-nil err
// already returned above), so it always printed a misleading "<nil>".
func (fbo *folderBranchOps) ForceFastForward(ctx context.Context) {
	lState := makeFBOLockState()
	fbo.headLock.RLock(lState)
	defer fbo.headLock.RUnlock(lState)
	if fbo.head != (ImmutableRootMetadata{}) {
		// We're already up to date.
		return
	}
	if !fbo.hasBeenCleared {
		// No reason to fast-forward here if it hasn't ever been
		// cleared.
		return
	}

	fbo.forcedFastForwards.Add(1)
	go func() {
		defer fbo.forcedFastForwards.Done()
		ctx, cancelFunc := fbo.newCtxWithFBOID()
		defer cancelFunc()

		fbo.log.CDebugf(ctx, "Forcing a fast-forward")
		currHead, err := fbo.config.MDOps().GetForTLF(ctx, fbo.id())
		if err != nil {
			fbo.log.CDebugf(ctx, "Fast-forward failed: %v", err)
			return
		}
		if currHead == (ImmutableRootMetadata{}) {
			fbo.log.CDebugf(ctx, "No MD yet")
			return
		}
		fbo.log.CDebugf(ctx, "Current head is revision %d", currHead.Revision())

		lState := makeFBOLockState()
		fbo.mdWriterLock.Lock(lState)
		defer fbo.mdWriterLock.Unlock(lState)
		fbo.headLock.Lock(lState)
		defer fbo.headLock.Unlock(lState)
		if fbo.head != (ImmutableRootMetadata{}) {
			// We raced against some other update and lost, so there's
			// nothing left to do.
			fbo.log.CDebugf(ctx, "Already up-to-date")
			return
		}

		err = fbo.doFastForwardLocked(ctx, lState, currHead)
		if err != nil {
			fbo.log.CDebugf(ctx, "Fast-forward failed: %v", err)
		}
	}()
}
// PushConnectionStatusChange pushes human readable connection status
// changes for the named service.  Delegates to the top-level KBFSOps
// implementation.
func (fbo *folderBranchOps) PushConnectionStatusChange(service string, newStatus error) {
	fbo.config.KBFSOps().PushConnectionStatusChange(service, newStatus)
}
| 1 | 17,092 | In slack I mentioned we should only do this if the update is from some other device, to make sure our own updates don't cause issues. Maybe the revision check above is good enough to prevent this, but I just want to make sure you thought about it. | keybase-kbfs | go |
@@ -66,12 +66,6 @@ RSpec.configure do |config|
# particularly slow.
config.profile_examples = 10
- # Run specs in random order to surface order dependencies. If you find an
- # order dependency and want to debug it, you can fix the order by providing
- # the seed, which is printed after each run.
- # --seed 1234
- config.order = :random
-
# Seed global randomization in this process using the `--seed` CLI option.
# Setting this allows you to use `--seed` to deterministically reproduce
# test failures related to randomization by passing the same `--seed` value | 1 | # This file was generated by the `rspec --init` command. Conventionally, all
# specs live under a `spec` directory, which RSpec adds to the `$LOAD_PATH`.
# The generated `.rspec` file contains `--require spec_helper` which will cause
# this file to always be loaded, without a need to explicitly require it in any
# files.
#
# Given that it is always loaded, you are encouraged to keep this file as
# light-weight as possible. Requiring heavyweight dependencies from this file
# will add to the boot time of your test suite on EVERY test run, even for an
# individual file that may not need all of that loaded. Instead, consider making
# a separate helper file that requires the additional dependencies and performs
# the additional setup, and require it from the spec files that actually need
# it.
#
# See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration
RSpec.configure do |config|
# rspec-expectations config goes here. You can use an alternate
# assertion/expectation library such as wrong or minitest
# assertions if you prefer.
config.expect_with :rspec do |expectations|
# The default output length is 200, you can increase it to make the
# output more verbose, or decrease it to make it more concise.
expectations.max_formatted_output_length = 200
end
# rspec-mocks config goes here. You can use an alternate test double
# library (such as bogus or mocha) by changing the `mock_with` option here.
config.mock_with :rspec do |mocks|
# Prevents you from mocking or stubbing a method that does not exist on
# a real object. This is generally recommended, and is the default since
# RSpec 4.
mocks.verify_partial_doubles = true
end
# The settings below are suggested to provide a good initial experience
# with RSpec, but feel free to customize to your heart's content.
=begin
# This allows you to limit a spec run to individual examples or groups
# you care about by tagging them with `:focus` metadata. When nothing
# is tagged with `:focus`, all examples get run. RSpec also provides
# aliases for `it`, `describe`, and `context` that include `:focus`
# metadata: `fit`, `fdescribe` and `fcontext`, respectively.
config.filter_run_when_matching :focus
# Allows RSpec to persist some state between runs in order to support
# the `--only-failures` and `--next-failure` CLI options. We recommend
# you configure your source control system to ignore this file.
config.example_status_persistence_file_path = "spec/examples.txt"
# This setting enables warnings. It's recommended, but in some cases may
# be too noisy due to issues in dependencies.
config.warnings = true
# Many RSpec users commonly either run the entire suite or an individual
# file, and it's useful to allow more verbose output when running an
# individual spec file.
if config.files_to_run.one?
# Use the documentation formatter for detailed output,
# unless a formatter has already been configured
# (e.g. via a command-line flag).
config.default_formatter = "doc"
end
# Print the 10 slowest examples and example groups at the
# end of the spec run, to help surface which specs are running
# particularly slow.
config.profile_examples = 10
# Run specs in random order to surface order dependencies. If you find an
# order dependency and want to debug it, you can fix the order by providing
# the seed, which is printed after each run.
# --seed 1234
config.order = :random
# Seed global randomization in this process using the `--seed` CLI option.
# Setting this allows you to use `--seed` to deterministically reproduce
# test failures related to randomization by passing the same `--seed` value
# as the one that triggered the failure.
Kernel.srand config.seed
=end
end
| 1 | 18,403 | I would instead explain that random is the default, but you can switch back to defined, or another ordering scheme. | rspec-rspec-core | rb |
@@ -156,11 +156,15 @@ function isVisible(el, screenReader, recursed) {
}
// hidden from visual users
+ const elHeight = parseInt(style.getPropertyValue('height'));
if (
!screenReader &&
(isClipped(style) ||
style.getPropertyValue('opacity') === '0' ||
- (getScroll(el) && parseInt(style.getPropertyValue('height')) === 0))
+ (getScroll(el) && elHeight === 0) ||
+ (style.getPropertyValue('position') === 'absolute' &&
+ elHeight <= 1 &&
+ style.getPropertyValue('overflow') === 'hidden'))
) {
return false;
} | 1 | import getRootNode from './get-root-node';
import isOffscreen from './is-offscreen';
import findUp from './find-up';
import {
getScroll,
getNodeFromTree,
querySelectorAll,
escapeSelector
} from '../../core/utils';
const clipRegex = /rect\s*\(([0-9]+)px,?\s*([0-9]+)px,?\s*([0-9]+)px,?\s*([0-9]+)px\s*\)/;
const clipPathRegex = /(\w+)\((\d+)/;
/**
* Determines if an element is hidden with a clip or clip-path technique
* @method isClipped
* @memberof axe.commons.dom
* @private
* @param {CSSStyleDeclaration} style Computed style
* @return {Boolean}
*/
function isClipped(style) {
const matchesClip = style.getPropertyValue('clip').match(clipRegex);
const matchesClipPath = style
.getPropertyValue('clip-path')
.match(clipPathRegex);
if (matchesClip && matchesClip.length === 5) {
return (
matchesClip[3] - matchesClip[1] <= 0 &&
matchesClip[2] - matchesClip[4] <= 0
);
}
if (matchesClipPath) {
const type = matchesClipPath[1];
const value = parseInt(matchesClipPath[2], 10);
switch (type) {
case 'inset':
return value >= 50;
case 'circle':
return value === 0;
default:
}
}
return false;
}
/**
* Check `AREA` element is visible
* - validate if it is a child of `map`
* - ensure `map` is referred by `img` using the `usemap` attribute
* @param {Element} areaEl `AREA` element
* @retruns {Boolean}
*/
function isAreaVisible(el, screenReader, recursed) {
/**
* Note:
* - Verified that `map` element cannot refer to `area` elements across different document trees
* - Verified that `map` element does not get affected by altering `display` property
*/
const mapEl = findUp(el, 'map');
if (!mapEl) {
return false;
}
const mapElName = mapEl.getAttribute('name');
if (!mapElName) {
return false;
}
/**
* `map` element has to be in light DOM
*/
const mapElRootNode = getRootNode(el);
if (!mapElRootNode || mapElRootNode.nodeType !== 9) {
return false;
}
const refs = querySelectorAll(
// TODO: es-module-_tree
axe._tree,
`img[usemap="#${escapeSelector(mapElName)}"]`
);
if (!refs || !refs.length) {
return false;
}
return refs.some(({ actualNode }) =>
isVisible(actualNode, screenReader, recursed)
);
}
/**
* Determine whether an element is visible
* @method isVisible
* @memberof axe.commons.dom
* @instance
* @param {HTMLElement} el The HTMLElement
* @param {Boolean} screenReader When provided, will evaluate visibility from the perspective of a screen reader
* @param {Boolean} recursed
* @return {Boolean} The element's visibilty status
*/
function isVisible(el, screenReader, recursed) {
if (!el) {
throw new TypeError(
'Cannot determine if element is visible for non-DOM nodes'
);
}
const vNode = getNodeFromTree(el);
const cacheName = '_isVisible' + (screenReader ? 'ScreenReader' : '');
// 9 === Node.DOCUMENT
if (el.nodeType === 9) {
return true;
}
// 11 === Node.DOCUMENT_FRAGMENT_NODE
if (el.nodeType === 11) {
el = el.host; // grab the host Node
}
if (vNode && typeof vNode[cacheName] !== 'undefined') {
return vNode[cacheName];
}
const style = window.getComputedStyle(el, null);
if (style === null) {
return false;
}
const nodeName = el.nodeName.toUpperCase();
/**
* check visibility of `AREA`
* Note:
* Firefox's user-agent always sets `AREA` element to `display:none`
* hence excluding the edge case, for visibility computation
*/
if (nodeName === 'AREA') {
return isAreaVisible(el, screenReader, recursed);
}
// always hidden
if (
style.getPropertyValue('display') === 'none' ||
['STYLE', 'SCRIPT', 'NOSCRIPT', 'TEMPLATE'].includes(nodeName)
) {
return false;
}
// hidden from screen readers
if (screenReader && el.getAttribute('aria-hidden') === 'true') {
return false;
}
// hidden from visual users
if (
!screenReader &&
(isClipped(style) ||
style.getPropertyValue('opacity') === '0' ||
(getScroll(el) && parseInt(style.getPropertyValue('height')) === 0))
) {
return false;
}
// visibility is only accurate on the first element and
// position does not matter if it was already calculated
if (
!recursed &&
(style.getPropertyValue('visibility') === 'hidden' ||
(!screenReader && isOffscreen(el)))
) {
return false;
}
const parent = el.assignedSlot ? el.assignedSlot : el.parentNode;
let visible = false;
if (parent) {
visible = isVisible(parent, screenReader, true);
}
if (vNode) {
vNode[cacheName] = visible;
}
return visible;
}
export default isVisible;
| 1 | 16,618 | Is there a reason you left this at 0? | dequelabs-axe-core | js |
@@ -267,6 +267,11 @@ public final class RememberMeConfigurer<H extends HttpSecurityBuilder<H>>
validateInput();
String key = getKey();
RememberMeServices rememberMeServices = getRememberMeServices(http, key);
+ if (key == null) {
+ if (rememberMeServices instanceof AbstractRememberMeServices) {
+ key = ((AbstractRememberMeServices) rememberMeServices).getKey();
+ }
+ }
http.setSharedObject(RememberMeServices.class, rememberMeServices);
LogoutConfigurer<H> logoutConfigurer = http.getConfigurer(LogoutConfigurer.class);
if (logoutConfigurer != null && this.logoutHandler != null) { | 1 | /*
* Copyright 2002-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.config.annotation.web.configurers;
import java.util.UUID;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.authentication.RememberMeAuthenticationProvider;
import org.springframework.security.config.annotation.authentication.builders.AuthenticationManagerBuilder;
import org.springframework.security.config.annotation.web.HttpSecurityBuilder;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.web.authentication.AuthenticationSuccessHandler;
import org.springframework.security.web.authentication.RememberMeServices;
import org.springframework.security.web.authentication.logout.LogoutHandler;
import org.springframework.security.web.authentication.rememberme.AbstractRememberMeServices;
import org.springframework.security.web.authentication.rememberme.PersistentTokenBasedRememberMeServices;
import org.springframework.security.web.authentication.rememberme.PersistentTokenRepository;
import org.springframework.security.web.authentication.rememberme.RememberMeAuthenticationFilter;
import org.springframework.security.web.authentication.rememberme.TokenBasedRememberMeServices;
import org.springframework.security.web.authentication.ui.DefaultLoginPageGeneratingFilter;
/**
* Configures Remember Me authentication. This typically involves the user checking a box
* when they enter their username and password that states to "Remember Me".
*
* <h2>Security Filters</h2>
*
* The following Filters are populated
*
* <ul>
* <li>{@link RememberMeAuthenticationFilter}</li>
* </ul>
*
* <h2>Shared Objects Created</h2>
*
* The following shared objects are populated
*
* <ul>
* <li>
* {@link HttpSecurity#authenticationProvider(org.springframework.security.authentication.AuthenticationProvider)}
* is populated with a {@link RememberMeAuthenticationProvider}</li>
* <li>{@link RememberMeServices} is populated as a shared object and available on
* {@link HttpSecurity#getSharedObject(Class)}</li>
* <li>{@link LogoutConfigurer#addLogoutHandler(LogoutHandler)} is used to add a logout
* handler to clean up the remember me authentication.</li>
* </ul>
*
* <h2>Shared Objects Used</h2>
*
* The following shared objects are used:
*
* <ul>
* <li>{@link AuthenticationManager}</li>
* <li>{@link UserDetailsService} if no {@link #userDetailsService(UserDetailsService)}
* was specified.</li>
* <li>{@link DefaultLoginPageGeneratingFilter} - if present will be populated with
* information from the configuration</li>
* </ul>
*
* @author Rob Winch
* @author Eddú Meléndez
* @since 3.2
*/
public final class RememberMeConfigurer<H extends HttpSecurityBuilder<H>>
extends AbstractHttpConfigurer<RememberMeConfigurer<H>, H> {
/**
* The default name for remember me parameter name and remember me cookie name
*/
private static final String DEFAULT_REMEMBER_ME_NAME = "remember-me";
private AuthenticationSuccessHandler authenticationSuccessHandler;
private String key;
private RememberMeServices rememberMeServices;
private LogoutHandler logoutHandler;
private String rememberMeParameter = DEFAULT_REMEMBER_ME_NAME;
private String rememberMeCookieName = DEFAULT_REMEMBER_ME_NAME;
private String rememberMeCookieDomain;
private PersistentTokenRepository tokenRepository;
private UserDetailsService userDetailsService;
private Integer tokenValiditySeconds;
private Boolean useSecureCookie;
private Boolean alwaysRemember;
/**
* Creates a new instance
*/
public RememberMeConfigurer() {
}
/**
* Allows specifying how long (in seconds) a token is valid for
*
* @param tokenValiditySeconds
* @return {@link RememberMeConfigurer} for further customization
* @see AbstractRememberMeServices#setTokenValiditySeconds(int)
*/
public RememberMeConfigurer<H> tokenValiditySeconds(int tokenValiditySeconds) {
this.tokenValiditySeconds = tokenValiditySeconds;
return this;
}
/**
* Whether the cookie should be flagged as secure or not. Secure cookies can only be
* sent over an HTTPS connection and thus cannot be accidentally submitted over HTTP
* where they could be intercepted.
* <p>
* By default the cookie will be secure if the request is secure. If you only want to
* use remember-me over HTTPS (recommended) you should set this property to
* {@code true}.
*
* @param useSecureCookie set to {@code true} to always user secure cookies,
* {@code false} to disable their use.
* @return the {@link RememberMeConfigurer} for further customization
* @see AbstractRememberMeServices#setUseSecureCookie(boolean)
*/
public RememberMeConfigurer<H> useSecureCookie(boolean useSecureCookie) {
this.useSecureCookie = useSecureCookie;
return this;
}
/**
* Specifies the {@link UserDetailsService} used to look up the {@link UserDetails}
* when a remember me token is valid. The default is to use the
* {@link UserDetailsService} found by invoking
* {@link HttpSecurity#getSharedObject(Class)} which is set when using
* {@link WebSecurityConfigurerAdapter#configure(AuthenticationManagerBuilder)}.
* Alternatively, one can populate {@link #rememberMeServices(RememberMeServices)}.
*
* @param userDetailsService the {@link UserDetailsService} to configure
* @return the {@link RememberMeConfigurer} for further customization
* @see AbstractRememberMeServices
*/
public RememberMeConfigurer<H> userDetailsService(
UserDetailsService userDetailsService) {
this.userDetailsService = userDetailsService;
return this;
}
/**
* Specifies the {@link PersistentTokenRepository} to use. The default is to use
* {@link TokenBasedRememberMeServices} instead.
*
* @param tokenRepository the {@link PersistentTokenRepository} to use
* @return the {@link RememberMeConfigurer} for further customization
*/
public RememberMeConfigurer<H> tokenRepository(
PersistentTokenRepository tokenRepository) {
this.tokenRepository = tokenRepository;
return this;
}
/**
* Sets the key to identify tokens created for remember me authentication. Default is
* a secure randomly generated key.
*
* @param key the key to identify tokens created for remember me authentication
* @return the {@link RememberMeConfigurer} for further customization
*/
public RememberMeConfigurer<H> key(String key) {
this.key = key;
return this;
}
/**
* The HTTP parameter used to indicate to remember the user at time of login.
*
* @param rememberMeParameter the HTTP parameter used to indicate to remember the user
* @return the {@link RememberMeConfigurer} for further customization
*/
public RememberMeConfigurer<H> rememberMeParameter(String rememberMeParameter) {
this.rememberMeParameter = rememberMeParameter;
return this;
}
/**
* The name of cookie which store the token for remember me authentication. Defaults
* to 'remember-me'.
*
* @param rememberMeCookieName the name of cookie which store the token for remember
* me authentication
* @return the {@link RememberMeConfigurer} for further customization
* @since 4.0.1
*/
public RememberMeConfigurer<H> rememberMeCookieName(String rememberMeCookieName) {
this.rememberMeCookieName = rememberMeCookieName;
return this;
}
/**
* The domain name within which the remember me cookie is visible.
*
* @param rememberMeCookieDomain the domain name within which the remember me cookie
* is visible.
* @return the {@link RememberMeConfigurer} for further customization
* @since 4.1.0
*/
public RememberMeConfigurer<H> rememberMeCookieDomain(String rememberMeCookieDomain) {
this.rememberMeCookieDomain = rememberMeCookieDomain;
return this;
}
/**
* Allows control over the destination a remembered user is sent to when they are
* successfully authenticated. By default, the filter will just allow the current
* request to proceed, but if an {@code AuthenticationSuccessHandler} is set, it will
* be invoked and the {@code doFilter()} method will return immediately, thus allowing
* the application to redirect the user to a specific URL, regardless of what the
* original request was for.
*
* @param authenticationSuccessHandler the strategy to invoke immediately before
* returning from {@code doFilter()}.
* @return {@link RememberMeConfigurer} for further customization
* @see RememberMeAuthenticationFilter#setAuthenticationSuccessHandler(AuthenticationSuccessHandler)
*/
public RememberMeConfigurer<H> authenticationSuccessHandler(
AuthenticationSuccessHandler authenticationSuccessHandler) {
this.authenticationSuccessHandler = authenticationSuccessHandler;
return this;
}
/**
* Specify the {@link RememberMeServices} to use.
* @param rememberMeServices the {@link RememberMeServices} to use
* @return the {@link RememberMeConfigurer} for further customizations
* @see RememberMeServices
*/
public RememberMeConfigurer<H> rememberMeServices(
RememberMeServices rememberMeServices) {
this.rememberMeServices = rememberMeServices;
return this;
}
/**
* Whether the cookie should always be created even if the remember-me parameter is
* not set.
* <p>
* By default this will be set to {@code false}.
*
* @param alwaysRemember set to {@code true} to always trigger remember me,
* {@code false} to use the remember-me parameter.
* @return the {@link RememberMeConfigurer} for further customization
* @see AbstractRememberMeServices#setAlwaysRemember(boolean)
*/
public RememberMeConfigurer<H> alwaysRemember(boolean alwaysRemember) {
this.alwaysRemember = alwaysRemember;
return this;
}
@SuppressWarnings("unchecked")
@Override
public void init(H http) throws Exception {
validateInput();
String key = getKey();
RememberMeServices rememberMeServices = getRememberMeServices(http, key);
http.setSharedObject(RememberMeServices.class, rememberMeServices);
LogoutConfigurer<H> logoutConfigurer = http.getConfigurer(LogoutConfigurer.class);
if (logoutConfigurer != null && this.logoutHandler != null) {
logoutConfigurer.addLogoutHandler(this.logoutHandler);
}
RememberMeAuthenticationProvider authenticationProvider = new RememberMeAuthenticationProvider(
key);
authenticationProvider = postProcess(authenticationProvider);
http.authenticationProvider(authenticationProvider);
initDefaultLoginFilter(http);
}
@Override
public void configure(H http) throws Exception {
RememberMeAuthenticationFilter rememberMeFilter = new RememberMeAuthenticationFilter(
http.getSharedObject(AuthenticationManager.class),
this.rememberMeServices);
if (this.authenticationSuccessHandler != null) {
rememberMeFilter
.setAuthenticationSuccessHandler(this.authenticationSuccessHandler);
}
rememberMeFilter = postProcess(rememberMeFilter);
http.addFilter(rememberMeFilter);
}
/**
* Validate rememberMeServices and rememberMeCookieName have not been set at
* the same time.
*/
private void validateInput() {
if (this.rememberMeServices != null && this.rememberMeCookieName != DEFAULT_REMEMBER_ME_NAME) {
throw new IllegalArgumentException("Can not set rememberMeCookieName " +
"and custom rememberMeServices.");
}
}
/**
* Returns the HTTP parameter used to indicate to remember the user at time of login.
* @return the HTTP parameter used to indicate to remember the user
*/
private String getRememberMeParameter() {
return this.rememberMeParameter;
}
/**
* If available, initializes the {@link DefaultLoginPageGeneratingFilter} shared
* object.
*
* @param http the {@link HttpSecurityBuilder} to use
*/
private void initDefaultLoginFilter(H http) {
DefaultLoginPageGeneratingFilter loginPageGeneratingFilter = http
.getSharedObject(DefaultLoginPageGeneratingFilter.class);
if (loginPageGeneratingFilter != null) {
loginPageGeneratingFilter.setRememberMeParameter(getRememberMeParameter());
}
}
/**
* Gets the {@link RememberMeServices} or creates the {@link RememberMeServices}.
* @param http the {@link HttpSecurity} to lookup shared objects
* @param key the {@link #key(String)}
* @return the {@link RememberMeServices} to use
* @throws Exception
*/
private RememberMeServices getRememberMeServices(H http, String key)
throws Exception {
if (this.rememberMeServices != null) {
if (this.rememberMeServices instanceof LogoutHandler
&& this.logoutHandler == null) {
this.logoutHandler = (LogoutHandler) this.rememberMeServices;
}
return this.rememberMeServices;
}
AbstractRememberMeServices tokenRememberMeServices = createRememberMeServices(
http, key);
tokenRememberMeServices.setParameter(this.rememberMeParameter);
tokenRememberMeServices.setCookieName(this.rememberMeCookieName);
if (this.rememberMeCookieDomain != null) {
tokenRememberMeServices.setCookieDomain(this.rememberMeCookieDomain);
}
if (this.tokenValiditySeconds != null) {
tokenRememberMeServices.setTokenValiditySeconds(this.tokenValiditySeconds);
}
if (this.useSecureCookie != null) {
tokenRememberMeServices.setUseSecureCookie(this.useSecureCookie);
}
if (this.alwaysRemember != null) {
tokenRememberMeServices.setAlwaysRemember(this.alwaysRemember);
}
tokenRememberMeServices.afterPropertiesSet();
this.logoutHandler = tokenRememberMeServices;
this.rememberMeServices = tokenRememberMeServices;
return tokenRememberMeServices;
}
/**
* Creates the {@link RememberMeServices} to use when none is provided. The result is
* either {@link PersistentTokenRepository} (if a {@link PersistentTokenRepository} is
* specified, else {@link TokenBasedRememberMeServices}.
*
* @param http the {@link HttpSecurity} to lookup shared objects
* @param key the {@link #key(String)}
* @return the {@link RememberMeServices} to use
* @throws Exception
*/
private AbstractRememberMeServices createRememberMeServices(H http, String key)
throws Exception {
return this.tokenRepository == null
? createTokenBasedRememberMeServices(http, key)
: createPersistentRememberMeServices(http, key);
}
/**
* Creates {@link TokenBasedRememberMeServices}
*
* @param http the {@link HttpSecurity} to lookup shared objects
* @param key the {@link #key(String)}
* @return the {@link TokenBasedRememberMeServices}
*/
private AbstractRememberMeServices createTokenBasedRememberMeServices(H http,
String key) {
UserDetailsService userDetailsService = getUserDetailsService(http);
return new TokenBasedRememberMeServices(key, userDetailsService);
}
/**
* Creates {@link PersistentTokenBasedRememberMeServices}
*
* @param http the {@link HttpSecurity} to lookup shared objects
* @param key the {@link #key(String)}
* @return the {@link PersistentTokenBasedRememberMeServices}
*/
private AbstractRememberMeServices createPersistentRememberMeServices(H http,
String key) {
UserDetailsService userDetailsService = getUserDetailsService(http);
return new PersistentTokenBasedRememberMeServices(key, userDetailsService,
this.tokenRepository);
}
/**
* Gets the {@link UserDetailsService} to use. Either the explicitly configure
* {@link UserDetailsService} from {@link #userDetailsService(UserDetailsService)} or
* a shared object from {@link HttpSecurity#getSharedObject(Class)}.
*
* @param http {@link HttpSecurity} to get the shared {@link UserDetailsService}
* @return the {@link UserDetailsService} to use
*/
private UserDetailsService getUserDetailsService(H http) {
if (this.userDetailsService == null) {
this.userDetailsService = http.getSharedObject(UserDetailsService.class);
}
if (this.userDetailsService == null) {
throw new IllegalStateException("userDetailsService cannot be null. Invoke "
+ RememberMeConfigurer.class.getSimpleName()
+ "#userDetailsService(UserDetailsService) or see its javadoc for alternative approaches.");
}
return this.userDetailsService;
}
/**
* Gets the key to use for validating remember me tokens. Either the value passed into
* {@link #key(String)}, or a secure random string if none was specified.
*
* @return the remember me key to use
*/
private String getKey() {
if (this.key == null) {
this.key = UUID.randomUUID().toString();
}
return this.key;
}
} | 1 | 10,625 | The value of `key` will never be `null` at this point, because `getKey()` will generate a value if it is `null`. | spring-projects-spring-security | java |
@@ -88,11 +88,18 @@ namespace Datadog.Trace.ClrProfiler
return null;
}
+ var commandType = command.GetType();
+ if (tracer.Settings.AdoNetExcludedTypes.Contains(commandType.FullName))
+ {
+ // AdoNet type disabled, don't create a scope, skip this trace
+ return null;
+ }
+
Scope scope = null;
try
{
- string dbType = GetDbType(command.GetType().Name);
+ string dbType = GetDbType(commandType.Name);
if (dbType == null)
{ | 1 | using System;
using System.Data;
using Datadog.Trace.ClrProfiler.Integrations.AdoNet;
using Datadog.Trace.Configuration;
using Datadog.Trace.ExtensionMethods;
using Datadog.Trace.Logging;
using Datadog.Trace.Tagging;
using Datadog.Trace.Util;
namespace Datadog.Trace.ClrProfiler
{
/// <summary>
/// Convenience class that creates scopes and populates them with some standard details.
/// </summary>
internal static class ScopeFactory
{
public const string OperationName = "http.request";
public const string ServiceName = "http-client";
private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.GetLogger(typeof(ScopeFactory));
/// <summary>
/// Creates a scope for outbound http requests and populates some common details.
/// </summary>
/// <param name="tracer">The tracer instance to use to create the new scope.</param>
/// <param name="httpMethod">The HTTP method used by the request.</param>
/// <param name="requestUri">The URI requested by the request.</param>
/// <param name="integrationId">The id of the integration creating this scope.</param>
/// <param name="tags">The tags associated to the scope</param>
/// <returns>A new pre-populated scope.</returns>
public static Scope CreateOutboundHttpScope(Tracer tracer, string httpMethod, Uri requestUri, IntegrationInfo integrationId, out HttpTags tags)
{
tags = null;
if (!tracer.Settings.IsIntegrationEnabled(integrationId))
{
// integration disabled, don't create a scope, skip this trace
return null;
}
Scope scope = null;
try
{
Span parent = tracer.ActiveScope?.Span;
if (parent != null &&
parent.Type == SpanTypes.Http &&
parent.GetTag(Tags.InstrumentationName) != null)
{
// we are already instrumenting this,
// don't instrument nested methods that belong to the same stacktrace
// e.g. HttpClientHandler.SendAsync() -> SocketsHttpHandler.SendAsync()
return null;
}
string resourceUrl = requestUri != null ? UriHelpers.CleanUri(requestUri, removeScheme: true, tryRemoveIds: true) : null;
string httpUrl = requestUri != null ? UriHelpers.CleanUri(requestUri, removeScheme: false, tryRemoveIds: false) : null;
tags = new HttpTags();
scope = tracer.StartActiveWithTags(OperationName, tags: tags, serviceName: $"{tracer.DefaultServiceName}-{ServiceName}");
var span = scope.Span;
span.Type = SpanTypes.Http;
span.ResourceName = $"{httpMethod} {resourceUrl}";
tags.HttpMethod = httpMethod?.ToUpperInvariant();
tags.HttpUrl = httpUrl;
tags.InstrumentationName = IntegrationRegistry.GetName(integrationId);
tags.SetAnalyticsSampleRate(integrationId, tracer.Settings, enabledWithGlobalSetting: false);
}
catch (Exception ex)
{
Log.Error(ex, "Error creating or populating scope.");
}
// always returns the scope, even if it's null because we couldn't create it,
// or we couldn't populate it completely (some tags is better than no tags)
return scope;
}
public static Scope CreateDbCommandScope(Tracer tracer, IDbCommand command)
{
if (!tracer.Settings.IsIntegrationEnabled(AdoNetConstants.IntegrationId))
{
// integration disabled, don't create a scope, skip this trace
return null;
}
Scope scope = null;
try
{
string dbType = GetDbType(command.GetType().Name);
if (dbType == null)
{
// don't create a scope, skip this trace
return null;
}
Span parent = tracer.ActiveScope?.Span;
if (parent != null &&
parent.Type == SpanTypes.Sql &&
parent.GetTag(Tags.DbType) == dbType &&
parent.ResourceName == command.CommandText)
{
// we are already instrumenting this,
// don't instrument nested methods that belong to the same stacktrace
// e.g. ExecuteReader() -> ExecuteReader(commandBehavior)
return null;
}
string serviceName = $"{tracer.DefaultServiceName}-{dbType}";
string operationName = $"{dbType}.query";
var tags = new SqlTags();
scope = tracer.StartActiveWithTags(operationName, tags: tags, serviceName: serviceName);
var span = scope.Span;
tags.DbType = dbType;
span.AddTagsFromDbCommand(command);
tags.SetAnalyticsSampleRate(AdoNetConstants.IntegrationId, tracer.Settings, enabledWithGlobalSetting: false);
}
catch (Exception ex)
{
Log.Error(ex, "Error creating or populating scope.");
}
return scope;
}
public static string GetDbType(string commandTypeName)
{
switch (commandTypeName)
{
case "SqlCommand":
return "sql-server";
case "NpgsqlCommand":
return "postgres";
case "MySqlCommand":
return "mysql";
case "OracleCommand":
return "oracle";
case "InterceptableDbCommand":
case "ProfiledDbCommand":
// don't create spans for these
return null;
default:
const string commandSuffix = "Command";
// remove "Command" suffix if present
return commandTypeName.EndsWith(commandSuffix)
? commandTypeName.Substring(0, commandTypeName.Length - commandSuffix.Length).ToLowerInvariant()
: commandTypeName.ToLowerInvariant();
}
}
}
}
| 1 | 18,543 | NIT: You should first check if AdoNetExcludedTypes.Count is greater than 0 before calling Contains (fast-path) | DataDog-dd-trace-dotnet | .cs |
@@ -18,6 +18,9 @@ package v1alpha1
import (
ndm "github.com/openebs/maya/pkg/apis/openebs.io/ndm/v1alpha1"
+
+ "strings"
+
errors "github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" | 1 | /*
Copyright 2019 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
ndm "github.com/openebs/maya/pkg/apis/openebs.io/ndm/v1alpha1"
errors "github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
// bdTagKey defines the label selector key
// used for grouping block devices using a tag.
bdTagKey = "openebs.io/block-device-tag"
)
// Builder is the builder object for BlockDeviceClaim
type Builder struct {
	// BDC is the BlockDeviceClaim instance being assembled by the
	// chained With* methods.
	BDC *BlockDeviceClaim
	// errs accumulates every validation error hit while building;
	// Build() fails if this slice is non-empty.
	errs []error
}
// NewBuilder returns an empty instance of the Builder object
func NewBuilder() *Builder {
	builder := &Builder{errs: []error{}}
	builder.BDC = &BlockDeviceClaim{&ndm.BlockDeviceClaim{}, ""}
	return builder
}
// BuilderForObject returns an instance of the Builder object based on the
// given block device claim object.
//
// NOTE: the parameter was previously named "BlockDeviceClaim", which shadowed
// the type of the same name; renamed for clarity (Go parameter names are not
// part of the public API).
func BuilderForObject(bdc *BlockDeviceClaim) *Builder {
	return &Builder{
		BDC:  bdc,
		errs: []error{},
	}
}
// BuilderForAPIObject returns an instance of the Builder object based on block
// device claim api object.
func BuilderForAPIObject(bdc *ndm.BlockDeviceClaim) *Builder {
	builder := &Builder{errs: []error{}}
	builder.BDC = &BlockDeviceClaim{bdc, ""}
	return builder
}
// WithConfigPath sets the path for k8s config
func (b *Builder) WithConfigPath(path string) *Builder {
	b.BDC.configPath = path
	return b
}
// Build returns the BlockDeviceClaim instance, or an aggregate error when any
// of the chained With* calls recorded a validation failure.
func (b *Builder) Build() (*BlockDeviceClaim, error) {
	if len(b.errs) != 0 {
		return nil, errors.Errorf("%+v", b.errs)
	}
	return b.BDC, nil
}
// WithName sets the Name field of BDC with provided value.
func (b *Builder) WithName(name string) *Builder {
	if name == "" {
		err := errors.New("failed to build BDC object: missing BDC name")
		b.errs = append(b.errs, err)
		return b
	}
	b.BDC.Object.Name = name
	return b
}
// WithNamespace sets the Namespace field of BDC provided arguments
func (b *Builder) WithNamespace(namespace string) *Builder {
	if namespace == "" {
		err := errors.New("failed to build BDC object: missing BDC namespace")
		b.errs = append(b.errs, err)
		return b
	}
	b.BDC.Object.Namespace = namespace
	return b
}
// WithAnnotationsNew resets the Annotations of the BDC to exactly the
// provided map; any previously set annotations are discarded.
func (b *Builder) WithAnnotationsNew(annotations map[string]string) *Builder {
	if len(annotations) == 0 {
		err := errors.New("failed to build BDC object: missing annotations")
		b.errs = append(b.errs, err)
		return b
	}
	fresh := make(map[string]string)
	for k, v := range annotations {
		fresh[k] = v
	}
	b.BDC.Object.Annotations = fresh
	return b
}
// WithAnnotations appends or overwrites existing Annotations
// values of BDC with provided arguments
func (b *Builder) WithAnnotations(annotations map[string]string) *Builder {
	if len(annotations) == 0 {
		err := errors.New("failed to build BDC object: missing annotations")
		b.errs = append(b.errs, err)
		return b
	}
	if b.BDC.Object.Annotations == nil {
		// nothing set yet: delegate to the resetting variant
		return b.WithAnnotationsNew(annotations)
	}
	for k, v := range annotations {
		b.BDC.Object.Annotations[k] = v
	}
	return b
}
// WithLabelsNew resets the Labels of the BDC to exactly the provided map;
// any previously set labels are discarded.
func (b *Builder) WithLabelsNew(labels map[string]string) *Builder {
	if len(labels) == 0 {
		err := errors.New("failed to build BDC object: missing labels")
		b.errs = append(b.errs, err)
		return b
	}
	fresh := make(map[string]string)
	for k, v := range labels {
		fresh[k] = v
	}
	b.BDC.Object.Labels = fresh
	return b
}
// WithLabels appends or overwrites existing Labels
// values of BDC with provided arguments
func (b *Builder) WithLabels(labels map[string]string) *Builder {
	if len(labels) == 0 {
		err := errors.New("failed to build BDC object: missing labels")
		b.errs = append(b.errs, err)
		return b
	}
	if b.BDC.Object.Labels == nil {
		// nothing set yet: delegate to the resetting variant
		return b.WithLabelsNew(labels)
	}
	for k, v := range labels {
		b.BDC.Object.Labels[k] = v
	}
	return b
}
// WithBlockDeviceName sets the BlockDeviceName field of BDC provided arguments
func (b *Builder) WithBlockDeviceName(bdName string) *Builder {
	if bdName == "" {
		err := errors.New("failed to build BDC object: missing BlockDevice name")
		b.errs = append(b.errs, err)
		return b
	}
	b.BDC.Object.Spec.BlockDeviceName = bdName
	return b
}
// WithDeviceType sets the DeviceType field of BDC provided arguments
func (b *Builder) WithDeviceType(dType string) *Builder {
	if dType == "" {
		err := errors.New("failed to build BDC object: missing device type")
		b.errs = append(b.errs, err)
		return b
	}
	b.BDC.Object.Spec.DeviceType = dType
	return b
}
// WithHostName sets the hostName field of BDC provided arguments
func (b *Builder) WithHostName(hName string) *Builder {
	if hName == "" {
		err := errors.New("failed to build BDC object: missing host name")
		b.errs = append(b.errs, err)
		return b
	}
	b.BDC.Object.Spec.BlockDeviceNodeAttributes.HostName = hName
	return b
}
// WithNodeName sets the node name field of BDC provided arguments
func (b *Builder) WithNodeName(nName string) *Builder {
	if nName == "" {
		err := errors.New("failed to build BDC object: missing node name")
		b.errs = append(b.errs, err)
		return b
	}
	b.BDC.Object.Spec.BlockDeviceNodeAttributes.NodeName = nName
	return b
}
// WithCapacity sets the requested storage capacity on the BDC; the capacity
// string must parse as a Kubernetes resource quantity (e.g. "10Gi").
func (b *Builder) WithCapacity(capacity string) *Builder {
	parsed, err := resource.ParseQuantity(capacity)
	if err != nil {
		wrapped := errors.Wrapf(
			err, "failed to build BDC object: failed to parse capacity {%s}",
			capacity,
		)
		b.errs = append(b.errs, wrapped)
		return b
	}
	b.BDC.Object.Spec.Resources.Requests = corev1.ResourceList{
		corev1.ResourceName(ndm.ResourceStorage): parsed,
	}
	return b
}
// WithOwnerReference appends the given owner reference to the BDC's
// metadata so the claim is garbage-collected with its owner.
func (b *Builder) WithOwnerReference(ref metav1.OwnerReference) *Builder {
	refs := b.BDC.Object.OwnerReferences
	b.BDC.Object.OwnerReferences = append(refs, ref)
	return b
}
// WithFinalizer appends the given finalizer(s) to the BDC metadata.
func (b *Builder) WithFinalizer(finalizers ...string) *Builder {
	if len(finalizers) == 0 {
		err := errors.New("failed to build BDC object: missing finalizer")
		b.errs = append(b.errs, err)
		return b
	}
	existing := b.BDC.Object.Finalizers
	b.BDC.Object.Finalizers = append(existing, finalizers...)
	return b
}
// WithBlockVolumeMode sets the volumeMode as VolumeModeBlock when the given
// persistent volume mode is "Block"; any other non-empty mode is ignored.
// An empty mode is recorded as a build error.
func (b *Builder) WithBlockVolumeMode(mode corev1.PersistentVolumeMode) *Builder {
	if len(mode) == 0 {
		b.errs = append(
			b.errs,
			errors.New("failed to build BDC object: missing PersistentVolumeMode"),
		)
		// Return early for consistency with every other setter in this
		// file: the original recorded the error but fell through.
		return b
	}
	if mode == corev1.PersistentVolumeBlock {
		b.BDC.Object.Spec.Details.BlockVolumeMode = ndm.VolumeModeBlock
	}
	return b
}
// WithBlockDeviceTag appends (or creates) the BDC Label Selector
// by setting the provided value to the fixed key
// openebs.io/block-device-tag
// This will enable the NDM to pick only devices that
// match the node (hostname) and block device tag value.
func (b *Builder) WithBlockDeviceTag(bdTagValue string) *Builder {
	if bdTagValue == "" {
		err := errors.New("failed to build BDC object: missing block device tag value")
		b.errs = append(b.errs, err)
		return b
	}
	selector := b.BDC.Object.Spec.Selector
	if selector == nil {
		selector = &metav1.LabelSelector{}
		b.BDC.Object.Spec.Selector = selector
	}
	if selector.MatchLabels == nil {
		selector.MatchLabels = map[string]string{}
	}
	selector.MatchLabels[bdTagKey] = bdTagValue
	return b
}
| 1 | 18,628 | should rearrange so as to follow import ordering. | openebs-maya | go |
@@ -65,7 +65,7 @@ func (s *validateOperationWorkflowModeStateSuite) TestCreateMode_UpdateCurrent()
stateToError := map[enumsspb.WorkflowExecutionState]bool{
enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: false,
enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: false,
- enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: true,
+ enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false,
enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true,
}
| 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package persistence
import (
"fmt"
"testing"
"github.com/stretchr/testify/suite"
enumsspb "go.temporal.io/server/api/enums/v1"
persistencespb "go.temporal.io/server/api/persistence/v1"
)
type (
	// validateOperationWorkflowModeStateSuite exercises the
	// Validate*WorkflowModeState helpers across combinations of
	// workflow mode and execution state.
	validateOperationWorkflowModeStateSuite struct {
		suite.Suite
	}
)
// TestValidateOperationWorkflowModeStateSuite wires the testify suite into
// the standard "go test" runner.
func TestValidateOperationWorkflowModeStateSuite(t *testing.T) {
	suite.Run(t, new(validateOperationWorkflowModeStateSuite))
}
// SetupSuite is an intentional no-op; present to satisfy the testify suite lifecycle.
func (s *validateOperationWorkflowModeStateSuite) SetupSuite() {
}

// TearDownSuite is an intentional no-op; present to satisfy the testify suite lifecycle.
func (s *validateOperationWorkflowModeStateSuite) TearDownSuite() {
}

// SetupTest is an intentional no-op; present to satisfy the testify suite lifecycle.
func (s *validateOperationWorkflowModeStateSuite) SetupTest() {
}

// TearDownTest is an intentional no-op; present to satisfy the testify suite lifecycle.
func (s *validateOperationWorkflowModeStateSuite) TearDownTest() {
}
// TestCreateMode_UpdateCurrent verifies which execution states are accepted
// by the create modes that update the current-record (brand new and
// workflow-ID reuse): per the expectation table, only CREATED and RUNNING
// snapshots validate successfully.
func (s *validateOperationWorkflowModeStateSuite) TestCreateMode_UpdateCurrent() {
	expectErrByState := map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   false,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   false,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: true,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    true,
	}
	modes := []CreateWorkflowMode{
		CreateWorkflowModeBrandNew,
		CreateWorkflowModeWorkflowIDReuse,
	}

	for state, wantErr := range expectErrByState {
		snapshot := s.newTestWorkflowSnapshot(state)
		for _, mode := range modes {
			err := ValidateCreateWorkflowModeState(mode, snapshot)
			if wantErr {
				s.Error(err, err)
			} else {
				s.NoError(err, err)
			}
		}
	}
}
// TestCreateMode_BypassCurrent verifies CreateWorkflowModeZombie: per the
// expectation table, only a ZOMBIE snapshot may be created without touching
// the current record.
func (s *validateOperationWorkflowModeStateSuite) TestCreateMode_BypassCurrent() {
	expectErrByState := map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: true,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    false,
	}

	for state, wantErr := range expectErrByState {
		snapshot := s.newTestWorkflowSnapshot(state)
		err := ValidateCreateWorkflowModeState(CreateWorkflowModeZombie, snapshot)
		if wantErr {
			s.Error(err, err)
		} else {
			s.NoError(err, err)
		}
	}
}
// TestUpdateMode_UpdateCurrent exercises ValidateUpdateWorkflowModeState with
// UpdateWorkflowModeUpdateCurrent, both without and with a new-run snapshot.
// Each map entry is state -> whether validation is expected to fail.
func (s *validateOperationWorkflowModeStateSuite) TestUpdateMode_UpdateCurrent() {
	// only current workflow: any non-zombie state may be updated as current
	stateToError := map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   false,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   false,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    true,
	}
	for state, expectError := range stateToError {
		testCurrentMutation := s.newTestWorkflowMutation(state)
		err := ValidateUpdateWorkflowModeState(
			UpdateWorkflowModeUpdateCurrent,
			testCurrentMutation,
			nil,
		)
		if !expectError {
			s.NoError(err, err)
		} else {
			s.Error(err, err)
		}
	}

	// current workflow & new workflow: every combination is tried and an
	// error is expected whenever either side's table says so — the current
	// run must be closed (COMPLETED/ZOMBIE) while the new run must be open
	// (CREATED/RUNNING).
	currentStateToError := map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    false,
	}
	newStateToError := map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   false,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   false,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: true,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    true,
	}
	for currentState, currentExpectError := range currentStateToError {
		for newState, newExpectError := range newStateToError {
			testCurrentMutation := s.newTestWorkflowMutation(currentState)
			testNewSnapshot := s.newTestWorkflowSnapshot(newState)
			err := ValidateUpdateWorkflowModeState(
				UpdateWorkflowModeUpdateCurrent,
				testCurrentMutation,
				&testNewSnapshot,
			)
			if currentExpectError || newExpectError {
				s.Error(err, err)
			} else {
				s.NoError(err, err)
			}
		}
	}
}
// TestUpdateMode_BypassCurrent exercises ValidateUpdateWorkflowModeState with
// UpdateWorkflowModeBypassCurrent, both without and with a new-run snapshot.
// Each map entry is state -> whether validation is expected to fail.
func (s *validateOperationWorkflowModeStateSuite) TestUpdateMode_BypassCurrent() {
	// only current workflow: bypassing the current record is only legal for
	// closed runs (COMPLETED/ZOMBIE)
	stateToError := map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    false,
	}
	for state, expectError := range stateToError {
		testMutation := s.newTestWorkflowMutation(state)
		err := ValidateUpdateWorkflowModeState(
			UpdateWorkflowModeBypassCurrent,
			testMutation,
			nil,
		)
		if !expectError {
			s.NoError(err, err)
		} else {
			s.Error(err, err)
		}
	}

	// current workflow & new workflow: every combination is tried; an error
	// is expected whenever either side's table says so — per the tables only
	// a ZOMBIE new run paired with a closed current run validates.
	currentStateToError := map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    false,
	}
	newStateToError := map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: true,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    false,
	}
	for currentState, currentExpectError := range currentStateToError {
		for newState, newExpectError := range newStateToError {
			testCurrentMutation := s.newTestWorkflowMutation(currentState)
			testNewSnapshot := s.newTestWorkflowSnapshot(newState)
			err := ValidateUpdateWorkflowModeState(
				UpdateWorkflowModeBypassCurrent,
				testCurrentMutation,
				&testNewSnapshot,
			)
			if currentExpectError || newExpectError {
				s.Error(err, err)
			} else {
				s.NoError(err, err)
			}
		}
	}
}
// TestConflictResolveMode_UpdateCurrent exercises
// ValidateConflictResolveWorkflowModeState with
// ConflictResolveWorkflowModeUpdateCurrent across four shapes: reset only,
// reset + new, reset + current, and reset + new + current. Each map entry is
// state -> whether validation is expected to fail; for multi-run shapes an
// error is expected whenever any participating run's table says so.
func (s *validateOperationWorkflowModeStateSuite) TestConflictResolveMode_UpdateCurrent() {
	// only reset workflow
	stateToError := map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   false,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   false,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    true,
	}
	for state, expectError := range stateToError {
		testSnapshot := s.newTestWorkflowSnapshot(state)
		err := ValidateConflictResolveWorkflowModeState(
			ConflictResolveWorkflowModeUpdateCurrent,
			testSnapshot,
			nil,
			nil,
		)
		if !expectError {
			s.NoError(err, err)
		} else {
			s.Error(err, err)
		}
	}

	// reset workflow & new workflow: with a new run attached, the reset run
	// must be COMPLETED and the new run must be open (CREATED/RUNNING)
	resetStateToError := map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    true,
	}
	newStateToError := map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   false,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   false,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: true,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    true,
	}
	for resetState, resetExpectError := range resetStateToError {
		for newState, newExpectError := range newStateToError {
			testResetSnapshot := s.newTestWorkflowSnapshot(resetState)
			testNewSnapshot := s.newTestWorkflowSnapshot(newState)
			err := ValidateConflictResolveWorkflowModeState(
				ConflictResolveWorkflowModeUpdateCurrent,
				testResetSnapshot,
				&testNewSnapshot,
				nil,
			)
			if resetExpectError || newExpectError {
				s.Error(err, err)
			} else {
				s.NoError(err, err)
			}
		}
	}

	// reset workflow & current workflow: the old current run being replaced
	// must already be closed (COMPLETED/ZOMBIE)
	resetStateToError = map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   false,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   false,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    true,
	}
	currentStateToError := map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    false,
	}
	for resetState, resetExpectError := range resetStateToError {
		for currentState, currentExpectError := range currentStateToError {
			testResetSnapshot := s.newTestWorkflowSnapshot(resetState)
			testCurrentSnapshot := s.newTestWorkflowMutation(currentState)
			err := ValidateConflictResolveWorkflowModeState(
				ConflictResolveWorkflowModeUpdateCurrent,
				testResetSnapshot,
				nil,
				&testCurrentSnapshot,
			)
			if resetExpectError || currentExpectError {
				s.Error(err, err)
			} else {
				s.NoError(err, err)
			}
		}
	}

	// reset workflow & new workflow & current workflow: all three tables
	// combined; any invalid participant fails the whole validation
	resetStateToError = map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    true,
	}
	newStateToError = map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   false,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   false,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: true,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    true,
	}
	currentStateToError = map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    false,
	}
	for resetState, resetExpectError := range resetStateToError {
		for newState, newExpectError := range newStateToError {
			for currentState, currentExpectError := range currentStateToError {
				testResetSnapshot := s.newTestWorkflowSnapshot(resetState)
				testNewSnapshot := s.newTestWorkflowSnapshot(newState)
				testCurrentSnapshot := s.newTestWorkflowMutation(currentState)
				err := ValidateConflictResolveWorkflowModeState(
					ConflictResolveWorkflowModeUpdateCurrent,
					testResetSnapshot,
					&testNewSnapshot,
					&testCurrentSnapshot,
				)
				if resetExpectError || newExpectError || currentExpectError {
					s.Error(err, err)
				} else {
					s.NoError(err, err)
				}
			}
		}
	}
}
// TestConflictResolveMode_BypassCurrent exercises
// ValidateConflictResolveWorkflowModeState with
// ConflictResolveWorkflowModeBypassCurrent: reset only, then reset + new.
// Each map entry is state -> whether validation is expected to fail.
func (s *validateOperationWorkflowModeStateSuite) TestConflictResolveMode_BypassCurrent() {
	// only reset workflow: bypassing current is only legal for closed runs
	stateToError := map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    false,
	}
	for state, expectError := range stateToError {
		testSnapshot := s.newTestWorkflowSnapshot(state)
		err := ValidateConflictResolveWorkflowModeState(
			ConflictResolveWorkflowModeBypassCurrent,
			testSnapshot,
			nil,
			nil,
		)
		if !expectError {
			s.NoError(err, err)
		} else {
			s.Error(err, err)
		}
	}

	// reset workflow & new workflow
	resetStateToError := map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    true,
	}
	newStateToError := map[enumsspb.WorkflowExecutionState]bool{
		enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:   true,
		enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: true,
		enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:    false,
	}
	for resetState, resetExpectError := range resetStateToError {
		for newState, newExpectError := range newStateToError {
			testResetSnapshot := s.newTestWorkflowSnapshot(resetState)
			testNewSnapshot := s.newTestWorkflowSnapshot(newState)
			err := ValidateConflictResolveWorkflowModeState(
				ConflictResolveWorkflowModeBypassCurrent,
				testResetSnapshot,
				&testNewSnapshot,
				nil,
			)
			// Identify the failing state combination in the assertion message.
			// This replaces leftover debug code (`if err == nil { fmt.Print("##") }`)
			// that used to live here.
			msg := fmt.Sprintf("reset state: %v, new state: %v", resetState, newState)
			if resetExpectError || newExpectError {
				s.Error(err, msg)
			} else {
				s.NoError(err, msg)
			}
		}
	}
}
// newTestWorkflowSnapshot builds a minimal WorkflowSnapshot whose execution
// state is set to the given value.
func (s *validateOperationWorkflowModeStateSuite) newTestWorkflowSnapshot(
	state enumsspb.WorkflowExecutionState,
) WorkflowSnapshot {
	snapshot := WorkflowSnapshot{}
	snapshot.ExecutionInfo = &persistencespb.WorkflowExecutionInfo{}
	snapshot.ExecutionState = &persistencespb.WorkflowExecutionState{State: state}
	return snapshot
}
// newTestWorkflowMutation builds a minimal WorkflowMutation whose execution
// state is set to the given value.
func (s *validateOperationWorkflowModeStateSuite) newTestWorkflowMutation(
	state enumsspb.WorkflowExecutionState,
) WorkflowMutation {
	mutation := WorkflowMutation{}
	mutation.ExecutionInfo = &persistencespb.WorkflowExecutionInfo{}
	mutation.ExecutionState = &persistencespb.WorkflowExecutionState{State: state}
	return mutation
}
| 1 | 13,516 | what is the use case for create workflow with completed state? | temporalio-temporal | go |
@@ -0,0 +1,5 @@
+package org.openqa.selenium.grid.distributor.remote;
+
+public class RemoteDistributorTest {
+
+} | 1 | 1 | 16,857 | Probably best not to have an empty test.... | SeleniumHQ-selenium | java |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.