max_stars_count (int64: 301 – 224k) | text (string: lengths 6 – 1.05M) | token_count (int64: 3 – 727k)
---|---|---|
416 | // This file is distributed under the MIT license.
// See the LICENSE file for details.
#pragma once
#ifndef VSNRAY_CUDA_ARRAY_H
#define VSNRAY_CUDA_ARRAY_H 1
#include <cstddef>
#include <cstring> // memset
#include <cuda_runtime_api.h>
namespace visionaray
{
namespace cuda
{
//-------------------------------------------------------------------------------------------------
// cuda::array: RAII wrapper around a cudaArray_t allocation
//
class array
{
public:
// width and height are always *elements*
array() = default;
array(cudaChannelFormatDesc const& desc, size_t width, size_t height = 0, unsigned flags = 0)
{
allocate(desc, width, height, flags);
}
array(cudaChannelFormatDesc const& desc, size_t width, size_t height, size_t depth, unsigned flags)
{
allocate3D(desc, width, height, depth, flags);
}
array(array&& rhs)
: array_ptr_(rhs.release())
{
}
~array()
{
reset();
}
array& operator=(array&& rhs)
{
reset( rhs.release() );
return *this;
}
// NOT copyable
array(array const& rhs) = delete;
array& operator=(array const& rhs) = delete;
cudaArray_t get() const
{
return array_ptr_;
}
cudaError_t allocate(cudaChannelFormatDesc const& desc, size_t width, size_t height = 0, unsigned flags = 0)
{
cudaFreeArray(array_ptr_); // arrays from cudaMallocArray are freed with cudaFreeArray
auto err = cudaMallocArray(
&array_ptr_,
&desc,
width,
height,
flags
);
if (err != cudaSuccess)
{
array_ptr_ = nullptr;
}
return err;
}
cudaError_t allocate3D(cudaChannelFormatDesc const& desc, size_t width, size_t height, size_t depth, unsigned int flags = 0)
{
cudaFreeArray(array_ptr_);
cudaExtent extent { width, height, depth };
auto err = cudaMalloc3DArray(&array_ptr_, &desc, extent, flags);
if (err != cudaSuccess)
{
array_ptr_ = nullptr;
}
return err;
}
template <typename T>
cudaError_t upload(T const* host_data, size_t count)
{
return cudaMemcpyToArray(
array_ptr_,
0,
0,
host_data,
count,
cudaMemcpyHostToDevice
);
}
template <typename T>
cudaError_t upload(T const* host_data, size_t width, size_t height, size_t depth)
{
cudaMemcpy3DParms copy_params;
memset(&copy_params, 0, sizeof(copy_params));
copy_params.srcPtr = make_cudaPitchedPtr(
const_cast<T*>(host_data),
width * sizeof(T),
width,
height
);
copy_params.dstArray = array_ptr_;
copy_params.extent = { width, height, depth };
copy_params.kind = cudaMemcpyHostToDevice;
return cudaMemcpy3D(&copy_params);
}
private:
cudaArray_t array_ptr_ = nullptr;
cudaArray_t release()
{
cudaArray_t ptr = array_ptr_;
array_ptr_ = nullptr;
return ptr;
}
void reset(cudaArray_t ptr = nullptr)
{
if (array_ptr_)
{
cudaFreeArray(array_ptr_);
}
array_ptr_ = ptr;
}
};
} // cuda
} // visionaray
#endif // VSNRAY_CUDA_ARRAY_H
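// Editorial usage sketch (not part of the original header): it shows how the
// RAII wrapper above might allocate a 2D CUDA array of float texels and
// upload host data. The channel format, sizes, and the include path are
// illustrative assumptions; only cuda::array's own members are taken from
// the header itself.
#include <vector>
#include <cuda_runtime.h>
#include <visionaray/cuda/array.h> // assumed install path of this header

inline void upload_example()
{
    size_t width  = 256;
    size_t height = 128;
    std::vector<float> host(width * height, 1.0f);

    cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
    visionaray::cuda::array arr(desc, width, height);

    // upload()'s count is forwarded to cudaMemcpyToArray, which expects bytes.
    arr.upload(host.data(), host.size() * sizeof(float));
}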
| 1,658 |
4,816 | <filename>include/retdec/bin2llvmir/optimizations/main_detection/main_detection.h
/**
* @file include/retdec/bin2llvmir/optimizations/main_detection/main_detection.h
* @brief Detect main function.
* @copyright (c) 2017 Avast Software, licensed under the MIT license
*/
#ifndef RETDEC_BIN2LLVMIR_OPTIMIZATIONS_MAIN_DETECTION_MAIN_DETECTION_H
#define RETDEC_BIN2LLVMIR_OPTIMIZATIONS_MAIN_DETECTION_MAIN_DETECTION_H
#include <llvm/IR/Function.h>
#include <llvm/IR/Instructions.h>
#include <llvm/IR/Module.h>
#include <llvm/Pass.h>
#include "retdec/common/address.h"
#include "retdec/bin2llvmir/providers/config.h"
#include "retdec/bin2llvmir/providers/fileimage.h"
#include "retdec/bin2llvmir/providers/names.h"
namespace retdec {
namespace bin2llvmir {
class MainDetection : public llvm::ModulePass
{
public:
static char ID;
MainDetection();
virtual bool runOnModule(llvm::Module& M) override;
bool runOnModuleCustom(
llvm::Module& m,
Config* c,
FileImage* img = nullptr,
NameContainer* names = nullptr);
private:
bool run();
bool skipAnalysis();
void removeStaticallyLinked();
retdec::common::Address getFromFunctionNames();
retdec::common::Address getFromContext();
retdec::common::Address getFromEntryPointOffset(int offset);
retdec::common::Address getFromCrtSetCheckCount();
retdec::common::Address getFromInterlockedExchange();
bool applyResult(retdec::common::Address mainAddr);
private:
llvm::Module* _module = nullptr;
Config* _config = nullptr;
FileImage* _image = nullptr;
NameContainer* _names = nullptr;
};
} // namespace bin2llvmir
} // namespace retdec
#endif
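// Editorial usage sketch (not part of the original header): one way the pass
// declared above might be invoked directly through its custom entry point,
// outside the LLVM pass manager. The provider objects are assumed to have
// been created elsewhere; only signatures declared in this header are used.
#include <llvm/IR/Module.h>

inline bool detect_main_example(
        llvm::Module& module,
        retdec::bin2llvmir::Config* config,
        retdec::bin2llvmir::FileImage* image,
        retdec::bin2llvmir::NameContainer* names)
{
    retdec::bin2llvmir::MainDetection pass;
    // Returns true if the module was modified (e.g. a main function was identified).
    return pass.runOnModuleCustom(module, config, image, names);
}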
| 607 |
1,355 | // Copyright (c) 2018 <NAME>
//
// I am making my contributions/submissions to this project solely in my
// personal capacity and am not conveying any rights to any intellectual
// property of any third parties.
#ifndef INCLUDE_JET_DETAIL_VECTOR_EXPRESSION_INL_H
#define INCLUDE_JET_DETAIL_VECTOR_EXPRESSION_INL_H
#include <jet/vector_expression.h>
namespace jet {
// MARK: VectorExpression
template <typename T, typename E>
size_t VectorExpression<T, E>::size() const {
return static_cast<const E&>(*this).size();
}
template <typename T, typename E>
const E& VectorExpression<T, E>::operator()() const {
return static_cast<const E&>(*this);
}
// MARK: VectorUnaryOp
template <typename T, typename E, typename Op>
VectorUnaryOp<T, E, Op>::VectorUnaryOp(const E& u) : _u(u) {}
template <typename T, typename E, typename Op>
size_t VectorUnaryOp<T, E, Op>::size() const {
return _u.size();
}
template <typename T, typename E, typename Op>
T VectorUnaryOp<T, E, Op>::operator[](size_t i) const {
return _op(_u[i]);
}
// MARK: VectorBinaryOp
template <typename T, typename E1, typename E2, typename Op>
VectorBinaryOp<T, E1, E2, Op>::VectorBinaryOp(const E1& u, const E2& v)
: _u(u), _v(v) {
JET_ASSERT(u.size() == v.size());
}
template <typename T, typename E1, typename E2, typename Op>
size_t VectorBinaryOp<T, E1, E2, Op>::size() const {
return _v.size();
}
template <typename T, typename E1, typename E2, typename Op>
T VectorBinaryOp<T, E1, E2, Op>::operator[](size_t i) const {
return _op(_u[i], _v[i]);
}
template <typename T, typename E, typename Op>
VectorScalarBinaryOp<T, E, Op>::VectorScalarBinaryOp(const E& u, const T& v)
: _u(u), _v(v) {}
template <typename T, typename E, typename Op>
size_t VectorScalarBinaryOp<T, E, Op>::size() const {
return _u.size();
}
template <typename T, typename E, typename Op>
T VectorScalarBinaryOp<T, E, Op>::operator[](size_t i) const {
return _op(_u[i], _v);
}
// MARK: Global Functions
template <typename T, typename E>
VectorScalarAdd<T, E> operator+(const T& a, const VectorExpression<T, E>& b) {
return VectorScalarAdd<T, E>(b(), a);
}
template <typename T, typename E>
VectorScalarAdd<T, E> operator+(const VectorExpression<T, E>& a, const T& b) {
return VectorScalarAdd<T, E>(a(), b);
}
template <typename T, typename E1, typename E2>
VectorAdd<T, E1, E2> operator+(const VectorExpression<T, E1>& a,
const VectorExpression<T, E2>& b) {
return VectorAdd<T, E1, E2>(a(), b());
}
template <typename T, typename E>
VectorScalarRSub<T, E> operator-(const T& a, const VectorExpression<T, E>& b) {
return VectorScalarRSub<T, E>(b(), a);
}
template <typename T, typename E>
VectorScalarSub<T, E> operator-(const VectorExpression<T, E>& a, const T& b) {
return VectorScalarSub<T, E>(a(), b);
}
template <typename T, typename E1, typename E2>
VectorSub<T, E1, E2> operator-(const VectorExpression<T, E1>& a,
const VectorExpression<T, E2>& b) {
return VectorSub<T, E1, E2>(a(), b());
}
template <typename T, typename E>
VectorScalarMul<T, E> operator*(const T& a, const VectorExpression<T, E>& b) {
return VectorScalarMul<T, E>(b(), a);
}
template <typename T, typename E>
VectorScalarMul<T, E> operator*(const VectorExpression<T, E>& a, const T& b) {
return VectorScalarMul<T, E>(a(), b);
}
template <typename T, typename E1, typename E2>
VectorMul<T, E1, E2> operator*(const VectorExpression<T, E1>& a,
const VectorExpression<T, E2>& b) {
return VectorMul<T, E1, E2>(a(), b());
}
template <typename T, typename E>
VectorScalarRDiv<T, E> operator/(const T& a, const VectorExpression<T, E>& b) {
return VectorScalarRDiv<T, E>(b(), a);
}
template <typename T, typename E>
VectorScalarDiv<T, E> operator/(const VectorExpression<T, E>& a, const T& b) {
return VectorScalarDiv<T, E>(a(), b);
}
template <typename T, typename E1, typename E2>
VectorDiv<T, E1, E2> operator/(const VectorExpression<T, E1>& a,
const VectorExpression<T, E2>& b) {
return VectorDiv<T, E1, E2>(a(), b());
}
} // namespace jet
#endif // INCLUDE_JET_DETAIL_VECTOR_EXPRESSION_INL_H
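// Editorial usage sketch (not part of the original header): the operators
// above build lightweight expression objects rather than temporaries, so a
// compound expression is only evaluated element by element when indexed.
// VectorN<double> is assumed to be a concrete VectorExpression-derived
// container provided elsewhere in the library; the type and header name are
// assumptions, not taken from this file.
#include <jet/vector_n.h> // assumed header for a dynamic-size vector type

inline void expression_example()
{
    jet::VectorN<double> a = {1.0, 2.0, 3.0};
    jet::VectorN<double> b = {4.0, 5.0, 6.0};

    // No temporaries yet: "a + 2.0 * b" is a nested expression object
    // (a VectorAdd wrapping a VectorScalarMul).
    auto expr = a + 2.0 * b;

    // Evaluation happens lazily, one element at a time.
    double first = expr[0]; // 1.0 + 2.0 * 4.0 = 9.0
    (void) first;
}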
| 1,834 |
353 | #import <UIKit/UIKit.h>
//! Project version number for moa.
FOUNDATION_EXPORT double moaVersionNumber;
//! Project version string for moa.
FOUNDATION_EXPORT const unsigned char moaVersionString[];
// In this header, you should import all the public headers of your framework using statements like #import <Moa/PublicHeader.h>
| 95 |
1,032 | /*
* Copyright (c) 2011, <NAME> (http://tdistler.com)
* All rights reserved.
*
* The BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the name of the tdistler.com nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "bmp.h"
int load_bmp(const char *fname, struct bmp *b)
{
long len=0;
unsigned char *hdr=0;
FILE *file;
int row;
unsigned char *flip, *orig, *btm;
file = fopen(fname, "rb");
if (!file)
return 1;
fseek(file, 0, SEEK_END);
len = ftell(file);
fseek(file, 0, SEEK_SET);
b->raw = (unsigned char*)malloc(len);
if (!b->raw) {
fclose(file);
return 2;
}
if (len != fread(b->raw, 1, len, file)) {
free(b->raw);
fclose(file);
return 3;
}
fclose(file);
/* Parse the headers */
hdr = b->raw + sizeof(struct bmp_magic);
orig = b->raw + ((struct bmp_header*)hdr)->bmp_offset;
hdr += sizeof(struct bmp_header);
b->w = ((struct bmp_info_hdr*)hdr)->width;
b->h = ((struct bmp_info_hdr*)hdr)->height;
b->bit_depth = ((struct bmp_info_hdr*)hdr)->bitspp;
b->stride = (b->w * (b->bit_depth/8) + 3) & ~3;
/* Bitmaps are stored bottom-up... so flip. */
flip = b->img = (unsigned char*)malloc(b->h * b->stride);
if (!b->img) {
free_bmp(b);
return 4;
}
btm = orig + ((b->h - 1) * b->stride); /* Point to bottom row */
for (row=0; row < b->h; ++row) {
memcpy(flip, btm, b->stride);
flip += b->stride;
btm -= b->stride;
}
return 0;
}
void free_bmp(struct bmp *b)
{
if (b) {
if (b->raw)
free(b->raw);
if (b->img)
free(b->img);
}
}
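/* Editorial usage sketch (not part of the original file): loading a bitmap
 * with the helpers above and reporting its geometry. struct bmp and the
 * numeric return codes come from bmp.h / the code above; the input file name
 * is an illustrative assumption, and the printf format assumes the
 * width/height/stride fields are plain ints. */
int bmp_example(void)
{
    struct bmp image;
    int err = load_bmp("input.bmp", &image); /* hypothetical input file */
    if (err != 0) {
        fprintf(stderr, "load_bmp failed with code %d\n", err);
        return err;
    }
    /* image.img holds top-down rows, each image.stride bytes wide. */
    printf("%dx%d, %d bpp, stride %d\n",
           image.w, image.h, image.bit_depth, image.stride);
    free_bmp(&image);
    return 0;
}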
| 1,352 |
1,573 | <reponame>Playfloor/bonobo
import datetime
import time
import bonobo
def extract():
"""Placeholder, change, rename, remove... """
for x in range(60):
if x:
time.sleep(1)
yield datetime.datetime.now()
def get_graph():
graph = bonobo.Graph()
graph.add_chain()
return graph
if __name__ == "__main__":
parser = bonobo.get_argument_parser()
with bonobo.parse_args(parser):
bonobo.run(get_graph())
| 192 |
2,151 | // Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/third_party/quiche/src/quic/core/http/quic_client_promised_info.h"
#include <memory>
#include <string>
#include "net/third_party/quiche/src/quic/core/http/quic_spdy_client_session.h"
#include "net/third_party/quiche/src/quic/core/http/spdy_utils.h"
#include "net/third_party/quiche/src/quic/core/quic_utils.h"
#include "net/third_party/quiche/src/quic/core/tls_client_handshaker.h"
#include "net/third_party/quiche/src/quic/platform/api/quic_logging.h"
#include "net/third_party/quiche/src/quic/platform/api/quic_ptr_util.h"
#include "net/third_party/quiche/src/quic/platform/api/quic_socket_address.h"
#include "net/third_party/quiche/src/quic/platform/api/quic_test.h"
#include "net/third_party/quiche/src/quic/test_tools/crypto_test_utils.h"
#include "net/third_party/quiche/src/quic/test_tools/quic_client_promised_info_peer.h"
#include "net/third_party/quiche/src/quic/test_tools/quic_spdy_session_peer.h"
#include "net/third_party/quiche/src/quic/test_tools/quic_test_utils.h"
using spdy::SpdyHeaderBlock;
using testing::_;
using testing::StrictMock;
namespace quic {
namespace test {
namespace {
class MockQuicSpdyClientSession : public QuicSpdyClientSession {
public:
explicit MockQuicSpdyClientSession(
const ParsedQuicVersionVector& supported_versions,
QuicConnection* connection,
QuicClientPushPromiseIndex* push_promise_index)
: QuicSpdyClientSession(DefaultQuicConfig(),
supported_versions,
connection,
QuicServerId("example.com", 443, false),
&crypto_config_,
push_promise_index),
crypto_config_(crypto_test_utils::ProofVerifierForTesting(),
TlsClientHandshaker::CreateSslCtx()),
authorized_(true) {}
MockQuicSpdyClientSession(const MockQuicSpdyClientSession&) = delete;
MockQuicSpdyClientSession& operator=(const MockQuicSpdyClientSession&) =
delete;
~MockQuicSpdyClientSession() override {}
bool IsAuthorized(const std::string& authority) override {
return authorized_;
}
void set_authorized(bool authorized) { authorized_ = authorized; }
MOCK_METHOD1(CloseStream, void(QuicStreamId stream_id));
private:
QuicCryptoClientConfig crypto_config_;
bool authorized_;
};
class QuicClientPromisedInfoTest : public QuicTest {
public:
class StreamVisitor;
QuicClientPromisedInfoTest()
: connection_(new StrictMock<MockQuicConnection>(&helper_,
&alarm_factory_,
Perspective::IS_CLIENT)),
session_(connection_->supported_versions(),
connection_,
&push_promise_index_),
body_("hello world"),
promise_id_(
QuicUtils::GetInvalidStreamId(connection_->transport_version())) {
connection_->AdvanceTime(QuicTime::Delta::FromSeconds(1));
session_.Initialize();
headers_[":status"] = "200";
headers_["content-length"] = "11";
stream_ = QuicMakeUnique<QuicSpdyClientStream>(
GetNthClientInitiatedBidirectionalStreamId(
connection_->transport_version(), 0),
&session_, BIDIRECTIONAL);
stream_visitor_ = QuicMakeUnique<StreamVisitor>();
stream_->set_visitor(stream_visitor_.get());
push_promise_[":path"] = "/bar";
push_promise_[":authority"] = "www.google.com";
push_promise_[":version"] = "HTTP/1.1";
push_promise_[":method"] = "GET";
push_promise_[":scheme"] = "https";
promise_url_ = SpdyUtils::GetPromisedUrlFromHeaders(push_promise_);
client_request_ = push_promise_.Clone();
promise_id_ = GetNthServerInitiatedUnidirectionalStreamId(
connection_->transport_version(), 0);
}
class StreamVisitor : public QuicSpdyClientStream::Visitor {
void OnClose(QuicSpdyStream* stream) override {
QUIC_DVLOG(1) << "stream " << stream->id();
}
};
void ReceivePromise(QuicStreamId id) {
auto headers = AsHeaderList(push_promise_);
stream_->OnPromiseHeaderList(id, headers.uncompressed_header_bytes(),
headers);
}
MockQuicConnectionHelper helper_;
MockAlarmFactory alarm_factory_;
StrictMock<MockQuicConnection>* connection_;
QuicClientPushPromiseIndex push_promise_index_;
MockQuicSpdyClientSession session_;
std::unique_ptr<QuicSpdyClientStream> stream_;
std::unique_ptr<StreamVisitor> stream_visitor_;
std::unique_ptr<QuicSpdyClientStream> promised_stream_;
SpdyHeaderBlock headers_;
std::string body_;
SpdyHeaderBlock push_promise_;
QuicStreamId promise_id_;
std::string promise_url_;
SpdyHeaderBlock client_request_;
};
TEST_F(QuicClientPromisedInfoTest, PushPromise) {
ReceivePromise(promise_id_);
// Verify that the promise is in the unclaimed streams map.
EXPECT_NE(session_.GetPromisedById(promise_id_), nullptr);
}
TEST_F(QuicClientPromisedInfoTest, PushPromiseCleanupAlarm) {
ReceivePromise(promise_id_);
// Verify that the promise is in the unclaimed streams map.
QuicClientPromisedInfo* promised = session_.GetPromisedById(promise_id_);
ASSERT_NE(promised, nullptr);
// Fire the alarm that will cancel the promised stream.
EXPECT_CALL(*connection_, SendControlFrame(_));
EXPECT_CALL(*connection_,
OnStreamReset(promise_id_, QUIC_PUSH_STREAM_TIMED_OUT));
alarm_factory_.FireAlarm(QuicClientPromisedInfoPeer::GetAlarm(promised));
// Verify that the promise is gone after the alarm fires.
EXPECT_EQ(session_.GetPromisedById(promise_id_), nullptr);
EXPECT_EQ(session_.GetPromisedByUrl(promise_url_), nullptr);
}
TEST_F(QuicClientPromisedInfoTest, PushPromiseInvalidMethod) {
// Promise with an unsafe method
push_promise_[":method"] = "PUT";
EXPECT_CALL(*connection_, SendControlFrame(_));
EXPECT_CALL(*connection_,
OnStreamReset(promise_id_, QUIC_INVALID_PROMISE_METHOD));
ReceivePromise(promise_id_);
// Verify that the promise headers were ignored
EXPECT_EQ(session_.GetPromisedById(promise_id_), nullptr);
EXPECT_EQ(session_.GetPromisedByUrl(promise_url_), nullptr);
}
TEST_F(QuicClientPromisedInfoTest, PushPromiseMissingMethod) {
// Promise with a missing method
push_promise_.erase(":method");
EXPECT_CALL(*connection_, SendControlFrame(_));
EXPECT_CALL(*connection_,
OnStreamReset(promise_id_, QUIC_INVALID_PROMISE_METHOD));
ReceivePromise(promise_id_);
// Verify that the promise headers were ignored
EXPECT_EQ(session_.GetPromisedById(promise_id_), nullptr);
EXPECT_EQ(session_.GetPromisedByUrl(promise_url_), nullptr);
}
TEST_F(QuicClientPromisedInfoTest, PushPromiseInvalidUrl) {
// Remove required header field to make URL invalid
push_promise_.erase(":authority");
EXPECT_CALL(*connection_, SendControlFrame(_));
EXPECT_CALL(*connection_,
OnStreamReset(promise_id_, QUIC_INVALID_PROMISE_URL));
ReceivePromise(promise_id_);
// Verify that the promise headers were ignored
EXPECT_EQ(session_.GetPromisedById(promise_id_), nullptr);
EXPECT_EQ(session_.GetPromisedByUrl(promise_url_), nullptr);
}
TEST_F(QuicClientPromisedInfoTest, PushPromiseUnauthorizedUrl) {
session_.set_authorized(false);
EXPECT_CALL(*connection_, SendControlFrame(_));
EXPECT_CALL(*connection_,
OnStreamReset(promise_id_, QUIC_UNAUTHORIZED_PROMISE_URL));
ReceivePromise(promise_id_);
QuicClientPromisedInfo* promised = session_.GetPromisedById(promise_id_);
ASSERT_EQ(promised, nullptr);
}
TEST_F(QuicClientPromisedInfoTest, PushPromiseMismatch) {
ReceivePromise(promise_id_);
QuicClientPromisedInfo* promised = session_.GetPromisedById(promise_id_);
ASSERT_NE(promised, nullptr);
// Need to send the promised response headers and initiate the
// rendezvous for secondary validation to proceed.
QuicSpdyClientStream* promise_stream = static_cast<QuicSpdyClientStream*>(
session_.GetOrCreateStream(promise_id_));
auto headers = AsHeaderList(headers_);
promise_stream->OnStreamHeaderList(false, headers.uncompressed_header_bytes(),
headers);
TestPushPromiseDelegate delegate(/*match=*/false);
EXPECT_CALL(*connection_, SendControlFrame(_));
EXPECT_CALL(*connection_,
OnStreamReset(promise_id_, QUIC_PROMISE_VARY_MISMATCH));
EXPECT_CALL(session_, CloseStream(promise_id_));
promised->HandleClientRequest(client_request_, &delegate);
}
TEST_F(QuicClientPromisedInfoTest, PushPromiseVaryWaits) {
ReceivePromise(promise_id_);
QuicClientPromisedInfo* promised = session_.GetPromisedById(promise_id_);
EXPECT_FALSE(promised->is_validating());
ASSERT_NE(promised, nullptr);
// Now initiate rendezvous.
TestPushPromiseDelegate delegate(/*match=*/true);
promised->HandleClientRequest(client_request_, &delegate);
EXPECT_TRUE(promised->is_validating());
// Promise is still there, waiting for response.
EXPECT_NE(session_.GetPromisedById(promise_id_), nullptr);
// Send Response, should trigger promise validation and complete rendezvous
QuicSpdyClientStream* promise_stream = static_cast<QuicSpdyClientStream*>(
session_.GetOrCreateStream(promise_id_));
ASSERT_NE(promise_stream, nullptr);
auto headers = AsHeaderList(headers_);
promise_stream->OnStreamHeaderList(false, headers.uncompressed_header_bytes(),
headers);
// Promise is gone
EXPECT_EQ(session_.GetPromisedById(promise_id_), nullptr);
}
TEST_F(QuicClientPromisedInfoTest, PushPromiseVaryNoWait) {
ReceivePromise(promise_id_);
QuicClientPromisedInfo* promised = session_.GetPromisedById(promise_id_);
ASSERT_NE(promised, nullptr);
QuicSpdyClientStream* promise_stream = static_cast<QuicSpdyClientStream*>(
session_.GetOrCreateStream(promise_id_));
ASSERT_NE(promise_stream, nullptr);
// Send Response, should trigger promise validation and complete rendezvous
auto headers = AsHeaderList(headers_);
promise_stream->OnStreamHeaderList(false, headers.uncompressed_header_bytes(),
headers);
// Now initiate rendezvous.
TestPushPromiseDelegate delegate(/*match=*/true);
promised->HandleClientRequest(client_request_, &delegate);
// Promise is gone
EXPECT_EQ(session_.GetPromisedById(promise_id_), nullptr);
// Have a push stream
EXPECT_TRUE(delegate.rendezvous_fired());
EXPECT_NE(delegate.rendezvous_stream(), nullptr);
}
TEST_F(QuicClientPromisedInfoTest, PushPromiseWaitCancels) {
ReceivePromise(promise_id_);
QuicClientPromisedInfo* promised = session_.GetPromisedById(promise_id_);
ASSERT_NE(promised, nullptr);
// Now initiate rendezvous.
TestPushPromiseDelegate delegate(/*match=*/true);
promised->HandleClientRequest(client_request_, &delegate);
// Promise is still there, waiting for response.
EXPECT_NE(session_.GetPromisedById(promise_id_), nullptr);
// Create response stream, but no data yet.
session_.GetOrCreateStream(promise_id_);
// Cancel the promised stream.
EXPECT_CALL(session_, CloseStream(promise_id_));
EXPECT_CALL(*connection_, SendControlFrame(_));
EXPECT_CALL(*connection_, OnStreamReset(promise_id_, QUIC_STREAM_CANCELLED));
promised->Cancel();
// Promise is gone
EXPECT_EQ(session_.GetPromisedById(promise_id_), nullptr);
}
TEST_F(QuicClientPromisedInfoTest, PushPromiseDataClosed) {
ReceivePromise(promise_id_);
QuicClientPromisedInfo* promised = session_.GetPromisedById(promise_id_);
ASSERT_NE(promised, nullptr);
QuicSpdyClientStream* promise_stream = static_cast<QuicSpdyClientStream*>(
session_.GetOrCreateStream(promise_id_));
ASSERT_NE(promise_stream, nullptr);
// Send response, rendezvous will be able to finish synchronously.
auto headers = AsHeaderList(headers_);
promise_stream->OnStreamHeaderList(false, headers.uncompressed_header_bytes(),
headers);
EXPECT_CALL(session_, CloseStream(promise_id_));
EXPECT_CALL(*connection_, SendControlFrame(_));
EXPECT_CALL(*connection_,
OnStreamReset(promise_id_, QUIC_STREAM_PEER_GOING_AWAY));
session_.SendRstStream(promise_id_, QUIC_STREAM_PEER_GOING_AWAY, 0);
// Now initiate rendezvous.
TestPushPromiseDelegate delegate(/*match=*/true);
EXPECT_EQ(promised->HandleClientRequest(client_request_, &delegate),
QUIC_FAILURE);
// Got an indication of the stream failure, client should retry
// request.
EXPECT_FALSE(delegate.rendezvous_fired());
EXPECT_EQ(delegate.rendezvous_stream(), nullptr);
// Promise is gone
EXPECT_EQ(session_.GetPromisedById(promise_id_), nullptr);
}
} // namespace
} // namespace test
} // namespace quic
| 4,929 |
3,508 | package com.fishercoder;
import com.fishercoder.solutions._1716;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class _1716Test {
private static _1716.Solution1 solution1;
@BeforeClass
public static void setup() {
solution1 = new _1716.Solution1();
}
@Test
public void test1() {
assertEquals(10, solution1.totalMoney(4));
}
@Test
public void test2() {
assertEquals(37, solution1.totalMoney(10));
}
@Test
public void test3() {
assertEquals(96, solution1.totalMoney(20));
}
} | 256 |
1,014 | <gh_stars>1000+
{
"MVID.ME": {
"short_name": "M VIDEO PJSC",
"long_name": "Public Joint Stock Company M.video",
"summary": "Public Joint Stock Company M.video, together with its subsidiaries, operates a chain of consumer electronic outlets and online internet stores in Russia. The company engages in the sale of television, audio, video, and Hi-Fi products, as well as home appliances and digital equipment; and the provision of related services. As of December 31, 2019, it had a network of 1,038 stores, including 983 leased stores and 55 owned stores. The company was founded in 1993 and is headquartered in Moscow, Russia. Public Joint Stock Company M.video operates as a subsidiary of Ericaria Holdings Limited.",
"currency": "RUB",
"sector": "Consumer Cyclical",
"industry": "Specialty Retail",
"exchange": "MCX",
"market": "ru_market",
"country": "Russia",
"state": null,
"city": "Moscow",
"zipcode": "105066",
"website": "http://www.mvideo.ru",
"market_cap": "Large Cap"
}
} | 395 |
3,655 | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#ifndef PADDLE_MODEL_PROTECT_UTIL_CRYPTO_AES_GCM_H
#define PADDLE_MODEL_PROTECT_UTIL_CRYPTO_AES_GCM_H
#include <openssl/aes.h>
#include <openssl/evp.h>
#include <iostream>
#include <string>
#include "encryption/util/include/crypto/basic.h"
namespace util {
namespace crypto {
// aes key 32 byte for 256 bit
#define AES_GCM_KEY_LENGTH 32
// aes tag 16 byte for 128 bit
#define AES_GCM_TAG_LENGTH 16
// aes iv 16 byte for 128 bit (the usual GCM IV size is 12 bytes / 96 bits, but this header uses 16)
#define AES_GCM_IV_LENGTH 16
class AesGcm {
public:
/**
* \brief encrypt using aes-gcm-256
*
* \note encrypt using aes-gcm-256. gcm mode
* will generate a tag (16 bytes), so the ciphertext buffer
* should be 16 bytes longer than the plaintext.
*
*
* \param plaintext plain text to be encrypted(in)
* \param len plain text's length(in)
* \param key aes key (in)
* \param iv aes iv (in)
* \param ciphertext encrypted text(out)
* \param out_len encrypted length(out)
*
* \return return 0 if successful
* -1 EVP_CIPHER_CTX_new or aes_gcm_key error
* -2 EVP_EncryptUpdate error
* -3 EVP_EncryptFinal_ex error
* -4 EVP_CIPHER_CTX_ctrl error
*/
static int encrypt_aes_gcm(const unsigned char* plaintext, const int& len,
const unsigned char* key, const unsigned char* iv,
unsigned char* ciphertext,
int& out_len); // NOLINT
/**
* \brief decrypt using aes-gcm-256
*
* \note decrypt using aes-gcm-256
*
* \param ciphertext cipher text to be decrypted(in)
* \param len cipher text's length(in)
* \param key aes key (in)
* \param iv aes iv (in)
* \param plaintext decrypted text(out)
* \param out_len decrypted length(out)
*
* \return return 0 if successful
* -1 EVP_CIPHER_CTX_new or aes_gcm_key error
* -2 EVP_DecryptUpdate error
* -3 EVP_CIPHER_CTX_ctrl error
* -4 EVP_DecryptFinal_ex error
*/
static int decrypt_aes_gcm(const unsigned char* ciphertext, const int& len,
const unsigned char* key, const unsigned char* iv,
unsigned char* plaintext, int& out_len); // NOLINT
private:
/**
* \brief initial aes-gcm-256 context use key & iv
*
* \note initial aes-gcm-256 context use key & iv
*
* \param key aes key (in)
* \param iv aes iv (in)
* \param e_ctx encryption context(out)
* \param d_ctx decryption context(out)
*
* \return return 0 if successful
* -1 EVP_xxcryptInit_ex error
* -2 EVP_CIPHER_CTX_ctrl error
* -3 EVP_xxcryptInit_ex error
*/
static int aes_gcm_key(const unsigned char* key, const unsigned char* iv,
EVP_CIPHER_CTX* e_ctx, EVP_CIPHER_CTX* d_ctx);
/**
* \brief initial aes-gcm-256 context use key & iv
*
* \note initial aes-gcm-256 context use key & iv
*
* \param key aes key (in)
* \param iv aes iv (in)
* \param e_ctx encryption context(out)
* \param d_ctx decryption context(out)
*
* \return return 0 if successful
* -1 EVP_xxcryptInit_ex error
* -2 EVP_CIPHER_CTX_ctrl error
* -3 EVP_xxcryptInit_ex error
* -4 invalid key length or iv length
* -5 hex_to_byte error
*/
static int aes_gcm_key(const std::string& key_hex, const std::string& iv_hex,
EVP_CIPHER_CTX* e_ctx, EVP_CIPHER_CTX* d_ctx);
};
} // namespace crypto
} // namespace util
#endif // PADDLE_MODEL_PROTECT_UTIL_CRYPTO_AES_GCM_H
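// Editorial usage sketch (not part of the original header): round-tripping a
// small buffer through the static helpers declared above. The key and IV
// buffers are assumed to be supplied by the caller with the lengths given by
// the AES_GCM_* macros; error handling is abbreviated.
#include <cstring>
#include <vector>

inline bool aes_gcm_roundtrip_example(const unsigned char* key /* 32 bytes */,
                                      const unsigned char* iv  /* AES_GCM_IV_LENGTH bytes */)
{
    const unsigned char plain[] = "example payload";
    const int plain_len = static_cast<int>(sizeof(plain));

    // Per the header's note, GCM appends a 16-byte tag, so reserve extra room.
    std::vector<unsigned char> cipher(plain_len + AES_GCM_TAG_LENGTH);
    int cipher_len = 0;
    if (util::crypto::AesGcm::encrypt_aes_gcm(plain, plain_len, key, iv,
                                              cipher.data(), cipher_len) != 0)
        return false;

    std::vector<unsigned char> decoded(cipher_len);
    int decoded_len = 0;
    if (util::crypto::AesGcm::decrypt_aes_gcm(cipher.data(), cipher_len, key, iv,
                                              decoded.data(), decoded_len) != 0)
        return false;

    return decoded_len == plain_len &&
           std::memcmp(decoded.data(), plain, plain_len) == 0;
}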
| 2,243 |
892 | <reponame>github/advisory-database
{
"schema_version": "1.2.0",
"id": "GHSA-87m6-hv55-49cw",
"modified": "2022-05-13T01:21:09Z",
"published": "2022-05-13T01:21:09Z",
"aliases": [
"CVE-2019-0007"
],
"details": "The vMX Series software uses a predictable IP ID Sequence Number. This leaves the system as well as clients connecting through the device susceptible to a family of attacks which rely on the use of predictable IP ID sequence numbers as their base method of attack. This issue was found during internal product security testing. Affected releases are Juniper Networks Junos OS: 15.1 versions prior to 15.1F5 on vMX Series.",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H"
}
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2019-0007"
},
{
"type": "WEB",
"url": "https://kb.juniper.net/JSA10903"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/106564"
}
],
"database_specific": {
"cwe_ids": [
"CWE-330"
],
"severity": "CRITICAL",
"github_reviewed": false
}
} | 523 |
679 | <reponame>Grosskopf/openoffice<filename>main/sw/source/ui/shells/textglos.cxx
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_sw.hxx"
#include <sfx2/request.hxx>
#include <svl/eitem.hxx>
#include <svl/stritem.hxx>
#include "errhdl.hxx"
#include "view.hxx"
#include "initui.hxx"
#include "cmdid.h"
#include "textsh.hxx"
#include "initui.hxx"
#include "gloshdl.hxx"
#include "glosdoc.hxx"
#include "gloslst.hxx"
#include "swabstdlg.hxx"
#include <misc.hrc>
// STATIC DATA -----------------------------------------------------------
void SwTextShell::ExecGlossary(SfxRequest &rReq)
{
sal_uInt16 nSlot = rReq.GetSlot();
::GetGlossaries()->UpdateGlosPath(!rReq.IsAPI() ||
FN_GLOSSARY_DLG == nSlot );
SwGlossaryHdl* pGlosHdl = GetView().GetGlosHdl();
// Update the SwGlossaryList?
sal_Bool bUpdateList = sal_False;
const SfxItemSet *pArgs = rReq.GetArgs();
const SfxPoolItem* pItem = 0;
if(pArgs)
pArgs->GetItemState(nSlot, sal_False, &pItem );
switch( nSlot )
{
case FN_GLOSSARY_DLG:
pGlosHdl->GlossaryDlg();
bUpdateList = sal_True;
rReq.Ignore();
break;
case FN_EXPAND_GLOSSARY:
{
sal_Bool bReturn;
bReturn = pGlosHdl->ExpandGlossary();
rReq.SetReturnValue( SfxBoolItem( nSlot, bReturn ) );
rReq.Done();
}
break;
case FN_NEW_GLOSSARY:
if(pItem && pArgs->Count() == 3 )
{
String aGroup = (( const SfxStringItem *)pItem)->GetValue();
String aName;
if(SFX_ITEM_SET == pArgs->GetItemState(FN_PARAM_1, sal_False, &pItem ))
aName = (( const SfxStringItem *)pItem)->GetValue();
String aShortName;
if(SFX_ITEM_SET == pArgs->GetItemState(FN_PARAM_2, sal_False, &pItem ))
aShortName = (( const SfxStringItem *)pItem)->GetValue();
SwAbstractDialogFactory* pFact = SwAbstractDialogFactory::Create();
DBG_ASSERT(pFact, "Dialogdiet fail!");
::GlossarySetActGroup fnSetActGroup = pFact->SetGlossaryActGroupFunc( DLG_RENAME_GLOS );
if ( fnSetActGroup )
(*fnSetActGroup)( aGroup );
pGlosHdl->SetCurGroup(aGroup, sal_True);
// The selected group may need to be created in NewGlossary!
pGlosHdl->NewGlossary( aName, aShortName, sal_True );
rReq.Done();
}
bUpdateList = sal_True;
break;
case FN_SET_ACT_GLOSSARY:
if(pItem)
{
String aGroup = (( const SfxStringItem *)pItem)->GetValue();
SwAbstractDialogFactory* pFact = SwAbstractDialogFactory::Create();
DBG_ASSERT(pFact, "Dialogdiet fail!");
::GlossarySetActGroup fnSetActGroup = pFact->SetGlossaryActGroupFunc( DLG_RENAME_GLOS );
if ( fnSetActGroup )
(*fnSetActGroup)( aGroup );
rReq.Done();
}
break;
case FN_INSERT_GLOSSARY:
{
if(pItem && pArgs->Count() > 1)
{
String aGroup = (( const SfxStringItem *)pItem)->GetValue();
String aName;
if(SFX_ITEM_SET == pArgs->GetItemState(FN_PARAM_1, sal_False, &pItem ))
aName = (( const SfxStringItem *)pItem)->GetValue();
SwAbstractDialogFactory* pFact = SwAbstractDialogFactory::Create();
DBG_ASSERT(pFact, "Dialogdiet fail!");
::GlossarySetActGroup fnSetActGroup = pFact->SetGlossaryActGroupFunc( DLG_RENAME_GLOS );
if ( fnSetActGroup )
(*fnSetActGroup)( aGroup );
pGlosHdl->SetCurGroup(aGroup, sal_True);
rReq.SetReturnValue(SfxBoolItem(nSlot, pGlosHdl->InsertGlossary( aName )));
rReq.Done();
}
}
break;
default:
ASSERT(sal_False, "wrong dispatcher");
return;
}
if(bUpdateList)
{
SwGlossaryList* pList = ::GetGlossaryList();
if(pList->IsActive())
pList->Update();
}
}
| 1,910 |
2,338 | # -*- Python -*- vim: set ft=python ts=4 sw=4 expandtab tw=79:
# Configuration file for the 'lit' test runner.
import os
import site
site.addsitedir(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'utils'))
from libcxx.test.googlebenchmark import GoogleBenchmark
# Tell pylint that we know config and lit_config exist somewhere.
if 'PYLINT_IMPORT' in os.environ:
config = object()
lit_config = object()
# name: The name of this test suite.
config.name = 'libc++ benchmarks'
config.suffixes = []
config.test_exec_root = os.path.join(config.libcxx_obj_root, 'benchmarks')
config.test_source_root = config.test_exec_root
config.test_format = GoogleBenchmark(test_sub_dirs='.',
test_suffix='.libcxx.out',
benchmark_args=config.benchmark_args) | 343 |
4,569 | package com.brianway.learning.java.jvm.classloading;
/**
* Created by brian on 17/3/19.
* Demonstration of fields that do not count as "active use" of a class
* 1. For a static field, only the class that directly defines the field is initialized
* 2. Referring to a class through an array definition does not trigger initialization of that class
* (with -XX:+TraceClassLoading, loading of the subclass can be observed on the HotSpot VM)
*/
public class NoInitialization {
public static void main(String[] args) {
System.out.println(SubClass.value);
SubClass[] sca = new SubClass[10];
System.out.println(ConstClass.HELLOWORLD);
}
}
/*
SuperClass init!
123
*/ | 305 |
9,959 | // version 14:
import lombok.NonNull;
record NonNullOnRecord2(@NonNull String a) {
public NonNullOnRecord2 {
System.out.println("Hello");
}
} | 55 |
30,023 | <reponame>domwillcode/home-assistant
"""Tests for the wake_on_lan component."""
| 28 |
326 | /*-------------------------------------------------------------------------
*
* execRemote.c
*
* Functions to execute commands on remote Datanodes
*
*
* Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
* Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
*
* IDENTIFICATION
* src/backend/pgxc/pool/execRemote.c
*
*-------------------------------------------------------------------------
*/
#include <time.h>
#include "postgres.h"
#include "access/twophase.h"
#include "access/gtm.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/xact.h"
#include "access/relscan.h"
#include "catalog/pg_type.h"
#include "catalog/pgxc_node.h"
#include "commands/prepare.h"
#ifdef PGXC
#include "commands/trigger.h"
#endif
#include "executor/executor.h"
#include "gtm/gtm_c.h"
#include "libpq/libpq.h"
#include "miscadmin.h"
#include "pgxc/execRemote.h"
#include "nodes/nodes.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/var.h"
#include "pgxc/copyops.h"
#include "pgxc/nodemgr.h"
#include "pgxc/poolmgr.h"
#include "storage/ipc.h"
#include "utils/datum.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/tuplesort.h"
#include "utils/snapmgr.h"
#include "utils/builtins.h"
#include "pgxc/locator.h"
#include "pgxc/pgxc.h"
#include "parser/parse_type.h"
#include "parser/parsetree.h"
#include "pgxc/xc_maintenance_mode.h"
/* Enforce the use of two-phase commit when temporary objects are used */
bool EnforceTwoPhaseCommit = true;
/*
* Max to begin flushing data to datanodes, max to stop flushing data to datanodes.
*/
#define MAX_SIZE_TO_FORCE_FLUSH (2 * 64 * 1024) /* 128 kB; note "2^10" is XOR in C, not exponentiation */
#define MAX_SIZE_TO_STOP_FLUSH (64 * 1024) /* 64 kB */
#define END_QUERY_TIMEOUT 20
#define ROLLBACK_RESP_LEN 9
typedef enum RemoteXactNodeStatus
{
RXACT_NODE_NONE, /* Initial state */
RXACT_NODE_PREPARE_SENT, /* PREPARE request sent */
RXACT_NODE_PREPARE_FAILED, /* PREPARE failed on the node */
RXACT_NODE_PREPARED, /* PREPARED successfully on the node */
RXACT_NODE_COMMIT_SENT, /* COMMIT sent successfully */
RXACT_NODE_COMMIT_FAILED, /* failed to COMMIT on the node */
RXACT_NODE_COMMITTED, /* COMMITTed successfully on the node */
RXACT_NODE_ABORT_SENT, /* ABORT sent successfully */
RXACT_NODE_ABORT_FAILED, /* failed to ABORT on the node */
RXACT_NODE_ABORTED /* ABORTed successfully on the node */
} RemoteXactNodeStatus;
typedef enum RemoteXactStatus
{
RXACT_NONE, /* Initial state */
RXACT_PREPARE_FAILED, /* PREPARE failed */
RXACT_PREPARED, /* PREPARED succeeded on all nodes */
RXACT_COMMIT_FAILED, /* COMMIT failed on all the nodes */
RXACT_PART_COMMITTED, /* COMMIT failed on some and succeeded on other nodes */
RXACT_COMMITTED, /* COMMIT succeeded on all the nodes */
RXACT_ABORT_FAILED, /* ABORT failed on all the nodes */
RXACT_PART_ABORTED, /* ABORT failed on some and succeeded on other nodes */
RXACT_ABORTED /* ABORT succeeded on all the nodes */
} RemoteXactStatus;
typedef struct RemoteXactState
{
/* Current status of the remote 2PC */
RemoteXactStatus status;
/*
* Information about all the nodes involved in the transaction. We track
* the number of writers and readers. The first numWriteRemoteNodes entries
* in the remoteNodeHandles and remoteNodeStatus correspond to the writer
* connections and rest correspond to the reader connections.
*/
int numWriteRemoteNodes;
int numReadRemoteNodes;
int maxRemoteNodes;
PGXCNodeHandle **remoteNodeHandles;
RemoteXactNodeStatus *remoteNodeStatus;
GlobalTransactionId commitXid;
bool preparedLocalNode;
char prepareGID[256]; /* GID used for internal 2PC */
} RemoteXactState;
static RemoteXactState remoteXactState;
#ifdef PGXC
typedef struct
{
xact_callback function;
void *fparams;
} abort_callback_type;
#endif
/*
* Buffer size does not affect performance significantly, just do not allow
* connection buffer grows infinitely
*/
#define COPY_BUFFER_SIZE 8192
#define PRIMARY_NODE_WRITEAHEAD 1024 * 1024
/*
* List of PGXCNodeHandle to track readers and writers involved in the
* current transaction
*/
static List *XactWriteNodes;
static List *XactReadNodes;
static char *preparedNodes;
/*
* Flag to track if a temporary object is accessed by the current transaction
*/
static bool temp_object_included = false;
#ifdef PGXC
static abort_callback_type dbcleanup_info = { NULL, NULL };
#endif
static int pgxc_node_begin(int conn_count, PGXCNodeHandle ** connections,
GlobalTransactionId gxid, bool need_tran_block,
bool readOnly, char node_type);
static PGXCNodeAllHandles * get_exec_connections(RemoteQueryState *planstate,
ExecNodes *exec_nodes,
RemoteQueryExecType exec_type);
static void close_node_cursors(PGXCNodeHandle **connections, int conn_count, char *cursor);
static int pgxc_get_transaction_nodes(PGXCNodeHandle *connections[], int size, bool writeOnly);
static int pgxc_get_connections(PGXCNodeHandle *connections[], int size, List *connlist);
static bool pgxc_start_command_on_connection(PGXCNodeHandle *connection,
RemoteQueryState *remotestate, Snapshot snapshot);
static TupleTableSlot * RemoteQueryNext(ScanState *node);
static bool RemoteQueryRecheck(RemoteQueryState *node, TupleTableSlot *slot);
static char *generate_begin_command(void);
static bool pgxc_node_remote_prepare(char *prepareGID);
static void pgxc_node_remote_commit(void);
static void pgxc_node_remote_abort(void);
static char *pgxc_node_get_nodelist(bool localNode);
static void ExecClearTempObjectIncluded(void);
static void init_RemoteXactState(bool preparedLocalNode);
static void clear_RemoteXactState(void);
static void pgxc_node_report_error(RemoteQueryState *combiner);
static bool IsReturningDMLOnReplicatedTable(RemoteQuery *rq);
static void SetDataRowForIntParams(JunkFilter *junkfilter,
TupleTableSlot *sourceSlot, TupleTableSlot *newSlot,
RemoteQueryState *rq_state);
static void pgxc_append_param_val(StringInfo buf, Datum val, Oid valtype);
static void pgxc_append_param_junkval(TupleTableSlot *slot, AttrNumber attno, Oid valtype, StringInfo buf);
static void pgxc_rq_fire_bstriggers(RemoteQueryState *node);
static void pgxc_rq_fire_astriggers(RemoteQueryState *node);
static int flushPGXCNodeHandleData(PGXCNodeHandle *handle);
/*
* Create a structure to store parameters needed to combine responses from
* multiple connections as well as state information
*/
static RemoteQueryState *
CreateResponseCombiner(int node_count, CombineType combine_type)
{
RemoteQueryState *combiner;
/* ResponseComber is a typedef for pointer to ResponseCombinerData */
combiner = makeNode(RemoteQueryState);
if (combiner == NULL)
{
/* Out of memory */
return combiner;
}
combiner->node_count = node_count;
combiner->connections = NULL;
combiner->conn_count = 0;
combiner->combine_type = combine_type;
combiner->command_complete_count = 0;
combiner->request_type = REQUEST_TYPE_NOT_DEFINED;
combiner->tuple_desc = NULL;
combiner->description_count = 0;
combiner->copy_in_count = 0;
combiner->copy_out_count = 0;
combiner->errorMessage = NULL;
combiner->errorDetail = NULL;
combiner->query_Done = false;
combiner->currentRow.msg = NULL;
combiner->currentRow.msglen = 0;
combiner->currentRow.msgnode = 0;
combiner->rowBuffer = NIL;
combiner->tapenodes = NULL;
combiner->remoteCopyType = REMOTE_COPY_NONE;
combiner->copy_file = NULL;
combiner->rqs_cmd_id = FirstCommandId;
combiner->rqs_processed = 0;
return combiner;
}
/*
* Parse out row count from the command status response and convert it to integer
*/
static int
parse_row_count(const char *message, size_t len, uint64 *rowcount)
{
int digits = 0;
int pos;
*rowcount = 0;
/* skip \0 string terminator */
for (pos = 0; pos < len - 1; pos++)
{
if (message[pos] >= '0' && message[pos] <= '9')
{
*rowcount = *rowcount * 10 + message[pos] - '0';
digits++;
}
else
{
*rowcount = 0;
digits = 0;
}
}
return digits;
}
/*
* Convert RowDescription message to a TupleDesc
*/
static TupleDesc
create_tuple_desc(char *msg_body, size_t len)
{
TupleDesc result;
int i, nattr;
uint16 n16;
/* get number of attributes */
memcpy(&n16, msg_body, 2);
nattr = ntohs(n16);
msg_body += 2;
result = CreateTemplateTupleDesc(nattr, false);
/* decode attributes */
for (i = 1; i <= nattr; i++)
{
AttrNumber attnum;
char *attname;
char *typname;
Oid oidtypeid;
int32 typemode, typmod;
attnum = (AttrNumber) i;
/* attribute name */
attname = msg_body;
msg_body += strlen(attname) + 1;
/* type name */
typname = msg_body;
msg_body += strlen(typname) + 1;
/* table OID, ignored */
msg_body += 4;
/* column no, ignored */
msg_body += 2;
/* data type OID, ignored */
msg_body += 4;
/* type len, ignored */
msg_body += 2;
/* type mod */
memcpy(&typemode, msg_body, 4);
typmod = ntohl(typemode);
msg_body += 4;
/* PGXCTODO text/binary flag? */
msg_body += 2;
/* Get the OID type and mode type from typename */
parseTypeString(typname, &oidtypeid, NULL);
TupleDescInitEntry(result, attnum, attname, oidtypeid, typmod, 0);
}
return result;
}
/*
* Handle CopyOutCommandComplete ('c') message from a Datanode connection
*/
static void
HandleCopyOutComplete(RemoteQueryState *combiner)
{
if (combiner->request_type == REQUEST_TYPE_NOT_DEFINED)
combiner->request_type = REQUEST_TYPE_COPY_OUT;
if (combiner->request_type != REQUEST_TYPE_COPY_OUT)
/* Inconsistent responses */
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("Unexpected response from the Datanodes for 'c' message, current request type %d", combiner->request_type)));
/* Just do nothing, close message is managed by the Coordinator */
combiner->copy_out_count++;
}
/*
* Handle CommandComplete ('C') message from a Datanode connection
*/
static void
HandleCommandComplete(RemoteQueryState *combiner, char *msg_body, size_t len, PGXCNodeHandle *conn)
{
int digits = 0;
bool non_fqs_dml;
/* Is this a DML query that is not FQSed ? */
non_fqs_dml = (combiner->ss.ps.plan &&
((RemoteQuery*)combiner->ss.ps.plan)->rq_params_internal);
/*
* If we did not receive description we are having rowcount or OK response
*/
if (combiner->request_type == REQUEST_TYPE_NOT_DEFINED)
combiner->request_type = REQUEST_TYPE_COMMAND;
/* Extract rowcount */
if (combiner->combine_type != COMBINE_TYPE_NONE)
{
uint64 rowcount;
digits = parse_row_count(msg_body, len, &rowcount);
if (digits > 0)
{
/*
* PGXC TODO: Need to completely remove the dependency on whether
* it's an FQS or non-FQS DML query. For this, command_complete_count
* needs to be better handled. Currently this field is being updated
* for each iteration of FetchTuple by re-using the same combiner
* for each iteration, whereas it seems it should be updated only
* for each node execution, not for each tuple fetched.
*/
/* Replicated write, make sure they are the same */
if (combiner->combine_type == COMBINE_TYPE_SAME)
{
if (combiner->command_complete_count)
{
/* For FQS, check if there is a consistency issue with replicated table. */
if (rowcount != combiner->rqs_processed && !non_fqs_dml)
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("Write to replicated table returned"
"different results from the Datanodes")));
}
/* Always update the row count. We have initialized it to 0 */
combiner->rqs_processed = rowcount;
}
else
combiner->rqs_processed += rowcount;
/*
* This rowcount will be used to increment estate->es_processed
* either in ExecInsert/Update/Delete for non-FQS query, or will
* used in RemoteQueryNext() for FQS query.
*/
}
else
combiner->combine_type = COMBINE_TYPE_NONE;
}
/* If response checking is enable only then do further processing */
if (conn->ck_resp_rollback == RESP_ROLLBACK_CHECK)
{
conn->ck_resp_rollback = RESP_ROLLBACK_NOT_RECEIVED;
if (len == ROLLBACK_RESP_LEN) /* No need to do string comparison otherwise */
{
if (strcmp(msg_body, "ROLLBACK") == 0)
conn->ck_resp_rollback = RESP_ROLLBACK_RECEIVED;
}
}
combiner->command_complete_count++;
}
/*
* Handle RowDescription ('T') message from a Datanode connection
*/
static bool
HandleRowDescription(RemoteQueryState *combiner, char *msg_body, size_t len)
{
if (combiner->request_type == REQUEST_TYPE_NOT_DEFINED)
combiner->request_type = REQUEST_TYPE_QUERY;
if (combiner->request_type != REQUEST_TYPE_QUERY)
{
/* Inconsistent responses */
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("Unexpected response from the Datanodes for 'T' message, current request type %d", combiner->request_type)));
}
/* Increment counter and check if it was first */
if (combiner->description_count++ == 0)
{
combiner->tuple_desc = create_tuple_desc(msg_body, len);
return true;
}
return false;
}
#ifdef NOT_USED
/*
* Handle ParameterStatus ('S') message from a Datanode connection (SET command)
*/
static void
HandleParameterStatus(RemoteQueryState *combiner, char *msg_body, size_t len)
{
if (combiner->request_type == REQUEST_TYPE_NOT_DEFINED)
combiner->request_type = REQUEST_TYPE_QUERY;
if (combiner->request_type != REQUEST_TYPE_QUERY)
{
/* Inconsistent responses */
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("Unexpected response from the Datanodes for 'S' message, current request type %d", combiner->request_type)));
}
/* Proxy last */
if (++combiner->description_count == combiner->node_count)
{
pq_putmessage('S', msg_body, len);
}
}
#endif
/*
* Handle CopyInResponse ('G') message from a Datanode connection
*/
static void
HandleCopyIn(RemoteQueryState *combiner)
{
if (combiner->request_type == REQUEST_TYPE_NOT_DEFINED)
combiner->request_type = REQUEST_TYPE_COPY_IN;
if (combiner->request_type != REQUEST_TYPE_COPY_IN)
{
/* Inconsistent responses */
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("Unexpected response from the Datanodes for 'G' message, current request type %d", combiner->request_type)));
}
/*
* The normal PG code will output an G message when it runs in the
* Coordinator, so do not proxy message here, just count it.
*/
combiner->copy_in_count++;
}
/*
* Handle CopyOutResponse ('H') message from a Datanode connection
*/
static void
HandleCopyOut(RemoteQueryState *combiner)
{
if (combiner->request_type == REQUEST_TYPE_NOT_DEFINED)
combiner->request_type = REQUEST_TYPE_COPY_OUT;
if (combiner->request_type != REQUEST_TYPE_COPY_OUT)
{
/* Inconsistent responses */
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("Unexpected response from the Datanodes for 'H' message, current request type %d", combiner->request_type)));
}
/*
* The normal PG code will output an H message when it runs in the
* Coordinator, so do not proxy message here, just count it.
*/
combiner->copy_out_count++;
}
/*
* Handle CopyOutDataRow ('d') message from a Datanode connection
*/
static void
HandleCopyDataRow(RemoteQueryState *combiner, char *msg_body, size_t len)
{
if (combiner->request_type == REQUEST_TYPE_NOT_DEFINED)
combiner->request_type = REQUEST_TYPE_COPY_OUT;
/* Inconsistent responses */
if (combiner->request_type != REQUEST_TYPE_COPY_OUT)
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("Unexpected response from the Datanodes for 'd' message, current request type %d", combiner->request_type)));
/* count the row */
combiner->processed++;
/* Output remote COPY operation to correct location */
switch (combiner->remoteCopyType)
{
case REMOTE_COPY_FILE:
/* Write data directly to file */
fwrite(msg_body, 1, len, combiner->copy_file);
break;
case REMOTE_COPY_STDOUT:
/* Send back data to client */
pq_putmessage('d', msg_body, len);
break;
case REMOTE_COPY_TUPLESTORE:
{
Datum *values;
bool *nulls;
TupleDesc tupdesc = combiner->tuple_desc;
int i, dropped;
Form_pg_attribute *attr = tupdesc->attrs;
FmgrInfo *in_functions;
Oid *typioparams;
char **fields;
values = (Datum *) palloc(tupdesc->natts * sizeof(Datum));
nulls = (bool *) palloc(tupdesc->natts * sizeof(bool));
in_functions = (FmgrInfo *) palloc(tupdesc->natts * sizeof(FmgrInfo));
typioparams = (Oid *) palloc(tupdesc->natts * sizeof(Oid));
/* Calculate the Oids of input functions */
for (i = 0; i < tupdesc->natts; i++)
{
Oid in_func_oid;
/* Do not need any information for dropped attributes */
if (attr[i]->attisdropped)
continue;
getTypeInputInfo(attr[i]->atttypid,
&in_func_oid, &typioparams[i]);
fmgr_info(in_func_oid, &in_functions[i]);
}
/*
* Convert message into an array of fields.
* Last \n is not included in converted message.
*/
fields = CopyOps_RawDataToArrayField(tupdesc, msg_body, len - 1);
/* Fill in the array values */
dropped = 0;
for (i = 0; i < tupdesc->natts; i++)
{
char *string = fields[i - dropped];
/* Do not need any information for dropped attributes */
if (attr[i]->attisdropped)
{
dropped++;
nulls[i] = true; /* Consider dropped parameter as NULL */
continue;
}
/* Find value */
values[i] = InputFunctionCall(&in_functions[i],
string,
typioparams[i],
attr[i]->atttypmod);
/* Setup value with NULL flag if necessary */
if (string == NULL)
nulls[i] = true;
else
nulls[i] = false;
}
/* Then insert the values into tuplestore */
tuplestore_putvalues(combiner->tuplestorestate,
combiner->tuple_desc,
values,
nulls);
/* Clean up everything */
if (*fields)
pfree(*fields);
pfree(fields);
pfree(values);
pfree(nulls);
pfree(in_functions);
pfree(typioparams);
}
break;
case REMOTE_COPY_NONE:
default:
Assert(0); /* Should not happen */
}
}
/*
* Handle DataRow ('D') message from a Datanode connection
* The function returns true if buffer can accept more data rows.
* Caller must stop reading if function returns false
*/
static void
HandleDataRow(RemoteQueryState *combiner, char *msg_body, size_t len, Oid nodeoid)
{
/* We expect previous message is consumed */
Assert(combiner->currentRow.msg == NULL);
if (combiner->request_type != REQUEST_TYPE_QUERY)
{
/* Inconsistent responses */
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("Unexpected response from the Datanodes for 'D' message, current request type %d", combiner->request_type)));
}
/*
* If we got an error already ignore incoming data rows from other nodes
* Still we want to continue reading until get CommandComplete
*/
if (combiner->errorMessage)
return;
/*
* We are copying message because it points into connection buffer, and
* will be overwritten on next socket read
*/
combiner->currentRow.msg = (char *) palloc(len);
memcpy(combiner->currentRow.msg, msg_body, len);
combiner->currentRow.msglen = len;
combiner->currentRow.msgnode = nodeoid;
}
/*
* Handle ErrorResponse ('E') message from a Datanode connection
*/
static void
HandleError(RemoteQueryState *combiner, char *msg_body, size_t len)
{
/* parse error message */
char *code = NULL;
char *message = NULL;
char *detail = NULL;
int offset = 0;
/*
* Scan until point to terminating \0
*/
while (offset + 1 < len)
{
/* pointer to the field message */
char *str = msg_body + offset + 1;
switch (msg_body[offset])
{
case 'C': /* code */
code = str;
break;
case 'M': /* message */
message = str;
break;
case 'D': /* details */
detail = str;
break;
/* Fields not yet in use */
case 'S': /* severity */
case 'R': /* routine */
case 'H': /* hint */
case 'P': /* position string */
case 'p': /* position int */
case 'q': /* int query */
case 'W': /* where */
case 'F': /* file */
case 'L': /* line */
default:
break;
}
/* code, message and \0 */
offset += strlen(str) + 2;
}
/*
* We may have special handling for some errors, default handling is to
* throw out error with the same message. We can not ereport immediately
* because we should read from this and other connections until
* ReadyForQuery is received, so we just store the error message.
* If multiple connections return errors only first one is reported.
*/
if (!combiner->errorMessage)
{
combiner->errorMessage = pstrdup(message);
/* Error Code is exactly 5 significant bytes */
if (code)
memcpy(combiner->errorCode, code, 5);
}
if (!combiner->errorDetail && detail != NULL)
{
combiner->errorDetail = pstrdup(detail);
}
/*
* If Datanode have sent ErrorResponse it will never send CommandComplete.
* Increment the counter to prevent endless waiting for it.
*/
combiner->command_complete_count++;
}
/*
* HandleCmdComplete -
* combine deparsed sql statements execution results
*
* Input parameters:
* commandType is dml command type
* combineTag is used to combine the completion result
* msg_body is execution result needed to combine
* len is msg_body size
*/
void
HandleCmdComplete(CmdType commandType, CombineTag *combine,
const char *msg_body, size_t len)
{
int digits = 0;
uint64 originrowcount = 0;
uint64 rowcount = 0;
uint64 total = 0;
if (msg_body == NULL)
return;
/* if there's nothing in combine, just copy the msg_body */
if (strlen(combine->data) == 0)
{
strcpy(combine->data, msg_body);
combine->cmdType = commandType;
return;
}
else
{
/* commandType is conflict */
if (combine->cmdType != commandType)
return;
/* get the processed row number from msg_body */
digits = parse_row_count(msg_body, len + 1, &rowcount);
elog(DEBUG1, "digits is %d\n", digits);
Assert(digits >= 0);
/* no need to combine */
if (digits == 0)
return;
/* combine the processed row number */
parse_row_count(combine->data, strlen(combine->data) + 1, &originrowcount);
elog(DEBUG1, "originrowcount is %lu, rowcount is %lu\n", originrowcount, rowcount);
total = originrowcount + rowcount;
}
/* output command completion tag */
switch (commandType)
{
case CMD_SELECT:
strcpy(combine->data, "SELECT");
break;
case CMD_INSERT:
snprintf(combine->data, COMPLETION_TAG_BUFSIZE,
"INSERT %u %lu", 0, total);
break;
case CMD_UPDATE:
snprintf(combine->data, COMPLETION_TAG_BUFSIZE,
"UPDATE %lu", total);
break;
case CMD_DELETE:
snprintf(combine->data, COMPLETION_TAG_BUFSIZE,
"DELETE %lu", total);
break;
default:
strcpy(combine->data, "");
break;
}
}
/*
* Handle a Command Id ('M') message from a Datanode connection
*/
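/*
* Wire format note (as read below): the message body carries the command id
* as a 4-byte unsigned integer in network byte order, converted with ntohl().
*/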
static void
HandleDatanodeCommandId(RemoteQueryState *combiner, char *msg_body, size_t len)
{
uint32 n32;
CommandId cid;
Assert(msg_body != NULL);
Assert(len >= 4);
/* Get the command Id */
memcpy(&n32, &msg_body[0], 4);
cid = ntohl(n32);
/* If received command Id is higher than current one, set it to a new value */
if (cid > GetReceivedCommandId())
SetReceivedCommandId(cid);
}
/*
* Examine the specified combiner state and determine if command was completed
* successfully
*/
static bool
validate_combiner(RemoteQueryState *combiner)
{
/* There was an error message while combining */
if (combiner->errorMessage)
return false;
/* Check if state is defined */
if (combiner->request_type == REQUEST_TYPE_NOT_DEFINED)
return false;
/* Check all nodes completed */
if ((combiner->request_type == REQUEST_TYPE_COMMAND
|| combiner->request_type == REQUEST_TYPE_QUERY)
&& combiner->command_complete_count != combiner->node_count)
return false;
/* Check count of description responses */
if (combiner->request_type == REQUEST_TYPE_QUERY
&& combiner->description_count != combiner->node_count)
return false;
/* Check count of copy-in responses */
if (combiner->request_type == REQUEST_TYPE_COPY_IN
&& combiner->copy_in_count != combiner->node_count)
return false;
/* Check count of copy-out responses */
if (combiner->request_type == REQUEST_TYPE_COPY_OUT
&& combiner->copy_out_count != combiner->node_count)
return false;
/* Add other checks here as needed */
/* All is good if we are here */
return true;
}
/*
* Close the combiner and free allocated memory once it is no longer needed
*/
static void
CloseCombiner(RemoteQueryState *combiner)
{
if (combiner)
{
if (combiner->connections)
pfree(combiner->connections);
if (combiner->tuple_desc)
{
/*
* In the case of a remote COPY with tuplestore, the combiner is not
* responsible for freeing the tuple store. This is done at an upper
* level once data redistribution is completed.
*/
if (combiner->remoteCopyType != REMOTE_COPY_TUPLESTORE)
FreeTupleDesc(combiner->tuple_desc);
}
if (combiner->errorMessage)
pfree(combiner->errorMessage);
if (combiner->errorDetail)
pfree(combiner->errorDetail);
if (combiner->cursor_connections)
pfree(combiner->cursor_connections);
if (combiner->tapenodes)
pfree(combiner->tapenodes);
pfree(combiner);
}
}
/*
* Validate the combiner and release its storage, freeing allocated memory
*/
static bool
ValidateAndCloseCombiner(RemoteQueryState *combiner)
{
bool valid = validate_combiner(combiner);
CloseCombiner(combiner);
return valid;
}
/*
* Multiple steps can share the same Datanode connection, e.g. when the
* executor is running a multi-step query or the client is running multiple
* queries using the Extended Query Protocol. After returning the next tuple,
* ExecRemoteQuery passes execution control to the executor, which may then
* give it back to the same RemoteQuery or to a different one. It is possible
* that before returning a tuple the function does not read all Datanode
* responses. In this case pending responses should be read in the context of
* the original RemoteQueryState until the ReadyForQuery message, and data
* rows should be stored (buffered) so they are available when a fetch from
* that RemoteQueryState is requested again. BufferConnection does that job.
* If a RemoteQuery is going to use a connection it should check the
* connection state: DN_CONNECTION_STATE_QUERY indicates the query has data to
* read and combiner points to the original RemoteQueryState. If the combiner
* differs from "this" one, the connection should be buffered.
*/
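/*
* Caller-side pattern (as used in FetchTuple below): before reusing a handle,
* check
*
*     if (conn->state == DN_CONNECTION_STATE_QUERY &&
*         conn->combiner != NULL && conn->combiner != combiner)
*         BufferConnection(conn);
*
* so that rows belonging to the other RemoteQueryState are saved first.
*/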
void
BufferConnection(PGXCNodeHandle *conn)
{
RemoteQueryState *combiner = conn->combiner;
MemoryContext oldcontext;
if (combiner == NULL || conn->state != DN_CONNECTION_STATE_QUERY)
return;
/*
* When BufferConnection is invoked, the current memory context is related to
* another portal, which is trying to take control of the connection.
* TODO See if we can find a better context to switch to
*/
oldcontext = MemoryContextSwitchTo(combiner->ss.ss_ScanTupleSlot->tts_mcxt);
/* Find this connection among the combiner's connections */
combiner->current_conn = 0;
while (combiner->current_conn < combiner->conn_count)
{
if (combiner->connections[combiner->current_conn] == conn)
break;
combiner->current_conn++;
}
Assert(combiner->current_conn < combiner->conn_count);
/*
* Buffer data rows until the Datanode returns the number of rows specified by
* the fetch_size parameter of the last Execute message (PortalSuspended
* message) or the end of the result set is reached (CommandComplete message)
*/
while (conn->state == DN_CONNECTION_STATE_QUERY)
{
int res;
/* Move to buffer currentRow (received from the Datanode) */
if (combiner->currentRow.msg)
{
RemoteDataRow dataRow = (RemoteDataRow) palloc(sizeof(RemoteDataRowData));
*dataRow = combiner->currentRow;
combiner->currentRow.msg = NULL;
combiner->currentRow.msglen = 0;
combiner->currentRow.msgnode = 0;
combiner->rowBuffer = lappend(combiner->rowBuffer, dataRow);
}
res = handle_response(conn, combiner);
/*
* If the response message is a DataRow it will be handled on the next
* iteration.
* PortalSuspended will cause a connection state change and break the loop.
* The same is true for CommandComplete, but we need additional handling -
* remove the connection from the list of active connections.
* We may need to add handling of error responses.
*/
if (res == RESPONSE_EOF)
{
/* incomplete message, read more */
if (pgxc_node_receive(1, &conn, NULL))
{
conn->state = DN_CONNECTION_STATE_ERROR_FATAL;
add_error_message(conn, "Failed to fetch from Datanode");
}
}
else if (res == RESPONSE_COMPLETE)
{
/* Remove current connection, move last in-place, adjust current_conn */
if (combiner->current_conn < --combiner->conn_count)
combiner->connections[combiner->current_conn] = combiner->connections[combiner->conn_count];
else
combiner->current_conn = 0;
}
/*
* Before returning RESPONSE_COMPLETE or RESPONSE_SUSPENDED handle_response()
* changes the connection state to DN_CONNECTION_STATE_IDLE, breaking the
* loop. We do not need to do anything specific in the PortalSuspended case,
* so the "else if" block for that case is skipped.
*/
}
MemoryContextSwitchTo(oldcontext);
conn->combiner = NULL;
}
/*
* copy the datarow from combiner to the given slot, in the slot's memory
* context
*/
static void
CopyDataRowTupleToSlot(RemoteQueryState *combiner, TupleTableSlot *slot)
{
char *msg;
MemoryContext oldcontext;
oldcontext = MemoryContextSwitchTo(slot->tts_mcxt);
msg = (char *)palloc(combiner->currentRow.msglen);
memcpy(msg, combiner->currentRow.msg, combiner->currentRow.msglen);
ExecStoreDataRowTuple(msg, combiner->currentRow.msglen,
combiner->currentRow.msgnode, slot, true);
pfree(combiner->currentRow.msg);
combiner->currentRow.msg = NULL;
combiner->currentRow.msglen = 0;
combiner->currentRow.msgnode = 0;
MemoryContextSwitchTo(oldcontext);
}
/*
* Get the next data row from the combiner's buffer into the provided slot.
* Just clear the slot and return false if the buffer is empty; that means the
* end of the result set has been reached.
*/
bool
FetchTuple(RemoteQueryState *combiner, TupleTableSlot *slot)
{
bool have_tuple = false;
/* If we have message in the buffer, consume it */
if (combiner->currentRow.msg)
{
CopyDataRowTupleToSlot(combiner, slot);
have_tuple = true;
}
/*
* Note: if we are fetching unsorted results we cannot have both
* currentRow and buffered rows. When a connection is buffered, currentRow
* is moved to the buffer and then cleared after buffering is
* completed. Afterwards rows are taken from the buffer, bypassing
* currentRow, until the buffer is empty, and only after that is data read
* from a connection.
* PGXCTODO: the message should be allocated in the same memory context as
* that of the slot. Are we sure of that in the call to
* ExecStoreDataRowTuple below? If one fixes this memory issue, please
* consider using CopyDataRowTupleToSlot() for the same.
*/
if (list_length(combiner->rowBuffer) > 0)
{
RemoteDataRow dataRow = (RemoteDataRow) linitial(combiner->rowBuffer);
combiner->rowBuffer = list_delete_first(combiner->rowBuffer);
ExecStoreDataRowTuple(dataRow->msg, dataRow->msglen, dataRow->msgnode,
slot, true);
pfree(dataRow);
return true;
}
while (combiner->conn_count > 0)
{
int res;
PGXCNodeHandle *conn = combiner->connections[combiner->current_conn];
/* Going to use a connection, buffer it if needed */
if (conn->state == DN_CONNECTION_STATE_QUERY && conn->combiner != NULL
&& conn->combiner != combiner)
BufferConnection(conn);
/*
* If the current connection is idle it means the portal on the Datanode is
* suspended. If we have a tuple, do not hurry to request more rows;
* leave the connection clean for other RemoteQueries.
* If we do not have one, request more and try to get it.
*/
if (conn->state == DN_CONNECTION_STATE_IDLE)
{
/*
* If we have a tuple to return, do not hurry to request more; keep
* the connection clean
*/
if (have_tuple)
return true;
else
{
if (pgxc_node_send_execute(conn, combiner->cursor, 1) != 0)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to fetch from Datanode")));
if (pgxc_node_send_sync(conn) != 0)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to fetch from Datanode")));
if (pgxc_node_receive(1, &conn, NULL))
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to fetch from Datanode")));
conn->combiner = combiner;
}
}
/* read messages */
res = handle_response(conn, combiner);
if (res == RESPONSE_EOF)
{
/* incomplete message, read more */
if (pgxc_node_receive(1, &conn, NULL))
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to fetch from Datanode")));
continue;
}
else if (res == RESPONSE_SUSPENDED)
{
/* Make next connection current */
if (++combiner->current_conn >= combiner->conn_count)
combiner->current_conn = 0;
}
else if (res == RESPONSE_COMPLETE)
{
/* Remove current connection, move last in-place, adjust current_conn */
if (combiner->current_conn < --combiner->conn_count)
combiner->connections[combiner->current_conn] = combiner->connections[combiner->conn_count];
else
combiner->current_conn = 0;
}
else if (res == RESPONSE_DATAROW && have_tuple)
{
/*
* We already have a tuple and received another one, leave it till
* next fetch
*/
return true;
}
/* If we have message in the buffer, consume it */
if (combiner->currentRow.msg)
{
CopyDataRowTupleToSlot(combiner, slot);
have_tuple = true;
}
}
/* report end of data to the caller */
if (!have_tuple)
ExecClearTuple(slot);
return have_tuple;
}
/*
* Handle responses from the Datanode connections
*/
static int
pgxc_node_receive_responses(const int conn_count, PGXCNodeHandle ** connections,
struct timeval * timeout, RemoteQueryState *combiner)
{
int count = conn_count;
PGXCNodeHandle *to_receive[conn_count];
/* make a copy of the pointers to the connections */
memcpy(to_receive, connections, conn_count * sizeof(PGXCNodeHandle *));
/*
* Read results.
* Note we try and read from Datanode connections even if there is an error on one,
* so as to avoid reading incorrect results on the next statement.
* Other safeguards exist to avoid this, however.
*/
while (count > 0)
{
int i = 0;
if (pgxc_node_receive(count, to_receive, timeout))
return EOF;
while (i < count)
{
int result = handle_response(to_receive[i], combiner);
switch (result)
{
case RESPONSE_EOF: /* need more data for this connection, keep receiving */
i++;
break;
case RESPONSE_COMPLETE:
case RESPONSE_COPY:
/* Handling is done, do not track this connection */
count--;
/* Move last connection in place */
if (i < count)
to_receive[i] = to_receive[count];
break;
default:
/* Inconsistent responses */
add_error_message(to_receive[i], "Unexpected response from the Datanodes");
elog(ERROR, "Unexpected response from the Datanodes, result = %d, request type %d", result, combiner->request_type);
/* Stop tracking and move last connection in place */
count--;
if (i < count)
to_receive[i] = to_receive[count];
}
}
}
pgxc_node_report_error(combiner);
return 0;
}
/*
* Read the next message from the connection and update the combiner
* accordingly. If we are in an error state we just consume the messages and
* do not proxy them.
* Long term, we should look into cancelling executing statements
* and closing the connections.
* Return values:
* RESPONSE_EOF - need to receive more data for the connection
* RESPONSE_COMPLETE - done with the connection
* RESPONSE_SUSPENDED - got PortalSuspended, more rows can be fetched
* RESPONSE_TUPDESC - got tuple description
* RESPONSE_DATAROW - got data row
* RESPONSE_COPY - got copy response
* RESPONSE_BARRIER_OK - barrier command completed successfully
*/
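/*
* Caller-side sketch (illustrative): callers loop until the connection is
* done, receiving more bytes whenever RESPONSE_EOF is returned, roughly:
*
*     for (;;)
*     {
*         int res = handle_response(conn, combiner);
*
*         if (res == RESPONSE_EOF)
*         {
*             if (pgxc_node_receive(1, &conn, NULL))
*                 break;              -- receive failure, caller reports it
*             continue;
*         }
*         if (res == RESPONSE_COMPLETE)
*             break;                  -- ReadyForQuery has been consumed
*         ... handle TUPDESC, DATAROW and COPY results as appropriate ...
*     }
*/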
int
handle_response(PGXCNodeHandle * conn, RemoteQueryState *combiner)
{
char *msg;
int msg_len;
char msg_type;
bool suspended = false;
for (;;)
{
Assert(conn->state != DN_CONNECTION_STATE_IDLE);
/*
* If we are in the process of shutting down, we
* may be rolling back, and the buffer may contain other messages.
* We want to avoid a procarray exception
* as well as an error stack overflow.
*/
if (proc_exit_inprogress)
conn->state = DN_CONNECTION_STATE_ERROR_FATAL;
/* don't read from the connection if there is a fatal error */
if (conn->state == DN_CONNECTION_STATE_ERROR_FATAL)
return RESPONSE_COMPLETE;
/* No data available, exit */
if (!HAS_MESSAGE_BUFFERED(conn))
return RESPONSE_EOF;
Assert(conn->combiner == combiner || conn->combiner == NULL);
/* TODO handle other possible responses */
msg_type = get_message(conn, &msg_len, &msg);
switch (msg_type)
{
case '\0': /* Not enough data in the buffer */
return RESPONSE_EOF;
case 'c': /* CopyToCommandComplete */
HandleCopyOutComplete(combiner);
break;
case 'C': /* CommandComplete */
HandleCommandComplete(combiner, msg, msg_len, conn);
break;
case 'T': /* RowDescription */
#ifdef DN_CONNECTION_DEBUG
Assert(!conn->have_row_desc);
conn->have_row_desc = true;
#endif
if (HandleRowDescription(combiner, msg, msg_len))
return RESPONSE_TUPDESC;
break;
case 'D': /* DataRow */
#ifdef DN_CONNECTION_DEBUG
Assert(conn->have_row_desc);
#endif
HandleDataRow(combiner, msg, msg_len, conn->nodeoid);
return RESPONSE_DATAROW;
case 's': /* PortalSuspended */
suspended = true;
break;
case '1': /* ParseComplete */
case '2': /* BindComplete */
case '3': /* CloseComplete */
case 'n': /* NoData */
/* simple notifications, continue reading */
break;
case 'G': /* CopyInResponse */
conn->state = DN_CONNECTION_STATE_COPY_IN;
HandleCopyIn(combiner);
/* Done, return to caller to let it know the data can be passed in */
return RESPONSE_COPY;
case 'H': /* CopyOutResponse */
conn->state = DN_CONNECTION_STATE_COPY_OUT;
HandleCopyOut(combiner);
return RESPONSE_COPY;
case 'd': /* CopyOutDataRow */
conn->state = DN_CONNECTION_STATE_COPY_OUT;
HandleCopyDataRow(combiner, msg, msg_len);
break;
case 'E': /* ErrorResponse */
HandleError(combiner, msg, msg_len);
add_error_message(conn, combiner->errorMessage);
/*
* Do not return with an error, we still need to consume Z,
* ready-for-query
*/
break;
case 'A': /* NotificationResponse */
case 'N': /* NoticeResponse */
case 'S': /* SetCommandComplete */
/*
* Ignore these to prevent multiple messages, one from each
* node. Coordinator will send one for DDL anyway
*/
break;
case 'Z': /* ReadyForQuery */
{
/*
* The return value depends on the previous connection state.
* If it was PortalSuspended the Coordinator wants to send down
* another EXECUTE to fetch more rows, otherwise it is done
* with the connection.
*/
int result = suspended ? RESPONSE_SUSPENDED : RESPONSE_COMPLETE;
conn->transaction_status = msg[0];
conn->state = DN_CONNECTION_STATE_IDLE;
conn->combiner = NULL;
#ifdef DN_CONNECTION_DEBUG
conn->have_row_desc = false;
#endif
return result;
}
case 'M': /* Command Id */
HandleDatanodeCommandId(combiner, msg, msg_len);
break;
case 'b':
conn->state = DN_CONNECTION_STATE_IDLE;
return RESPONSE_BARRIER_OK;
case 'I': /* EmptyQuery */
default:
/* sync lost? */
elog(WARNING, "Received unsupported message type: %c", msg_type);
conn->state = DN_CONNECTION_STATE_ERROR_FATAL;
/* stop reading */
return RESPONSE_COMPLETE;
}
}
/* never happens, but keep the compiler quiet */
return RESPONSE_EOF;
}
/*
* Has the Datanode sent Ready For Query
*/
bool
is_data_node_ready(PGXCNodeHandle * conn)
{
char *msg;
int msg_len;
char msg_type;
for (;;)
{
/*
* If we are in the process of shutting down, we
* may be rolling back, and the buffer may contain other messages.
* We want to avoid a procarray exception
* as well as an error stack overflow.
*/
if (proc_exit_inprogress)
conn->state = DN_CONNECTION_STATE_ERROR_FATAL;
/* don't read from the connection if there is a fatal error */
if (conn->state == DN_CONNECTION_STATE_ERROR_FATAL)
return true;
/* No data available, exit */
if (!HAS_MESSAGE_BUFFERED(conn))
return false;
msg_type = get_message(conn, &msg_len, &msg);
switch (msg_type)
{
case 's': /* PortalSuspended */
break;
case 'Z': /* ReadyForQuery */
/*
* The return value depends on the previous connection state.
* If it was PortalSuspended the Coordinator wants to send down
* another EXECUTE to fetch more rows, otherwise it is done
* with the connection.
*/
conn->transaction_status = msg[0];
conn->state = DN_CONNECTION_STATE_IDLE;
conn->combiner = NULL;
return true;
}
}
/* never happens, but keep the compiler quiet */
return false;
}
/*
* Construct a BEGIN TRANSACTION command after taking into account the
* current options. The returned string is not palloced and is valid only until
* the next call to the function.
*/
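/*
* Example (illustrative): with default settings this typically yields
*   START TRANSACTION ISOLATION LEVEL read committed READ WRITE
* since the isolation level string comes straight from the
* transaction_isolation / default_transaction_isolation GUCs.
*/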
static char *
generate_begin_command(void)
{
static char begin_cmd[1024];
const char *read_only;
const char *isolation_level;
/*
* First get the READ ONLY status because the next call to GetConfigOption
* will overwrite the return buffer
*/
if (strcmp(GetConfigOption("transaction_read_only", false, false), "on") == 0)
read_only = "READ ONLY";
else
read_only = "READ WRITE";
/* Now get the isolation_level for the transaction */
isolation_level = GetConfigOption("transaction_isolation", false, false);
if (strcmp(isolation_level, "default") == 0)
isolation_level = GetConfigOption("default_transaction_isolation", false, false);
/* Finally build a START TRANSACTION command */
sprintf(begin_cmd, "START TRANSACTION ISOLATION LEVEL %s %s", isolation_level, read_only);
return begin_cmd;
}
/*
* Send BEGIN command to the Datanodes or Coordinators and receive responses.
* Also send the GXID for the transaction.
*/
static int
pgxc_node_begin(int conn_count, PGXCNodeHandle **connections,
GlobalTransactionId gxid, bool need_tran_block,
bool readOnly, char node_type)
{
int i;
struct timeval *timeout = NULL;
RemoteQueryState *combiner;
TimestampTz timestamp = GetCurrentGTMStartTimestamp();
PGXCNodeHandle *new_connections[conn_count];
int new_count = 0;
int con[conn_count];
int j = 0;
/*
* If no remote connections, we don't have anything to do
*/
if (conn_count == 0)
return 0;
for (i = 0; i < conn_count; i++)
{
/*
* If the node is already a participant in the transaction, skip it
*/
if (list_member(XactReadNodes, connections[i]) ||
list_member(XactWriteNodes, connections[i]))
{
/*
* If we are doing a write operation, we may need to shift the node
* to the write-list. RegisterTransactionNodes does that for us
*/
if (!readOnly)
RegisterTransactionNodes(1, (void **)&connections[i], true);
continue;
}
/*
* PGXC TODO - A connection should not be in DN_CONNECTION_STATE_QUERY
* state when we are about to send a BEGIN TRANSACTION command to the
* node. We should consider changing the following to an assert and fix
* any bugs reported
*/
if (connections[i]->state == DN_CONNECTION_STATE_QUERY)
BufferConnection(connections[i]);
/* Send GXID and check for errors */
if (GlobalTransactionIdIsValid(gxid) && pgxc_node_send_gxid(connections[i], gxid))
return EOF;
/* Send timestamp and check for errors */
if (GlobalTimestampIsValid(timestamp) && pgxc_node_send_timestamp(connections[i], timestamp))
return EOF;
/* Send BEGIN */
if (need_tran_block)
{
/* Send the BEGIN TRANSACTION command and check for errors */
if (pgxc_node_send_query(connections[i], generate_begin_command()))
return EOF;
con[j++] = PGXCNodeGetNodeId(connections[i]->nodeoid, node_type);
/*
* Register the node as a participant in the transaction. The
* caller should tell us if the node may do any write activity
*
* XXX This is a bit tricky since it would be difficult to know if
* statement has any side effect on the Datanode. So a SELECT
* statement may invoke a function on the Datanode which may end up
* modifying the data at the Datanode. We can possibly rely on the
* function qualification to decide if a statement is a read-only or a
* read-write statement.
*/
RegisterTransactionNodes(1, (void **)&connections[i], !readOnly);
new_connections[new_count++] = connections[i];
}
}
/*
* If we did not send a BEGIN command to any node, we are done. Otherwise,
* we need to check for any errors and report them
*/
if (new_count == 0)
return 0;
combiner = CreateResponseCombiner(new_count, COMBINE_TYPE_NONE);
/* Receive responses */
if (pgxc_node_receive_responses(new_count, new_connections, timeout, combiner))
return EOF;
/* Verify status */
if (!ValidateAndCloseCombiner(combiner))
return EOF;
/*
* Ask pooler to send commands (if any) to nodes involved in transaction to alter the
* behavior of current transaction. This fires all transaction level commands before
* issuing any DDL, DML or SELECT within the current transaction block.
*/
if (GetCurrentLocalParamStatus())
{
int res;
if (node_type == PGXC_NODE_DATANODE)
res = PoolManagerSendLocalCommand(j, con, 0, NULL);
else
res = PoolManagerSendLocalCommand(0, NULL, j, con);
if (res != 0)
return EOF;
}
/* No problem, let's get going */
return 0;
}
/*
* Prepare all remote nodes involved in this transaction. The local node is
* handled separately and prepared first in xact.c. If there is any error
* during this phase, it will be reported via ereport() and the transaction
* will be aborted on the local as well as remote nodes
*
* prepareGID is created and passed from xact.c
*/
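/*
* Example (illustrative): with prepareGID "T12345" the command sent to every
* write node is
*   PREPARE TRANSACTION 'T12345'
* and the per-node status moves RXACT_NODE_PREPARE_SENT -> RXACT_NODE_PREPARED
* (or RXACT_NODE_PREPARE_FAILED on error or a ROLLBACK response).
*/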
static bool
pgxc_node_remote_prepare(char *prepareGID)
{
int result = 0;
int write_conn_count = remoteXactState.numWriteRemoteNodes;
char prepare_cmd[256];
int i;
PGXCNodeHandle **connections = remoteXactState.remoteNodeHandles;
RemoteQueryState *combiner = NULL;
/*
* If there is NO write activity or the caller does not want us to run a
* 2PC protocol, we don't need to do anything special
*/
if ((write_conn_count == 0) || (prepareGID == NULL))
return false;
SetSendCommandId(false);
/* Save the prepareGID in the global state information */
sprintf(remoteXactState.prepareGID, "%s", prepareGID);
/* Generate the PREPARE TRANSACTION command */
sprintf(prepare_cmd, "PREPARE TRANSACTION '%s'", remoteXactState.prepareGID);
for (i = 0; i < write_conn_count; i++)
{
/*
* PGXCTODO - We should actually make sure that the connection state is
* IDLE when we reach here. The executor should have guaranteed that
* before the transaction gets to the commit point. For now, consume
* the pending data on the connection
*/
if (connections[i]->state != DN_CONNECTION_STATE_IDLE)
BufferConnection(connections[i]);
/* Clean the previous errors, if any */
connections[i]->error = NULL;
/*
* Now we are ready to PREPARE the transaction. Any error at this point
* can be safely ereport-ed and the transaction will be aborted.
*/
if (pgxc_node_send_query(connections[i], prepare_cmd))
{
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_PREPARE_FAILED;
remoteXactState.status = RXACT_PREPARE_FAILED;
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("failed to send PREPARE TRANSACTION command to "
"the node %u", connections[i]->nodeoid)));
}
else
{
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_PREPARE_SENT;
/* Let HandleCommandComplete know that response checking is enabled */
connections[i]->ck_resp_rollback = RESP_ROLLBACK_CHECK;
}
}
/*
* Receive and check for any errors. In case of errors, we don't bail out
* just yet. We first go through the list of connections and look for
* errors on each connection. This is important to ensure that we run
* an appropriate ROLLBACK command later on (prepared transactions must be
* rolled back with ROLLBACK PREPARED commands).
*
* PGXCTODO - There doesn't seem to be a solid mechanism to track errors on
* individual connections. The transaction_status field doesn't get set
* every time there is an error on the connection. The combiner mechanism is
* good for parallel processing, but I think we should have a leak-proof
* mechanism to track connection status
*/
if (write_conn_count)
{
combiner = CreateResponseCombiner(write_conn_count, COMBINE_TYPE_NONE);
/* Receive responses */
result = pgxc_node_receive_responses(write_conn_count, connections, NULL, combiner);
if (result || !validate_combiner(combiner))
result = EOF;
else
{
CloseCombiner(combiner);
combiner = NULL;
}
for (i = 0; i < write_conn_count; i++)
{
if (remoteXactState.remoteNodeStatus[i] == RXACT_NODE_PREPARE_SENT)
{
if (connections[i]->error)
{
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_PREPARE_FAILED;
remoteXactState.status = RXACT_PREPARE_FAILED;
}
else
{
/* Did we receive ROLLBACK in response to PREPARE TRANSACTION? */
if (connections[i]->ck_resp_rollback == RESP_ROLLBACK_RECEIVED)
{
/* If yes, it means PREPARE TRANSACTION failed */
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_PREPARE_FAILED;
remoteXactState.status = RXACT_PREPARE_FAILED;
result = 0;
}
else
{
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_PREPARED;
}
}
}
}
}
/*
* If we failed to PREPARE on one or more nodes, report an error and let
* the normal abort processing take charge of aborting the transaction
*/
if (result)
{
remoteXactState.status = RXACT_PREPARE_FAILED;
if (combiner)
pgxc_node_report_error(combiner);
else
elog(ERROR, "failed to PREPARE transaction on one or more nodes");
}
if (remoteXactState.status == RXACT_PREPARE_FAILED)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to PREPARE the transaction on one or more nodes")));
/* Everything went OK. */
remoteXactState.status = RXACT_PREPARED;
return result;
}
/*
* Commit a running or a previously PREPARED transaction on the remote nodes.
* The local transaction is handled separately in xact.c
*
* Once a COMMIT command is sent to any node, the transaction must finally
* be committed. But we still report errors via ereport and let
* AbortTransaction take care of handling partly committed transactions.
*
* For 2PC transactions: if the local node is involved in the transaction, it
* is already prepared locally and we are already in the context of a
* different transaction (we call it the auxiliary transaction). So
* AbortTransaction will actually abort the auxiliary transaction, which is
* OK. OTOH if the local node is not involved in the main transaction, then we
* don't care much if it is rolled back on the local node as part of abort
* processing.
*
* When 2PC is not used, for reasons such as the transaction having accessed
* some temporary objects, we are already exposed to the risk of committing on
* one node and aborting on some other node. So such cases need not get more
* attention.
*/
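/*
* Example (illustrative): prepared write nodes receive
*   COMMIT PREPARED 'T12345'
* while participants that only read in the transaction receive a plain
*   COMMIT TRANSACTION
* to close their transaction block.
*/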
static void
pgxc_node_remote_commit(void)
{
int result = 0;
char commitPrepCmd[256];
char commitCmd[256];
int write_conn_count = remoteXactState.numWriteRemoteNodes;
int read_conn_count = remoteXactState.numReadRemoteNodes;
PGXCNodeHandle **connections = remoteXactState.remoteNodeHandles;
PGXCNodeHandle *new_connections[write_conn_count + read_conn_count];
int new_conn_count = 0;
int i;
RemoteQueryState *combiner = NULL;
/*
* We must handle both reader and writer connections since the transaction
* must be closed even on a read-only node
*/
if (read_conn_count + write_conn_count == 0)
return;
SetSendCommandId(false);
/*
* Barrier:
*
* We should acquire the BarrierLock in SHARE mode here to ensure that
* there is no in-progress barrier at this point. This mechanism would
* work as long as the LWLock mechanism does not starve an EXCLUSIVE lock
* requester
*/
LWLockAcquire(BarrierLock, LW_SHARED);
/*
* The readers can be committed with a simple COMMIT command. We still need
* this to close the transaction block
*/
sprintf(commitCmd, "COMMIT TRANSACTION");
/*
* If we are running 2PC, construct a COMMIT command to commit the prepared
* transactions
*/
if (remoteXactState.status == RXACT_PREPARED)
{
sprintf(commitPrepCmd, "COMMIT PREPARED '%s'", remoteXactState.prepareGID);
/*
* If the local node is involved in the transaction, we would have
* already prepared it and started a new transaction. We can use the
* GXID of the new transaction to run the COMMIT PREPARED commands.
* So get an auxiliary GXID only if the local node is not involved
*/
if (!GlobalTransactionIdIsValid(remoteXactState.commitXid))
remoteXactState.commitXid = GetAuxilliaryTransactionId();
if(!GlobalTransactionIdIsValid(remoteXactState.commitXid))
{
remoteXactState.status = RXACT_COMMIT_FAILED;
ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR),
errmsg("failed to set commitXid for COMMIT PREPARED command")));
}
}
/*
* First send GXID if necessary. If there is an error at this stage, the
* transaction can be aborted safely because we haven't yet sent COMMIT
* command to any participant
*/
for (i = 0; i < write_conn_count + read_conn_count; i++)
{
if (remoteXactState.remoteNodeStatus[i] == RXACT_NODE_PREPARED)
{
Assert(GlobalTransactionIdIsValid(remoteXactState.commitXid));
if (pgxc_node_send_gxid(connections[i], remoteXactState.commitXid))
{
remoteXactState.status = RXACT_COMMIT_FAILED;
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("failed to send GXID for COMMIT PREPARED "
"command")));
}
}
}
/*
* Now send the COMMIT command to all the participants
*/
for (i = 0; i < write_conn_count + read_conn_count; i++)
{
const char *command;
Assert(remoteXactState.remoteNodeStatus[i] == RXACT_NODE_PREPARED ||
remoteXactState.remoteNodeStatus[i] == RXACT_NODE_NONE);
if (remoteXactState.remoteNodeStatus[i] == RXACT_NODE_PREPARED)
command = commitPrepCmd;
else
command = commitCmd;
/* Clean the previous errors, if any */
connections[i]->error = NULL;
if (pgxc_node_send_query(connections[i], command))
{
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_COMMIT_FAILED;
remoteXactState.status = RXACT_COMMIT_FAILED;
/*
* If the error occurred on the first connection, we still have a
* chance to abort the whole transaction. We prefer that because
* it reduces the need for any manual intervention, at least until
* we have an automatic mechanism to resolve in-doubt transactions
*
* XXX We can ideally check for the first writer connection, but keep
* it simple for now
*/
if (i == 0)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("failed to send COMMIT command to node")));
else
add_error_message(connections[i], "failed to send COMMIT "
"command to node");
}
else
{
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_COMMIT_SENT;
new_connections[new_conn_count++] = connections[i];
}
}
/*
* Release the BarrierLock.
*/
LWLockRelease(BarrierLock);
if (new_conn_count)
{
combiner = CreateResponseCombiner(new_conn_count, COMBINE_TYPE_NONE);
/* Receive responses */
result = pgxc_node_receive_responses(new_conn_count, new_connections, NULL, combiner);
if (result || !validate_combiner(combiner))
result = EOF;
else
{
CloseCombiner(combiner);
combiner = NULL;
}
/*
* Even if the command failed on some node, don't throw an error just
* yet. That gives a chance to look for individual connection status
* and record appropriate information for later recovery
*
* XXX A node once prepared must be able to either COMMIT or ABORT. So a
* COMMIT can fail only because of either communication error or because
* the node went down. Even if one node commits, the transaction must be
* eventually committed on all the nodes.
*/
/* At this point, we must be in one of the following states */
Assert(remoteXactState.status == RXACT_COMMIT_FAILED ||
remoteXactState.status == RXACT_PREPARED ||
remoteXactState.status == RXACT_NONE);
/*
* Go through every connection and check if COMMIT succeeded or failed on
* that connection. If the COMMIT has failed on one node, but succeeded on
* some other, such transactions need special attention (by the
* administrator for now)
*/
for (i = 0; i < write_conn_count + read_conn_count; i++)
{
if (remoteXactState.remoteNodeStatus[i] == RXACT_NODE_COMMIT_SENT)
{
if (connections[i]->error)
{
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_COMMIT_FAILED;
if (remoteXactState.status != RXACT_PART_COMMITTED)
remoteXactState.status = RXACT_COMMIT_FAILED;
}
else
{
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_COMMITTED;
if (remoteXactState.status == RXACT_COMMIT_FAILED)
remoteXactState.status = RXACT_PART_COMMITTED;
}
}
}
}
if (result)
{
if (combiner)
pgxc_node_report_error(combiner);
else
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to COMMIT the transaction on one or more nodes")));
}
if (remoteXactState.status == RXACT_COMMIT_FAILED ||
remoteXactState.status == RXACT_PART_COMMITTED)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to COMMIT the transaction on one or more nodes")));
remoteXactState.status = RXACT_COMMITTED;
}
/*
* Abort the current transaction on the local and remote nodes. If the
* transaction is prepared on the remote node, we send a ROLLBACK PREPARED
* command, otherwise a ROLLBACK command is sent.
*
* Note that if the local node was involved and prepared successfully, we are
* running in a separate transaction context right now
*/
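/*
* Example (illustrative): a node whose status is RXACT_NODE_PREPARED or
* RXACT_NODE_PREPARE_SENT gets
*   ROLLBACK PREPARED 'T12345'
* while all other participants get a plain
*   ROLLBACK TRANSACTION
*/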
static void
pgxc_node_remote_abort(void)
{
int result = 0;
char *rollbackCmd = "ROLLBACK TRANSACTION";
char rollbackPrepCmd[256];
int write_conn_count = remoteXactState.numWriteRemoteNodes;
int read_conn_count = remoteXactState.numReadRemoteNodes;
int i;
PGXCNodeHandle **connections = remoteXactState.remoteNodeHandles;
PGXCNodeHandle *new_connections[remoteXactState.numWriteRemoteNodes + remoteXactState.numReadRemoteNodes];
int new_conn_count = 0;
RemoteQueryState *combiner = NULL;
SetSendCommandId(false);
/* Send ROLLBACK or ROLLBACK PREPARED to the remote nodes */
for (i = 0; i < write_conn_count + read_conn_count; i++)
{
RemoteXactNodeStatus status = remoteXactState.remoteNodeStatus[i];
/* Clean the previous errors, if any */
connections[i]->error = NULL;
if ((status == RXACT_NODE_PREPARED) ||
(status == RXACT_NODE_PREPARE_SENT))
{
sprintf(rollbackPrepCmd, "ROLLBACK PREPARED '%s'", remoteXactState.prepareGID);
if (!GlobalTransactionIdIsValid(remoteXactState.commitXid))
remoteXactState.commitXid = GetAuxilliaryTransactionId();
/* Do not report error to avoid infinite loop of errors */
if (!GlobalTransactionIdIsValid(remoteXactState.commitXid))
{
ereport(WARNING, (errcode(ERRCODE_INTERNAL_ERROR),
errmsg("failed to set commitXid for ROLLBACK PREPARED command")));
return;
}
Assert(GlobalTransactionIdIsValid(remoteXactState.commitXid));
if (pgxc_node_send_gxid(connections[i], remoteXactState.commitXid))
{
add_error_message(connections[i], "failed to send GXID for "
"ROLLBACK PREPARED command");
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_ABORT_FAILED;
remoteXactState.status = RXACT_ABORT_FAILED;
}
else if (pgxc_node_send_query(connections[i], rollbackPrepCmd))
{
add_error_message(connections[i], "failed to send ROLLBACK PREPARED "
"TRANSACTION command to node");
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_ABORT_FAILED;
remoteXactState.status = RXACT_ABORT_FAILED;
}
else
{
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_ABORT_SENT;
new_connections[new_conn_count++] = connections[i];
}
}
else
{
if (pgxc_node_send_query(connections[i], rollbackCmd))
{
add_error_message(connections[i], "failed to send ROLLBACK "
"TRANSACTION command to node");
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_ABORT_FAILED;
remoteXactState.status = RXACT_ABORT_FAILED;
}
else
{
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_ABORT_SENT;
new_connections[new_conn_count++] = connections[i];
}
}
}
if (new_conn_count)
{
combiner = CreateResponseCombiner(new_conn_count, COMBINE_TYPE_NONE);
/* Receive responses */
result = pgxc_node_receive_responses(new_conn_count, new_connections, NULL, combiner);
if (result || !validate_combiner(combiner))
result = EOF;
else
{
CloseCombiner(combiner);
combiner = NULL;
}
for (i = 0; i < write_conn_count + read_conn_count; i++)
{
if (remoteXactState.remoteNodeStatus[i] == RXACT_NODE_ABORT_SENT)
{
if (connections[i]->error)
{
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_ABORT_FAILED;
if (remoteXactState.status != RXACT_PART_ABORTED)
remoteXactState.status = RXACT_ABORT_FAILED;
elog(LOG, "Failed to ABORT at node %d\nDetail: %s",
connections[i]->nodeoid, connections[i]->error);
}
else
{
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_ABORTED;
if (remoteXactState.status == RXACT_ABORT_FAILED)
remoteXactState.status = RXACT_PART_ABORTED;
}
}
}
}
if (result)
{
if (combiner)
pgxc_node_report_error(combiner);
else
elog(LOG, "Failed to ABORT an implicitly PREPARED "
"transaction - result %d", result);
}
/*
* Don't ereport because we might already be in abort processing and any
* error at this point can lead to infinite recursion
*
* XXX How do we handle errors reported by internal functions used to
* communicate with remote nodes ?
*/
if (remoteXactState.status == RXACT_ABORT_FAILED ||
remoteXactState.status == RXACT_PART_ABORTED)
elog(LOG, "Failed to ABORT an implicitly PREPARED transaction "
"status - %d", remoteXactState.status);
else
remoteXactState.status = RXACT_ABORTED;
return;
}
/*
* Begin COPY command
* The copy_connections array must have room for the requested number of items.
* The function cannot deal with a mix of coordinators and datanodes.
*/
PGXCNodeHandle**
pgxcNodeCopyBegin(const char *query, List *nodelist, Snapshot snapshot, char node_type)
{
int i;
int default_node_count = (node_type == PGXC_NODE_DATANODE) ?
NumDataNodes : NumCoords;
int conn_count = list_length(nodelist) == 0 ? default_node_count : list_length(nodelist);
struct timeval *timeout = NULL;
PGXCNodeAllHandles *pgxc_handles;
PGXCNodeHandle **connections;
PGXCNodeHandle **copy_connections;
ListCell *nodeitem;
bool need_tran_block;
GlobalTransactionId gxid;
RemoteQueryState *combiner;
Assert(node_type == PGXC_NODE_DATANODE || node_type == PGXC_NODE_COORDINATOR);
if (conn_count == 0)
return NULL;
/* Get needed Datanode connections */
if (node_type == PGXC_NODE_DATANODE)
{
pgxc_handles = get_handles(nodelist, NULL, false);
connections = pgxc_handles->datanode_handles;
}
else
{
pgxc_handles = get_handles(NULL, nodelist, true);
connections = pgxc_handles->coord_handles;
}
if (!connections)
return NULL;
/*
* If more than one node is involved or if we are already in a
* transaction block, we must run the remote statements in a transaction block
*/
need_tran_block = (conn_count > 1) || (TransactionBlockStatusCode() == 'T');
elog(DEBUG1, "conn_count = %d, need_tran_block = %s", conn_count,
need_tran_block ? "true" : "false");
/*
* We need to be able to quickly find a connection handle for a specified node
* number, so store connections in an array where the index is node-1.
* Unused items in the array should be NULL
*/
copy_connections = (PGXCNodeHandle **) palloc0(default_node_count * sizeof(PGXCNodeHandle *));
i = 0;
foreach(nodeitem, nodelist)
copy_connections[lfirst_int(nodeitem)] = connections[i++];
gxid = GetCurrentTransactionId();
if (!GlobalTransactionIdIsValid(gxid))
{
pfree_pgxc_all_handles(pgxc_handles);
pfree(copy_connections);
return NULL;
}
/* Start transaction on connections where it is not started */
if (pgxc_node_begin(conn_count, connections, gxid, need_tran_block, false, node_type))
{
pfree_pgxc_all_handles(pgxc_handles);
pfree(copy_connections);
return NULL;
}
/* Send query to nodes */
for (i = 0; i < conn_count; i++)
{
if (connections[i]->state == DN_CONNECTION_STATE_QUERY)
BufferConnection(connections[i]);
if (snapshot && pgxc_node_send_snapshot(connections[i], snapshot))
{
add_error_message(connections[i], "Can not send request");
pfree_pgxc_all_handles(pgxc_handles);
pfree(copy_connections);
return NULL;
}
if (pgxc_node_send_query(connections[i], query) != 0)
{
add_error_message(connections[i], "Can not send request");
pfree_pgxc_all_handles(pgxc_handles);
pfree(copy_connections);
return NULL;
}
}
/*
* We are expecting a CopyIn response, but do not want to send it to the
* client; the caller should take care of this, because here we do not know
* whether the client runs a console or file copy
*/
combiner = CreateResponseCombiner(conn_count, COMBINE_TYPE_NONE);
/* Receive responses */
if (pgxc_node_receive_responses(conn_count, connections, timeout, combiner)
|| !ValidateAndCloseCombiner(combiner))
{
pgxcNodeCopyFinish(connections, -1, COMBINE_TYPE_NONE, PGXC_NODE_DATANODE);
pfree(connections);
pfree(copy_connections);
return NULL;
}
pfree(connections);
return copy_connections;
}
/*
* Send a data row to the specified nodes
*/
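/*
* Wire format note (as built below): each row is sent as a CopyData message:
* a 'd' byte, a 4-byte length in network byte order covering the length field,
* the row data and the trailing newline, followed by the row itself and '\n'.
*/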
int
DataNodeCopyIn(char *data_row, int len, ExecNodes *exec_nodes, PGXCNodeHandle** copy_connections)
{
PGXCNodeHandle *primary_handle = NULL;
ListCell *nodeitem;
/* size + data row + \n */
int msgLen = 4 + len + 1;
int nLen = htonl(msgLen);
if (exec_nodes->primarynodelist)
{
primary_handle = copy_connections[lfirst_int(list_head(exec_nodes->primarynodelist))];
}
if (primary_handle)
{
if (primary_handle->state == DN_CONNECTION_STATE_COPY_IN)
{
/* precalculate to speed up access */
int bytes_needed = primary_handle->outEnd + 1 + msgLen;
/* flush buffer if it is almost full */
if (bytes_needed > COPY_BUFFER_SIZE)
{
/* First check whether the Datanode has sent an error message */
int read_status = pgxc_node_read_data(primary_handle, true);
if (read_status == EOF || read_status < 0)
{
add_error_message(primary_handle, "failed to read data from Datanode");
return EOF;
}
if (primary_handle->inStart < primary_handle->inEnd)
{
RemoteQueryState *combiner = CreateResponseCombiner(1, COMBINE_TYPE_NONE);
handle_response(primary_handle, combiner);
if (!ValidateAndCloseCombiner(combiner))
return EOF;
}
if (DN_CONNECTION_STATE_ERROR(primary_handle))
return EOF;
if (send_some(primary_handle, primary_handle->outEnd) < 0)
{
add_error_message(primary_handle, "failed to send data to Datanode");
return EOF;
}
else if (primary_handle->outEnd > MAX_SIZE_TO_FORCE_FLUSH)
{
int rv;
if ((rv = flushPGXCNodeHandleData(primary_handle)) != 0)
return EOF;
}
}
if (ensure_out_buffer_capacity(bytes_needed, primary_handle) != 0)
{
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
}
primary_handle->outBuffer[primary_handle->outEnd++] = 'd';
memcpy(primary_handle->outBuffer + primary_handle->outEnd, &nLen, 4);
primary_handle->outEnd += 4;
memcpy(primary_handle->outBuffer + primary_handle->outEnd, data_row, len);
primary_handle->outEnd += len;
primary_handle->outBuffer[primary_handle->outEnd++] = '\n';
}
else
{
add_error_message(primary_handle, "Invalid Datanode connection");
return EOF;
}
}
foreach(nodeitem, exec_nodes->nodeList)
{
PGXCNodeHandle *handle = copy_connections[lfirst_int(nodeitem)];
if (handle && handle->state == DN_CONNECTION_STATE_COPY_IN)
{
/* precalculate to speed up access */
int bytes_needed = handle->outEnd + 1 + msgLen;
/* flush buffer if it is almost full */
if ((primary_handle && bytes_needed > PRIMARY_NODE_WRITEAHEAD)
|| (!primary_handle && bytes_needed > COPY_BUFFER_SIZE))
{
int to_send = handle->outEnd;
/* First check whether the Datanode has sent an error message */
int read_status = pgxc_node_read_data(handle, true);
if (read_status == EOF || read_status < 0)
{
add_error_message(handle, "failed to read data from Datanode");
return EOF;
}
if (handle->inStart < handle->inEnd)
{
RemoteQueryState *combiner = CreateResponseCombiner(1, COMBINE_TYPE_NONE);
handle_response(handle, combiner);
if (!ValidateAndCloseCombiner(combiner))
return EOF;
}
if (DN_CONNECTION_STATE_ERROR(handle))
return EOF;
/*
* Allow primary node to write out data before others.
* If primary node was blocked it would not accept copy data.
* So buffer at least PRIMARY_NODE_WRITEAHEAD at the other nodes.
* If primary node is blocked and is buffering, other buffers will
* grow accordingly.
*/
if (primary_handle)
{
if (primary_handle->outEnd + PRIMARY_NODE_WRITEAHEAD < handle->outEnd)
to_send = handle->outEnd - primary_handle->outEnd - PRIMARY_NODE_WRITEAHEAD;
else
to_send = 0;
}
/*
* Try to send down buffered data if we have any
*/
if (to_send && send_some(handle, to_send) < 0)
{
add_error_message(handle, "failed to send data to Datanode");
return EOF;
}
else if (handle->outEnd > MAX_SIZE_TO_FORCE_FLUSH)
{
int rv;
if ((rv = flushPGXCNodeHandleData(handle)) != 0)
return EOF;
}
}
if (ensure_out_buffer_capacity(bytes_needed, handle) != 0)
{
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
}
handle->outBuffer[handle->outEnd++] = 'd';
memcpy(handle->outBuffer + handle->outEnd, &nLen, 4);
handle->outEnd += 4;
memcpy(handle->outBuffer + handle->outEnd, data_row, len);
handle->outEnd += len;
handle->outBuffer[handle->outEnd++] = '\n';
}
else
{
add_error_message(handle, "Invalid Datanode connection");
return EOF;
}
}
return 0;
}
uint64
DataNodeCopyOut(ExecNodes *exec_nodes,
PGXCNodeHandle** copy_connections,
TupleDesc tupleDesc,
FILE* copy_file,
Tuplestorestate *store,
RemoteCopyType remoteCopyType)
{
RemoteQueryState *combiner;
int conn_count = list_length(exec_nodes->nodeList) == 0 ? NumDataNodes : list_length(exec_nodes->nodeList);
ListCell *nodeitem;
uint64 processed;
combiner = CreateResponseCombiner(conn_count, COMBINE_TYPE_SUM);
combiner->processed = 0;
combiner->remoteCopyType = remoteCopyType;
/*
* If there is an existing file to copy data to,
* pass it to the combiner when remote COPY output is sent back to a file.
*/
if (copy_file && remoteCopyType == REMOTE_COPY_FILE)
combiner->copy_file = copy_file;
if (store && remoteCopyType == REMOTE_COPY_TUPLESTORE)
{
combiner->tuplestorestate = store;
combiner->tuple_desc = tupleDesc;
}
foreach(nodeitem, exec_nodes->nodeList)
{
PGXCNodeHandle *handle = copy_connections[lfirst_int(nodeitem)];
int read_status = 0;
Assert(handle && handle->state == DN_CONNECTION_STATE_COPY_OUT);
/*
* H message has been consumed, continue to manage data row messages.
* Continue to read as long as there is data.
*/
while (read_status >= 0 && handle->state == DN_CONNECTION_STATE_COPY_OUT)
{
if (handle_response(handle,combiner) == RESPONSE_EOF)
{
/* read some extra data */
read_status = pgxc_node_read_data(handle, true);
if (read_status < 0)
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("unexpected EOF on datanode connection")));
else
/*
* Set proper connection status - handle_response
* has changed it to DN_CONNECTION_STATE_QUERY
*/
handle->state = DN_CONNECTION_STATE_COPY_OUT;
}
/* There is no more data that can be read from connection */
}
}
processed = combiner->processed;
if (!ValidateAndCloseCombiner(combiner))
{
if (!PersistentConnections)
release_handles();
pfree(copy_connections);
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("Unexpected response from the Datanodes when combining, request type %d", combiner->request_type)));
}
return processed;
}
/*
* Finish copy process on all connections
*/
void
pgxcNodeCopyFinish(PGXCNodeHandle** copy_connections, int primary_dn_index,
CombineType combine_type, char node_type)
{
int i;
RemoteQueryState *combiner = NULL;
bool error = false;
struct timeval *timeout = NULL; /* wait forever */
int default_conn_count = (node_type == PGXC_NODE_DATANODE) ? NumDataNodes : NumCoords;
PGXCNodeHandle **connections = palloc0(sizeof(PGXCNodeHandle *) * default_conn_count);
PGXCNodeHandle *primary_handle = NULL;
int conn_count = 0;
Assert(node_type == PGXC_NODE_DATANODE || node_type == PGXC_NODE_COORDINATOR);
for (i = 0; i < default_conn_count; i++)
{
PGXCNodeHandle *handle = copy_connections[i];
if (!handle)
continue;
if (i == primary_dn_index)
primary_handle = handle;
else
connections[conn_count++] = handle;
}
if (primary_handle)
{
error = true;
if (primary_handle->state == DN_CONNECTION_STATE_COPY_IN || primary_handle->state == DN_CONNECTION_STATE_COPY_OUT)
error = DataNodeCopyEnd(primary_handle, false);
combiner = CreateResponseCombiner(conn_count + 1, combine_type);
error = (pgxc_node_receive_responses(1, &primary_handle, timeout, combiner) != 0) || error;
}
for (i = 0; i < conn_count; i++)
{
PGXCNodeHandle *handle = connections[i];
error = true;
if (handle->state == DN_CONNECTION_STATE_COPY_IN || handle->state == DN_CONNECTION_STATE_COPY_OUT)
error = DataNodeCopyEnd(handle, false);
}
if (!combiner)
combiner = CreateResponseCombiner(conn_count, combine_type);
error = (pgxc_node_receive_responses(conn_count, connections, timeout, combiner) != 0) || error;
if (!ValidateAndCloseCombiner(combiner) || error)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Error while running COPY")));
}
/*
* End copy process on a connection
*/
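/*
* Wire format note (as built below): sends a 4-byte length-only message,
* 'c' (CopyDone) on success or 'f' (CopyFail) when is_error is true, and
* flushes it so the Datanode replies immediately.
*/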
bool
DataNodeCopyEnd(PGXCNodeHandle *handle, bool is_error)
{
int nLen = htonl(4);
if (handle == NULL)
return true;
/* msgType + msgLen */
if (ensure_out_buffer_capacity(handle->outEnd + 1 + 4, handle) != 0)
return true;
if (is_error)
handle->outBuffer[handle->outEnd++] = 'f';
else
handle->outBuffer[handle->outEnd++] = 'c';
memcpy(handle->outBuffer + handle->outEnd, &nLen, 4);
handle->outEnd += 4;
/* We need response right away, so send immediately */
if (pgxc_node_flush(handle) < 0)
return true;
return false;
}
RemoteQueryState *
ExecInitRemoteQuery(RemoteQuery *node, EState *estate, int eflags)
{
RemoteQueryState *remotestate;
TupleDesc scan_type;
/* RemoteQuery node is the leaf node in the plan tree, just like seqscan */
Assert(innerPlan(node) == NULL);
Assert(outerPlan(node) == NULL);
remotestate = CreateResponseCombiner(0, node->combine_type);
remotestate->ss.ps.plan = (Plan *) node;
remotestate->ss.ps.state = estate;
/*
* Miscellaneous initialisation
*
* create expression context for node
*/
ExecAssignExprContext(estate, &remotestate->ss.ps);
/* Initialise child expressions */
remotestate->ss.ps.targetlist = (List *)
ExecInitExpr((Expr *) node->scan.plan.targetlist,
(PlanState *) remotestate);
remotestate->ss.ps.qual = (List *)
ExecInitExpr((Expr *) node->scan.plan.qual,
(PlanState *) remotestate);
/* check for unsupported flags */
Assert(!(eflags & (EXEC_FLAG_MARK)));
/* Extract the eflags bits that are relevant for tuplestorestate */
remotestate->eflags = (eflags & (EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD));
/* We anyways have to support REWIND for ReScan */
remotestate->eflags |= EXEC_FLAG_REWIND;
remotestate->eof_underlying = false;
remotestate->tuplestorestate = NULL;
ExecInitResultTupleSlot(estate, &remotestate->ss.ps);
ExecInitScanTupleSlot(estate, &remotestate->ss);
scan_type = ExecTypeFromTL(node->base_tlist, false);
ExecAssignScanType(&remotestate->ss, scan_type);
remotestate->ss.ps.ps_TupFromTlist = false;
/*
* If there are parameters supplied, get them into a form to be sent to the
* Datanodes with a bind message. We should not have done this before.
*/
SetDataRowForExtParams(estate->es_param_list_info, remotestate);
/*
* Initialize result tuple type and projection info.
*/
ExecAssignResultTypeFromTL(&remotestate->ss.ps);
ExecAssignScanProjectionInfo(&remotestate->ss);
if (node->rq_save_command_id)
{
/* Save command id to be used in some special cases */
remotestate->rqs_cmd_id = GetCurrentCommandId(false);
}
return remotestate;
}
/*
* Get Node connections depending on the connection type:
* Datanodes Only, Coordinators only or both types
*/
static PGXCNodeAllHandles *
get_exec_connections(RemoteQueryState *planstate,
ExecNodes *exec_nodes,
RemoteQueryExecType exec_type)
{
List *nodelist = NIL;
List *primarynode = NIL;
List *coordlist = NIL;
PGXCNodeHandle *primaryconnection;
int co_conn_count, dn_conn_count;
bool is_query_coord_only = false;
PGXCNodeAllHandles *pgxc_handles = NULL;
/*
* If query is launched only on Coordinators, we have to inform get_handles
* not to ask for Datanode connections even if list of Datanodes is NIL.
*/
if (exec_type == EXEC_ON_COORDS)
is_query_coord_only = true;
if (exec_nodes)
{
if (exec_nodes->en_expr)
{
/* determine target Datanodes at execution time */
bool isnull;
ExprState *estate = ExecInitExpr(exec_nodes->en_expr,
(PlanState *) planstate);
Datum partvalue = ExecEvalExpr(estate,
planstate->ss.ps.ps_ExprContext,
&isnull,
NULL);
RelationLocInfo *rel_loc_info = GetRelationLocInfo(exec_nodes->en_relid);
/* PGXCTODO what is the type of partvalue here */
ExecNodes *nodes = GetRelationNodes(rel_loc_info,
partvalue,
isnull,
exprType((Node *) exec_nodes->en_expr),
exec_nodes->accesstype);
/*
* en_expr is set by pgxc_set_en_expr only for distributed
* relations while planning DMLs, hence a select for update
* on a replicated table here is an assertion
*/
Assert(!(exec_nodes->accesstype == RELATION_ACCESS_READ_FOR_UPDATE &&
IsRelationReplicated(rel_loc_info)));
if (nodes)
{
nodelist = nodes->nodeList;
primarynode = nodes->primarynodelist;
pfree(nodes);
}
FreeRelationLocInfo(rel_loc_info);
}
else if (OidIsValid(exec_nodes->en_relid))
{
RelationLocInfo *rel_loc_info = GetRelationLocInfo(exec_nodes->en_relid);
ExecNodes *nodes = GetRelationNodes(rel_loc_info, 0, true, InvalidOid, exec_nodes->accesstype);
/*
* en_relid is set only for DMLs, hence a select for update on a
* replicated table here is an assertion
*/
Assert(!(exec_nodes->accesstype == RELATION_ACCESS_READ_FOR_UPDATE &&
IsRelationReplicated(rel_loc_info)));
/* Use the obtained list for given table */
if (nodes)
nodelist = nodes->nodeList;
/*
* Special handling for ROUND ROBIN distributed tables. The target
* node must be determined at the execution time
*/
if (rel_loc_info->locatorType == LOCATOR_TYPE_RROBIN && nodes)
{
nodelist = nodes->nodeList;
primarynode = nodes->primarynodelist;
}
else if (nodes)
{
if (exec_type == EXEC_ON_DATANODES || exec_type == EXEC_ON_ALL_NODES)
{
nodelist = exec_nodes->nodeList;
primarynode = exec_nodes->primarynodelist;
}
}
if (nodes)
pfree(nodes);
FreeRelationLocInfo(rel_loc_info);
}
else
{
if (exec_type == EXEC_ON_DATANODES || exec_type == EXEC_ON_ALL_NODES)
nodelist = exec_nodes->nodeList;
else if (exec_type == EXEC_ON_COORDS)
coordlist = exec_nodes->nodeList;
primarynode = exec_nodes->primarynodelist;
}
}
/* Set node list and DN number */
if (list_length(nodelist) == 0 &&
(exec_type == EXEC_ON_ALL_NODES ||
exec_type == EXEC_ON_DATANODES))
{
/* Primary connection is included in this number of connections if it exists */
dn_conn_count = NumDataNodes;
}
else
{
if (exec_type == EXEC_ON_DATANODES || exec_type == EXEC_ON_ALL_NODES)
{
if (primarynode)
dn_conn_count = list_length(nodelist) + 1;
else
dn_conn_count = list_length(nodelist);
}
else
dn_conn_count = 0;
}
/* Set Coordinator list and Coordinator number */
if ((list_length(nodelist) == 0 && exec_type == EXEC_ON_ALL_NODES) ||
(list_length(coordlist) == 0 && exec_type == EXEC_ON_COORDS))
{
coordlist = GetAllCoordNodes();
co_conn_count = list_length(coordlist);
}
else
{
if (exec_type == EXEC_ON_COORDS)
co_conn_count = list_length(coordlist);
else
co_conn_count = 0;
}
/* Get other connections (non-primary) */
pgxc_handles = get_handles(nodelist, coordlist, is_query_coord_only);
if (!pgxc_handles)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Could not obtain connection from pool")));
/* Get connection for primary node, if used */
if (primarynode)
{
/* Let's assume primary connection is always a Datanode connection for the moment */
PGXCNodeAllHandles *pgxc_conn_res;
pgxc_conn_res = get_handles(primarynode, NULL, false);
/* primary connection is unique */
primaryconnection = pgxc_conn_res->datanode_handles[0];
pfree(pgxc_conn_res);
if (!primaryconnection)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Could not obtain connection from pool")));
pgxc_handles->primary_handle = primaryconnection;
}
/* Depending on the execution type, we still need to save the initial node counts */
pgxc_handles->dn_conn_count = dn_conn_count;
pgxc_handles->co_conn_count = co_conn_count;
return pgxc_handles;
}
static bool
pgxc_start_command_on_connection(PGXCNodeHandle *connection,
RemoteQueryState *remotestate,
Snapshot snapshot)
{
CommandId cid;
RemoteQuery *step = (RemoteQuery *) remotestate->ss.ps.plan;
if (connection->state == DN_CONNECTION_STATE_QUERY)
BufferConnection(connection);
/*
* Scan descriptor would be valid and would contain a valid snapshot
* in cases when we need to send an out-of-order command id to the data node,
* e.g. in case of a fetch
*/
if (remotestate->cursor != NULL &&
remotestate->cursor[0] != '\0' &&
remotestate->ss.ss_currentScanDesc != NULL &&
remotestate->ss.ss_currentScanDesc->rs_snapshot != NULL)
cid = remotestate->ss.ss_currentScanDesc->rs_snapshot->curcid;
else
{
/*
* An insert into a child by selecting from its parent gets translated
* into a multi-statement transaction in which first we select from parent
* and then insert into child, then select from child and insert into child.
* The select from child should not see the just inserted rows.
* The command id of the select from child is therefore set to
* the command id of the insert-select query saved earlier.
* Similarly a WITH query that updates a table in main query
* and inserts a row in the same table in the WITH query
* needs to make sure that the row inserted by the WITH query does
* not get updated by the main query.
*/
if (step->exec_nodes->accesstype == RELATION_ACCESS_READ && step->rq_save_command_id)
cid = remotestate->rqs_cmd_id;
else
cid = GetCurrentCommandId(false);
}
if (pgxc_node_send_cmd_id(connection, cid) < 0 )
return false;
if (snapshot && pgxc_node_send_snapshot(connection, snapshot))
return false;
if (step->statement || step->cursor || remotestate->rqs_num_params)
{
/* need to use Extended Query Protocol */
int fetch = 0;
bool prepared = false;
bool send_desc = false;
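/*
* A row description (Describe) is requested whenever the step can
* return rows: a base target list, a plain read access, or row marks.
*/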
if (step->base_tlist != NULL ||
step->exec_nodes->accesstype == RELATION_ACCESS_READ ||
step->has_row_marks)
send_desc = true;
/* If a prepared statement is referenced, see if it already exists on the node */
if (step->statement)
prepared = ActivateDatanodeStatementOnNode(step->statement,
PGXCNodeGetNodeId(connection->nodeoid,
PGXC_NODE_DATANODE));
/*
* execute and fetch rows only if they will be consumed
* immediately by the sorter
*/
if (step->cursor)
fetch = 1;
if (pgxc_node_send_query_extended(connection,
prepared ? NULL : step->sql_statement,
step->statement,
step->cursor,
remotestate->rqs_num_params,
remotestate->rqs_param_types,
remotestate->paramval_len,
remotestate->paramval_data,
send_desc,
fetch) != 0)
return false;
}
else
{
if (pgxc_node_send_query(connection, step->sql_statement) != 0)
return false;
}
return true;
}
/*
* IsReturningDMLOnReplicatedTable
*
* This function returns true if the passed RemoteQuery
* 1. Operates on a table that is replicated
* 2. Represents a DML
* 3. Has a RETURNING clause in it
*
* A non-null base_tlist in the passed RemoteQuery
* means that the DML has a RETURNING clause.
*/
static bool
IsReturningDMLOnReplicatedTable(RemoteQuery *rq)
{
if (IsExecNodesReplicated(rq->exec_nodes) &&
rq->base_tlist != NULL && /* Means DML has RETURNING */
(rq->exec_nodes->accesstype == RELATION_ACCESS_UPDATE ||
rq->exec_nodes->accesstype == RELATION_ACCESS_INSERT))
return true;
return false;
}
void
do_query(RemoteQueryState *node)
{
RemoteQuery *step = (RemoteQuery *) node->ss.ps.plan;
TupleTableSlot *scanslot = node->ss.ss_ScanTupleSlot;
bool force_autocommit = step->force_autocommit;
bool is_read_only = step->read_only;
GlobalTransactionId gxid = InvalidGlobalTransactionId;
Snapshot snapshot = GetActiveSnapshot();
PGXCNodeHandle **connections = NULL;
PGXCNodeHandle *primaryconnection = NULL;
int i;
int regular_conn_count = 0;
bool need_tran_block;
PGXCNodeAllHandles *pgxc_connections;
/*
* A Postgres-XC node cannot run transactions while in recovery as
* this operation needs transaction IDs. This is more a safety guard than anything else.
*/
if (RecoveryInProgress())
elog(ERROR, "cannot run transaction to remote nodes during recovery");
/*
* Remember if the remote query is accessing a temp object
*
* !! PGXC TODO Check if the is_temp flag is propagated correctly when a
* remote join is reduced
*/
if (step->is_temp)
ExecSetTempObjectIncluded();
/*
* Consider a test case
*
* create table rf(a int, b int) distributed by replication;
* insert into rf values(1,2),(3,4) returning ctid;
*
* While inserting the first row do_query works fine, receives the returned
* row from the first connection and returns it. In this iteration the other
* datanodes also had returned rows but they have not yet been read from the
* network buffers. On Next Iteration do_query does not enter the data
* receiving loop because it finds that node->connections is not null.
* It is therefore required to set node->connections to null here.
*/
if (node->conn_count == 0)
node->connections = NULL;
/*
* Get connections for Datanodes only, utilities and DDLs
* are launched in ExecRemoteUtility
*/
pgxc_connections = get_exec_connections(node, step->exec_nodes, step->exec_type);
if (step->exec_type == EXEC_ON_DATANODES)
{
connections = pgxc_connections->datanode_handles;
regular_conn_count = pgxc_connections->dn_conn_count;
}
else if (step->exec_type == EXEC_ON_COORDS)
{
connections = pgxc_connections->coord_handles;
regular_conn_count = pgxc_connections->co_conn_count;
}
primaryconnection = pgxc_connections->primary_handle;
/* Primary connection is counted separately */
if (primaryconnection)
regular_conn_count--;
pfree(pgxc_connections);
/*
* We save only regular connections; by the time we exit the function
* we have finished with the primary connection and deal only with regular
* connections on subsequent invocations
*/
node->node_count = regular_conn_count;
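/* Read-only and autocommit statements never need an explicit transaction block */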
if (force_autocommit || is_read_only)
need_tran_block = false;
else
need_tran_block = true;
/*
* XXX We are forcing a transaction block for every non-read-only remote query. We can
* get smarter here and avoid a transaction block if all of the following
* conditions are true:
*
* - there is only one writer node involved in the transaction (including
* the local node)
* - the statement being executed on the remote writer node is a single
* step statement. IOW, Coordinator must not send multiple queries to the
* remote node.
*
* Once we have leak-proof mechanism to enforce these constraints, we
* should relax the transaction block requirement.
*
need_tran_block = (!is_read_only && total_conn_count > 1) ||
(TransactionBlockStatusCode() == 'T');
*/
elog(DEBUG1, "has primary = %s, regular_conn_count = %d, "
"need_tran_block = %s", primaryconnection ? "true" : "false",
regular_conn_count, need_tran_block ? "true" : "false");
gxid = GetCurrentTransactionId();
if (!GlobalTransactionIdIsValid(gxid))
{
if (primaryconnection)
pfree(primaryconnection);
pfree(connections);
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to get next transaction ID")));
}
/* See if we have a primary node, execute on it first before the others */
if (primaryconnection)
{
if (pgxc_node_begin(1, &primaryconnection, gxid, need_tran_block,
is_read_only, PGXC_NODE_DATANODE))
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Could not begin transaction on primary Datanode.")));
if (!pgxc_start_command_on_connection(primaryconnection, node, snapshot))
{
pfree(connections);
pfree(primaryconnection);
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to send command to Datanodes")));
}
Assert(node->combine_type == COMBINE_TYPE_SAME);
/* Make sure the command is completed on the primary node */
while (true)
{
int res;
if (pgxc_node_receive(1, &primaryconnection, NULL))
break;
res = handle_response(primaryconnection, node);
if (res == RESPONSE_COMPLETE)
break;
else if (res == RESPONSE_TUPDESC)
{
ExecSetSlotDescriptor(scanslot, node->tuple_desc);
/*
* Now tuple table slot is responsible for freeing the
* descriptor
*/
node->tuple_desc = NULL;
/*
* RemoteQuery node doesn't support backward scan, so
* randomAccess is false; nor do we want this tuple store to
* persist across transactions.
*/
node->tuplestorestate = tuplestore_begin_heap(false, false, work_mem);
tuplestore_set_eflags(node->tuplestorestate, node->eflags);
}
else if (res == RESPONSE_DATAROW)
{
pfree(node->currentRow.msg);
node->currentRow.msg = NULL;
node->currentRow.msglen = 0;
node->currentRow.msgnode = 0;
continue;
}
else if (res == RESPONSE_EOF)
continue;
else
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Unexpected response from Datanode")));
}
/* report error if any */
pgxc_node_report_error(node);
}
for (i = 0; i < regular_conn_count; i++)
{
if (pgxc_node_begin(1, &connections[i], gxid, need_tran_block,
is_read_only, PGXC_NODE_DATANODE))
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Could not begin transaction on Datanodes.")));
if (!pgxc_start_command_on_connection(connections[i], node, snapshot))
{
pfree(connections);
if (primaryconnection)
pfree(primaryconnection);
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to send command to Datanodes")));
}
connections[i]->combiner = node;
}
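/*
* If the step opens a cursor, remember the connections used so the
* cursor can later be fetched from and closed on the same nodes.
*/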
if (step->cursor)
{
node->cursor_count = regular_conn_count;
node->cursor_connections = (PGXCNodeHandle **) palloc(regular_conn_count * sizeof(PGXCNodeHandle *));
memcpy(node->cursor_connections, connections, regular_conn_count * sizeof(PGXCNodeHandle *));
}
/*
* Stop if all commands are completed or we got a data row and
* initialized state node for subsequent invocations
*/
while (regular_conn_count > 0 && node->connections == NULL)
{
int i = 0;
if (pgxc_node_receive(regular_conn_count, connections, NULL))
{
pfree(connections);
if (primaryconnection)
pfree(primaryconnection);
if (node->cursor_connections)
pfree(node->cursor_connections);
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to read response from Datanodes")));
}
/*
* Handle input from the Datanodes.
* If we got a RESPONSE_DATAROW we can break handling to wrap
* it into a tuple and return. Handling will be continued upon
* subsequent invocations.
* If we got RESPONSE_COMPLETE, we exclude the connection from the list. We do not
* expect more input from it. In case of a non-SELECT query we quit
* the loop when all nodes finish their work and send ReadyForQuery
* with empty connections array.
* If we got EOF, move to the next connection, will receive more
* data on the next iteration.
*/
while (i < regular_conn_count)
{
int res = handle_response(connections[i], node);
if (res == RESPONSE_EOF)
{
i++;
}
else if (res == RESPONSE_COMPLETE)
{
if (i < --regular_conn_count)
connections[i] = connections[regular_conn_count];
}
else if (res == RESPONSE_TUPDESC)
{
ExecSetSlotDescriptor(scanslot, node->tuple_desc);
/*
* Now tuple table slot is responsible for freeing the
* descriptor
*/
node->tuple_desc = NULL;
/*
* RemoteQuery node doesn't support backward scan, so
* randomAccess is false; nor do we want this tuple store to
* persist across transactions.
*/
node->tuplestorestate = tuplestore_begin_heap(false, false, work_mem);
tuplestore_set_eflags(node->tuplestorestate, node->eflags);
}
else if (res == RESPONSE_DATAROW)
{
/*
* Got first data row, quit the loop
*/
node->connections = connections;
node->conn_count = regular_conn_count;
node->current_conn = i;
break;
}
else
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Unexpected response from Datanode")));
}
/* report error if any */
pgxc_node_report_error(node);
}
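/*
* For a cursor, restore the full set of cursor connections so that
* subsequent fetches can address all of them, not just those still pending.
*/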
if (node->cursor_count)
{
node->conn_count = node->cursor_count;
memcpy(connections, node->cursor_connections, node->cursor_count * sizeof(PGXCNodeHandle *));
node->connections = connections;
}
}
/*
* ExecRemoteQuery
* Wrapper around the main RemoteQueryNext() function. This
* wrapper provides materialization of the result returned by
* RemoteQueryNext
*/
TupleTableSlot *
ExecRemoteQuery(RemoteQueryState *node)
{
return ExecScan(&(node->ss),
(ExecScanAccessMtd) RemoteQueryNext,
(ExecScanRecheckMtd) RemoteQueryRecheck);
}
/*
* RemoteQueryRecheck -- remote query routine to recheck a tuple in EvalPlanQual
*/
static bool
RemoteQueryRecheck(RemoteQueryState *node, TupleTableSlot *slot)
{
/*
* Note that unlike IndexScan, RemoteQueryScan never uses keys in heap_beginscan
* (and this is very bad) - so here we do not check whether the keys are satisfied.
*/
return true;
}
/*
* Execute step of PGXC plan.
* The step specifies a command to be executed on specified nodes.
* On first invocation connections to the Datanodes are initialized and
* command is executed. Further, as well as within subsequent invocations,
* responses are received until step is completed or there is a tuple to emit.
* If there is a tuple it is returned, otherwise NULL is returned. A NULL result
* from the function indicates a completed step.
* The function returns at most one tuple per invocation.
*/
static TupleTableSlot *
RemoteQueryNext(ScanState *scan_node)
{
RemoteQueryState *node = (RemoteQueryState *)scan_node;
TupleTableSlot *scanslot = scan_node->ss_ScanTupleSlot;
RemoteQuery *rq = (RemoteQuery*) node->ss.ps.plan;
EState *estate = node->ss.ps.state;
/*
* Initialize tuples processed to 0, to make sure we don't re-use the
* values from the earlier iteration of RemoteQueryNext(). For an FQS'ed
* DML returning query, it may not get updated for subsequent calls,
* because there won't be a HandleCommandComplete() call to update this
* field.
*/
node->rqs_processed = 0;
if (!node->query_Done)
{
/* Fire BEFORE STATEMENT triggers just before the query execution */
pgxc_rq_fire_bstriggers(node);
do_query(node);
node->query_Done = true;
}
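/*
* Close any update cursor left over from a previous pass on all the
* Datanodes before fetching further rows.
*/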
if (node->update_cursor)
{
PGXCNodeAllHandles *all_dn_handles = get_exec_connections(node, NULL, EXEC_ON_DATANODES);
close_node_cursors(all_dn_handles->datanode_handles,
all_dn_handles->dn_conn_count,
node->update_cursor);
pfree(node->update_cursor);
node->update_cursor = NULL;
pfree_pgxc_all_handles(all_dn_handles);
}
else if(node->tuplestorestate)
{
/*
* If we are not at the end of the tuplestore, try
* to fetch a tuple from tuplestore.
*/
Tuplestorestate *tuplestorestate = node->tuplestorestate;
bool eof_tuplestore = tuplestore_ateof(tuplestorestate);
/*
* If we can fetch another tuple from the tuplestore, return it.
*/
if (!eof_tuplestore)
{
/* RemoteQuery node doesn't support backward scans */
if(!tuplestore_gettupleslot(tuplestorestate, true, false, scanslot))
eof_tuplestore = true;
}
/*
* Consider a test case
*
* create table ta1 (v1 int, v2 int);
* insert into ta1 values(1,2),(2,3),(3,4);
*
* create table ta2 (v1 int, v2 int);
* insert into ta2 values(1,2),(2,3),(3,4);
*
* select t1.ctid, t2.ctid,* from ta1 t1, ta2 t2
* where t2.v2<=3 order by t1.v1;
* ctid | ctid | v1 | v2 | v1 | v2
* -------+-------+----+----+----+----
* Row_1 (0,1) | (0,1) | 1 | 2 | 1 | 2
* Row_2 (0,1) | (0,2) | 1 | 2 | 2 | 3
* Row_3 (0,2) | (0,1) | 2 | 3 | 1 | 2
* Row_4 (0,2) | (0,2) | 2 | 3 | 2 | 3
* Row_5 (0,1) | (0,1) | 3 | 4 | 1 | 2
* Row_6 (0,1) | (0,2) | 3 | 4 | 2 | 3
* (6 rows)
*
* Note that in the resulting join, we are getting one row of ta1 twice,
* as shown by the ctid's in the results. Now consider this update
*
* update ta1 t1 set v2=t1.v2+10 from ta2 t2
* where t2.v2<=3 returning t1.ctid,t1.v1 t1_v1, t1.v2 t1_v2;
*
* The first iteration of the update runs for Row_1, succeeds and
* updates its ctid to say (0,3). In the second iteration for Row_2,
* since the ctid of the row has already changed, fails to update any
* row and hence do_query does not return any tuple. The FetchTuple
* call in RemoteQueryNext hence fails and eof_underlying is set to true.
* However in the third iteration for Row_3, the update succeeds and
* returns a row, but since the eof_underlying is already set to true,
* the RemoteQueryNext does not bother calling FetchTuple, we therefore
* do not get more than one row returned as a result of the update
* returning query. It is therefore required in RemoteQueryNext to call
* FetchTuple in case do_query has copied a row in node->currentRow.msg.
* Also we have to reset the eof_underlying flag every time
* FetchTuple succeeds to clear any previously set status.
*/
if (eof_tuplestore &&
(!node->eof_underlying ||
(node->currentRow.msg != NULL)))
{
/*
* If tuplestore has reached its end but the underlying RemoteQueryNext() hasn't
* finished yet, try to fetch another row.
*/
if (FetchTuple(node, scanslot))
{
/* See comments a couple of lines above */
node->eof_underlying = false;
/*
* Append a copy of the returned tuple to tuplestore. NOTE: because
* the tuplestore is certainly in EOF state, its read position will
* move forward over the added tuple. This is what we want.
*/
if (tuplestorestate && !TupIsNull(scanslot))
tuplestore_puttupleslot(tuplestorestate, scanslot);
}
else
node->eof_underlying = true;
}
if (eof_tuplestore && node->eof_underlying)
ExecClearTuple(scanslot);
}
else
ExecClearTuple(scanslot);
/* report error if any */
pgxc_node_report_error(node);
/*
* Now we know the query is successful. Fire AFTER STATEMENT triggers. Make
* sure this is the last iteration of the query. If an FQS query has
* RETURNING clause, this function can be called multiple times until we
* return NULL.
*/
if (TupIsNull(scanslot))
pgxc_rq_fire_astriggers(node);
/*
* If it's an FQSed DML query for which command tag is to be set,
* then update estate->es_processed. For other queries, the standard
* executor takes care of it; namely, in ExecModifyTable for DML queries
* and ExecutePlan for SELECT queries.
*/
if (rq->remote_query->canSetTag &&
!rq->rq_params_internal &&
(rq->remote_query->commandType == CMD_INSERT ||
rq->remote_query->commandType == CMD_UPDATE ||
rq->remote_query->commandType == CMD_DELETE))
estate->es_processed += node->rqs_processed;
return scanslot;
}
/*
* End the remote query
*/
void
ExecEndRemoteQuery(RemoteQueryState *node)
{
ListCell *lc;
/* clean up the buffer */
foreach(lc, node->rowBuffer)
{
RemoteDataRow dataRow = (RemoteDataRow) lfirst(lc);
pfree(dataRow->msg);
}
list_free_deep(node->rowBuffer);
node->current_conn = 0;
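/*
* Drain any remaining responses so the connections are left in a
* consistent state before they are released.
*/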
while (node->conn_count > 0)
{
int res;
PGXCNodeHandle *conn = node->connections[node->current_conn];
/* throw away message */
if (node->currentRow.msg)
{
pfree(node->currentRow.msg);
node->currentRow.msg = NULL;
}
if (conn == NULL)
{
node->conn_count--;
continue;
}
/* no data is expected */
if (conn->state == DN_CONNECTION_STATE_IDLE ||
conn->state == DN_CONNECTION_STATE_ERROR_FATAL)
{
if (node->current_conn < --node->conn_count)
node->connections[node->current_conn] = node->connections[node->conn_count];
continue;
}
res = handle_response(conn, node);
if (res == RESPONSE_EOF)
{
struct timeval timeout;
timeout.tv_sec = END_QUERY_TIMEOUT;
timeout.tv_usec = 0;
if (pgxc_node_receive(1, &conn, &timeout))
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to read response from Datanodes when ending query")));
}
}
if (node->tuplestorestate != NULL)
ExecClearTuple(node->ss.ss_ScanTupleSlot);
/*
* Release tuplestore resources
*/
if (node->tuplestorestate != NULL)
tuplestore_end(node->tuplestorestate);
node->tuplestorestate = NULL;
/*
* If there are active cursors close them
*/
if (node->cursor || node->update_cursor)
{
PGXCNodeAllHandles *all_handles = NULL;
PGXCNodeHandle **cur_handles;
bool bFree = false;
int nCount;
int i;
cur_handles = node->cursor_connections;
nCount = node->cursor_count;
for(i=0;i<node->cursor_count;i++)
{
if (node->cursor_connections == NULL || node->cursor_connections[i]->sock == -1)
{
bFree = true;
all_handles = get_exec_connections(node, NULL, EXEC_ON_DATANODES);
cur_handles = all_handles->datanode_handles;
nCount = all_handles->dn_conn_count;
break;
}
}
if (node->cursor)
{
close_node_cursors(cur_handles, nCount, node->cursor);
pfree(node->cursor);
node->cursor = NULL;
}
if (node->update_cursor)
{
close_node_cursors(cur_handles, nCount, node->update_cursor);
pfree(node->update_cursor);
node->update_cursor = NULL;
}
if (bFree)
pfree_pgxc_all_handles(all_handles);
}
/*
* Clean up parameters if they were set
*/
if (node->paramval_data)
{
pfree(node->paramval_data);
node->paramval_data = NULL;
node->paramval_len = 0;
}
/* Free the param types if they are newly allocated */
if (node->rqs_param_types &&
node->rqs_param_types != ((RemoteQuery*)node->ss.ps.plan)->rq_param_types)
{
pfree(node->rqs_param_types);
node->rqs_param_types = NULL;
node->rqs_num_params = 0;
}
if (node->ss.ss_currentRelation)
ExecCloseScanRelation(node->ss.ss_currentRelation);
CloseCombiner(node);
}
static void
close_node_cursors(PGXCNodeHandle **connections, int conn_count, char *cursor)
{
int i;
RemoteQueryState *combiner;
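/* Send a Close message followed by Sync for the cursor on every connection */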
for (i = 0; i < conn_count; i++)
{
if (connections[i]->state == DN_CONNECTION_STATE_QUERY)
BufferConnection(connections[i]);
if (pgxc_node_send_close(connections[i], false, cursor) != 0)
ereport(WARNING,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to close Datanode cursor")));
if (pgxc_node_send_sync(connections[i]) != 0)
ereport(WARNING,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to close Datanode cursor")));
}
combiner = CreateResponseCombiner(conn_count, COMBINE_TYPE_NONE);
while (conn_count > 0)
{
if (pgxc_node_receive(conn_count, connections, NULL))
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to close Datanode cursor")));
i = 0;
while (i < conn_count)
{
int res = handle_response(connections[i], combiner);
if (res == RESPONSE_EOF)
{
i++;
}
else if (res == RESPONSE_COMPLETE)
{
if (--conn_count > i)
connections[i] = connections[conn_count];
}
else
{
/* Unexpected response; ignore it and keep draining the remaining connections */
}
}
}
ValidateAndCloseCombiner(combiner);
}
/*
* Encode parameter values to format of DataRow message (the same format is
* used in Bind) to prepare for sending down to Datanodes.
* The data row is copied to RemoteQueryState.paramval_data.
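*
* The layout mirrors the DataRow/Bind wire format: a 16-bit parameter
* count, followed for each parameter by a 32-bit value length (-1 for
* NULL) and the textual value bytes.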
*/
void
SetDataRowForExtParams(ParamListInfo paraminfo, RemoteQueryState *rq_state)
{
StringInfoData buf;
uint16 n16;
int i;
int real_num_params = 0;
RemoteQuery *node = (RemoteQuery*) rq_state->ss.ps.plan;
/* If there are no parameters, there is no data to BIND. */
if (!paraminfo)
return;
/*
* If this query has been generated internally as a part of two-step DML
* statement, it uses only the internal parameters for input values taken
* from the source data, and it never uses external parameters. So even if
* parameters were being set externally, they won't be present in this
* statement (they might be present in the source data query). In such
* case where parameters refer to the values returned by SELECT query, the
* parameter data and parameter types would be set in SetDataRowForIntParams().
*/
if (node->rq_params_internal)
return;
Assert(!rq_state->paramval_data);
/*
* It is necessary to fetch parameters
* before looking at the output value.
*/
for (i = 0; i < paraminfo->numParams; i++)
{
ParamExternData *param;
param = ¶minfo->params[i];
if (!OidIsValid(param->ptype) && paraminfo->paramFetch != NULL)
(*paraminfo->paramFetch) (paraminfo, i + 1);
/*
* This is the last parameter found to be useful, so we need
* to include all the previous ones to keep the remote nodes
* quiet. All parameters prior to the last usable one that have no
* type available will be considered NULL entries.
*/
if (OidIsValid(param->ptype))
real_num_params = i + 1;
}
/*
* If there are no parameters available, simply leave.
* This is possible in the case of a query called through SPI
* and using no parameters.
*/
if (real_num_params == 0)
{
rq_state->paramval_data = NULL;
rq_state->paramval_len = 0;
return;
}
initStringInfo(&buf);
/* Number of parameter values */
n16 = htons(real_num_params);
appendBinaryStringInfo(&buf, (char *) &n16, 2);
/* Parameter values */
for (i = 0; i < real_num_params; i++)
{
ParamExternData *param = ¶minfo->params[i];
uint32 n32;
/*
* Parameters with no type are considered NULL and treated as integers.
* The same trick is used for dropped columns for remote DML generation.
*/
if (param->isnull || !OidIsValid(param->ptype))
{
n32 = htonl(-1);
appendBinaryStringInfo(&buf, (char *) &n32, 4);
}
else
{
Oid typOutput;
bool typIsVarlena;
Datum pval;
char *pstring;
int len;
/* Get info needed to output the value */
getTypeOutputInfo(param->ptype, &typOutput, &typIsVarlena);
/*
* If we have a toasted datum, forcibly detoast it here to avoid
* memory leakage inside the type's output routine.
*/
if (typIsVarlena)
pval = PointerGetDatum(PG_DETOAST_DATUM(param->value));
else
pval = param->value;
/* Convert Datum to string */
pstring = OidOutputFunctionCall(typOutput, pval);
/* copy data to the buffer */
len = strlen(pstring);
n32 = htonl(len);
appendBinaryStringInfo(&buf, (char *) &n32, 4);
appendBinaryStringInfo(&buf, pstring, len);
}
}
/*
* Use the parameter types from the plan if they are already known,
* otherwise infer them from the paraminfo.
*/
if (node->rq_num_params > 0)
{
/*
* Use the already known param types for BIND. Parameter types
* can be already known when the same plan is executed multiple
* times.
*/
if (node->rq_num_params != real_num_params)
elog(ERROR, "Number of user-supplied parameters does not match "
"the number of remote parameters");
rq_state->rqs_num_params = node->rq_num_params;
rq_state->rqs_param_types = node->rq_param_types;
}
else
{
rq_state->rqs_num_params = real_num_params;
rq_state->rqs_param_types = (Oid *) palloc(sizeof(Oid) * real_num_params);
for (i = 0; i < real_num_params; i++)
rq_state->rqs_param_types[i] = paraminfo->params[i].ptype;
}
/* Assign the newly allocated data row to paramval */
rq_state->paramval_data = buf.data;
rq_state->paramval_len = buf.len;
}
/* ----------------------------------------------------------------
* ExecRemoteQueryReScan
*
* Rescans the relation.
* ----------------------------------------------------------------
*/
void
ExecRemoteQueryReScan(RemoteQueryState *node, ExprContext *exprCtxt)
{
/*
* If the materialized store is not empty, just rewind the stored output.
*/
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
if (!node->tuplestorestate)
return;
tuplestore_rescan(node->tuplestorestate);
}
/*
* Execute utility statement on multiple Datanodes
* It does approximately the same as
*
* RemoteQueryState *state = ExecInitRemoteQuery(plan, estate, flags);
* Assert(TupIsNull(ExecRemoteQuery(state)));
* ExecEndRemoteQuery(state)
*
* But it does not need an EState instance and does not do some unnecessary work,
* like allocating tuple slots.
*/
void
ExecRemoteUtility(RemoteQuery *node)
{
RemoteQueryState *remotestate;
bool force_autocommit = node->force_autocommit;
RemoteQueryExecType exec_type = node->exec_type;
GlobalTransactionId gxid = InvalidGlobalTransactionId;
Snapshot snapshot = GetActiveSnapshot();
PGXCNodeAllHandles *pgxc_connections;
int co_conn_count;
int dn_conn_count;
bool need_tran_block;
ExecDirectType exec_direct_type = node->exec_direct_type;
int i;
if (!force_autocommit)
RegisterTransactionLocalNode(true);
/*
* It is possible to invoke create table with inheritance on
* temporary objects. Remember that we might have accessed a temp object
*/
if (node->is_temp)
ExecSetTempObjectIncluded();
remotestate = CreateResponseCombiner(0, node->combine_type);
pgxc_connections = get_exec_connections(NULL, node->exec_nodes, exec_type);
dn_conn_count = pgxc_connections->dn_conn_count;
co_conn_count = pgxc_connections->co_conn_count;
if (force_autocommit)
need_tran_block = false;
else
need_tran_block = true;
/* Commands launched through EXECUTE DIRECT do not need to start a transaction */
if (exec_direct_type == EXEC_DIRECT_UTILITY)
{
need_tran_block = false;
/* This check is not done when analyzing to limit dependencies */
if (IsTransactionBlock())
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
errmsg("cannot run EXECUTE DIRECT with utility inside a transaction block")));
}
gxid = GetCurrentTransactionId();
if (!GlobalTransactionIdIsValid(gxid))
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to get next transaction ID")));
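/* Run the command on the Datanodes first, if requested */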
if (exec_type == EXEC_ON_ALL_NODES || exec_type == EXEC_ON_DATANODES)
{
if (pgxc_node_begin(dn_conn_count, pgxc_connections->datanode_handles,
gxid, need_tran_block, false, PGXC_NODE_DATANODE))
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Could not begin transaction on Datanodes")));
for (i = 0; i < dn_conn_count; i++)
{
PGXCNodeHandle *conn = pgxc_connections->datanode_handles[i];
if (conn->state == DN_CONNECTION_STATE_QUERY)
BufferConnection(conn);
if (snapshot && pgxc_node_send_snapshot(conn, snapshot))
{
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to send command to Datanodes")));
}
if (pgxc_node_send_query(conn, node->sql_statement) != 0)
{
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to send command to Datanodes")));
}
}
}
if (exec_type == EXEC_ON_ALL_NODES || exec_type == EXEC_ON_COORDS)
{
if (pgxc_node_begin(co_conn_count, pgxc_connections->coord_handles,
gxid, need_tran_block, false, PGXC_NODE_COORDINATOR))
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Could not begin transaction on coordinators")));
/* Now send it to Coordinators if necessary */
for (i = 0; i < co_conn_count; i++)
{
if (snapshot && pgxc_node_send_snapshot(pgxc_connections->coord_handles[i], snapshot))
{
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to send command to coordinators")));
}
if (pgxc_node_send_query(pgxc_connections->coord_handles[i], node->sql_statement) != 0)
{
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to send command to coordinators")));
}
}
}
/*
* Stop if all commands are completed or we got a data row and
* initialized state node for subsequent invocations
*/
if (exec_type == EXEC_ON_ALL_NODES ||
exec_type == EXEC_ON_DATANODES)
{
while (dn_conn_count > 0)
{
int i = 0;
if (pgxc_node_receive(dn_conn_count, pgxc_connections->datanode_handles, NULL))
break;
/*
* Handle input from the Datanodes.
* We do not expect Datanodes to return tuples when running a utility
* command.
* If we got EOF, move to the next connection, will receive more
* data on the next iteration.
*/
while (i < dn_conn_count)
{
PGXCNodeHandle *conn = pgxc_connections->datanode_handles[i];
int res = handle_response(conn, remotestate);
if (res == RESPONSE_EOF)
{
i++;
}
else if (res == RESPONSE_COMPLETE)
{
if (i < --dn_conn_count)
pgxc_connections->datanode_handles[i] =
pgxc_connections->datanode_handles[dn_conn_count];
}
else if (res == RESPONSE_TUPDESC)
{
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Unexpected response from Datanode")));
}
else if (res == RESPONSE_DATAROW)
{
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Unexpected response from Datanode")));
}
}
}
}
/* Do the same for Coordinators */
if (exec_type == EXEC_ON_ALL_NODES ||
exec_type == EXEC_ON_COORDS)
{
while (co_conn_count > 0)
{
int i = 0;
if (pgxc_node_receive(co_conn_count, pgxc_connections->coord_handles, NULL))
break;
while (i < co_conn_count)
{
int res = handle_response(pgxc_connections->coord_handles[i], remotestate);
if (res == RESPONSE_EOF)
{
i++;
}
else if (res == RESPONSE_COMPLETE)
{
if (i < --co_conn_count)
pgxc_connections->coord_handles[i] =
pgxc_connections->coord_handles[co_conn_count];
}
else if (res == RESPONSE_TUPDESC)
{
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Unexpected response from coordinator")));
}
else if (res == RESPONSE_DATAROW)
{
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Unexpected response from coordinator")));
}
}
}
}
/*
* We have processed all responses from nodes and if we have
* error message pending we can report it. All connections should be in
* consistent state now and so they can be released to the pool after ROLLBACK.
*/
pgxc_node_report_error(remotestate);
}
/*
* Called when the backend is ending.
*/
void
PGXCNodeCleanAndRelease(int code, Datum arg)
{
/* Clean up prepared transactions before releasing connections */
DropAllPreparedStatements();
/* Release Datanode connections */
release_handles();
/* Disconnect from Pooler */
PoolManagerDisconnect();
/* Close connection with GTM */
CloseGTM();
}
static int
pgxc_get_connections(PGXCNodeHandle *connections[], int size, List *connlist)
{
ListCell *lc;
int count = 0;
foreach(lc, connlist)
{
PGXCNodeHandle *conn = (PGXCNodeHandle *) lfirst(lc);
Assert (count < size);
connections[count++] = conn;
}
return count;
}
/*
* Get all connections for which we have an open transaction,
* for both Datanodes and Coordinators
*/
static int
pgxc_get_transaction_nodes(PGXCNodeHandle *connections[], int size, bool write)
{
return pgxc_get_connections(connections, size, write ? XactWriteNodes : XactReadNodes);
}
void
ExecCloseRemoteStatement(const char *stmt_name, List *nodelist)
{
PGXCNodeAllHandles *all_handles;
PGXCNodeHandle **connections;
RemoteQueryState *combiner;
int conn_count;
int i;
/* Exit if nodelist is empty */
if (list_length(nodelist) == 0)
return;
/* get needed Datanode connections */
all_handles = get_handles(nodelist, NIL, false);
conn_count = all_handles->dn_conn_count;
connections = all_handles->datanode_handles;
for (i = 0; i < conn_count; i++)
{
if (connections[i]->state == DN_CONNECTION_STATE_QUERY)
BufferConnection(connections[i]);
if (pgxc_node_send_close(connections[i], true, stmt_name) != 0)
{
/*
* Prepared statements are not dropped at statement end, so treat an
* unclosed statement on the Datanode as a fatal issue and
* force the connection to be discarded
*/
connections[i]->state = DN_CONNECTION_STATE_ERROR_FATAL;
ereport(WARNING,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to close Datanode statement")));
}
if (pgxc_node_send_sync(connections[i]) != 0)
{
connections[i]->state = DN_CONNECTION_STATE_ERROR_FATAL;
ereport(WARNING,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to close Datanode statement")));
}
}
combiner = CreateResponseCombiner(conn_count, COMBINE_TYPE_NONE);
while (conn_count > 0)
{
if (pgxc_node_receive(conn_count, connections, NULL))
{
for (i = 0; i < conn_count; i++)
connections[i]->state = DN_CONNECTION_STATE_ERROR_FATAL;
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to close Datanode statement")));
}
i = 0;
while (i < conn_count)
{
int res = handle_response(connections[i], combiner);
if (res == RESPONSE_EOF)
{
i++;
}
else if (res == RESPONSE_COMPLETE)
{
if (--conn_count > i)
connections[i] = connections[conn_count];
}
else
{
connections[i]->state = DN_CONNECTION_STATE_ERROR_FATAL;
}
}
}
ValidateAndCloseCombiner(combiner);
pfree_pgxc_all_handles(all_handles);
}
/*
* DataNodeCopyInBinaryForAll
*
* Send the PG_HEADER signature to all Datanodes for a COPY in binary mode.
*/
int DataNodeCopyInBinaryForAll(char *msg_buf, int len, PGXCNodeHandle** copy_connections)
{
int i;
int conn_count = 0;
PGXCNodeHandle *connections[NumDataNodes];
int msgLen = 4 + len + 1;
int nLen = htonl(msgLen);
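/* Collect the handles that actually participate in the COPY, skipping empty slots */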
for (i = 0; i < NumDataNodes; i++)
{
PGXCNodeHandle *handle = copy_connections[i];
if (!handle)
continue;
connections[conn_count++] = handle;
}
for (i = 0; i < conn_count; i++)
{
PGXCNodeHandle *handle = connections[i];
if (handle->state == DN_CONNECTION_STATE_COPY_IN)
{
/* msgType + msgLen */
if (ensure_out_buffer_capacity(handle->outEnd + 1 + msgLen, handle) != 0)
{
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
}
handle->outBuffer[handle->outEnd++] = 'd';
memcpy(handle->outBuffer + handle->outEnd, &nLen, 4);
handle->outEnd += 4;
memcpy(handle->outBuffer + handle->outEnd, msg_buf, len);
handle->outEnd += len;
handle->outBuffer[handle->outEnd++] = '\n';
}
else
{
add_error_message(handle, "Invalid Datanode connection");
return EOF;
}
}
return 0;
}
/*
* ExecSetTempObjectIncluded
*
* Remember that we have accessed a temporary object.
*/
void
ExecSetTempObjectIncluded(void)
{
temp_object_included = true;
}
/*
* ExecClearTempObjectIncluded
*
* Forget about temporary objects
*/
static void
ExecClearTempObjectIncluded(void)
{
temp_object_included = false;
}
/*
* ExecIsTempObjectIncluded
*
* Check if a temporary object has been accessed
*/
bool
ExecIsTempObjectIncluded(void)
{
return temp_object_included;
}
/*
* ExecProcNodeDMLInXC
*
* This function is used by ExecInsert/Update/Delete to execute the
* Insert/Update/Delete on the datanode using RemoteQuery plan.
*
* In XC, a non-FQSed UPDATE/DELETE is planned as a two step process
* The first step selects the ctid & node id of the row to be modified and the
* second step creates a parameterized query that is supposed to take the data
* row returned by the lower plan node as the parameters to modify the affected
* row. In case of an INSERT however the first step is used to get the new
* column values to be inserted in the target table and the second step uses
* those values as parameters of the INSERT query.
*
* We use extended query protocol to avoid repeated planning of the query and
* pass the column values(in case of an INSERT) and ctid & xc_node_id
* (in case of UPDATE/DELETE) as parameters while executing the query.
*
* Parameters:
* resultRemoteRel: The RemoteQueryState containing DML statement to be
* executed
* sourceDataSlot: The tuple returned by the first step (described above)
* to be used as parameters in the second step.
* newDataSlot: This has all the junk attributes stripped off from
* sourceDataSlot, plus BEFORE triggers may have modified the
* originally fetched data values. In other words, this has
* the final values that are to be sent to datanode through BIND.
*
* Returns the result of RETURNING clause if any
*/
TupleTableSlot *
ExecProcNodeDMLInXC(EState *estate,
TupleTableSlot *sourceDataSlot,
TupleTableSlot *newDataSlot)
{
ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
RemoteQueryState *resultRemoteRel = (RemoteQueryState *) estate->es_result_remoterel;
ExprContext *econtext = resultRemoteRel->ss.ps.ps_ExprContext;
TupleTableSlot *returningResultSlot = NULL; /* RETURNING clause result */
TupleTableSlot *temp_slot;
bool dml_returning_on_replicated = false;
RemoteQuery *step = (RemoteQuery *) resultRemoteRel->ss.ps.plan;
/*
* If the tuple returned by the previous step was null,
* simply return null tuple, no need to execute the DML
*/
if (TupIsNull(sourceDataSlot))
return NULL;
/*
* The current implementation of DMLs with RETURNING when run on replicated
* tables returns row from one of the datanodes. In order to achieve this
* ExecProcNode is repeatedly called saving one tuple and rejecting the rest.
* Do we have a DML on replicated table with RETURNING?
*/
dml_returning_on_replicated = IsReturningDMLOnReplicatedTable(step);
/*
* Use data row returned by the previous step as parameter for
* the DML to be executed in this step.
*/
SetDataRowForIntParams(resultRelInfo->ri_junkFilter,
sourceDataSlot, newDataSlot, resultRemoteRel);
/*
* do_query calls get_exec_connections to determine target nodes
* at execution time. The function get_exec_connections can decide
* to evaluate en_expr to determine the target nodes. To evaluate en_expr,
* ExecEvalVar is called which picks up values from ecxt_scantuple if Var
* does not refer either OUTER or INNER varno. Hence we should copy the
* tuple returned by previous step in ecxt_scantuple if econtext is set.
* The econtext is set only when en_expr is set for execution time
* determination of the target nodes.
*/
if (econtext)
econtext->ecxt_scantuple = newDataSlot;
/*
* This loop is required to reject extra tuples received from the Datanodes
* when a DML with RETURNING is run on a replicated table; otherwise it
* runs only once.
* PGXC_TODO: This approach is error prone if the DML statement constructed
* by the planner is such that it updates more than one row (even in case of
* non-replicated data). Fix it.
*/
do
{
temp_slot = ExecProcNode((PlanState *)resultRemoteRel);
if (!TupIsNull(temp_slot))
{
/* Have we already copied the returned tuple? */
if (returningResultSlot == NULL)
{
/* Copy the received tuple to be returned later */
returningResultSlot = MakeSingleTupleTableSlot(temp_slot->tts_tupleDescriptor);
returningResultSlot = ExecCopySlot(returningResultSlot, temp_slot);
}
/* Clear the received tuple, the copy required has already been saved */
ExecClearTuple(temp_slot);
}
else
{
/* Null tuple received, so break the loop */
ExecClearTuple(temp_slot);
break;
}
} while (dml_returning_on_replicated);
/*
* A DML can impact more than one row, e.g. an update without any where
* clause on a table with more than one row. We need to make sure that
* RemoteQueryNext calls do_query for each affected row, hence we reset
* the flag here and finish the DML being executed only when we return
* NULL from ExecModifyTable
*/
resultRemoteRel->query_Done = false;
return returningResultSlot;
}
void
RegisterTransactionNodes(int count, void **connections, bool write)
{
int i;
MemoryContext oldcontext = MemoryContextSwitchTo(TopMemoryContext);
for (i = 0; i < count; i++)
{
/*
* Add the node to either read or write participants. If a node is
* already in the write participant's list, don't add it to the read
* participant's list. OTOH if a node is currently in the read
* participant's list, but we are now initiating a write operation on
* the node, move it to the write participant's list
*/
if (write)
{
XactWriteNodes = list_append_unique(XactWriteNodes, connections[i]);
XactReadNodes = list_delete(XactReadNodes, connections[i]);
}
else
{
if (!list_member(XactWriteNodes, connections[i]))
XactReadNodes = list_append_unique(XactReadNodes, connections[i]);
}
}
MemoryContextSwitchTo(oldcontext);
}
void
ForgetTransactionNodes(void)
{
list_free(XactReadNodes);
XactReadNodes = NIL;
list_free(XactWriteNodes);
XactWriteNodes = NIL;
}
/*
* Clear per transaction remote information
*/
void
AtEOXact_Remote(void)
{
ExecClearTempObjectIncluded();
ForgetTransactionNodes();
clear_RemoteXactState();
}
/*
* Do pre-commit processing for remote nodes which includes Datanodes and
* Coordinators. If more than one nodes are involved in the transaction write
* activity, then we must run 2PC. For 2PC, we do the following steps:
*
* 1. PREPARE the transaction locally if the local node is involved in the
* transaction. If local node is not involved, skip this step and go to the
* next step
* 2. PREPARE the transaction on all the remote nodes. If any node fails to
* PREPARE, directly go to step 6
* 3. Now that all the involved nodes are PREPAREd, we can commit the
* transaction. We first inform the GTM that the transaction is fully
* PREPARED and also supply the list of the nodes involved in the
* transaction
* 4. COMMIT PREPARED the transaction on all the remote nodes and then
* finally COMMIT PREPARED on the local node if its involved in the
* transaction and start a new transaction so that normal commit processing
* works unchanged. Go to step 5.
* 5. Return and let the normal commit processing resume
* 6. Abort by ereporting the error and let normal abort-processing take
* charge.
*/
void
PreCommit_Remote(char *prepareGID, bool preparedLocalNode)
{
if (!preparedLocalNode)
PrePrepare_Remote(prepareGID, preparedLocalNode, false);
/*
* OK, everything went fine. At least one remote node is in PREPARED state
* and the transaction is successfully prepared on all the involved nodes.
* Now we are ready to commit the transaction. We need a new GXID to send
* down the remote nodes to execute the forthcoming COMMIT PREPARED
* command. So grab one from the GTM and track it. It will be closed along
* with the main transaction at the end.
*/
pgxc_node_remote_commit();
/*
* If the transaction is not committed successfully on all the involved
* nodes, it will remain in PREPARED state on those nodes. Such a transaction
* should be reported as live in the snapshots. So we must not close the
* transaction on the GTM. We just record the state of the transaction in
* the GTM and flag a warning for applications to take care of such
* in-doubt transactions
*/
if (remoteXactState.status == RXACT_PART_COMMITTED)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to commit the transaction on one or more nodes")));
Assert(remoteXactState.status == RXACT_COMMITTED ||
remoteXactState.status == RXACT_NONE);
clear_RemoteXactState();
/*
* The transaction is now successfully committed on all the remote nodes.
* (XXX How about the local node ?). It can now be cleaned up from the GTM
* as well
*/
if (!PersistentConnections)
release_handles();
}
/*
* Do abort processing for the transaction. We must abort the transaction on
* all the involved nodes. If a node has already prepared a transaction, we run
* ROLLBACK PREPARED command on the node. Otherwise, a simple ROLLBACK command
* is sufficient.
*
* We must guard against the case when a transaction is prepared successfully on
* all the nodes and some error occurs after we send a COMMIT PREPARED message
* to at least one node. Such a transaction must not be aborted to preserve
* global consistency. We handle this case by recording the nodes involved in
* the transaction at the GTM and keep the transaction open at the GTM so that
* it is reported as "in-progress" on all the nodes until resolved
*/
bool
PreAbort_Remote(void)
{
if (IS_PGXC_COORDINATOR && !IsConnFromCoord())
{
cancel_query();
clear_all_data();
}
if (remoteXactState.status == RXACT_COMMITTED)
return false;
if (remoteXactState.status == RXACT_PART_COMMITTED)
{
/*
* In this case the transaction is partially committed. Pick up the list of nodes
* that are prepared but not committed and register them on the GTM as if it were an explicit 2PC.
* This keeps the transaction alive in snapshots so that other transactions
* do not see any side effects of the partially committed transaction
*/
char *nodestring = NULL;
/*
* Get the list of nodes in prepared state; such nodes have not
* committed successfully
*/
nodestring = pgxc_node_get_nodelist(remoteXactState.preparedLocalNode);
Assert(nodestring);
/* Save the node list and gid on GTM. */
StartPreparedTranGTM(GetTopGlobalTransactionId(),
remoteXactState.prepareGID,
nodestring);
/* Finish preparing the transaction. */
PrepareTranGTM(GetTopGlobalTransactionId());
clear_RemoteXactState();
return false;
}
else
{
/*
* The transaction is neither partially nor fully committed. We can safely
* abort such a transaction
*/
if (remoteXactState.status == RXACT_NONE)
init_RemoteXactState(false);
pgxc_node_remote_abort();
}
clear_RemoteXactState();
if (!PersistentConnections)
release_handles();
return true;
}
char *
PrePrepare_Remote(char *prepareGID, bool localNode, bool implicit)
{
init_RemoteXactState(false);
/*
* PREPARE the transaction on all nodes including remote nodes as well as
* local node. Any errors will be reported via ereport and the transaction
* will be aborted accordingly.
*/
pgxc_node_remote_prepare(prepareGID);
if (preparedNodes)
pfree(preparedNodes);
preparedNodes = NULL;
if (!implicit)
preparedNodes = pgxc_node_get_nodelist(true);
return preparedNodes;
}
void
PostPrepare_Remote(char *prepareGID, char *nodestring, bool implicit)
{
remoteXactState.preparedLocalNode = true;
/*
* If this is an explicit PREPARE request by the client, we must also save
* the list of nodes involved in this transaction on the GTM for later use
*/
if (!implicit)
{
/* Save the node list and gid on GTM. */
StartPreparedTranGTM(GetTopGlobalTransactionId(),
prepareGID,
nodestring);
/* Finish preparing the transaction. */
PrepareTranGTM(GetTopGlobalTransactionId());
clear_RemoteXactState();
}
/* Now forget the transaction nodes */
ForgetTransactionNodes();
}
/*
* Return the list of nodes where the prepared transaction is not yet committed
*/
static char *
pgxc_node_get_nodelist(bool localNode)
{
int i;
char *nodestring = NULL, *nodename;
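/* Build a comma-separated list of the write nodes that have not yet committed */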
for (i = 0; i < remoteXactState.numWriteRemoteNodes; i++)
{
RemoteXactNodeStatus status = remoteXactState.remoteNodeStatus[i];
PGXCNodeHandle *conn = remoteXactState.remoteNodeHandles[i];
if (status != RXACT_NODE_COMMITTED)
{
nodename = get_pgxc_nodename(conn->nodeoid);
if (!nodestring)
{
nodestring = (char *) MemoryContextAlloc(TopMemoryContext, strlen(nodename) + 1);
sprintf(nodestring, "%s", nodename);
}
else
{
nodestring = (char *) repalloc(nodestring,
strlen(nodename) + strlen(nodestring) + 2);
strcat(nodestring, ",");
strcat(nodestring, nodename);
}
}
}
/* Case of a single Coordinator */
if (localNode && PGXCNodeId >= 0)
{
if (!nodestring)
{
nodestring = (char *) MemoryContextAlloc(TopMemoryContext, strlen(PGXCNodeName) + 1);
sprintf(nodestring, "%s", PGXCNodeName);
}
else
{
nodestring = (char *) repalloc(nodestring,
strlen(PGXCNodeName) + strlen(nodestring) + 2);
strcat(nodestring, ",");
strcat(nodestring, PGXCNodeName);
}
}
return nodestring;
}
bool
IsTwoPhaseCommitRequired(bool localWrite)
{
if ((list_length(XactWriteNodes) > 1) ||
((list_length(XactWriteNodes) == 1) && localWrite))
{
if (ExecIsTempObjectIncluded())
{
elog(DEBUG1, "Transaction accessed temporary objects - "
"2PC will not be used and that can lead to data inconsistencies "
"in case of failures");
return false;
}
return true;
}
else
return false;
}
static void
clear_RemoteXactState(void)
{
/* Clear the previous state */
remoteXactState.numWriteRemoteNodes = 0;
remoteXactState.numReadRemoteNodes = 0;
remoteXactState.status = RXACT_NONE;
remoteXactState.commitXid = InvalidGlobalTransactionId;
remoteXactState.prepareGID[0] = '\0';
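/*
* (Re)allocate the arrays of node handles and statuses if they are
* missing or too small for the current node configuration.
*/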
if ((remoteXactState.remoteNodeHandles == NULL) ||
(remoteXactState.maxRemoteNodes < (NumDataNodes + NumCoords)))
{
if (!remoteXactState.remoteNodeHandles)
remoteXactState.remoteNodeHandles = (PGXCNodeHandle **)
malloc(sizeof(PGXCNodeHandle *) * (MaxDataNodes + MaxCoords));
else
remoteXactState.remoteNodeHandles = (PGXCNodeHandle **)
realloc(remoteXactState.remoteNodeHandles,
sizeof(PGXCNodeHandle *) * (NumDataNodes + NumCoords));
if (!remoteXactState.remoteNodeStatus)
remoteXactState.remoteNodeStatus = (RemoteXactNodeStatus *)
malloc(sizeof(RemoteXactNodeStatus) * (MaxDataNodes + MaxCoords));
else
remoteXactState.remoteNodeStatus = (RemoteXactNodeStatus *)
realloc (remoteXactState.remoteNodeStatus,
sizeof(RemoteXactNodeStatus) * (NumDataNodes + NumCoords));
remoteXactState.maxRemoteNodes = NumDataNodes + NumCoords;
}
if (remoteXactState.remoteNodeHandles)
memset(remoteXactState.remoteNodeHandles, 0,
sizeof (PGXCNodeHandle *) * (NumDataNodes + NumCoords));
if (remoteXactState.remoteNodeStatus)
memset(remoteXactState.remoteNodeStatus, 0,
sizeof (RemoteXactNodeStatus) * (NumDataNodes + NumCoords));
}
static void
init_RemoteXactState(bool preparedLocalNode)
{
int write_conn_count, read_conn_count;
PGXCNodeHandle **connections;
clear_RemoteXactState();
remoteXactState.preparedLocalNode = preparedLocalNode;
connections = remoteXactState.remoteNodeHandles;
Assert(connections);
/*
* First get information about all the nodes involved in this transaction
*/
write_conn_count = pgxc_get_transaction_nodes(connections,
NumDataNodes + NumCoords, true);
remoteXactState.numWriteRemoteNodes = write_conn_count;
read_conn_count = pgxc_get_transaction_nodes(connections + write_conn_count,
NumDataNodes + NumCoords - write_conn_count, false);
remoteXactState.numReadRemoteNodes = read_conn_count;
}
bool
FinishRemotePreparedTransaction(char *prepareGID, bool commit)
{
char *nodename, *nodestring;
List *nodelist = NIL, *coordlist = NIL;
GlobalTransactionId gxid, prepare_gxid;
PGXCNodeAllHandles *pgxc_handles;
bool prepared_local = false;
int i;
/*
* Please note that with xc_maintenance_mode = on, COMMIT/ROLLBACK PREPARED will not
* propagate to remote nodes. Only GTM status is cleaned up.
*/
if (xc_maintenance_mode)
{
if (commit)
{
pgxc_node_remote_commit();
CommitPreparedTranGTM(prepare_gxid, gxid);
}
else
{
pgxc_node_remote_abort();
RollbackTranGTM(prepare_gxid);
RollbackTranGTM(gxid);
}
return false;
}
/*
* Get the list of nodes involved in this transaction.
*
* This function returns the GXID of the prepared transaction. It also
* returns a fresh GXID which can be used for running COMMIT PREPARED
* commands on the remote nodes. Both these GXIDs can then be either
* committed or aborted together.
*
* XXX While I understand that we get the prepared and a new GXID with a
* single call, it doesn't look nice and creates confusion. We should
* probably split them into two parts. This is used only for explicit 2PC
* which should not be very common in XC
*/
if (GetGIDDataGTM(prepareGID, &gxid, &prepare_gxid, &nodestring) < 0)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("prepared transaction with identifier \"%s\" does not exist",
prepareGID)));
/*
* Now based on the nodestring, run COMMIT/ROLLBACK PREPARED command on the
* remote nodes and also finish the transaction locally if required
*/
nodename = strtok(nodestring, ",");
while (nodename != NULL)
{
Oid nodeoid;
int nodeIndex;
char nodetype;
nodeoid = get_pgxc_nodeoid(nodename);
if (!OidIsValid(nodeoid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("PGXC Node %s: object not defined",
nodename)));
/* Get node type and index */
nodetype = get_pgxc_nodetype(nodeoid);
nodeIndex = PGXCNodeGetNodeId(nodeoid, get_pgxc_nodetype(nodeoid));
/* Check whether the requested node is the self-node or not */
if (nodetype == PGXC_NODE_COORDINATOR)
{
if (nodeIndex == PGXCNodeId - 1)
prepared_local = true;
else
coordlist = lappend_int(coordlist, nodeIndex);
}
else
nodelist = lappend_int(nodelist, nodeIndex);
nodename = strtok(NULL, ",");
}
/*
* Now get handles for all the involved Datanodes and the Coordinators
*/
pgxc_handles = get_handles(nodelist, coordlist, false);
/*
* Send GXID (as received above) to the remote nodes.
if (pgxc_node_begin(pgxc_handles->dn_conn_count,
pgxc_handles->datanode_handles,
gxid, false, false))
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Could not begin transaction on Datanodes")));
*/
RegisterTransactionNodes(pgxc_handles->dn_conn_count,
(void **) pgxc_handles->datanode_handles, true);
/*
if (pgxc_node_begin(pgxc_handles->co_conn_count,
pgxc_handles->coord_handles,
gxid, false, false))
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Could not begin transaction on coordinators")));
*/
RegisterTransactionNodes(pgxc_handles->co_conn_count,
(void **) pgxc_handles->coord_handles, true);
/*
* Initialize the remoteXactState so that we can use the APIs to take care
* of commit/abort.
*/
init_RemoteXactState(prepared_local);
remoteXactState.commitXid = gxid;
/*
* At this point, most of the things are set up in remoteXactState except
* the state information for all the involved nodes. Force that now and we
* are ready to call the commit/abort API
*/
strcpy(remoteXactState.prepareGID, prepareGID);
for (i = 0; i < remoteXactState.numWriteRemoteNodes; i++)
remoteXactState.remoteNodeStatus[i] = RXACT_NODE_PREPARED;
remoteXactState.status = RXACT_PREPARED;
if (commit)
{
pgxc_node_remote_commit();
CommitPreparedTranGTM(prepare_gxid, gxid);
}
else
{
pgxc_node_remote_abort();
RollbackTranGTM(prepare_gxid);
RollbackTranGTM(gxid);
}
/*
* The following is also only for usual operation. With xc_maintenance_mode = on,
* no remote operation will be done here and no post-operation work is needed.
*/
clear_RemoteXactState();
ForgetTransactionNodes();
return prepared_local;
}
/*
* pgxc_node_report_error
* Throw error from Datanode if any.
*/
static void
pgxc_node_report_error(RemoteQueryState *combiner)
{
/* If no combiner, nothing to do */
if (!combiner)
return;
if (combiner->errorMessage)
{
char *code = combiner->errorCode;
if (combiner->errorDetail != NULL)
ereport(ERROR,
(errcode(MAKE_SQLSTATE(code[0], code[1], code[2], code[3], code[4])),
errmsg("%s", combiner->errorMessage), errdetail("%s", combiner->errorDetail) ));
else
ereport(ERROR,
(errcode(MAKE_SQLSTATE(code[0], code[1], code[2], code[3], code[4])),
errmsg("%s", combiner->errorMessage)));
}
}
/*
* get_success_nodes:
* Currently called to print a user-friendly message about
* which nodes the query failed on.
* Gets all the nodes where no 'E' (error) messages were received; i.e. where the
* query ran successfully.
*/
static ExecNodes *
get_success_nodes(int node_count, PGXCNodeHandle **handles, char node_type, StringInfo failednodes)
{
ExecNodes *success_nodes = NULL;
int i;
for (i = 0; i < node_count; i++)
{
PGXCNodeHandle *handle = handles[i];
int nodenum = PGXCNodeGetNodeId(handle->nodeoid, node_type);
if (!handle->error)
{
if (!success_nodes)
success_nodes = makeNode(ExecNodes);
success_nodes->nodeList = lappend_int(success_nodes->nodeList, nodenum);
}
else
{
if (failednodes->len == 0)
appendStringInfo(failednodes, "Error message received from nodes:");
appendStringInfo(failednodes, " %s#%d",
(node_type == PGXC_NODE_COORDINATOR ? "coordinator" : "datanode"),
nodenum + 1);
}
}
return success_nodes;
}
/*
* pgxc_all_success_nodes: Uses get_success_nodes() to collect the
* user-friendly message from coordinator as well as datanode.
*/
void
pgxc_all_success_nodes(ExecNodes **d_nodes, ExecNodes **c_nodes, char **failednodes_msg)
{
PGXCNodeAllHandles *connections = get_exec_connections(NULL, NULL, EXEC_ON_ALL_NODES);
StringInfoData failednodes;
initStringInfo(&failednodes);
*d_nodes = get_success_nodes(connections->dn_conn_count,
connections->datanode_handles,
PGXC_NODE_DATANODE,
&failednodes);
*c_nodes = get_success_nodes(connections->co_conn_count,
connections->coord_handles,
PGXC_NODE_COORDINATOR,
&failednodes);
if (failednodes.len == 0)
*failednodes_msg = NULL;
else
*failednodes_msg = failednodes.data;
}
/*
* set_dbcleanup_callback:
* Register a callback function which does some non-critical cleanup tasks
* on xact success or abort, such as tablespace/database directory cleanup.
*/
void set_dbcleanup_callback(xact_callback function, void *paraminfo, int paraminfo_size)
{
void *fparams;
fparams = MemoryContextAlloc(TopMemoryContext, paraminfo_size);
memcpy(fparams, paraminfo, paraminfo_size);
dbcleanup_info.function = function;
dbcleanup_info.fparams = fparams;
}
/*
* AtEOXact_DBCleanup: To be called at post-commit or pre-abort.
* Calls the cleanup function registered during this transaction, if any.
*/
void AtEOXact_DBCleanup(bool isCommit)
{
if (dbcleanup_info.function)
(*dbcleanup_info.function)(isCommit, dbcleanup_info.fparams);
/*
* Just reset the callbackinfo. We anyway don't want this to be called again,
* until explicitly set.
*/
dbcleanup_info.function = NULL;
if (dbcleanup_info.fparams)
{
pfree(dbcleanup_info.fparams);
dbcleanup_info.fparams = NULL;
}
}
/*
* SetDataRowForIntParams: Form a BIND data row for internal parameters.
* This function is called when the data for the parameters of remote
* statement resides in some plan slot of an internally generated remote
* statement rather than from some extern params supplied by the caller of the
* query. Currently DML is the only case where we generate a query with
* internal parameters.
* The parameter data is constructed from the slot data, and stored in
* RemoteQueryState.paramval_data.
* At the same time, remote parameter types are inferred from the slot
* tuple descriptor, and stored in RemoteQueryState.rqs_param_types.
* On subsequent calls, these param types are re-used.
* The data to be BOUND consists of table column data to be inserted/updated
* and the ctid/nodeid values to be supplied for the WHERE clause of the
* query. The data values are present in dataSlot whereas the ctid/nodeid
* are available in sourceSlot as junk attributes.
* sourceSlot is used only to retrieve ctid/nodeid, so it does not get
* used for INSERTs, although it will never be NULL.
* The slots themselves are undisturbed.
*/
static void
SetDataRowForIntParams(JunkFilter *junkfilter,
TupleTableSlot *sourceSlot, TupleTableSlot *dataSlot,
RemoteQueryState *rq_state)
{
StringInfoData buf;
uint16 numparams = 0;
RemoteQuery *step = (RemoteQuery *) rq_state->ss.ps.plan;
Assert(sourceSlot);
/* Calculate the total number of parameters */
if (step->rq_max_param_num > 0)
numparams = step->rq_max_param_num;
else if (dataSlot)
numparams = dataSlot->tts_tupleDescriptor->natts;
/* Add number of junk attributes */
if (junkfilter)
{
if (junkfilter->jf_junkAttNo)
numparams++;
if (junkfilter->jf_xc_node_id)
numparams++;
}
/*
* Infer param types from the slot tupledesc and junk attributes. But we
* have to do it only the first time: the internal parameters remain the same
* while processing all the source data rows because the data slot tupdesc
* never changes. Even though we can determine the internal param types
* during planning, we want to do it here: we don't want to set the param
* types and param data at two different places. Doing them together here
* helps us to make sure that the param types are in sync with the param
* data.
*/
/*
* We know the numparams, now initialize the param types if not already
* done. Once set, this will be re-used for each source data row.
*/
if (rq_state->rqs_num_params == 0)
{
int attindex = 0;
rq_state->rqs_num_params = numparams;
rq_state->rqs_param_types =
(Oid *) palloc(sizeof(Oid) * rq_state->rqs_num_params);
if (dataSlot) /* We have table attributes to bind */
{
TupleDesc tdesc = dataSlot->tts_tupleDescriptor;
int numatts = tdesc->natts;
if (step->rq_max_param_num > 0)
numatts = step->rq_max_param_num;
for (attindex = 0; attindex < numatts; attindex++)
{
rq_state->rqs_param_types[attindex] =
tdesc->attrs[attindex]->atttypid;
}
}
if (junkfilter) /* Param types for specific junk attributes if present */
{
/* jf_junkAttNo always contains ctid */
if (AttributeNumberIsValid(junkfilter->jf_junkAttNo))
rq_state->rqs_param_types[attindex] = TIDOID;
if (AttributeNumberIsValid(junkfilter->jf_xc_node_id))
rq_state->rqs_param_types[attindex + 1] = INT4OID;
}
}
else
{
Assert(rq_state->rqs_num_params == numparams);
}
/*
* If we already have the data row, just copy that, and we are done. One
* scenario where we can have the data row is for INSERT ... SELECT.
* Effectively, in this case, we just re-use the data row from SELECT as-is
* for BIND row of INSERT. But just make sure all of the data required to
* bind is available in the slot. If there are junk attributes to be added
* in the BIND row, we cannot re-use the data row as-is.
*/
if (!junkfilter && dataSlot && dataSlot->tts_dataRow)
{
rq_state->paramval_data = (char *)palloc(dataSlot->tts_dataLen);
memcpy(rq_state->paramval_data, dataSlot->tts_dataRow, dataSlot->tts_dataLen);
rq_state->paramval_len = dataSlot->tts_dataLen;
return;
}
initStringInfo(&buf);
{
uint16 params_nbo = htons(numparams); /* Network byte order */
appendBinaryStringInfo(&buf, (char *) ¶ms_nbo, sizeof(params_nbo));
}
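/*
 * Resulting data row layout, as built below: a uint16 parameter count in
 * network byte order, followed by each parameter encoded as a 4-byte length
 * in network byte order plus the value in text format; NULL values are
 * encoded as a length of -1 with no value bytes.
 */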
if (dataSlot)
{
TupleDesc tdesc = dataSlot->tts_tupleDescriptor;
int attindex;
int numatts = tdesc->natts;
/* Append the data attributes */
if (step->rq_max_param_num > 0)
numatts = step->rq_max_param_num;
/* ensure we have all values */
slot_getallattrs(dataSlot);
for (attindex = 0; attindex < numatts; attindex++)
{
uint32 n32;
Assert(attindex < numparams);
if (dataSlot->tts_isnull[attindex])
{
n32 = htonl(-1);
appendBinaryStringInfo(&buf, (char *) &n32, 4);
}
else
pgxc_append_param_val(&buf, dataSlot->tts_values[attindex], tdesc->attrs[attindex]->atttypid);
}
}
/*
* From the source data, fetch the junk attribute values to be appended in
* the end of the data buffer. The junk attribute vals like ctid and
* xc_node_id are used in the WHERE clause parameters.
* These attributes would not be present for INSERT.
*/
if (junkfilter)
{
/* First one - jf_junkAttNo - always represents ctid */
pgxc_append_param_junkval(sourceSlot, junkfilter->jf_junkAttNo,
TIDOID, &buf);
pgxc_append_param_junkval(sourceSlot, junkfilter->jf_xc_node_id,
INT4OID, &buf);
}
/* Assign the newly allocated data row to paramval */
rq_state->paramval_data = buf.data;
rq_state->paramval_len = buf.len;
}
/*
* pgxc_append_param_junkval:
* Append into the data row the parameter whose value corresponds to the junk
* attributes in the source slot, namely ctid or node_id.
*/
static void
pgxc_append_param_junkval(TupleTableSlot *slot, AttrNumber attno,
Oid valtype, StringInfo buf)
{
bool isNull;
if (slot && attno != InvalidAttrNumber)
{
/* Junk attribute positions are saved by ExecFindJunkAttribute() */
Datum val = ExecGetJunkAttribute(slot, attno, &isNull);
/* shouldn't ever get a null result... */
if (isNull)
elog(ERROR, "NULL junk attribute");
pgxc_append_param_val(buf, val, valtype);
}
}
/*
* pgxc_append_param_val:
* Append the parameter value for the SET clauses of the UPDATE statement.
* These values are the table attribute values from the dataSlot.
*/
static void
pgxc_append_param_val(StringInfo buf, Datum val, Oid valtype)
{
/* Convert Datum to string */
char *pstring;
int len;
uint32 n32;
Oid typOutput;
bool typIsVarlena;
/* Get info needed to output the value */
getTypeOutputInfo(valtype, &typOutput, &typIsVarlena);
/*
* If we have a toasted datum, forcibly detoast it here to avoid
* memory leakage inside the type's output routine.
*/
if (typIsVarlena)
val = PointerGetDatum(PG_DETOAST_DATUM(val));
pstring = OidOutputFunctionCall(typOutput, val);
/* copy data to the buffer */
len = strlen(pstring);
n32 = htonl(len);
appendBinaryStringInfo(buf, (char *) &n32, 4);
appendBinaryStringInfo(buf, pstring, len);
}
/*
* pgxc_rq_fire_bstriggers:
* BEFORE STATEMENT triggers to be fired for a user-supplied DML query.
* For non-FQS query, we internally generate remote DML query to be executed
* for each row to be processed. But we do not want to explicitly fire triggers
* for such a query; ExecModifyTable does that for us. It is the FQS DML query
* where we need to explicitly fire statement triggers on coordinator. We
* cannot run stmt triggers on datanode. While we can fire stmt trigger on
* datanode versus coordinator based on the function shippability, we cannot
* do the same for FQS query. The datanode has no knowledge that the trigger
* being fired is due to a non-FQS query or an FQS query. Even though it can
* find that all the triggers are shippable, it won't know whether the stmt
* itself has been FQSed. Even though all triggers were shippable, the stmt
* might have been planned on coordinator due to some other non-shippable
* clauses. So the idea here is to *always* fire stmt triggers on coordinator.
* Note that this does not prevent the query itself from being FQSed. This is
* because we separately fire stmt triggers on coordinator.
*/
static void
pgxc_rq_fire_bstriggers(RemoteQueryState *node)
{
RemoteQuery *rq = (RemoteQuery*) node->ss.ps.plan;
EState *estate = node->ss.ps.state;
/* If it's not an internally generated query, fire BS triggers */
if (!rq->rq_params_internal && estate->es_result_relations)
{
Assert(rq->remote_query);
switch (rq->remote_query->commandType)
{
case CMD_INSERT:
ExecBSInsertTriggers(estate, estate->es_result_relations);
break;
case CMD_UPDATE:
ExecBSUpdateTriggers(estate, estate->es_result_relations);
break;
case CMD_DELETE:
ExecBSDeleteTriggers(estate, estate->es_result_relations);
break;
default:
break;
}
}
}
/*
* pgxc_rq_fire_astriggers:
* AFTER STATEMENT triggers to be fired for a user-supplied DML query.
* See comments in pgxc_rq_fire_bstriggers()
*/
static void
pgxc_rq_fire_astriggers(RemoteQueryState *node)
{
RemoteQuery *rq = (RemoteQuery*) node->ss.ps.plan;
EState *estate = node->ss.ps.state;
/* If it's not an internally generated query, fire AS triggers */
if (!rq->rq_params_internal && estate->es_result_relations)
{
Assert(rq->remote_query);
switch (rq->remote_query->commandType)
{
case CMD_INSERT:
ExecASInsertTriggers(estate, estate->es_result_relations);
break;
case CMD_UPDATE:
ExecASUpdateTriggers(estate, estate->es_result_relations);
break;
case CMD_DELETE:
ExecASDeleteTriggers(estate, estate->es_result_relations);
break;
default:
break;
}
}
}
/*
* Flush the PGXCNodeHandle write cache to the remote connection until the amount
* of remaining data becomes lower than the threshold.
*
* If the datanode is too slow to handle the data sent, retry flushing some of the
* buffered data.
*/
static int flushPGXCNodeHandleData(PGXCNodeHandle *handle)
{
int retry_no = 0;
int wait_microsec = 0;
size_t remaining;
while (handle->outEnd > MAX_SIZE_TO_STOP_FLUSH)
{
remaining = handle->outEnd;
if (send_some(handle, handle->outEnd) <0)
{
add_error_message(handle, "failed to send data to Datanode");
return EOF;
}
if (remaining == handle->outEnd)
{
/* No data sent */
retry_no++;
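/*
 * Exponential backoff: no wait for the first few retries, then the wait
 * doubles every 5 retries (2 ms, 4 ms, ...) and is capped at 128 ms.
 */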
wait_microsec = retry_no < 5 ? 0 : (retry_no < 35 ? (1 << (retry_no / 5)) : 128) * 1000;
if (wait_microsec)
pg_usleep(wait_microsec);
continue;
}
else
{
/* Some data sent */
retry_no = 0;
wait_microsec = 0;
continue;
}
}
return 0;
}
| 58,219 |
742 | # -*- coding: utf-8 -*-
import os.path as osp
from threading import current_thread
from icrawler.utils import ThreadPool
class Feeder(ThreadPool):
"""Base class for feeder.
A thread pool of feeder threads, in charge of feeding urls to parsers.
Attributes:
thread_num (int): An integer indicating the number of threads.
global_signal (Signal): A :class:`Signal` object for communication
among all threads.
out_queue (Queue): A queue connected with parsers' inputs,
storing page urls.
session (Session): A session object.
logger (Logger): A logging.Logger object used for logging.
workers (list): A list storing all the threading.Thread objects
of the feeder.
lock (Lock): A :class:`Lock` instance shared by all feeder threads.
"""
def __init__(self, thread_num, signal, session):
"""Init Feeder with some shared variables."""
super(Feeder, self).__init__(
thread_num=thread_num, in_queue=None, name='feeder')
self.signal = signal
self.session = session
def feed(self, **kwargs):
"""Feed urls.
This method should be implemented by users.
"""
raise NotImplementedError
def worker_exec(self, **kwargs):
"""Target function of workers"""
self.feed(**kwargs)
self.logger.info('thread {} exit'.format(current_thread().name))
def __exit__(self):
self.logger.info('all feeder threads exited')
class UrlListFeeder(Feeder):
"""Url list feeder which feed a list of urls"""
def feed(self, url_list, offset=0, max_num=0):
if isinstance(url_list, str):
if osp.isfile(url_list):
with open(url_list, 'r') as fin:
url_list = [line.rstrip('\n') for line in fin]
else:
raise IOError('url list file {} not found'.format(url_list))
elif not isinstance(url_list, list):
raise TypeError('"url_list" can only be a filename or a str list')
if offset < 0 or offset >= len(url_list):
raise ValueError('"offset" exceed the list length')
else:
if max_num > 0:
end_idx = min(len(url_list), offset + max_num)
else:
end_idx = len(url_list)
for i in range(offset, end_idx):
url = url_list[i]
self.out_queue.put(url)
self.logger.debug('put url to url_queue: {}'.format(url))
class SimpleSEFeeder(Feeder):
"""Simple search engine like Feeder"""
def feed(self, url_template, keyword, offset, max_num, page_step):
"""Feed urls once
Args:
url_template: A string with parameters replaced with "{}".
keyword: A string indicating the searching keyword.
offset: An integer indicating the starting index.
max_num: An integer indicating the max number of images to be crawled.
page_step: An integer added to offset after each iteration.
"""
for i in range(offset, offset + max_num, page_step):
url = url_template.format(keyword, i)
self.out_queue.put(url)
self.logger.debug('put url to url_queue: {}'.format(url))
| 1,410 |
451 | <reponame>Shlpeng/Qualitis
/*
* Copyright 2019 WeBank
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webank.wedatasphere.qualitis.rule.controller;
import com.webank.wedatasphere.qualitis.exception.UnExpectedRequestException;
import com.webank.wedatasphere.qualitis.response.GeneralResponse;
import com.webank.wedatasphere.qualitis.rule.request.multi.AddMultiSourceRuleRequest;
import com.webank.wedatasphere.qualitis.rule.request.multi.DeleteMultiSourceRequest;
import com.webank.wedatasphere.qualitis.rule.request.multi.ModifyMultiSourceRequest;
import com.webank.wedatasphere.qualitis.rule.service.MultiSourceRuleService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
/**
* @author howeye
*/
@Path("api/v1/projector/mul_source_rule")
public class MultiSourceRuleController {
@Autowired
private MultiSourceRuleService multiSourceRuleService;
private static final Logger LOGGER = LoggerFactory.getLogger(MultiSourceRuleController.class);
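/*
 * Endpoints exposed below, all relative to api/v1/projector/mul_source_rule:
 * PUT adds a multi-table rule, DELETE removes one, POST modifies one, and
 * GET /{rule_id} returns the details of a single rule.
 */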
/**
* Add multi-table rule
* @return
*/
@PUT
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
public GeneralResponse<?> addMultiSourceRule(AddMultiSourceRuleRequest request) throws UnExpectedRequestException {
try {
return multiSourceRuleService.addMultiSourceRule(request);
} catch (UnExpectedRequestException e) {
throw new UnExpectedRequestException(e.getMessage());
} catch (Exception e) {
LOGGER.error("Failed to add multi_source rule. caused by: {}", e.getMessage(), e);
return new GeneralResponse<>("500", "{&FAILED_TO_ADD_MULTI_SOURCE_RULE}", null);
}
}
/**
* Delete multi-table rule
* @return
*/
@DELETE
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
public GeneralResponse<?> deleteMultiSourceRule(DeleteMultiSourceRequest request) throws UnExpectedRequestException {
try {
return multiSourceRuleService.deleteMultiSourceRule(request);
} catch (UnExpectedRequestException e) {
throw new UnExpectedRequestException(e.getMessage());
} catch (Exception e) {
LOGGER.error("Failed to delete multi_source rule. caused by: {}", e.getMessage(), e);
return new GeneralResponse<>("500", "{&FAILED_TO_DELETE_MULTI_SOURCE_RULE}", null);
}
}
/**
* Modify multi-table rule
* @return
*/
@POST
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
public GeneralResponse<?> modifyMultiSourceRule(ModifyMultiSourceRequest request) throws UnExpectedRequestException {
try {
return multiSourceRuleService.modifyMultiSourceRule(request);
} catch (UnExpectedRequestException e) {
throw new UnExpectedRequestException(e.getMessage());
} catch (Exception e) {
LOGGER.error("Failed to modify multi_source rule. caused by: {}", e.getMessage(), e);
return new GeneralResponse<>("500", "{&FAILED_TO_MODIFY_MULTI_SOURCE_RULE}", null);
}
}
/**
* Get multi-table rule detail
* @return
*/
@GET
@Path("/{rule_id}")
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
public GeneralResponse<?> getMultiSourceRule(@PathParam("rule_id")Long ruleId) throws UnExpectedRequestException {
try {
return multiSourceRuleService.getMultiSourceRule(ruleId);
} catch (UnExpectedRequestException e) {
throw new UnExpectedRequestException(e.getMessage());
} catch (Exception e) {
LOGGER.error("Failed to get details of multi_source rule. caused by: {}", e.getMessage(), e);
return new GeneralResponse<>("500", "{&FAILED_TO_GET_DETAILS_OF_MULTI_SOURCE_RULE}", null);
}
}
}
| 1,823 |
1,469 | //
// RawMessage.cpp
// WechatExporter
//
// Created by Matthew on 2020/10/13.
// Copyright © 2020 Matthew. All rights reserved.
//
#include "RawMessage.h"
#ifdef _WIN32
#include <atlstr.h>
#endif
bool convertUnknownField(const UnknownField &uf, std::string& value)
{
if (uf.type() == UnknownField::TYPE_LENGTH_DELIMITED)
{
value.assign(uf.length_delimited().c_str(), uf.GetLengthDelimitedSize());
return true;
}
else if (uf.type() == UnknownField::TYPE_VARINT)
{
value = std::to_string(uf.varint());
return true;
}
else if (uf.type() == UnknownField::TYPE_FIXED32)
{
value = std::to_string(uf.fixed32());
return true;
}
else if (uf.type() == UnknownField::TYPE_FIXED64)
{
value = std::to_string(uf.fixed64());
return true;
}
return false;
}
bool convertUnknownField(const UnknownField &uf, int& value)
{
if (uf.type() == UnknownField::TYPE_VARINT)
{
value = static_cast<int>(uf.varint());
return true;
}
else if (uf.type() == UnknownField::TYPE_FIXED32)
{
value = static_cast<int>(uf.fixed32());
return true;
}
else if (uf.type() == UnknownField::TYPE_FIXED64)
{
value = static_cast<int>(uf.fixed64());
return true;
}
return false;
}
std::string RawMessage::toUtf8String(const std::string& str)
{
return UnescapeCEscapeString(str);
}
RawMessage::RawMessage() : m_pool(NULL), m_factory(NULL), m_message(NULL)
{
}
RawMessage::~RawMessage()
{
release();
}
void RawMessage::release()
{
if (NULL != m_message)
{
delete m_message;
m_message = NULL;
}
if (NULL != m_factory)
{
delete m_factory;
m_factory = NULL;
}
if (NULL != m_pool)
{
delete m_pool;
m_pool = NULL;
}
}
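// merge() parses an arbitrary protobuf blob without a schema: it registers an
// empty message type in a fresh DescriptorPool, instantiates it through a
// DynamicMessageFactory, and lets ParseFromArray collect every field into the
// message's UnknownFieldSet for later inspection.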
bool RawMessage::merge(const char *data, int length)
{
release();
m_pool = new DescriptorPool();
FileDescriptorProto file;
file.set_name("empty_message.proto");
file.add_message_type()->set_name("EmptyMessage");
GOOGLE_CHECK(m_pool->BuildFile(file) != NULL);
const Descriptor *descriptor = m_pool->FindMessageTypeByName("EmptyMessage");
if (NULL == descriptor)
{
// FormatError(outputString, lengthOfOutputString, ERROR_NO_MESSAGE_TYPE, src->messageTypeName);
return false;
}
m_factory = new DynamicMessageFactory(m_pool);
const Message *message = m_factory->GetPrototype(descriptor);
if (NULL == message)
{
return false;
}
m_message = message->New();
if (NULL == m_message)
{
return false;
}
if (!m_message->ParseFromArray(reinterpret_cast<const void *>(data), length))
{
delete m_message;
m_message = NULL;
return false;
}
return true;
}
bool RawMessage::mergeFile(const std::string& path)
{
release();
#ifdef _WIN32
CA2W pszW(path.c_str(), CP_UTF8);
std::ifstream file(pszW, std::ios::in|std::ios::binary|std::ios::ate);
#else
std::ifstream file(path.c_str(), std::ios::in|std::ios::binary|std::ios::ate);
#endif
if (file.is_open())
{
std::streampos size = file.tellg();
std::vector<char> buffer;
buffer.resize(size);
file.seekg (0, std::ios::beg);
file.read((char *)(&buffer[0]), size);
file.close();
return merge(&buffer[0], static_cast<int>(size));
}
return false;
}
/*
bool RawMessage::parse(int fieldNumber, std::string& value)
{
if (NULL == m_message)
{
return false;
}
const Reflection* reflection = m_message->GetReflection();
const UnknownFieldSet& ufs = reflection->GetUnknownFields(*m_message);
for (int fieldIdx = 0; fieldIdx < ufs.field_count(); ++fieldIdx)
{
const UnknownField uf = ufs.field(fieldIdx);
if (uf.number() == fieldNumber)
{
value = uf.length_delimited();
return true;
}
}
return false;
}
bool RawMessage::parseFile(const std::string& path, std::string& fields, std::string& value)
{
std::string data = readFile(path);
return parse(data, fields, value);
}
bool RawMessage::parse(const std::string& data, std::string& fields, std::string& value)
{
std::queue<int> fieldNumbers;
std::string::size_type start = 0;
std::string::size_type end = fields.find('.');
while (end != std::string::npos)
{
std::string field = fields.substr(start, end - start);
if (field.empty())
{
return false;
}
int fieldNumber = std::stoi(field);
fieldNumbers.push(fieldNumber);
start = end + 1;
end = fields.find('.', start);
}
std::string field = fields.substr(start, end);
if (field.empty())
{
return false;
}
int fieldNumber = std::stoi(field);
fieldNumbers.push(fieldNumber);
return parse(data, fieldNumbers, value);
}
bool RawMessage::parse(const std::string& data, std::queue<int>& fieldNumbers, std::string& value)
{
if (fieldNumbers.empty())
{
return false;
}
UnknownFieldSet unknownFields;
if (unknownFields.ParseFromString(data))
{
int fieldNumber = fieldNumbers.front();
fieldNumbers.pop();
for (int fieldIdx = 0; fieldIdx < unknownFields.field_count(); ++fieldIdx)
{
const UnknownField uf = unknownFields.field(fieldIdx);
if (uf.number() == fieldNumber)
{
if (fieldNumbers.empty())
{
value = uf.length_delimited();
return true;
}
else
{
std::string embeded_data = uf.length_delimited();
return parse(embeded_data, fieldNumbers, value);
}
break;
}
}
}
return false;
}
*/
| 2,770 |
679 | <reponame>Grosskopf/openoffice
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_xmloff.hxx"
#include "formenums.hxx"
#include <osl/diagnose.h>
#include <com/sun/star/form/FormSubmitEncoding.hpp>
#include <com/sun/star/form/FormSubmitMethod.hpp>
#include <com/sun/star/sdb/CommandType.hpp>
#include <com/sun/star/form/NavigationBarMode.hpp>
#include <com/sun/star/form/TabulatorCycle.hpp>
#include <com/sun/star/form/FormButtonType.hpp>
#include <com/sun/star/form/ListSourceType.hpp>
#include <com/sun/star/awt/TextAlign.hpp>
#include <com/sun/star/awt/FontWidth.hpp>
#include <com/sun/star/awt/FontEmphasisMark.hpp>
#include <com/sun/star/awt/FontRelief.hpp>
#include <com/sun/star/awt/ScrollBarOrientation.hpp>
#include <com/sun/star/awt/VisualEffect.hpp>
#include <com/sun/star/awt/ImageScaleMode.hpp>
#include <tools/wintypes.hxx> // for check states
#include <xmloff/xmltoken.hxx>
//.........................................................................
namespace xmloff
{
//.........................................................................
using namespace ::com::sun::star::form;
using namespace ::com::sun::star::sdb;
using namespace ::com::sun::star::awt;
using namespace ::xmloff::token;
const SvXMLEnumMapEntry* OEnumMapper::s_pEnumMap[OEnumMapper::KNOWN_ENUM_PROPERTIES] =
{
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL
};
//---------------------------------------------------------------------
const SvXMLEnumMapEntry* OEnumMapper::getEnumMap(EnumProperties _eProperty)
{
OSL_ENSURE(_eProperty < KNOWN_ENUM_PROPERTIES, "OEnumMapper::getEnumMap: invalid index (this will crash)!");
const SvXMLEnumMapEntry*& rReturn = s_pEnumMap[_eProperty];
if (!rReturn)
{
// the map for this property is not initialized yet
switch (_eProperty)
{
// FormSubmitEncoding
case epSubmitEncoding:
{
static SvXMLEnumMapEntry aSubmitEncodingMap[] =
{
{ XML_APPLICATION_X_WWW_FORM_URLENCODED, FormSubmitEncoding_URL },
{ XML_MULTIPART_FORMDATA, FormSubmitEncoding_MULTIPART },
{ XML_APPLICATION_TEXT, FormSubmitEncoding_TEXT },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aSubmitEncodingMap;
}
break;
// FormSubmitMethod
case epSubmitMethod:
{
static SvXMLEnumMapEntry aSubmitMethodMap[] =
{
{ XML_GET, FormSubmitMethod_GET },
{ XML_POST, FormSubmitMethod_POST },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aSubmitMethodMap;
}
break;
// CommandType
case epCommandType:
{
static SvXMLEnumMapEntry aCommandTypeMap[] =
{
{ XML_TABLE, CommandType::TABLE },
{ XML_QUERY, CommandType::QUERY },
{ XML_COMMAND, CommandType::COMMAND },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aCommandTypeMap;
}
break;
// NavigationBarMode
case epNavigationType:
{
static SvXMLEnumMapEntry aNavigationTypeMap[] =
{
{ XML_NONE, NavigationBarMode_NONE },
{ XML_CURRENT, NavigationBarMode_CURRENT },
{ XML_PARENT, NavigationBarMode_PARENT },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aNavigationTypeMap;
};
break;
// TabulatorCycle
case epTabCyle:
{
static SvXMLEnumMapEntry aTabulatorCycleMap[] =
{
{ XML_RECORDS, TabulatorCycle_RECORDS },
{ XML_CURRENT, TabulatorCycle_CURRENT },
{ XML_PAGE, TabulatorCycle_PAGE },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aTabulatorCycleMap;
};
break;
// FormButtonType
case epButtonType:
{
static SvXMLEnumMapEntry aFormButtonTypeMap[] =
{
{ XML_PUSH, FormButtonType_PUSH },
{ XML_SUBMIT, FormButtonType_SUBMIT },
{ XML_RESET, FormButtonType_RESET },
{ XML_URL, FormButtonType_URL },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aFormButtonTypeMap;
};
break;
// ListSourceType
case epListSourceType:
{
static SvXMLEnumMapEntry aListSourceTypeMap[] =
{
{ XML_VALUE_LIST, ListSourceType_VALUELIST },
{ XML_TABLE, ListSourceType_TABLE },
{ XML_QUERY, ListSourceType_QUERY },
{ XML_SQL, ListSourceType_SQL },
{ XML_SQL_PASS_THROUGH, ListSourceType_SQLPASSTHROUGH },
{ XML_TABLE_FIELDS, ListSourceType_TABLEFIELDS },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aListSourceTypeMap;
};
break;
// check state of a checkbox
case epCheckState:
{
static SvXMLEnumMapEntry aCheckStateMap[] =
{
{ XML_UNCHECKED, STATE_NOCHECK },
{ XML_CHECKED, STATE_CHECK },
{ XML_UNKNOWN, STATE_DONTKNOW },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aCheckStateMap;
};
break;
case epTextAlign:
{
static SvXMLEnumMapEntry aTextAlignMap[] =
{
{ XML_START, TextAlign::LEFT },
{ XML_CENTER, TextAlign::CENTER },
{ XML_END, TextAlign::RIGHT },
{ XML_JUSTIFY, (sal_uInt16)-1 },
{ XML_JUSTIFIED, (sal_uInt16)-1 },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aTextAlignMap;
};
break;
case epBorderWidth:
{
static SvXMLEnumMapEntry aBorderTypeMap[] =
{
{ XML_NONE, 0 },
{ XML_HIDDEN, 0 },
{ XML_SOLID, 2 },
{ XML_DOUBLE, 2 },
{ XML_DOTTED, 2 },
{ XML_DASHED, 2 },
{ XML_GROOVE, 1 },
{ XML_RIDGE, 1 },
{ XML_INSET, 1 },
{ XML_OUTSET, 1 },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aBorderTypeMap;
};
break;
case epFontEmphasis:
{
static SvXMLEnumMapEntry aFontEmphasisMap[] =
{
{ XML_NONE, FontEmphasisMark::NONE },
{ XML_DOT, FontEmphasisMark::DOT },
{ XML_CIRCLE, FontEmphasisMark::CIRCLE },
{ XML_DISC, FontEmphasisMark::DISC },
{ XML_ACCENT, FontEmphasisMark::ACCENT },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aFontEmphasisMap;
}
break;
case epFontRelief:
{
static SvXMLEnumMapEntry aFontReliefMap[] =
{
{ XML_NONE, FontRelief::NONE },
{ XML_ENGRAVED, FontRelief::ENGRAVED },
{ XML_EMBOSSED, FontRelief::EMBOSSED },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aFontReliefMap;
}
break;
case epListLinkageType:
{
static SvXMLEnumMapEntry aListLinkageMap[] =
{
{ XML_SELECTION, 0 },
{ XML_SELECTION_INDEXES, 1 },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aListLinkageMap;
}
break;
case epOrientation:
{
static SvXMLEnumMapEntry aOrientationMap[] =
{
{ XML_HORIZONTAL, ScrollBarOrientation::HORIZONTAL },
{ XML_VERTICAL, ScrollBarOrientation::VERTICAL },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aOrientationMap;
}
break;
case epVisualEffect:
{
static SvXMLEnumMapEntry aVisualEffectMap[] =
{
{ XML_NONE, VisualEffect::NONE },
{ XML_3D, VisualEffect::LOOK3D },
{ XML_FLAT, VisualEffect::FLAT },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aVisualEffectMap;
}
break;
case epImagePosition:
{
static SvXMLEnumMapEntry aImagePositionMap[] =
{
{ XML_START, 0 },
{ XML_END, 1 },
{ XML_TOP, 2 },
{ XML_BOTTOM, 3 },
{ XML_CENTER, (sal_uInt16)-1 },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aImagePositionMap;
}
break;
case epImageAlign:
{
static SvXMLEnumMapEntry aImageAlignMap[] =
{
{ XML_START, 0 },
{ XML_CENTER, 1 },
{ XML_END, 2 },
{ XML_TOKEN_INVALID, 0 }
};
rReturn = aImageAlignMap;
}
break;
case epImageScaleMode:
{
static const SvXMLEnumMapEntry aScaleModeMap[] =
{
{ XML_BACKGROUND_NO_REPEAT, ImageScaleMode::NONE },
{ XML_REPEAT, ImageScaleMode::NONE }, // repeating the image is not supported
{ XML_STRETCH, ImageScaleMode::ANISOTROPIC },
{ XML_SCALE, ImageScaleMode::ISOTROPIC },
{ XML_TOKEN_INVALID, ImageScaleMode::NONE }
};
rReturn = aScaleModeMap;
}
break;
case KNOWN_ENUM_PROPERTIES:
break;
}
}
return rReturn;
}
//.........................................................................
} // namespace xmloff
//.........................................................................
| 5,247 |
348 | {"nom":"Chaveignes","circ":"4ème circonscription","dpt":"Indre-et-Loire","inscrits":463,"abs":216,"votants":247,"blancs":27,"nuls":8,"exp":212,"res":[{"nuance":"REM","nom":"<NAME>","voix":113},{"nuance":"LR","nom":"<NAME>","voix":99}]} | 94 |
2,151 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""SiteCompare component to handle bulk scrapes.
Invokes a list of browsers and sends them to a list of URLs,
saving the rendered results to a specified directory, then
performs comparison operations on the resulting bitmaps and
saves the results
"""
# This line is necessary to work around a QEMU bug
import _imaging
import os # Functions for walking the directory tree
import sys # Used by main() for sys.exit
import types # Runtime type-checking
import command_line # command-line parsing
import drivers # Functions for driving keyboard/mouse/windows, OS-specific
import operators # Functions that, given two bitmaps as input, produce
# output depending on the performance of an operation
import scrapers # Functions that know how to capture a render from
# particular browsers
import commands.compare2 # compare one page in two versions of same browser
import commands.maskmaker # generate a mask based on repeated scrapes
import commands.measure # measure length of time a page takes to load
import commands.scrape # scrape a URL or series of URLs to a bitmap
# The timeload command is obsolete (too flaky); it may be reinstated
# later but for now it's been superceded by "measure"
# import commands.timeload # measure length of time a page takes to load
def Scrape(browsers, urls, window_size=(1024, 768),
window_pos=(0, 0), timeout=20, save_path=None, **kwargs):
"""Invoke one or more browsers over one or more URLs, scraping renders.
Args:
browsers: browsers to invoke with optional version strings
urls: URLs to visit
window_size: size of the browser window to display
window_pos: location of browser window
timeout: time (in seconds) to wait for page to load
save_path: root of save path, automatically appended with browser and
version
kwargs: miscellaneous keyword args, passed to scraper
Returns:
None
@TODO(jhaas): more parameters, or perhaps an indefinite dictionary
parameter, for things like length of time to wait for timeout, speed
of mouse clicks, etc. Possibly on a per-browser, per-URL, or
per-browser-per-URL basis
"""
if type(browsers) in types.StringTypes: browsers = [browsers]
if save_path is None:
# default save path is "scrapes" off the current root
save_path = os.path.join(os.path.split(__file__)[0], "Scrapes")
for browser in browsers:
# Browsers should be tuples of (browser, version)
if type(browser) in types.StringTypes: browser = (browser, None)
scraper = scrapers.GetScraper(browser)
full_path = os.path.join(save_path, browser[0], scraper.version)
drivers.windowing.PreparePath(full_path)
scraper.Scrape(urls, full_path, window_size, window_pos, timeout, kwargs)
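# Illustrative call (browser names and URL are hypothetical, not taken from this
# module's configuration):
#   Scrape(['chrome', 'firefox'], ['http://www.example.com'], timeout=30)
# captures example.com with both browsers under <save_path>/<browser>/<version>/.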
def Compare(base, compare, ops, root_path=None, out_path=None):
"""Compares a series of scrapes using a series of operators.
Args:
base: (browser, version) tuple of version to consider the baseline
compare: (browser, version) tuple of version to compare to
ops: list of operators plus operator arguments
root_path: root of the scrapes
out_path: place to put any output from the operators
Returns:
None
@TODO(jhaas): this method will likely change, to provide a robust and
well-defined way of chaining operators, applying operators conditionally,
and full-featured scripting of the operator chain. There also needs
to be better definition of the output; right now it's to stdout and
a log.txt file, with operator-dependent images saved for error output
"""
if root_path is None:
# default save path is "scrapes" off the current root
root_path = os.path.join(os.path.split(__file__)[0], "Scrapes")
if out_path is None:
out_path = os.path.join(os.path.split(__file__)[0], "Compares")
if type(base) in types.StringTypes: base = (base, None)
if type(compare) in types.StringTypes: compare = (compare, None)
if type(ops) in types.StringTypes: ops = [ops]
base_dir = os.path.join(root_path, base[0])
compare_dir = os.path.join(root_path, compare[0])
if base[1] is None:
# base defaults to earliest capture
base = (base[0], max(os.listdir(base_dir)))
if compare[1] is None:
# compare defaults to latest capture
compare = (compare[0], min(os.listdir(compare_dir)))
out_path = os.path.join(out_path, base[0], base[1], compare[0], compare[1])
drivers.windowing.PreparePath(out_path)
# TODO(jhaas): right now we're just dumping output to a log file
# (and the console), which works as far as it goes but isn't nearly
# robust enough. Change this after deciding exactly what we want to
# change it to.
out_file = open(os.path.join(out_path, "log.txt"), "w")
description_string = ("Comparing %s %s to %s %s" %
(base[0], base[1], compare[0], compare[1]))
out_file.write(description_string)
print description_string
base_dir = os.path.join(base_dir, base[1])
compare_dir = os.path.join(compare_dir, compare[1])
for filename in os.listdir(base_dir):
out_file.write("%s: " % filename)
if not os.path.isfile(os.path.join(compare_dir, filename)):
out_file.write("Does not exist in target directory\n")
print "File %s does not exist in target directory" % filename
continue
base_filename = os.path.join(base_dir, filename)
compare_filename = os.path.join(compare_dir, filename)
for op in ops:
if type(op) in types.StringTypes: op = (op, None)
module = operators.GetOperator(op[0])
ret = module.Compare(base_filename, compare_filename)
if ret is None:
print "%s: OK" % (filename,)
out_file.write("OK\n")
else:
print "%s: %s" % (filename, ret[0])
out_file.write("%s\n" % (ret[0]))
ret[1].save(os.path.join(out_path, filename))
out_file.close()
def main():
"""Main executable. Parse the command line and invoke the command."""
cmdline = command_line.CommandLine()
# The below two commands are currently unstable so have been disabled
# commands.compare2.CreateCommand(cmdline)
# commands.maskmaker.CreateCommand(cmdline)
commands.measure.CreateCommand(cmdline)
commands.scrape.CreateCommand(cmdline)
cmdline.ParseCommandLine()
return 0
if __name__ == "__main__":
sys.exit(main())
| 2,158 |
3,483 | <reponame>extremenelson/sirius
package info.ephyra.questionanalysis;
import info.ephyra.nlp.VerbFormConverter;
import info.ephyra.nlp.semantics.Predicate;
import info.ephyra.nlp.semantics.ontologies.Ontology;
import info.ephyra.nlp.semantics.ontologies.WordNet;
import info.ephyra.util.StringUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import net.didion.jwnl.data.POS;
/**
* Expands single- and multi-token terms by looking them up in one or more open-
* or specific-domain ontologies.
*
* @author <NAME>
* @version 2007-02-30
*/
public class TermExpander {
/** Maximum number of expansions. */
public static final int MAX_EXPANSIONS = 100;
/** Minimum weight of an expansion. */
public static final double MIN_EXPANSION_WEIGHT = 0;
/** Maximum number of expansions used in a query. */
public static final int MAX_EXPANSIONS_QUERY = 10;
/** Minimum weight of an expansion used in a query. */
public static final double MIN_EXPANSION_WEIGHT_QUERY = 0.9;
/**
* Checks if the term is the target of one of the predicates.
*
* @param term a term
* @param ps predicates in the same sentence
* @return <code>true</code> iff the term is a predicate target
*/
private static boolean isTarget(Term term, Predicate[] ps) {
String text = term.getText();
String lemma = WordNet.getLemma(text, POS.VERB);
for (Predicate p : ps) {
String verb = p.getVerb();
if (verb.matches(".*?\\b" + text + "\\b.*+"))
return true;
// also try the infinitive in case the verb form was modified in the
// term but not in the predicate (e.g. because the predicate was
// built from an offset annotation)
if (lemma != null && verb.matches(".*?\\b" + lemma + "\\b.*+"))
return true;
}
return false;
}
/**
* Drops expansions that do not contain any keywords and applies the given
* thresholds for the maximum number of expansions and the minimum weight of
* an expansion.
*
* @param expansions expansions along with their weights
* @param maxExpansions maximum number of expansions
* @param minExpansionWeight minimum weight of an expansion
* @param strict iff <code>true</code>, the threshold for the maximum number
* of expansions is applied strictly even if that means that
* some of a number of expansions with equal weights have to be
* dropped randomly
* @return minimum weight of an expansion in the reduced map
*/
public static double cutOffExpansions(Map<String, Double> expansions,
int maxExpansions, double minExpansionWeight, boolean strict) {
// drop duplicates and expansions that do not contain keywords
ArrayList<String> dropped = new ArrayList<String>();
HashSet<String> expansionSet = new HashSet<String>();
for (String expansion : expansions.keySet()) {
if (!expansionSet.add(StringUtils.normalize(expansion))) {
dropped.add(expansion);
continue;
}
String[] kws = KeywordExtractor.getKeywords(expansion);
if (kws.length == 0) dropped.add(expansion);
}
for (String expansion : dropped) expansions.remove(expansion);
// get hurdle for weights of expansions to satisfy both thresholds
if (expansions.size() == 0) return 0;
Double[] weights =
expansions.values().toArray(new Double[expansions.size()]);
Arrays.sort(weights);
double hurdle = weights[Math.max(weights.length - maxExpansions, 0)];
hurdle = Math.max(hurdle, minExpansionWeight);
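// Worked example (illustrative values): with sorted weights {0.2, 0.5, 0.8, 0.9},
// maxExpansions = 2 and minExpansionWeight = 0.3, the hurdle is
// max(weights[4 - 2], 0.3) = 0.8, so only the expansions weighted 0.8 and 0.9
// survive the cut below.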
// drop expansions that have a weight below that hurdle
dropped.clear();
for (String expansion : expansions.keySet())
if (expansions.get(expansion) < hurdle)
dropped.add(expansion);
for (String expansion : dropped) expansions.remove(expansion);
dropped.clear();
if (strict) {
// drop as many expansions with a weight equal to the hurdle
// as required to satisfy the MAX_EXPANSIONS threshold
int numToDrop = Math.max(expansions.size() - maxExpansions, 0);
for (String expansion : expansions.keySet()) {
if (numToDrop == 0) break;
if (expansions.get(expansion) == hurdle) {
dropped.add(expansion);
numToDrop--;
}
}
for (String expansion : dropped) expansions.remove(expansion);
}
return hurdle;
}
/**
* Drops expansions that do not contain any keywords and applies the default
* thresholds for the maximum number of expansions and the minimum weight of
* an expansion.
*
* @param expansions expansions along with their weights
* @param strict iff <code>true</code>, the threshold for the maximum number
* of expansions is applied strictly even if that means that
* some of a number of expansions with equal weights have to be
* dropped randomly
* @return minimum weight of an expansion in the reduced map
*/
public static double cutOffExpansions(Map<String, Double> expansions,
boolean strict) {
return cutOffExpansions(expansions, MAX_EXPANSIONS,
MIN_EXPANSION_WEIGHT, strict);
}
/**
* Creates a new expansion map and applies the thresholds for the maximum
* number of expansions and the minimum weight of an expansion for queries.
*
* @param expansions expansions along with their weights
* @param strict iff <code>true</code>, the threshold for the maximum number
* of expansions is applied strictly even if that means that
* some of a number of expansions with equal weights have to be
* dropped randomly
* @return new reduced expansion map for queries
*/
public static Map<String, Double> reduceExpansionsQuery(
Map<String, Double> expansions, boolean strict) {
HashMap<String, Double> map = new HashMap<String, Double>();
for (String expansion : expansions.keySet())
map.put(expansion, expansions.get(expansion));
cutOffExpansions(map, MAX_EXPANSIONS_QUERY, MIN_EXPANSION_WEIGHT_QUERY,
strict);
return map;
}
/**
* Expands a term by looking up related terms in ontologies.
*
* @param term a term
* @param ps predicates in the same sentence
* @param ontologies ontologies used to expand the term
*/
public static void expandTerm(Term term, Predicate[] ps,
Ontology[] ontologies) {
String text = term.getText();
String pos = term.getPos();
Map<String, Double> lemmas = new Hashtable<String, Double>();
Map<String, Double> expansions = new Hashtable<String, Double>();
// expand events, entities and modifiers
if (isTarget(term, ps) || pos.startsWith("VB")) {
// lemmatize verbs that are in WordNet
String lemma = WordNet.getLemma(text, POS.VERB);
if (lemma == null) lemma = text;
// set lemma if the POS was misleading
if (!pos.startsWith("VB")) term.setLemma(lemma);
// expand event
for (Ontology ontology : ontologies) {
Map<String, Double> expanded = ontology.expandEvent(lemma);
lemmas.putAll(expanded);
}
// ensure that there are at most MAX_EXPANSIONS expansions with
// weights of at least MIN_EXPANSION_WEIGHT
cutOffExpansions(lemmas, true);
// restore verb form
if (pos.equals("VBZ")) {
// third person singular
for (String exp : lemmas.keySet()) {
double weight = lemmas.get(exp);
String form =
VerbFormConverter.infinitiveToThirdPersonS(exp);
expansions.put(form, weight);
}
} else if (pos.equals("VBG")) {
// gerund
for (String exp : lemmas.keySet()) {
double weight = lemmas.get(exp);
String[] forms =
VerbFormConverter.infinitiveToGerund(exp);
for (String form : forms) expansions.put(form, weight);
}
} else if (pos.equals("VBD")) {
// simple past
for (String exp : lemmas.keySet()) {
double weight = lemmas.get(exp);
String[] forms =
VerbFormConverter.infinitiveToSimplePast(exp);
for (String form : forms) expansions.put(form, weight);
}
} else if (pos.equals("VBN")) {
// past participle
for (String exp : lemmas.keySet()) {
double weight = lemmas.get(exp);
String[] forms =
VerbFormConverter.infinitiveToPastParticiple(exp);
for (String form : forms) expansions.put(form, weight);
}
}
} else if (pos.startsWith("JJ") || pos.startsWith("RB")) {
// get modifier type
POS modType =
(pos.startsWith("JJ")) ? POS.ADJECTIVE : POS.ADVERB;
// lemmatize adjectives and adverbs that are in WordNet
String lemma = WordNet.getLemma(text, modType);
if (lemma == null) lemma = text;
// expand modifier
for (Ontology ontology : ontologies) {
Map<String, Double> expanded =
ontology.expandModifier(lemma, modType);
lemmas.putAll(expanded);
}
// ensure that there are at most MAX_EXPANSIONS expansions with
// weights of at least MIN_EXPANSION_WEIGHT
cutOffExpansions(lemmas, true);
} else {
// lemmatize nouns that are in WordNet
String lemma;
if (pos.startsWith("COMPOUND"))
lemma = WordNet.getCompoundLemma(text, POS.NOUN); // compound
else
lemma = WordNet.getLemma(text, POS.NOUN); // single token
if (lemma == null) lemma = text;
// expand entity
for (Ontology ontology : ontologies) {
Map<String, Double> expanded = ontology.expandEntity(lemma);
lemmas.putAll(expanded);
}
// ensure that there are at most MAX_EXPANSIONS expansions with
// weights of at least MIN_EXPANSION_WEIGHT
cutOffExpansions(lemmas, true);
// TODO restore plural forms if possible
}
term.setExpansionLemmas(lemmas);
term.setExpansions((expansions.size() > 0) ? expansions : lemmas);
}
/**
* Expands all terms by looking up related terms in ontologies.
*
* @param terms the terms
* @param ps predicates in the same sentence
* @param ontologies ontologies used to expand the term
*/
public static void expandTerms(Term[] terms, Predicate[] ps,
Ontology[] ontologies) {
for (Term term : terms) expandTerm(term, ps, ontologies);
}
/**
* Expands a phrase by replacing the terms that occur within the phrase by
* their expansions. All possible combinations of expansions of the
* individual terms are formed and each is assigned the product of the
* weights of the expansions as a combined weight. The (at most)
* <code>MAX_EXPANSIONS</code> resulting phrases with the highest weights
* are returned.
*
* @param phrase phrase to expand
* @param terms expanded terms that potentially occur within the phrase
* @return expansions and their weights
*/
public static Map<String, Double> expandPhrase(String phrase,
Term[] terms) {
// regular expressions that match the terms
List<String> patterns = new ArrayList<String>();
// maps the terms to their expansions
Map<String, Map<String, Double>> expansionsMap =
new Hashtable<String, Map<String, Double>>();
for (Term term : terms) {
Map<String, Double> expansions = term.getExpansions();
if (expansions.size() > 0) {
String pattern = "\\b" + term.getText() + "\\b";
patterns.add(pattern);
expansionsMap.put(pattern, expansions);
}
}
Map<String, Double> phraseExps = new Hashtable<String, Double>();
phraseExps.put(phrase, 1d);
// obtain phrase expansions by combining term expansions
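// Illustrative example (hypothetical phrase and weights): for "invent the telephone"
// with "invent" expanded to {devise: 0.9} and "telephone" to {phone: 0.8}, the loop
// below produces "devise the telephone" (0.9), "invent the phone" (0.8) and
// "devise the phone" (0.72) before the original phrase is removed and the cut-off applied.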
while (patterns.size() > 0) {
String[] phrases =
phraseExps.keySet().toArray(new String[phraseExps.size()]);
String pattern = patterns.get(0);
Map<String, Double> expansions = expansionsMap.get(pattern);
for (String phraseExp : phrases) {
Matcher m =
Pattern.compile(".*?" + pattern + ".*+").matcher(phraseExp);
if (m.matches()) {
for (String expansion : expansions.keySet()) {
String expanded = phraseExp.replaceFirst(pattern,
expansion);
Double weight = phraseExps.get(phraseExp) *
expansions.get(expansion);
phraseExps.put(expanded, weight);
}
} else {
// no (further) occurrences of the term
patterns.remove(0);
break;
}
}
}
// ensure that there are at most MAX_EXPANSIONS phrases with weights of
// at least MIN_EXPANSION_WEIGHT
phraseExps.remove(phrase);
cutOffExpansions(phraseExps, true);
return phraseExps;
}
}
| 4,391 |
2,360 | <gh_stars>1000+
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAneufinder(RPackage):
"""Analysis of Copy Number Variation in Single-Cell-Sequencing Data
AneuFinder implements functions for copy-number detection, breakpoint
detection, and karyotype and heterogeneity analysis in single-cell whole
genome sequencing and strand-seq data."""
homepage = "https://bioconductor.org/packages/AneuFinder"
git = "https://git.bioconductor.org/packages/AneuFinder.git"
version('1.18.0', commit='<PASSWORD>')
version('1.12.1', commit='<KEY>')
version('1.10.2', commit='<KEY>')
version('1.8.0', commit='36a<PASSWORD>44add5aafbe21c37a1baaea6a5<PASSWORD>')
version('1.6.0', commit='<KEY>')
version('1.4.0', commit='e5bdf4<PASSWORD>4f84ee5680986826ff<PASSWORD>36<PASSWORD>3b8e')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.10.2:', type=('build', 'run'))
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-ggplot2', type=('build', 'run'))
depends_on('r-cowplot', type=('build', 'run'))
depends_on('r-aneufinderdata', type=('build', 'run'))
depends_on('r-foreach', type=('build', 'run'))
depends_on('r-doparallel', type=('build', 'run'))
depends_on('r-biocgenerics', when='@1.4.0:1.6.0', type=('build', 'run'))
depends_on('[email protected]:', when='@1.18.0:', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-genomeinfodb', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-rsamtools', type=('build', 'run'))
depends_on('r-bamsignals', type=('build', 'run'))
depends_on('r-dnacopy', type=('build', 'run'))
depends_on('r-ecp', when='@1.8.0:', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
depends_on('r-genomicalignments', type=('build', 'run'))
depends_on('r-reshape2', type=('build', 'run'))
depends_on('r-ggdendro', type=('build', 'run'))
depends_on('r-ggrepel', type=('build', 'run'))
depends_on('r-reordercluster', type=('build', 'run'))
depends_on('r-mclust', type=('build', 'run'))
| 997 |
372 | package com.kongzue.baseframeworkdemo.fragment;
import android.content.Intent;
import android.graphics.Paint;
import android.net.Uri;
import android.view.View;
import android.widget.TextView;
import com.google.android.material.floatingactionbutton.FloatingActionButton;
import com.kongzue.baseframework.BaseApp;
import com.kongzue.baseframework.BaseFragment;
import com.kongzue.baseframework.interfaces.Layout;
import com.kongzue.baseframework.interfaces.OnClick;
import com.kongzue.baseframework.util.JumpParameter;
import com.kongzue.baseframework.util.OnJumpResponseListener;
import com.kongzue.baseframework.util.Preferences;
import com.kongzue.baseframeworkdemo.App;
import com.kongzue.baseframeworkdemo.activity.DemoActivity;
import com.kongzue.baseframeworkdemo.R;
/**
* @author: Kongzue
* @github: https://github.com/kongzue/
* @homepage: http://kongzue.com/
* @mail: <EMAIL>
* @createTime: 2019/8/20 21:41
*/
// Use the @Layout annotation to bind the layout to be displayed directly
@Layout(R.layout.fragment_introduction)
/**
* The generic type here constrains the bound target Activity; it is optional.
* If you specify the target Activity, the "me." keyword can be used to directly call that Activity's public members and methods
*/
public class IntroductionFragment extends BaseFragment<DemoActivity> {
private TextView linkHome;
private FloatingActionButton btnFab;
@Override
// Bind layout widgets here; you can also use @BindView(resId) to initialize widgets
public void initViews() {
linkHome = findViewById(R.id.link_home);
btnFab = findViewById(R.id.btn_fab);
}
@Override
// Write initialization work here, such as loading data and assigning values to UI widgets
public void initDatas() {
linkHome.getPaint().setFlags(Paint.UNDERLINE_TEXT_FLAG);
}
@Override
// Bind events, callbacks and other behavior to widgets here
public void setEvents() {
linkHome.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Uri uri = Uri.parse("https://github.com/kongzue/BaseFramework");
Intent intent = new Intent(Intent.ACTION_VIEW, uri);
startActivity(intent);
}
});
}
@OnClick(R.id.btn_fab)
// You can also use the @OnClick annotation to bind click events directly
public void startTest() {
jump(1, new JumpParameter().put("tip", "开始尝试功能!"), new OnJumpResponseListener() {
@Override
public void OnResponse(JumpParameter jumpParameter) {
toast("你刚刚使用了:" + jumpParameter.getString("function"));
}
});
}
@Override
/**
* Called when this Fragment is shown; the isSwitchFragment flag indicates whether the Fragment was switched to from another Fragment.
* If false, the call may have been triggered by the app returning from the background to the foreground
*/
public void onShow(boolean isSwitchFragment) {
log("IntroductionFragment: onShow");
super.onShow(isSwitchFragment);
}
@Override
public void onHide() {
log("IntroductionFragment: onHide");
super.onHide();
}
} | 1,541 |
348 | <gh_stars>100-1000
{"nom":"Chadenac","circ":"4ème circonscription","dpt":"Charente-Maritime","inscrits":362,"abs":211,"votants":151,"blancs":11,"nuls":2,"exp":138,"res":[{"nuance":"REM","nom":"<NAME>","voix":73},{"nuance":"LR","nom":"<NAME>","voix":65}]} | 101 |
2,400 | <gh_stars>1000+
/*
* Copyright 2013 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.optaplanner.examples.common.persistence;
import static org.junit.jupiter.api.DynamicTest.dynamicTest;
import java.io.File;
import java.io.FileFilter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.function.Predicate;
import java.util.stream.Stream;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.DirectoryFileFilter;
import org.junit.jupiter.api.DynamicTest;
import org.junit.jupiter.api.TestFactory;
import org.optaplanner.core.api.domain.solution.PlanningSolution;
import org.optaplanner.examples.common.app.CommonApp;
import org.optaplanner.examples.common.app.LoggingTest;
import org.optaplanner.examples.common.business.ProblemFileComparator;
/**
* @param <Solution_> the solution type, the class with the {@link PlanningSolution} annotation
*/
public abstract class ImportDataFilesTest<Solution_> extends LoggingTest {
protected abstract AbstractSolutionImporter<Solution_> createSolutionImporter();
protected abstract String getDataDirName();
protected Predicate<File> dataFileInclusionFilter() {
return file -> true;
}
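// Hypothetical override in a subclass: restricting the generated tests to data
// files whose name contains "small" could be done by returning
//   file -> file.getName().contains("small")
// from dataFileInclusionFilter().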
private static List<File> getInputFiles(String dataDirName, AbstractSolutionImporter<?> solutionImporter) {
File importDir = new File(CommonApp.determineDataDir(dataDirName), "import");
List<File> fileList;
if (solutionImporter.isInputFileDirectory()) {
// Non recursively
fileList = new ArrayList<>(Arrays.asList(
Objects.requireNonNull(importDir.listFiles((FileFilter) DirectoryFileFilter.INSTANCE))));
} else {
// recursively
fileList = new ArrayList<>(
FileUtils.listFiles(importDir, new String[] { solutionImporter.getInputFileSuffix() }, true));
}
fileList.sort(new ProblemFileComparator());
return fileList;
}
@TestFactory
Stream<DynamicTest> readSolution() {
AbstractSolutionImporter<Solution_> solutionImporter = createSolutionImporter();
return getInputFiles(getDataDirName(), solutionImporter).stream()
.filter(dataFileInclusionFilter())
.map(importFile -> dynamicTest(
importFile.getName(),
() -> solutionImporter.readSolution(importFile)));
}
}
| 1,052 |
14,668 | <filename>components/viz/common/features.cc
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/viz/common/features.h"
#include <string>
#include "base/command_line.h"
#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"
#include "base/system/sys_info.h"
#include "build/chromeos_buildflags.h"
#include "components/viz/common/delegated_ink_prediction_configuration.h"
#include "components/viz/common/switches.h"
#include "components/viz/common/viz_utils.h"
#include "gpu/config/gpu_finch_features.h"
#include "gpu/config/gpu_switches.h"
#include "media/media_buildflags.h"
#if defined(OS_ANDROID)
#include "base/android/build_info.h"
#endif
namespace {
// FieldTrialParams for `DynamicSchedulerForDraw` and
// `kDynamicSchedulerForClients`.
const char kDynamicSchedulerPercentile[] = "percentile";
} // namespace
namespace features {
// Enables the use of power hint APIs on Android.
const base::Feature kAdpf{"Adpf", base::FEATURE_DISABLED_BY_DEFAULT};
// Target duration used for power hint on Android.
const base::FeatureParam<int> kAdpfTargetDurationMs{&kAdpf,
"AdpfTargetDurationMs", 12};
const base::Feature kEnableOverlayPrioritization {
"EnableOverlayPrioritization",
#if BUILDFLAG(USE_CHROMEOS_PROTECTED_MEDIA)
base::FEATURE_ENABLED_BY_DEFAULT
#else
base::FEATURE_DISABLED_BY_DEFAULT
#endif
};
const base::Feature kUseMultipleOverlays{"UseMultipleOverlays",
base::FEATURE_DISABLED_BY_DEFAULT};
const char kMaxOverlaysParam[] = "max_overlays";
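// Illustrative lookup (the call site and default are not part of this file):
// base::GetFieldTrialParamByFeatureAsInt(kUseMultipleOverlays, kMaxOverlaysParam,
// /*default_value=*/...) would return the configured overlay cap.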
const base::Feature kDelegatedCompositing{"DelegatedCompositing",
base::FEATURE_DISABLED_BY_DEFAULT};
const base::Feature kSimpleFrameRateThrottling{
"SimpleFrameRateThrottling", base::FEATURE_DISABLED_BY_DEFAULT};
// Use the SkiaRenderer.
const base::Feature kUseSkiaRenderer {
"UseSkiaRenderer",
#if defined(OS_WIN) || defined(OS_ANDROID) || BUILDFLAG(IS_CHROMEOS_LACROS) || \
defined(OS_LINUX) || defined(OS_FUCHSIA) || defined(OS_MAC)
base::FEATURE_ENABLED_BY_DEFAULT
#else
base::FEATURE_DISABLED_BY_DEFAULT
#endif
};
// Kill-switch to disable de-jelly, even if flags/properties indicate it should
// be enabled.
const base::Feature kDisableDeJelly{"DisableDeJelly",
base::FEATURE_DISABLED_BY_DEFAULT};
// On platform and configuration where viz controls the allocation of frame
// buffers (ie SkiaOutputDeviceBufferQueue is used), allocate and release frame
// buffers on demand.
const base::Feature kDynamicBufferQueueAllocation{
"DynamicBufferQueueAllocation", base::FEATURE_DISABLED_BY_DEFAULT};
#if defined(OS_ANDROID)
// When wide color gamut content from the web is encountered, promote our
// display to wide color gamut if supported.
const base::Feature kDynamicColorGamut{"DynamicColorGamut",
base::FEATURE_DISABLED_BY_DEFAULT};
#endif
// Uses glClear to composite solid color quads whenever possible.
const base::Feature kFastSolidColorDraw{"FastSolidColorDraw",
base::FEATURE_DISABLED_BY_DEFAULT};
// Submit CompositorFrame from SynchronousLayerTreeFrameSink directly to viz in
// WebView.
const base::Feature kVizFrameSubmissionForWebView{
"VizFrameSubmissionForWebView", base::FEATURE_DISABLED_BY_DEFAULT};
const base::Feature kUsePreferredIntervalForVideo{
"UsePreferredIntervalForVideo",
#if defined(OS_ANDROID)
base::FEATURE_DISABLED_BY_DEFAULT
#else
base::FEATURE_ENABLED_BY_DEFAULT
#endif
};
// Whether we should use the real buffers corresponding to overlay candidates in
// order to do a pageflip test rather than allocating test buffers.
const base::Feature kUseRealBuffersForPageFlipTest{
"UseRealBuffersForPageFlipTest", base::FEATURE_ENABLED_BY_DEFAULT};
#if defined(OS_FUCHSIA)
// Enables SkiaOutputDeviceBufferQueue instead of Vulkan swapchain on Fuchsia.
const base::Feature kUseSkiaOutputDeviceBufferQueue{
"UseSkiaOutputDeviceBufferQueue", base::FEATURE_ENABLED_BY_DEFAULT};
#endif
// Whether we should log extra debug information to webrtc native log.
const base::Feature kWebRtcLogCapturePipeline{
"WebRtcLogCapturePipeline", base::FEATURE_DISABLED_BY_DEFAULT};
#if defined(OS_WIN)
// Enables swap chains to call SetPresentDuration to request DWM/OS to reduce
// vsync.
const base::Feature kUseSetPresentDuration{"UseSetPresentDuration",
base::FEATURE_DISABLED_BY_DEFAULT};
#endif // OS_WIN
// Enables platform supported delegated ink trails instead of Skia backed
// delegated ink trails.
const base::Feature kUsePlatformDelegatedInk{"UsePlatformDelegatedInk",
base::FEATURE_ENABLED_BY_DEFAULT};
// Used to debug Android WebView Vulkan composite. Composite to an intermediate
// buffer and draw the intermediate buffer to the secondary command buffer.
const base::Feature kWebViewVulkanIntermediateBuffer{
"WebViewVulkanIntermediateBuffer", base::FEATURE_DISABLED_BY_DEFAULT};
#if defined(OS_ANDROID)
// Hardcoded as disabled for WebView to have a different default for
// UseSurfaceLayerForVideo from chrome.
const base::Feature kUseSurfaceLayerForVideoDefault{
"UseSurfaceLayerForVideoDefault", base::FEATURE_ENABLED_BY_DEFAULT};
// Historically media on android hardcoded SRGB color space because of lack of
// color space support in surface control. This controls if we want to use real
// color space in DisplayCompositor.
const base::Feature kUseRealVideoColorSpaceForDisplay{
"UseRealVideoColorSpaceForDisplay", base::FEATURE_DISABLED_BY_DEFAULT};
#endif
// Used by CC to throttle frame production of older surfaces. Used by the
// Browser to batch SurfaceSync calls sent to the Renderer for properties that can
// change in close proximity to each other.
const base::Feature kSurfaceSyncThrottling{"SurfaceSyncThrottling",
base::FEATURE_DISABLED_BY_DEFAULT};
const base::Feature kDrawPredictedInkPoint{"DrawPredictedInkPoint",
base::FEATURE_DISABLED_BY_DEFAULT};
const char kDraw1Point12Ms[] = "1-pt-12ms";
const char kDraw2Points6Ms[] = "2-pt-6ms";
const char kDraw1Point6Ms[] = "1-pt-6ms";
const char kDraw2Points3Ms[] = "2-pt-3ms";
const char kPredictorKalman[] = "kalman";
const char kPredictorLinearResampling[] = "linear-resampling";
const char kPredictorLinear1[] = "linear-1";
const char kPredictorLinear2[] = "linear-2";
const char kPredictorLsq[] = "lsq";
// Used by Viz to parameterize adjustments to scheduler deadlines.
const base::Feature kDynamicSchedulerForDraw{"DynamicSchedulerForDraw",
base::FEATURE_DISABLED_BY_DEFAULT};
// Used to parameterize adjustments to clients' deadlines.
const base::Feature kDynamicSchedulerForClients{
"DynamicSchedulerForClients", base::FEATURE_DISABLED_BY_DEFAULT};
#if defined(OS_MAC)
const base::Feature kMacCAOverlayQuad{"MacCAOverlayQuads",
base::FEATURE_DISABLED_BY_DEFAULT};
// The maximum supported overlay quad number on Mac CALayerOverlay.
// The default is set to -1. When MaxNum is < 0, the default in CALayerOverlay
// will be used instead.
const base::FeatureParam<int> kMacCAOverlayQuadMaxNum{
&kMacCAOverlayQuad, "MacCAOverlayQuadMaxNum", -1};
#endif
bool IsAdpfEnabled() {
// TODO(crbug.com/1157620): Limit this to correct android version.
return base::FeatureList::IsEnabled(kAdpf);
}
bool IsClipPrewalkDamageEnabled() {
static constexpr base::Feature kClipPrewalkDamage{
"ClipPrewalkDamage", base::FEATURE_DISABLED_BY_DEFAULT};
return base::FeatureList::IsEnabled(kClipPrewalkDamage);
}
bool IsOverlayPrioritizationEnabled() {
return base::FeatureList::IsEnabled(kEnableOverlayPrioritization);
}
bool IsDelegatedCompositingEnabled() {
return base::FeatureList::IsEnabled(kDelegatedCompositing);
}
// Whether a synchronous IPC should be used when destroying windows. This exists to test
// the impact of removing the sync IPC.
bool IsSyncWindowDestructionEnabled() {
static constexpr base::Feature kSyncWindowDestruction{
"SyncWindowDestruction", base::FEATURE_ENABLED_BY_DEFAULT};
return base::FeatureList::IsEnabled(kSyncWindowDestruction);
}
bool IsSimpleFrameRateThrottlingEnabled() {
return base::FeatureList::IsEnabled(kSimpleFrameRateThrottling);
}
bool IsUsingSkiaRenderer() {
#if defined(OS_ANDROID)
// We don't support KitKat. Check for it before looking at the feature flag
// so that KitKat doesn't show up in Control or Enabled experiment group.
if (base::android::BuildInfo::GetInstance()->sdk_int() <=
base::android::SDK_VERSION_KITKAT)
return false;
#endif
#if BUILDFLAG(IS_CHROMEOS_ASH)
// TODO(https://crbug.com/1145180): SkiaRenderer isn't supported on Chrome
// OS boards that still use the legacy video decoder.
auto* command_line = base::CommandLine::ForCurrentProcess();
if (command_line->HasSwitch(
switches::kPlatformDisallowsChromeOSDirectVideoDecoder))
return false;
#endif
return base::FeatureList::IsEnabled(kUseSkiaRenderer) ||
features::IsUsingVulkan();
}
#if defined(OS_ANDROID)
bool IsDynamicColorGamutEnabled() {
if (viz::AlwaysUseWideColorGamut())
return false;
auto* build_info = base::android::BuildInfo::GetInstance();
if (build_info->sdk_int() < base::android::SDK_VERSION_Q)
return false;
return base::FeatureList::IsEnabled(kDynamicColorGamut);
}
#endif
bool IsUsingFastPathForSolidColorQuad() {
return base::FeatureList::IsEnabled(kFastSolidColorDraw);
}
bool IsUsingVizFrameSubmissionForWebView() {
return base::FeatureList::IsEnabled(kVizFrameSubmissionForWebView);
}
bool IsUsingPreferredIntervalForVideo() {
return base::FeatureList::IsEnabled(kUsePreferredIntervalForVideo);
}
bool IsVizHitTestingDebugEnabled() {
return base::CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableVizHitTestDebug);
}
bool ShouldUseRealBuffersForPageFlipTest() {
return base::FeatureList::IsEnabled(kUseRealBuffersForPageFlipTest);
}
bool ShouldWebRtcLogCapturePipeline() {
return base::FeatureList::IsEnabled(kWebRtcLogCapturePipeline);
}
#if defined(OS_WIN)
bool ShouldUseSetPresentDuration() {
return base::FeatureList::IsEnabled(kUseSetPresentDuration);
}
#endif // OS_WIN
absl::optional<int> ShouldDrawPredictedInkPoints() {
if (!base::FeatureList::IsEnabled(kDrawPredictedInkPoint))
return absl::nullopt;
std::string predicted_points = GetFieldTrialParamValueByFeature(
kDrawPredictedInkPoint, "predicted_points");
if (predicted_points == kDraw1Point12Ms)
return viz::PredictionConfig::k1Point12Ms;
else if (predicted_points == kDraw2Points6Ms)
return viz::PredictionConfig::k2Points6Ms;
else if (predicted_points == kDraw1Point6Ms)
return viz::PredictionConfig::k1Point6Ms;
else if (predicted_points == kDraw2Points3Ms)
return viz::PredictionConfig::k2Points3Ms;
NOTREACHED();
return absl::nullopt;
}
std::string InkPredictor() {
if (!base::FeatureList::IsEnabled(kDrawPredictedInkPoint))
return "";
return GetFieldTrialParamValueByFeature(kDrawPredictedInkPoint, "predictor");
}
bool ShouldUsePlatformDelegatedInk() {
return base::FeatureList::IsEnabled(kUsePlatformDelegatedInk);
}
#if defined(OS_ANDROID)
bool UseSurfaceLayerForVideo() {
// Allow enabling UseSurfaceLayerForVideo if webview is using surface control.
if (::features::IsAndroidSurfaceControlEnabled()) {
return true;
}
return base::FeatureList::IsEnabled(kUseSurfaceLayerForVideoDefault);
}
bool UseRealVideoColorSpaceForDisplay() {
// We need Android S for proper color space support in SurfaceControl.
if (base::android::BuildInfo::GetInstance()->sdk_int() <
base::android::SdkVersion::SDK_VERSION_S)
return false;
return base::FeatureList::IsEnabled(
features::kUseRealVideoColorSpaceForDisplay);
}
#endif
bool IsSurfaceSyncThrottling() {
return base::FeatureList::IsEnabled(kSurfaceSyncThrottling);
}
// Used by Viz to determine if viz::DisplayScheduler should dynamically adjust
// its frame deadline. Returns the percentile of historic draw times to base the
// deadline on. Or absl::nullopt if the feature is disabled.
absl::optional<double> IsDynamicSchedulerEnabledForDraw() {
if (!base::FeatureList::IsEnabled(kDynamicSchedulerForDraw))
return absl::nullopt;
double result = base::GetFieldTrialParamByFeatureAsDouble(
kDynamicSchedulerForDraw, kDynamicSchedulerPercentile, -1.0);
if (result < 0.0)
return absl::nullopt;
return result;
}
// Used by Viz to determine if the frame deadlines provided to CC should be
// dynamically adjusted. Returns the percentile of historic draw times to base
// the deadline on. Or absl::nullopt if the feature is disabled.
absl::optional<double> IsDynamicSchedulerEnabledForClients() {
if (!base::FeatureList::IsEnabled(kDynamicSchedulerForClients))
return absl::nullopt;
double result = base::GetFieldTrialParamByFeatureAsDouble(
kDynamicSchedulerForClients, kDynamicSchedulerPercentile, -1.0);
if (result < 0.0)
return absl::nullopt;
return result;
}
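// Illustrative example for the two helpers above (not part of the original comments):
// with either feature enabled and its "percentile" field trial param set to "0.90",
// the helper returns 0.9; if the param is missing or negative it returns absl::nullopt
// and callers fall back to their default deadline behavior.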
int MaxOverlaysConsidered() {
if (!IsOverlayPrioritizationEnabled()) {
return 1;
}
if (!base::FeatureList::IsEnabled(kUseMultipleOverlays)) {
return 1;
}
return base::GetFieldTrialParamByFeatureAsInt(kUseMultipleOverlays,
kMaxOverlaysParam, 2);
}
} // namespace features
| 4,859 |
1,253 | <reponame>JD235/algorithms
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package com.sg.hacktoberfest;
import java.util.Scanner;
/**
*
* @author Gina
*/
public class FactorialFun {
public static void main(String[] args) {
int numberEntered;
int factorial;
numberEntered = readInt("Enter a number: ", 0);
factorial = numberEntered;
int tempNum = numberEntered;
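        // Worked example (illustrative only): for numberEntered = 5 the loop below
        // multiplies 5 * 4 * 3 * 2 = 120, decrementing tempNum after each pass.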
if (numberEntered < 2) {
factorial = 1;
} else {
for (int i = 1; i < numberEntered; i++) {
factorial = factorial * (tempNum - 1);
tempNum = tempNum - 1;
}
}
System.out.println("\nThe factorial of " + numberEntered + " is " + factorial);
}
public static int readInt(String prompt, int min) {
Scanner sc = new Scanner(System.in);
int num = 0;
boolean validInput = false;
while (validInput == false) {
System.out.println(prompt);
String intString = sc.nextLine();
try {
num = Integer.parseInt(intString);
validInput = true;
if (num < min) {
validInput = false;
System.out.println("Please enter a number greater than or equal to " + min + ".");
}
} catch (NumberFormatException e) {
System.out.println("Please enter a valid number.\n");
}
}
return num;
}
} | 762 |
1,088 | <reponame>amznero/graph-learn
/* Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "graphlearn/common/base/host.h"
#include <arpa/inet.h>
#include <unistd.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netdb.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <vector>
#include "graphlearn/common/base/log.h"
namespace graphlearn {
std::string GetLocalEndpoint(int32_t port) {
char host_name[128] = {0};
int ret = gethostname(host_name, sizeof(host_name));
if (ret < 0) {
LOG(FATAL) << "gethostname error: " << ret;
return "";
}
hostent* hptr = gethostbyname(host_name);
if (hptr == NULL) {
LOG(FATAL) << "gethostbyname error";
return "";
}
int i = 0;
while (hptr->h_addr_list[i] != NULL) {
std::string ip = inet_ntoa(*(struct in_addr*)hptr->h_addr_list[i]);
if (ip != "127.0.0.1") {
return ip + ":" + std::to_string(port);
} else {
++i;
}
}
return "";
}
int32_t GetAvailablePort() {
int sock = socket(AF_INET, SOCK_STREAM, 0);
if (sock < 0) {
LOG(FATAL) << "GetAvailablePort with socket error.";
return -1;
}
struct sockaddr_in serv_addr;
bzero(reinterpret_cast<char*>(&serv_addr), sizeof(serv_addr));
serv_addr.sin_family = AF_INET;
serv_addr.sin_addr.s_addr = INADDR_ANY;
// auto-detect port.
serv_addr.sin_port = 0;
if (bind(sock, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) < 0) {
LOG(FATAL) << "GetAvailablePort failed with auto-binding port.";
return -1;
}
socklen_t len = sizeof(serv_addr);
if (getsockname(sock, (struct sockaddr *)&serv_addr, &len) == -1) {
LOG(FATAL) << "GetAvailablePort failed with geting socket name.";
return -1;
}
if (close(sock) < 0) {
LOG(FATAL) << "GetAvailablePort failed with closing socket.";
return -1;
}
return int32_t(ntohs(serv_addr.sin_port));
}
} // namespace graphlearn
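// Usage sketch (illustrative only, not part of the library): the two helpers can be
// combined to advertise a locally reachable endpoint on a free port, e.g.
//   int32_t port = graphlearn::GetAvailablePort();
//   std::string endpoint = graphlearn::GetLocalEndpoint(port);  // e.g. "10.0.0.5:12345"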
| 954 |
1,539 | <reponame>Ayudjj/mvp
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
""" A collection of only the requests that fuzzing will be performed on """
from __future__ import print_function
import time
import engine.core.requests
import engine.primitives as primitives
class FuzzingRequestCollection(object):
def __init__(self):
# Reference to Request objects that will be fuzzed
self._requests = []
# Set of requests that will be processed during pre or post-processing
# These requests will not be included when fuzzing
self._preprocessing_requests = []
self._postprocessing_requests = []
def __iter__(self):
""" Iterate over FuzzingRequests """
return iter(self._requests)
def __deepcopy__(self, memo):
""" Don't deepcopy this object, just return its reference"""
return self
def __contains__(self, request):
""" Returns whether or not a request exists in the request list """
return request in self._requests
def add_request(self, request):
""" Adds a new request to the collection of requests to be fuzzed """
if request not in self._requests:
self._requests.append(request)
def exclude_preprocessing_request(self, request):
""" Removes a request from the collection of requests to be fuzzed
and adds that request to the preprocessing requests set
@param request: The preprocessing request
@type request: Request
@return: None
@rtype : None
"""
if request in self._requests:
self._requests.remove(request)
if request not in self._preprocessing_requests:
self._preprocessing_requests.append(request)
def exclude_postprocessing_request(self, request):
""" Removes a request from the collection of requests to be fuzzed
and adds that request to the postprocessing requests set
@param request: The postprocessing request
@type request: Request
@return: None
@rtype : None
"""
if request in self._requests:
self._requests.remove(request)
if request not in self._postprocessing_requests:
self._postprocessing_requests.append(request)
def set_all_requests(self, requests):
""" Sets all requests in the request list """
self._requests = list(requests)
@property
def all_requests(self):
""" Returns the list of all requests that were defined in this
request collection. This included the current request list and
the excluded requests
@return: The list of requests
@rtype : List[Request]
"""
return list(self._preprocessing_requests) + list(self._requests) + list(self._postprocessing_requests)
@property
def preprocessing_requests(self):
""" Returns the list of the requests handled during preprocessing
@return: The list of preprocessing requests
@rtype : List[Request]
"""
return list(self._preprocessing_requests)
@property
def postprocessing_requests(self):
""" Returns the list of the requests handled during postprocessing
@return: The list of postprocessing requests
@rtype : List[Request]
"""
return list(self._postprocessing_requests)
@property
def requests(self):
""" Returns the list of requests being fuzzed
@return: The list of requests being fuzzed
@rtype : List[Request]
"""
return list(self._requests)
@property
def size(self):
""" Returns the number of requests being fuzzed
@return: The number of request in collection.
@rtype : Int
"""
return len(self._requests)
@property
def size_all_requests(self):
""" Returns the number of requests being fuzzed plus the number
of requests used for pre and post-processing stages
@return: The total number of requests
@rtype : Int
"""
return self.size + len(self._preprocessing_requests) + len(self._postprocessing_requests)
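# Minimal usage sketch (illustrative; assumes `req_a` and `req_b` are engine Request objects):
#
#   collection = FuzzingRequestCollection()
#   collection.add_request(req_a)
#   collection.add_request(req_b)
#   collection.exclude_preprocessing_request(req_a)
#   # req_a is now handled during preprocessing only, so it is excluded from fuzzing:
#   assert req_a not in collection and collection.size == 1
#   assert collection.size_all_requests == 2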
| 1,563 |
1,144 | <gh_stars>1000+
// SPDX-License-Identifier: GPL-2.0+
/*
* (C) Copyright 2016 Rockchip Electronics Co., Ltd
*/
#include <common.h>
#include <dm.h>
#include <dm/pinctrl.h>
#include <dm/uclass-internal.h>
#include <asm/arch/periph.h>
#include <power/regulator.h>
#include <spl.h>
int board_init(void)
{
struct udevice *pinctrl, *regulator;
int ret;
/*
	 * The PWMs do not have dedicated interrupt numbers in the dts and cannot
	 * get a periph_id from the pinctrl framework, so let's init them here.
	 * PWM2 and PWM3 are for the PWM regulator.
*/
ret = uclass_get_device(UCLASS_PINCTRL, 0, &pinctrl);
if (ret) {
debug("%s: Cannot find pinctrl device\n", __func__);
goto out;
}
/* Enable pwm0 for panel backlight */
ret = pinctrl_request_noflags(pinctrl, PERIPH_ID_PWM0);
if (ret) {
debug("%s PWM0 pinctrl init fail! (ret=%d)\n", __func__, ret);
goto out;
}
ret = pinctrl_request_noflags(pinctrl, PERIPH_ID_PWM2);
if (ret) {
debug("%s PWM2 pinctrl init fail!\n", __func__);
goto out;
}
ret = pinctrl_request_noflags(pinctrl, PERIPH_ID_PWM3);
if (ret) {
debug("%s PWM3 pinctrl init fail!\n", __func__);
goto out;
}
ret = regulators_enable_boot_on(false);
if (ret)
debug("%s: Cannot enable boot on regulator\n", __func__);
ret = regulator_get_by_platname("vcc5v0_host", ®ulator);
if (ret) {
debug("%s vcc5v0_host init fail! ret %d\n", __func__, ret);
goto out;
}
ret = regulator_set_enable(regulator, true);
if (ret) {
debug("%s vcc5v0-host-en set fail!\n", __func__);
goto out;
}
out:
return 0;
}
void spl_board_init(void)
{
struct udevice *pinctrl;
int ret;
ret = uclass_get_device(UCLASS_PINCTRL, 0, &pinctrl);
if (ret) {
debug("%s: Cannot find pinctrl device\n", __func__);
goto err;
}
/* Enable debug UART */
ret = pinctrl_request_noflags(pinctrl, PERIPH_ID_UART_DBG);
if (ret) {
debug("%s: Failed to set up console UART\n", __func__);
goto err;
}
preloader_console_init();
return;
err:
printf("%s: Error %d\n", __func__, ret);
/* No way to report error here */
hang();
}
| 899 |
364 | <reponame>arenadata/vertx-jooq
package io.github.jklingsporn.vertx.jooq.generate;
import io.vertx.core.json.JsonArray;
import org.jooq.*;
import org.jooq.impl.DSL;
import org.junit.BeforeClass;
import java.util.function.Function;
/**
* @author jensklingsporn
*/
public abstract class AbstractPostgresInsertReturningTest {
@BeforeClass
public static void beforeClass() throws Exception {
PostgresConfigurationProvider.getInstance().setupDatabase();
}
protected <R extends UpdatableRecord<R>, T> Function<Object, T> keyMapper(Table<R> table, Configuration configuration){
return o -> {
JsonArray j = (JsonArray) o;
int pkLength = table.getPrimaryKey().getFieldsArray().length;
if(pkLength == 1){
return (T)j.getValue(0);
}
Object[] values = new Object[j.size()];
for(int i=0;i<j.size();i++){
values[i] = j.getValue(i);
}
TableField<R, Object>[] fields = (TableField<R, Object>[]) table.getPrimaryKey().getFieldsArray();
Record result = DSL.using(configuration)
.newRecord(fields);
for (int i = 0; i < values.length; i++)
result.set(fields[i], fields[i].getDataType().convert(values[i]));
return (T) result;
};
}
}
| 615 |
3,271 | <filename>transpose.py
#! /usr/bin/env python3
"""transpose 2d array [[a,b], [c,d], [e,f]] -> [[a,c,e], [b,d,f]]"""
original = [['a', 'b'], ['c', 'd'], ['e', 'f']]
transposed = zip(*original)
print(list(transposed))
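# Extra sanity check (illustrative; assumes lists rather than tuples are wanted downstream):
# converting each zipped row back to a list and transposing twice returns the original.
as_lists = [list(row) for row in zip(*original)]
assert as_lists == [['a', 'c', 'e'], ['b', 'd', 'f']]
assert [list(row) for row in zip(*as_lists)] == original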
| 99 |
1,875 | <filename>testSrc/unit/io/flutter/testing/ProjectFixture.java
/*
* Copyright 2017 The Chromium Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
package io.flutter.testing;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.project.Project;
import com.intellij.testFramework.fixtures.IdeaProjectTestFixture;
abstract public class ProjectFixture<T extends IdeaProjectTestFixture> extends AdaptedFixture<T> {
ProjectFixture(Factory<T> factory, boolean setupOnDispatchThread) {
super(factory, setupOnDispatchThread);
}
public Project getProject() {
return getInner().getProject();
}
public Module getModule() {
return getInner().getModule();
}
}
| 240 |
2,151 | <filename>chromecast/browser/cast_content_browser_client.h<gh_stars>1000+
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROMECAST_BROWSER_CAST_CONTENT_BROWSER_CLIENT_H_
#define CHROMECAST_BROWSER_CAST_CONTENT_BROWSER_CLIENT_H_
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "base/macros.h"
#include "base/single_thread_task_runner.h"
#include "build/build_config.h"
#include "build/buildflag.h"
#include "chromecast/chromecast_buildflags.h"
#include "content/public/browser/certificate_request_result_type.h"
#include "content/public/browser/content_browser_client.h"
#include "services/service_manager/public/cpp/binder_registry.h"
class PrefService;
namespace breakpad {
class CrashHandlerHostLinux;
}
namespace device {
class BluetoothAdapterCast;
}
namespace metrics {
class MetricsService;
}
namespace net {
class SSLPrivateKey;
class URLRequestContextGetter;
class X509Certificate;
}
namespace chromecast {
class CastService;
class CastWindowManager;
class MemoryPressureControllerImpl;
namespace media {
class MediaCapsImpl;
class CmaBackendFactory;
class MediaPipelineBackendManager;
class MediaResourceTracker;
class VideoPlaneController;
class VideoModeSwitcher;
class VideoResolutionPolicy;
}
namespace shell {
class CastBrowserMainParts;
class CastResourceDispatcherHostDelegate;
class RendererConfigManager;
class URLRequestContextFactory;
class CastContentBrowserClient : public content::ContentBrowserClient {
public:
// Creates an implementation of CastContentBrowserClient. Platform should
// link in an implementation as needed.
static std::unique_ptr<CastContentBrowserClient> Create();
~CastContentBrowserClient() override;
// Creates and returns the CastService instance for the current process.
// Note: |request_context_getter| might be different than the main request
// getter accessible via CastBrowserProcess.
virtual std::unique_ptr<CastService> CreateCastService(
content::BrowserContext* browser_context,
PrefService* pref_service,
net::URLRequestContextGetter* request_context_getter,
media::VideoPlaneController* video_plane_controller,
CastWindowManager* window_manager);
virtual media::VideoModeSwitcher* GetVideoModeSwitcher();
#if BUILDFLAG(IS_CAST_USING_CMA_BACKEND)
// Gets object for enforcing video resolution policy restrictions.
virtual media::VideoResolutionPolicy* GetVideoResolutionPolicy();
// Returns the task runner that must be used for media IO.
scoped_refptr<base::SingleThreadTaskRunner> GetMediaTaskRunner();
// Creates a CmaBackendFactory.
virtual media::CmaBackendFactory* GetCmaBackendFactory();
media::MediaResourceTracker* media_resource_tracker();
media::MediaPipelineBackendManager* media_pipeline_backend_manager();
std::unique_ptr<::media::AudioManager> CreateAudioManager(
::media::AudioLogFactory* audio_log_factory) override;
bool OverridesAudioManager() override;
std::unique_ptr<::media::CdmFactory> CreateCdmFactory() override;
#endif // BUILDFLAG(IS_CAST_USING_CMA_BACKEND)
media::MediaCapsImpl* media_caps();
#if !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
// Create a BluetoothAdapter for WebBluetooth support.
// TODO(slan): This further couples the browser to the Cast service. Remove
// this once the dedicated Bluetooth service has been implemented.
// (b/76155468)
virtual base::WeakPtr<device::BluetoothAdapterCast> CreateBluetoothAdapter();
#endif // !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
// Invoked when the metrics client ID changes.
virtual void SetMetricsClientId(const std::string& client_id);
// Allows registration of extra metrics providers.
virtual void RegisterMetricsProviders(
::metrics::MetricsService* metrics_service);
// Returns whether or not the remote debugging service should be started
// on browser startup.
virtual bool EnableRemoteDebuggingImmediately();
// content::ContentBrowserClient implementation:
content::BrowserMainParts* CreateBrowserMainParts(
const content::MainFunctionParams& parameters) override;
void RenderProcessWillLaunch(
content::RenderProcessHost* host,
service_manager::mojom::ServiceRequest* service_request) override;
bool IsHandledURL(const GURL& url) override;
void SiteInstanceGotProcess(content::SiteInstance* site_instance) override;
void AppendExtraCommandLineSwitches(base::CommandLine* command_line,
int child_process_id) override;
void OverrideWebkitPrefs(content::RenderViewHost* render_view_host,
content::WebPreferences* prefs) override;
void ResourceDispatcherHostCreated() override;
std::string GetApplicationLocale() override;
void GetGeolocationRequestContext(
base::OnceCallback<void(scoped_refptr<net::URLRequestContextGetter>)>
callback) override;
content::QuotaPermissionContext* CreateQuotaPermissionContext() override;
void GetQuotaSettings(
content::BrowserContext* context,
content::StoragePartition* partition,
storage::OptionalQuotaSettingsCallback callback) override;
void AllowCertificateError(
content::WebContents* web_contents,
int cert_error,
const net::SSLInfo& ssl_info,
const GURL& request_url,
content::ResourceType resource_type,
bool strict_enforcement,
bool expired_previous_decision,
const base::Callback<void(content::CertificateRequestResultType)>&
callback) override;
void SelectClientCertificate(
content::WebContents* web_contents,
net::SSLCertRequestInfo* cert_request_info,
net::ClientCertIdentityList client_certs,
std::unique_ptr<content::ClientCertificateDelegate> delegate) override;
bool CanCreateWindow(content::RenderFrameHost* opener,
const GURL& opener_url,
const GURL& opener_top_level_frame_url,
const GURL& source_origin,
content::mojom::WindowContainerType container_type,
const GURL& target_url,
const content::Referrer& referrer,
const std::string& frame_name,
WindowOpenDisposition disposition,
const blink::mojom::WindowFeatures& features,
bool user_gesture,
bool opener_suppressed,
bool* no_javascript_access) override;
void ExposeInterfacesToRenderer(
service_manager::BinderRegistry* registry,
blink::AssociatedInterfaceRegistry* associated_registry,
content::RenderProcessHost* render_process_host) override;
void ExposeInterfacesToMediaService(
service_manager::BinderRegistry* registry,
content::RenderFrameHost* render_frame_host) override;
void RegisterInProcessServices(
StaticServiceMap* services,
content::ServiceManagerConnection* connection) override;
std::unique_ptr<base::Value> GetServiceManifestOverlay(
base::StringPiece service_name) override;
void GetAdditionalMappedFilesForChildProcess(
const base::CommandLine& command_line,
int child_process_id,
content::PosixFileDescriptorInfo* mappings) override;
void GetAdditionalWebUISchemes(
std::vector<std::string>* additional_schemes) override;
content::DevToolsManagerDelegate* GetDevToolsManagerDelegate() override;
std::unique_ptr<content::NavigationUIData> GetNavigationUIData(
content::NavigationHandle* navigation_handle) override;
RendererConfigManager* renderer_config_manager() const {
return renderer_config_manager_.get();
}
protected:
CastContentBrowserClient();
URLRequestContextFactory* url_request_context_factory() const {
return url_request_context_factory_.get();
}
private:
// Create device cert/key
virtual scoped_refptr<net::X509Certificate> DeviceCert();
virtual scoped_refptr<net::SSLPrivateKey> DeviceKey();
void AddNetworkHintsMessageFilter(int render_process_id,
net::URLRequestContext* context);
void SelectClientCertificateOnIOThread(
GURL requesting_url,
const std::string& session_id,
int render_process_id,
scoped_refptr<base::SequencedTaskRunner> original_runner,
const base::Callback<void(scoped_refptr<net::X509Certificate>,
scoped_refptr<net::SSLPrivateKey>)>&
continue_callback);
#if !defined(OS_ANDROID)
// Returns the crash signal FD corresponding to the current process type.
int GetCrashSignalFD(const base::CommandLine& command_line);
// Creates a CrashHandlerHost instance for the given process type.
breakpad::CrashHandlerHostLinux* CreateCrashHandlerHost(
const std::string& process_type);
// A static cache to hold crash_handlers for each process_type
std::map<std::string, breakpad::CrashHandlerHostLinux*> crash_handlers_;
// Notify renderers of memory pressure (Android renderers register directly
// with OS for this).
std::unique_ptr<MemoryPressureControllerImpl> memory_pressure_controller_;
#endif // !defined(OS_ANDROID)
// Created by CastContentBrowserClient but owned by BrowserMainLoop.
CastBrowserMainParts* cast_browser_main_parts_;
std::unique_ptr<URLRequestContextFactory> url_request_context_factory_;
std::unique_ptr<CastResourceDispatcherHostDelegate>
resource_dispatcher_host_delegate_;
std::unique_ptr<media::CmaBackendFactory> cma_backend_factory_;
std::unique_ptr<RendererConfigManager> renderer_config_manager_;
DISALLOW_COPY_AND_ASSIGN(CastContentBrowserClient);
};
} // namespace shell
} // namespace chromecast
#endif // CHROMECAST_BROWSER_CAST_CONTENT_BROWSER_CLIENT_H_
| 3,309 |
655 | package com.samourai.whirlpool.client.wallet;
import com.samourai.http.client.AndroidHttpClient;
import com.samourai.http.client.IHttpClient;
import com.samourai.wallet.api.APIFactory;
import com.samourai.wallet.api.backend.BackendApi;
import com.samourai.wallet.api.backend.BackendServer;
import com.samourai.wallet.api.backend.beans.HttpException;
import com.samourai.wallet.api.backend.beans.UnspentResponse;
import com.samourai.wallet.hd.HD_Wallet;
import com.samourai.wallet.segwit.SegwitAddress;
import com.samourai.wallet.tor.TorManager;
import com.samourai.wallet.util.WebUtil;
import com.samourai.whirlpool.client.exception.NotifiableException;
import com.samourai.whirlpool.client.tx0.AndroidTx0Service;
import com.samourai.whirlpool.client.tx0.Tx0;
import com.samourai.whirlpool.client.tx0.Tx0Config;
import com.samourai.whirlpool.client.tx0.Tx0Preview;
import com.samourai.whirlpool.client.tx0.UnspentOutputWithKey;
import com.samourai.whirlpool.client.utils.ClientUtils;
import com.samourai.whirlpool.client.wallet.beans.MixingState;
import com.samourai.whirlpool.client.wallet.beans.Tx0FeeTarget;
import com.samourai.whirlpool.client.wallet.beans.WhirlpoolUtxo;
import com.samourai.whirlpool.client.wallet.persist.FileWhirlpoolWalletPersistHandler;
import com.samourai.whirlpool.client.wallet.persist.WhirlpoolWalletPersistHandler;
import com.samourai.whirlpool.client.whirlpool.beans.Pool;
import com.samourai.whirlpool.client.whirlpool.beans.Tx0Data;
import junit.framework.Assert;
import org.bitcoinj.core.ECKey;
import org.bitcoinj.params.TestNet3Params;
import org.bouncycastle.util.encoders.Hex;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.Collection;
import java.util.LinkedList;
import java8.util.Optional;
@Ignore
public class WhirlpoolWalletTest extends AbstractWhirlpoolTest {
private Logger log = LoggerFactory.getLogger(WhirlpoolWalletTest.class.getSimpleName());
private WhirlpoolWallet whirlpoolWallet;
private HD_Wallet bip84w;
private WhirlpoolWalletConfig config;
private static final String SEED_WORDS = "wise never behave tornado tool pear aunt consider season swap custom human";
private static final String SEED_PASSPHRASE = "test";
@Before
public void setUp() throws Exception {
super.setUp(TestNet3Params.get());
// configure wallet
boolean testnet = true;
boolean onion = false;
int mixsTarget = 5;
String scode = null;
// backendApi with mocked pushTx
TorManager torManager = TorManager.getInstance(getContext());
IHttpClient httpClient = new AndroidHttpClient(WebUtil.getInstance(getContext()), torManager);
BackendApi backendApi = new BackendApi(httpClient, BackendServer.TESTNET.getBackendUrl(onion), Optional.empty()) {
@Override
public void pushTx(String txHex) throws Exception {
log.info("pushTX ignored for test: "+txHex);
}
};
File fileIndex = File.createTempFile("test-state", "test");
File fileUtxo = File.createTempFile("test-utxos", "test");
WhirlpoolWalletPersistHandler persistHandler =
new FileWhirlpoolWalletPersistHandler(fileIndex, fileUtxo);
        // instantiate WhirlpoolWallet
bip84w = computeBip84w(SEED_WORDS, SEED_PASSPHRASE);
config = whirlpoolWalletService.computeWhirlpoolWalletConfig(torManager, persistHandler, testnet, onion, mixsTarget, scode, httpClient, backendApi);
config.setTx0Service(new AndroidTx0Service(config){
@Override
protected Tx0Data fetchTx0Data(String poolId) throws HttpException, NotifiableException {
Tx0Data tx0Data = super.fetchTx0Data(poolId);
// mock fee address for deterministic tests
return new Tx0Data(tx0Data.getFeePaymentCode(), tx0Data.getFeeValue(), tx0Data.getFeeChange(), 0, tx0Data.getFeePayload(), "tb1qgyppvv58rv83eas60trmdgqc06yx9q53qs6skx", 123);
}
});
APIFactory apiFactory = APIFactory.getInstance(context);
WhirlpoolDataService dataService = whirlpoolWalletService.newDataService(config, apiFactory);
whirlpoolWallet = whirlpoolWalletService.openWallet(config, dataService, bip84w);
}
@Test
public void testStart() throws Exception {
// start whirlpool wallet
whirlpoolWallet.start();
// list pools
Collection<Pool> pools = whirlpoolWallet.getPools();
Assert.assertTrue(!pools.isEmpty());
// find pool by poolId
Pool pool = whirlpoolWallet.findPoolById("0.01btc");
Assert.assertNotNull(pool);
// list premix utxos
Collection<WhirlpoolUtxo> utxosPremix = whirlpoolWallet.getUtxosPremix();
log.info(utxosPremix.size()+" PREMIX utxos:");
ClientUtils.logWhirlpoolUtxos(utxosPremix, whirlpoolWallet.getConfig().getMixsTarget());
// list postmix utxos
        Collection<WhirlpoolUtxo> utxosPostmix = whirlpoolWallet.getUtxosPostmix();
log.info(utxosPostmix.size()+" POSTMIX utxos:");
ClientUtils.logWhirlpoolUtxos(utxosPostmix, whirlpoolWallet.getConfig().getMixsTarget());
// keep running
for(int i=0; i<50; i++) {
MixingState mixingState = whirlpoolWallet.getMixingState();
log.debug("WHIRLPOOL: "+mixingState.getNbQueued()+" queued, "+mixingState.getNbMixing()+" mixing: "+mixingState.getUtxosMixing());
synchronized (this) {
wait(10000);
}
}
}
@Test
public void testTx0() throws Exception {
Collection<UnspentOutputWithKey> spendFroms = new LinkedList<>();
ECKey ecKey = bip84w.getAccountAt(0).getChain(0).getAddressAt(61).getECKey();
UnspentResponse.UnspentOutput unspentOutput = newUnspentOutput(
"cc588cdcb368f894a41c372d1f905770b61ecb3fb8e5e01a97e7cedbf5e324ae", 1, 500000000);
unspentOutput.addr = new SegwitAddress(ecKey, networkParameters).getBech32AsString();
spendFroms.add(new UnspentOutputWithKey(unspentOutput, ecKey.getPrivKeyBytes()));
Pool pool = whirlpoolWallet.findPoolById("0.01btc");
Tx0Config tx0Config = whirlpoolWallet.getTx0Config(pool).setMaxOutputs(1);
Tx0Preview tx0Preview = whirlpoolWallet.tx0Preview(pool, spendFroms, tx0Config, Tx0FeeTarget.BLOCKS_2);
Tx0 tx0 = whirlpoolWallet.tx0(spendFroms, pool, tx0Config, Tx0FeeTarget.BLOCKS_2);
Assert.assertEquals("dc398c99cf9ce18123ea916d69bb99da44a3979a625eeaac5e17837f879a8874", tx0.getTx().getHashAsString());
Assert.assertEquals("01000000000101ae24e3f5dbcee7971ae0e5b83fcb1eb67057901f2d371ca494f868b3dc8c58cc0100000000ffffffff040000000000000000426a408a9eb379a44ff4d4579118c64b64bbd327cd95ba826ac68f334155fd9ca4e3acd64acdfd75dd7c3cc5bc34d31af6c6e68b4db37eac62b574890f6cfc7b904d9950c300000000000016001441021632871b0f1cf61a7ac7b6a0187e88628291b44b0f00000000001600147e4a4628dd8fbd638681a728e39f7d92ada04070e954bd1d00000000160014df3a4bc83635917ad18621f3ba78cef6469c5f5902483045022100c48f02762ab9877533b5c7b0bc729479ce7809596b89cb9f62b740ea3350068f02205ef46ca67df39d35f940e33223c5ddd56669d953b6ef4948e355c1f3430f32e10121032e46baef8bcde0c3a19cadb378197fa31d69adb21535de3f84de699a1cf88b4500000000", new String(Hex.encode(tx0.getTx().bitcoinSerialize())));
}
} | 3,134 |
785 | <gh_stars>100-1000
/*
* Copyright © 2019 Apple Inc. and the ServiceTalk project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicetalk.concurrent.api;
import static java.lang.System.identityHashCode;
final class AsyncContextMapUtils {
private AsyncContextMapUtils() {
// no instances
}
static String contextMapToString(AsyncContextMap map) {
final String simpleName = map.getClass().getSimpleName();
        // 13 is 2 characters for "()" + 11 for the hash code integer
// assume size of 25 (5 overhead characters for formatting) for each key/value pair
StringBuilder sb = new StringBuilder(simpleName.length() + 13 + map.size() * 25);
sb.append(simpleName)
.append('(')
            // There are many copies of these maps around; the content may be equal, but a differentiating factor is the
            // object reference. This may help folks understand why state is not visible across AsyncContext boundaries.
.append(identityHashCode(map))
.append(')');
map.forEach((key, value) -> {
sb.append(" {").append(key).append(", ").append(value).append('}');
return true;
});
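        // Illustrative example of the resulting format (hypothetical map with two entries):
        //   "SomeAsyncContextMap(1254526270) {traceId, abc123} {userId, 42}"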
return sb.toString();
}
}
| 581 |
764 | <reponame>641589523/token-profile<gh_stars>100-1000
{"symbol": "FERA","address": "0x539F3615C1dBAfa0D008d87504667458acBd16Fa","overview":{"en": "FERA provides trading strategies for crypto traders and investors, focusing on low-cap projects. The full service includes a trading plan that covers funds management and short-term predictions."},"email": "<EMAIL>","website": "https://www.ferastrategies.com/","state": "NORMAL","links": {"blog": "https://medium.com/@ferastrategies","twitter": "https://twitter.com/FERAStrategies","telegram": "https://t.me/ferastrategies","github": "https://github.com/FeraStrategies"}} | 197 |
1,801 | #pragma once
#include <vector>
#include <memory>
#include "defines.h"
#include "trajectory.h"
#include "TrackerSettings.h"
///
/// \brief The CTracker class
///
class BaseTracker
{
public:
BaseTracker() = default;
BaseTracker(const BaseTracker&) = delete;
BaseTracker(BaseTracker&&) = delete;
BaseTracker& operator=(const BaseTracker&) = delete;
BaseTracker& operator=(BaseTracker&&) = delete;
virtual ~BaseTracker(void) = default;
virtual void Update(const regions_t& regions, cv::UMat currFrame, float fps) = 0;
virtual bool CanGrayFrameToTrack() const = 0;
virtual bool CanColorFrameToTrack() const = 0;
virtual size_t GetTracksCount() const = 0;
virtual void GetTracks(std::vector<TrackingObject>& tracks) const = 0;
virtual void GetRemovedTracks(std::vector<track_id_t>& trackIDs) const = 0;
static std::unique_ptr<BaseTracker> CreateTracker(const TrackerSettings& settings);
};
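// Illustrative usage (assumes a populated TrackerSettings and a captured frame):
//
//   TrackerSettings settings;
//   std::unique_ptr<BaseTracker> tracker = BaseTracker::CreateTracker(settings);
//   tracker->Update(regions, frame, 30.f);
//   std::vector<TrackingObject> tracks;
//   tracker->GetTracks(tracks);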
| 315 |
5,169 | <filename>Specs/b/c/6/ZLNavigationBar/1.0.5/ZLNavigationBar.podspec.json<gh_stars>1000+
{
"name": "ZLNavigationBar",
"version": "1.0.5",
"summary": "基础导航栏",
"description": "提供一个基础的导航栏视图,结构简单。",
"homepage": "https://gitee.com/ZLPublicKits/ZLNavigationBar",
"license": "MIT",
"authors": {
"Mr.Zhao": "<EMAIL>"
},
"source": {
"git": "https://gitee.com/ZLPublicKits/ZLNavigationBar.git",
"tag": "1.0.5"
},
"platforms": {
"ios": "8.0"
},
"public_header_files": "ZLNavigationBar/Classes/ZLNavigationBarHeader.h",
"source_files": "ZLNavigationBar/Classes/ZLNavigationBarHeader.h",
"frameworks": "UIKit",
"subspecs": [
{
"name": "Units",
"vendored_frameworks": "ZLNavigationBar/Classes/ZLNavigationBar.framework",
"resource_bundles": {
"ZLNavigationBar": [
"ZLNavigationBar/Assets/*/*"
]
}
}
]
}
| 469 |
824 | package com.me.silencedut.nbaplus.app;
import android.app.Application;
import android.content.Context;
//import com.squareup.leakcanary.LeakCanary;
//import com.squareup.leakcanary.RefWatcher;
import im.fir.sdk.FIR;
/**
* Created by SilenceDut on 2015/11/28.
*/
public class App extends Application {
private static Application sInstance;
@Override
public void onCreate() {
super.onCreate();
FIR.init(this);
// mRefWatcher = LeakCanary.install(this);
sInstance = this;
AppService.getInstance().initService();
}
public static Context getContext() {
return sInstance.getApplicationContext();
}
}
| 254 |
16,461 | // Copyright 2018-present 650 Industries. All rights reserved.
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>
#import <ABI43_0_0ExpoModulesCore/ABI43_0_0EXSingletonModule.h>
NS_ASSUME_NONNULL_BEGIN
@protocol ABI43_0_0EXRemoteNotificationPermissionDelegate
- (void)handleDidFinishRegisteringForRemoteNotifications;
@end
@protocol ABI43_0_0EXRemoteNotificationPermissionProgressPublisher
- (void)addDelegate:(id<ABI43_0_0EXRemoteNotificationPermissionDelegate>)delegate;
- (void)removeDelegate:(id<ABI43_0_0EXRemoteNotificationPermissionDelegate>)delegate;
@end
@interface ABI43_0_0EXRemoteNotificationPermissionSingletonModule : ABI43_0_0EXSingletonModule <UIApplicationDelegate, ABI43_0_0EXRemoteNotificationPermissionProgressPublisher>
- (void)application:(UIApplication *)application didRegisterForRemoteNotificationsWithDeviceToken:(NSData *)token;
- (void)application:(UIApplication *)application didFailToRegisterForRemoteNotificationsWithError:(NSError *)error;
@end
NS_ASSUME_NONNULL_END
| 339 |
60,067 | #include <torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h>
namespace torch {
namespace distributed {
namespace autograd {
AutogradMetadata::AutogradMetadata(
int64_t autogradContextId_,
int64_t autogradMessageId_)
: autogradContextId(autogradContextId_),
autogradMessageId(autogradMessageId_) {}
} // namespace autograd
} // namespace distributed
} // namespace torch
| 149 |
1,599 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Helper functions for the XXDecomposer test suite.
"""
from qiskit.circuit.library import RXXGate, RYYGate, RZZGate
def canonical_matrix(a=0.0, b=0.0, c=0.0):
"""
Produces the matrix form of a "canonical operator"
exp(-i (a XX + b YY + c ZZ)) .
"""
return RXXGate(2 * a).to_matrix() @ RYYGate(2 * b).to_matrix() @ RZZGate(-2 * c).to_matrix()
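# Quick illustrative check (not part of the original helper): with all angles equal to
# zero each rotation gate is the identity, so the canonical matrix reduces to I_4.
if __name__ == "__main__":
    import numpy as np
    assert np.allclose(canonical_matrix(), np.eye(4))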
| 290 |
60,067 | #pragma once
#include <c10/util/SmallVector.h>
| 20 |
348 | {"nom":"Saint-Etienne-de-Mer-Morte","circ":"9ème circonscription","dpt":"Loire-Atlantique","inscrits":1129,"abs":656,"votants":473,"blancs":28,"nuls":17,"exp":428,"res":[{"nuance":"MDM","nom":"<NAME>","voix":246},{"nuance":"LR","nom":"<NAME>","voix":182}]} | 104 |
15,577 | <filename>src/Access/IAccessEntity.cpp
#include <Access/IAccessEntity.h>
namespace DB
{
bool IAccessEntity::equal(const IAccessEntity & other) const
{
return (name == other.name) && (getType() == other.getType());
}
}
| 79 |
754 | /*-
* <<
* UAVStack
* ==
* Copyright (C) 2016 - 2017 UAVStack
* ==
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* >>
*/
package com.creditease.uav.datastore.source;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
import com.creditease.uav.datastore.api.DataStoreConnection;
import com.creditease.uav.datastore.api.DataStoreProtocol;
/**
 * Implements a heavyweight HBase connection. Required parameters include the ZooKeeper connection address and the cluster address.
*
* @author 201211070016
*
*/
public class HBaseDataSource extends AbstractDataSource<Connection> {
private Connection conn = null;
public HBaseDataSource(DataStoreConnection connection) {
super(connection);
}
@Override
protected Connection initSourceConnect() throws IOException, ServiceException {
        // Currently only the zk list is converted into a server list and db name
Configuration config = HBaseConfiguration.create();
String address = connection.toString(",");
config.set(DataStoreProtocol.HBASE_ZK_QUORUM, address);
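        // Illustrative example (assumption about the connection contents): toString(",")
        // is expected to yield a ZooKeeper quorum string such as
        // "zk1.example.com:2181,zk2.example.com:2181".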
config.set("hbase.client.scanner.caching",
(String) connection.getContext(DataStoreProtocol.HBASE_QUERY_CACHING));
config.set("hbase.client.scanner.max.result.size",
(String) connection.getContext(DataStoreProtocol.HBASE_QUERY_MAXRESULTSIZE));
config.set("zookeeper.recovery.retry", String.valueOf(connection.getRetryTimes()));
        // Configuration to protect against "Failed to replace a bad datanode" exceptions
config.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
config.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
HBaseAdmin.checkHBaseAvailable(config);
conn = ConnectionFactory.createConnection(config);
// hbase.client.retries.number = 1 and zookeeper.recovery.retry = 1.
return conn;
}
@Override
public void stop() {
try {
conn.close();
}
catch (IOException e) {
}
super.stop();
}
}
| 1,063 |
323 | # flake8: noqa
"""
# Append/update stock ex-dividend and ex-rights (dividend) data
"""
import os
import sys
import json
from typing import Any
from collections import OrderedDict
import pandas as pd
vnpy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
if vnpy_root not in sys.path:
sys.path.append(vnpy_root)
os.environ["VNPY_TESTING"] = "1"
import baostock as bs
from vnpy.trader.constant import Exchange
from vnpy.trader.utility import load_json, load_data_from_pkb2, save_data_to_pkb2, extract_vt_symbol
from vnpy.data.tdx.tdx_common import get_stock_type
from vnpy.data.stock.stock_base import get_stock_base
import baostock as bs
import pandas as pd
STOCK_DIVIDEND_FILE = 'stock_dividend.csv'
years = [str(y) for y in range(2006, 2020)]
def update_stock_devidend():
"""
    Update stock ex-dividend / ex-rights (dividend) information
:return:
"""
stocks_data = get_stock_base()
login_msg = bs.login()
if login_msg.error_code != '0':
print(f'证券宝登录错误代码:{login_msg.error_code}, 错误信息:{login_msg.error_msg}')
return
result_list = []
for k, v in stocks_data.items():
if v.get('类型') != '股票':
continue
symbol, exchange = extract_vt_symbol(k)
bs_code = '.'.join(['sh' if exchange == Exchange.SSE else 'sz', symbol])
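        # Illustrative example: symbol "600000" on the SSE becomes bs_code "sh.600000".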
stock_name = v.get('name')
print(f'开始获取{stock_name} {bs_code}得除权除息')
for year in years:
rs = bs.query_dividend_data(
code=bs_code,
year=year
)
if rs.error_code != '0':
print(f'证券宝获取沪深A股除权除息数据,错误代码:{rs.error_code}, 错误信息:{rs.error_msg}')
continue
# [dict] => dataframe
            #print(f'Returned fields: {rs.fields}')
while (rs.error_code == '0') and rs.next():
row = rs.get_row_data()
exchange_code, stock_code = row[0].split('.')
exchange = Exchange.SSE if exchange_code == 'sh' else Exchange.SZSE
d = {
'exchange': exchange.value,
'code': stock_code,
'name': stock_name,
                    'dividPreNoticeDate': row[1],  # preliminary disclosure announcement date
                    'dividAgmPumDate': row[2],  # shareholders' meeting announcement date
                    'dividPlanAnnounceDate': row[3],  # plan announcement date
                    'dividPlanDate': row[4],  # dividend implementation announcement date
                    'dividRegistDate': row[5],  # share registration (record) date
                    'dividOperateDate': row[6],  # ex-dividend / ex-rights date
                    'dividPayDate': row[7],  # dividend payment date
                    'dividStockMarketDate': row[8],  # bonus share listing and trading date
                    'dividCashPsBeforeTax': row[9],  # cash dividend per share, pre-tax
                    'dividCashPsAfterTax': row[10],  # cash dividend per share, after tax
                    'dividStocksPs': row[11],  # bonus shares per share
                    'dividCashStock': row[12],  # dividend and bonus/transfer summary
                    'dividReserveToStockPs': row[13]  # shares converted from capital reserve, per share
}
result_list.append(d)
print(f'{d}')
if len(result_list) > 0:
df = pd.DataFrame(result_list)
export_file_name = os.path.abspath(os.path.join(os.path.dirname(__file__), STOCK_DIVIDEND_FILE))
df.to_csv(export_file_name)
print(f'保存除权除息至文件:{export_file_name}')
if __name__ == '__main__':
update_stock_devidend()
| 2,023 |
3,428 | {"id":"00174","group":"spam-2","checksum":{"type":"MD5","value":"94a8f3a8826ff937c38640422784ce86"},"text":"From <EMAIL> Mon Jun 24 17:42:12 2002\nReturn-Path: [email protected]\nDelivery-Date: Wed Mar 13 00:58:21 2002\nReceived: from tomts20-srv.bellnexxia.net (tomts20.bellnexxia.net\n [209.226.175.74]) by dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id\n g2D0wK101942 for <<EMAIL>>; Wed, 13 Mar 2002 00:58:20 GMT\nReceived: from Sender ([65.92.116.203]) by tomts20-srv.bellnexxia.net\n (InterMail vM.4.01.03.23 201-229-121-123-20010418) with SMTP id\n <20020313005814.XFKX7112.tomts20-srv.bellnexxia.net@Sender>;\n Tue, 12 Mar 2002 19:58:14 -0500\nFrom: <NAME><<EMAIL>>\nTo: \"\"<>\nSubject: Traderlist.com being shut down due to FRAUD!\nReply-To: [email protected]\nX-Mailer: Advanced Mass Sender v 3.21b (Smtp MultiSender v 2.5)\nMIME-Version: 1.0\nContent-Type: text/plain; charset=\"koi8-r\"\nDate: Wed, 13 Mar 2002 07:59:51 -0500\nMessage-Id: <<EMAIL>>\nX-Status: \nX-Keywords: \n\nTraderlist.com is a fraud and is sending out your e-mail to 3rd parties!! \n\nWith the help of an email extracting program anyone can get your email adress and home adress from scanning Traderlist.com.\n\nTraderlist.com is breach the Internet Privacy Act by adding your email adress and adress without the consent of the owner.\n\nWe are working towards a class action law suit which will see Traderlist.com ordered to be shut down and pay a fine.\n\nIf you are interested and would like to find out how Traderlist.com sends out your email and other personal information to 3rd parties just reply to this message.\n\nRegards,\n\n<NAME>\n"} | 623 |
353 | package com.github.trang.druid.example.jpa.model;
import java.io.Serializable;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import lombok.Data;
/**
* City
*
* @author trang
*/
@Entity
@Table(name = "city")
@Data
public class City implements Serializable {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "id")
private Long id;
@Column(name = "name")
private String name;
@Column(name = "state")
private String state;
@Column(name = "country")
private String country;
} | 270 |
778 | <reponame>clazaro/Kratos
import setuptools
import os
import json
import glob
import shutil
kratos_version = os.environ["KRATOS_VERSION"]
def replaceKeyword(str):
return str.replace("${KRATOS_VERSION}", kratos_version).replace("${PYTHON}", os.environ["PYTHON"])
def replaceKeywords(stringArray):
return list(map(lambda str: replaceKeyword(str), stringArray))
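# Illustrative example: with KRATOS_VERSION="9.1" and PYTHON="38" in the environment,
# replaceKeyword("python_${PYTHON}") returns "python_38" and
# replaceKeyword("${KRATOS_VERSION}") returns "9.1".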
with open("wheel.json", "r") as conf_file:
conf = json.loads(conf_file.read())
with open(os.path.join(os.environ["KRATOS_ROOT"], conf["readme"]), "r") as fh:
long_description = fh.read()
for module in conf["included_modules"]:
shutil.copytree(os.path.join(os.environ["KRATOS_ROOT"], "bin", "Release", replaceKeyword("python_${PYTHON}"), "KratosMultiphysics", module), os.path.join("KratosMultiphysics", module))
for binary in conf["included_binaries"]:
for file in glob.glob(os.path.join(os.environ["KRATOS_ROOT"], "bin", "Release", replaceKeyword("python_${PYTHON}"), "libs", replaceKeyword(binary))):
print("Adding {} matching binary: {}".format(file, binary))
shutil.copy(file, os.path.join("KratosMultiphysics", ".libs"))
if "excluded_binaries" in conf:
f = open("excluded.txt", "w")
f.writelines(list(map(lambda x: replaceKeyword(x) + "\n", conf["excluded_binaries"])))
f.close()
class BinaryDistribution(setuptools.Distribution):
def has_ext_modules(foo):
return True
class EmptyListWithLength(list):
def __len__(self):
return 1
setuptools.setup(
name=conf["wheel_name"],
version=kratos_version,
author=conf["author"],
author_email=conf["author_email"],
description=conf["description"],
url="https://github.com/KratosMultiphysics/",
packages=setuptools.find_namespace_packages(),
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=replaceKeywords(conf["dependencies"]),
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: C++",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Mathematics",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Intended Audience :: Science/Research",
"Intended Audience :: Other Audience",
"Intended Audience :: Developers",
"Development Status :: 5 - Production/Stable",
"Environment :: Console"
],
package_data={
'KratosMultiphysics': list(map(lambda x: ".libs/" + x, os.listdir("KratosMultiphysics/.libs")))
},
python_requires='>=3.5',
ext_modules=EmptyListWithLength(),
distclass=BinaryDistribution
)
| 1,162 |
1,330 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.opensymphony.xwork2.conversion.impl;
import com.opensymphony.xwork2.conversion.ObjectTypeDeterminer;
import com.opensymphony.xwork2.conversion.TypeConverter;
import com.opensymphony.xwork2.inject.Inject;
import java.lang.reflect.Member;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
public class CollectionConverter extends DefaultTypeConverter {
private ObjectTypeDeterminer objectTypeDeterminer;
@Inject
public void setObjectTypeDeterminer(ObjectTypeDeterminer determiner) {
this.objectTypeDeterminer = determiner;
}
public Object convertValue(Map<String, Object> context, Object target, Member member, String propertyName, Object value, Class toType) {
Collection result;
Class memberType = String.class;
if (target != null) {
memberType = objectTypeDeterminer.getElementClass(target.getClass(), propertyName, null);
if (memberType == null) {
memberType = String.class;
}
}
if (toType.isAssignableFrom(value.getClass())) {
// no need to do anything
result = (Collection) value;
} else if (value.getClass().isArray()) {
Object[] objArray = (Object[]) value;
TypeConverter converter = getTypeConverter(context);
result = createCollection(toType, memberType, objArray.length);
for (Object anObjArray : objArray) {
Object convertedValue = converter.convertValue(context, target, member, propertyName, anObjArray, memberType);
if (!TypeConverter.NO_CONVERSION_POSSIBLE.equals(convertedValue)) {
result.add(convertedValue);
}
}
} else if (Collection.class.isAssignableFrom(value.getClass())) {
Collection col = (Collection) value;
TypeConverter converter = getTypeConverter(context);
result = createCollection(toType, memberType, col.size());
for (Object aCol : col) {
Object convertedValue = converter.convertValue(context, target, member, propertyName, aCol, memberType);
if (!TypeConverter.NO_CONVERSION_POSSIBLE.equals(convertedValue)) {
result.add(convertedValue);
}
}
} else {
result = createCollection(toType, memberType, -1);
TypeConverter converter = getTypeConverter(context);
Object convertedValue = converter.convertValue(context, target, member, propertyName, value, memberType);
if (!TypeConverter.NO_CONVERSION_POSSIBLE.equals(convertedValue)) {
result.add(convertedValue);
}
}
return result;
}
private Collection createCollection(Class toType, Class memberType, int size) {
Collection result;
if (toType == Set.class) {
if (size > 0) {
result = new HashSet(size);
} else {
result = new HashSet();
}
} else if (toType == SortedSet.class) {
result = new TreeSet();
} else {
if (size > 0) {
result = new XWorkList(memberType, size);
} else {
result = new XWorkList(memberType);
}
}
return result;
}
}
| 1,702 |
2,338 | <gh_stars>1000+
#include <cstdint>
struct alignas(64) zmm_t {
uint64_t a, b, c, d, e, f, g, h;
};
int main() {
constexpr zmm_t zmm[] = {
{ 0x0706050403020100, 0x0F0E0D0C0B0A0908,
0x1716151413121110, 0x1F1E1D1C1B1A1918,
0x2726252423222120, 0x2F2E2D2C2B2A2928,
0x3736353433323130, 0x3F3E3D3C3B3A3938, },
{ 0x0807060504030201, 0x100F0E0D0C0B0A09,
0x1817161514131211, 0x201F1E1D1C1B1A19,
0x2827262524232221, 0x302F2E2D2C2B2A29,
0x3837363534333231, 0x403F3E3D3C3B3A39, },
{ 0x0908070605040302, 0x11100F0E0D0C0B0A,
0x1918171615141312, 0x21201F1E1D1C1B1A,
0x2928272625242322, 0x31302F2E2D2C2B2A,
0x3938373635343332, 0x41403F3E3D3C3B3A, },
{ 0x0A09080706050403, 0x1211100F0E0D0C0B,
0x1A19181716151413, 0x2221201F1E1D1C1B,
0x2A29282726252423, 0x3231302F2E2D2C2B,
0x3A39383736353433, 0x4241403F3E3D3C3B, },
{ 0x0B0A090807060504, 0x131211100F0E0D0C,
0x1B1A191817161514, 0x232221201F1E1D1C,
0x2B2A292827262524, 0x333231302F2E2D2C,
0x3B3A393837363534, 0x434241403F3E3D3C, },
{ 0x0C0B0A0908070605, 0x14131211100F0E0D,
0x1C1B1A1918171615, 0x24232221201F1E1D,
0x2C2B2A2928272625, 0x34333231302F2E2D,
0x3C3B3A3938373635, 0x44434241403F3E3D, },
{ 0x0D0C0B0A09080706, 0x1514131211100F0E,
0x1D1C1B1A19181716, 0x2524232221201F1E,
0x2D2C2B2A29282726, 0x3534333231302F2E,
0x3D3C3B3A39383736, 0x4544434241403F3E, },
{ 0x0E0D0C0B0A090807, 0x161514131211100F,
0x1E1D1C1B1A191817, 0x262524232221201F,
0x2E2D2C2B2A292827, 0x363534333231302F,
0x3E3D3C3B3A393837, 0x464544434241403F, },
#if defined(__x86_64__) || defined(_M_X64)
{ 0x0F0E0D0C0B0A0908, 0x1716151413121110,
0x1F1E1D1C1B1A1918, 0x2726252423222120,
0x2F2E2D2C2B2A2928, 0x3736353433323130,
0x3F3E3D3C3B3A3938, 0x4746454443424140, },
{ 0x100F0E0D0C0B0A09, 0x1817161514131211,
0x201F1E1D1C1B1A19, 0x2827262524232221,
0x302F2E2D2C2B2A29, 0x3837363534333231,
0x403F3E3D3C3B3A39, 0x4847464544434241, },
{ 0x11100F0E0D0C0B0A, 0x1918171615141312,
0x21201F1E1D1C1B1A, 0x2928272625242322,
0x31302F2E2D2C2B2A, 0x3938373635343332,
0x41403F3E3D3C3B3A, 0x4948474645444342, },
{ 0x1211100F0E0D0C0B, 0x1A19181716151413,
0x2221201F1E1D1C1B, 0x2A29282726252423,
0x3231302F2E2D2C2B, 0x3A39383736353433,
0x4241403F3E3D3C3B, 0x4A49484746454443, },
{ 0x131211100F0E0D0C, 0x1B1A191817161514,
0x232221201F1E1D1C, 0x2B2A292827262524,
0x333231302F2E2D2C, 0x3B3A393837363534,
0x434241403F3E3D3C, 0x4B4A494847464544, },
{ 0x14131211100F0E0D, 0x1C1B1A1918171615,
0x24232221201F1E1D, 0x2C2B2A2928272625,
0x34333231302F2E2D, 0x3C3B3A3938373635,
0x44434241403F3E3D, 0x4C4B4A4948474645, },
{ 0x1514131211100F0E, 0x1D1C1B1A19181716,
0x2524232221201F1E, 0x2D2C2B2A29282726,
0x3534333231302F2E, 0x3D3C3B3A39383736,
0x4544434241403F3E, 0x4D4C4B4A49484746, },
{ 0x161514131211100F, 0x1E1D1C1B1A191817,
0x262524232221201F, 0x2E2D2C2B2A292827,
0x363534333231302F, 0x3E3D3C3B3A393837,
0x464544434241403F, 0x4E4D4C4B4A494847, },
{ 0x1716151413121110, 0x1F1E1D1C1B1A1918,
0x2726252423222120, 0x2F2E2D2C2B2A2928,
0x3736353433323130, 0x3F3E3D3C3B3A3938,
0x4746454443424140, 0x4F4E4D4C4B4A4948, },
{ 0x1817161514131211, 0x201F1E1D1C1B1A19,
0x2827262524232221, 0x302F2E2D2C2B2A29,
0x3837363534333231, 0x403F3E3D3C3B3A39,
0x4847464544434241, 0x504F4E4D4C4B4A49, },
{ 0x1918171615141312, 0x21201F1E1D1C1B1A,
0x2928272625242322, 0x31302F2E2D2C2B2A,
0x3938373635343332, 0x41403F3E3D3C3B3A,
0x4948474645444342, 0x51504F4E4D4C4B4A, },
{ 0x1A19181716151413, 0x2221201F1E1D1C1B,
0x2A29282726252423, 0x3231302F2E2D2C2B,
0x3A39383736353433, 0x4241403F3E3D3C3B,
0x4A49484746454443, 0x5251504F4E4D4C4B, },
{ 0x1B1A191817161514, 0x232221201F1E1D1C,
0x2B2A292827262524, 0x333231302F2E2D2C,
0x3B3A393837363534, 0x434241403F3E3D3C,
0x4B4A494847464544, 0x535251504F4E4D4C, },
{ 0x1C1B1A1918171615, 0x24232221201F1E1D,
0x2C2B2A2928272625, 0x34333231302F2E2D,
0x3C3B3A3938373635, 0x44434241403F3E3D,
0x4C4B4A4948474645, 0x54535251504F4E4D, },
{ 0x1D1C1B1A19181716, 0x2524232221201F1E,
0x2D2C2B2A29282726, 0x3534333231302F2E,
0x3D3C3B3A39383736, 0x4544434241403F3E,
0x4D4C4B4A49484746, 0x5554535251504F4E, },
{ 0x1E1D1C1B1A191817, 0x262524232221201F,
0x2E2D2C2B2A292827, 0x363534333231302F,
0x3E3D3C3B3A393837, 0x464544434241403F,
0x4E4D4C4B4A494847, 0x565554535251504F, },
{ 0x1F1E1D1C1B1A1918, 0x2726252423222120,
0x2F2E2D2C2B2A2928, 0x3736353433323130,
0x3F3E3D3C3B3A3938, 0x4746454443424140,
0x4F4E4D4C4B4A4948, 0x5756555453525150, },
{ 0x201F1E1D1C1B1A19, 0x2827262524232221,
0x302F2E2D2C2B2A29, 0x3837363534333231,
0x403F3E3D3C3B3A39, 0x4847464544434241,
0x504F4E4D4C4B4A49, 0x5857565554535251, },
{ 0x21201F1E1D1C1B1A, 0x2928272625242322,
0x31302F2E2D2C2B2A, 0x3938373635343332,
0x41403F3E3D3C3B3A, 0x4948474645444342,
0x51504F4E4D4C4B4A, 0x5958575655545352, },
{ 0x2221201F1E1D1C1B, 0x2A29282726252423,
0x3231302F2E2D2C2B, 0x3A39383736353433,
0x4241403F3E3D3C3B, 0x4A49484746454443,
0x5251504F4E4D4C4B, 0x5A59585756555453, },
{ 0x232221201F1E1D1C, 0x2B2A292827262524,
0x333231302F2E2D2C, 0x3B3A393837363534,
0x434241403F3E3D3C, 0x4B4A494847464544,
0x535251504F4E4D4C, 0x5B5A595857565554, },
{ 0x24232221201F1E1D, 0x2C2B2A2928272625,
0x34333231302F2E2D, 0x3C3B3A3938373635,
0x44434241403F3E3D, 0x4C4B4A4948474645,
0x54535251504F4E4D, 0x5C5B5A5958575655, },
{ 0x2524232221201F1E, 0x2D2C2B2A29282726,
0x3534333231302F2E, 0x3D3C3B3A39383736,
0x4544434241403F3E, 0x4D4C4B4A49484746,
0x5554535251504F4E, 0x5D5C5B5A59585756, },
{ 0x262524232221201F, 0x2E2D2C2B2A292827,
0x363534333231302F, 0x3E3D3C3B3A393837,
0x464544434241403F, 0x4E4D4C4B4A494847,
0x565554535251504F, 0x5E5D5C5B5A595857, },
#endif
};
asm volatile(
"vmovaps 0x000(%0), %%zmm0\n\t"
"vmovaps 0x040(%0), %%zmm1\n\t"
"vmovaps 0x080(%0), %%zmm2\n\t"
"vmovaps 0x0C0(%0), %%zmm3\n\t"
"vmovaps 0x100(%0), %%zmm4\n\t"
"vmovaps 0x140(%0), %%zmm5\n\t"
"vmovaps 0x180(%0), %%zmm6\n\t"
"vmovaps 0x1C0(%0), %%zmm7\n\t"
#if defined(__x86_64__) || defined(_M_X64)
"vmovaps 0x200(%0), %%zmm8\n\t"
"vmovaps 0x240(%0), %%zmm9\n\t"
"vmovaps 0x280(%0), %%zmm10\n\t"
"vmovaps 0x2C0(%0), %%zmm11\n\t"
"vmovaps 0x300(%0), %%zmm12\n\t"
"vmovaps 0x340(%0), %%zmm13\n\t"
"vmovaps 0x380(%0), %%zmm14\n\t"
"vmovaps 0x3C0(%0), %%zmm15\n\t"
"vmovaps 0x400(%0), %%zmm16\n\t"
"vmovaps 0x440(%0), %%zmm17\n\t"
"vmovaps 0x480(%0), %%zmm18\n\t"
"vmovaps 0x4C0(%0), %%zmm19\n\t"
"vmovaps 0x500(%0), %%zmm20\n\t"
"vmovaps 0x540(%0), %%zmm21\n\t"
"vmovaps 0x580(%0), %%zmm22\n\t"
"vmovaps 0x5C0(%0), %%zmm23\n\t"
"vmovaps 0x600(%0), %%zmm24\n\t"
"vmovaps 0x640(%0), %%zmm25\n\t"
"vmovaps 0x680(%0), %%zmm26\n\t"
"vmovaps 0x6C0(%0), %%zmm27\n\t"
"vmovaps 0x700(%0), %%zmm28\n\t"
"vmovaps 0x740(%0), %%zmm29\n\t"
"vmovaps 0x780(%0), %%zmm30\n\t"
"vmovaps 0x7C0(%0), %%zmm31\n\t"
#endif
"\n\t"
"int3\n\t"
:
: "b"(zmm)
: "%zmm0", "%zmm1", "%zmm2", "%zmm3", "%zmm4", "%zmm5", "%zmm6", "%zmm7"
#if defined(__x86_64__) || defined(_M_X64)
, "%zmm8", "%zmm9", "%zmm10", "%zmm11", "%zmm12", "%zmm13", "%zmm14",
"%zmm15", "%zmm16", "%zmm17", "%zmm18", "%zmm19", "%zmm20", "%zmm21",
"%zmm22", "%zmm23", "%zmm24", "%zmm25", "%zmm26", "%zmm27", "%zmm28",
"%zmm29", "%zmm30", "%zmm31"
#endif
);
return 0;
}
| 5,407 |
980 | from django.test import TestCase as DjangoTestCase
class TestCase(DjangoTestCase):
def __init__(self, *args, **kwargs):
if not hasattr(self, 'assertItemsEqual'):
self.assertItemsEqual = self.assertCountEqual
super(TestCase, self).__init__(*args, **kwargs)
| 115 |
418 | <reponame>ThakurKarthik/expo<filename>ios/Pods/Headers/Private/Bolts/BFAppLinkReturnToRefererView_Internal.h<gh_stars>100-1000
../../../Bolts/Bolts/iOS/Internal/BFAppLinkReturnToRefererView_Internal.h | 79 |
5,813 | <gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.storage.aliyun;
import com.aliyun.oss.OSS;
import com.aliyun.oss.OSSClient;
import com.aliyun.oss.model.ListObjectsRequest;
import com.aliyun.oss.model.OSSObjectSummary;
import com.aliyun.oss.model.ObjectListing;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import org.junit.Assert;
import org.junit.Test;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
public class OssObjectSummaryIteratorTest
{
private static final ImmutableList<OSSObjectSummary> TEST_OBJECTS =
ImmutableList.of(
makeObjectSummary("b", "foo", 10L),
makeObjectSummary("b", "foo/", 0L), // directory
makeObjectSummary("b", "foo/bar1", 10L),
makeObjectSummary("b", "foo/bar2", 10L),
makeObjectSummary("b", "foo/bar3", 10L),
makeObjectSummary("b", "foo/bar4", 10L),
makeObjectSummary("b", "foo/bar5", 0L), // empty object
makeObjectSummary("b", "foo/baz", 10L),
makeObjectSummary("bucketnotmine", "a/different/bucket", 10L),
makeObjectSummary("b", "foo/bar/", 0L) // another directory at the end of list
);
@Test
public void testSingleObject()
{
test(
ImmutableList.of(OssStorageDruidModule.SCHEME + "://b/foo/baz"),
ImmutableList.of(OssStorageDruidModule.SCHEME + "://b/foo/baz"),
5
);
}
@Test
public void testMultiObjectOneKeyAtATime()
{
test(
ImmutableList.of(
OssStorageDruidModule.SCHEME + "://b/foo/bar1",
OssStorageDruidModule.SCHEME + "://b/foo/bar2",
OssStorageDruidModule.SCHEME + "://b/foo/bar3",
OssStorageDruidModule.SCHEME + "://b/foo/bar4",
OssStorageDruidModule.SCHEME + "://b/foo/baz"
),
ImmutableList.of(OssStorageDruidModule.SCHEME + "://b/foo/"),
1
);
}
@Test
public void testMultiObjectTwoKeysAtATime()
{
test(
ImmutableList.of(
OssStorageDruidModule.SCHEME + "://b/foo/bar1",
OssStorageDruidModule.SCHEME + "://b/foo/bar2",
OssStorageDruidModule.SCHEME + "://b/foo/bar3",
OssStorageDruidModule.SCHEME + "://b/foo/bar4",
OssStorageDruidModule.SCHEME + "://b/foo/baz"
),
ImmutableList.of(OssStorageDruidModule.SCHEME + "://b/foo/"),
2
);
}
@Test
public void testMultiObjectTenKeysAtATime()
{
test(
ImmutableList.of(
OssStorageDruidModule.SCHEME + "://b/foo/bar1",
OssStorageDruidModule.SCHEME + "://b/foo/bar2",
OssStorageDruidModule.SCHEME + "://b/foo/bar3",
OssStorageDruidModule.SCHEME + "://b/foo/bar4",
OssStorageDruidModule.SCHEME + "://b/foo/baz"
),
ImmutableList.of(OssStorageDruidModule.SCHEME + "://b/foo/"),
10
);
}
@Test
public void testPrefixInMiddleOfKey()
{
test(
ImmutableList.of(OssStorageDruidModule.SCHEME + "://b/foo/bar1", OssStorageDruidModule.SCHEME + "://b/foo/bar2", OssStorageDruidModule.SCHEME + "://b/foo/bar3", OssStorageDruidModule.SCHEME + "://b/foo/bar4"),
ImmutableList.of(OssStorageDruidModule.SCHEME + "://b/foo/bar"),
10
);
}
@Test
public void testNoPath()
{
test(
ImmutableList.of(
OssStorageDruidModule.SCHEME + "://b/foo",
OssStorageDruidModule.SCHEME + "://b/foo/bar1",
OssStorageDruidModule.SCHEME + "://b/foo/bar2",
OssStorageDruidModule.SCHEME + "://b/foo/bar3",
OssStorageDruidModule.SCHEME + "://b/foo/bar4",
OssStorageDruidModule.SCHEME + "://b/foo/baz"
),
ImmutableList.of(OssStorageDruidModule.SCHEME + "://b"),
10
);
}
@Test
public void testSlashPath()
{
test(
ImmutableList.of(
OssStorageDruidModule.SCHEME + "://b/foo",
OssStorageDruidModule.SCHEME + "://b/foo/bar1",
OssStorageDruidModule.SCHEME + "://b/foo/bar2",
OssStorageDruidModule.SCHEME + "://b/foo/bar3",
OssStorageDruidModule.SCHEME + "://b/foo/bar4",
OssStorageDruidModule.SCHEME + "://b/foo/baz"
),
ImmutableList.of(OssStorageDruidModule.SCHEME + "://b/"),
10
);
}
@Test
public void testDifferentBucket()
{
test(
ImmutableList.of(),
ImmutableList.of(OssStorageDruidModule.SCHEME + "://bx/foo/"),
10
);
}
@Test
public void testWithMultiplePrefixesReturningAllNonEmptyObjectsStartingWithOneOfPrefixes()
{
test(
ImmutableList.of(
OssStorageDruidModule.SCHEME + "://b/foo/bar1",
OssStorageDruidModule.SCHEME + "://b/foo/bar2",
OssStorageDruidModule.SCHEME + "://b/foo/bar3",
OssStorageDruidModule.SCHEME + "://b/foo/bar4",
OssStorageDruidModule.SCHEME + "://b/foo/baz"
),
ImmutableList.of(OssStorageDruidModule.SCHEME + "://b/foo/bar", OssStorageDruidModule.SCHEME + "://b/foo/baz"),
10
);
}
private static void test(
final List<String> expectedUris,
final List<String> prefixes,
final int maxListingLength
)
{
final List<OSSObjectSummary> expectedObjects = new ArrayList<>();
// O(N^2) but who cares -- the list is short.
for (final String uri : expectedUris) {
final List<OSSObjectSummary> matches = TEST_OBJECTS.stream()
.filter(
summary ->
OssUtils.summaryToUri(summary).toString().equals(uri)
)
.collect(Collectors.toList());
expectedObjects.add(Iterables.getOnlyElement(matches));
}
final List<OSSObjectSummary> actualObjects = ImmutableList.copyOf(
OssUtils.objectSummaryIterator(
makeMockClient(TEST_OBJECTS),
prefixes.stream().map(URI::create).collect(Collectors.toList()),
maxListingLength
)
);
Assert.assertEquals(
prefixes.toString(),
expectedObjects.stream().map(OssUtils::summaryToUri).collect(Collectors.toList()),
actualObjects.stream().map(OssUtils::summaryToUri).collect(Collectors.toList())
);
}
/**
* Makes a mock OSS client that handles enough of "listObjects" to test the functionality of the
* {@link OssObjectSummaryIterator} class.
*/
private static OSS makeMockClient(
final List<OSSObjectSummary> objects
)
{
return new OSSClient("endpoint", "accessKey", "keySecret")
{
@Override
public ObjectListing listObjects(final ListObjectsRequest request)
{
        // Continuation token is an index in the "objects" list.
final String continuationToken = request.getMarker();
final int startIndex = continuationToken == null ? 0 : Integer.parseInt(continuationToken);
// Find matching objects.
final List<OSSObjectSummary> summaries = new ArrayList<>();
int nextIndex = -1;
for (int i = startIndex; i < objects.size(); i++) {
final OSSObjectSummary summary = objects.get(i);
if (summary.getBucketName().equals(request.getBucketName())
&& summary.getKey().startsWith(request.getPrefix())) {
if (summaries.size() == request.getMaxKeys()) {
// We reached our max key limit; set nextIndex (which will lead to a result with truncated = true).
nextIndex = i;
break;
}
// Generate a summary.
summaries.add(summary);
}
}
// Generate the result.
final ObjectListing retVal = new ObjectListing();
retVal.getObjectSummaries().addAll(summaries);
if (nextIndex >= 0) {
retVal.setTruncated(true);
retVal.setNextMarker(String.valueOf(nextIndex));
}
return retVal;
}
};
}
private static OSSObjectSummary makeObjectSummary(final String bucket, final String key, final long size)
{
final OSSObjectSummary summary = new OSSObjectSummary();
summary.setBucketName(bucket);
summary.setKey(key);
summary.setSize(size);
return summary;
}
}
| 4,246 |
890 | <reponame>light1021/asylo
/*
*
* Copyright 2018 Asylo authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <stdlib.h>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "asylo/platform/core/test/proto_test.pb.h"
#include "asylo/test/util/enclave_test_application.h"
#include "asylo/util/status_macros.h"
namespace asylo {
class EnclaveGetenvTest : public EnclaveTestCase {
public:
EnclaveGetenvTest() = default;
Status RunTest(const std::string &test_string) {
const char *positive = strchr(test_string.c_str(), '=');
if (positive) {
std::string name(test_string.c_str(), positive - test_string.c_str());
std::string value(positive + 1, test_string.size() - name.size() - 1);
const char *test_value = getenv(name.c_str());
if (!test_value) {
return absl::InternalError(
absl::StrCat("getenv returned null for name ", name));
} else if (value != test_value) {
return absl::InternalError(
absl::StrCat("getenv returned an unexpected value for name ", name,
": ", test_value, ", expected ", value));
}
} else {
const char *negative = strchr(test_string.c_str(), '~');
if (!negative) {
return absl::InternalError(
absl::StrCat("Bad test string: ", test_string));
}
std::string name(test_string.c_str(), negative - test_string.c_str());
const char *negative_value = getenv(name.c_str());
if (negative_value) {
return absl::InternalError(
absl::StrCat("Test for unset getenv returned a value for name ",
name, ": ", negative_value));
}
}
return absl::OkStatus();
}
Status Run(const EnclaveInput &input, EnclaveOutput *) {
std::string buf = GetEnclaveInputTestString(input);
EnclaveApiTest enclave_input_test;
if (!enclave_input_test.ParseFromArray(buf.data(), buf.size())) {
return absl::InvalidArgumentError(
"Failed to deserialize string to enclave_input_test");
}
if (enclave_input_test.test_repeated_size() != 2) {
return absl::InternalError(
"Deserialized field(s) of enclave_input_test doesn't "
"match the value set");
}
for (const auto &test : enclave_input_test.test_repeated()) {
ASYLO_RETURN_IF_ERROR(RunTest(test));
}
return absl::OkStatus();
}
};
TrustedApplication *BuildTrustedApplication() { return new EnclaveGetenvTest; }
} // namespace asylo
| 1,170 |
2,268 | <filename>sp/src/game/client/c_slideshow_display.h
//========= Copyright Valve Corporation, All rights reserved. ============//
//
// Purpose:
//
// $NoKeywords: $
//
//=============================================================================//
#ifndef C_SLIDESHOW_DISPLAY_H
#define C_SLIDESHOW_DISPLAY_H
#include "cbase.h"
#include "utlvector.h"
struct SlideMaterialList_t
{
char szSlideKeyword[64];
CUtlVector<int> iSlideMaterials;
CUtlVector<int> iSlideIndex;
};
class C_SlideshowDisplay : public C_BaseEntity
{
public:
DECLARE_CLASS( C_SlideshowDisplay, CBaseEntity );
DECLARE_CLIENTCLASS();
C_SlideshowDisplay();
virtual ~C_SlideshowDisplay();
void Spawn( void );
virtual void OnDataChanged( DataUpdateType_t updateType );
void ClientThink( void );
bool IsEnabled( void ) { return m_bEnabled; }
void GetDisplayText( char *pchText ) { Q_strcpy( pchText, m_szDisplayText ); }
int CurrentMaterialIndex( void ) { return m_iCurrentMaterialIndex; }
int GetMaterialIndex( int iSlideIndex );
int NumMaterials( void );
int CurrentSlideIndex( void ) { return m_iCurrentSlideIndex; }
private:
void BuildSlideShowImagesList( void );
private:
bool m_bEnabled;
char m_szDisplayText[ 128 ];
char m_szSlideshowDirectory[ 128 ];
CUtlVector<SlideMaterialList_t*> m_SlideMaterialLists;
unsigned char m_chCurrentSlideLists[ 16 ];
int m_iCurrentMaterialIndex;
int m_iCurrentSlideIndex;
float m_fMinSlideTime;
float m_fMaxSlideTime;
float m_NextSlideTime;
int m_iCycleType;
bool m_bNoListRepeats;
int m_iCurrentSlideList;
int m_iCurrentSlide;
};
extern CUtlVector< C_SlideshowDisplay* > g_SlideshowDisplays;
#endif // C_SLIDESHOW_DISPLAY_H
335 | {
"word": "Weatherman",
"definitions": [
"A man who broadcasts a description and forecast of weather conditions."
],
"parts-of-speech": "Noun"
} | 64 |
1,444 |
package mage.abilities.keyword;
import mage.constants.Zone;
import mage.abilities.MageSingleton;
import mage.abilities.StaticAbility;
import java.io.ObjectStreamException;
/**
*
* @author <EMAIL>
*/
public class ReachAbility extends StaticAbility implements MageSingleton {
private static final ReachAbility instance = new ReachAbility();
private Object readResolve() throws ObjectStreamException {
return instance;
}
public static ReachAbility getInstance() {
return instance;
}
private ReachAbility() {
super(Zone.ALL, null);
}
@Override
public String getRule() {
return "reach";
}
@Override
public ReachAbility copy() {
return instance;
}
}
| 251 |
1,830 | <filename>atomix/cluster/src/main/java/io/atomix/raft/storage/log/entry/ConfigurationEntry.java
/*
* Copyright 2015-present Open Networking Foundation
* Copyright © 2020 camunda services GmbH (<EMAIL>)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.atomix.raft.storage.log.entry;
import static com.google.common.base.MoreObjects.toStringHelper;
import io.atomix.raft.cluster.RaftMember;
import io.atomix.utils.misc.TimestampPrinter;
import java.util.Collection;
import java.util.Objects;
/**
* Stores a cluster configuration.
*
* <p>The {@code ConfigurationEntry} stores information relevant to a single cluster configuration
* change. Configuration change entries store a collection of {@link RaftMember members} which each
* represent a server in the cluster. Each time the set of members changes or a property of a single
* member changes, a new {@code ConfigurationEntry} must be logged for the configuration change.
*/
public class ConfigurationEntry implements RaftEntry {
protected final Collection<RaftMember> members;
private final long timestamp;
public ConfigurationEntry(final long timestamp, final Collection<RaftMember> members) {
this.members = members;
this.timestamp = timestamp;
}
public long timestamp() {
return timestamp;
}
/**
* Returns the members.
*
* @return The members.
*/
public Collection<RaftMember> members() {
return members;
}
@Override
public String toString() {
return toStringHelper(this)
.add("timestamp", new TimestampPrinter(timestamp))
.add("members", members)
.toString();
}
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final ConfigurationEntry that = (ConfigurationEntry) o;
return timestamp == that.timestamp && Objects.equals(members, that.members);
}
@Override
public int hashCode() {
return Objects.hash(members, timestamp);
}
}
| 744 |
1,314 | <filename>spring-boot-redis/src/main/java/cn/javastack/springboot/redis/pojo/User.java
package cn.javastack.springboot.redis.pojo;
import lombok.Data;
import java.util.Date;
import java.util.List;
import java.util.Map;
/**
 * WeChat official account: Java技术栈 (Java Technology Stack)
*/
@Data
public class User {
private int id;
private String name;
private Date birthday;
private List<String> interesting;
private Map<String, Object> others;
}
| 177 |
903 | <filename>jphp-core/tests/org/develnext/jphp/core/compiler/jvm/ArgUnpackTest.java<gh_stars>100-1000
package org.develnext.jphp.core.compiler.jvm;
import org.develnext.jphp.zend.ext.ZendExtension;
import org.junit.FixMethodOrder;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import org.junit.runners.MethodSorters;
import php.runtime.env.CompileScope;
@RunWith(JUnit4.class)
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class ArgUnpackTest extends JvmCompilerCase {
@Override
protected CompileScope newScope() {
CompileScope scope = super.newScope();
scope.registerExtension(new ZendExtension());
return scope;
}
@Test
public void testBasic() {
check("arg_unpack/basic.php");
}
@Test
public void testByRef() {
check("arg_unpack/by_ref.php");
}
@Test
public void testDynamic() {
check("arg_unpack/dynamic.php");
}
@Test
public void testInvalidType() {
check("arg_unpack/invalid_type.php");
}
@Test
public void testManyArgs() {
check("arg_unpack/many_args.php");
}
@Test
public void testMethod() {
check("arg_unpack/method.php");
}
@Test
public void testNew() {
check("arg_unpack/new.php");
}
@Test
public void testPositionalArgAfterUnpackError() {
check("arg_unpack/positional_arg_after_unpack_error.php", true);
}
@Test
public void testTraversableThrowingException() {
check("arg_unpack/traversable_throwing_exception.php", true);
}
}
| 673 |
819 | """
Build a scaler normalized
``pandas.DataFrame`` from an existing ``pandas.DataFrame``
"""
import sklearn.preprocessing as preproc
import analysis_engine.consts as ae_consts
import spylunking.log.setup_logging as log_utils
log = log_utils.build_colorized_logger(
name=__name__)
def build_scaler_dataset_from_df(
df,
min_feature=-1,
max_feature=1):
"""build_scaler_dataset_from_df
Helper for building scaler datasets from an existing ``pandas.DataFrame``
returns a dictionary:
.. code-block:: python
return {
'status': status, # NOT_RUN | SUCCESS | ERR
'scaler': scaler, # MinMaxScaler
'df': df # scaled df from df arg
}
:param df: ``pandas.DataFrame`` to convert to scalers
:param min_feature: min feature range for scaler normalization
with default ``-1``
:param max_feature: max feature range for scaler normalization
with default ``1``
"""
status = ae_consts.NOT_RUN
scaler = None
output_df = None
res = {
'status': status,
'scaler': scaler,
'df': output_df
}
try:
log.debug(
f'building scaler range=[{min_feature}, {max_feature}]')
scaler = preproc.MinMaxScaler(
feature_range=(
min_feature,
max_feature))
log.info(
f'scaler.fit_transform(df) rows={len(df.index)}')
output_df = scaler.fit_transform(
df.values)
status = ae_consts.SUCCESS
except Exception as e:
log.error(
f'failed build_scaler_dataset_from_df '
f'with ex={e} '
f'range=[{min_feature}, {max_feature}]')
status = ae_consts.ERR
# end of try/ex
res = {
'status': status,
'scaler': scaler,
'df': output_df
}
return res
# end of build_scaler_dataset_from_df
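# Hedged usage sketch (the DataFrame below is made up for illustration only):
#
#   import pandas as pd
#   df = pd.DataFrame({'close': [10.0, 12.5, 11.0], 'volume': [100, 150, 90]})
#   res = build_scaler_dataset_from_df(df=df, min_feature=-1, max_feature=1)
#   if res['status'] == ae_consts.SUCCESS:
#       scaled = res['df']        # numpy array with each column scaled to [-1, 1]
#       scaler = res['scaler']    # fitted MinMaxScaler, reusable via scaler.transform()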
| 907 |
348 | <reponame>chamberone/Leaflet.PixiOverlay<gh_stars>100-1000
{"nom":"Veynes","circ":"1ère circonscription","dpt":"Hautes-Alpes","inscrits":2449,"abs":1453,"votants":996,"blancs":126,"nuls":113,"exp":757,"res":[{"nuance":"REM","nom":"Mme <NAME>","voix":465},{"nuance":"LR","nom":"Mme <NAME>","voix":292}]} | 122 |
402 | <reponame>xdenser/flow<gh_stars>100-1000
package com.vaadin.client;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Assert;
import org.junit.Test;
import com.vaadin.client.UILifecycle.UIState;
public class UILifecycleTest {
UILifecycle lifecycle = new UILifecycle();
@Test
public void initialState() {
Assert.assertEquals(UIState.INITIALIZING, lifecycle.getState());
}
@Test
public void initialToRunningToTerminated() {
lifecycle.setState(UIState.RUNNING);
Assert.assertEquals(UIState.RUNNING, lifecycle.getState());
lifecycle.setState(UIState.TERMINATED);
Assert.assertEquals(UIState.TERMINATED, lifecycle.getState());
}
@Test(expected = IllegalArgumentException.class)
public void invalidStateChangeInitToTerminated() {
lifecycle.setState(UIState.INITIALIZING);
lifecycle.setState(UIState.TERMINATED);
}
@Test(expected = IllegalArgumentException.class)
public void invalidStateChangeRunningToInit() {
lifecycle.setState(UIState.INITIALIZING);
lifecycle.setState(UIState.RUNNING);
lifecycle.setState(UIState.INITIALIZING);
}
@Test(expected = IllegalArgumentException.class)
public void invalidStateChangeTerminatedToInit() {
lifecycle.setState(UIState.INITIALIZING);
lifecycle.setState(UIState.RUNNING);
lifecycle.setState(UIState.TERMINATED);
lifecycle.setState(UIState.INITIALIZING);
}
@Test(expected = IllegalArgumentException.class)
public void invalidStateChangeTerminatedToRunning() {
lifecycle.setState(UIState.INITIALIZING);
lifecycle.setState(UIState.RUNNING);
lifecycle.setState(UIState.TERMINATED);
lifecycle.setState(UIState.RUNNING);
}
@Test
public void stateChangeEvents() {
AtomicInteger events = new AtomicInteger(0);
lifecycle.addHandler(e -> {
events.incrementAndGet();
});
Assert.assertEquals(0, events.get());
lifecycle.setState(UIState.RUNNING);
Assert.assertEquals(1, events.get());
lifecycle.setState(UIState.TERMINATED);
Assert.assertEquals(2, events.get());
}
}
| 903 |
1,144 | // SPDX-License-Identifier: GPL-2.0+
/*
* (C) Copyright 2007 Semihalf
*
* Written by: <NAME> <<EMAIL>>
*
* This file contains routines that fetch data from ARM-dependent sources
* (bd_info etc.)
*/
#include <config.h>
#include <linux/types.h>
#include <api_public.h>
#include <asm/u-boot.h>
#include <asm/global_data.h>
#include "api_private.h"
DECLARE_GLOBAL_DATA_PTR;
/*
* Important notice: handling of individual fields MUST be kept in sync with
* include/asm-arm/u-boot.h and include/asm-arm/global_data.h, so any changes
* need to reflect their current state and layout of structures involved!
*/
int platform_sys_info(struct sys_info *si)
{
int i;
for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++)
platform_set_mr(si, gd->bd->bi_dram[i].start,
gd->bd->bi_dram[i].size, MR_ATTR_DRAM);
return 1;
}
| 314 |
305 | from __future__ import print_function
import os
import lldb
import time
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
from lldbsuite.test import configuration
class TestIntelPTSimpleBinary(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def setUp(self):
TestBase.setUp(self)
if 'intel-pt' not in configuration.enabled_plugins:
self.skipTest("The intel-pt test plugin is not enabled")
plugin_path = os.path.join(configuration.lldb_libs_dir, "liblldbIntelFeatures.so")
self.runCmd("plugin load " + plugin_path)
@skipIf(oslist=no_match(['linux']))
@skipIf(archs=no_match(['i386', 'x86_64']))
@skipIfRemote
def test_basic_flow(self):
"""Test collection, decoding, and dumping instructions"""
self.build()
exe = self.getBuildArtifact("a.out")
lldbutil.run_to_name_breakpoint(self, "main", exe_name=exe)
# We start tracing from main
self.runCmd("processor-trace start all")
# We check the trace after the for loop
self.runCmd("b " + str(line_number('main.cpp', '// Break 1')))
self.runCmd("c")
# We wait a little bit to ensure the processor has send the PT packets to
# the memory
time.sleep(.1)
# We find the start address of the 'fun' function for a later check
target = self.dbg.GetSelectedTarget()
        fun_start_address = target.FindFunctions("fun")[0].GetSymbol() \
            .GetStartAddress().GetLoadAddress(target)
# We print the last instructions
self.expect("processor-trace show-instr-log -c 100",
patterns=[
# We expect to have seen the first instruction of 'fun'
                hex(fun_start_address),
# We expect to see the exit condition of the for loop
"at main.cpp:" + str(line_number('main.cpp', '// Break for loop'))
])
self.runCmd("processor-trace stop")
| 866 |
722 | package org.ahocorasick.interval;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public class IntervalNode {
private enum Direction {LEFT, RIGHT}
private IntervalNode left;
private IntervalNode right;
private int point;
private List<Intervalable> intervals = new ArrayList<>();
public IntervalNode(final List<Intervalable> intervals) {
this.point = determineMedian(intervals);
final List<Intervalable> toLeft = new ArrayList<>();
final List<Intervalable> toRight = new ArrayList<>();
for (Intervalable interval : intervals) {
if (interval.getEnd() < this.point) {
toLeft.add(interval);
} else if (interval.getStart() > this.point) {
toRight.add(interval);
} else {
this.intervals.add(interval);
}
}
if (toLeft.size() > 0) {
this.left = new IntervalNode(toLeft);
}
if (toRight.size() > 0) {
this.right = new IntervalNode(toRight);
}
}
public int determineMedian(final List<Intervalable> intervals) {
int start = -1;
int end = -1;
for (Intervalable interval : intervals) {
int currentStart = interval.getStart();
int currentEnd = interval.getEnd();
if (start == -1 || currentStart < start) {
start = currentStart;
}
if (end == -1 || currentEnd > end) {
end = currentEnd;
}
}
return (start + end) / 2;
}
public List<Intervalable> findOverlaps(final Intervalable interval) {
final List<Intervalable> overlaps = new ArrayList<>();
if (this.point < interval.getStart()) {
// Tends to the right
addToOverlaps(interval, overlaps, findOverlappingRanges(this.right, interval));
addToOverlaps(interval, overlaps, checkForOverlapsToTheRight(interval));
} else if (this.point > interval.getEnd()) {
// Tends to the left
addToOverlaps(interval, overlaps, findOverlappingRanges(this.left, interval));
addToOverlaps(interval, overlaps, checkForOverlapsToTheLeft(interval));
} else {
// Somewhere in the middle
addToOverlaps(interval, overlaps, this.intervals);
addToOverlaps(interval, overlaps, findOverlappingRanges(this.left, interval));
addToOverlaps(interval, overlaps, findOverlappingRanges(this.right, interval));
}
return overlaps;
}
protected void addToOverlaps(
final Intervalable interval,
final List<Intervalable> overlaps,
final List<Intervalable> newOverlaps) {
for (final Intervalable currentInterval : newOverlaps) {
if (!currentInterval.equals(interval)) {
overlaps.add(currentInterval);
}
}
}
protected List<Intervalable> checkForOverlapsToTheLeft(final Intervalable interval) {
return checkForOverlaps(interval, Direction.LEFT);
}
protected List<Intervalable> checkForOverlapsToTheRight(final Intervalable interval) {
return checkForOverlaps(interval, Direction.RIGHT);
}
protected List<Intervalable> checkForOverlaps(
final Intervalable interval, final Direction direction) {
final List<Intervalable> overlaps = new ArrayList<>();
for (final Intervalable currentInterval : this.intervals) {
switch (direction) {
case LEFT:
if (currentInterval.getStart() <= interval.getEnd()) {
overlaps.add(currentInterval);
}
break;
case RIGHT:
if (currentInterval.getEnd() >= interval.getStart()) {
overlaps.add(currentInterval);
}
break;
}
}
return overlaps;
}
protected List<Intervalable> findOverlappingRanges(IntervalNode node, Intervalable interval) {
return node == null
? Collections.<Intervalable>emptyList()
: node.findOverlaps(interval);
}
}
| 1,966 |
5,447 | <reponame>Kh4L/gluon-cv
# pylint: disable=wildcard-import
"""Video action recognition, HMDB51 dataset.
http://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/
"""
from __future__ import absolute_import
from .classification import *
| 87 |
558 | /* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright (C) 2021 Micron Technology, Inc. All rights reserved.
*/
#ifndef HSE_CONFIG_ARGV_H
#define HSE_CONFIG_ARGV_H
#include <stddef.h>
#include <hse_ikvdb/kvdb_cparams.h>
#include <hse_ikvdb/kvdb_rparams.h>
#include <hse_ikvdb/kvs_cparams.h>
#include <hse_ikvdb/kvs_rparams.h>
#include <hse_ikvdb/hse_gparams.h>
#include <hse_util/hse_err.h>
/**
* Deserialize list of key=value parameters to KVDB rparams
*
* @param paramc: number of parameters
* @param paramv: list of key=value strings
* @param params: params struct
* @returns error status
* @retval 0 success
* @retval !0 failure
*/
merr_t
argv_deserialize_to_kvdb_rparams(
size_t paramc,
const char *const * paramv,
struct kvdb_rparams *params);
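/*
 * Hedged usage sketch (the key=value string below is illustrative only and is
 * not verified against the real KVDB parameter set):
 *
 *   struct kvdb_rparams params;          // assume initialized to defaults elsewhere
 *   const char *const paramv[] = { "read_only=1" };
 *   merr_t err = argv_deserialize_to_kvdb_rparams(1, paramv, &params);
 *   if (err)
 *       ; // handle the error
 */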
/**
* Deserialize list of key=value parameters to KVDB cparams
*
* @param paramc: number of parameters
* @param paramv: list of key=value strings
* @param params: params struct
* @returns error status
* @retval 0 success
* @retval !0 failure
*/
merr_t
argv_deserialize_to_kvdb_cparams(
size_t paramc,
const char *const * paramv,
struct kvdb_cparams *params);
/**
* Deserialize list of key=value parameters to KVS rparams
*
* @param paramc: number of parameters
* @param paramv: list of key=value strings
* @param params: params struct
* @returns error status
* @retval 0 success
* @retval !0 failure
*/
merr_t
argv_deserialize_to_kvs_rparams(
size_t paramc,
const char *const * paramv,
struct kvs_rparams *params);
/**
* Deserialize list of key=value parameters to KVS cparams
*
* @param paramc: number of parameters
* @param paramv: list of key=value strings
* @param params: params struct
* @returns error status
* @retval 0 success
* @retval !0 failure
*/
merr_t
argv_deserialize_to_kvs_cparams(
size_t paramc,
const char *const * paramv,
struct kvs_cparams *params);
/**
* Deserialize list of key=value parameters to global params
*
* @param paramc: number of parameters
* @param paramv: list of key=value strings
* @param params: params struct
* @returns error status
* @retval 0 success
* @retval !0 failure
*/
merr_t
argv_deserialize_to_hse_gparams(
size_t paramc,
const char *const * paramv,
struct hse_gparams *params);
#endif
| 929 |
389 | <filename>gosu-lab/src/main/java/editor/IGosuPanel.java
package editor;
import gw.lang.parser.IGosuParser;
import gw.lang.parser.IScriptPartId;
import gw.lang.parser.ISymbolTable;
import gw.lang.parser.ITypeUsesMap;
import gw.lang.reflect.IType;
import javax.swing.*;
import java.io.IOException;
/**
* Copyright 2010 Guidewire Software, Inc.
*/
public interface IGosuPanel
{
JComponent asJComponent();
void read( IScriptPartId partId, String strSource ) throws IOException;
void parse();
String getText();
ISymbolTable getSymbolTable();
IGosuParser getParser();
void setTypeUsesMap( ITypeUsesMap typeUsesMap );
void setProgramSuperType( IType baseClass );
}
| 235 |
2,071 | /*
* Copyright 2019 WeBank
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.webank.wedatasphere.dss.appconn.manager.service;
import com.webank.wedatasphere.dss.appconn.manager.entity.AppConnInfo;
import com.webank.wedatasphere.dss.appconn.manager.entity.AppInstanceInfo;
import java.util.List;
public interface AppConnInfoService {
List<? extends AppConnInfo> getAppConnInfos();
AppConnInfo getAppConnInfo(String appConnName);
List<? extends AppInstanceInfo> getAppInstancesByAppConnInfo(AppConnInfo appConnInfo);
List<? extends AppInstanceInfo> getAppInstancesByAppConnName(String appConnName);
}
| 330 |
430 | <reponame>asheraryam/stabilizer
#include <stdio.h>
void ctor() __attribute__((constructor));
void ctor() {
printf("Hello Constructor!\n");
}
int main(int argc, char** argv) {
printf("Hello World!\n");
return 0;
}
| 87 |
1,336 | <filename>demo/demo-multi-service-center/demo-multi-service-center-client/src/main/java/org/apache/servicecomb/demo/multiServiceCenterClient/Application.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.demo.multiServiceCenterClient;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import org.apache.servicecomb.demo.CategorizedTestCaseRunner;
import org.apache.servicecomb.demo.TestMgr;
import org.apache.servicecomb.springboot2.starter.EnableServiceComb;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.stereotype.Component;
@SpringBootApplication
@EnableServiceComb
@Component
public class Application {
public static void main(final String[] args) throws Exception {
new SpringApplicationBuilder().sources(Application.class)
.web(WebApplicationType.NONE).build().run(args);
runTest();
}
public static void runTest() throws Exception {
CategorizedTestCaseRunner.runCategorizedTestCase("demo-multi-service-center-serverA");
testRegistryThreads();
testTransportThreads();
TestMgr.summary();
if (!TestMgr.errors().isEmpty()) {
throw new IllegalStateException("tests failed");
}
}
private static void testTransportThreads() throws Exception {
Set<Thread> threadSet = Thread.getAllStackTraces().keySet();
List<String> expectedThread = new ArrayList<>();
threadSet.forEach(thread -> {
if (thread.getName().contains("transport-")) {
expectedThread.add(thread.getName());
}
});
TestMgr.check(6, expectedThread.size());
}
private static void testRegistryThreads() throws Exception {
Set<Thread> threadSet = Thread.getAllStackTraces().keySet();
List<String> expectedThread = new ArrayList<>();
threadSet.forEach(thread -> {
if (thread.getName().contains("registry-")) {
expectedThread.add(thread.getName());
}
});
//registry-watch-vert.x-eventloop-thread-3(option)
//registry-watch-vert.x-eventloop-thread-2(option)
//registry-watch-vert.x-eventloop-thread-1
//registry-watch-vert.x-eventloop-thread-0
//registry-watch-serverB-vert.x-eventloop-thread-3(option)
//registry-watch-serverB-vert.x-eventloop-thread-2(option)
//registry-watch-serverB-vert.x-eventloop-thread-1
//registry-watch-serverB-vert.x-eventloop-thread-0
//registry-watch-pool-worker-service-center-watch-1
//registry-watch-pool-worker-service-center-watch-0
//registry-watch-serverB-pool-worker-service-center-watch-1
//registry-watch-serverB-pool-worker-service-center-watch-0
//registry-vert.x-eventloop-thread-3(option)
//registry-vert.x-eventloop-thread-2(option)
//registry-vert.x-eventloop-thread-1
//registry-vert.x-eventloop-thread-0
//registry-serverB-vert.x-eventloop-thread-3(option)
//registry-serverB-vert.x-eventloop-thread-2(option)
//registry-serverB-vert.x-eventloop-thread-1
//registry-serverB-vert.x-eventloop-thread-0
TestMgr.check(expectedThread.size() >= 12, expectedThread.size() <= 20);
}
}
| 1,321 |
1,817 | package test.org.springdoc.api.app104;
/**
* The type Having pk.
*/
public class HavingPK {
}
| 35 |
3,010 | #include "shared-bindings/board/__init__.h"
STATIC const mp_rom_map_elem_t board_module_globals_table[] = {
CIRCUITPYTHON_BOARD_DICT_STANDARD_ITEMS
{ MP_ROM_QSTR(MP_QSTR_TOUCH), MP_ROM_PTR(&pin_PA07) },
{ MP_ROM_QSTR(MP_QSTR_NEOPIXEL), MP_ROM_PTR(&pin_PA15) },
{ MP_ROM_QSTR(MP_QSTR_SWITCH), MP_ROM_PTR(&pin_PA28) },
};
MP_DEFINE_CONST_DICT(board_module_globals, board_module_globals_table);
| 203 |
841 | <filename>jbpm-test-coverage/src/test/java/org/jbpm/test/JbpmTestCase.java
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jbpm.test;
import java.io.File;
import java.nio.file.Files;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import org.apache.commons.io.IOUtils;
import org.assertj.core.api.Assertions;
import org.drools.compiler.kie.builder.impl.InternalKieModule;
import org.jbpm.test.persistence.util.PersistenceUtil;
import org.junit.Rule;
import org.junit.rules.TestRule;
import org.junit.rules.TestWatcher;
import org.junit.runner.Description;
import org.kie.api.KieServices;
import org.kie.api.builder.KieBuilder;
import org.kie.api.builder.KieFileSystem;
import org.kie.api.builder.Message;
import org.kie.api.builder.ReleaseId;
import org.kie.api.builder.Results;
import org.kie.api.command.KieCommands;
import org.kie.api.io.Resource;
import org.kie.api.io.ResourceType;
import org.kie.api.runtime.KieSession;
import org.kie.internal.builder.InternalKieBuilder;
import org.kie.scanner.KieMavenRepository;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class JbpmTestCase extends JbpmJUnitBaseTestCase {
protected final Logger logger = LoggerFactory.getLogger(getClass());
protected static final String EMPTY_CASE = "EmptyCase.bpmn2";
public JbpmTestCase() {
this(true);
}
public JbpmTestCase(boolean persistence) {
this(persistence, persistence);
}
public JbpmTestCase(boolean setupDataSource, boolean sessionPersistence) {
this(setupDataSource, sessionPersistence, "org.jbpm.test.persistence");
}
public JbpmTestCase(boolean setupDataSource, boolean sessionPersistence, String persistenceUnit) {
super(setupDataSource, sessionPersistence, persistenceUnit);
}
@Rule
public TestRule watcher = new TestWatcher() {
@Override
protected void starting(Description description) {
System.out.println(" >>> " + description.getMethodName() + " <<< ");
}
@Override
protected void finished(Description description) {
System.out.println();
}
};
@Override
protected Properties getDataSourceProperties(){
Properties dsProps = PersistenceUtil.getDatasourceProperties();
dsProps.setProperty("POOL_CONNECTIONS", "false");
return dsProps;
}
public KieSession createKSession(String... process) {
createRuntimeManager(process);
return getRuntimeEngine().getKieSession();
}
public KieSession createKSession(Map<String, ResourceType> res) {
createRuntimeManager(res);
return getRuntimeEngine().getKieSession();
}
public KieSession restoreKSession(String... process) {
disposeRuntimeManager();
createRuntimeManager(process);
return getRuntimeEngine().getKieSession();
}
public void assertProcessInstanceNeverRun(long processId) {
Assertions.assertThat(getLogService().findProcessInstance(processId)).as("Process has been running").isNull();
}
protected static KieServices getServices() {
return KieServices.Factory.get();
}
protected static KieCommands getCommands() {
return getServices().getCommands();
}
protected byte[] createAndDeployJar(KieServices ks, ReleaseId releaseId, Resource... resources) throws Exception {
KieFileSystem kfs = ks.newKieFileSystem().generateAndWritePomXML(releaseId);
for (int i = 0; i < resources.length; i++) {
if (resources[i] != null) {
kfs.write(resources[i]);
}
}
KieBuilder kieBuilder = ks.newKieBuilder(kfs);
((InternalKieBuilder) kieBuilder).buildAll(o -> true);
Results results = kieBuilder.getResults();
if (results.hasMessages(Message.Level.ERROR)) {
throw new IllegalStateException(results.getMessages(Message.Level.ERROR).toString());
}
InternalKieModule kieModule = (InternalKieModule) ks.getRepository().getKieModule(releaseId);
byte[] pomXmlContent = IOUtils.toByteArray(kieModule.getPomAsStream());
File pom = new File("target",
UUID.randomUUID().toString());
Files.write(pom.toPath(),
pomXmlContent);
KieMavenRepository.getKieMavenRepository().installArtifact(releaseId,
kieModule,
pom);
byte[] jar = kieModule.getBytes();
return jar;
}
}
| 1,907 |
1,483 | /*
* Copyright Lealone Database Group.
* Licensed under the Server Side Public License, v 1.
* Initial Developer: zhh
*/
package org.lealone.server.protocol.replication;
import java.io.IOException;
import org.lealone.net.NetInputStream;
import org.lealone.net.NetOutputStream;
import org.lealone.server.protocol.PacketDecoder;
import org.lealone.server.protocol.PacketType;
import org.lealone.server.protocol.statement.StatementUpdateAck;
import org.lealone.storage.replication.ReplicaCommand;
import org.lealone.storage.replication.ReplicationConflictType;
public class ReplicationUpdateAck extends StatementUpdateAck {
public final long first;
public String uncommittedReplicationName;
public final ReplicationConflictType replicationConflictType;
    public final int ackVersion; // a replication operation may respond multiple times; this field indicates which round of response this is
public final boolean isIfDDL;
public final boolean isFinalResult;
private ReplicaCommand replicaCommand;
private int packetId;
public ReplicationUpdateAck(int updateCount, long first, String uncommittedReplicationName,
ReplicationConflictType replicationConflictType, int ackVersion, boolean isIfDDL, boolean isFinalResult) {
super(updateCount);
this.first = first;
this.uncommittedReplicationName = uncommittedReplicationName;
this.replicationConflictType = replicationConflictType == null ? ReplicationConflictType.NONE
: replicationConflictType;
this.ackVersion = ackVersion;
this.isIfDDL = isIfDDL;
this.isFinalResult = isFinalResult;
}
public ReplicationUpdateAck(NetInputStream in, int version) throws IOException {
super(in, version);
first = in.readLong();
uncommittedReplicationName = in.readString();
replicationConflictType = ReplicationConflictType.getType(in.readInt());
ackVersion = in.readInt();
isIfDDL = in.readBoolean();
isFinalResult = in.readBoolean();
}
@Override
public PacketType getType() {
return PacketType.REPLICATION_UPDATE_ACK;
}
public ReplicaCommand getReplicaCommand() {
return replicaCommand;
}
public void setReplicaCommand(ReplicaCommand replicaCommand) {
this.replicaCommand = replicaCommand;
}
public int getPacketId() {
return packetId;
}
public void setPacketId(int packetId) {
this.packetId = packetId;
}
@Override
public void encode(NetOutputStream out, int version) throws IOException {
super.encode(out, version);
out.writeLong(first);
out.writeString(uncommittedReplicationName);
out.writeInt(replicationConflictType.value);
out.writeInt(ackVersion);
out.writeBoolean(isIfDDL);
out.writeBoolean(isFinalResult);
}
public static final Decoder decoder = new Decoder();
private static class Decoder implements PacketDecoder<ReplicationUpdateAck> {
@Override
public ReplicationUpdateAck decode(NetInputStream in, int version) throws IOException {
return new ReplicationUpdateAck(in, version);
}
}
}
| 1,175 |
1,374 | <reponame>norzak/jsweet
package def.dom;
public class MSAppAsyncOperation extends EventTarget {
public DOMError error;
public java.util.function.Function<Event,Object> oncomplete;
public java.util.function.Function<Event,Object> onerror;
public double readyState;
public Object result;
native public void start();
public double COMPLETED;
public double ERROR;
public double STARTED;
native public void addEventListener(jsweet.util.StringTypes.complete type, java.util.function.Function<Event,Object> listener, Boolean useCapture);
native public void addEventListener(jsweet.util.StringTypes.error type, java.util.function.Function<ErrorEvent,Object> listener, Boolean useCapture);
native public void addEventListener(String type, EventListener listener, Boolean useCapture);
public static MSAppAsyncOperation prototype;
public MSAppAsyncOperation(){}
native public void addEventListener(jsweet.util.StringTypes.complete type, java.util.function.Function<Event,Object> listener);
native public void addEventListener(jsweet.util.StringTypes.error type, java.util.function.Function<ErrorEvent,Object> listener);
native public void addEventListener(String type, EventListener listener);
native public void addEventListener(String type, EventListenerObject listener, Boolean useCapture);
native public void addEventListener(String type, EventListenerObject listener);
}
| 394 |
1,281 |
import sys
import time
import numpy as np
import pandas as pd
import datetime as dt
import multiprocessing as mp
class MultiProcessingFunctions:
""" This static functions in this class enable multi-processing"""
def __init__(self):
pass
@staticmethod
def lin_parts(num_atoms, num_threads):
""" This function partitions a list of atoms in subsets (molecules) of equal size.
An atom is a set of indivisible set of tasks.
Reference: Snippet 20.6 (page 308)
"""
# partition of atoms with a single loop
parts = np.linspace(0, num_atoms, min(num_threads, num_atoms) + 1)
parts = np.ceil(parts).astype(int)
return parts
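    # Hedged illustration (values computed from the linspace/ceil formula above, not
    # taken from the original docs): with num_atoms=10 and num_threads=3, lin_parts
    # returns array([0, 4, 7, 10]); worker i then processes atoms parts[i-1]:parts[i],
    # i.e. molecules of sizes 4, 3 and 3.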
@staticmethod
def nested_parts(num_atoms, num_threads, upper_triangle=False):
""" This function enables parallelization of nested loops.
Reference: Snippet 20.5 (page 306)
"""
# partition of atoms with an inner loop
        parts = [0]  # start from 0 so parts[-1] is defined on the first iteration
num_threads_ = min(num_threads, num_atoms)
for num in range(num_threads_):
part = 1 + 4 * (parts[-1] ** 2 + parts[-1] + num_atoms * (num_atoms + 1.) / num_threads_)
part = (-1 + part ** .5) / 2.
parts.append(part)
parts = np.round(parts).astype(int)
if upper_triangle: # the first rows are heaviest
parts = np.cumsum(np.diff(parts)[::-1])
parts = np.append(np.array([0]), parts)
return parts
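    # Hedged illustration (numbers worked out by hand from the recurrence above,
    # assuming upper_triangle=False): with num_atoms=4 and num_threads=2 the loop
    # yields boundaries [0, 3, 4], so the first molecule gets atoms 0..2 and the
    # second gets atom 3 alone -- heavier atoms (longer inner loops) end up in
    # smaller molecules.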
@staticmethod
def mp_pandas_obj(func, pd_obj, num_threads=24, mp_batches=1, lin_mols=True, **kargs):
""" Parallelize jobs, return a dataframe or series
Example: df1=mp_pandas_obj(func,('molecule',df0.index),24,**kwds)
Reference: Snippet 20.7 (page 310)
        :param func: (function) function to be parallelized
:param pd_obj: (vector) Element 0, is name of argument used to pass the molecule;
Element 1, is the list of atoms to be grouped into a molecule
:param num_threads: (int) number of threads
:param mp_batches: (int) number of batches
:param lin_mols: (bool) Tells if the method should use linear or nested partitioning
:param kargs: (var args)
:return: (data frame) of results
"""
if lin_mols:
parts = MultiProcessingFunctions.lin_parts(len(pd_obj[1]), num_threads * mp_batches)
else:
parts = MultiProcessingFunctions.nested_parts(len(pd_obj[1]), num_threads * mp_batches)
jobs = []
for i in range(1, len(parts)):
job = {pd_obj[0]: pd_obj[1][parts[i - 1]:parts[i]], 'func': func}
job.update(kargs)
jobs.append(job)
if num_threads == 1:
out = MultiProcessingFunctions.process_jobs_(jobs)
else:
out = MultiProcessingFunctions.process_jobs(jobs, num_threads=num_threads)
if isinstance(out[0], pd.DataFrame):
df0 = pd.DataFrame()
elif isinstance(out[0], pd.Series):
df0 = pd.Series()
else:
return out
for i in out:
df0 = df0.append(i)
df0 = df0.sort_index()
return df0
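    # Hedged usage sketch (the callback name and the 'events' frame are illustrative,
    # not part of this module). The molecule is passed under the keyword named in
    # pd_obj[0]; every other kwarg is forwarded unchanged, and the callback must be
    # importable (picklable) so the worker processes can run it:
    #
    #   def apply_twice(molecule, events):
    #       return events.loc[molecule, 'value'] * 2
    #
    #   out = MultiProcessingFunctions.mp_pandas_obj(
    #       func=apply_twice,
    #       pd_obj=('molecule', events.index),
    #       num_threads=4,
    #       events=events)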
@staticmethod
def process_jobs_(jobs):
""" Run jobs sequentially, for debugging """
out = []
for job in jobs:
out_ = MultiProcessingFunctions.expand_call(job)
out.append(out_)
return out
@staticmethod
def expand_call(kargs):
""" Expand the arguments of a callback function, kargs['func'] """
func = kargs['func']
del kargs['func']
out = func(**kargs)
return out
@staticmethod
def report_progress(job_num, num_jobs, time0, task):
# Report progress as asynch jobs are completed
msg = [float(job_num) / num_jobs, (time.time() - time0)/60.]
msg.append(msg[1] * (1/msg[0] - 1))
time_stamp = str(dt.datetime.fromtimestamp(time.time()))
msg = time_stamp + ' ' + str(round(msg[0]*100, 2)) + '% '+task+' done after ' + \
str(round(msg[1], 2)) + ' minutes. Remaining ' + str(round(msg[2], 2)) + ' minutes.'
if job_num < num_jobs:
sys.stderr.write(msg+'\r')
else:
sys.stderr.write(msg+'\n')
return
@staticmethod
def process_jobs(jobs, task=None, num_threads=24):
""" Run in parallel. jobs must contain a 'func' callback, for expand_call"""
if task is None:
task = jobs[0]['func'].__name__
pool = mp.Pool(processes=num_threads)
# outputs, out, time0 = pool.imap_unordered(MultiProcessingFunctions.expand_call,jobs),[],time.time()
outputs = pool.imap_unordered(MultiProcessingFunctions.expand_call, jobs)
out = []
time0 = time.time()
# Process asynchronous output and report progress
for i, out_ in enumerate(outputs, 1):
out.append(out_)
MultiProcessingFunctions.report_progress(i, len(jobs), time0, task)
pool.close()
pool.join() # this is needed to prevent memory leaks
return out
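# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original snippet). It shows how
# mp_pandas_obj hands each "molecule" (one slice of the index) to the worker
# function through the keyword named in pd_obj[0]. The toy worker below and
# the chosen thread count are assumptions made only for this example.
def _count_per_molecule(molecule):
    # Each job receives one index slice and returns a Series aligned to it.
    return pd.Series(1.0, index=molecule)

if __name__ == '__main__':
    idx = pd.date_range('2021-01-01', periods=1000, freq='min')
    res = MultiProcessingFunctions.mp_pandas_obj(
        func=_count_per_molecule,
        pd_obj=('molecule', idx),
        num_threads=2,  # use 1 to run sequentially through process_jobs_
        mp_batches=1,
    )
    print(res.shape)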
| 1,723 |
310 | {
"name": "SE2416H",
"description": "A 24 inch LCD monitor.",
"url": "https://www.dell.com/en-us/shop/dell-24-monitor-se2416h/apd/210-afvh/monitors-monitor-accessories"
}
| 77 |
1,155 | /********************************************************************
* COPYRIGHT:
* Copyright (c) 2002-2006, International Business Machines Corporation and
* others. All Rights Reserved.
********************************************************************
*
* @author <NAME>
* @author <NAME>
*/
/**
* Test Canonical Iterator
*/
#ifndef _CANITTST
#define _CANITTST
#include "unicode/utypes.h"
#if !UCONFIG_NO_NORMALIZATION
U_NAMESPACE_BEGIN
class Transliterator;
U_NAMESPACE_END
#include "unicode/translit.h"
#include "unicode/caniter.h"
#include "intltest.h"
#include "hash.h"
class CanonicalIteratorTest : public IntlTest {
public:
CanonicalIteratorTest();
virtual ~CanonicalIteratorTest();
void runIndexedTest( int32_t index, UBool exec, const char* &name, char* par = NULL );
void TestCanonicalIterator(void);
void TestExhaustive(void);
void TestBasic();
void TestAPI();
UnicodeString collectionToString(Hashtable *col);
//static UnicodeString collectionToString(Collection col);
private:
void expectEqual(const UnicodeString &message, const UnicodeString &item, const UnicodeString &a, const UnicodeString &b);
void characterTest(UnicodeString &s, UChar32 ch, CanonicalIterator &it);
Transliterator *nameTrans;
Transliterator *hexTrans;
UnicodeString getReadable(const UnicodeString &obj);
};
#endif /* #if !UCONFIG_NO_NORMALIZATION */
#endif // _CANITTST
| 465 |
986 | <filename>ibis/backends/base_sql/identifiers.py<gh_stars>100-1000
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Base identifiers
base_identifiers = [
'add',
'aggregate',
'all',
'alter',
'and',
'api_version',
'as',
'asc',
'avro',
'between',
'bigint',
'binary',
'boolean',
'by',
'cached',
'case',
'cast',
'change',
'char',
'class',
'close_fn',
'column',
'columns',
'comment',
'compute',
'create',
'cross',
'data',
'database',
'databases',
'date',
'datetime',
'decimal',
'delimited',
'desc',
'describe',
'distinct',
'div',
'double',
'drop',
'else',
'end',
'escaped',
'exists',
'explain',
'external',
'false',
'fields',
'fileformat',
'finalize_fn',
'first',
'float',
'format',
'formatted',
'from',
'full',
'function',
'functions',
'group',
'having',
'if',
'in',
'incremental',
'init_fn',
'inner',
'inpath',
'insert',
'int',
'integer',
'intermediate',
'interval',
'into',
'invalidate',
'is',
'join',
'last',
'left',
'like',
'limit',
'lines',
'load',
'location',
'merge_fn',
'metadata',
'not',
'null',
'nulls',
'offset',
'on',
'or',
'order',
'outer',
'overwrite',
'parquet',
'parquetfile',
'partition',
'partitioned',
'partitions',
'prepare_fn',
'produced',
'rcfile',
'real',
'refresh',
'regexp',
'rename',
'replace',
'returns',
'right',
'rlike',
'row',
'schema',
'schemas',
'select',
'semi',
'sequencefile',
'serdeproperties',
'serialize_fn',
'set',
'show',
'smallint',
'stats',
'stored',
'straight_join',
'string',
'symbol',
'table',
'tables',
'tblproperties',
'terminated',
'textfile',
'then',
'timestamp',
'tinyint',
'to',
'true',
'uncached',
'union',
'update_fn',
'use',
'using',
'values',
'view',
'when',
'where',
'with',
]
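# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a reserved-word list
# like `base_identifiers` is typically consulted to decide whether an
# identifier must be quoted before being placed in generated SQL. The helpers
# below are assumptions for illustration only, not the actual ibis API.
import re

_UNQUOTED_NAME = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$')


def needs_quotes(name):
    """True if `name` collides with a keyword or contains unsafe characters."""
    return name.lower() in base_identifiers or not _UNQUOTED_NAME.match(name)


def quote_identifier(name):
    """Backtick-quote `name` only when required."""
    if needs_quotes(name):
        return '`{}`'.format(name.replace('`', '``'))
    return name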
| 1,287 |
506 | // https://cses.fi/problemset/task/1132
#include <iostream>
#include <vector>
using namespace std;
typedef vector<int> vi;
typedef vector<vi> vvi;
vi dp1, dp2, p1, p2, m;
vvi g;
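// Two-pass rerooting ("Tree Distances I"): dfs() fills dp1/dp2 with the longest and
// second-longest downward path lengths from each node, and p1/p2 with the children
// achieving them; dfs2() then propagates m[u], the longest path that leaves u upward
// through its parent. The value printed for node i is max(m[i], dp1[i]).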
void dfs(int u, int p) {
int a = 0, b = 0, c = -1, d = -1; // initialize so p1/p2 are well-defined at leaves
for (auto z:g[u]) {
if (z == p) continue;
dfs(z, u);
if (dp1[z] >= b) {
b = dp1[z] + 1;
d = z;
if (b > a) {
swap(a, b);
swap(c, d);
}
}
}
dp1[u] = a;
dp2[u] = b;
p1[u] = c;
p2[u] = d;
}
void dfs2(int u, int p) {
for (auto z:g[u]) {
if (z == p) continue;
if (z == p1[u]) m[z] = max(m[u] + 1, 1 + dp2[u]);
else m[z] = max(m[u] + 1, 1 + dp1[u]);
dfs2(z, u);
}
}
int main() {
int n, a, b;
cin >> n;
dp1 = vi(n);
dp2 = vi(n);
p1 = vi(n);
p2 = vi(n);
m = vi(n);
g = vvi(n);
for (int i = 0; i < n - 1; i++) {
cin >> a >> b;
a--; b--;
g[a].push_back(b);
g[b].push_back(a);
}
dfs(0, 0);
dfs2(0, -1);
for (int i = 0; i < n; i++) {
cout << max(m[i], dp1[i]);
if (i < n - 1) cout << " ";
}
cout << endl;
}
| 635 |
310 | /*
!@
MIT License
Copyright (c) 2020 Skylicht Technology CO., LTD
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This file is part of the "Skylicht Engine".
https://github.com/skylicht-lab/skylicht-engine
!#
*/
#include "pch.h"
#include "CParticleInstancingSystem.h"
#include "ParticleSystem/Particles/CParticle.h"
#include "ParticleSystem/Particles/CGroup.h"
#include "ParticleSystem/Particles/Renderers/CQuadRenderer.h"
namespace Skylicht
{
namespace Particle
{
CParticleInstancingSystem::CParticleInstancingSystem()
{
}
CParticleInstancingSystem::~CParticleInstancingSystem()
{
}
void CParticleInstancingSystem::update(CParticle *particles, int num, CGroup *group, float dt)
{
CVertexBuffer<SParticleInstance>* buffer = group->getIntancing()->getInstanceBuffer();
buffer->set_used(num);
if (num == 0)
return;
SParticleInstance *vtx = (SParticleInstance*)buffer->getVertices();
CParticle *p;
float *params;
SParticleInstance *data;
u32 frameX = 1;
u32 frameY = 1;
IRenderer *renderer = group->getRenderer();
float sx = 1.0f;
float sy = 1.0f;
float sz = 1.0f;
if (renderer != NULL)
{
sx = renderer->SizeX;
sy = renderer->SizeY;
sz = renderer->SizeZ;
if (renderer->getType() == Particle::Quad)
{
CQuadRenderer *quadRenderer = (CQuadRenderer*)renderer;
frameX = quadRenderer->getAtlasX();
frameY = quadRenderer->getAtlasY();
}
}
u32 totalFrames = frameX * frameY;
float frameW = 1.0f / frameX;
float frameH = 1.0f / frameY;
u32 frame, row, col;
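// Pack one SParticleInstance per particle: position, color, scaled size,
// rotation and velocity, plus the atlas sub-rectangle (UVScale/UVOffset)
// selected by the particle's FrameIndex within the frameX x frameY grid.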
#pragma omp parallel for private(p, params, data, frame, row, col)
for (int i = 0; i < num; i++)
{
p = particles + i;
params = p->Params;
data = vtx + i;
data->Pos = p->Position;
data->Color.set(
(u32)(params[ColorA] * 255.0f),
(u32)(params[ColorR] * 255.0f),
(u32)(params[ColorG] * 255.0f),
(u32)(params[ColorB] * 255.0f)
);
data->Size.set(
sx * params[ScaleX],
sy * params[ScaleY],
sz * params[ScaleZ]
);
data->Rotation = p->Rotation;
data->Velocity = p->Velocity;
// clamp before the unsigned cast: converting a negative float to u32 would wrap
frame = params[FrameIndex] < 0.0f ? 0 : (u32)params[FrameIndex];
frame = frame >= totalFrames ? totalFrames - 1 : frame;
row = frame / frameX;
col = frame - (row * frameX);
data->UVScale.set(frameW, frameH);
data->UVOffset.set(col * frameW, row * frameH);
}
buffer->setDirty();
}
}
} | 1,324 |
1,125 | <reponame>ZenMX/elasticsearch<gh_stars>1000+
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules;
import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.test.ESTestCase;
import java.nio.ByteBuffer;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
public class PluginInfoTests extends ESTestCase {
public void testReadFromProperties() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
PluginTestUtil.writePluginProperties(pluginDir,
"description", "fake desc",
"name", "my_plugin",
"version", "1.0",
"elasticsearch.version", Version.CURRENT.toString(),
"java.version", System.getProperty("java.specification.version"),
"classname", "FakePlugin");
PluginInfo info = PluginInfo.readFromProperties(pluginDir);
assertEquals("my_plugin", info.getName());
assertEquals("fake desc", info.getDescription());
assertEquals("1.0", info.getVersion());
assertEquals("FakePlugin", info.getClassname());
assertThat(info.getExtendedPlugins(), empty());
}
public void testReadFromPropertiesNameMissing() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
PluginTestUtil.writePluginProperties(pluginDir);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir));
assertThat(e.getMessage(), containsString("property [name] is missing in"));
PluginTestUtil.writePluginProperties(pluginDir, "name", "");
e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir));
assertThat(e.getMessage(), containsString("property [name] is missing in"));
}
public void testReadFromPropertiesDescriptionMissing() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
PluginTestUtil.writePluginProperties(pluginDir, "name", "fake-plugin");
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir));
assertThat(e.getMessage(), containsString("[description] is missing"));
}
public void testReadFromPropertiesVersionMissing() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
PluginTestUtil.writePluginProperties(
pluginDir, "description", "fake desc", "name", "fake-plugin");
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir));
assertThat(e.getMessage(), containsString("[version] is missing"));
}
public void testReadFromPropertiesElasticsearchVersionMissing() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
PluginTestUtil.writePluginProperties(pluginDir,
"description", "fake desc",
"name", "my_plugin",
"version", "1.0");
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir));
assertThat(e.getMessage(), containsString("[elasticsearch.version] is missing"));
}
public void testReadFromPropertiesJavaVersionMissing() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
PluginTestUtil.writePluginProperties(pluginDir,
"description", "fake desc",
"name", "my_plugin",
"elasticsearch.version", Version.CURRENT.toString(),
"version", "1.0");
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir));
assertThat(e.getMessage(), containsString("[java.version] is missing"));
}
public void testReadFromPropertiesBadJavaVersionFormat() throws Exception {
String pluginName = "fake-plugin";
Path pluginDir = createTempDir().resolve(pluginName);
PluginTestUtil.writePluginProperties(pluginDir,
"description", "fake desc",
"name", pluginName,
"elasticsearch.version", Version.CURRENT.toString(),
"java.version", "1.7.0_80",
"classname", "FakePlugin",
"version", "1.0");
IllegalStateException e = expectThrows(IllegalStateException.class, () -> PluginInfo.readFromProperties(pluginDir));
assertThat(e.getMessage(), equalTo("version string must be a sequence of nonnegative decimal integers separated" +
" by \".\"'s and may have leading zeros but was 1.7.0_80"));
}
public void testReadFromPropertiesBogusElasticsearchVersion() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
PluginTestUtil.writePluginProperties(pluginDir,
"description", "fake desc",
"version", "1.0",
"name", "my_plugin",
"elasticsearch.version", "bogus");
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir));
assertThat(e.getMessage(), containsString("version needs to contain major, minor, and revision"));
}
public void testReadFromPropertiesJvmMissingClassname() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
PluginTestUtil.writePluginProperties(pluginDir,
"description", "fake desc",
"name", "my_plugin",
"version", "1.0",
"elasticsearch.version", Version.CURRENT.toString(),
"java.version", System.getProperty("java.specification.version"));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir));
assertThat(e.getMessage(), containsString("property [classname] is missing"));
}
public void testExtendedPluginsSingleExtension() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
PluginTestUtil.writePluginProperties(pluginDir,
"description", "fake desc",
"name", "my_plugin",
"version", "1.0",
"elasticsearch.version", Version.CURRENT.toString(),
"java.version", System.getProperty("java.specification.version"),
"classname", "FakePlugin",
"extended.plugins", "foo");
PluginInfo info = PluginInfo.readFromProperties(pluginDir);
assertThat(info.getExtendedPlugins(), contains("foo"));
}
public void testExtendedPluginsMultipleExtensions() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
PluginTestUtil.writePluginProperties(pluginDir,
"description", "fake desc",
"name", "my_plugin",
"version", "1.0",
"elasticsearch.version", Version.CURRENT.toString(),
"java.version", System.getProperty("java.specification.version"),
"classname", "FakePlugin",
"extended.plugins", "foo,bar,baz");
PluginInfo info = PluginInfo.readFromProperties(pluginDir);
assertThat(info.getExtendedPlugins(), contains("foo", "bar", "baz"));
}
public void testExtendedPluginsEmpty() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
PluginTestUtil.writePluginProperties(pluginDir,
"description", "fake desc",
"name", "my_plugin",
"version", "1.0",
"elasticsearch.version", Version.CURRENT.toString(),
"java.version", System.getProperty("java.specification.version"),
"classname", "FakePlugin",
"extended.plugins", "");
PluginInfo info = PluginInfo.readFromProperties(pluginDir);
assertThat(info.getExtendedPlugins(), empty());
}
public void testSerialize() throws Exception {
PluginInfo info = new PluginInfo("c", "foo", "dummy", Version.CURRENT, "1.8", "dummyclass",
Collections.singletonList("foo"), randomBoolean());
BytesStreamOutput output = new BytesStreamOutput();
info.writeTo(output);
ByteBuffer buffer = ByteBuffer.wrap(output.bytes().toBytesRef().bytes);
ByteBufferStreamInput input = new ByteBufferStreamInput(buffer);
PluginInfo info2 = new PluginInfo(input);
assertThat(info2.toString(), equalTo(info.toString()));
}
public void testPluginListSorted() {
List<PluginInfo> plugins = new ArrayList<>();
plugins.add(new PluginInfo("c", "foo", "dummy", Version.CURRENT, "1.8", "dummyclass",
Collections.emptyList(), randomBoolean()));
plugins.add(new PluginInfo("b", "foo", "dummy", Version.CURRENT, "1.8", "dummyclass",
Collections.emptyList(), randomBoolean()));
plugins.add(new PluginInfo( "e", "foo", "dummy", Version.CURRENT, "1.8", "dummyclass",
Collections.emptyList(), randomBoolean()));
plugins.add(new PluginInfo("a", "foo", "dummy", Version.CURRENT, "1.8", "dummyclass",
Collections.emptyList(), randomBoolean()));
plugins.add(new PluginInfo("d", "foo", "dummy", Version.CURRENT, "1.8", "dummyclass",
Collections.emptyList(), randomBoolean()));
PluginsAndModules pluginsInfo = new PluginsAndModules(plugins, Collections.emptyList());
final List<PluginInfo> infos = pluginsInfo.getPluginInfos();
List<String> names = infos.stream().map(PluginInfo::getName).collect(Collectors.toList());
assertThat(names, contains("a", "b", "c", "d", "e"));
}
public void testUnknownProperties() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
PluginTestUtil.writePluginProperties(pluginDir,
"extra", "property",
"unknown", "property",
"description", "fake desc",
"classname", "Foo",
"name", "my_plugin",
"version", "1.0",
"elasticsearch.version", Version.CURRENT.toString(),
"java.version", System.getProperty("java.specification.version"));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir));
assertThat(e.getMessage(), containsString("Unknown properties in plugin descriptor"));
}
}
| 4,383 |
3,372 | <gh_stars>1000+
/*
* Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.route53resolver.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.AmazonWebServiceRequest;
/**
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverQueryLogConfigs"
* target="_top">AWS API Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class ListResolverQueryLogConfigsRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {
/**
* <p>
* The maximum number of query logging configurations that you want to return in the response to a
* <code>ListResolverQueryLogConfigs</code> request. If you don't specify a value for <code>MaxResults</code>,
* Resolver returns up to 100 query logging configurations.
* </p>
*/
private Integer maxResults;
/**
* <p>
* For the first <code>ListResolverQueryLogConfigs</code> request, omit this value.
* </p>
* <p>
* If there are more than <code>MaxResults</code> query logging configurations that match the values that you
* specify for <code>Filters</code>, you can submit another <code>ListResolverQueryLogConfigs</code> request to get
* the next group of configurations. In the next request, specify the value of <code>NextToken</code> from the
* previous response.
* </p>
*/
private String nextToken;
/**
* <p>
* An optional specification to return a subset of query logging configurations.
* </p>
* <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same values for <code>Filters</code>, if any, as in the
* previous request.
* </p>
* </note>
*/
private java.util.List<Filter> filters;
/**
* <p>
* The element that you want Resolver to sort query logging configurations by.
* </p>
* <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same value for <code>SortBy</code>, if any, as in the previous
* request.
* </p>
* </note>
* <p>
* Valid values include the following elements:
* </p>
* <ul>
* <li>
* <p>
* <code>Arn</code>: The ARN of the query logging configuration
* </p>
* </li>
* <li>
* <p>
* <code>AssociationCount</code>: The number of VPCs that are associated with the specified configuration
* </p>
* </li>
* <li>
* <p>
* <code>CreationTime</code>: The date and time that Resolver returned when the configuration was created
* </p>
* </li>
* <li>
* <p>
* <code>CreatorRequestId</code>: The value that was specified for <code>CreatorRequestId</code> when the
* configuration was created
* </p>
* </li>
* <li>
* <p>
* <code>DestinationArn</code>: The location that logs are sent to
* </p>
* </li>
* <li>
* <p>
* <code>Id</code>: The ID of the configuration
* </p>
* </li>
* <li>
* <p>
* <code>Name</code>: The name of the configuration
* </p>
* </li>
* <li>
* <p>
* <code>OwnerId</code>: The Amazon Web Services account number of the account that created the configuration
* </p>
* </li>
* <li>
* <p>
* <code>ShareStatus</code>: Whether the configuration is shared with other Amazon Web Services accounts or shared
* with the current account by another Amazon Web Services account. Sharing is configured through Resource Access
* Manager (RAM).
* </p>
* </li>
* <li>
* <p>
* <code>Status</code>: The current status of the configuration. Valid values include the following:
* </p>
* <ul>
* <li>
* <p>
* <code>CREATING</code>: Resolver is creating the query logging configuration.
* </p>
* </li>
* <li>
* <p>
* <code>CREATED</code>: The query logging configuration was successfully created. Resolver is logging queries that
* originate in the specified VPC.
* </p>
* </li>
* <li>
* <p>
* <code>DELETING</code>: Resolver is deleting this query logging configuration.
* </p>
* </li>
* <li>
* <p>
* <code>FAILED</code>: Resolver either couldn't create or couldn't delete the query logging configuration. Here are
* two common causes:
* </p>
* <ul>
* <li>
* <p>
* The specified destination (for example, an Amazon S3 bucket) was deleted.
* </p>
* </li>
* <li>
* <p>
* Permissions don't allow sending logs to the destination.
* </p>
* </li>
* </ul>
* </li>
* </ul>
* </li>
* </ul>
*/
private String sortBy;
/**
* <p>
* If you specified a value for <code>SortBy</code>, the order that you want query logging configurations to be
* listed in, <code>ASCENDING</code> or <code>DESCENDING</code>.
* </p>
* <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same value for <code>SortOrder</code>, if any, as in the
* previous request.
* </p>
* </note>
*/
private String sortOrder;
/**
* <p>
* The maximum number of query logging configurations that you want to return in the response to a
* <code>ListResolverQueryLogConfigs</code> request. If you don't specify a value for <code>MaxResults</code>,
* Resolver returns up to 100 query logging configurations.
* </p>
*
* @param maxResults
* The maximum number of query logging configurations that you want to return in the response to a
* <code>ListResolverQueryLogConfigs</code> request. If you don't specify a value for <code>MaxResults</code>
* , Resolver returns up to 100 query logging configurations.
*/
public void setMaxResults(Integer maxResults) {
this.maxResults = maxResults;
}
/**
* <p>
* The maximum number of query logging configurations that you want to return in the response to a
* <code>ListResolverQueryLogConfigs</code> request. If you don't specify a value for <code>MaxResults</code>,
* Resolver returns up to 100 query logging configurations.
* </p>
*
* @return The maximum number of query logging configurations that you want to return in the response to a
* <code>ListResolverQueryLogConfigs</code> request. If you don't specify a value for
* <code>MaxResults</code>, Resolver returns up to 100 query logging configurations.
*/
public Integer getMaxResults() {
return this.maxResults;
}
/**
* <p>
* The maximum number of query logging configurations that you want to return in the response to a
* <code>ListResolverQueryLogConfigs</code> request. If you don't specify a value for <code>MaxResults</code>,
* Resolver returns up to 100 query logging configurations.
* </p>
*
* @param maxResults
* The maximum number of query logging configurations that you want to return in the response to a
* <code>ListResolverQueryLogConfigs</code> request. If you don't specify a value for <code>MaxResults</code>
* , Resolver returns up to 100 query logging configurations.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public ListResolverQueryLogConfigsRequest withMaxResults(Integer maxResults) {
setMaxResults(maxResults);
return this;
}
/**
* <p>
* For the first <code>ListResolverQueryLogConfigs</code> request, omit this value.
* </p>
* <p>
* If there are more than <code>MaxResults</code> query logging configurations that match the values that you
* specify for <code>Filters</code>, you can submit another <code>ListResolverQueryLogConfigs</code> request to get
* the next group of configurations. In the next request, specify the value of <code>NextToken</code> from the
* previous response.
* </p>
*
* @param nextToken
* For the first <code>ListResolverQueryLogConfigs</code> request, omit this value.</p>
* <p>
* If there are more than <code>MaxResults</code> query logging configurations that match the values that you
* specify for <code>Filters</code>, you can submit another <code>ListResolverQueryLogConfigs</code> request
* to get the next group of configurations. In the next request, specify the value of <code>NextToken</code>
* from the previous response.
*/
public void setNextToken(String nextToken) {
this.nextToken = nextToken;
}
/**
* <p>
* For the first <code>ListResolverQueryLogConfigs</code> request, omit this value.
* </p>
* <p>
* If there are more than <code>MaxResults</code> query logging configurations that match the values that you
* specify for <code>Filters</code>, you can submit another <code>ListResolverQueryLogConfigs</code> request to get
* the next group of configurations. In the next request, specify the value of <code>NextToken</code> from the
* previous response.
* </p>
*
* @return For the first <code>ListResolverQueryLogConfigs</code> request, omit this value.</p>
* <p>
* If there are more than <code>MaxResults</code> query logging configurations that match the values that
* you specify for <code>Filters</code>, you can submit another <code>ListResolverQueryLogConfigs</code>
* request to get the next group of configurations. In the next request, specify the value of
* <code>NextToken</code> from the previous response.
*/
public String getNextToken() {
return this.nextToken;
}
/**
* <p>
* For the first <code>ListResolverQueryLogConfigs</code> request, omit this value.
* </p>
* <p>
* If there are more than <code>MaxResults</code> query logging configurations that match the values that you
* specify for <code>Filters</code>, you can submit another <code>ListResolverQueryLogConfigs</code> request to get
* the next group of configurations. In the next request, specify the value of <code>NextToken</code> from the
* previous response.
* </p>
*
* @param nextToken
* For the first <code>ListResolverQueryLogConfigs</code> request, omit this value.</p>
* <p>
* If there are more than <code>MaxResults</code> query logging configurations that match the values that you
* specify for <code>Filters</code>, you can submit another <code>ListResolverQueryLogConfigs</code> request
* to get the next group of configurations. In the next request, specify the value of <code>NextToken</code>
* from the previous response.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public ListResolverQueryLogConfigsRequest withNextToken(String nextToken) {
setNextToken(nextToken);
return this;
}
/**
* <p>
* An optional specification to return a subset of query logging configurations.
* </p>
* <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same values for <code>Filters</code>, if any, as in the
* previous request.
* </p>
* </note>
*
* @return An optional specification to return a subset of query logging configurations.</p> <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same values for <code>Filters</code>, if any, as in
* the previous request.
* </p>
*/
public java.util.List<Filter> getFilters() {
return filters;
}
/**
* <p>
* An optional specification to return a subset of query logging configurations.
* </p>
* <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same values for <code>Filters</code>, if any, as in the
* previous request.
* </p>
* </note>
*
* @param filters
* An optional specification to return a subset of query logging configurations.</p> <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same values for <code>Filters</code>, if any, as in the
* previous request.
* </p>
*/
public void setFilters(java.util.Collection<Filter> filters) {
if (filters == null) {
this.filters = null;
return;
}
this.filters = new java.util.ArrayList<Filter>(filters);
}
/**
* <p>
* An optional specification to return a subset of query logging configurations.
* </p>
* <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same values for <code>Filters</code>, if any, as in the
* previous request.
* </p>
* </note>
* <p>
* <b>NOTE:</b> This method appends the values to the existing list (if any). Use
* {@link #setFilters(java.util.Collection)} or {@link #withFilters(java.util.Collection)} if you want to override
* the existing values.
* </p>
*
* @param filters
* An optional specification to return a subset of query logging configurations.</p> <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same values for <code>Filters</code>, if any, as in the
* previous request.
* </p>
* @return Returns a reference to this object so that method calls can be chained together.
*/
public ListResolverQueryLogConfigsRequest withFilters(Filter... filters) {
if (this.filters == null) {
setFilters(new java.util.ArrayList<Filter>(filters.length));
}
for (Filter ele : filters) {
this.filters.add(ele);
}
return this;
}
/**
* <p>
* An optional specification to return a subset of query logging configurations.
* </p>
* <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same values for <code>Filters</code>, if any, as in the
* previous request.
* </p>
* </note>
*
* @param filters
* An optional specification to return a subset of query logging configurations.</p> <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same values for <code>Filters</code>, if any, as in the
* previous request.
* </p>
* @return Returns a reference to this object so that method calls can be chained together.
*/
public ListResolverQueryLogConfigsRequest withFilters(java.util.Collection<Filter> filters) {
setFilters(filters);
return this;
}
/**
* <p>
* The element that you want Resolver to sort query logging configurations by.
* </p>
* <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same value for <code>SortBy</code>, if any, as in the previous
* request.
* </p>
* </note>
* <p>
* Valid values include the following elements:
* </p>
* <ul>
* <li>
* <p>
* <code>Arn</code>: The ARN of the query logging configuration
* </p>
* </li>
* <li>
* <p>
* <code>AssociationCount</code>: The number of VPCs that are associated with the specified configuration
* </p>
* </li>
* <li>
* <p>
* <code>CreationTime</code>: The date and time that Resolver returned when the configuration was created
* </p>
* </li>
* <li>
* <p>
* <code>CreatorRequestId</code>: The value that was specified for <code>CreatorRequestId</code> when the
* configuration was created
* </p>
* </li>
* <li>
* <p>
* <code>DestinationArn</code>: The location that logs are sent to
* </p>
* </li>
* <li>
* <p>
* <code>Id</code>: The ID of the configuration
* </p>
* </li>
* <li>
* <p>
* <code>Name</code>: The name of the configuration
* </p>
* </li>
* <li>
* <p>
* <code>OwnerId</code>: The Amazon Web Services account number of the account that created the configuration
* </p>
* </li>
* <li>
* <p>
* <code>ShareStatus</code>: Whether the configuration is shared with other Amazon Web Services accounts or shared
* with the current account by another Amazon Web Services account. Sharing is configured through Resource Access
* Manager (RAM).
* </p>
* </li>
* <li>
* <p>
* <code>Status</code>: The current status of the configuration. Valid values include the following:
* </p>
* <ul>
* <li>
* <p>
* <code>CREATING</code>: Resolver is creating the query logging configuration.
* </p>
* </li>
* <li>
* <p>
* <code>CREATED</code>: The query logging configuration was successfully created. Resolver is logging queries that
* originate in the specified VPC.
* </p>
* </li>
* <li>
* <p>
* <code>DELETING</code>: Resolver is deleting this query logging configuration.
* </p>
* </li>
* <li>
* <p>
* <code>FAILED</code>: Resolver either couldn't create or couldn't delete the query logging configuration. Here are
* two common causes:
* </p>
* <ul>
* <li>
* <p>
* The specified destination (for example, an Amazon S3 bucket) was deleted.
* </p>
* </li>
* <li>
* <p>
* Permissions don't allow sending logs to the destination.
* </p>
* </li>
* </ul>
* </li>
* </ul>
* </li>
* </ul>
*
* @param sortBy
* The element that you want Resolver to sort query logging configurations by. </p> <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same value for <code>SortBy</code>, if any, as in the
* previous request.
* </p>
* </note>
* <p>
* Valid values include the following elements:
* </p>
* <ul>
* <li>
* <p>
* <code>Arn</code>: The ARN of the query logging configuration
* </p>
* </li>
* <li>
* <p>
* <code>AssociationCount</code>: The number of VPCs that are associated with the specified configuration
* </p>
* </li>
* <li>
* <p>
* <code>CreationTime</code>: The date and time that Resolver returned when the configuration was created
* </p>
* </li>
* <li>
* <p>
* <code>CreatorRequestId</code>: The value that was specified for <code>CreatorRequestId</code> when the
* configuration was created
* </p>
* </li>
* <li>
* <p>
* <code>DestinationArn</code>: The location that logs are sent to
* </p>
* </li>
* <li>
* <p>
* <code>Id</code>: The ID of the configuration
* </p>
* </li>
* <li>
* <p>
* <code>Name</code>: The name of the configuration
* </p>
* </li>
* <li>
* <p>
* <code>OwnerId</code>: The Amazon Web Services account number of the account that created the configuration
* </p>
* </li>
* <li>
* <p>
* <code>ShareStatus</code>: Whether the configuration is shared with other Amazon Web Services accounts or
* shared with the current account by another Amazon Web Services account. Sharing is configured through
* Resource Access Manager (RAM).
* </p>
* </li>
* <li>
* <p>
* <code>Status</code>: The current status of the configuration. Valid values include the following:
* </p>
* <ul>
* <li>
* <p>
* <code>CREATING</code>: Resolver is creating the query logging configuration.
* </p>
* </li>
* <li>
* <p>
* <code>CREATED</code>: The query logging configuration was successfully created. Resolver is logging
* queries that originate in the specified VPC.
* </p>
* </li>
* <li>
* <p>
* <code>DELETING</code>: Resolver is deleting this query logging configuration.
* </p>
* </li>
* <li>
* <p>
* <code>FAILED</code>: Resolver either couldn't create or couldn't delete the query logging configuration.
* Here are two common causes:
* </p>
* <ul>
* <li>
* <p>
* The specified destination (for example, an Amazon S3 bucket) was deleted.
* </p>
* </li>
* <li>
* <p>
* Permissions don't allow sending logs to the destination.
* </p>
* </li>
* </ul>
* </li>
* </ul>
* </li>
*/
public void setSortBy(String sortBy) {
this.sortBy = sortBy;
}
/**
* <p>
* The element that you want Resolver to sort query logging configurations by.
* </p>
* <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same value for <code>SortBy</code>, if any, as in the previous
* request.
* </p>
* </note>
* <p>
* Valid values include the following elements:
* </p>
* <ul>
* <li>
* <p>
* <code>Arn</code>: The ARN of the query logging configuration
* </p>
* </li>
* <li>
* <p>
* <code>AssociationCount</code>: The number of VPCs that are associated with the specified configuration
* </p>
* </li>
* <li>
* <p>
* <code>CreationTime</code>: The date and time that Resolver returned when the configuration was created
* </p>
* </li>
* <li>
* <p>
* <code>CreatorRequestId</code>: The value that was specified for <code>CreatorRequestId</code> when the
* configuration was created
* </p>
* </li>
* <li>
* <p>
* <code>DestinationArn</code>: The location that logs are sent to
* </p>
* </li>
* <li>
* <p>
* <code>Id</code>: The ID of the configuration
* </p>
* </li>
* <li>
* <p>
* <code>Name</code>: The name of the configuration
* </p>
* </li>
* <li>
* <p>
* <code>OwnerId</code>: The Amazon Web Services account number of the account that created the configuration
* </p>
* </li>
* <li>
* <p>
* <code>ShareStatus</code>: Whether the configuration is shared with other Amazon Web Services accounts or shared
* with the current account by another Amazon Web Services account. Sharing is configured through Resource Access
* Manager (RAM).
* </p>
* </li>
* <li>
* <p>
* <code>Status</code>: The current status of the configuration. Valid values include the following:
* </p>
* <ul>
* <li>
* <p>
* <code>CREATING</code>: Resolver is creating the query logging configuration.
* </p>
* </li>
* <li>
* <p>
* <code>CREATED</code>: The query logging configuration was successfully created. Resolver is logging queries that
* originate in the specified VPC.
* </p>
* </li>
* <li>
* <p>
* <code>DELETING</code>: Resolver is deleting this query logging configuration.
* </p>
* </li>
* <li>
* <p>
* <code>FAILED</code>: Resolver either couldn't create or couldn't delete the query logging configuration. Here are
* two common causes:
* </p>
* <ul>
* <li>
* <p>
* The specified destination (for example, an Amazon S3 bucket) was deleted.
* </p>
* </li>
* <li>
* <p>
* Permissions don't allow sending logs to the destination.
* </p>
* </li>
* </ul>
* </li>
* </ul>
* </li>
* </ul>
*
* @return The element that you want Resolver to sort query logging configurations by. </p> <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same value for <code>SortBy</code>, if any, as in the
* previous request.
* </p>
* </note>
* <p>
* Valid values include the following elements:
* </p>
* <ul>
* <li>
* <p>
* <code>Arn</code>: The ARN of the query logging configuration
* </p>
* </li>
* <li>
* <p>
* <code>AssociationCount</code>: The number of VPCs that are associated with the specified configuration
* </p>
* </li>
* <li>
* <p>
* <code>CreationTime</code>: The date and time that Resolver returned when the configuration was created
* </p>
* </li>
* <li>
* <p>
* <code>CreatorRequestId</code>: The value that was specified for <code>CreatorRequestId</code> when the
* configuration was created
* </p>
* </li>
* <li>
* <p>
* <code>DestinationArn</code>: The location that logs are sent to
* </p>
* </li>
* <li>
* <p>
* <code>Id</code>: The ID of the configuration
* </p>
* </li>
* <li>
* <p>
* <code>Name</code>: The name of the configuration
* </p>
* </li>
* <li>
* <p>
* <code>OwnerId</code>: The Amazon Web Services account number of the account that created the
* configuration
* </p>
* </li>
* <li>
* <p>
* <code>ShareStatus</code>: Whether the configuration is shared with other Amazon Web Services accounts or
* shared with the current account by another Amazon Web Services account. Sharing is configured through
* Resource Access Manager (RAM).
* </p>
* </li>
* <li>
* <p>
* <code>Status</code>: The current status of the configuration. Valid values include the following:
* </p>
* <ul>
* <li>
* <p>
* <code>CREATING</code>: Resolver is creating the query logging configuration.
* </p>
* </li>
* <li>
* <p>
* <code>CREATED</code>: The query logging configuration was successfully created. Resolver is logging
* queries that originate in the specified VPC.
* </p>
* </li>
* <li>
* <p>
* <code>DELETING</code>: Resolver is deleting this query logging configuration.
* </p>
* </li>
* <li>
* <p>
* <code>FAILED</code>: Resolver either couldn't create or couldn't delete the query logging configuration.
* Here are two common causes:
* </p>
* <ul>
* <li>
* <p>
* The specified destination (for example, an Amazon S3 bucket) was deleted.
* </p>
* </li>
* <li>
* <p>
* Permissions don't allow sending logs to the destination.
* </p>
* </li>
* </ul>
* </li>
* </ul>
* </li>
*/
public String getSortBy() {
return this.sortBy;
}
/**
* <p>
* The element that you want Resolver to sort query logging configurations by.
* </p>
* <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same value for <code>SortBy</code>, if any, as in the previous
* request.
* </p>
* </note>
* <p>
* Valid values include the following elements:
* </p>
* <ul>
* <li>
* <p>
* <code>Arn</code>: The ARN of the query logging configuration
* </p>
* </li>
* <li>
* <p>
* <code>AssociationCount</code>: The number of VPCs that are associated with the specified configuration
* </p>
* </li>
* <li>
* <p>
* <code>CreationTime</code>: The date and time that Resolver returned when the configuration was created
* </p>
* </li>
* <li>
* <p>
* <code>CreatorRequestId</code>: The value that was specified for <code>CreatorRequestId</code> when the
* configuration was created
* </p>
* </li>
* <li>
* <p>
* <code>DestinationArn</code>: The location that logs are sent to
* </p>
* </li>
* <li>
* <p>
* <code>Id</code>: The ID of the configuration
* </p>
* </li>
* <li>
* <p>
* <code>Name</code>: The name of the configuration
* </p>
* </li>
* <li>
* <p>
* <code>OwnerId</code>: The Amazon Web Services account number of the account that created the configuration
* </p>
* </li>
* <li>
* <p>
* <code>ShareStatus</code>: Whether the configuration is shared with other Amazon Web Services accounts or shared
* with the current account by another Amazon Web Services account. Sharing is configured through Resource Access
* Manager (RAM).
* </p>
* </li>
* <li>
* <p>
* <code>Status</code>: The current status of the configuration. Valid values include the following:
* </p>
* <ul>
* <li>
* <p>
* <code>CREATING</code>: Resolver is creating the query logging configuration.
* </p>
* </li>
* <li>
* <p>
* <code>CREATED</code>: The query logging configuration was successfully created. Resolver is logging queries that
* originate in the specified VPC.
* </p>
* </li>
* <li>
* <p>
* <code>DELETING</code>: Resolver is deleting this query logging configuration.
* </p>
* </li>
* <li>
* <p>
* <code>FAILED</code>: Resolver either couldn't create or couldn't delete the query logging configuration. Here are
* two common causes:
* </p>
* <ul>
* <li>
* <p>
* The specified destination (for example, an Amazon S3 bucket) was deleted.
* </p>
* </li>
* <li>
* <p>
* Permissions don't allow sending logs to the destination.
* </p>
* </li>
* </ul>
* </li>
* </ul>
* </li>
* </ul>
*
* @param sortBy
* The element that you want Resolver to sort query logging configurations by. </p> <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same value for <code>SortBy</code>, if any, as in the
* previous request.
* </p>
* </note>
* <p>
* Valid values include the following elements:
* </p>
* <ul>
* <li>
* <p>
* <code>Arn</code>: The ARN of the query logging configuration
* </p>
* </li>
* <li>
* <p>
* <code>AssociationCount</code>: The number of VPCs that are associated with the specified configuration
* </p>
* </li>
* <li>
* <p>
* <code>CreationTime</code>: The date and time that Resolver returned when the configuration was created
* </p>
* </li>
* <li>
* <p>
* <code>CreatorRequestId</code>: The value that was specified for <code>CreatorRequestId</code> when the
* configuration was created
* </p>
* </li>
* <li>
* <p>
* <code>DestinationArn</code>: The location that logs are sent to
* </p>
* </li>
* <li>
* <p>
* <code>Id</code>: The ID of the configuration
* </p>
* </li>
* <li>
* <p>
* <code>Name</code>: The name of the configuration
* </p>
* </li>
* <li>
* <p>
* <code>OwnerId</code>: The Amazon Web Services account number of the account that created the configuration
* </p>
* </li>
* <li>
* <p>
* <code>ShareStatus</code>: Whether the configuration is shared with other Amazon Web Services accounts or
* shared with the current account by another Amazon Web Services account. Sharing is configured through
* Resource Access Manager (RAM).
* </p>
* </li>
* <li>
* <p>
* <code>Status</code>: The current status of the configuration. Valid values include the following:
* </p>
* <ul>
* <li>
* <p>
* <code>CREATING</code>: Resolver is creating the query logging configuration.
* </p>
* </li>
* <li>
* <p>
* <code>CREATED</code>: The query logging configuration was successfully created. Resolver is logging
* queries that originate in the specified VPC.
* </p>
* </li>
* <li>
* <p>
* <code>DELETING</code>: Resolver is deleting this query logging configuration.
* </p>
* </li>
* <li>
* <p>
* <code>FAILED</code>: Resolver either couldn't create or couldn't delete the query logging configuration.
* Here are two common causes:
* </p>
* <ul>
* <li>
* <p>
* The specified destination (for example, an Amazon S3 bucket) was deleted.
* </p>
* </li>
* <li>
* <p>
* Permissions don't allow sending logs to the destination.
* </p>
* </li>
* </ul>
* </li>
* </ul>
* </li>
* @return Returns a reference to this object so that method calls can be chained together.
*/
public ListResolverQueryLogConfigsRequest withSortBy(String sortBy) {
setSortBy(sortBy);
return this;
}
/**
* <p>
* If you specified a value for <code>SortBy</code>, the order that you want query logging configurations to be
* listed in, <code>ASCENDING</code> or <code>DESCENDING</code>.
* </p>
* <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same value for <code>SortOrder</code>, if any, as in the
* previous request.
* </p>
* </note>
*
* @param sortOrder
* If you specified a value for <code>SortBy</code>, the order that you want query logging configurations to
* be listed in, <code>ASCENDING</code> or <code>DESCENDING</code>.</p> <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same value for <code>SortOrder</code>, if any, as in
* the previous request.
* </p>
* @see SortOrder
*/
public void setSortOrder(String sortOrder) {
this.sortOrder = sortOrder;
}
/**
* <p>
* If you specified a value for <code>SortBy</code>, the order that you want query logging configurations to be
* listed in, <code>ASCENDING</code> or <code>DESCENDING</code>.
* </p>
* <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same value for <code>SortOrder</code>, if any, as in the
* previous request.
* </p>
* </note>
*
* @return If you specified a value for <code>SortBy</code>, the order that you want query logging configurations to
* be listed in, <code>ASCENDING</code> or <code>DESCENDING</code>.</p> <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same value for <code>SortOrder</code>, if any, as in
* the previous request.
* </p>
* @see SortOrder
*/
public String getSortOrder() {
return this.sortOrder;
}
/**
* <p>
* If you specified a value for <code>SortBy</code>, the order that you want query logging configurations to be
* listed in, <code>ASCENDING</code> or <code>DESCENDING</code>.
* </p>
* <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same value for <code>SortOrder</code>, if any, as in the
* previous request.
* </p>
* </note>
*
* @param sortOrder
* If you specified a value for <code>SortBy</code>, the order that you want query logging configurations to
* be listed in, <code>ASCENDING</code> or <code>DESCENDING</code>.</p> <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same value for <code>SortOrder</code>, if any, as in
* the previous request.
* </p>
* @return Returns a reference to this object so that method calls can be chained together.
* @see SortOrder
*/
public ListResolverQueryLogConfigsRequest withSortOrder(String sortOrder) {
setSortOrder(sortOrder);
return this;
}
/**
* <p>
* If you specified a value for <code>SortBy</code>, the order that you want query logging configurations to be
* listed in, <code>ASCENDING</code> or <code>DESCENDING</code>.
* </p>
* <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same value for <code>SortOrder</code>, if any, as in the
* previous request.
* </p>
* </note>
*
* @param sortOrder
* If you specified a value for <code>SortBy</code>, the order that you want query logging configurations to
* be listed in, <code>ASCENDING</code> or <code>DESCENDING</code>.</p> <note>
* <p>
* If you submit a second or subsequent <code>ListResolverQueryLogConfigs</code> request and specify the
* <code>NextToken</code> parameter, you must use the same value for <code>SortOrder</code>, if any, as in
* the previous request.
* </p>
* @return Returns a reference to this object so that method calls can be chained together.
* @see SortOrder
*/
public ListResolverQueryLogConfigsRequest withSortOrder(SortOrder sortOrder) {
this.sortOrder = sortOrder.toString();
return this;
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getMaxResults() != null)
sb.append("MaxResults: ").append(getMaxResults()).append(",");
if (getNextToken() != null)
sb.append("NextToken: ").append(getNextToken()).append(",");
if (getFilters() != null)
sb.append("Filters: ").append(getFilters()).append(",");
if (getSortBy() != null)
sb.append("SortBy: ").append(getSortBy()).append(",");
if (getSortOrder() != null)
sb.append("SortOrder: ").append(getSortOrder());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof ListResolverQueryLogConfigsRequest == false)
return false;
ListResolverQueryLogConfigsRequest other = (ListResolverQueryLogConfigsRequest) obj;
if (other.getMaxResults() == null ^ this.getMaxResults() == null)
return false;
if (other.getMaxResults() != null && other.getMaxResults().equals(this.getMaxResults()) == false)
return false;
if (other.getNextToken() == null ^ this.getNextToken() == null)
return false;
if (other.getNextToken() != null && other.getNextToken().equals(this.getNextToken()) == false)
return false;
if (other.getFilters() == null ^ this.getFilters() == null)
return false;
if (other.getFilters() != null && other.getFilters().equals(this.getFilters()) == false)
return false;
if (other.getSortBy() == null ^ this.getSortBy() == null)
return false;
if (other.getSortBy() != null && other.getSortBy().equals(this.getSortBy()) == false)
return false;
if (other.getSortOrder() == null ^ this.getSortOrder() == null)
return false;
if (other.getSortOrder() != null && other.getSortOrder().equals(this.getSortOrder()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getMaxResults() == null) ? 0 : getMaxResults().hashCode());
hashCode = prime * hashCode + ((getNextToken() == null) ? 0 : getNextToken().hashCode());
hashCode = prime * hashCode + ((getFilters() == null) ? 0 : getFilters().hashCode());
hashCode = prime * hashCode + ((getSortBy() == null) ? 0 : getSortBy().hashCode());
hashCode = prime * hashCode + ((getSortOrder() == null) ? 0 : getSortOrder().hashCode());
return hashCode;
}
@Override
public ListResolverQueryLogConfigsRequest clone() {
return (ListResolverQueryLogConfigsRequest) super.clone();
}
}
| 19,456 |
450 | /*-------------------------------------------------------------------------
*
* hashsearch.c
* search code for postgres hash tables
*
* Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.45 2006/07/14 14:52:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/hash.h"
#include "access/relscan.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"
/*
* _hash_next() -- Get the next item in a scan.
*
* On entry, we have a valid currentItemData in the scan, and a
* pin and read lock on the page that contains that item.
* We find the next item in the scan, if any.
* On success exit, we have the page containing the next item
* pinned and locked.
*/
bool
_hash_next(IndexScanDesc scan, ScanDirection dir)
{
Relation rel = scan->indexRelation;
HashScanOpaque so = (HashScanOpaque) scan->opaque;
Buffer buf;
Page page;
OffsetNumber offnum;
ItemPointer current;
IndexTuple itup;
/* we still have the buffer pinned and read-locked */
buf = so->hashso_curbuf;
Assert(BufferIsValid(buf));
/*
* step to next valid tuple.
*/
if (!_hash_step(scan, &buf, dir))
return false;
/* if we're here, _hash_step found a valid tuple */
current = &(scan->currentItemData);
offnum = ItemPointerGetOffsetNumber(current);
_hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
page = BufferGetPage(buf);
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
scan->xs_ctup.t_self = itup->t_tid;
return true;
}
/*
* Advance to next page in a bucket, if any.
*/
static void
_hash_readnext(Relation rel,
Buffer *bufp, Page *pagep, HashPageOpaque *opaquep)
{
BlockNumber blkno;
MIRROREDLOCK_BUFMGR_MUST_ALREADY_BE_HELD;
blkno = (*opaquep)->hasho_nextblkno;
_hash_relbuf(rel, *bufp);
*bufp = InvalidBuffer;
/* check for interrupts while we're not holding any buffer lock */
CHECK_FOR_INTERRUPTS();
if (BlockNumberIsValid(blkno))
{
*bufp = _hash_getbuf(rel, blkno, HASH_READ);
_hash_checkpage(rel, *bufp, LH_OVERFLOW_PAGE);
*pagep = BufferGetPage(*bufp);
*opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
}
}
/*
* Advance to previous page in a bucket, if any.
*/
static void
_hash_readprev(Relation rel,
Buffer *bufp, Page *pagep, HashPageOpaque *opaquep)
{
BlockNumber blkno;
MIRROREDLOCK_BUFMGR_MUST_ALREADY_BE_HELD;
blkno = (*opaquep)->hasho_prevblkno;
_hash_relbuf(rel, *bufp);
*bufp = InvalidBuffer;
/* check for interrupts while we're not holding any buffer lock */
CHECK_FOR_INTERRUPTS();
if (BlockNumberIsValid(blkno))
{
*bufp = _hash_getbuf(rel, blkno, HASH_READ);
_hash_checkpage(rel, *bufp, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
*pagep = BufferGetPage(*bufp);
*opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
}
}
/*
* _hash_first() -- Find the first item in a scan.
*
* Find the first item in the index that
* satisfies the qualification associated with the scan descriptor. On
* success, the page containing the current index tuple is read locked
* and pinned, and the scan's opaque data entry is updated to
* include the buffer.
*/
bool
_hash_first(IndexScanDesc scan, ScanDirection dir)
{
Relation rel = scan->indexRelation;
HashScanOpaque so = (HashScanOpaque) scan->opaque;
uint32 hashkey;
Bucket bucket;
BlockNumber blkno;
Buffer buf;
Buffer metabuf;
Page page;
HashPageOpaque opaque;
HashMetaPage metap;
IndexTuple itup;
ItemPointer current;
OffsetNumber offnum;
MIRROREDLOCK_BUFMGR_MUST_ALREADY_BE_HELD;
pgstat_count_index_scan(rel);
current = &(scan->currentItemData);
ItemPointerSetInvalid(current);
/*
* We do not support hash scans with no index qualification, because we
* would have to read the whole index rather than just one bucket. That
* creates a whole raft of problems, since we haven't got a practical way
* to lock all the buckets against splits or compactions.
*/
if (scan->numberOfKeys < 1)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("hash indexes do not support whole-index scans")));
/*
* If the constant in the index qual is NULL, assume it cannot match any
* items in the index.
*/
if (scan->keyData[0].sk_flags & SK_ISNULL)
return false;
/*
* Okay to compute the hash key. We want to do this before acquiring any
* locks, in case a user-defined hash function happens to be slow.
*/
hashkey = _hash_datum2hashkey(rel, scan->keyData[0].sk_argument);
/*
* Acquire shared split lock so we can compute the target bucket safely
* (see README).
*/
_hash_getlock(rel, 0, HASH_SHARE);
/* Read the metapage */
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
_hash_checkpage(rel, metabuf, LH_META_PAGE);
metap = (HashMetaPage) BufferGetPage(metabuf);
/*
* Compute the target bucket number, and convert to block number.
*/
bucket = _hash_hashkey2bucket(hashkey,
metap->hashm_maxbucket,
metap->hashm_highmask,
metap->hashm_lowmask);
blkno = BUCKET_TO_BLKNO(metap, bucket);
/* done with the metapage */
_hash_relbuf(rel, metabuf);
/*
* Acquire share lock on target bucket; then we can release split lock.
*/
_hash_getlock(rel, blkno, HASH_SHARE);
_hash_droplock(rel, 0, HASH_SHARE);
/* Update scan opaque state to show we have lock on the bucket */
so->hashso_bucket = bucket;
so->hashso_bucket_valid = true;
so->hashso_bucket_blkno = blkno;
/* Fetch the primary bucket page for the bucket */
buf = _hash_getbuf(rel, blkno, HASH_READ);
_hash_checkpage(rel, buf, LH_BUCKET_PAGE);
page = BufferGetPage(buf);
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
Assert(opaque->hasho_bucket == bucket);
/* If a backwards scan is requested, move to the end of the chain */
if (ScanDirectionIsBackward(dir))
{
while (BlockNumberIsValid(opaque->hasho_nextblkno))
_hash_readnext(rel, &buf, &page, &opaque);
}
/* Now find the first tuple satisfying the qualification */
if (!_hash_step(scan, &buf, dir))
{
return false;
}
/* if we're here, _hash_step found a valid tuple */
offnum = ItemPointerGetOffsetNumber(current);
_hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
page = BufferGetPage(buf);
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
scan->xs_ctup.t_self = itup->t_tid;
return true;
}
/*
* _hash_step() -- step to the next valid item in a scan in the bucket.
*
* If no valid record exists in the requested direction, return
* false. Else, return true and set the scan's CurrentItemData
* to point at the item found.
*
* 'bufP' points to the current buffer, which is pinned and read-locked.
* On success exit, we have pin and read-lock on whichever page
* contains the right item; on failure, we have released all buffers.
*/
bool
_hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
{
Relation rel = scan->indexRelation;
HashScanOpaque so = (HashScanOpaque) scan->opaque;
ItemPointer current;
Buffer buf;
Page page;
HashPageOpaque opaque;
OffsetNumber maxoff;
OffsetNumber offnum;
BlockNumber blkno;
IndexTuple itup;
current = &(scan->currentItemData);
buf = *bufP;
_hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
page = BufferGetPage(buf);
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
/*
* If _hash_step is called from _hash_first, current will not be valid, so
* we can't dereference it. However, in that case, we presumably want to
* start at the beginning/end of the page...
*/
maxoff = PageGetMaxOffsetNumber(page);
if (ItemPointerIsValid(current))
offnum = ItemPointerGetOffsetNumber(current);
else
offnum = InvalidOffsetNumber;
/*
* 'offnum' now points to the last tuple we examined (if any).
*
* continue to step through tuples until: 1) we get to the end of the
* bucket chain or 2) we find a valid tuple.
*/
do
{
switch (dir)
{
case ForwardScanDirection:
if (offnum != InvalidOffsetNumber)
offnum = OffsetNumberNext(offnum); /* move forward */
else
offnum = FirstOffsetNumber; /* new page */
while (offnum > maxoff)
{
/*
* either this page is empty (maxoff ==
* InvalidOffsetNumber) or we ran off the end.
*/
_hash_readnext(rel, &buf, &page, &opaque);
if (BufferIsValid(buf))
{
maxoff = PageGetMaxOffsetNumber(page);
offnum = FirstOffsetNumber;
}
else
{
/* end of bucket */
maxoff = offnum = InvalidOffsetNumber;
break; /* exit while */
}
}
break;
case BackwardScanDirection:
if (offnum != InvalidOffsetNumber)
offnum = OffsetNumberPrev(offnum); /* move back */
else
offnum = maxoff; /* new page */
while (offnum < FirstOffsetNumber)
{
/*
* either this page is empty (offnum ==
* InvalidOffsetNumber) or we ran off the end.
*/
_hash_readprev(rel, &buf, &page, &opaque);
if (BufferIsValid(buf))
maxoff = offnum = PageGetMaxOffsetNumber(page);
else
{
/* end of bucket */
maxoff = offnum = InvalidOffsetNumber;
break; /* exit while */
}
}
break;
default:
/* NoMovementScanDirection */
/* this should not be reached */
break;
}
/* we ran off the end of the world without finding a match */
if (offnum == InvalidOffsetNumber)
{
/* we ran off the end of the bucket without finding a match */
*bufP = so->hashso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(current);
return false;
}
/* get ready to check this tuple */
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
} while (!_hash_checkqual(scan, itup));
/* if we made it to here, we've found a valid tuple */
blkno = BufferGetBlockNumber(buf);
*bufP = so->hashso_curbuf = buf;
ItemPointerSet(current, blkno, offnum);
return true;
}
| 3,787 |
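_hash_first above reads maxbucket, highmask and lowmask from the metapage and hands them to _hash_hashkey2bucket, whose body is not part of this excerpt. The conventional linear-hashing computation behind that call is small: mask with the larger (high) mask first, then fall back to the low mask when the resulting bucket has not been created yet. A sketch of that mapping, under the assumption that this is what _hash_hashkey2bucket does, in Python:

def hashkey_to_bucket(hashkey, maxbucket, highmask, lowmask):
    # Try the bucket number in the larger, partially filled table first.
    bucket = hashkey & highmask
    if bucket > maxbucket:
        # That bucket does not exist yet; wrap back into the smaller table.
        bucket = bucket & lowmask
    return bucket

# With maxbucket=5, highmask=7, lowmask=3 every key lands in buckets 0..5.
assert hashkey_to_bucket(0x2A, maxbucket=5, highmask=7, lowmask=3) == 2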
315 | <filename>src/Tools/Code/LDPC/Update_rule/MS/Update_rule_MS.hxx
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
#include <algorithm>
#include <limits>
#include <cmath>
#include "Tools/Code/LDPC/Update_rule/MS/Update_rule_MS.hpp"
namespace aff3ct
{
namespace tools
{
template <typename R>
Update_rule_MS<R>
::Update_rule_MS()
: name("MS"), sign(0), min1(std::numeric_limits<R>::max()), min2(min1), cst1(0), cst2(0), n_ite(0), ite(0)
{
}
template <typename R>
inline std::string Update_rule_MS<R>
::get_name() const
{
return this->name;
}
template <typename R>
inline void Update_rule_MS<R>
::begin_decoding(const int n_ite)
{
this->n_ite = n_ite;
}
template <typename R>
inline void Update_rule_MS<R>
::begin_ite(const int ite)
{
this->ite = ite;
}
template <typename R>
inline void Update_rule_MS<R>
::begin_chk_node_in(const int chk_id, const int chk_degree)
{
this->sign = 0;
this->min1 = std::numeric_limits<R>::max();
this->min2 = std::numeric_limits<R>::max();
}
template <typename R>
inline void Update_rule_MS<R>
::compute_chk_node_in(const int var_id, const R var_val)
{
const auto var_abs = (R)std::abs(var_val);
const auto var_sign = std::signbit((float)var_val) ? -1 : 0;
this->sign ^= var_sign;
this->min2 = std::min(this->min2, std::max(var_abs, this->min1));
this->min1 = std::min(this->min1, var_abs );
}
template <typename R>
inline void Update_rule_MS<R>
::end_chk_node_in()
{
this->cst1 = std::max((R)0, this->min2);
this->cst2 = std::max((R)0, this->min1);
}
template <typename R>
inline void Update_rule_MS<R>
::begin_chk_node_out(const int chk_id, const int chk_degree)
{
}
template <typename R>
inline R Update_rule_MS<R>
::compute_chk_node_out(const int var_id, const R var_val)
{
const auto var_abs = (R)std::abs(var_val);
const auto res_abs = ((var_abs == this->min1) ? this->cst1 : this->cst2);
const auto res_sng = this->sign ^ (std::signbit((float)var_val) ? -1 : 0);
return (R)std::copysign(res_abs, res_sng);
}
template <typename R>
inline void Update_rule_MS<R>
::end_chk_node_out()
{
}
template <typename R>
inline void Update_rule_MS<R>
::end_ite()
{
}
template <typename R>
inline void Update_rule_MS<R>
::end_decoding()
{
}
}
}
| 982 |
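The min-sum check-node pass above never needs more than three running values per check node: the product of the input signs, the smallest input magnitude (min1) and the second smallest (min2); the message sent back on an edge uses min2 when that edge itself carried min1, and min1 otherwise, with the edge's own sign divided out of the sign product. A self-contained sketch of the same rule in Python, using plain floats instead of the templated type R and no saturation or offset:

def min_sum_check_node(inputs):
    # inputs: LLR messages arriving on the edges of one check node.
    sign = 1
    min1 = float("inf")   # smallest magnitude seen
    min2 = float("inf")   # second-smallest magnitude seen
    for v in inputs:
        sign *= -1 if v < 0 else 1
        mag = abs(v)
        if mag < min1:
            min1, min2 = mag, min1
        elif mag < min2:
            min2 = mag
    out = []
    for v in inputs:
        mag = min2 if abs(v) == min1 else min1
        edge_sign = sign * (-1 if v < 0 else 1)   # remove this edge's own sign
        out.append(edge_sign * mag)
    return out

# The edge that carried the weakest input (-0.5) gets the second minimum back.
print(min_sum_check_node([2.0, -0.5, 1.5]))   # [-0.5, 1.5, -0.5]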
305 | <filename>libc/test/src/string/strtok_test.cpp
//===-- Unittests for strtok ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "src/string/strtok.h"
#include "utils/UnitTest/Test.h"
TEST(StrTokTest, NoTokenFound) {
char empty[] = "";
ASSERT_STREQ(__llvm_libc::strtok(empty, ""), nullptr);
ASSERT_STREQ(__llvm_libc::strtok(empty, "_"), nullptr);
char single[] = "_";
ASSERT_STREQ(__llvm_libc::strtok(single, ""), "_");
char multiple[] = "1,2";
ASSERT_STREQ(__llvm_libc::strtok(multiple, ":"), "1,2");
}
TEST(StrTokTest, DelimiterAsFirstCharacterShouldBeIgnored) {
char src[] = ".123";
ASSERT_STREQ(__llvm_libc::strtok(src, "."), "123");
}
TEST(StrTokTest, DelimiterIsMiddleCharacter) {
char src[] = "12,34";
ASSERT_STREQ(__llvm_libc::strtok(src, ","), "12");
}
TEST(StrTokTest, DelimiterAsLastCharacterShouldBeIgnored) {
char src[] = "1234:";
ASSERT_STREQ(__llvm_libc::strtok(src, ":"), "1234");
}
TEST(StrTokTest, MultipleDelimiters) {
char src[] = "12,.34";
ASSERT_STREQ(__llvm_libc::strtok(src, "."), "12,");
ASSERT_STREQ(__llvm_libc::strtok(src, ".,"), "12");
ASSERT_STREQ(__llvm_libc::strtok(src, ",."), "12");
ASSERT_STREQ(__llvm_libc::strtok(src, ":,."), "12");
}
TEST(StrTokTest, ShouldNotGoPastNullTerminator) {
char src[] = {'1', '2', '\0', ',', '3'};
ASSERT_STREQ(__llvm_libc::strtok(src, ","), "12");
}
TEST(StrTokTest, SubsequentCallsShouldFindFollowingDelimiters) {
char src[] = "12,34.56";
char *token = __llvm_libc::strtok(src, ",.");
ASSERT_STREQ(token, "12");
token = __llvm_libc::strtok(nullptr, ",.");
ASSERT_STREQ(token, "34");
token = __llvm_libc::strtok(nullptr, ",.");
ASSERT_STREQ(token, "56");
token = __llvm_libc::strtok(nullptr, "_:,_");
ASSERT_STREQ(token, nullptr);
// Subsequent calls after hitting the end of the string should also return
// nullptr.
token = __llvm_libc::strtok(nullptr, "_:,_");
ASSERT_STREQ(token, nullptr);
}
TEST(StrTokTest, DelimitersShouldNotBeIncludedInToken) {
char src[] = "__ab__:_cd__:__ef__:__";
char *token = __llvm_libc::strtok(src, "_:");
ASSERT_STREQ(token, "ab");
token = __llvm_libc::strtok(nullptr, ":_");
ASSERT_STREQ(token, "cd");
token = __llvm_libc::strtok(nullptr, "_:,");
ASSERT_STREQ(token, "ef");
token = __llvm_libc::strtok(nullptr, "_:,_");
ASSERT_STREQ(token, nullptr);
}
| 1,111 |
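Taken together, the tests above pin down the observable strtok contract: leading and trailing delimiters are skipped, the delimiter set may change between calls, passing nullptr continues from the previously saved position, and an exhausted string keeps returning nullptr. Below is a loose stateful model of those semantics in Python, for illustration only; the real libc function mutates the caller's buffer in place and keeps its position in hidden static state, which this sketch does not attempt to reproduce:

class Tokenizer:
    """Toy model of strtok's saved position; None plays the role of nullptr."""
    def __init__(self, s):
        self.rest = s

    def next_token(self, delims):
        # Skip any leading delimiters.
        i = 0
        while i < len(self.rest) and self.rest[i] in delims:
            i += 1
        if i == len(self.rest):
            self.rest = ""
            return None
        # The token runs until the next delimiter or the end of the string.
        j = i
        while j < len(self.rest) and self.rest[j] not in delims:
            j += 1
        token, self.rest = self.rest[i:j], self.rest[j:]
        return token

t = Tokenizer("__ab__:_cd__")
print(t.next_token("_:"))   # ab
print(t.next_token(":_"))   # cd
print(t.next_token("_:"))   # None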
335 | {
"word": "Matte",
"definitions": [
"An impure product of the smelting of sulphide ores, especially those of copper or nickel."
],
"parts-of-speech": "Noun"
} | 74 |
365 | # -*- coding: utf-8 -*-
import logging
import subprocess
import os
CLIENT_DEPLOY_ENTRY = os.getenv("CLIENT_DEPLOY_ENTRY")
ENV_PYTHON = os.getenv("ENV_PYTHON")
logger = logging.getLogger('quota')
def get_quota_summary(cluster_name):
res = []
try:
cmd_user = [ENV_PYTHON, CLIENT_DEPLOY_ENTRY, 'shell', 'hdfs', cluster_name, 'dfs', '-quota', '/user/*']
cmd_hbase = [ENV_PYTHON, CLIENT_DEPLOY_ENTRY, 'shell', 'hdfs', cluster_name, 'dfs', '-quota', '/hbase']
for cmd in [cmd_user, cmd_hbase]:
content = subprocess.check_output(cmd)
for line in content.strip().split('\n'):
dir_info = {}
(dir_info['quota'], dir_info['used_quota'],
dir_info['remaining_quota'], dir_info['space_quota'],
dir_info['used_space_quota'], dir_info['remaining_space_quota'],
dir_info['name']) = line.split()
# discard the '/user/' prefix, keeping only the user name
if len(dir_info['name']) > 7:
dir_info['name'] = dir_info['name'][6:]
else:
dir_info['name'] = 'hbase'
res.append(dir_info)
except Exception as e:
if repr(e).find("No such file") == -1:
return ""
raise e
return res
| 549 |
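Each line produced by the dfs -quota command above is assumed to carry seven whitespace-separated columns, which the loop unpacks positionally into the result dictionary. The same parsing step in isolation, as a small sketch; the sample line and the helper name are invented for illustration:

def parse_quota_line(line):
    # Column order assumed by the script above: six quota counters, then the path.
    keys = ['quota', 'used_quota', 'remaining_quota',
            'space_quota', 'used_space_quota', 'remaining_space_quota', 'name']
    info = dict(zip(keys, line.split()))
    # Strip the '/user/' prefix so only the user name remains.
    if info['name'].startswith('/user/'):
        info['name'] = info['name'][len('/user/'):]
    return info

print(parse_quota_line("100 3 97 1000 250 750 /user/alice")['name'])   # alice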
458 | {
"class": "AcceptEventAction",
"entity": "_earthquake_ether_inspector",
"option": {
"event_uuid": "69672de4-c4ef-4d47-8e17-562a81cf4def"
},
"type": "action",
"uuid": "dcef3c78-27f0-40fb-abd2-4d6ee93a103c"
} | 114 |
348 | {"nom":"Moncourt","circ":"4ème circonscription","dpt":"Moselle","inscrits":56,"abs":20,"votants":36,"blancs":2,"nuls":1,"exp":33,"res":[{"nuance":"LR","nom":"<NAME>","voix":20},{"nuance":"REM","nom":"<NAME>","voix":13}]} | 89 |
1,442 | <filename>escher/src/tab_view_data_source.cpp
#include <escher/tab_view_data_source.h>
namespace Escher {
TabViewDataSource::TabViewDataSource() :
m_activeChildIndex(-1),
m_selectedChildIndex(-1)
{
}
int TabViewDataSource::activeTab() const {
return m_activeChildIndex;
}
int TabViewDataSource::selectedTab() const {
return m_selectedChildIndex;
}
void TabViewDataSource::setActiveTab(int i) {
m_activeChildIndex = i;
}
void TabViewDataSource::setSelectedTab(int i) {
m_selectedChildIndex = i;
}
}
| 186 |
369 | /*
* Copyright © 2019 <NAME>, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package io.cdap.cdap.metadata.elastic;
import io.cdap.cdap.api.data.schema.Schema;
import io.cdap.cdap.api.metadata.MetadataEntity;
import org.junit.Assert;
import org.junit.Test;
public class MetadataDocumentTest {
@Test
public void testSchema() {
Schema bytesArraySchema = Schema.arrayOf(Schema.of(Schema.Type.BYTES));
Schema stringArraySchema = Schema.arrayOf(Schema.of(Schema.Type.STRING));
Schema booleanBytesMapSchema = Schema.mapOf(Schema.of(Schema.Type.BOOLEAN), Schema.of(Schema.Type.BYTES));
Schema nestedMapSchema = Schema.mapOf(bytesArraySchema, booleanBytesMapSchema);
Schema record22Schema = Schema.recordOf("record22", Schema.Field.of("a", nestedMapSchema));
Schema record22ArraySchema = Schema.arrayOf(record22Schema);
Schema bytesDoubleMapSchema = Schema.mapOf(Schema.of(Schema.Type.BYTES), Schema.of(Schema.Type.DOUBLE));
Schema record21Schema = Schema.recordOf("record21",
Schema.Field.of("x", Schema.of(Schema.Type.STRING)),
Schema.Field.of("y", stringArraySchema),
Schema.Field.of("z", bytesDoubleMapSchema));
Schema record21to22MapSchema = Schema.mapOf(record21Schema, record22ArraySchema);
Schema nullableIntSchema = Schema.nullableOf(Schema.of(Schema.Type.INT));
Schema tripeUnionSchema = Schema.unionOf(Schema.of(Schema.Type.INT), Schema.of(Schema.Type.LONG),
Schema.of(Schema.Type.NULL));
Schema complexSchema = Schema.recordOf("record1",
Schema.Field.of("map1", record21to22MapSchema),
Schema.Field.of("i", nullableIntSchema),
Schema.Field.of("j", tripeUnionSchema));
Schema anotherComplexSchema = Schema.arrayOf(Schema.of(Schema.Type.STRING));
Schema superComplexSchema = Schema.unionOf(complexSchema, anotherComplexSchema, Schema.of(Schema.Type.NULL));
String[] results = MetadataDocument.Builder.parseSchema(MetadataEntity.ofDataset("ds"),
superComplexSchema.toString()).split(" ");
String[] expected = {
"record1", "record1:RECORD",
"map1", "map1:MAP",
"record21", "record21:RECORD",
"x", "x:STRING",
"y", "y:ARRAY",
"z", "z:MAP",
"record22", "record22:RECORD",
"a", "a:MAP",
"i", "i:INT",
"j", "j:UNION",
};
Assert.assertArrayEquals(expected, results);
}
@Test
public void testInvalidSchema() {
Schema schema = Schema.recordOf("mystruct",
Schema.Field.of("x", Schema.of(Schema.Type.STRING)),
Schema.Field.of("y", Schema.of(Schema.Type.INT)),
Schema.Field.of("z", Schema.of(Schema.Type.DOUBLE)));
String schemaString = schema.toString();
MetadataEntity entity = MetadataEntity.ofDataset("ds");
String[] results = MetadataDocument.Builder.parseSchema(entity, schemaString).split(" ");
String[] expected = {"mystruct", "mystruct:RECORD", "x", "x:STRING", "y", "y:INT", "z", "z:DOUBLE"};
Assert.assertArrayEquals(expected, results);
String schemaWithInvalidTypes = schemaString.replace("string", "nosuchtype");
Assert.assertEquals(schemaWithInvalidTypes, MetadataDocument.Builder.parseSchema(entity, schemaWithInvalidTypes));
String truncatedSchema = schemaString.substring(10, schemaString.length() - 10);
Assert.assertEquals(truncatedSchema, MetadataDocument.Builder.parseSchema(entity, truncatedSchema));
}
}
| 1,865 |
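The expected arrays in these tests encode the flattening rule the indexer applies to a schema: every named element contributes two tokens, its bare name and name:TYPE, and record and container types are walked recursively while anonymous children (map keys and values, array elements, union branches) add no name tokens of their own. A rough sketch of that idea over a simplified, hand-rolled schema representation in Python; the tuple shape used here is an assumption for illustration, not CDAP's actual Schema API, and it deliberately skips details such as nested record names and nullable unwrapping:

def flatten(name, schema, out):
    # schema: ("STRING",) etc. for primitives, ("RECORD", {field: schema}) for records,
    # ("MAP"/"ARRAY"/"UNION", [children]) for containers.
    kind = schema[0]
    if name is not None:
        out.append(name)
        out.append(f"{name}:{kind}")
    if kind == "RECORD":
        for field_name, field_schema in schema[1].items():
            flatten(field_name, field_schema, out)
    elif kind in ("MAP", "ARRAY", "UNION"):
        for child in schema[1]:
            flatten(None, child, out)   # anonymous children: recurse, emit no names
    return out

record21 = ("RECORD", {"x": ("STRING",), "y": ("ARRAY", [("STRING",)])})
print(flatten("record21", record21, []))
# ['record21', 'record21:RECORD', 'x', 'x:STRING', 'y', 'y:ARRAY']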