max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
1,135 | /**
* example code for trends resources
*/
package twitter4j.examples.trends; | 23 |
880 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
from textwrap import dedent
import libcst as cst
from libcst.testing.utils import data_provider, UnitTest
class DuplicateLeafNodeTest(UnitTest):
@data_provider(
(
# Simple program
(
"""
foo = 'toplevel'
fn1(foo)
fn2(foo)
def fn_def():
foo = 'shadow'
fn3(foo)
""",
),
)
)
def test_tokenize(self, code: str) -> None:
test_case = self
class CountVisitor(cst.CSTVisitor):
def __init__(self) -> None:
self.count = Counter()
self.nodes = {}
def on_visit(self, node: cst.CSTNode) -> bool:
self.count[id(node)] += 1
test_case.assertTrue(
self.count[id(node)] == 1,
f"Node duplication detected between {node} and {self.nodes.get(id(node))}",
)
self.nodes[id(node)] = node
return True
module = cst.parse_module(dedent(code))
module.visit(CountVisitor())
| 713 |
1,695 | <gh_stars>1000+
///////////////////////////////////////////////////////////////////////////////////
/// OpenGL Mathematics (glm.g-truc.net)
///
/// Copyright (c) 2005 - 2015 G-Truc Creation (www.g-truc.net)
/// Permission is hereby granted, free of charge, to any person obtaining a copy
/// of this software and associated documentation files (the "Software"), to deal
/// in the Software without restriction, including without limitation the rights
/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
/// copies of the Software, and to permit persons to whom the Software is
/// furnished to do so, subject to the following conditions:
///
/// The above copyright notice and this permission notice shall be included in
/// all copies or substantial portions of the Software.
///
/// Restrictions:
/// By making use of the Software for military purposes, you choose to make
/// a Bunny unhappy.
///
/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
/// THE SOFTWARE.
///
/// @ref gtc_type_ptr
/// @file glm/gtc/type_ptr.inl
/// @date 2011-06-15 / 2011-12-07
/// @author <NAME>
///////////////////////////////////////////////////////////////////////////////////
#include <cstring>
namespace glm
{
/// @addtogroup gtc_type_ptr
/// @{
/// Return the constant address to the data of the vector input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T const * value_ptr
(
tvec2<T, P> const & vec
)
{
return &(vec.x);
}
//! Return the address to the data of the vector input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T * value_ptr
(
tvec2<T, P> & vec
)
{
return &(vec.x);
}
/// Return the constant address to the data of the vector input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T const * value_ptr
(
tvec3<T, P> const & vec
)
{
return &(vec.x);
}
//! Return the address to the data of the vector input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T * value_ptr
(
tvec3<T, P> & vec
)
{
return &(vec.x);
}
/// Return the constant address to the data of the vector input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T const * value_ptr
(
tvec4<T, P> const & vec
)
{
return &(vec.x);
}
//! Return the address to the data of the vector input.
//! From GLM_GTC_type_ptr extension.
template<typename T, precision P>
GLM_FUNC_QUALIFIER T * value_ptr
(
tvec4<T, P> & vec
)
{
return &(vec.x);
}
/// Return the constant address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T const * value_ptr
(
tmat2x2<T, P> const & mat
)
{
return &(mat[0].x);
}
//! Return the address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T * value_ptr
(
tmat2x2<T, P> & mat
)
{
return &(mat[0].x);
}
/// Return the constant address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T const * value_ptr
(
tmat3x3<T, P> const & mat
)
{
return &(mat[0].x);
}
//! Return the address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T * value_ptr
(
tmat3x3<T, P> & mat
)
{
return &(mat[0].x);
}
/// Return the constant address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T const * value_ptr
(
tmat4x4<T, P> const & mat
)
{
return &(mat[0].x);
}
//! Return the address to the data of the matrix input.
//! From GLM_GTC_type_ptr extension.
template<typename T, precision P>
GLM_FUNC_QUALIFIER T * value_ptr
(
tmat4x4<T, P> & mat
)
{
return &(mat[0].x);
}
/// Return the constant address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T const * value_ptr
(
tmat2x3<T, P> const & mat
)
{
return &(mat[0].x);
}
//! Return the address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T * value_ptr
(
tmat2x3<T, P> & mat
)
{
return &(mat[0].x);
}
/// Return the constant address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T const * value_ptr
(
tmat3x2<T, P> const & mat
)
{
return &(mat[0].x);
}
//! Return the address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T * value_ptr
(
tmat3x2<T, P> & mat
)
{
return &(mat[0].x);
}
/// Return the constant address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T const * value_ptr
(
tmat2x4<T, P> const & mat
)
{
return &(mat[0].x);
}
//! Return the address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T * value_ptr
(
tmat2x4<T, P> & mat
)
{
return &(mat[0].x);
}
/// Return the constant address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T const * value_ptr
(
tmat4x2<T, P> const & mat
)
{
return &(mat[0].x);
}
//! Return the address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T * value_ptr
(
tmat4x2<T, P> & mat
)
{
return &(mat[0].x);
}
/// Return the constant address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T const * value_ptr
(
tmat3x4<T, P> const & mat
)
{
return &(mat[0].x);
}
//! Return the address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T * value_ptr
(
tmat3x4<T, P> & mat
)
{
return &(mat[0].x);
}
/// Return the constant address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T const * value_ptr
(
tmat4x3<T, P> const & mat
)
{
return &(mat[0].x);
}
/// Return the address to the data of the matrix input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T * value_ptr(tmat4x3<T, P> & mat)
{
return &(mat[0].x);
}
/// Return the constant address to the data of the input parameter.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T const * value_ptr
(
tquat<T, P> const & q
)
{
return &(q[0]);
}
/// Return the address to the data of the quaternion input.
/// @see gtc_type_ptr
template<typename T, precision P>
GLM_FUNC_QUALIFIER T * value_ptr
(
tquat<T, P> & q
)
{
return &(q[0]);
}
/// Build a vector from a pointer.
/// @see gtc_type_ptr
template <typename T>
GLM_FUNC_QUALIFIER tvec2<T, defaultp> make_vec2(T const * const ptr)
{
tvec2<T, defaultp> Result;
memcpy(value_ptr(Result), ptr, sizeof(tvec2<T, defaultp>));
return Result;
}
/// Build a vector from a pointer.
/// @see gtc_type_ptr
template <typename T>
GLM_FUNC_QUALIFIER tvec3<T, defaultp> make_vec3(T const * const ptr)
{
tvec3<T, defaultp> Result;
memcpy(value_ptr(Result), ptr, sizeof(tvec3<T, defaultp>));
return Result;
}
/// Build a vector from a pointer.
/// @see gtc_type_ptr
template <typename T>
GLM_FUNC_QUALIFIER tvec4<T, defaultp> make_vec4(T const * const ptr)
{
tvec4<T, defaultp> Result;
memcpy(value_ptr(Result), ptr, sizeof(tvec4<T, defaultp>));
return Result;
}
/// Build a matrix from a pointer.
/// @see gtc_type_ptr
template <typename T>
GLM_FUNC_QUALIFIER tmat2x2<T, defaultp> make_mat2x2(T const * const ptr)
{
tmat2x2<T, defaultp> Result;
memcpy(value_ptr(Result), ptr, sizeof(tmat2x2<T, defaultp>));
return Result;
}
/// Build a matrix from a pointer.
/// @see gtc_type_ptr
template <typename T>
GLM_FUNC_QUALIFIER tmat2x3<T, defaultp> make_mat2x3(T const * const ptr)
{
tmat2x3<T, defaultp> Result;
memcpy(value_ptr(Result), ptr, sizeof(tmat2x3<T, defaultp>));
return Result;
}
/// Build a matrix from a pointer.
/// @see gtc_type_ptr
template <typename T>
GLM_FUNC_QUALIFIER tmat2x4<T, defaultp> make_mat2x4(T const * const ptr)
{
tmat2x4<T, defaultp> Result;
memcpy(value_ptr(Result), ptr, sizeof(tmat2x4<T, defaultp>));
return Result;
}
/// Build a matrix from a pointer.
/// @see gtc_type_ptr
template <typename T>
GLM_FUNC_QUALIFIER tmat3x2<T, defaultp> make_mat3x2(T const * const ptr)
{
tmat3x2<T, defaultp> Result;
memcpy(value_ptr(Result), ptr, sizeof(tmat3x2<T, defaultp>));
return Result;
}
//! Build a matrix from a pointer.
/// @see gtc_type_ptr
template <typename T>
GLM_FUNC_QUALIFIER tmat3x3<T, defaultp> make_mat3x3(T const * const ptr)
{
tmat3x3<T, defaultp> Result;
memcpy(value_ptr(Result), ptr, sizeof(tmat3x3<T, defaultp>));
return Result;
}
//! Build a matrix from a pointer.
/// @see gtc_type_ptr
template <typename T>
GLM_FUNC_QUALIFIER tmat3x4<T, defaultp> make_mat3x4(T const * const ptr)
{
tmat3x4<T, defaultp> Result;
memcpy(value_ptr(Result), ptr, sizeof(tmat3x4<T, defaultp>));
return Result;
}
//! Build a matrix from a pointer.
/// @see gtc_type_ptr
template <typename T>
GLM_FUNC_QUALIFIER tmat4x2<T, defaultp> make_mat4x2(T const * const ptr)
{
tmat4x2<T, defaultp> Result;
memcpy(value_ptr(Result), ptr, sizeof(tmat4x2<T, defaultp>));
return Result;
}
//! Build a matrix from a pointer.
/// @see gtc_type_ptr
template <typename T>
GLM_FUNC_QUALIFIER tmat4x3<T, defaultp> make_mat4x3(T const * const ptr)
{
tmat4x3<T, defaultp> Result;
memcpy(value_ptr(Result), ptr, sizeof(tmat4x3<T, defaultp>));
return Result;
}
//! Build a matrix from a pointer.
/// @see gtc_type_ptr
template <typename T>
GLM_FUNC_QUALIFIER tmat4x4<T, defaultp> make_mat4x4(T const * const ptr)
{
tmat4x4<T, defaultp> Result;
memcpy(value_ptr(Result), ptr, sizeof(tmat4x4<T, defaultp>));
return Result;
}
//! Build a matrix from a pointer.
/// @see gtc_type_ptr
template <typename T>
GLM_FUNC_QUALIFIER tmat2x2<T, defaultp> make_mat2(T const * const ptr)
{
return make_mat2x2(ptr);
}
//! Build a matrix from a pointer.
/// @see gtc_type_ptr
template <typename T>
GLM_FUNC_QUALIFIER tmat3x3<T, defaultp> make_mat3(T const * const ptr)
{
return make_mat3x3(ptr);
}
//! Build a matrix from a pointer.
/// @see gtc_type_ptr
template <typename T>
GLM_FUNC_QUALIFIER tmat4x4<T, defaultp> make_mat4(T const * const ptr)
{
return make_mat4x4(ptr);
}
//! Build a quaternion from a pointer.
/// @see gtc_type_ptr
template <typename T>
GLM_FUNC_QUALIFIER tquat<T, defaultp> make_quat(T const * const ptr)
{
tquat<T, defaultp> Result;
memcpy(value_ptr(Result), ptr, sizeof(tquat<T, defaultp>));
return Result;
}
/// @}
}//namespace glm
| 4,763 |
335 | {
"word": "Reek",
"definitions": [
"Smell strongly and unpleasantly; stink.",
"Be suggestive of something unpleasant or undesirable.",
"Give off smoke, steam, or fumes."
],
"parts-of-speech": "Verb"
} | 98 |
629 | /* Copyright (c) 2017, United States Government, as represented by the
* Administrator of the National Aeronautics and Space Administration.
*
* All rights reserved.
*
* The Astrobee platform is licensed under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#include <temp_monitor/temp_monitor.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <ctime>
#include <cerrno>
#include <cstring>
// Grab an unsigned integer from user input with type and value checking
uint32_t InputUnsignedInteger(uint32_t min, uint32_t max) {
uint32_t choice;
while (true) {
std::cout << "Input choice > ";
std::string input;
getline(std::cin, input);
std::stringstream ss(input);
if (ss >> choice && choice >= min && choice <= max)
return choice;
std::cerr << "Number not in range [" << min << ":" << max << "], please try again" << std::endl;
}
std::cerr << "Got to an unreachable section of code" << std::endl;
return 0;
}
// Grab an unsigned integer from user input with type and value checking
uint16_t InputHexidecimalByte() {
uint16_t choice;
while (true) {
std::cout << "Input choice > ";
std::string input;
getline(std::cin, input);
std::stringstream ss(input);
ss << std::hex << input;
ss >> choice;
return choice;
}
std::cerr << "Got to an unreachable section of code" << std::endl;
return 0;
}
// Print a main menu
bool MainMenu(temp_monitor::TempMonitorPtr & sensor) {
// Print the menu
std::cout << "*******************************************************" << std::endl;
std::cout << "************************* MENU ************************" << std::endl;
std::cout << "*******************************************************" << std::endl;
std::cout << "0. Quit" << std::endl;
std::cout << "*******************************************************" << std::endl;
std::cout << "1. Connect to temperature sensor" << std::endl;
std::cout << "2. Query temperature" << std::endl;
std::cout << "*******************************************************" << std::endl;
// Keep looping until we have valid input
uint8_t choice = InputUnsignedInteger(0, 2);
// Do something based on the choice
switch (choice) {
case 0: {
std::cout << "Goodbye." << std::endl;
return false;
}
case 1: {
// Grab the i2c device number
std::cout << "Input i2c device number" << std::endl;
uint8_t dev_num = static_cast < uint8_t >(InputUnsignedInteger(0, 4));
// Grab the device address
std::cout << "Input num retries" << std::endl;
uint32_t retries = InputUnsignedInteger(0, 512);
// Grab the device address
std::cout << "Select slave" << std::endl;
std::cout << "- [0] Backplane (0x48)" << std::endl;
std::cout << "- [1] LLP (0x49)" << std::endl;
std::cout << "- [2] MLP (0x4A)" << std::endl;
std::cout << "- [3] HLP (0x4B)" << std::endl;
uint32_t address_id = InputUnsignedInteger(0, 3);
uint16_t address = 0x48;
std::string type = "ADT7410";
switch (address_id) {
default:
case 0: address = 0x48; type = "ADT7410"; break;
case 1: address = 0x49; type = "TCN75A"; break;
case 2: address = 0x4A; type = "TCN75A"; break;
case 3: address = 0x4B; type = "TCN75A"; break;
}
// Try and open the bus
i2c::Error err;
auto bus = i2c::Open("/dev/i2c-" + std::to_string(dev_num), &err);
if (!bus) {
std::cerr << "Could not open the i2c bus" << std::endl;
return true;
}
bus->SetRetries(retries);
// Try and contact the slave device
auto device = bus->DeviceAt(address);
if (!device) {
std::cerr << "Could not find the i2c slave address" << std::endl;
return true;
}
// Check if we already have this bus open
sensor = temp_monitor::TempMonitorFactory::Create(type, device);
if (!sensor) {
std::cerr << "Could not instantiate the device" << std::endl;
return true;
}
return true;
}
case 2: {
double temp;
if (sensor == nullptr)
std::cerr << "Please connect to a temperature sensor" << std::endl;
else if (sensor->GetTemperature(&temp))
std::cout << "Temperature: " << temp << " celcius" << std::endl;
else
std::cerr << "No response from temperature sensor" << std::endl;
return true;
}
default: {
std::cerr << "Invalid selection" << std::endl;
return true;
}
}
return true;
}
// Main entry point for application
int main(int argc, char *argv[]) {
std::cout << std::endl << "Astrobee temperature monitor host test" << std::endl << std::endl;
temp_monitor::TempMonitorPtr sensor = temp_monitor::TempMonitorPtr();
while (MainMenu(sensor)) {}
return 0;
}
| 1,979 |
513 | package com.zmops.zeus.iot.server.core.analysis.manual.trends;
/**
* @author nantian created at 2021/9/6 17:32
*/
public class UIntTrends {
}
| 58 |
4,772 | package example.service;
import example.repo.Customer468Repository;
import org.springframework.stereotype.Service;
@Service
public class Customer468Service {
public Customer468Service(Customer468Repository repo) {}
}
| 60 |
785 | /*
* Copyright © 2018 Apple Inc. and the ServiceTalk project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicetalk.http.router.jersey;
import io.servicetalk.http.router.jersey.Context.ConnectionContextReferencingFactory;
import io.servicetalk.http.router.jersey.Context.HttpRequestReferencingFactory;
import io.servicetalk.transport.api.ConnectionContext;
import org.glassfish.jersey.internal.inject.AbstractBinder;
import org.glassfish.jersey.process.internal.RequestScoped;
import javax.ws.rs.core.Feature;
import javax.ws.rs.core.FeatureContext;
import static io.servicetalk.http.router.jersey.Context.CONNECTION_CONTEXT_REF_GENERIC_TYPE;
import static io.servicetalk.http.router.jersey.Context.HTTP_REQUEST_GENERIC_TYPE;
import static io.servicetalk.http.router.jersey.Context.HTTP_REQUEST_REF_GENERIC_TYPE;
import static org.glassfish.jersey.internal.inject.ReferencingFactory.referenceFactory;
/**
* Feature enabling ServiceTalk request handling.
* This feature registers providers and binders needed to enable ServiceTalk as a handler for a Jersey application.
*/
public final class ServiceTalkFeature implements Feature {
@Override
public boolean configure(final FeatureContext context) {
if (!context.getConfiguration().isRegistered(EndpointEnhancingRequestFilter.class)) {
context.register(BufferMessageBodyReaderWriter.class);
context.register(BufferPublisherMessageBodyReaderWriter.class);
context.register(BufferSingleMessageBodyReaderWriter.class);
context.register(EndpointEnhancingRequestFilter.class);
context.register(new AbstractBinder() {
@Override
protected void configure() {
bindFactory(ConnectionContextReferencingFactory.class).to(ConnectionContext.class)
.proxy(true).proxyForSameScope(true).in(RequestScoped.class);
bindFactory(referenceFactory()).to(CONNECTION_CONTEXT_REF_GENERIC_TYPE).in(RequestScoped.class);
bindFactory(HttpRequestReferencingFactory.class).to(HTTP_REQUEST_GENERIC_TYPE)
.proxy(true).proxyForSameScope(false).in(RequestScoped.class);
bindFactory(referenceFactory()).to(HTTP_REQUEST_REF_GENERIC_TYPE).in(RequestScoped.class);
}
});
}
return true;
}
}
| 1,030 |
488 | <filename>src/midend/programAnalysis/annotationLanguageParser/report.h
#include <featureTests.h>
#ifdef ROSE_ENABLE_SOURCE_ANALYSIS
#ifndef BDWY_REPORT_H
#define BDWY_REPORT_H
typedef std::list< reportAnn * > report_list;
typedef report_list::iterator report_list_p;
typedef report_list::const_iterator report_list_cp;
/** @brief Report element base class
*
* */
class reportElementAnn : public Ann
{
public:
reportElementAnn(int line)
: Ann(line)
{}
virtual void lookup(enumPropertyAnn * default_property,
procedureAnn * procedure,
Annotations * annotations) =0;
#ifdef NONEMPTY
virtual void report(std::ostream & out,
bool is_error,
Analyzer * analyzer,
procLocation * where) =0;
// static callNode * callnode(stmtNode * stmt);
#endif
};
/** @brief Literal string report element
*
* This type of element just prints out the string it's given. */
class literalReportElementAnn : public reportElementAnn
{
private:
std::string _literal;
public:
// DQ (8/12/2004): Added virtual destructor to avoid g++ compiler warnings
virtual ~literalReportElementAnn() {};
literalReportElementAnn(const parserID * name);
virtual void lookup(enumPropertyAnn * default_property,
procedureAnn * procedure,
Annotations * annotations);
#ifdef NONEMPTY
virtual void report(std::ostream & out,
bool is_error,
Analyzer * analyzer,
procLocation * where);
#endif
};
/** @brief Expression report element
*
* This type of element queries the values of variable analysis properties.
* */
class expressionReportElementAnn : public reportElementAnn
{
private:
/** @brief Flow sensitivity */
Broadway::FlowSensitivity _flow_sensitivity;
/** @brief The expression to evaluate */
exprAnn * _expr;
public:
expressionReportElementAnn(Broadway::FlowSensitivity flow_sensitivity,
exprAnn * expr, int line);
virtual void lookup(enumPropertyAnn * default_property,
procedureAnn * procedure,
Annotations * annotations);
#ifdef NONEMPTY
virtual void report(std::ostream & out,
bool is_error,
Analyzer * analyzer,
procLocation * where);
#endif
};
/** @brief Location report element
*
* This type of element prints out locations in the program -- either
* context sensitive, or not. */
class locationReportElementAnn : public reportElementAnn
{
public:
typedef enum { Callsite, Context, Procedure, Num_Contexts } ContextKind;
private:
std::string _kind_name;
ContextKind _kind;
public:
// DQ (8/12/2004): Added virtual destructor to avoid g++ compiler warnings
virtual ~locationReportElementAnn() {};
locationReportElementAnn(const parserID * id);
virtual void lookup(enumPropertyAnn * default_property,
procedureAnn * procedure,
Annotations * annotations);
#ifdef NONEMPTY
virtual void report(std::ostream & out,
bool is_error,
Analyzer * analyzer,
procLocation * where);
#endif
};
/** @brief Binding report element
*
* This type of element prints out the bindings of a variable. It makes the
* most sense for formal parameters, where the binding is one-to-one, but
* will work for any variable. */
class bindingReportElementAnn : public reportElementAnn
{
private:
/** @brief The name of the variable */
std::string _variable_name;
/** @brief Size only
*
* When true, just print the number of objects in the binding. */
bool _size_only;
/** @brief A reference to the annvariable object */
annVariable * _variable;
public:
// DQ (8/12/2004): Added virtual destructor to avoid g++ compiler warnings
virtual ~bindingReportElementAnn() {};
bindingReportElementAnn(const parserID * varname, bool size_only);
virtual void lookup(enumPropertyAnn * default_property,
procedureAnn * procedure,
Annotations * annotations);
#ifdef NONEMPTY
virtual void report(std::ostream & out,
bool is_error,
Analyzer * analyzer,
procLocation * where);
#endif
};
typedef std::list< reportElementAnn * > report_element_list;
typedef report_element_list::iterator report_element_list_p;
/** @brief Report annotation
*
* */
class reportAnn : public Ann
{
private:
/** @brief Condition
*
* The condition to test for activating this report. Null for no
* condition. */
exprAnn * _condition;
/** @brief Report elements
*
* Each element of this list is an item to print. */
report_element_list _elements;
/** @brief Is error?
*
* Mark error reports so that we can count them up. */
bool _is_error;
public:
// DQ (8/12/2004): Added virtual destructor to avoid g++ compiler warnings
virtual ~reportAnn() {};
reportAnn(exprAnn * condition,
bool is_error,
report_element_list * elements, int line);
virtual void lookup(procedureAnn * procedure,
Annotations * annotations);
#ifdef NONEMPTY
virtual void report(std::ostream & out,
Analyzer * analyzer,
procLocation * where,
propertyAnalyzer * property_analyzer);
#endif
};
#endif /* BDWY_REPORT_H */
#endif
| 2,115 |
4,054 | <reponame>Anlon-Burke/vespa<gh_stars>1000+
/*
* Line based variant of Apache commons-text StringComparator
* https://github.com/apache/commons-text/blob/3b1a0a5a47ee9fa2b36f99ca28e2e1d367a10a11/src/main/java/org/apache/commons/text/diff/StringsComparator.java
*/
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.vespa.hosted.controller.application.pkg;
import com.yahoo.collections.Pair;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
/**
* <p>
* It is guaranteed that the comparisons will always be done as
* {@code o1.equals(o2)} where {@code o1} belongs to the first
* sequence and {@code o2} belongs to the second sequence. This can
* be important if subclassing is used for some elements in the first
* sequence and the {@code equals} method is specialized.
* </p>
* <p>
* Comparison can be seen from two points of view: either as giving the smallest
* modification allowing to transform the first sequence into the second one, or
* as giving the longest sequence which is a subsequence of both initial
* sequences. The {@code equals} method is used to compare objects, so any
* object can be put into sequences. Modifications include deleting, inserting
* or keeping one object, starting from the beginning of the first sequence.
* </p>
* <p>
* This class implements the comparison algorithm, which is the very efficient
* algorithm from <NAME>
* <a href="http://www.cis.upenn.edu/~bcpierce/courses/dd/papers/diff.ps">
* An O(ND) Difference Algorithm and Its Variations</a>.
*/
public class LinesComparator {
private final List<String> left;
private final List<String> right;
private final int[] vDown;
private final int[] vUp;
private LinesComparator(List<String> left, List<String> right) {
this.left = left;
this.right = right;
int size = left.size() + right.size() + 2;
vDown = new int[size];
vUp = new int[size];
}
private void buildScript(int start1, int end1, int start2, int end2, List<Pair<LineOperation, String>> result) {
Snake middle = getMiddleSnake(start1, end1, start2, end2);
if (middle == null
|| middle.start == end1 && middle.diag == end1 - end2
|| middle.end == start1 && middle.diag == start1 - start2) {
int i = start1;
int j = start2;
while (i < end1 || j < end2) {
if (i < end1 && j < end2 && left.get(i).equals(right.get(j))) {
result.add(new Pair<>(LineOperation.keep, left.get(i)));
++i;
++j;
} else {
if (end1 - start1 > end2 - start2) {
result.add(new Pair<>(LineOperation.delete, left.get(i)));
++i;
} else {
result.add(new Pair<>(LineOperation.insert, right.get(j)));
++j;
}
}
}
} else {
buildScript(start1, middle.start, start2, middle.start - middle.diag, result);
for (int i = middle.start; i < middle.end; ++i) {
result.add(new Pair<>(LineOperation.keep, left.get(i)));
}
buildScript(middle.end, end1, middle.end - middle.diag, end2, result);
}
}
private Snake buildSnake(final int start, final int diag, final int end1, final int end2) {
int end = start;
while (end - diag < end2 && end < end1 && left.get(end).equals(right.get(end - diag))) {
++end;
}
return new Snake(start, end, diag);
}
private Snake getMiddleSnake(final int start1, final int end1, final int start2, final int end2) {
final int m = end1 - start1;
final int n = end2 - start2;
if (m == 0 || n == 0) {
return null;
}
final int delta = m - n;
final int sum = n + m;
final int offset = (sum % 2 == 0 ? sum : sum + 1) / 2;
vDown[1 + offset] = start1;
vUp[1 + offset] = end1 + 1;
for (int d = 0; d <= offset; ++d) {
// Down
for (int k = -d; k <= d; k += 2) {
// First step
final int i = k + offset;
if (k == -d || k != d && vDown[i - 1] < vDown[i + 1]) {
vDown[i] = vDown[i + 1];
} else {
vDown[i] = vDown[i - 1] + 1;
}
int x = vDown[i];
int y = x - start1 + start2 - k;
while (x < end1 && y < end2 && left.get(x).equals(right.get(y))) {
vDown[i] = ++x;
++y;
}
// Second step
if (delta % 2 != 0 && delta - d <= k && k <= delta + d) {
if (vUp[i - delta] <= vDown[i]) { // NOPMD
return buildSnake(vUp[i - delta], k + start1 - start2, end1, end2);
}
}
}
// Up
for (int k = delta - d; k <= delta + d; k += 2) {
// First step
final int i = k + offset - delta;
if (k == delta - d || k != delta + d && vUp[i + 1] <= vUp[i - 1]) {
vUp[i] = vUp[i + 1] - 1;
} else {
vUp[i] = vUp[i - 1];
}
int x = vUp[i] - 1;
int y = x - start1 + start2 - k;
while (x >= start1 && y >= start2 && left.get(x).equals(right.get(y))) {
vUp[i] = x--;
y--;
}
// Second step
if (delta % 2 == 0 && -d <= k && k <= d) {
if (vUp[i] <= vDown[i + delta]) { // NOPMD
return buildSnake(vUp[i], k + start1 - start2, end1, end2);
}
}
}
}
// this should not happen
throw new RuntimeException("Internal Error");
}
private static class Snake {
private final int start;
private final int end;
private final int diag;
private Snake(int start, int end, int diag) {
this.start = start;
this.end = end;
this.diag = diag;
}
}
private enum LineOperation {
keep(" "), delete("- "), insert("+ ");
private final String prefix;
LineOperation(String prefix) {
this.prefix = prefix;
}
}
/**
 * @return line-based diff in unified format. Empty contents are identical.
 *         Each hunk shows up to 3 lines of context; overlapping hunks are merged.
 */
public static Optional<String> diff(List<String> left, List<String> right) {
    // Phase 1: compute a per-line edit script (keep/delete/insert).
    List<Pair<LineOperation, String>> changes = new ArrayList<>(Math.max(left.size(), right.size()));
    new LinesComparator(left, right).buildScript(0, left.size(), 0, right.size(), changes);
    // After we have a list of keep, delete, insert for each line from left and right input, generate a unified
    // diff by printing all delete and insert operations with contextLines of keep lines before and after.
    // Make sure the change windows are non-overlapping by continuously growing the window
    int contextLines = 3;
    // Each window is {startIndexInChanges, endIndexInChanges, leftLineIndex, rightLineIndex}.
    List<int[]> changeWindows = new ArrayList<>();
    int[] last = null;
    for (int i = 0, leftIndex = 0, rightIndex = 0; i < changes.size(); i++) {
        if (changes.get(i).getFirst() == LineOperation.keep) {
            leftIndex++;
            rightIndex++;
            continue;
        }
        // We found a new change and it is too far away from the previous change to be combined into the same window
        if (last == null || i - last[1] > contextLines) {
            last = new int[]{Math.max(i - contextLines, 0), Math.min(i + contextLines + 1, changes.size()), Math.max(leftIndex - contextLines, 0), Math.max(rightIndex - contextLines, 0)};
            changeWindows.add(last);
        } else // otherwise, extend the previous change window
            last[1] = Math.min(i + contextLines + 1, changes.size());
        // A delete advances only the left cursor, an insert only the right one.
        if (changes.get(i).getFirst() == LineOperation.delete) leftIndex++;
        else rightIndex++;
    }
    if (changeWindows.isEmpty()) return Optional.empty();
    // Phase 2: render every window as one unified-diff hunk.
    StringBuilder sb = new StringBuilder();
    for (int[] changeWindow: changeWindows) {
        int start = changeWindow[0], end = changeWindow[1], leftIndex = changeWindow[2], rightIndex = changeWindow[3];
        Map<LineOperation, Long> counts = IntStream.range(start, end)
                .mapToObj(i -> changes.get(i).getFirst())
                .collect(Collectors.groupingBy(i -> i, Collectors.counting()));
        // Hunk header "@@ -l,n +r,m @@": n excludes inserted lines, m excludes deleted lines.
        sb.append("@@ -").append(leftIndex + 1).append(',').append(end - start - counts.getOrDefault(LineOperation.insert, 0L))
                .append(" +").append(rightIndex + 1).append(',').append(end - start - counts.getOrDefault(LineOperation.delete, 0L)).append(" @@\n");
        for (int i = start; i < end; i++)
            sb.append(changes.get(i).getFirst().prefix).append(changes.get(i).getSecond()).append('\n');
    }
    return Optional.of(sb.toString());
}
}
| 4,562 |
1,291 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for plane_wave_hamiltonian.py"""
import unittest
import numpy as np
from openfermion.hamiltonians.plane_wave_hamiltonian import (
jellium_model, jordan_wigner_dual_basis_hamiltonian, plane_wave_hamiltonian)
from openfermion.transforms.opconversions import jordan_wigner
from openfermion.linalg import eigenspectrum, get_sparse_operator
from openfermion.utils import Grid, is_hermitian
class PlaneWaveHamiltonianTest(unittest.TestCase):
    """Integration tests for plane_wave_hamiltonian and
    jordan_wigner_dual_basis_hamiltonian (see module under test)."""
    def test_plane_wave_hamiltonian_integration(self):
        """Plane-wave and dual-basis forms of the same system must both be
        Hermitian and share the same eigenspectrum."""
        length_set = [2, 3, 4]
        spinless_set = [True, False]
        length_scale = 1.1
        for geometry in [[('H', (0,)), ('H', (0.8,))], [('H', (0.1,))],
                         [('H', (0.1,))]]:
            for l in length_set:
                for spinless in spinless_set:
                    grid = Grid(dimensions=1, scale=length_scale, length=l)
                    # True -> plane-wave basis, False -> dual basis.
                    h_plane_wave = plane_wave_hamiltonian(
                        grid, geometry, spinless, True, include_constant=False)
                    h_dual_basis = plane_wave_hamiltonian(
                        grid, geometry, spinless, False, include_constant=False)
                    # Test for Hermiticity
                    plane_wave_operator = get_sparse_operator(h_plane_wave)
                    dual_operator = get_sparse_operator(h_dual_basis)
                    self.assertTrue(is_hermitian((plane_wave_operator)))
                    self.assertTrue(is_hermitian(dual_operator))
                    # The two bases are related by a unitary, so the spectra
                    # of the Jordan-Wigner transformed operators must agree.
                    jw_h_plane_wave = jordan_wigner(h_plane_wave)
                    jw_h_dual_basis = jordan_wigner(h_dual_basis)
                    h_plane_wave_spectrum = eigenspectrum(jw_h_plane_wave)
                    h_dual_basis_spectrum = eigenspectrum(jw_h_dual_basis)
                    max_diff = np.amax(h_plane_wave_spectrum -
                                       h_dual_basis_spectrum)
                    min_diff = np.amin(h_plane_wave_spectrum -
                                       h_dual_basis_spectrum)
                    self.assertAlmostEqual(max_diff, 0)
                    self.assertAlmostEqual(min_diff, 0)
    def test_plane_wave_hamiltonian_default_to_jellium_with_no_geometry(self):
        """Without a geometry, the Hamiltonian reduces to the jellium model."""
        grid = Grid(dimensions=1, scale=1.0, length=4)
        self.assertTrue(plane_wave_hamiltonian(grid) == jellium_model(grid))
    def test_plane_wave_hamiltonian_bad_geometry(self):
        """3-D coordinates on a 1-D grid must raise ValueError."""
        grid = Grid(dimensions=1, scale=1.0, length=4)
        with self.assertRaises(ValueError):
            plane_wave_hamiltonian(grid, geometry=[('H', (0, 0, 0))])
        with self.assertRaises(ValueError):
            plane_wave_hamiltonian(grid,
                                   geometry=[('H', (0, 0, 0))],
                                   include_constant=True)
    def test_plane_wave_hamiltonian_bad_element(self):
        """Unknown chemical elements must raise ValueError."""
        grid = Grid(dimensions=3, scale=1.0, length=4)
        with self.assertRaises(ValueError):
            plane_wave_hamiltonian(grid, geometry=[('Unobtainium', (0, 0, 0))])
    def test_jordan_wigner_dual_basis_hamiltonian(self):
        """Direct JW dual-basis construction must equal jordan_wigner() applied
        to the fermionic dual-basis Hamiltonian."""
        grid = Grid(dimensions=2, length=3, scale=1.)
        spinless_set = [True, False]
        geometry = [('H', (0, 0)), ('H', (0.5, 0.8))]
        for spinless in spinless_set:
            fermion_hamiltonian = plane_wave_hamiltonian(grid,
                                                         geometry,
                                                         spinless,
                                                         False,
                                                         include_constant=False)
            qubit_hamiltonian = jordan_wigner(fermion_hamiltonian)
            test_hamiltonian = jordan_wigner_dual_basis_hamiltonian(
                grid, geometry, spinless, include_constant=False)
            self.assertTrue(test_hamiltonian == qubit_hamiltonian)
    def test_jordan_wigner_dual_basis_hamiltonian_default_to_jellium(self):
        """Without a geometry, the JW dual-basis Hamiltonian is JW of jellium."""
        grid = Grid(dimensions=1, scale=1.0, length=4)
        self.assertTrue(
            jordan_wigner_dual_basis_hamiltonian(grid) == jordan_wigner(
                jellium_model(grid, plane_wave=False)))
    def test_jordan_wigner_dual_basis_hamiltonian_bad_geometry(self):
        """Mismatched coordinate dimension must raise ValueError."""
        grid = Grid(dimensions=1, scale=1.0, length=4)
        with self.assertRaises(ValueError):
            jordan_wigner_dual_basis_hamiltonian(grid,
                                                 geometry=[('H', (0, 0, 0))])
        with self.assertRaises(ValueError):
            jordan_wigner_dual_basis_hamiltonian(grid,
                                                 geometry=[('H', (0, 0, 0))],
                                                 include_constant=True)
    def test_jordan_wigner_dual_basis_hamiltonian_bad_element(self):
        """Unknown chemical elements must raise ValueError."""
        grid = Grid(dimensions=3, scale=1.0, length=4)
        with self.assertRaises(ValueError):
            jordan_wigner_dual_basis_hamiltonian(grid,
                                                 geometry=[('Unobtainium',
                                                            (0, 0, 0))])
    def test_plane_wave_energy_cutoff(self):
        """Applying an energy cutoff must visibly change the spectrum."""
        geometry = [('H', (0,)), ('H', (0.8,))]
        grid = Grid(dimensions=1, scale=1.1, length=5)
        e_cutoff = 50.0
        h_1 = plane_wave_hamiltonian(grid, geometry, True, True, False)
        jw_1 = jordan_wigner(h_1)
        spectrum_1 = eigenspectrum(jw_1)
        h_2 = plane_wave_hamiltonian(grid, geometry, True, True, False,
                                     e_cutoff)
        jw_2 = jordan_wigner(h_2)
        spectrum_2 = eigenspectrum(jw_2)
        max_diff = np.amax(np.absolute(spectrum_1 - spectrum_2))
        self.assertGreater(max_diff, 0.)
    def test_plane_wave_period_cutoff(self):
        """Applying a period cutoff must visibly change the spectrum."""
        # TODO: After figuring out the correct formula for period cutoff for
        # dual basis, change period_cutoff to default, and change
        # h_1 to also accept period_cutoff for real integration test.
        geometry = [('H', (0,)), ('H', (0.8,))]
        grid = Grid(dimensions=1, scale=1.1, length=5)
        period_cutoff = 50.0
        h_1 = plane_wave_hamiltonian(grid, geometry, True, True, False, None)
        jw_1 = jordan_wigner(h_1)
        spectrum_1 = eigenspectrum(jw_1)
        h_2 = plane_wave_hamiltonian(grid, geometry, True, True, False, None,
                                     True, period_cutoff)
        jw_2 = jordan_wigner(h_2)
        spectrum_2 = eigenspectrum(jw_2)
        max_diff = np.amax(np.absolute(spectrum_1 - spectrum_2))
        self.assertGreater(max_diff, 0.)
        # TODO: This is only for code coverage. Remove after having real
        # integration test.
        plane_wave_hamiltonian(grid, geometry, True, True, False, None, True)
| 3,773 |
579 | #include "rpc.h"
namespace erpc {
// Transmit as many request packets for this sslot as the session's credit
// budget allows, either directly to the TX burst queue or through the timing
// wheel when pacing cannot be bypassed.
template <class TTr>
void Rpc<TTr>::kick_req_st(SSlot *sslot) {
  assert(in_dispatch());  // Must run on the dispatch thread
  auto &credits = sslot->session_->client_info_.credits_;
  assert(credits > 0);  // Precondition
  auto &ci = sslot->client_info_;
  // Send up to `credits` of the not-yet-transmitted request packets.
  size_t sending =
      (std::min)(credits, sslot->tx_msgbuf_->num_pkts_ - ci.num_tx_);
  bool bypass = can_bypass_wheel(sslot);
  for (size_t x = 0; x < sending; x++) {
    if (bypass) {
      // Direct TX; tx_ts_ slot is reused modulo the credit window size.
      enqueue_pkt_tx_burst_st(sslot, ci.num_tx_ /* pkt_idx */,
                              &ci.tx_ts_[ci.num_tx_ % kSessionCredits]);
    } else {
      enqueue_wheel_req_st(sslot, ci.num_tx_);
    }
    ci.num_tx_++;
    credits--;  // Each in-flight packet consumes one session credit
  }
}
// We're asked to send RFRs, which means that we have received the first
// response packet, but not the entire response. The latter implies that a
// background continuation cannot invalidate resp_msgbuf.
template <class TTr>
void Rpc<TTr>::kick_rfr_st(SSlot *sslot) {
  assert(in_dispatch());  // Must run on the dispatch thread
  auto &credits = sslot->session_->client_info_.credits_;
  auto &ci = sslot->client_info_;
  assert(credits > 0);  // Precondition
  // Preconditions: full request received by server, full response not yet in.
  assert(ci.num_rx_ >= sslot->tx_msgbuf_->num_pkts_);
  assert(ci.num_rx_ < wire_pkts(sslot->tx_msgbuf_, ci.resp_msgbuf_));
  // TODO: Pace RFRs
  // Send up to `credits` of the remaining request-for-response packets.
  size_t rfr_pndng = wire_pkts(sslot->tx_msgbuf_, ci.resp_msgbuf_) - ci.num_tx_;
  size_t sending = (std::min)(credits, rfr_pndng);  // > 0
  for (size_t x = 0; x < sending; x++) {
    enqueue_rfr_st(sslot, ci.resp_msgbuf_->get_pkthdr_0());
    ci.num_tx_++;
    credits--;
  }
}
FORCE_COMPILE_TRANSPORTS
} // namespace erpc
| 713 |
424 | <reponame>fasiha/halflife-regression
"""
Copyright (c) 2016 Duolingo Inc. MIT Licence.
Python script that implements spaced repetition models from Settles & Meeder (2016).
Recommended to run with pypy for efficiency. See README.
"""
import argparse
import csv
import gzip
import math
import os
import random
import sys
from collections import defaultdict, namedtuple
# various constraints on parameters and outputs
MIN_HALF_LIFE = 15.0 / (24 * 60)    # 15 minutes, expressed in days
MAX_HALF_LIFE = 274.                # 9 months, expressed in days
LN2 = math.log(2.)                  # ln(2); appears in the half-life gradients
# data instance object
#   p: observed recall rate (clipped); t: days since last practice;
#   fv: feature vector as (name, value) pairs; h: half-life implied by p and t;
#   a: Laplace-smoothed historical accuracy; lang: "ui->learning" pair;
#   right/wrong: correct/incorrect counts in this session; ts: Unix timestamp;
#   uid: user id; lexeme: lexeme tag string.
Instance = namedtuple('Instance', 'p t fv h a lang right wrong ts uid lexeme'.split())
class SpacedRepetitionModel(object):
    """
    Spaced repetition model. Implements the following approaches:
      - 'hlr' (half-life regression; trainable)
      - 'lr' (logistic regression; trainable)
      - 'leitner' (fixed)
      - 'pimsleur' (fixed)
    """
    def __init__(self, method='hlr', omit_h_term=False, initial_weights=None, lrate=.001, hlwt=.01, l2wt=.1, sigma=1.):
        # method         : one of 'hlr', 'lr', 'leitner', 'pimsleur'
        # omit_h_term    : if True, skip the half-life loss term in HLR updates
        # initial_weights: optional dict of starting feature weights
        # lrate          : base learning rate
        # hlwt           : weight of the half-life loss term
        # l2wt           : L2 regularization weight
        # sigma          : prior scale used in the L2 penalty
        self.method = method
        self.omit_h_term = omit_h_term
        self.weights = defaultdict(float)
        if initial_weights is not None:
            self.weights.update(initial_weights)
        # per-feature update counts, for AdaGrad-style learning-rate decay
        self.fcounts = defaultdict(int)
        self.lrate = lrate
        self.hlwt = hlwt
        self.l2wt = l2wt
        self.sigma = sigma
    def halflife(self, inst, base):
        """Predicted half-life in days: hclip(base ** (weights . inst.fv))."""
        try:
            dp = sum([self.weights[k]*x_k for (k, x_k) in inst.fv])
            return hclip(base ** dp)
        except OverflowError:
            # BUGFIX: this was a bare `except:`, which silently swallowed every
            # exception (including KeyboardInterrupt). Only the power operation
            # can legitimately fail here, by overflowing for large dot products
            # -- matching the OverflowError handling in predict() below.
            return MAX_HALF_LIFE
    def predict(self, inst, base=2.):
        """Return (recall probability, half-life in days) for an Instance."""
        if self.method == 'hlr':
            h = self.halflife(inst, base)
            p = 2. ** (-inst.t/h)
            return pclip(p), h
        elif self.method == 'leitner':
            try:
                h = hclip(2. ** inst.fv[0][1])
            except OverflowError:
                h = MAX_HALF_LIFE
            p = 2. ** (-inst.t/h)
            return pclip(p), h
        elif self.method == 'pimsleur':
            try:
                h = hclip(2. ** (2.35*inst.fv[0][1] - 16.46))
            except OverflowError:
                h = MAX_HALF_LIFE
            p = 2. ** (-inst.t/h)
            return pclip(p), h
        elif self.method == 'lr':
            dp = sum([self.weights[k]*x_k for (k, x_k) in inst.fv])
            p = 1./(1+math.exp(-dp))
            # logistic regression has no half-life notion; return a random
            # placeholder so downstream h-metrics remain computable
            return pclip(p), random.random()
        else:
            raise Exception
    def train_update(self, inst):
        """Perform one stochastic gradient step on a single Instance."""
        if self.method == 'hlr':
            base = 2.
            p, h = self.predict(inst, base)
            # gradients of the squared p- and h-losses w.r.t. the weights
            dlp_dw = 2.*(p-inst.p)*(LN2**2)*p*(inst.t/h)
            dlh_dw = 2.*(h-inst.h)*LN2*h
            for (k, x_k) in inst.fv:
                rate = (1./(1+inst.p)) * self.lrate / math.sqrt(1 + self.fcounts[k])
                # rate = self.lrate / math.sqrt(1 + self.fcounts[k])
                # sl(p) update
                self.weights[k] -= rate * dlp_dw * x_k
                # sl(h) update
                if not self.omit_h_term:
                    self.weights[k] -= rate * self.hlwt * dlh_dw * x_k
                # L2 regularization update
                self.weights[k] -= rate * self.l2wt * self.weights[k] / self.sigma**2
                # increment feature count for learning rate
                self.fcounts[k] += 1
        elif self.method == 'leitner' or self.method == 'pimsleur':
            pass  # fixed models: nothing to learn
        elif self.method == 'lr':
            p, _ = self.predict(inst)
            err = p - inst.p
            for (k, x_k) in inst.fv:
                # rate = (1./(1+inst.p)) * self.lrate / math.sqrt(1 + self.fcounts[k])
                rate = self.lrate / math.sqrt(1 + self.fcounts[k])
                # error update
                self.weights[k] -= rate * err * x_k
                # L2 regularization update
                self.weights[k] -= rate * self.l2wt * self.weights[k] / self.sigma**2
                # increment feature count for learning rate
                self.fcounts[k] += 1
    def train(self, trainset):
        """One SGD pass over trainset (shuffled IN PLACE); no-op for fixed models."""
        if self.method == 'leitner' or self.method == 'pimsleur':
            return
        random.shuffle(trainset)
        for inst in trainset:
            self.train_update(inst)
    def losses(self, inst):
        """Return (squared p-loss, squared h-loss, predicted p, predicted h)."""
        p, h = self.predict(inst)
        slp = (inst.p - p)**2
        slh = (inst.h - h)**2
        return slp, slh, p, h
    def eval(self, testset, prefix=''):
        """Write summary metrics (total loss, MAE and correlation of p and h)
        for testset to stderr."""
        results = {'p': [], 'h': [], 'pp': [], 'hh': [], 'slp': [], 'slh': []}
        for inst in testset:
            slp, slh, p, h = self.losses(inst)
            results['p'].append(inst.p)     # ground truth
            results['h'].append(inst.h)
            results['pp'].append(p)         # predictions
            results['hh'].append(h)
            results['slp'].append(slp)      # loss function values
            results['slh'].append(slh)
        mae_p = mae(results['p'], results['pp'])
        mae_h = mae(results['h'], results['hh'])
        cor_p = spearmanr(results['p'], results['pp'])
        cor_h = spearmanr(results['h'], results['hh'])
        total_slp = sum(results['slp'])
        total_slh = sum(results['slh'])
        total_l2 = sum([x**2 for x in self.weights.values()])
        total_loss = total_slp + self.hlwt*total_slh + self.l2wt*total_l2
        if prefix:
            sys.stderr.write('%s\t' % prefix)
        sys.stderr.write('%.1f (p=%.1f, h=%.1f, l2=%.1f)\tmae(p)=%.3f\tcor(p)=%.3f\tmae(h)=%.3f\tcor(h)=%.3f\n' % \
            (total_loss, total_slp, self.hlwt*total_slh, self.l2wt*total_l2, \
            mae_p, cor_p, mae_h, cor_h))
    def dump_weights(self, fname):
        """Write one `feature<TAB>weight` line per learned feature."""
        with open(fname, 'wb') as f:
            # portability fix: items() works on both Python 2 and 3
            # (iteritems() is Python 2 only)
            for (k, v) in self.weights.items():
                f.write('%s\t%.4f\n' % (k, v))
    def dump_predictions(self, fname, testset):
        """Write a TSV of true vs. predicted p and h for each test Instance.
        NOTE(review): file is opened 'wb' but written with str; Python 2 style."""
        with open(fname, 'wb') as f:
            f.write('p\tpp\th\thh\tlang\tuser_id\ttimestamp\n')
            for inst in testset:
                pp, hh = self.predict(inst)
                f.write('%.4f\t%.4f\t%.4f\t%.4f\t%s\t%s\t%d\n' % (inst.p, pp, inst.h, hh, inst.lang, inst.uid, inst.ts))
    def dump_detailed_predictions(self, fname, testset):
        """Like dump_predictions, but one row per individual recall outcome
        (inst.right rows labeled 1.0 and inst.wrong rows labeled 0.0)."""
        with open(fname, 'wb') as f:
            f.write('p\tpp\th\thh\tlang\tuser_id\ttimestamp\tlexeme_tag\n')
            for inst in testset:
                pp, hh = self.predict(inst)
                for i in range(inst.right):
                    f.write('1.0\t%.4f\t%.4f\t%.4f\t%s\t%s\t%d\t%s\n' % (pp, inst.h, hh, inst.lang, inst.uid, inst.ts, inst.lexeme))
                for i in range(inst.wrong):
                    f.write('0.0\t%.4f\t%.4f\t%.4f\t%s\t%s\t%d\t%s\n' % (pp, inst.h, hh, inst.lang, inst.uid, inst.ts, inst.lexeme))
def pclip(p):
    """Clamp a recall probability into [0.0001, 0.9999] for loss stability."""
    if p < 0.0001:
        return 0.0001
    if p > .9999:
        return .9999
    return p
def hclip(h):
    """Clamp a half-life estimate into [MIN_HALF_LIFE, MAX_HALF_LIFE] days."""
    if h < MIN_HALF_LIFE:
        return MIN_HALF_LIFE
    if h > MAX_HALF_LIFE:
        return MAX_HALF_LIFE
    return h
def mae(l1, l2):
    """Mean absolute error between two equal-length sequences, as a float."""
    total = sum(abs(l1[i] - l2[i]) for i in range(len(l1)))
    return float(total) / len(l1)
def mean(lst):
    """Arithmetic mean of a non-empty list, always returned as a float."""
    return sum(lst) / float(len(lst))
def spearmanr(l1, l2):
    """Correlation of two equal-length sequences.

    NOTE(review): despite the name, this computes a Pearson-style correlation
    on the raw values, not on ranks -- confirm whether true Spearman is wanted.
    """
    m1 = sum(l1) / float(len(l1))
    m2 = sum(l2) / float(len(l2))
    num = 0.
    d1 = 0.
    d2 = 0.
    for a, b in zip(l1, l2):
        num += (a - m1) * (b - m2)
        d1 += (a - m1) ** 2
        d2 += (b - m2) ** 2
    return num / math.sqrt(d1 * d2)
def read_data(input_file, method, omit_bias=False, omit_lexemes=False, max_lines=None):
    # read learning trace data in specified format, see README for details
    # Returns (trainset, testset): a 90/10 split, in file order, of Instance
    # tuples built from the CSV rows.
    # NOTE(review): uses the Python 2 builtin `intern`; Python 3 would need
    # sys.intern here.
    sys.stderr.write('reading data...')
    instances = list()
    if input_file.endswith('gz'):
        f = gzip.open(input_file, 'rb')
    else:
        f = open(input_file, 'rb')
    reader = csv.DictReader(f)
    for i, row in enumerate(reader):
        if max_lines is not None and i >= max_lines:
            break
        p = pclip(float(row['p_recall']))  # observed recall rate, clipped
        t = float(row['delta'])/(60*60*24) # convert time delta to days
        h = hclip(-t/(math.log(p, 2)))     # invert p = 2^(-t/h) for half-life
        lang = '%s->%s' % (row['ui_language'], row['learning_language'])
        lexeme_id = row['lexeme_id']       # read but currently unused
        lexeme_string = row['lexeme_string']
        timestamp = int(row['timestamp'])
        user_id = row['user_id']
        seen = int(row['history_seen'])
        right = int(row['history_correct'])
        wrong = seen - right
        right_this = int(row['session_correct'])
        wrong_this = int(row['session_seen']) - right_this
        # feature vector is a list of (feature, value) tuples
        fv = []
        # core features based on method
        if method == 'leitner':
            fv.append((intern('diff'), right-wrong))
        elif method == 'pimsleur':
            fv.append((intern('total'), right+wrong))
        else:
            # fv.append((intern('right'), right))
            # fv.append((intern('wrong'), wrong))
            fv.append((intern('right'), math.sqrt(1+right)))
            fv.append((intern('wrong'), math.sqrt(1+wrong)))
        # optional flag features
        if method == 'lr':
            fv.append((intern('time'), t))
        if not omit_bias:
            fv.append((intern('bias'), 1.))
        if not omit_lexemes:
            fv.append((intern('%s:%s' % (row['learning_language'], lexeme_string)), 1.))
        # (right+2.)/(seen+4.) is a Laplace-smoothed historical accuracy
        instances.append(Instance(p, t, fv, h, (right+2.)/(seen+4.), lang, right_this, wrong_this, timestamp, user_id, lexeme_string))
        if i % 1000000 == 0:
            sys.stderr.write('%d...' % i)  # progress marker every 1M rows
    sys.stderr.write('done!\n')
    splitpoint = int(0.9 * len(instances))
    return instances[:splitpoint], instances[splitpoint:]
# Command-line interface; parsed in the __main__ block below.
argparser = argparse.ArgumentParser(description='Fit a SpacedRepetitionModel to data.')
argparser.add_argument('-b', action="store_true", default=False, help='omit bias feature')
argparser.add_argument('-l', action="store_true", default=False, help='omit lexeme features')
argparser.add_argument('-t', action="store_true", default=False, help='omit half-life term')
argparser.add_argument('-m', action="store", dest="method", default='hlr', help="hlr, lr, leitner, pimsleur")
argparser.add_argument('-x', action="store", dest="max_lines", type=int, default=None, help="maximum number of lines to read (for dev)")
argparser.add_argument('input_file', action="store", help='log file for training')
if __name__ == "__main__":
    # Entry point: parse args, read data, train, evaluate, write results.
    args = argparser.parse_args()
    # model diagnostics
    sys.stderr.write('method = "%s"\n' % args.method)
    if args.b:
        sys.stderr.write('--> omit_bias\n')
    if args.l:
        sys.stderr.write('--> omit_lexemes\n')
    if args.t:
        sys.stderr.write('--> omit_h_term\n')
    # read data set
    trainset, testset = read_data(args.input_file, args.method, args.b, args.l, args.max_lines)
    sys.stderr.write('|train| = %d\n' % len(trainset))
    sys.stderr.write('|test| = %d\n' % len(testset))
    # train model & print preliminary evaluation info
    model = SpacedRepetitionModel(method=args.method, omit_h_term=args.t)
    model.train(trainset)
    model.eval(testset, 'test')
    # write out model weights and predictions
    # output basename encodes the method, enabled boolean flags, and input name
    # NOTE(review): vars(args).iteritems() is Python 2 only; items() on Python 3
    filebits = [args.method] + \
        [k for k, v in sorted(vars(args).iteritems()) if v is True] + \
        [os.path.splitext(os.path.basename(args.input_file).replace('.gz', ''))[0]]
    if args.max_lines is not None:
        filebits.append(str(args.max_lines))
    filebase = '.'.join(filebits)
    if not os.path.exists('results/'):
        os.makedirs('results/')
    model.dump_weights('results/'+filebase+'.weights')
    model.dump_predictions('results/'+filebase+'.preds', testset)
    # model.dump_detailed_predictions('results/'+filebase+'.detailed', testset)
2,843 | package com.yc.businessinterface;
/**
 * <pre>
 * @author yangchong
 * email : <EMAIL>
 * time : 2020/6/20
 * desc : interface for user-related operations
 * revise:
 * </pre>
 */
public interface IUserManager{
    /**
     * Get the user's token.
     * @return the token
     */
    String getToken();
    /**
     * Get the user ID.
     * @return the user ID
     */
    String getUid();
}
| 207 |
639 | <filename>Example/Pods/HZExtend/HZExtend/Classes/Foundation/NSString+HZExtend.h
//
// NSString+HZExtend.h
// ZHFramework
//
// Created by xzh. on 15/7/20.
// Copyright (c) 2015年 xzh. All rights reserved.
//
#import <Foundation/Foundation.h>
#import "NSObject+HZExtend.h"
NS_ASSUME_NONNULL_BEGIN
@interface NSString (HZExtend)
/**
 *  Returns the MD5 hash of the receiver.
 */
- (NSString *)md5;
/************ URL / query-string helpers ************/
- (NSString *)urlEncode;
- (NSString *)urlDecode;
/**
 *  The accessors below are illustrated with the example URL
 *  https://github.com/GeniusBrother/HZExtend?author=GeniusBrother
 */
- (NSString *)scheme; //https
- (NSString *)host; //github.com
- (NSString *)allPath; //https://github.com/GeniusBrother/HZExtend
- (NSString *)path; ///GeniusBrother/HZExtend
- (NSString *)keyValues; //author=GeniusBrother
- (NSDictionary *)queryDic; //@{@"author":@"GeniusBrother"}
@end
NS_ASSUME_NONNULL_END | 371 |
575 | <filename>chrome/android/javatests/src/org/chromium/chrome/browser/customtabs/DetachedResourceRequestTest.java
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.chrome.browser.customtabs;
import static org.chromium.components.content_settings.PrefNames.COOKIE_CONTROLS_MODE;
import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.net.Uri;
import android.os.Bundle;
import android.support.test.InstrumentationRegistry;
import androidx.browser.customtabs.CustomTabsCallback;
import androidx.browser.customtabs.CustomTabsIntent;
import androidx.browser.customtabs.CustomTabsService;
import androidx.browser.customtabs.CustomTabsSession;
import androidx.browser.customtabs.CustomTabsSessionToken;
import androidx.test.filters.SmallTest;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.chromium.base.task.PostTask;
import org.chromium.base.test.util.CallbackHelper;
import org.chromium.base.test.util.CriteriaHelper;
import org.chromium.base.test.util.MetricsUtils.HistogramDelta;
import org.chromium.chrome.browser.MockSafeBrowsingApiHandler;
import org.chromium.chrome.browser.browserservices.verification.OriginVerifier;
import org.chromium.chrome.browser.document.ChromeLauncherActivity;
import org.chromium.chrome.browser.firstrun.FirstRunStatus;
import org.chromium.chrome.browser.flags.ChromeFeatureList;
import org.chromium.chrome.browser.profiles.Profile;
import org.chromium.chrome.browser.tab.Tab;
import org.chromium.chrome.test.ChromeJUnit4ClassRunner;
import org.chromium.chrome.test.util.browser.Features.DisableFeatures;
import org.chromium.chrome.test.util.browser.Features.EnableFeatures;
import org.chromium.components.content_settings.CookieControlsMode;
import org.chromium.components.embedder_support.util.Origin;
import org.chromium.components.prefs.PrefService;
import org.chromium.components.safe_browsing.SafeBrowsingApiBridge;
import org.chromium.components.user_prefs.UserPrefs;
import org.chromium.content_public.browser.UiThreadTaskTraits;
import org.chromium.content_public.browser.WebContents;
import org.chromium.content_public.browser.test.util.JavaScriptUtils;
import org.chromium.content_public.browser.test.util.TestThreadUtils;
import org.chromium.net.NetError;
import org.chromium.net.test.EmbeddedTestServer;
import org.chromium.net.test.ServerCertificate;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeoutException;
/** Tests for detached resource requests. */
@RunWith(ChromeJUnit4ClassRunner.class)
@EnableFeatures(ChromeFeatureList.SAFE_BROWSING_DELAYED_WARNINGS)
public class DetachedResourceRequestTest {
@Rule
public CustomTabActivityTestRule mCustomTabActivityTestRule = new CustomTabActivityTestRule();
private CustomTabsConnection mConnection; // connection under test, recreated in setUp()
private Context mContext;                 // application context
private EmbeddedTestServer mServer;       // started lazily by tests that need HTTP(S)
private static final Uri ORIGIN = Uri.parse("http://cats.google.com"); // verified client origin
private static final int NET_OK = 0;      // network success code
// Marks the first-run flow complete (so it cannot block custom tabs), creates a
// fresh connection, and clears cached origin verifications for test isolation.
@Before
public void setUp() {
    TestThreadUtils.runOnUiThreadBlocking(() -> FirstRunStatus.setFirstRunFlowComplete(true));
    mConnection = CustomTabsTestUtils.setUpConnection();
    mContext = InstrumentationRegistry.getInstrumentation()
                       .getTargetContext()
                       .getApplicationContext();
    TestThreadUtils.runOnUiThreadBlocking(OriginVerifier::clearCachedVerificationsForTesting);
}
// Cleans up sessions and stops the embedded test server if a test started one.
@After
public void tearDown() {
    CustomTabsTestUtils.cleanupSessions(mConnection);
    if (mServer != null) mServer.stopAndDestroyServer();
    mServer = null;
}
// Parallel requests are rejected until the client's origin has a verification
// override registered for the session.
@Test
@SmallTest
public void testCanDoParallelRequest() {
    CustomTabsSessionToken session = CustomTabsSessionToken.createMockSessionTokenForTesting();
    Assert.assertTrue(mConnection.newSession(session));
    // Not verified yet: parallel request must be refused.
    TestThreadUtils.runOnUiThreadBlocking(
            () -> Assert.assertFalse(mConnection.canDoParallelRequest(session, ORIGIN)));
    TestThreadUtils.runOnUiThreadBlocking(() -> {
        String packageName = mContext.getPackageName();
        OriginVerifier.addVerificationOverride(packageName, Origin.create(ORIGIN.toString()),
                CustomTabsService.RELATION_USE_AS_ORIGIN);
        Assert.assertTrue(mConnection.canDoParallelRequest(session, ORIGIN));
    });
}
// Resource prefetch additionally requires warmup and the per-session
// allow-resource-prefetch flag, on top of origin verification.
@Test
@SmallTest
@EnableFeatures(ChromeFeatureList.CCT_RESOURCE_PREFETCH)
public void testCanDoResourcePrefetch() throws Exception {
    CustomTabsSessionToken session = CustomTabsSessionToken.createMockSessionTokenForTesting();
    Assert.assertTrue(mConnection.newSession(session));
    TestThreadUtils.runOnUiThreadBlocking(() -> {
        String packageName = mContext.getPackageName();
        OriginVerifier.addVerificationOverride(packageName, Origin.create(ORIGIN.toString()),
                CustomTabsService.RELATION_USE_AS_ORIGIN);
    });
    Intent intent = prepareIntentForResourcePrefetch(
            Arrays.asList(Uri.parse("https://foo.bar")), ORIGIN);
    // No warmup yet: nothing is prefetched.
    TestThreadUtils.runOnUiThreadBlocking(
            () -> Assert.assertEquals(0, mConnection.maybePrefetchResources(session, intent)));
    CustomTabsTestUtils.warmUpAndWait();
    // Warmed up, but prefetch is not yet allowed for this session.
    TestThreadUtils.runOnUiThreadBlocking(
            () -> Assert.assertEquals(0, mConnection.maybePrefetchResources(session, intent)));
    mConnection.mClientManager.setAllowResourcePrefetchForSession(session, true);
    TestThreadUtils.runOnUiThreadBlocking(
            () -> Assert.assertEquals(1, mConnection.maybePrefetchResources(session, intent)));
}
// Validates handleParallelRequest() input checks and that each outcome is
// recorded in the CustomTabs.ParallelRequestStatusOnStart histogram.
@Test
@SmallTest
public void testStartParallelRequestValidation() throws Exception {
    CustomTabsSessionToken session = prepareSession();
    CustomTabsTestUtils.warmUpAndWait();
    TestThreadUtils.runOnUiThreadBlocking(() -> {
        // Missing extras: no request is made.
        int expected = CustomTabsConnection.ParallelRequestStatus.NO_REQUEST;
        HistogramDelta histogram =
                new HistogramDelta("CustomTabs.ParallelRequestStatusOnStart", expected);
        Assert.assertEquals(expected, mConnection.handleParallelRequest(session, new Intent()));
        Assert.assertEquals(1, histogram.getDelta());
        expected = CustomTabsConnection.ParallelRequestStatus.FAILURE_INVALID_URL;
        histogram = new HistogramDelta("CustomTabs.ParallelRequestStatusOnStart", expected);
        Intent intent =
                prepareIntent(Uri.parse("android-app://this.is.an.android.app"), ORIGIN);
        Assert.assertEquals("Should not allow android-app:// scheme", expected,
                mConnection.handleParallelRequest(session, intent));
        Assert.assertEquals(1, histogram.getDelta());
        expected = CustomTabsConnection.ParallelRequestStatus.FAILURE_INVALID_URL;
        histogram = new HistogramDelta("CustomTabs.ParallelRequestStatusOnStart", expected);
        intent = prepareIntent(Uri.parse(""), ORIGIN);
        Assert.assertEquals("Should not allow an empty URL", expected,
                mConnection.handleParallelRequest(session, intent));
        Assert.assertEquals(1, histogram.getDelta());
        expected =
                CustomTabsConnection.ParallelRequestStatus.FAILURE_INVALID_REFERRER_FOR_SESSION;
        histogram = new HistogramDelta("CustomTabs.ParallelRequestStatusOnStart", expected);
        intent = prepareIntent(Uri.parse("HTTPS://foo.bar"), Uri.parse("wrong://origin"));
        Assert.assertEquals("Should not allow an arbitrary origin", expected,
                mConnection.handleParallelRequest(session, intent));
        // FIX: this histogram check was missing, unlike in every other case.
        Assert.assertEquals(1, histogram.getDelta());
        // Well-formed request from the verified origin succeeds.
        expected = CustomTabsConnection.ParallelRequestStatus.SUCCESS;
        histogram = new HistogramDelta("CustomTabs.ParallelRequestStatusOnStart", expected);
        intent = prepareIntent(Uri.parse("HTTPS://foo.bar"), ORIGIN);
        Assert.assertEquals(expected, mConnection.handleParallelRequest(session, intent));
        Assert.assertEquals(1, histogram.getDelta());
    });
}
// Only valid http(s) URLs count toward the number of prefetched resources,
// and a wrong referrer origin rejects the whole batch.
@Test
@SmallTest
@EnableFeatures(ChromeFeatureList.CCT_RESOURCE_PREFETCH)
public void testStartResourcePrefetchUrlsValidation() throws Exception {
    CustomTabsSessionToken session = prepareSession();
    CustomTabsTestUtils.warmUpAndWait();
    TestThreadUtils.runOnUiThreadBlocking(() -> {
        Assert.assertEquals(0, mConnection.maybePrefetchResources(session, new Intent()));
        ArrayList<Uri> urls = new ArrayList<>();
        Intent intent = prepareIntentForResourcePrefetch(urls, ORIGIN);
        Assert.assertEquals(0, mConnection.maybePrefetchResources(session, intent));
        // Invalid scheme and empty URL are ignored.
        urls.add(Uri.parse("android-app://this.is.an.android.app"));
        intent = prepareIntentForResourcePrefetch(urls, ORIGIN);
        Assert.assertEquals(0, mConnection.maybePrefetchResources(session, intent));
        urls.add(Uri.parse(""));
        intent = prepareIntentForResourcePrefetch(urls, ORIGIN);
        Assert.assertEquals(0, mConnection.maybePrefetchResources(session, intent));
        // Each valid https URL adds one prefetch.
        urls.add(Uri.parse("https://foo.bar"));
        intent = prepareIntentForResourcePrefetch(urls, ORIGIN);
        Assert.assertEquals(1, mConnection.maybePrefetchResources(session, intent));
        urls.add(Uri.parse("https://bar.foo"));
        intent = prepareIntentForResourcePrefetch(urls, ORIGIN);
        Assert.assertEquals(2, mConnection.maybePrefetchResources(session, intent));
        // Unverified referrer origin: everything is rejected.
        intent = prepareIntentForResourcePrefetch(urls, Uri.parse("wrong://origin"));
        Assert.assertEquals(0, mConnection.maybePrefetchResources(session, intent));
    });
}
// Full parallel-request flow, with native already initialized.
@Test
@SmallTest
@EnableFeatures(ChromeFeatureList.CCT_REPORT_PARALLEL_REQUEST_STATUS)
public void testCanStartParallelRequest() throws Exception {
    testCanStartParallelRequest(true);
}
// Same flow, but the request is issued before native initialization.
@Test
@SmallTest
@EnableFeatures(ChromeFeatureList.CCT_REPORT_PARALLEL_REQUEST_STATUS)
public void testCanStartParallelRequestBeforeNative() throws Exception {
    testCanStartParallelRequest(false);
}
// A parallel request with a non-verified referrer must report
// FAILURE_INVALID_REFERRER_FOR_SESSION through the client callback.
@Test
@SmallTest
@EnableFeatures(ChromeFeatureList.CCT_REPORT_PARALLEL_REQUEST_STATUS)
public void testParallelRequestFailureCallback() throws Exception {
    Uri url = Uri.parse("http://request-url");
    int status =
            CustomTabsConnection.ParallelRequestStatus.FAILURE_INVALID_REFERRER_FOR_SESSION;
    DetachedResourceRequestCheckCallback customTabsCallback =
            new DetachedResourceRequestCheckCallback(url, status, 0);
    CustomTabsSessionToken session = prepareSession(ORIGIN, customTabsCallback);
    CustomTabsTestUtils.warmUpAndWait();
    PostTask.runOrPostTask(UiThreadTaskTraits.DEFAULT, () -> {
        Assert.assertEquals(status,
                mConnection.handleParallelRequest(
                        session, prepareIntent(url, Uri.parse("http://not-the-right-origin"))));
    });
    customTabsCallback.waitForRequest(0, 1);
}
// A request that starts successfully but whose connection is dropped by the
// server (/close-socket) must report ERR_EMPTY_RESPONSE on completion.
@Test
@SmallTest
@EnableFeatures(ChromeFeatureList.CCT_REPORT_PARALLEL_REQUEST_STATUS)
public void testParallelRequestCompletionFailureCallback() throws Exception {
    final CallbackHelper cb = new CallbackHelper();
    setUpTestServerWithListener(new EmbeddedTestServer.ConnectionListener() {
        @Override
        public void readFromSocket(long socketId) {
            cb.notifyCalled();
        }
    });
    Uri url = Uri.parse(mServer.getURL("/close-socket"));
    DetachedResourceRequestCheckCallback customTabsCallback =
            new DetachedResourceRequestCheckCallback(url,
                    CustomTabsConnection.ParallelRequestStatus.SUCCESS,
                    Math.abs(NetError.ERR_EMPTY_RESPONSE));
    CustomTabsSessionToken session = prepareSession(ORIGIN, customTabsCallback);
    // Queue the request before warmup; it runs once native is ready.
    PostTask.runOrPostTask(UiThreadTaskTraits.DEFAULT,
            () -> mConnection.onHandledIntent(session, prepareIntent(url, ORIGIN)));
    CustomTabsTestUtils.warmUpAndWait();
    customTabsCallback.waitForRequest(0, 1);
    cb.waitForCallback(0, 1);
    customTabsCallback.waitForCompletion(0, 1);
}
// Each prefetched URL must produce exactly one socket read on the server.
@Test
@SmallTest
public void testCanStartResourcePrefetch() throws Exception {
    CustomTabsSessionToken session = prepareSession();
    CustomTabsTestUtils.warmUpAndWait();
    final CallbackHelper cb = new CallbackHelper();
    // We expect one read per prefetched url.
    setUpTestServerWithListener(new EmbeddedTestServer.ConnectionListener() {
        @Override
        public void readFromSocket(long socketId) {
            cb.notifyCalled();
        }
    });
    List<Uri> urls = Arrays.asList(Uri.parse(mServer.getURL("/echo-raw?a=1")),
            Uri.parse(mServer.getURL("/echo-raw?a=2")),
            Uri.parse(mServer.getURL("/echo-raw?a=3")));
    PostTask.runOrPostTask(UiThreadTaskTraits.DEFAULT, () -> {
        Assert.assertEquals(urls.size(),
                mConnection.maybePrefetchResources(
                        session, prepareIntentForResourcePrefetch(urls, ORIGIN)));
    });
    cb.waitForCallback(0, urls.size());
}
// Cookie-setting via a parallel request, with native already initialized.
@Test
@SmallTest
@EnableFeatures(ChromeFeatureList.CCT_REPORT_PARALLEL_REQUEST_STATUS)
public void testCanSetCookie() throws Exception {
    testCanSetCookie(true);
}
// Same cookie flow, but the request is issued before native initialization.
@Test
@SmallTest
@EnableFeatures(ChromeFeatureList.CCT_REPORT_PARALLEL_REQUEST_STATUS)
public void testCanSetCookieBeforeNative() throws Exception {
    testCanSetCookie(false);
}
/**
 * Tests that cached detached resource requests that are forbidden by SafeBrowsing don't end up
 * in the content area, for a main resource.
 */
@Test
@SmallTest
@DisableFeatures(ChromeFeatureList.SPLIT_CACHE_BY_NETWORK_ISOLATION_KEY)
public void testSafeBrowsingMainResource() throws Exception {
    // Native is initialized first; the HTTP cache is shared (split-cache off).
    testSafeBrowsingMainResource(true /* afterNative */, false /* splitCacheEnabled */);
}
/**
 * Tests that non-cached detached resource requests that are forbidden by SafeBrowsing don't end
 * up in the content area, for a main resource.
 */
@Test
@SmallTest
@EnableFeatures(ChromeFeatureList.SPLIT_CACHE_BY_NETWORK_ISOLATION_KEY)
public void testSafeBrowsingMainResourceWithSplitCache() throws Exception {
    // With the cache split by network isolation key, the entry is not shared.
    testSafeBrowsingMainResource(true /* afterNative */, true /* splitCacheEnabled */);
}
    /**
     * Tests that cached detached resource requests that are forbidden by SafeBrowsing don't end up
     * in the content area, for a subresource. Runs after native initialization.
     */
    @Test
    @SmallTest
    public void testSafeBrowsingSubresource() throws Exception {
        testSafeBrowsingSubresource(true);
    }
    /**
     * Tests that cached detached resource requests that are forbidden by SafeBrowsing don't end up
     * in the content area, for a main resource. Same as {@link #testSafeBrowsingMainResource()}
     * but with the detached request issued before native initialization.
     */
    @Test
    @SmallTest
    @DisableFeatures(ChromeFeatureList.SPLIT_CACHE_BY_NETWORK_ISOLATION_KEY)
    public void testSafeBrowsingMainResourceBeforeNative() throws Exception {
        testSafeBrowsingMainResource(false /* afterNative */, false /* splitCacheEnabled */);
    }
    /**
     * Tests that cached detached resource requests that are forbidden by SafeBrowsing don't end up
     * in the content area, for a subresource, when the detached request is issued before native
     * initialization.
     */
    @Test
    @SmallTest
    public void testSafeBrowsingSubresourceBeforeNative() throws Exception {
        testSafeBrowsingSubresource(false);
    }
    /**
     * With third-party cookie blocking enabled, a detached parallel request (which is third-party
     * relative to its referrer origin) must not be able to set a cookie: a subsequent page load
     * that echoes the Cookie header sees "None".
     */
    @Test
    @SmallTest
    public void testCanBlockThirdPartyCookies() throws Exception {
        CustomTabsSessionToken session = prepareSession();
        CustomTabsTestUtils.warmUpAndWait();
        mServer = EmbeddedTestServer.createAndStartHTTPSServer(mContext, ServerCertificate.CERT_OK);
        TestThreadUtils.runOnUiThreadBlocking(() -> {
            PrefService prefs = UserPrefs.get(Profile.getLastUsedRegularProfile());
            // Sanity-check the default, then switch to blocking all third-party cookies.
            Assert.assertEquals(
                    prefs.getInteger(COOKIE_CONTROLS_MODE), CookieControlsMode.INCOGNITO_ONLY);
            prefs.setInteger(COOKIE_CONTROLS_MODE, CookieControlsMode.BLOCK_THIRD_PARTY);
        });
        final Uri url = Uri.parse(mServer.getURL("/set-cookie?acookie;SameSite=none;Secure"));
        // The request itself succeeds; only the Set-Cookie response header should be ignored.
        TestThreadUtils.runOnUiThreadBlocking(() -> {
            Assert.assertEquals(CustomTabsConnection.ParallelRequestStatus.SUCCESS,
                    mConnection.handleParallelRequest(session, prepareIntent(url, ORIGIN)));
        });
        String echoUrl = mServer.getURL("/echoheader?Cookie");
        Intent intent = CustomTabsTestUtils.createMinimalCustomTabIntent(mContext, echoUrl);
        mCustomTabActivityTestRule.startCustomTabActivityWithIntent(intent);
        Tab tab = mCustomTabActivityTestRule.getActivity().getActivityTab();
        String content = JavaScriptUtils.executeJavaScriptAndWaitForResult(
                tab.getWebContents(), "document.body.textContent");
        Assert.assertEquals("\"None\"", content);
    }
    /**
     * Demonstrates upcoming restrictions on cookies in third party contexts: with
     * SameSite-by-default and "SameSite=None requires Secure" enabled, only a cookie that is both
     * SameSite=None and Secure survives a cross-site detached request.
     */
    @Test
    @SmallTest
    @EnableFeatures({ChromeFeatureList.SAME_SITE_BY_DEFAULT_COOKIES,
            ChromeFeatureList.COOKIES_WITHOUT_SAME_SITE_MUST_BE_SECURE})
    public void
    testUpcomingThirdPartyCookiePolicies() throws Exception {
        CustomTabsSessionToken session = prepareSession();
        CustomTabsTestUtils.warmUpAndWait();
        mServer = EmbeddedTestServer.createAndStartHTTPSServer(mContext, ServerCertificate.CERT_OK);
        // This isn't blocking third-party cookies by preferences.
        TestThreadUtils.runOnUiThreadBlocking(() -> {
            PrefService prefs = UserPrefs.get(Profile.getLastUsedRegularProfile());
            Assert.assertEquals(
                    prefs.getInteger(COOKIE_CONTROLS_MODE), CookieControlsMode.INCOGNITO_ONLY);
        });
        // Of the three cookies, only one that's both SameSite=None and Secure
        // is actually set. (And Secure is meant as the attribute, being over
        // https isn't enough).
        final Uri url = Uri.parse(
                mServer.getURL("/set-cookie?a=1&b=2;SameSite=None&c=3;SameSite=None;Secure;"));
        TestThreadUtils.runOnUiThreadBlocking(() -> {
            Assert.assertEquals(CustomTabsConnection.ParallelRequestStatus.SUCCESS,
                    mConnection.handleParallelRequest(session, prepareIntent(url, ORIGIN)));
        });
        // Load a page that echoes the Cookie header; only "c=3" should have been stored.
        String echoUrl = mServer.getURL("/echoheader?Cookie");
        Intent intent = CustomTabsTestUtils.createMinimalCustomTabIntent(mContext, echoUrl);
        mCustomTabActivityTestRule.startCustomTabActivityWithIntent(intent);
        Tab tab = mCustomTabActivityTestRule.getActivity().getActivityTab();
        String content = JavaScriptUtils.executeJavaScriptAndWaitForResult(
                tab.getWebContents(), "document.body.textContent");
        Assert.assertEquals("\"c=3\"", content);
    }
    /**
     * Third-party cookie blocking must not affect a detached request whose referrer origin matches
     * the request URL's origin (i.e. a first-party request): the cookie is set and visible to a
     * later page load.
     */
    @Test
    @SmallTest
    public void testThirdPartyCookieBlockingAllowsFirstParty() throws Exception {
        CustomTabsTestUtils.warmUpAndWait();
        mServer = EmbeddedTestServer.createAndStartServer(mContext);
        TestThreadUtils.runOnUiThreadBlocking(() -> {
            PrefService prefs = UserPrefs.get(Profile.getLastUsedRegularProfile());
            Assert.assertEquals(
                    prefs.getInteger(COOKIE_CONTROLS_MODE), CookieControlsMode.INCOGNITO_ONLY);
            prefs.setInteger(COOKIE_CONTROLS_MODE, CookieControlsMode.BLOCK_THIRD_PARTY);
        });
        final Uri url = Uri.parse(mServer.getURL("/set-cookie?acookie"));
        // Use the request URL's own origin as the session origin, making the request first-party.
        final Uri origin = Uri.parse(Origin.create(url).toString());
        CustomTabsSessionToken session = prepareSession(url);
        TestThreadUtils.runOnUiThreadBlocking(() -> {
            Assert.assertEquals(CustomTabsConnection.ParallelRequestStatus.SUCCESS,
                    mConnection.handleParallelRequest(session, prepareIntent(url, origin)));
        });
        String echoUrl = mServer.getURL("/echoheader?Cookie");
        Intent intent = CustomTabsTestUtils.createMinimalCustomTabIntent(mContext, echoUrl);
        mCustomTabActivityTestRule.startCustomTabActivityWithIntent(intent);
        Tab tab = mCustomTabActivityTestRule.getActivity().getActivityTab();
        String content = JavaScriptUtils.executeJavaScriptAndWaitForResult(
                tab.getWebContents(), "document.body.textContent");
        Assert.assertEquals("\"acookie\"", content);
    }
    /**
     * Launching the same VIEW intent (carrying parallel-request extras) twice must issue a
     * detached request each time; the callback observes two request/completion pairs.
     */
    @Test
    @SmallTest
    @EnableFeatures(ChromeFeatureList.CCT_REPORT_PARALLEL_REQUEST_STATUS)
    public void testRepeatedIntents() throws Exception {
        mServer = EmbeddedTestServer.createAndStartServer(mContext);
        final Uri url = Uri.parse(mServer.getURL("/set-cookie?acookie"));
        DetachedResourceRequestCheckCallback callback = new DetachedResourceRequestCheckCallback(
                url, CustomTabsConnection.ParallelRequestStatus.SUCCESS, NET_OK);
        CustomTabsSession session = CustomTabsTestUtils.bindWithCallback(callback).session;
        // Build a launchable CCT intent by hand so it can be fired via startActivity() twice.
        Uri launchedUrl = Uri.parse(mServer.getURL("/echotitle"));
        Intent intent = (new CustomTabsIntent.Builder(session).build()).intent;
        intent.setComponent(new ComponentName(mContext, ChromeLauncherActivity.class));
        intent.setAction(Intent.ACTION_VIEW);
        intent.setData(launchedUrl);
        intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
        intent.putExtra(CustomTabsConnection.PARALLEL_REQUEST_URL_KEY, url);
        intent.putExtra(CustomTabsConnection.PARALLEL_REQUEST_REFERRER_KEY, ORIGIN);
        CustomTabsSessionToken token = CustomTabsSessionToken.getSessionTokenFromIntent(intent);
        Assert.assertTrue(mConnection.newSession(token));
        mConnection.mClientManager.setAllowParallelRequestForSession(token, true);
        TestThreadUtils.runOnUiThreadBlocking(() -> {
            OriginVerifier.addVerificationOverride(mContext.getPackageName(),
                    Origin.create(ORIGIN.toString()), CustomTabsService.RELATION_USE_AS_ORIGIN);
            Assert.assertTrue(mConnection.canDoParallelRequest(token, ORIGIN));
        });
        mContext.startActivity(intent);
        callback.waitForRequest(0, 1);
        callback.waitForCompletion(0, 1);
        // Second launch of the identical intent should trigger a second detached request.
        mContext.startActivity(intent);
        callback.waitForRequest(1, 1);
        callback.waitForCompletion(1, 1);
    }
    /**
     * Shared body for the parallel-request tests: issues a detached request and checks that it
     * reaches the server and completes with NET_OK.
     *
     * @param afterNative whether native is warmed up before (true) or after (false) the request
     *         intent is handled.
     */
    private void testCanStartParallelRequest(boolean afterNative) throws Exception {
        final CallbackHelper cb = new CallbackHelper();
        setUpTestServerWithListener(new EmbeddedTestServer.ConnectionListener() {
            @Override
            public void readFromSocket(long socketId) {
                cb.notifyCalled();
            }
        });
        Uri url = Uri.parse(mServer.getURL("/echotitle"));
        DetachedResourceRequestCheckCallback customTabsCallback =
                new DetachedResourceRequestCheckCallback(
                        url, CustomTabsConnection.ParallelRequestStatus.SUCCESS, NET_OK);
        CustomTabsSessionToken session = prepareSession(ORIGIN, customTabsCallback);
        // The warm-up position relative to onHandledIntent() is the variable under test.
        if (afterNative) CustomTabsTestUtils.warmUpAndWait();
        PostTask.runOrPostTask(UiThreadTaskTraits.DEFAULT,
                () -> mConnection.onHandledIntent(session, prepareIntent(url, ORIGIN)));
        if (!afterNative) CustomTabsTestUtils.warmUpAndWait();
        customTabsCallback.waitForRequest(0, 1);
        cb.waitForCallback(0, 1);
        customTabsCallback.waitForCompletion(0, 1);
    }
    /**
     * Shared body for the cookie tests: a detached request to a Set-Cookie URL succeeds, and the
     * cookie is then visible to a regular page load in the Custom Tab.
     *
     * @param afterNative whether native is warmed up before (true) or after (false) the request
     *         intent is handled.
     */
    private void testCanSetCookie(boolean afterNative) throws Exception {
        mServer = EmbeddedTestServer.createAndStartHTTPSServer(mContext, ServerCertificate.CERT_OK);
        final Uri url = Uri.parse(mServer.getURL("/set-cookie?acookie;SameSite=none;Secure"));
        DetachedResourceRequestCheckCallback customTabsCallback =
                new DetachedResourceRequestCheckCallback(
                        url, CustomTabsConnection.ParallelRequestStatus.SUCCESS, NET_OK);
        CustomTabsSessionToken session = prepareSession(ORIGIN, customTabsCallback);
        if (afterNative) CustomTabsTestUtils.warmUpAndWait();
        TestThreadUtils.runOnUiThreadBlocking(
                () -> mConnection.onHandledIntent(session, prepareIntent(url, ORIGIN)));
        if (!afterNative) CustomTabsTestUtils.warmUpAndWait();
        customTabsCallback.waitForRequest(0, 1);
        customTabsCallback.waitForCompletion(0, 1);
        // The cookie set by the detached request must be echoed back on a normal navigation.
        String echoUrl = mServer.getURL("/echoheader?Cookie");
        Intent intent = CustomTabsTestUtils.createMinimalCustomTabIntent(mContext, echoUrl);
        mCustomTabActivityTestRule.startCustomTabActivityWithIntent(intent);
        Tab tab = mCustomTabActivityTestRule.getActivity().getActivityTab();
        String content = JavaScriptUtils.executeJavaScriptAndWaitForResult(
                tab.getWebContents(), "document.body.textContent");
        Assert.assertEquals("\"acookie\"", content);
    }
    /**
     * Shared body for the SafeBrowsing main-resource tests: a cacheable URL is fetched as a
     * detached request, then flagged by the mock SafeBrowsing handler; navigating to it must show
     * an interstitial, and the socket-read count tells us whether the cached response was shared.
     *
     * @param afterNative whether native is warmed up before the detached request.
     * @param splitCacheEnabled whether SplitCacheByNetworkIsolationKey is enabled for this run.
     */
    private void testSafeBrowsingMainResource(boolean afterNative, boolean splitCacheEnabled)
            throws Exception {
        SafeBrowsingApiBridge.setSafeBrowsingHandlerType(
                new MockSafeBrowsingApiHandler().getClass());
        CustomTabsSessionToken session = prepareSession();
        String cacheable = "/cachetime";
        CallbackHelper readFromSocketCallback =
                waitForDetachedRequest(session, cacheable, afterNative);
        Uri url = Uri.parse(mServer.getURL(cacheable));
        try {
            MockSafeBrowsingApiHandler.addMockResponse(
                    url.toString(), "{\"matches\":[{\"threat_type\":\"5\"}]}");
            Intent intent =
                    CustomTabsTestUtils.createMinimalCustomTabIntent(mContext, url.toString());
            mCustomTabActivityTestRule.startCustomTabActivityWithIntent(intent);
            Tab tab = mCustomTabActivityTestRule.getActivity().getActivityTab();
            // TODO(crbug.com/1039443): For now, we check the presence of an interstitial through
            // the title since isShowingInterstitialPage does not work with committed interstitials.
            // Once we fully migrate to committed interstitials, this should be changed to a more
            // robust check.
            CriteriaHelper.pollUiThread(
                    () -> tab.getWebContents().getTitle().equals("Security error"));
            if (splitCacheEnabled) {
                // Note that since the SplitCacheByNetworkIsolationKey feature is
                // enabled, the detached request and the original request both
                // would be read from the socket as they both would have different
                // top frame origins in the cache partitioning key: |ORIGIN| and
                // mServer's base url, respectively.
                Assert.assertEquals(2, readFromSocketCallback.getCallCount());
            } else {
                // 1 read from the detached request, and 0 from the page load, as
                // the response comes from the cache, and SafeBrowsing blocks it.
                Assert.assertEquals(1, readFromSocketCallback.getCallCount());
            }
        } finally {
            MockSafeBrowsingApiHandler.clearMockResponses();
        }
    }
    /**
     * Shared body for the SafeBrowsing subresource tests: a cacheable subresource fetched as a
     * detached request and then flagged by the mock SafeBrowsing handler must still trigger an
     * interstitial when a page embedding it is loaded.
     *
     * @param afterNative whether native is warmed up before the detached request.
     */
    private void testSafeBrowsingSubresource(boolean afterNative) throws Exception {
        SafeBrowsingApiBridge.setSafeBrowsingHandlerType(
                new MockSafeBrowsingApiHandler().getClass());
        CustomTabsSessionToken session = prepareSession();
        String cacheable = "/cachetime";
        waitForDetachedRequest(session, cacheable, afterNative);
        Uri url = Uri.parse(mServer.getURL(cacheable));
        try {
            MockSafeBrowsingApiHandler.addMockResponse(
                    url.toString(), "{\"matches\":[{\"threat_type\":\"5\"}]}");
            // Load a page that embeds the (now flagged) cacheable subresource.
            String pageUrl = mServer.getURL("/chrome/test/data/android/cacheable_subresource.html");
            Intent intent = CustomTabsTestUtils.createMinimalCustomTabIntent(mContext, pageUrl);
            mCustomTabActivityTestRule.startCustomTabActivityWithIntent(intent);
            Tab tab = mCustomTabActivityTestRule.getActivity().getActivityTab();
            WebContents webContents = tab.getWebContents();
            // TODO(crbug.com/1039443): For now, we check the presence of an interstitial through
            // the title since isShowingInterstitialPage does not work with committed interstitials.
            // Once we fully migrate to committed interstitials, this should be changed to a more
            // robust check.
            CriteriaHelper.pollUiThread(() -> webContents.getTitle().equals("Security error"));
        } finally {
            MockSafeBrowsingApiHandler.clearMockResponses();
        }
    }
    /** Prepares a session using the default {@code ORIGIN} and no callback. */
    private CustomTabsSessionToken prepareSession() throws Exception {
        return prepareSession(ORIGIN, null);
    }
    /** Prepares a session for the given origin with no callback. */
    private CustomTabsSessionToken prepareSession(Uri origin) throws Exception {
        return prepareSession(origin, null);
    }
    /**
     * Creates a Custom Tabs session that is allowed to issue parallel requests and resource
     * prefetches for {@code origin}, overriding origin verification for the test package.
     *
     * @param origin origin the session is verified for.
     * @param callback optional callback bound to the session (may be null).
     * @return the session token for the newly created session.
     */
    private CustomTabsSessionToken prepareSession(Uri origin, CustomTabsCallback callback)
            throws Exception {
        CustomTabsSession session = CustomTabsTestUtils.bindWithCallback(callback).session;
        Intent intent = (new CustomTabsIntent.Builder(session)).build().intent;
        CustomTabsSessionToken token = CustomTabsSessionToken.getSessionTokenFromIntent(intent);
        Assert.assertTrue(mConnection.newSession(token));
        mConnection.mClientManager.setAllowParallelRequestForSession(token, true);
        mConnection.mClientManager.setAllowResourcePrefetchForSession(token, true);
        TestThreadUtils.runOnUiThreadBlocking(() -> {
            // Bypass real origin verification so canDoParallelRequest() passes in the test.
            OriginVerifier.addVerificationOverride(mContext.getPackageName(),
                    Origin.create(origin.toString()), CustomTabsService.RELATION_USE_AS_ORIGIN);
            Assert.assertTrue(mConnection.canDoParallelRequest(token, origin));
        });
        return token;
    }
private void setUpTestServerWithListener(EmbeddedTestServer.ConnectionListener listener) {
mServer = new EmbeddedTestServer();
final CallbackHelper readFromSocketCallback = new CallbackHelper();
mServer.initializeNative(mContext, EmbeddedTestServer.ServerHTTPSSetting.USE_HTTP);
mServer.setConnectionListener(listener);
mServer.addDefaultHandlers("");
Assert.assertTrue(mServer.start());
}
    /**
     * Issues a detached request for {@code relativeUrl} and blocks until the test server has read
     * data for it.
     *
     * @param session session allowed to make parallel requests.
     * @param relativeUrl server-relative URL to request.
     * @param afterNative whether native is warmed up before (true) or after (false) the request.
     * @return the CallbackHelper counting socket reads, for further assertions by the caller.
     */
    private CallbackHelper waitForDetachedRequest(CustomTabsSessionToken session,
            String relativeUrl, boolean afterNative) throws TimeoutException {
        // Count the number of times data is read from the socket.
        // We expect 1 for the detached request.
        // Cannot count connections as Chrome opens multiple sockets at page load time.
        CallbackHelper readFromSocketCallback = new CallbackHelper();
        setUpTestServerWithListener(new EmbeddedTestServer.ConnectionListener() {
            @Override
            public void readFromSocket(long socketId) {
                readFromSocketCallback.notifyCalled();
            }
        });
        Uri url = Uri.parse(mServer.getURL(relativeUrl));
        if (afterNative) CustomTabsTestUtils.warmUpAndWait();
        TestThreadUtils.runOnUiThreadBlocking(
                () -> mConnection.onHandledIntent(session, prepareIntent(url, ORIGIN)));
        if (!afterNative) CustomTabsTestUtils.warmUpAndWait();
        readFromSocketCallback.waitForCallback(0);
        return readFromSocketCallback;
    }
private static Intent prepareIntent(Uri url, Uri referrer) {
Intent intent = new Intent();
intent.setData(Uri.parse("http://www.example.com"));
intent.putExtra(CustomTabsConnection.PARALLEL_REQUEST_URL_KEY, url);
intent.putExtra(CustomTabsConnection.PARALLEL_REQUEST_REFERRER_KEY, referrer);
return intent;
}
private static Intent prepareIntentForResourcePrefetch(List<Uri> urls, Uri referrer) {
Intent intent = new Intent();
intent.setData(Uri.parse("http://www.example.com"));
intent.putExtra(CustomTabsConnection.RESOURCE_PREFETCH_URL_LIST_KEY, new ArrayList<>(urls));
intent.putExtra(CustomTabsConnection.PARALLEL_REQUEST_REFERRER_KEY, referrer);
return intent;
}
    /**
     * CustomTabsCallback that validates the detached-request extra callbacks: it asserts the URL
     * and status reported for "request issued" and "request completed" events, and exposes
     * CallbackHelpers so tests can wait on either event.
     */
    private static class DetachedResourceRequestCheckCallback extends CustomTabsCallback {
        private final Uri mExpectedUrl;
        private final int mExpectedRequestStatus;
        private final int mExpectedFinalStatus;
        private final CallbackHelper mRequestedWaiter = new CallbackHelper();
        private final CallbackHelper mCompletionWaiter = new CallbackHelper();
        /**
         * @param expectedUrl URL the detached request is expected to target.
         * @param expectedRequestStatus expected ParallelRequestStatus when the request is issued.
         * @param expectedFinalStatus expected net error code on completion.
         */
        public DetachedResourceRequestCheckCallback(
                Uri expectedUrl, int expectedRequestStatus, int expectedFinalStatus) {
            super();
            mExpectedUrl = expectedUrl;
            mExpectedRequestStatus = expectedRequestStatus;
            mExpectedFinalStatus = expectedFinalStatus;
        }
        @Override
        public void extraCallback(String callbackName, Bundle args) {
            if (CustomTabsConnection.ON_DETACHED_REQUEST_REQUESTED.equals(callbackName)) {
                Uri url = args.getParcelable("url");
                int status = args.getInt("status");
                Assert.assertEquals(mExpectedUrl, url);
                Assert.assertEquals(mExpectedRequestStatus, status);
                mRequestedWaiter.notifyCalled();
            } else if (CustomTabsConnection.ON_DETACHED_REQUEST_COMPLETED.equals(callbackName)) {
                Uri url = args.getParcelable("url");
                int status = args.getInt("net_error");
                Assert.assertEquals(mExpectedUrl, url);
                Assert.assertEquals(mExpectedFinalStatus, status);
                mCompletionWaiter.notifyCalled();
            }
        }
        /** Blocks until the first "request issued" callback. */
        public void waitForRequest() throws TimeoutException {
            mRequestedWaiter.waitForFirst();
        }
        /** Blocks until {@code numberOfCallsToWaitFor} more "request issued" callbacks. */
        public void waitForRequest(int currentCallCount, int numberOfCallsToWaitFor)
                throws TimeoutException {
            mRequestedWaiter.waitForCallback(currentCallCount, numberOfCallsToWaitFor);
        }
        /** Blocks until the first "request completed" callback. */
        public void waitForCompletion() throws TimeoutException {
            mCompletionWaiter.waitForFirst();
        }
        /** Blocks until {@code numberOfCallsToWaitFor} more "request completed" callbacks. */
        public void waitForCompletion(int currentCallCount, int numberOfCallsToWaitFor)
                throws TimeoutException {
            mCompletionWaiter.waitForCallback(currentCallCount, numberOfCallsToWaitFor);
        }
    }
}
| 13,773 |
3,212 | <filename>nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-site-to-site/src/main/java/org/apache/nifi/remote/protocol/FlowFileTransaction.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.remote.protocol;
import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.util.StopWatch;
import java.util.Set;
/**
 * Immutable holder for the state of an in-flight site-to-site transfer: the session and context
 * it runs under, timing, and statistics about the FlowFiles sent so far.
 */
public class FlowFileTransaction {
    // Session the transferred FlowFiles belong to (null for the empty transaction).
    private final ProcessSession session;
    // Processor context associated with the transfer (null for the empty transaction).
    private final ProcessContext context;
    // Measures the duration of the transaction.
    private final StopWatch stopWatch;
    // Total bytes sent in this transaction.
    private final long bytesSent;
    // FlowFiles sent in this transaction (null for the empty transaction).
    private final Set<FlowFile> flowFilesSent;
    // CRC computed over the transferred content, used for integrity confirmation.
    private final String calculatedCRC;
    /** Creates an empty transaction with a started StopWatch and no session or data. */
    public FlowFileTransaction() {
        this(null, null, new StopWatch(true), 0, null, null);
    }
    /**
     * @param session session owning the transferred FlowFiles.
     * @param context processor context for the transfer.
     * @param stopWatch watch timing the transaction.
     * @param bytesSent total bytes transferred.
     * @param flowFilesSent FlowFiles transferred.
     * @param calculatedCRC CRC of the transferred content.
     */
    public FlowFileTransaction(ProcessSession session, ProcessContext context, StopWatch stopWatch, long bytesSent, Set<FlowFile> flowFilesSent, String calculatedCRC) {
        this.session = session;
        this.context = context;
        this.stopWatch = stopWatch;
        this.bytesSent = bytesSent;
        this.flowFilesSent = flowFilesSent;
        this.calculatedCRC = calculatedCRC;
    }
    public ProcessSession getSession() {
        return session;
    }
    public StopWatch getStopWatch() {
        return stopWatch;
    }
    public long getBytesSent() {
        return bytesSent;
    }
    public Set<FlowFile> getFlowFilesSent() {
        return flowFilesSent;
    }
    public String getCalculatedCRC() {
        return calculatedCRC;
    }
    public ProcessContext getContext() {
        return context;
    }
}
| 768 |
6,270 | [
{
"type": "feature",
"category": "APIGateway",
"description": "1. Extended GetDocumentationParts operation to support retrieving documentation parts resources without contents. 2. Added hosted zone ID in the custom domain response."
},
{
"type": "feature",
"category": "Polly",
"description": "Amazon Polly adds Korean language support with new female voice - \"Seoyeon\" and new Indian English female voice - \"Aditi\""
},
{
"type": "feature",
"category": "SES",
"description": "SES launches Configuration Set Reputation Metrics and Email Pausing Today, two features that build upon the capabilities of the reputation dashboard. The first is the ability to export reputation metrics for individual configuration sets. The second is the ability to temporarily pause email sending, either at the configuration set level, or across your entire Amazon SES account."
},
{
"type": "feature",
"category": "StepFunctions",
"description": "You can now use the UpdateStateMachine API to update your state machine definition and role ARN. Existing executions will continue to use the previous definition and role ARN. You can use the DescribeStateMachineForExecution API to determine which state machine definition and role ARN is associated with an execution"
}
] | 399 |
560 | /*
* Copyright (c) 2018 <NAME> <<EMAIL>>
* All Rights Reserved.
*/
package me.zhanghai.android.douya.functional.throwing;
import java.util.Objects;
import me.zhanghai.android.douya.functional.FunctionalException;
import me.zhanghai.android.douya.functional.extension.TriFunction;
/**
 * A {@link TriFunction} whose implementation may throw a checked exception. The checked variant
 * is {@link #applyThrows}; the inherited {@code apply} wraps any thrown exception in an
 * unchecked {@link FunctionalException} so instances can be used where the non-throwing
 * interface is expected.
 *
 * @param <T> first argument type
 * @param <U> second argument type
 * @param <V> third argument type
 * @param <R> result type
 */
@FunctionalInterface
public interface ThrowingTriFunction<T, U, V, R> extends TriFunction<T, U, V, R> {
    /** Applies this function, possibly throwing a checked exception. */
    R applyThrows(T t, U u, V v) throws Exception;
    /** Applies this function, rethrowing any exception wrapped in FunctionalException. */
    default R apply(T t, U u, V v) {
        try {
            return applyThrows(t, u, v);
        } catch (Exception e) {
            throw new FunctionalException(e);
        }
    }
    /** Returns a composed function that applies {@code after} to this function's result. */
    default <W> ThrowingTriFunction<T, U, V, W> andThen(ThrowingFunction<? super R, ? extends W> after) {
        Objects.requireNonNull(after);
        return (T t, U u, V v) -> after.applyThrows(applyThrows(t, u, v));
    }
}
| 349 |
763 | <filename>projects/batfish-common-protocol/src/test/java/org/batfish/common/matchers/Layer2TopologyMatchersImpl.java<gh_stars>100-1000
package org.batfish.common.matchers;
import javax.annotation.Nonnull;
import org.batfish.common.topology.Layer2Node;
import org.batfish.common.topology.Layer2Topology;
import org.hamcrest.Description;
import org.hamcrest.TypeSafeDiagnosingMatcher;
/** Implementations of Hamcrest matchers for {@link Layer2Topology}; not instantiable. */
final class Layer2TopologyMatchersImpl {
  /** Matches a Layer2Topology in which two given nodes share a broadcast domain. */
  static final class InSameBroadcastDomain extends TypeSafeDiagnosingMatcher<Layer2Topology> {
    private final Layer2Node _node1;
    private final Layer2Node _node2;
    InSameBroadcastDomain(@Nonnull Layer2Node node1, @Nonnull Layer2Node node2) {
      _node1 = node1;
      _node2 = node2;
    }
    @Override
    public void describeTo(Description description) {
      description.appendText(
          String.format(
              "A Layer2Topology with a broadcast domain containing nodes %s and %s",
              _node1, _node2));
    }
    @Override
    protected boolean matchesSafely(Layer2Topology item, Description mismatchDescription) {
      boolean matches = item.inSameBroadcastDomain(_node1, _node2);
      if (!matches) {
        // Distinguish "node absent from topology" from "present but in different domains"
        // so test failures are self-explanatory.
        if (!item.getBroadcastDomainRepresentative(_node1).isPresent()) {
          mismatchDescription.appendText(String.format("does not contain %s", _node1));
        } else if (!item.getBroadcastDomainRepresentative(_node2).isPresent()) {
          mismatchDescription.appendText(String.format("does not contain %s", _node2));
        } else {
          mismatchDescription.appendText("nodes are in different broadcast domains");
        }
      }
      return matches;
    }
  }
  private Layer2TopologyMatchersImpl() {}
}
| 616 |
5,169 | {
"name": "RESTMagic",
"version": "0.0.3",
"summary": "RESTMagic is a simple framework for building RESTFul client applications.",
"description": " RESTmagic is a framework for that framework you already deployed, your RESTFUL api. Light or no configuration will tell the framework how your resources are organized, and where to get the appropriate templates. Before it joins those together with your choice of mustache flavored templating, it will check if you have a native view instead to present. The whole framework is built to be easily customized with as much or as little magic as your want.\n",
"homepage": "http://www.RESTMagic.org",
"license": "MIT",
"authors": {
"jason": "<EMAIL>"
},
"source": {
"git": "https://github.com/RESTmagic/RESTMagic.git",
"tag": "0.0.3"
},
"platforms": {
"ios": "5.0"
},
"source_files": [
"RESTMagic",
"RESTMagic/**/*.{h,m}"
],
"public_header_files": "RESTMagic/**/*.h",
"requires_arc": true,
"dependencies": {
"GRMustache": [
"~> 6.4.0"
]
}
}
| 362 |
639 | <filename>src/nnfusion/core/kernels/cuda_gpu/kernels/reverse.cpp
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#include "reverse.hpp"
#include "nnfusion/core/operators/generic_op/generic_op.hpp"
using namespace nnfusion;
using namespace nnfusion::kernels;
// Caches the input/output shapes and reversed axes from the op, and builds the kernel's
// custom tag (rank + reversed-axes list) used for kernel deduplication.
cuda::Reverse::Reverse(shared_ptr<KernelContext> ctx)
    : BlockCudaEmitter(ctx)
{
    auto reverse = static_pointer_cast<nnfusion::op::Reverse>(ctx->gnode->get_op_ptr());
    arg_shape = ctx->inputs[0]->get_shape();
    arg_rank = arg_shape.size();
    result_shape = ctx->outputs[0]->get_shape();
    reverse_axes = reverse->get_reversed_axes();
    // Note: the per-axis flag vector is built where it is actually needed, in
    // emit_function_body(); the copy previously computed here was never used.
    std::stringstream tag;
    tag << arg_rank << "axes_" << join(reverse_axes, "_");
    custom_tag = tag.str();
}
// Emits the CUDA kernel body that reverses the selected axes of the input tensor.
// For a scalar input this is a plain copy; otherwise each thread maps its linear input
// index to the mirrored output index along every reversed axis.
LanguageUnit_p cuda::Reverse::emit_function_body()
{
    LanguageUnit_p _lu(new LanguageUnit(get_function_name()));
    auto& lu = *_lu;
    if (nnfusion::is_scalar(arg_shape))
    {
        // Scalar: nothing to reverse, single-thread copy.
        lu << "uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x;\n";
        lu << "if (tid < 1)\n";
        lu.block_begin();
        {
            lu << "output0[0] = input0[0];\n";
        }
        lu.block_end();
    }
    else
    {
        // 0/1 flag per axis: 1 means the axis is reversed. Baked into the kernel source below.
        vector<uint32_t> reverse_axes_flag(arg_rank, 0);
        for (auto a : reverse_axes)
        {
            reverse_axes_flag[a] = 1;
        }
        // function signature:
        // extern "C" __global__ void kernel(m_context->dtypes[0]* input0, m_context->dtypes[0]* input1, m_context->dtypes[2]* output0)
        auto code = nnfusion::op::create_code_from_template(
            R"(
    int input_shape[] = {@input_shape@};
    int reverse_axes[] = {@reverse_axes@};
    uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < @nthreads@) {
        uint32_t input_idx = tid;
        uint32_t output_idx = 0;
        uint32_t stride = 1;
        for(uint32_t i = @rank@; i > 0; i--) {
            uint32_t idx = i - 1;
            uint32_t axes_i_in = input_idx % input_shape[idx];
            input_idx /= input_shape[idx];
            uint32_t axes_i_out = reverse_axes[idx] ? input_shape[idx] - axes_i_in - 1 : axes_i_in;
            output_idx += axes_i_out * stride;
            stride *= input_shape[idx];
        }
        output0[output_idx] = input0[tid];
    }
)",
            {{"input_shape", join(arg_shape)},
             {"reverse_axes", join(reverse_axes_flag)},
             {"nthreads", static_cast<uint32_t>(shape_size(arg_shape))},
             {"rank", static_cast<uint32_t>(arg_rank)}});
        lu << code;
    }
    return _lu;
}
// One thread per output element: fixed 64-thread 1-D blocks, grid sized to cover all elements.
void cuda::Reverse::set_launch_config()
{
    const uint32_t threads_per_block = 64;
    const uint32_t element_count = static_cast<uint32_t>(shape_size(result_shape));
    m_blockDim = dim3(threads_per_block, 1, 1);
    m_gridDim = dim3(align_to_block_size(element_count, threads_per_block), 1, 1);
}
// Declares the headers the generated kernel depends on (only the CUDA runtime header).
LanguageUnit_p cuda::Reverse::emit_dependency()
{
    LanguageUnit_p _lu(new LanguageUnit(get_function_name() + "_dep"));
    _lu->require(header::cuda);
    return _lu;
}
// Registers this emitter for the "Reverse" op on CUDA GPUs (f32, priority 2).
REGISTER_KERNEL_EMITTER(
    "Reverse",                                                                    // op_name
    Device(CUDA_GPU).TypeConstraint(element::f32).Tag("cuda_kernel").Priority(2), // attrs
    cuda::Reverse)                                                                // constructor
1,405 | <filename>sample4/recompiled_java/sources/com/lenovo/performancecenter/coreui/util/RootPassage.java
package com.lenovo.performancecenter.coreui.util;
import android.app.ActivityManager;
import android.content.Context;
import android.os.Build;
import android.os.Handler;
import android.util.Log;
import com.lenovo.performancecenter.performance.BootSpeedActivity;
import com.lenovo.safecenter.support.CMDHelper;
import com.lenovo.safecenter.utils.Untils;
import com.lenovo.safecenter.utils.WflUtils;
import java.io.File;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.commons.httpclient.cookie.CookieSpec;
public class RootPassage {
public static final String CMD_CC = " -cc ";
public static final String CMD_FS = " -fs ";
public static File cmdFile;
    // Decompiled synthetic accessor: builds the shell command line
    //   /system/bin/app_process /system/bin com.lenovo.safecenter.LFS <flag> <pkg1> <pkg2>...
    // from the package list |x0| and flag |x1| (packages joined with Untils.PAUSE).
    // Returns the empty string when |x0| is null.
    static /* synthetic */ String a(ArrayList x0, String x1) {
        StringBuffer stringBuffer = new StringBuffer();
        if (x0 != null) {
            Object[] objArr = new Object[5];
            objArr[0] = "/system/bin";
            objArr[1] = "app_process";
            objArr[2] = "/system/bin";
            objArr[3] = "com.lenovo.safecenter.LFS";
            StringBuffer stringBuffer2 = new StringBuffer();
            int size = x0.size();
            for (int i = 0; i < size; i++) {
                String str = (String) x0.get(i);
                // Skip null/empty entries; separator only between kept entries.
                if (str != null && !"".equals(str)) {
                    if (i > 0) {
                        stringBuffer2.append(Untils.PAUSE);
                    }
                    stringBuffer2.append(str);
                }
            }
            objArr[4] = x1 + " " + ((Object) stringBuffer2);
            stringBuffer.append(String.format("%s/%s %s %s %s", objArr));
        }
        return stringBuffer.toString();
    }
    // Decompiled synthetic accessor: force-stops a single package by sending
    // "am force-stop <pkg>" through CMDHelper.exeCmd.
    static /* synthetic */ void a(Context x0, String x1) {
        String str = "am force-stop " + x1 + "\n";
        Log.i("KillSingle", System.currentTimeMillis() + " command == " + str);
        Log.i("KillSingle", System.currentTimeMillis() + " CMDHelper.socketClient result == " + CMDHelper.exeCmd(x0, str));
    }
    // Decompiled synthetic accessor: force-stops every package in |x1| with one batched
    // "am force-stop ..." command string executed via CMDHelper.exeCmd.
    static /* synthetic */ void a(Context x0, ArrayList x1) {
        StringBuffer stringBuffer = new StringBuffer();
        Iterator it = x1.iterator();
        while (it.hasNext()) {
            stringBuffer.append("am force-stop " + ((String) it.next()) + "\n");
        }
        String stringBuffer2 = stringBuffer.toString();
        Log.i("KILLALL", System.currentTimeMillis() + " command == " + stringBuffer2);
        Log.i("KILLALL", System.currentTimeMillis() + " CMDHelper.socketClient result == " + CMDHelper.exeCmd(x0, stringBuffer2));
    }
    // Decompiled synthetic accessor: concatenates two shell command lines (typically the
    // CLASSPATH export and the app_process invocation) and executes them via CMDHelper.exeCmd.
    static /* synthetic */ void b(String x0, String x1, Context x2) {
        Log.i("KillSingle", System.currentTimeMillis() + " cmd1==" + x0 + " cmd2==" + x1);
        StringBuffer stringBuffer = new StringBuffer();
        stringBuffer.append(x0).append("\n");
        stringBuffer.append(x1).append("\n");
        String stringBuffer2 = stringBuffer.toString();
        Log.i("KillSingle", System.currentTimeMillis() + " command == " + stringBuffer2);
        Log.i("KillSingle", System.currentTimeMillis() + " CMDHelper.socketClient result == " + CMDHelper.exeCmd(x2, stringBuffer2));
    }
    /**
     * Force-stops the given packages. With root, runs a batched "am force-stop" (API >= 17) or
     * the legacy app_process command on a background thread; without root, falls back to
     * ActivityManager.killBackgroundProcesses. Sends message 6 to {@code handler} when done.
     * Only the " -fs " flag is handled; anything else is a no-op.
     */
    public static void execRootCmd(final Handler handler, final ArrayList<String> pkgNameList, final String flag, final Context context) {
        if (pkgNameList.size() == 0 || !" -fs ".equals(flag)) {
            return;
        }
        if (!WflUtils.isRoot()) {
            // No root: best-effort kill through the public ActivityManager API.
            ActivityManager activityManager = (ActivityManager) context.getSystemService("activity");
            Iterator i$ = pkgNameList.iterator();
            while (i$.hasNext()) {
                activityManager.killBackgroundProcesses(i$.next());
            }
            if (handler != null) {
                handler.sendEmptyMessage(6);
                return;
            }
            return;
        }
        new Thread(new Runnable() {
            /* class com.lenovo.performancecenter.coreui.util.RootPassage.AnonymousClass1 */
            public final void run() {
                Log.i("KILLALL", "Build.VERSION.SDK_INT == " + Build.VERSION.SDK_INT);
                if (Build.VERSION.SDK_INT >= 17) {
                    RootPassage.a(context, pkgNameList);
                } else {
                    RootPassage.c(RootPassage.exportLenovo(context), RootPassage.a(pkgNameList, flag), context);
                }
                if (handler != null) {
                    handler.sendEmptyMessage(6);
                }
            }
        }).start();
    }
    /**
     * Force-stops a single package. With root, uses "am force-stop" (API >= 17) or the legacy
     * app_process command on a background thread; without root, falls back to
     * ActivityManager.killBackgroundProcesses. Only the " -fs " flag is handled.
     */
    public static void execRootCmd(final String pkgName, final String flag, final Context context) {
        if (pkgName != null && !"".equals(pkgName) && " -fs ".equals(flag)) {
            if (!WflUtils.isRoot()) {
                ((ActivityManager) context.getSystemService("activity")).killBackgroundProcesses(pkgName);
            } else {
                new Thread(new Runnable() {
                    /* class com.lenovo.performancecenter.coreui.util.RootPassage.AnonymousClass2 */
                    public final void run() {
                        Log.i("KillSingle", "Build.VERSION.SDK_INT == " + Build.VERSION.SDK_INT);
                        if (Build.VERSION.SDK_INT >= 17) {
                            RootPassage.a(context, pkgName);
                            return;
                        }
                        // Legacy path: export CLASSPATH then invoke app_process.
                        String exportLenovo = RootPassage.exportLenovo(context);
                        String appProssLenovo = RootPassage.appProssLenovo(pkgName, flag);
                        Context context = context;
                        String str = pkgName;
                        RootPassage.b(exportLenovo, appProssLenovo, context);
                    }
                }).start();
            }
        }
    }
    /**
     * Runs the legacy app_process command for {@code pkgName}/{@code flag} on a background
     * thread, regardless of API level. Does nothing when either argument is null or empty.
     */
    public static void execRootCmdInJar(final String pkgName, final String flag, final Context context) {
        if (pkgName != null && !"".equals(pkgName) && flag != null && !"".equals(flag)) {
            new Thread(new Runnable() {
                /* class com.lenovo.performancecenter.coreui.util.RootPassage.AnonymousClass3 */
                public final void run() {
                    try {
                        String exportLenovo = RootPassage.exportLenovo(context);
                        String appProssLenovo = RootPassage.appProssLenovo(pkgName, flag);
                        Context context = context;
                        String str = pkgName;
                        RootPassage.b(exportLenovo, appProssLenovo, context);
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }).start();
        }
    }
/**
 * Applies enable/disable states to a batch of components at boot.
 *
 * <p>Builds one {@code pm enable|disable pkg/cls} line per entry and runs the
 * whole script through {@link CMDHelper#exeCmd} on a worker thread.
 *
 * <p>Fix: entries whose state is neither 1 (enable) nor 2 (disable) are now
 * skipped explicitly; the original could read the command verb {@code able}
 * before it was ever assigned for such entries.
 *
 * @param context context handed to the command helper
 * @param cns     map of {package, class} pairs to {state, ...} arrays
 */
public static void setComponentAbleBoot(final Context context, final HashMap<String[], int[]> cns) {
    Log.i(BootSpeedActivity.TAG, System.currentTimeMillis() + " setComponentAbleBoot");
    new Thread(new Runnable() {
        public final void run() {
            StringBuffer buffer = new StringBuffer();
            for (Map.Entry<String[], int[]> entry : cns.entrySet()) {
                final int state = entry.getValue()[0];
                final String able;
                if (state == 1) {
                    able = "enable";
                } else if (state == 2) {
                    able = "disable";
                } else {
                    // -1 (and any other value) means "no change": emit nothing.
                    continue;
                }
                buffer.append("pm " + able + " " + entry.getKey()[0] + CookieSpec.PATH_DELIM + entry.getKey()[1] + "\n");
            }
            String shbuffer = buffer.toString();
            Log.i(BootSpeedActivity.TAG, System.currentTimeMillis() + " command == " + shbuffer);
            Log.i(BootSpeedActivity.TAG, System.currentTimeMillis() + " result == " + CMDHelper.exeCmd(context, shbuffer));
        }
    }).start();
}
/**
 * Enables or disables a single component at boot via a {@code pm} command
 * executed on a worker thread.
 *
 * @param context   context handed to the command helper
 * @param component {package, class} pair identifying the component
 * @param state     1 to enable, 2 to disable (any other value enables)
 */
public static void setComponentAbleBoot(final Context context, final String[] component, final int state) {
    Log.i(BootSpeedActivity.TAG, System.currentTimeMillis() + " setComponentAbleBoot");
    new Thread(new Runnable() {
        public final void run() {
            // Map the requested state onto a pm verb; only 2 means disable,
            // everything else (including the explicit 1) keeps "enable".
            final String verb = (state == 2) ? "disable" : "enable";
            final String shbuffer =
                    "pm " + verb + " " + component[0] + CookieSpec.PATH_DELIM + component[1] + "\n";
            Log.i(BootSpeedActivity.TAG, System.currentTimeMillis() + " command == " + shbuffer);
            Log.i(BootSpeedActivity.TAG, System.currentTimeMillis() + " result == " + CMDHelper.exeCmd(context, shbuffer));
        }
    }).start();
}
/**
 * Builds the shell line exporting the helper jar's CLASSPATH.
 *
 * @param context used to resolve the app-private {@code nb.jar} file
 * @return an {@code export CLASSPATH=...} line, or the empty string when the
 *         jar is missing
 */
public static String exportLenovo(Context context) {
    File jar = context.getFileStreamPath("nb.jar");
    return (jar != null && jar.exists())
            ? String.format("export CLASSPATH=%s\n", jar.getAbsolutePath())
            : "";
}
/**
 * Builds the {@code app_process} invocation that runs the LFS helper with
 * the concatenated "&lt;flag&gt;&lt;package&gt;" argument.
 *
 * @param paramString package name appended to the flag
 * @param flag        helper flag (already padded with spaces by callers)
 * @return a single newline-terminated shell command line
 */
public static String appProssLenovo(String paramString, String flag) {
    final String bin = "/system/bin";
    return String.format("%s/%s %s %s %s\n", bin, "app_process", bin, "com.lenovo.safecenter.LFS", flag + paramString);
}
/* access modifiers changed from: private */
/**
 * Concatenates two shell lines into one script and executes it via
 * {@link CMDHelper#exeCmd}, logging the command and its result.
 */
public static void c(String cmd1, String cmd2, Context context) {
    Log.i("KILLALL", System.currentTimeMillis() + " cmd1==" + cmd1 + " cmd2==" + cmd2);
    final String script = cmd1 + "\n" + cmd2 + "\n";
    Log.i("KILLALL", System.currentTimeMillis() + " command == " + script);
    Log.i("KILLALL", System.currentTimeMillis() + " CMDHelper.socketClient result == " + CMDHelper.exeCmd(context, script));
}
/** Clears the notification for {@code packageName} via the "-nc" helper flag. */
public static void clearnNotification(String packageName, Context context) {
    final String env = exportLenovo(context);
    final String cmd = appProssLenovo(packageName, " -nc ");
    c(env, cmd, context);
}
}
| 4,887 |
/*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2020 ScyllaDB.
*/
#pragma once
#include <seastar/core/future.hh>
#include <seastar/core/make_task.hh>
namespace seastar {
/// \addtogroup future-util
/// @{
namespace internal {
/// Queues \c func as a task in scheduling group \p sg and returns a future
/// for its (futurized) result. Unlike with_scheduling_group(), the task is
/// always queued, never invoked inline.
template <typename Func>
SEASTAR_CONCEPT( requires std::is_nothrow_move_constructible_v<Func> )
auto
schedule_in_group(scheduling_group sg, Func func) noexcept {
    static_assert(std::is_nothrow_move_constructible_v<Func>);
    // Create the task bound to the target group, hand it to the scheduler,
    // and expose its result through the task's future.
    auto tsk = make_task(sg, std::move(func));
    schedule(tsk);
    return tsk->get_future();
}
}
/// \brief run a callable (with some arbitrary arguments) in a scheduling group
///
/// If the conditions are suitable (see scheduling_group::may_run_immediately()),
/// then the function is run immediately. Otherwise, the function is queued to run
/// when its scheduling group next runs.
///
/// \param sg scheduling group that controls execution time for the function
/// \param func function to run; must be movable or copyable
/// \param args arguments to the function; may be copied or moved, so use \c std::ref()
/// to force passing references
template <typename Func, typename... Args>
SEASTAR_CONCEPT( requires std::is_nothrow_move_constructible_v<Func> )
inline
auto
with_scheduling_group(scheduling_group sg, Func func, Args&&... args) noexcept {
    static_assert(std::is_nothrow_move_constructible_v<Func>);
    // Futurize on the declared result type so both branches return the
    // same future type regardless of whether func runs now or later.
    using return_type = decltype(func(std::forward<Args>(args)...));
    using futurator = futurize<return_type>;
    if (sg.active()) {
        // Target group is currently active: invoke immediately.
        return futurator::invoke(func, std::forward<Args>(args)...);
    } else {
        // Otherwise queue a task in sg; arguments are captured by value in a
        // tuple so they stay alive until the deferred invocation runs.
        return internal::schedule_in_group(sg, [func = std::move(func), args = std::make_tuple(std::forward<Args>(args)...)] () mutable {
            return futurator::apply(func, std::move(args));
        });
    }
}
/// @}
} // namespace seastar
| 841 |
3,428 | {"id":"00295","group":"easy-ham-2","checksum":{"type":"MD5","value":"37087d1f5c2678b8152b397111456d3f"},"text":"From <EMAIL> Fri Aug 9 14:54:34 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: yyyy@<EMAIL>.netnoteinc.com\nReceived: from localhost (localhost [127.0.0.1])\n\tby phobos.labs.netnoteinc.com (Postfix) with ESMTP id 5E44543FB1\n\tfor <jm@localhost>; Fri, 9 Aug 2002 09:54:34 -0400 (EDT)\nReceived: from phobos [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Fri, 09 Aug 2002 14:54:34 +0100 (IST)\nReceived: from webnote.net (mail.webnote.net [1172.16.58.319]) by\n dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g79DsYb03395 for\n <<EMAIL>>; Fri, 9 Aug 2002 14:54:34 +0100\nReceived: from lugh.tuatha.org (<EMAIL> [192.168.127.125]) by\n webnote.net (8.9.3/8.9.3) with ESMTP id OAA06691 for <<EMAIL>>;\n Fri, 9 Aug 2002 14:51:28 +0100\nReceived: from lugh (root@localhost [127.0.0.1]) by lugh.tuatha.org\n (8.9.3/8.9.3) with ESMTP id OAA11276; Fri, 9 Aug 2002 14:50:36 +0100\nReceived: from hawk.dcu.ie (mail.dcu.ie [136.206.1.5]) by lugh.tuatha.org\n (8.9.3/8.9.3) with ESMTP id OAA11243 for <<EMAIL>>; Fri,\n 9 Aug 2002 14:50:29 +0100\nX-Authentication-Warning: lugh.tuatha.org: Host mail.dcu.ie [136.206.1.5]\n claimed to be hawk.dcu.ie\nReceived: from prodigy.redbrick.dcu.ie (172.16.58.3) by hawk.dcu.ie\n (6.0.040) id 3D36BB4A00080847 for <EMAIL>; Fri, 9 Aug 2002 14:50:29\n +0100\nReceived: by prodigy.redbrick.dcu.ie (Postfix, from userid 1023) id\n F0207DA4A; Fri, 9 Aug 2002 14:50:28 +0100 (IST)\nDate: Fri, 9 Aug 2002 14:50:28 +0100\nFrom: <NAME> <<EMAIL>>\nTo: [email protected]\nSubject: Re: [ILUG] slashdot EW Dijkstra humor\nMessage-Id: <<EMAIL>.Redbrick.DCU.IE>\nReferences: <55DA5264CE16D41186F600D0B74D6B0924724A@KBS01>\n <<EMAIL>>\nMIME-Version: 1.0\nContent-Type: text/plain; charset=us-ascii\nContent-Disposition: inline\nUser-Agent: Mutt/1.2.5i\nIn-Reply-To: <<EMAIL>>; from\n <EMAIL> on Fri, Aug 09, 2002 at 
10:34:41AM +0100\nSender: [email protected]\nErrors-To: [email protected]\nX-Mailman-Version: 1.1\nPrecedence: bulk\nList-Id: Irish Linux Users' Group <ilug.linux.ie>\nX-Beenthere: [email protected]\n\n<NAME>'s [<EMAIL>] 65 lines of wisdom included:\n> <NAME> wrote a code fragment:\n> > For(a=0;a<strlen(somestring);a++)\n> > {\n> > some_thing_goes_here();\n> >\n> > if(b=strlen(somestring)-4)\n> > do_something;\n> >\n> > };\n> \n> Unfortunately strlen is a relatively expensive operation. If you are using\n> C++ this is not such a big issue as string.length() can be declared const.\n> So long as you do not modify the string object, the compiler can do the\n> caching for you.\n\nThere's a more simple reason as to why strlen shouldn't be used on\nthe string here, and that's because in future you could actually\nchange the contents of ``somestring'' within the for loop, and be\nleft wondering why the number of iterations are not as you expect.\n\nPhil.\n\n-- \nIrish Linux Users' Group: <EMAIL>\nhttp://www.linux.ie/mailman/listinfo/ilug for (un)subscription information.\nList maintainer: <EMAIL>\n\n\n"} | 1,334 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hive.spark.client;
import java.io.IOException;
import java.util.Map;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hive.common.classification.InterfaceAudience;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hive.spark.client.rpc.RpcServer;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
/**
 * Factory for SparkClient instances.
 *
 * <p>Holds a single shared {@link RpcServer}, lazily created by
 * {@link #initialize} (double-checked locking on the volatile {@code server}
 * field) and torn down by {@link #stop}.
 */
@InterfaceAudience.Private
public final class SparkClientFactory {

  /** Used by client and driver to share a client ID for establishing an RPC session. */
  static final String CONF_CLIENT_ID = "spark.client.authentication.client_id";

  /** Used by client and driver to share a secret for establishing an RPC session. */
  static final String CONF_KEY_SECRET = "spark.client.authentication.secret";

  // volatile is required for the double-checked locking in initialize()/stop().
  private static volatile RpcServer server = null;
  private static final Object serverLock = new Object();

  /**
   * Initializes the SparkClient library. Must be called before creating client instances.
   *
   * @param conf Map containing configuration parameters for the client library.
   * @param hiveConf Hive configuration used when constructing the RPC server.
   */
  public static void initialize(Map<String, String> conf, HiveConf hiveConf) throws IOException {
    if (server == null) {
      synchronized (serverLock) {
        if (server == null) {
          try {
            server = new RpcServer(conf, hiveConf);
          } catch (InterruptedException ie) {
            // Restore the thread's interrupt status before rethrowing so
            // callers can still observe the interruption (the original
            // silently swallowed it).
            Thread.currentThread().interrupt();
            throw Throwables.propagate(ie);
          }
        }
      }
    }
  }

  /** Stops the SparkClient library. */
  public static void stop() {
    if (server != null) {
      synchronized (serverLock) {
        if (server != null) {
          server.close();
          server = null;
        }
      }
    }
  }

  /**
   * Instantiates a new Spark client.
   *
   * @param sparkConf Configuration for the remote Spark application, contains spark.* properties.
   * @param hiveConf Configuration for Hive, contains hive.* properties.
   * @param sessionId identifier for the Hive session owning the client.
   * @throws IllegalArgumentException when {@code hive.spark.client.type} is unrecognized.
   */
  public static SparkClient createClient(Map<String, String> sparkConf, HiveConf hiveConf,
                                         String sessionId) throws IOException {
    Preconditions.checkState(server != null,
        "Invalid state: Hive on Spark RPC Server has not been initialized");
    switch (hiveConf.getVar(HiveConf.ConfVars.SPARK_CLIENT_TYPE)) {
    case HiveConf.HIVE_SPARK_SUBMIT_CLIENT:
      return new SparkSubmitSparkClient(server, sparkConf, hiveConf, sessionId);
    case HiveConf.HIVE_SPARK_LAUNCHER_CLIENT:
      return new SparkLauncherSparkClient(server, sparkConf, hiveConf, sessionId);
    default:
      throw new IllegalArgumentException("Unknown Hive on Spark launcher type " + hiveConf.getVar(
          HiveConf.ConfVars.SPARK_CLIENT_TYPE) + " valid options are " +
          HiveConf.HIVE_SPARK_SUBMIT_CLIENT + " or " + HiveConf.HIVE_SPARK_LAUNCHER_CLIENT);
    }
  }

  @VisibleForTesting
  public static int getServerPort() {
    return server.getPort();
  }
}
| 1,255 |
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#ifndef _TCCONST_H
#define _TCCONST_H
enum IOTYPE
{
iotypeRead = 0,
iotypeWrite,
iotypeMax
};
// PLEASE DO NOT SHIFT IOFILE TYPES AROUND ... we're building tools that depend upon these
// values, so they need to be static.
enum IOFILE
{
iofileOther = 0,
iofileDbAttached = 1,
iofileDbRecovery = 2,
iofileLog = 3,
iofileDbTotal = 4,
iofileFlushMap = 5,
iofileRBS = 6,
iofileMax = 7
};
enum IOCATEGORY // iocat
{
iocatUnknown = 0,
iocatMaintenance = 1,
iocatTransactional = 2,
iocatMax
};
const QWORD qwLegacyFileID = 0x10000001FFFFFFFF; // "operational" file IDs
const QWORD qwSetDbSizeFileID = 0x10000002FFFFFFFF;
const QWORD qwDumpingFileID = 0x10000003FFFFFFFF;
const QWORD qwDefragFileID = 0x10000004FFFFFFFF;
const QWORD qwPatchFileID = 0x10000005FFFFFFFF;
const QWORD qwCheckpointFileID = 0x2000000000000000; // top DWORD is m_iInstance (except top nibble)
const QWORD qwLogFileMgmtID = 0x3000000000000000; // top DWORD is m_iInstance (except top nibble), bottom DWORD is log generation (when it's filled in)
const QWORD qwLogFileID = 0x4000000000000000; // top DWORD is m_iInstance (except top nibble), bottom DWORD is log generation (when it's filled in)
//nst QWORD qwBackupFileID = 0x5000000000000000; // DEPRECATED - now qwBackupDbID or qwBackupLogID
const QWORD qwBackupDbID = 0x6000000000000000; // low DWORD is ifmp.
const QWORD qwBackupLogID = 0x7000000000000000; // top DWORD is m_iInstance (except top nibble)
const QWORD qwRBSFileID = 0x8000000000000000; // top DWORD is m_iInstance (except top nibble)
const QWORD qwRBSRevertChkFileID = 0x9000000000000000; // top DWORD is m_iInstance (except top nibble)
const LONG lGenSignalCurrentID = 0x80000001; // signal to indicate current lgen where we can't know the lgen apriori
const LONG lGenSignalTempID = 0x80000002; // signal to indicate current lgen where we can't know the lgen apriori
const LONG lgenSignalReserveID = 0x80000003; // signal to indicate unknown lgen because the reserve log file hasn't been assigned to one yet
// Composes the 64-bit file ID for a per-instance file: the type constant in
// the high bits, the instance number in the upper DWORD, and the log
// generation (zero for checkpoints) in the lower DWORD.
INLINE QWORD QwInstFileID( const QWORD qwInstFileType, const INT iInstance, const LONG lGen )
{
#ifdef DEBUG
    // Only the per-instance file types may be combined with an instance/gen.
    Assert( qwInstFileType == qwCheckpointFileID ||
            qwInstFileType == qwLogFileID ||
            qwInstFileType == qwLogFileMgmtID ||
            qwInstFileType == qwBackupLogID ||
            qwInstFileType == qwRBSFileID );
    if ( qwInstFileType == qwCheckpointFileID )
    {
        // Checkpoint files carry no generation number.
        Expected( lGen == 0 );
    }
    else
    {
        Expected( lGen != 0 );
        // Negative generations are only valid for the lGenSignal* sentinels.
        Expected( lGen > 0 ||
                  lGen == lGenSignalCurrentID || lGen == lGenSignalTempID || lGen == lgenSignalReserveID );
    }
#endif
    // note: (ULONG)lGen, must cast to unsigned to avoid sign extension from lGenSignal* constants.
    const QWORD qwRet = ( qwInstFileType | ( (QWORD)iInstance << 32 ) | (ULONG)lGen );
    return qwRet;
}
// The 4 sub-components of an OS File's IO reasons are defined here. From a high
// level the ESE sub-reasons will be broken up thusly:
//
// Primary - The Buffer Manager / BF and Log / LOG will predominantly own
// the primary reasons for IO, as they are first contact with the
// OS layer when the system does IO.
// Secondary - Secondary reasons are owned by BT / Node / the layer above BF that issues IO
// Tertiary - Tertiary reasons are owned by layers above BT, and internal clients of the system.
// Examples: DBScan, space, verstore etc.
// User       - This contains the public Jet API (JetOp) called that caused the system to do IO.
// Flags - These are options on IO.
//
// PLEASE DO NOT SHIFT IOREASONs AROUND ... we're building tools to depend upon these
// reasons, so they need to be static.
enum IOREASONPRIMARY : BYTE
{
//iorpNone = 0, defined generically by OS layer
iorpInvalid = 0,
iorpHeader = 1,
// PLEASE attempt not to use values = 2 through 21 as SOMEONE's tool for correlating
// I/Os depends upon it.
//
// Generic OS Layer reasons
//
iorpOsLayerTracing = 24,
// Database I Reasons
//
iorpBFRead = 32, // deprecated
iorpBFLatch = 33,
iorpBFPreread = 34,
iorpBFRemapReVerify = 35,
// Note: we do pre-read as a flag.
// Database O Reasons
//
iorpBFCheckpointAdv = 64,
iorpBFAvailPool = 65,
iorpBFShrink = 66,
// iorpBFIdleFlush = 67, // 67 can be re-used.
iorpBFFilthyFlush = 68,
iorpBFDatabaseFlush = 69,
iorpSPDatabaseInlineZero = 70, // Used to 'de-sparse' a region.
iorpBFPurge = 71,
iorpBFImpedingWriteCleanDoubleIo = 72, // should not happen - see comment above usage.
// Transaction Log I/O Reasons
//
iorpLog = 80,
iorpLGWriteCapacity = 81,
iorpLGWriteCommit = 82,
iorpLGWriteNewGen = 83,
iorpLGWriteSignal = 84, // at least some times this is capacity flush ...
iorpLGFlushAll = 85,
iorpLogRecRedo = 86,
// Alternate Transaction Log I/O Reasons
iorpShadowLog = 92,
// Specialty reasons ...
//
iorpCheckpoint = 96, // the file itself, not the BF process.
iorpDirectAccessUtil = 97, // direct data access for clients directly reading on-disk data.
iorpBackup = 98,
iorpRestore = 99,
iorpPatchFix = 100,
iorpDatabaseExtension = 101,
iorpDatabaseShrink = 102,
iorpDbScan = 103,
iorpCorruptionTestHook = 104, // this is only driven by a test hook
iorpDatabaseTrim = 105,
iorpFlushMap = 106,
iorpBlockCache = 107,
iorpRBS = 108,
iorpRevertPage = 109,
iorpRBSRevertCheckpoint = 110,
// Are you sure you want to append the IO reason at the end? There are other
// more specific categories above.
};
enum IOREASONSECONDARY : BYTE
{
//iorsNone = 0, defined generically by OS layer
iorsHeader = 1,
iorsBTOpen = 2,
iorsBTSeek = 3,
iorsBTMoveFirst = 4,
iorsBTMoveNext = 5,
iorsBTMovePrev = 6,
iorsBTInsert = 7,
iorsBTAppend = 8,
iorsBTReplace = 9,
iorsBTDelete = 10,
iorsBTGet = 11,
iorsBTSplit = 12,
iorsBTMerge = 13,
iorsBTRefresh = 14,
iorsBTPreread = 15,
iorsBTMax = 16,
};
static_assert( iorsBTMax <= 16, "IOREASONSECONDARY overflow" );
enum IOREASONTERTIARY : BYTE
{
iortISAM = 1, // An external client called the JET Api to perform an operation, IOREASONUSER tells which Api was called
// Internal components/reasons that initiated an IO
iortSpace = 2,
iortDbScan = 3,
iortDbShrink = 4,
iortRollbackUndo = 5,
iortScrubbing = 6,
iortOLD = 7,
iortOLD2 = 8,
iortRecovery = 9,
iortBackupRestore = 10,
iortPagePatching = 11,
iortSort = 12,
iortRepair = 13,
iortFreeExtSnapshot = 14,
iortRecTask = 15,
iortMax = 16,
};
static_assert(iortMax <= 16, "IOREASONTERTIARY overflow");
enum IOREASONUSER : BYTE
{
// Will hold the 8-bit Jet op defined in _jet.hxx
ioruMax = 255,
};
enum IOREASONFLAGS : BYTE
{
//iorfNone = 0, defined generically by OS layer
iorfShadow = 0x01,
iorfFill = 0x02,
iorfOpportune = 0x04,
iorfForeground = 0x08,
iorfDependantOrVersion = 0x10,
iorfReclaimPageFromOS = 0x20,
iorfSuperCold = 0x40,
};
enum IOFLUSHREASON
{
iofrInvalid = 0x0,
iofrLogPatch = 0x00000001,
iofrLogCommitFlush = 0x00000002,
iofrLogCapacityFlush = 0x00000004,
iofrLogSignalFlush = 0x00000008, // This seems to be capacity flush in disguise? from tcconst.hxx on iorpLGFlushSignal: "at least some times this is capacity flush ..." .. always? what's iorpLGCapacityFlush for?
iofrPersistClosingLogFile = 0x00000010,
iofrLogMaxRequired = 0x00000020,
iofrLogFlushAll = 0x00000040,
iofrLogTerm = 0x00000080,
iofrDbHdrUpdMaxRequired = 0x00000100,
iofrDbHdrUpdMinRequired = 0x00000200,
iofrDbUpdMinRequired = 0x00000400,
iofrCheckpointMinRequired = 0x00000800,
iofrCheckpointForceWrite = 0x00001000,
iofrDbHdrUpdLgposLastResize = 0x00002000,
iofrWriteBackFlushIfmpContext = 0x00004000, // [Default] Only original FFB call (pre the FFB conversion) for write-back DBs (for RW, paramEnableFileCache and no logging)
iofrFlushIfmpContext = 0x00008000,
iofrDbResize = 0x00010000, // database is being resized (grown or shrunk)
iofrUtility = 0x00020000, // trailer / header restore patching, and other misc stuff?
iofrIncReseedUtil = 0x00040000,
iofrSeparateHeaderUpdate = 0x00080000,
iofrFmInit = 0x00100000,
iofrFmFlush = 0x00200000,
iofrFmTerm = 0x00400000,
iofrDefensiveCloseFlush = 0x00800000,
iofrDefensiveErrorPath = 0x01000000,
iofrDefensiveFileHdrWrite = 0x02000000,
iofrOpeningFileFlush = 0x04000000,
iofrPagePatching = 0x08000000,
iofrBlockCache = 0x10000000,
iofrRBS = 0x20000000,
iofrRBSRevertUtil = 0x40000000,
// Reserved by OS Layer: = 0x80000000,
};
#endif // !_TCCONST_H
| 4,324 |
# scripts/run_deeptrio_test.py
# Copyright 2019 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for deepvariant .run_deeptrio."""
import io
from unittest import mock
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import six
from deepvariant.opensource_only.scripts import run_deeptrio
FLAGS = flags.FLAGS
# pylint: disable=line-too-long
class RunDeeptrioTest(parameterized.TestCase):
def _create_all_commands_and_check_stdout(self, expected_stdout=None):
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
commands, postprocess_cmds = run_deeptrio.create_all_commands(
'/tmp/deeptrio_tmp_output')
# Confirm that these basic commands don't have extra messages printed out
# to stdout.
if expected_stdout is None:
self.assertEmpty(mock_stdout.getvalue())
else:
self.assertEqual(mock_stdout.getvalue(), expected_stdout)
return commands, postprocess_cmds
  @parameterized.parameters('WGS', 'WES', 'PACBIO')
  @flagsaver.flagsaver
  def test_call_variants_postprocess_variants_commands(self, model_type):
    """Checks trio call_variants/postprocess_variants commands per model type."""
    FLAGS.model_type = model_type
    FLAGS.ref = 'your_ref'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.output_gvcf_merged = 'your_gvcf_merged'
    FLAGS.num_shards = 64
    commands, postprocess_cmds = self._create_all_commands_and_check_stdout()
    # commands[1..3] are the child/parent1/parent2 call_variants invocations:
    # the child uses the child model checkpoint, both parents the parent one.
    self.assertEqual(
        commands[1], 'time /opt/deepvariant/bin/call_variants '
        '--outfile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_child.tfrecord.gz" '
        '--examples "/tmp/deeptrio_tmp_output/[email protected]" '
        '--checkpoint "/opt/models/deeptrio/{}/child/model.ckpt"'.format(
            model_type.lower()))
    self.assertEqual(
        commands[2], 'time /opt/deepvariant/bin/call_variants '
        '--outfile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_parent1.tfrecord.gz" '
        '--examples "/tmp/deeptrio_tmp_output/[email protected]" '
        '--checkpoint "/opt/models/deeptrio/{}/parent/model.ckpt"'.format(
            model_type.lower()))
    self.assertEqual(
        commands[3], 'time /opt/deepvariant/bin/call_variants '
        '--outfile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_parent2.tfrecord.gz" '
        '--examples "/tmp/deeptrio_tmp_output/[email protected]" '
        '--checkpoint "/opt/models/deeptrio/{}/parent/model.ckpt"'.format(
            model_type.lower()))
    # One postprocess_variants command per trio member, each pairing the
    # matching call_variants output with its own VCF/gVCF destinations.
    self.assertEqual(
        postprocess_cmds[0], 'time /opt/deepvariant/bin/postprocess_variants '
        '--ref "your_ref" '
        '--infile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_child.tfrecord.gz" '
        '--outfile "your_vcf_child" '
        '--nonvariant_site_tfrecord_path '
        '"/tmp/deeptrio_tmp_output/[email protected]" '
        '--gvcf_outfile "your_gvcf_child"')
    self.assertEqual(
        postprocess_cmds[1], 'time /opt/deepvariant/bin/postprocess_variants '
        '--ref "your_ref" '
        '--infile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_parent1.tfrecord.gz" '
        '--outfile "your_vcf_parent1" '
        '--nonvariant_site_tfrecord_path '
        '"/tmp/deeptrio_tmp_output/[email protected]" '
        '--gvcf_outfile "your_gvcf_parent1"')
    self.assertEqual(
        postprocess_cmds[2], 'time /opt/deepvariant/bin/postprocess_variants '
        '--ref "your_ref" '
        '--infile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_parent2.tfrecord.gz" '
        '--outfile "your_vcf_parent2" '
        '--nonvariant_site_tfrecord_path '
        '"/tmp/deeptrio_tmp_output/[email protected]" '
        '--gvcf_outfile "your_gvcf_parent2"')
    # Trio: 1 make_examples + 3 call_variants, then 3 postprocess commands.
    self.assertLen(commands, 4)
    self.assertLen(postprocess_cmds, 3)
  @parameterized.parameters('WGS', 'WES', 'PACBIO')
  @flagsaver.flagsaver
  def test_duo_call_variants_postprocess_variants_commands(self, model_type):
    """Checks duo (child + one parent) command generation per model type."""
    FLAGS.model_type = model_type
    FLAGS.ref = 'your_ref'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_merged = 'your_gvcf_merged'
    FLAGS.num_shards = 64
    commands, postprocess_cmds = self._create_all_commands_and_check_stdout()
    # Without parent2 there are only two call_variants invocations.
    self.assertEqual(
        commands[1], 'time /opt/deepvariant/bin/call_variants '
        '--outfile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_child.tfrecord.gz" '
        '--examples "/tmp/deeptrio_tmp_output/[email protected]" '
        '--checkpoint "/opt/models/deeptrio/{}/child/model.ckpt"'.format(
            model_type.lower()))
    self.assertEqual(
        commands[2], 'time /opt/deepvariant/bin/call_variants '
        '--outfile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_parent1.tfrecord.gz" '
        '--examples "/tmp/deeptrio_tmp_output/[email protected]" '
        '--checkpoint "/opt/models/deeptrio/{}/parent/model.ckpt"'.format(
            model_type.lower()))
    self.assertEqual(
        postprocess_cmds[0], 'time /opt/deepvariant/bin/postprocess_variants '
        '--ref "your_ref" '
        '--infile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_child.tfrecord.gz" '
        '--outfile "your_vcf_child" '
        '--nonvariant_site_tfrecord_path '
        '"/tmp/deeptrio_tmp_output/[email protected]" '
        '--gvcf_outfile "your_gvcf_child"')
    self.assertEqual(
        postprocess_cmds[1], 'time /opt/deepvariant/bin/postprocess_variants '
        '--ref "your_ref" '
        '--infile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_parent1.tfrecord.gz" '
        '--outfile "your_vcf_parent1" '
        '--nonvariant_site_tfrecord_path '
        '"/tmp/deeptrio_tmp_output/[email protected]" '
        '--gvcf_outfile "your_gvcf_parent1"')
    # pylint: disable=g-generic-assert
    # Duo: 1 make_examples + 2 call_variants, then 2 postprocess commands.
    self.assertLen(commands, 3)
    self.assertLen(postprocess_cmds, 2)
  @parameterized.parameters(
      ('WGS', '--gvcf "/tmp/deeptrio_tmp_output/[email protected]" '
       '--pileup_image_height_child "60" '
       '--pileup_image_height_parent "40" '),
      ('WES', '--gvcf "/tmp/deeptrio_tmp_output/[email protected]" '
       '--pileup_image_height_child "100" '
       '--pileup_image_height_parent "100" '),
      ('PACBIO', '--add_hp_channel '
       '--alt_aligned_pileup "diff_channels" '
       '--gvcf "/tmp/deeptrio_tmp_output/[email protected]" '
       '--noparse_sam_aux_fields '
       '--pileup_image_height_child "60" '
       '--pileup_image_height_parent "40" '
       '--pileup_image_width "199" '
       '--norealign_reads '
       '--nosort_by_haplotypes '
       '--vsc_min_fraction_indels "0.12" '))
  @flagsaver.flagsaver
  def test_make_examples_commands_with_types(self, model_type,
                                             extra_args_plus_gvcf):
    """Checks the trio make_examples command and its model-type extras."""
    FLAGS.model_type = model_type
    FLAGS.ref = 'your_ref'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.output_gvcf_merged = 'your_gvcf_merged'
    FLAGS.num_shards = 64
    commands, _ = self._create_all_commands_and_check_stdout()
    # commands[0] is the sharded make_examples pipeline; the parameterized
    # extra args are spliced in between the sample names and --task.
    self.assertEqual(
        commands[0], 'time seq 0 63 '
        '| parallel -q --halt 2 --line-buffer '
        '/opt/deepvariant/bin/deeptrio/make_examples '
        '--mode calling '
        '--ref "your_ref" '
        '--reads_parent1 "your_bam_parent1" '
        '--reads_parent2 "your_bam_parent2" '
        '--reads "your_bam_child" '
        '--examples "/tmp/deeptrio_tmp_output/[email protected]" '
        '--sample_name "your_sample_child" '
        '--sample_name_parent1 "your_sample_parent1" '
        '--sample_name_parent2 "your_sample_parent2" '
        '%s'
        '--task {}' % extra_args_plus_gvcf)
  @parameterized.parameters(
      ('WGS', '--gvcf "/tmp/deeptrio_tmp_output/[email protected]" '
       '--pileup_image_height_child "60" '
       '--pileup_image_height_parent "40" '),
      ('WES', '--gvcf "/tmp/deeptrio_tmp_output/[email protected]" '
       '--pileup_image_height_child "100" '
       '--pileup_image_height_parent "100" '),
      ('PACBIO', '--add_hp_channel '
       '--alt_aligned_pileup "diff_channels" '
       '--gvcf "/tmp/deeptrio_tmp_output/[email protected]" '
       '--noparse_sam_aux_fields '
       '--pileup_image_height_child "60" '
       '--pileup_image_height_parent "40" '
       '--pileup_image_width "199" '
       '--norealign_reads '
       '--nosort_by_haplotypes '
       '--vsc_min_fraction_indels "0.12" '))
  @flagsaver.flagsaver
  def test_duo_make_examples_commands_with_types(self, model_type,
                                                 extra_args_plus_gvcf):
    """Checks the duo make_examples command (no parent2 flags emitted)."""
    FLAGS.model_type = model_type
    FLAGS.ref = 'your_ref'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_merged = 'your_gvcf_merged'
    FLAGS.num_shards = 64
    commands, _ = self._create_all_commands_and_check_stdout()
    self.assertEqual(
        commands[0], 'time seq 0 63 '
        '| parallel -q --halt 2 --line-buffer '
        '/opt/deepvariant/bin/deeptrio/make_examples '
        '--mode calling '
        '--ref "your_ref" '
        '--reads_parent1 "your_bam_parent1" '
        '--reads "your_bam_child" '
        '--examples "/tmp/deeptrio_tmp_output/[email protected]" '
        '--sample_name "your_sample_child" '
        '--sample_name_parent1 "your_sample_parent1" '
        '%s'
        '--task {}' % extra_args_plus_gvcf)
  @parameterized.parameters(
      (None, '--add_hp_channel '
       '--alt_aligned_pileup "diff_channels" '
       '--gvcf "/tmp/deeptrio_tmp_output/[email protected]" '
       '--noparse_sam_aux_fields '
       '--pileup_image_height_child "60" '
       '--pileup_image_height_parent "40" '
       '--pileup_image_width "199" '
       '--norealign_reads '
       '--nosort_by_haplotypes '
       '--vsc_min_fraction_indels "0.12" ', None),
      ('alt_aligned_pileup="rows",vsc_min_fraction_indels=0.03',
       '--add_hp_channel '
       '--alt_aligned_pileup "rows" '
       '--gvcf "/tmp/deeptrio_tmp_output/[email protected]" '
       '--noparse_sam_aux_fields '
       '--pileup_image_height_child "60" '
       '--pileup_image_height_parent "40" '
       '--pileup_image_width "199" '
       '--norealign_reads '
       '--nosort_by_haplotypes '
       '--vsc_min_fraction_indels "0.03" ',
       '\nWarning: --alt_aligned_pileup is previously set to diff_channels, '
       'now to "rows".\n'
       '\nWarning: --vsc_min_fraction_indels is previously set to 0.12, '
       'now to 0.03.\n'),
  )
  @flagsaver.flagsaver
  def test_pacbio_args_overwrite(self, make_examples_extra_args, expected_args,
                                 expected_stdout):
    """Confirms that adding extra flags can overwrite the default from mode."""
    FLAGS.model_type = 'PACBIO'
    FLAGS.ref = 'your_ref'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.num_shards = 64
    FLAGS.regions = None
    FLAGS.make_examples_extra_args = make_examples_extra_args
    # Overriding a mode default should also emit a warning on stdout, which
    # _create_all_commands_and_check_stdout verifies against expected_stdout.
    commands, _ = self._create_all_commands_and_check_stdout(expected_stdout)
    self.assertEqual(
        commands[0], 'time seq 0 63 | parallel -q --halt 2 --line-buffer '
        '/opt/deepvariant/bin/deeptrio/make_examples --mode calling '
        '--ref "your_ref" --reads_parent1 "your_bam_parent1" '
        '--reads_parent2 "your_bam_parent2" '
        '--reads "your_bam_child" '
        '--examples "/tmp/deeptrio_tmp_output/[email protected]" '
        '--sample_name "your_sample_child" '
        '--sample_name_parent1 "your_sample_parent1" '
        '--sample_name_parent2 "your_sample_parent2" '
        '%s'
        '--task {}' % expected_args)
  @parameterized.parameters(
      (None, ('sort_by_haplotypes=true,parse_sam_aux_fields=true'), True),
      (True, ('sort_by_haplotypes=true,parse_sam_aux_fields=true'), False),
  )
  @flagsaver.flagsaver
  def test_use_hp_information_conflicts(self, use_hp_information,
                                        make_examples_extra_args, has_conflict):
    """Confirms that PacBio use_hp_information can conflict with HP args.

    Args:
      use_hp_information: value for --use_hp_information (None leaves the
        flag unset, which conflicts with explicit HP extra args).
      make_examples_extra_args: extra args that set the HP-related flags.
      has_conflict: whether a ValueError is expected.
    """
    FLAGS.model_type = 'PACBIO'
    FLAGS.ref = 'your_ref'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.num_shards = 64
    FLAGS.regions = None
    FLAGS.use_hp_information = use_hp_information
    FLAGS.make_examples_extra_args = make_examples_extra_args
    if has_conflict:
      with six.assertRaisesRegex(self, ValueError,
                                 'conflicts with other flags'):
        run_deeptrio.create_all_commands('/tmp/deeptrio_tmp_output')
    else:
      # Otherwise, the command should run without raising errors.
      run_deeptrio.create_all_commands('/tmp/deeptrio_tmp_output')
  @parameterized.parameters('WGS', 'WES')
  @flagsaver.flagsaver
  def test_use_hp_information_only_with_pacbio(self, model_type):
    """Confirms --use_hp_information is rejected for non-PACBIO model types."""
    FLAGS.model_type = model_type
    FLAGS.ref = 'your_ref'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.num_shards = 64
    FLAGS.regions = None
    FLAGS.use_hp_information = True
    # Setting --use_hp_information with a non-PACBIO model must fail fast.
    with six.assertRaisesRegex(
        self, ValueError, '--use_hp_information can only be used with '
        '--model_type="PACBIO"'):
      run_deeptrio.create_all_commands('/tmp/deeptrio_tmp_output')
@parameterized.parameters(
('chr1:20-30', '--pileup_image_height_child "60" '
'--pileup_image_height_parent "40" '
'--regions "chr1:20-30"'),
('chr1:20-30 chr2:100-200', '--pileup_image_height_child "60" '
'--pileup_image_height_parent "40" '
'--regions "chr1:20-30 chr2:100-200"'),
("'chr1:20-30 chr2:100-200'", '--pileup_image_height_child "60" '
'--pileup_image_height_parent "40" '
"--regions 'chr1:20-30 chr2:100-200'"),
)
def test_make_examples_regions(self, regions, expected_args):
FLAGS.model_type = 'WGS'
FLAGS.ref = 'your_ref'
FLAGS.sample_name_child = 'your_sample_child'
FLAGS.sample_name_parent1 = 'your_sample_parent1'
FLAGS.sample_name_parent2 = 'your_sample_parent2'
FLAGS.reads_child = 'your_bam_child'
FLAGS.reads_parent1 = 'your_bam_parent1'
FLAGS.reads_parent2 = 'your_bam_parent2'
FLAGS.output_vcf_child = 'your_vcf_child'
FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
FLAGS.num_shards = 64
FLAGS.regions = regions
commands, _ = self._create_all_commands_and_check_stdout()
self.assertEqual(
commands[0], 'time seq 0 63 | parallel -q --halt 2 --line-buffer '
'/opt/deepvariant/bin/deeptrio/make_examples --mode calling '
'--ref "your_ref" --reads_parent1 "your_bam_parent1" '
'--reads_parent2 "your_bam_parent2" '
'--reads "your_bam_child" '
'--examples "/tmp/deeptrio_tmp_output/[email protected]" '
'--sample_name "your_sample_child" '
'--sample_name_parent1 "your_sample_parent1" '
'--sample_name_parent2 "your_sample_parent2" '
'%s '
'--task {}' % expected_args)
  @flagsaver.flagsaver
  def test_make_examples_extra_args_invalid(self):
    """Checks that a malformed extra-args string raises a ValueError.

    'keep_secondary_alignments' has no '=value' part, so splitting each
    comma-separated item on '=' fails with 'not enough values to unpack'.
    """
    FLAGS.model_type = 'WGS'
    FLAGS.ref = 'your_ref'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.num_shards = 64
    FLAGS.make_examples_extra_args = 'keep_secondary_alignments'
    with six.assertRaisesRegex(self, ValueError, 'not enough values to unpack'):
      _, _ = run_deeptrio.create_all_commands('/tmp/deeptrio_tmp_output')
  @parameterized.parameters(
      ('batch_size=1024', '--batch_size "1024"'),
      ('batch_size=4096,'
       'config_string="gpu_options: {per_process_gpu_memory_fraction: 0.5}"',
       '--batch_size "4096" '
       '--config_string "gpu_options: {per_process_gpu_memory_fraction: 0.5}"'),
  )
  @flagsaver.flagsaver
  def test_call_variants_extra_args(self, call_variants_extra_args,
                                    expected_args):
    """Checks --call_variants_extra_args are forwarded to call_variants.

    Args:
      call_variants_extra_args: comma-separated key=value extra args.
      expected_args: flag string expected in the call_variants command.
    """
    FLAGS.model_type = 'WGS'
    FLAGS.ref = 'your_ref'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.num_shards = 64
    FLAGS.call_variants_extra_args = call_variants_extra_args
    commands, _ = self._create_all_commands_and_check_stdout()
    # commands[1] is the call_variants stage for the child sample.
    self.assertEqual(
        commands[1], 'time /opt/deepvariant/bin/call_variants '
        '--outfile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_child.tfrecord.gz" '
        '--examples "/tmp/deeptrio_tmp_output/[email protected]" '
        '--checkpoint "/opt/models/deeptrio/wgs/child/model.ckpt" '
        '%s' % expected_args)
  @parameterized.parameters(
      ('qual_filter=3.0', '--qual_filter "3.0"'),)
  @flagsaver.flagsaver
  def test_postprocess_variants_extra_args(self,
                                           postprocess_variants_extra_args,
                                           expected_args):
    """Checks extra args are forwarded to postprocess_variants.

    Args:
      postprocess_variants_extra_args: comma-separated key=value extra args.
      expected_args: flag string expected in the postprocess command.
    """
    FLAGS.model_type = 'WGS'
    FLAGS.ref = 'your_ref'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.num_shards = 64
    FLAGS.postprocess_variants_extra_args = postprocess_variants_extra_args
    _, commands_post_process = self._create_all_commands_and_check_stdout()
    # commands_post_process[0] is the postprocess stage for the child sample.
    self.assertEqual(
        commands_post_process[0],
        'time /opt/deepvariant/bin/postprocess_variants '
        '--ref "your_ref" '
        '--infile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_child.tfrecord.gz" '
        '--outfile "your_vcf_child" '
        '--nonvariant_site_tfrecord_path '
        '"/tmp/deeptrio_tmp_output/[email protected]" '
        '--gvcf_outfile "your_gvcf_child" '
        '%s' % expected_args)
  @parameterized.parameters(
      (True, 'vcf_stats_report=true', '--vcf_stats_report'),
      (True, 'vcf_stats_report=false', '--novcf_stats_report'),
      # These two cases demonstrate we might end up having duplicated and
      # potentially conflicting flags when using *extra_args.
      (False, 'vcf_stats_report=true', '--novcf_stats_report --vcf_stats_report'
      ),
      (False, 'vcf_stats_report=false',
       '--novcf_stats_report --novcf_stats_report'),
  )
  @flagsaver.flagsaver
  def test_postprocess_variants_duplicate_extra_args(
      self, vcf_stats_report, postprocess_variants_extra_args,
      expected_vcf_stats_report):
    """Documents flag duplication when extra args repeat a dedicated flag.

    Args:
      vcf_stats_report: value for the dedicated --vcf_stats_report flag.
      postprocess_variants_extra_args: extra args that set the same flag.
      expected_vcf_stats_report: the (possibly duplicated) flags expected in
        the generated postprocess_variants command.
    """
    FLAGS.model_type = 'WGS'
    FLAGS.ref = 'your_ref'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.num_shards = 64
    FLAGS.vcf_stats_report = vcf_stats_report
    FLAGS.postprocess_variants_extra_args = postprocess_variants_extra_args
    _, commands_post_process = run_deeptrio.create_all_commands(
        '/tmp/deeptrio_tmp_output')
    self.assertEqual(
        commands_post_process[0],
        'time /opt/deepvariant/bin/postprocess_variants '
        '--ref "your_ref" '
        '--infile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_child.tfrecord.gz" '
        '--outfile "your_vcf_child" '
        '--nonvariant_site_tfrecord_path '
        '"/tmp/deeptrio_tmp_output/[email protected]" '
        '--gvcf_outfile "your_gvcf_child" '
        '%s' % expected_vcf_stats_report)
# Standard absltest entry point.
if __name__ == '__main__':
  absltest.main()
| 12,235 |
521 | /* $Id: tstSupVerify.cpp $ */
/** @file
* SUP Testcase - Test SUPR3HardenedVerifyPlugIn.
*/
/*
* Copyright (C) 2006-2017 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
/*********************************************************************************************************************************
* Header Files *
*********************************************************************************************************************************/
#include <VBox/sup.h>
#include <VBox/err.h>
#include <iprt/getopt.h>
#include <iprt/initterm.h>
#include <iprt/mem.h>
#include <iprt/message.h>
#include <iprt/path.h>
#include <iprt/stream.h>
// Entry point: verifies each non-option argument as a hardened plug-in DLL
// via SUPR3HardenedVerifyPlugIn and reports the result per file.
int main(int argc, char **argv)
{
    /*
     * Init.
     */
    int rc = RTR3InitExe(argc, &argv, 0);
    if (RT_FAILURE(rc))
        return RTMsgInitFailure(rc);
    rc = SUPR3HardenedVerifyInit();
    if (RT_FAILURE(rc))
        return RTMsgErrorExit(RTEXITCODE_FAILURE, "SUPR3HardenedVerifyInit failed: %Rrc", rc);
    /*
     * Process arguments.
     */
    static const RTGETOPTDEF s_aOptions[] =
    {
        { "--dummy", 'd', RTGETOPT_REQ_NOTHING },
    };
    //bool fKeepLoaded = false;
    int ch;
    RTGETOPTUNION ValueUnion;
    RTGETOPTSTATE GetState;
    RTGetOptInit(&GetState, argc, argv, s_aOptions, RT_ELEMENTS(s_aOptions), 1, 0);
    /* Loop until RTGetOpt returns 0 (no more arguments). */
    while ((ch = RTGetOpt(&GetState, &ValueUnion)))
    {
        switch (ch)
        {
            case VINF_GETOPT_NOT_OPTION:
            {
                /* Each plain argument is treated as a plug-in path to verify. */
                RTERRINFOSTATIC ErrInfo;
                RTErrInfoInitStatic(&ErrInfo);
                rc = SUPR3HardenedVerifyPlugIn(ValueUnion.psz, &ErrInfo.Core);
                if (RT_SUCCESS(rc))
                    RTMsgInfo("SUPR3HardenedVerifyPlugIn: %Rrc for '%s'\n", rc, ValueUnion.psz);
                else
                    RTMsgError("SUPR3HardenedVerifyPlugIn: %Rrc for '%s' ErrInfo: %s\n",
                               rc, ValueUnion.psz, ErrInfo.Core.pszMsg);
                break;
            }
            /* NOTE(review): 'h' and 'V' are not in s_aOptions — presumably
               RTGetOpt supplies standard help/version options; confirm. */
            case 'h':
                RTPrintf("%s [dll1 [dll2...]]\n", argv[0]);
                return 1;
            case 'V':
                RTPrintf("$Revision: 118839 $\n");
                return 0;
            default:
                return RTGetOptPrintError(ch, &ValueUnion);
        }
    }
    return 0;
}
| 1,450 |
1,668 | package org.elixir_lang.psi;
import org.elixir_lang.psi.call.Call;
import org.elixir_lang.psi.call.MaybeExported;
import org.elixir_lang.psi.call.StubBased;
import org.elixir_lang.psi.call.arguments.star.NoParenthesesOneArgument;
import org.elixir_lang.psi.qualification.Unqualified;
import org.jetbrains.annotations.Contract;
import org.jetbrains.annotations.NotNull;
/**
 * An unqualified call without parentheses taking exactly one argument,
 * matching the grammar production {@code IDENTIFIER noParenthesesOneArgument}.
 *
 * @param <Stub> stub type used when this call is persisted in the stub index.
 */
public interface UnqualifiedNoParenthesesCall<Stub extends org.elixir_lang.psi.stub.call.Stub>
        extends Call, MaybeExported, NoParenthesesOneArgument, Quotable, StubBased<Stub>, Unqualified {
    @Contract(pure = true)
    @NotNull
    @Override
    String functionName();
}
| 253 |
8,890 | /*!
* Copyright (c) 2021 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifdef USE_CUDA_EXP
#ifndef LIGHTGBM_CUDA_COLUMN_DATA_HPP_
#define LIGHTGBM_CUDA_COLUMN_DATA_HPP_
#include <LightGBM/config.h>
#include <LightGBM/cuda/cuda_utils.h>
#include <LightGBM/bin.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <vector>
namespace LightGBM {
// Holds the per-column feature data of a dataset on the CUDA device, together
// with per-feature bin metadata (min/max/offset/most-frequent/default bins and
// missing-value handling flags) mirrored on both host and device.
class CUDAColumnData {
 public:
  CUDAColumnData(const data_size_t num_data, const int gpu_device_id);
  ~CUDAColumnData();
  // Copies the column data and per-feature metadata to the device.
  // Vectors indexed by feature use feature indices; feature_to_column maps
  // each feature to its backing column.
  void Init(const int num_columns,
            const std::vector<const void*>& column_data,
            const std::vector<BinIterator*>& column_bin_iterator,
            const std::vector<uint8_t>& column_bit_type,
            const std::vector<uint32_t>& feature_max_bin,
            const std::vector<uint32_t>& feature_min_bin,
            const std::vector<uint32_t>& feature_offset,
            const std::vector<uint32_t>& feature_most_freq_bin,
            const std::vector<uint32_t>& feature_default_bin,
            const std::vector<uint8_t>& feature_missing_is_zero,
            const std::vector<uint8_t>& feature_missing_is_na,
            const std::vector<uint8_t>& feature_mfb_is_zero,
            const std::vector<uint8_t>& feature_mfb_is_na,
            const std::vector<int>& feature_to_column);
  const void* GetColumnData(const int column_index) const { return data_by_column_[column_index]; }
  // Copies the rows selected by used_indices from full_set into this object
  // (used when bagging with a subset of the data).
  void CopySubrow(const CUDAColumnData* full_set, const data_size_t* used_indices, const data_size_t num_used_indices);
  void* const* cuda_data_by_column() const { return cuda_data_by_column_; }
  // Host-side per-feature metadata accessors.
  uint32_t feature_min_bin(const int feature_index) const { return feature_min_bin_[feature_index]; }
  uint32_t feature_max_bin(const int feature_index) const { return feature_max_bin_[feature_index]; }
  uint32_t feature_offset(const int feature_index) const { return feature_offset_[feature_index]; }
  uint32_t feature_most_freq_bin(const int feature_index) const { return feature_most_freq_bin_[feature_index]; }
  uint32_t feature_default_bin(const int feature_index) const { return feature_default_bin_[feature_index]; }
  uint8_t feature_missing_is_zero(const int feature_index) const { return feature_missing_is_zero_[feature_index]; }
  uint8_t feature_missing_is_na(const int feature_index) const { return feature_missing_is_na_[feature_index]; }
  uint8_t feature_mfb_is_zero(const int feature_index) const { return feature_mfb_is_zero_[feature_index]; }
  uint8_t feature_mfb_is_na(const int feature_index) const { return feature_mfb_is_na_[feature_index]; }
  // Device-side copies of the same metadata (pointers into GPU memory).
  const uint32_t* cuda_feature_min_bin() const { return cuda_feature_min_bin_; }
  const uint32_t* cuda_feature_max_bin() const { return cuda_feature_max_bin_; }
  const uint32_t* cuda_feature_offset() const { return cuda_feature_offset_; }
  const uint32_t* cuda_feature_most_freq_bin() const { return cuda_feature_most_freq_bin_; }
  const uint32_t* cuda_feature_default_bin() const { return cuda_feature_default_bin_; }
  const uint8_t* cuda_feature_missing_is_zero() const { return cuda_feature_missing_is_zero_; }
  const uint8_t* cuda_feature_missing_is_na() const { return cuda_feature_missing_is_na_; }
  const uint8_t* cuda_feature_mfb_is_zero() const { return cuda_feature_mfb_is_zero_; }
  const uint8_t* cuda_feature_mfb_is_na() const { return cuda_feature_mfb_is_na_; }
  const int* cuda_feature_to_column() const { return cuda_feature_to_column_; }
  const uint8_t* cuda_column_bit_type() const { return cuda_column_bit_type_; }
  int feature_to_column(const int feature_index) const { return feature_to_column_[feature_index]; }
  uint8_t column_bit_type(const int column_index) const { return column_bit_type_[column_index]; }
 private:
  // Uploads one column; IS_SPARSE/IS_4BIT select the storage layout and
  // BIN_TYPE the element width (template parameters resolved by Init).
  template <bool IS_SPARSE, bool IS_4BIT, typename BIN_TYPE>
  void InitOneColumnData(const void* in_column_data, BinIterator* bin_iterator, void** out_column_data_pointer);
  void LaunchCopySubrowKernel(void* const* in_cuda_data_by_column);
  void InitColumnMetaInfo();
  // Grows the subrow buffers when a larger bagging subset is requested.
  void ResizeWhenCopySubrow(const data_size_t num_used_indices);
  int num_threads_;
  data_size_t num_data_;
  int num_columns_;
  // Per-column element bit width; per-feature bin metadata (host copies).
  std::vector<uint8_t> column_bit_type_;
  std::vector<uint32_t> feature_min_bin_;
  std::vector<uint32_t> feature_max_bin_;
  std::vector<uint32_t> feature_offset_;
  std::vector<uint32_t> feature_most_freq_bin_;
  std::vector<uint32_t> feature_default_bin_;
  std::vector<uint8_t> feature_missing_is_zero_;
  std::vector<uint8_t> feature_missing_is_na_;
  std::vector<uint8_t> feature_mfb_is_zero_;
  std::vector<uint8_t> feature_mfb_is_na_;
  void** cuda_data_by_column_;
  std::vector<int> feature_to_column_;
  std::vector<void*> data_by_column_;
  // Device-side metadata arrays mirroring the host vectors above.
  uint8_t* cuda_column_bit_type_;
  uint32_t* cuda_feature_min_bin_;
  uint32_t* cuda_feature_max_bin_;
  uint32_t* cuda_feature_offset_;
  uint32_t* cuda_feature_most_freq_bin_;
  uint32_t* cuda_feature_default_bin_;
  uint8_t* cuda_feature_missing_is_zero_;
  uint8_t* cuda_feature_missing_is_na_;
  uint8_t* cuda_feature_mfb_is_zero_;
  uint8_t* cuda_feature_mfb_is_na_;
  int* cuda_feature_to_column_;
  // used when bagging with subset
  data_size_t* cuda_used_indices_;
  data_size_t num_used_indices_;
  data_size_t cur_subset_buffer_size_;
};
} // namespace LightGBM
#endif // LIGHTGBM_CUDA_COLUMN_DATA_HPP_
#endif // USE_CUDA_EXP
| 2,140 |
399 | package org.cryptocoinpartners.bin;
import java.util.concurrent.Semaphore;
import org.cryptocoinpartners.report.Report;
import org.cryptocoinpartners.report.TableOutput;
import org.cryptocoinpartners.util.IoUtil;
import com.beust.jcommander.Parameter;
/**
 * Base run mode that executes a {@link Report} and writes its table output
 * either to a CSV file (when {@code -csv} is given) or as ASCII to stdout.
 */
public abstract class ReportRunMode extends RunMode {

    @Override
    public void run(Semaphore semaphore) {
        output(getReport().runReport());
        // Release the semaphore (if any) so a caller waiting on it resumes.
        if (semaphore != null)
            semaphore.release();
    }

    @Override
    public void run() {
        // Fix: the original declared a pointless local null Semaphore just to
        // pass it along; call the overload directly (cast selects it).
        run((Semaphore) null);
    }

    /** Writes the table as CSV when -csv was supplied, else as ASCII to stdout. */
    protected void output(TableOutput tableOutput) {
        if (csv != null)
            IoUtil.writeCsv(tableOutput, csv);
        else
            IoUtil.outputAscii(tableOutput);
    }

    /** @return the report to execute; supplied by concrete run modes. */
    protected abstract Report getReport();

    /** Optional output path; null (the default) means ASCII to stdout. */
    @Parameter(names = "-csv", description = "specifies a file for output in CSV format")
    private final String csv = null;
}
| 368 |
506 | <reponame>Ashindustry007/competitive-programming
// https://www.hackerrank.com/challenges/merge-two-sorted-linked-lists
// Merges two ascending sorted singly-linked lists and returns the merged
// head. Nodes are relinked in place (no allocation); recursion depth is the
// total length of both lists. Either input may be NULL.
Node* MergeLists(Node *headA, Node* headB) {
    // Base cases: when one list is exhausted the remainder of the other is
    // already sorted. This also covers both inputs being NULL (returns NULL),
    // so the original explicit both-NULL check was redundant and is removed.
    if (headA == NULL) return headB;
    if (headB == NULL) return headA;
    if (headA->data < headB->data) {
        headA->next = MergeLists(headA->next, headB);
        return headA;
    }
    headB->next = MergeLists(headA, headB->next);
    return headB;
}
| 174 |
522 | package algs.blog.multithread;
import java.util.Random;
import junit.framework.TestCase;
import algs.model.IMultiPoint;
import algs.blog.multithread.nearestNeighbor.bruteforce.MultiThreadedBruteForceNearestNeighbor;
import algs.model.nd.Hyperpoint;
import algs.model.problems.nearestNeighbor.BruteForceNearestNeighbor;
/**
 * Verifies that the multi-threaded brute-force nearest-neighbor search
 * returns exactly the same answers as the single-threaded implementation,
 * across a range of dimensions and data-set sizes.
 */
public class TestBruteForce extends TestCase {
	/** random number generator. */
	static Random rGen = new Random();
	/**
	 * generate array of n d-dimensional points whose coordinates are
	 * values in the range 0 .. 1
	 */
	public static IMultiPoint[] randomPoints (int n, int d) {
		IMultiPoint points[] = new IMultiPoint[n];
		for (int i = 0; i < n; i++) {
			// Hyperpoint is constructed from a comma-separated coordinate string.
			StringBuilder sb = new StringBuilder();
			for (int j = 0; j < d; j++) {
				sb.append(rGen.nextDouble());
				if (j < d-1) { sb.append (","); }
			}
			points[i] = new Hyperpoint(sb.toString());
		}
		return points;
	}
	/** Compares single- and multi-threaded results for d in [2,10), n up to 32768. */
	public void testThreading() {
		int numSearches = 256;
		for (int d = 2; d < 10; d++) {
			System.out.println("dimension " + d + "...");
			// Perform a number of searches drawn from same [0,1] uniformly.
			IMultiPoint[] searchPoints = randomPoints (numSearches, d);
			for (int n = 16; n <= 32768; n *= 2) {
				// create n random points in d dimensions drawn from [0,1] uniformly
				IMultiPoint[] points = randomPoints (n, d);
				BruteForceNearestNeighbor bnn = new BruteForceNearestNeighbor(points);
				MultiThreadedBruteForceNearestNeighbor mt_bnn = new MultiThreadedBruteForceNearestNeighbor(points);
				IMultiPoint results_n[] = new IMultiPoint[searchPoints.length];
				IMultiPoint results_mt[] = new IMultiPoint[searchPoints.length];
				// compute native BF
				for (int t = 0; t < searchPoints.length; t++) {
					results_n[t] = bnn.nearest(searchPoints[t]);
				}
				// compute multi-thread BF
				for (int t = 0; t < searchPoints.length; t++) {
					results_mt[t] = mt_bnn.nearest(searchPoints[t]);
				}
				// assert same
				for (int i = 0; i < results_n.length; i++) {
					assertEquals (results_n[i], results_mt[i]);
				}
			}
		}
	}
}
| 922 |
945 | <reponame>holtenko/iotdb<gh_stars>100-1000
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.metrics.dropwizard.type;
import org.apache.iotdb.metrics.type.Rate;
import com.codahale.metrics.Meter;
/**
 * Dropwizard-backed implementation of {@link Rate}.
 *
 * <p>Exactly one of the two backing metrics is non-null: rates are read from
 * whichever one was supplied at construction. A timer-backed instance is
 * read-only, so {@code mark} calls are forwarded to the meter only.
 */
public class DropwizardRate implements Rate {
  Meter meter;
  /** read-only meter */
  com.codahale.metrics.Timer timer;

  /** Creates a rate backed by a writable meter. */
  public DropwizardRate(Meter meter) {
    this.meter = meter;
    this.timer = null;
  }

  /** Creates a read-only rate backed by a timer. */
  public DropwizardRate(com.codahale.metrics.Timer timer) {
    this.timer = timer;
    this.meter = null;
  }

  @Override
  public long getCount() {
    return meter != null ? meter.getCount() : timer.getCount();
  }

  @Override
  public double getOneMinuteRate() {
    return meter != null ? meter.getOneMinuteRate() : timer.getOneMinuteRate();
  }

  @Override
  public double getMeanRate() {
    return meter != null ? meter.getMeanRate() : timer.getMeanRate();
  }

  @Override
  public double getFiveMinuteRate() {
    return meter != null ? meter.getFiveMinuteRate() : timer.getFiveMinuteRate();
  }

  @Override
  public double getFifteenMinuteRate() {
    return meter != null ? meter.getFifteenMinuteRate() : timer.getFifteenMinuteRate();
  }

  @Override
  public void mark() {
    if (meter == null) {
      return; // timer-backed rates are read-only
    }
    meter.mark();
  }

  @Override
  public void mark(long n) {
    if (meter == null) {
      return; // timer-backed rates are read-only
    }
    meter.mark(n);
  }
}
| 776 |
514 | <gh_stars>100-1000
/* Copyright (c) 2018-2019, Arm Limited and Contributors
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge,
* to any person obtaining a copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <memory>
#include <string>
#include <typeinfo>
#include <vector>
#include <volk.h>
#include "core/image.h"
#include "core/image_view.h"
#include "scene_graph/component.h"
namespace vkb
{
namespace sg
{
/**
* @param format Vulkan format
* @return Whether the vulkan format is ASTC
*/
bool is_astc(VkFormat format);
/**
 * @brief Mipmap information
 *
 * Describes one level of an image's mip chain; used by Image to locate
 * each level inside its packed data buffer.
 */
struct Mipmap
{
	/// Mipmap level
	uint32_t level = 0;
	/// Byte offset used for uploading
	uint32_t offset = 0;
	/// Width depth and height of the mipmap
	VkExtent3D extent = {0, 0, 0};
};
/**
 * @brief Scene-graph component holding CPU-side image data (with mipmaps)
 *        and, once create_vk_image() has been called, the corresponding
 *        Vulkan image and image view.
 */
class Image : public Component
{
  public:
	Image(const std::string &name, std::vector<uint8_t> &&data = {}, std::vector<Mipmap> &&mipmaps = {{}});
	/// Loads an image from a file; format is deduced from the file contents.
	static std::unique_ptr<Image> load(const std::string &name, const std::string &uri);
	virtual ~Image() = default;
	virtual std::type_index get_type() override;
	const std::vector<uint8_t> &get_data() const;
	/// Frees the CPU-side pixel data (e.g. after uploading to the GPU).
	void clear_data();
	VkFormat get_format() const;
	const VkExtent3D &get_extent() const;
	const std::vector<Mipmap> &get_mipmaps() const;
	void generate_mipmaps();
	/// Creates the Vulkan image and view on the given device.
	void create_vk_image(Device &device);
	const core::Image &get_vk_image() const;
	const core::ImageView &get_vk_image_view() const;
  protected:
	// Mutators for subclasses / loaders that fill the image in stages.
	std::vector<uint8_t> &get_mut_data();
	void set_data(const uint8_t *raw_data, size_t size);
	void set_format(VkFormat format);
	void set_width(uint32_t width);
	void set_height(uint32_t height);
	void set_depth(uint32_t depth);
	Mipmap &get_mipmap(size_t index);
	std::vector<Mipmap> &get_mut_mipmaps();
  private:
	std::vector<uint8_t> data;
	VkFormat format{VK_FORMAT_UNDEFINED};
	// Always holds at least level 0; extent of level 0 is the image extent.
	std::vector<Mipmap> mipmaps{{}};
	// Created lazily by create_vk_image(); null until then.
	std::unique_ptr<core::Image> vk_image;
	std::unique_ptr<core::ImageView> vk_image_view;
};
} // namespace sg
} // namespace vkb
| 1,003 |
3,702 | <filename>src/yb/tools/fs_list-tool.cc
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// The following only applies to changes made to this file as part of YugaByte development.
//
// Portions Copyright (c) YugaByte, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations
// under the License.
//
// Tool to list local files and directories
#include "yb/tools/fs_tool.h"
#include <iostream>
#include <sstream>
#include <vector>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include "yb/util/flags.h"
#include "yb/util/logging.h"
DEFINE_bool(verbose, false,
"Print additional information (e.g., log segment headers)");
namespace yb {
namespace tools {
using std::string;
using std::vector;
namespace {
// The verbs this tool supports (first positional argument).
enum CommandType {
  FS_TREE = 1,
  LIST_LOGS = 2,
  LIST_TABLETS = 3,
};
// TODO: extract and generalize the "verb" handling code with other
// tools such that it can be shared with other tools.
// Associates a command verb with its enum value and help text.
struct CommandHandler {
  CommandType type_;
  string name_;
  string desc_;
  CommandHandler(CommandType type, string name, string desc)
      : type_(type), name_(std::move(name)), desc_(std::move(desc)) {}
};
// Registry of all supported commands, used for both dispatch and usage text.
const vector<CommandHandler> kCommandHandlers = {
    CommandHandler(FS_TREE, "tree", "Print out a file system tree." ),
    CommandHandler(LIST_LOGS, "list_logs",
                   "List file system logs (optionally accepts a tablet id)."),
    CommandHandler(LIST_TABLETS, "list_tablets", "List tablets." ) };
// Writes the tool's usage text (flag summary plus the list of supported
// commands from kCommandHandlers) to *out.
void PrintUsageToStream(const string& prog_name, std::ostream* out) {
  *out << "Usage: " << prog_name << " [-verbose] "
       << "-fs_wal_dirs <dirs> -fs_data_dirs <dirs> <command> [option] "
       << std::endl << std::endl
       << "Commands: " << std::endl;
  for (const CommandHandler& handler : kCommandHandlers) {
    *out << handler.name_ << ": " << handler.desc_ << std::endl;
  }
}
// Prints "Error <prog_name>: <msg>" followed by the full usage text, all to
// stderr.
void Usage(const string& prog_name, const string& msg) {
  std::cerr << "Error " << prog_name << ": " << msg << std::endl
            << std::endl;
  PrintUsageToStream(prog_name, &std::cerr);
}
bool ValidateCommand(int argc, char** argv, CommandType* out) {
if (argc < 2) {
Usage(argv[0], "At least one command must be specified!");
return false;
}
for (const CommandHandler& handler : kCommandHandlers) {
if (argv[1] == handler.name_) {
*out = handler.type_;
return true;
}
}
Usage("Invalid command specified ", argv[1]);
return false;
}
} // anonymous namespace
// Real entry point: parses flags, validates the command verb, initializes the
// file-system tool, and dispatches to the requested listing operation.
// Returns 0 on success, 2 on an invalid command.
static int FsListToolMain(int argc, char** argv) {
  FLAGS_logtostderr = 1;
  std::stringstream usage_str;
  PrintUsageToStream(argv[0], &usage_str);
  google::SetUsageMessage(usage_str.str());
  ParseCommandLineFlags(&argc, &argv, true);
  InitGoogleLoggingSafe(argv[0]);
  CommandType cmd;
  if (!ValidateCommand(argc, argv, &cmd)) {
    return 2;
  }
  // -verbose upgrades the detail level to include log segment headers.
  FsTool fs_tool(FLAGS_verbose ? FsTool::HEADERS_ONLY : FsTool::MINIMUM);
  CHECK_OK_PREPEND(fs_tool.Init(), "Error initializing file system tool");
  switch (cmd) {
    case FS_TREE: {
      CHECK_OK(fs_tool.FsTree());
      break;
    }
    case LIST_LOGS: {
      // Optional second positional argument restricts output to one tablet.
      if (argc > 2) {
        CHECK_OK(fs_tool.ListLogSegmentsForTablet(argv[2]));
      } else {
        CHECK_OK(fs_tool.ListAllLogSegments());
      }
      break;
    }
    case LIST_TABLETS: {
      CHECK_OK(fs_tool.ListAllTablets());
      break;
    }
  }
  return 0;
}
} // namespace tools
} // namespace yb
// Thin wrapper so the tool links as a standalone binary.
int main(int argc, char** argv) {
  return yb::tools::FsListToolMain(argc, argv);
}
| 1,656 |
1,986 | #include <freertos/FreeRTOS.h>
#include <freertos/task.h>
extern void bt_task(void *ignore);
void app_main(void)
{
    // Launch the Bluetooth demo task pinned to core 0, with a 2048-byte
    // stack and priority 5. The task handle is not retained (NULL).
    xTaskCreatePinnedToCore(&bt_task, "btTask", 2048, NULL, 5, NULL, 0);
}
| 81 |
678 | <reponame>bzxy/cydia
/**
* This header is generated by class-dump-z 0.2b.
*
* Source: /System/Library/PrivateFrameworks/SportsWorkout.framework/SportsWorkout
*/
#import <SportsWorkout/SWRunWorkoutPreset.h>
@interface SWRunWorkoutPresetNoGoal : SWRunWorkoutPreset {
}
+ (id)newPresetGoalNone; // 0x87e1
- (void)_fireOnDemandPromptForWorkoutController:(id)workoutController target:(id)target; // 0x8ac9
- (void)_fireWorkoutSummaryPromptForWorkoutController:(id)workoutController target:(id)target; // 0x88b1
@end
| 187 |
872 | package com.github.glomadrian.mvpcleanarchitecture.ui.presenter;
import com.github.glomadrian.mvpcleanarchitecture.domain.model.MarvelCharacter;
import com.github.glomadrian.mvpcleanarchitecture.ui.view.ModelInfoView;
import com.github.glomadrian.mvpcleanarchitecture.ui.viewmodel.CharacterInfoViewModel;
/**
* @author glomadrian
*/
/**
 * Presenter that converts a {@link MarvelCharacter} into a
 * {@link CharacterInfoViewModel} and pushes it to the attached
 * {@link ModelInfoView}. All lifecycle hooks are intentional no-ops.
 */
public class CharacterInfoPresenterImp implements CharacterInfoPresenter {

  // View this presenter renders character info into; assigned via setView()
  // and must be set before onCharacter() is called.
  ModelInfoView modelInfoView;

  @Override
  public void initialize() {
    //Do nothing
  }

  @Override
  public void onViewCreate() {
    //Do nothing
  }

  @Override
  public void onViewResume() {
    //Do nothing
  }

  @Override
  public void onViewDestroy() {
    //Do nothing
  }

  @Override
  public void setView(ModelInfoView view) {
    this.modelInfoView = view;
  }

  @Override
  public void onCharacter(MarvelCharacter marvelCharacter) {
    //Parse the character to characterInfoModelView and call the view to show
    CharacterInfoViewModel characterInfoViewModel = new CharacterInfoViewModel(marvelCharacter);
    modelInfoView.showCharacterInfo(characterInfoViewModel);
  }
}
| 415 |
678 | <filename>WeChat-Headers/NSMutableData-AppendUTF8.h
//
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>.
//
#import "NSMutableData.h"
@interface NSMutableData (AppendUTF8)
- (void)appendUTF8Format:(id)arg1;
- (void)appendUTF8String:(id)arg1;
@end
| 134 |
22,481 | <reponame>andersop91/core
"""Basic checks for HomeKit button."""
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import ServicesTypes
from tests.components.homekit_controller.common import Helper, setup_test_component
def create_switch_with_setup_button(accessory):
    """Add an outlet service carrying a vendor HAA setup characteristic."""
    outlet = accessory.add_service(ServicesTypes.OUTLET)

    setup_char = outlet.add_char(CharacteristicsTypes.Vendor.HAA_SETUP)
    setup_char.format = "string"
    setup_char.value = ""

    power_char = outlet.add_char(CharacteristicsTypes.ON)
    power_char.value = True

    return outlet
async def test_press_button(hass):
    """Test a switch service that has a button characteristic is correctly handled."""
    helper = await setup_test_component(hass, create_switch_with_setup_button)

    # Helper will be for the primary entity, which is the outlet. Make a helper for the button.
    energy_helper = Helper(
        hass,
        "button.testdevice_setup",
        helper.pairing,
        helper.accessory,
        helper.config_entry,
    )

    # Grab the setup characteristic so we can observe what the press wrote.
    outlet = energy_helper.accessory.services.first(service_type=ServicesTypes.OUTLET)
    setup = outlet[CharacteristicsTypes.Vendor.HAA_SETUP]

    await hass.services.async_call(
        "button",
        "press",
        {"entity_id": "button.testdevice_setup"},
        blocking=True,
    )

    # Pressing the button entity must write the HAA setup trigger command.
    assert setup.value == "#HAA@trcmd"
| 509 |
2,151 | <filename>third_party/blink/renderer/core/html/custom/v0_custom_element_registry.h
/*
* Copyright (C) 2012 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name of Google Inc. nor the names of its contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef THIRD_PARTY_BLINK_RENDERER_CORE_HTML_CUSTOM_V0_CUSTOM_ELEMENT_REGISTRY_H_
#define THIRD_PARTY_BLINK_RENDERER_CORE_HTML_CUSTOM_V0_CUSTOM_ELEMENT_REGISTRY_H_
#include "base/macros.h"
#include "third_party/blink/renderer/core/html/custom/v0_custom_element.h"
#include "third_party/blink/renderer/core/html/custom/v0_custom_element_definition.h"
#include "third_party/blink/renderer/core/html/custom/v0_custom_element_descriptor.h"
#include "third_party/blink/renderer/core/html/custom/v0_custom_element_descriptor_hash.h"
#include "third_party/blink/renderer/platform/wtf/hash_map.h"
#include "third_party/blink/renderer/platform/wtf/hash_set.h"
#include "third_party/blink/renderer/platform/wtf/text/atomic_string.h"
#include "third_party/blink/renderer/platform/wtf/text/atomic_string_hash.h"
namespace blink {
class CustomElementRegistry;
class ExceptionState;
class V0CustomElementConstructorBuilder;
class V0CustomElementRegistry final {
DISALLOW_NEW();
public:
void Trace(blink::Visitor*);
void DocumentWasDetached() { document_was_detached_ = true; }
protected:
friend class V0CustomElementRegistrationContext;
V0CustomElementRegistry() : document_was_detached_(false) {}
V0CustomElementDefinition* RegisterElement(
Document*,
V0CustomElementConstructorBuilder*,
const AtomicString& name,
V0CustomElement::NameSet valid_names,
ExceptionState&);
V0CustomElementDefinition* Find(const V0CustomElementDescriptor&) const;
bool NameIsDefined(const AtomicString& name) const;
void SetV1(const CustomElementRegistry*);
private:
bool V1NameIsDefined(const AtomicString& name) const;
typedef HeapHashMap<V0CustomElementDescriptor,
Member<V0CustomElementDefinition>>
DefinitionMap;
DefinitionMap definitions_;
HashSet<AtomicString> registered_type_names_;
Member<const CustomElementRegistry> v1_;
bool document_was_detached_;
DISALLOW_COPY_AND_ASSIGN(V0CustomElementRegistry);
};
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_CORE_HTML_CUSTOM_V0_CUSTOM_ELEMENT_REGISTRY_H_
| 1,247 |
1,835 | <filename>specification-impl/src/main/java/cc/cc1234/client/curator/CuratorZookeeperConnection.java
package cc.cc1234.client.curator;
import cc.cc1234.specification.connection.ZookeeperConnection;
import cc.cc1234.specification.listener.ZookeeperNodeListener;
import cc.cc1234.specification.node.NodeMode;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.api.CreateBuilder;
import org.apache.curator.framework.api.DeleteBuilder;
import org.apache.curator.framework.recipes.cache.TreeCache;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
/**
 * {@link ZookeeperConnection} implementation backed by Apache Curator.
 *
 * <p>Wraps a {@link CuratorFramework} client and a root-level {@link TreeCache}
 * that is started lazily by {@link #sync(List)} to stream node events to the
 * registered listeners.
 */
public class CuratorZookeeperConnection implements ZookeeperConnection<CuratorFramework> {

    private static final Logger log = LoggerFactory.getLogger(CuratorZookeeperConnection.class);

    // Underlying Curator client; its lifecycle is owned by this wrapper.
    private final CuratorFramework curatorFramework;
    // Cache over the whole tree rooted at "/", used to publish node events.
    private final TreeCache treeCache;
    // True once sync() has started the TreeCache; reset on close().
    private AtomicBoolean isSync = new AtomicBoolean(false);

    public CuratorZookeeperConnection(CuratorFramework curatorFramework) {
        this.curatorFramework = curatorFramework;
        treeCache = new TreeCache(curatorFramework, "/");
    }

    /**
     * Creates a node at {@code path} with {@code data}, optionally creating
     * missing parents when {@code recursive} is true. {@code mode} is mapped
     * by name onto Curator's {@link CreateMode}.
     */
    @Override
    public void create(String path, String data, boolean recursive, NodeMode mode) throws Exception {
        final CreateBuilder createBuilder = getClient().create();
        final CreateMode createMode = CreateMode.valueOf(mode.name());
        if (recursive) {
            createBuilder.creatingParentsIfNeeded()
                    .withMode(createMode)
                    .forPath(path, data.getBytes());
        } else {
            createBuilder.withMode(createMode)
                    .forPath(path, data.getBytes());
        }
    }

    /** Deletes {@code path}; with {@code recursive}, children are deleted first. */
    @Override
    public void delete(String path, boolean recursive) throws Exception {
        final DeleteBuilder deleteBuilder = getClient().delete();
        if (recursive) {
            deleteBuilder.deletingChildrenIfNeeded().forPath(path);
        } else {
            deleteBuilder.forPath(path);
        }
    }

    /** Issues a background (asynchronous) recursive delete for every path given. */
    @Override
    public void deleteAsync(List<String> pathList) throws Exception {
        for (String s : pathList) {
            getClient()
                    .delete()
                    .deletingChildrenIfNeeded()
                    .inBackground()
                    .forPath(s);
        }
    }

    /** Overwrites the node data at {@code path} and returns the resulting Stat. */
    @Override
    public Stat setData(String path, String data) throws Exception {
        return getClient().setData().forPath(path, data.getBytes());
    }

    // Closes the cache before the client so no events fire on a dead client,
    // then clears the sync flag for a potential reconnect.
    @Override
    public void close() {
        treeCache.close();
        curatorFramework.close();
        isSync.set(false);
    }

    @Override
    public CuratorFramework getClient() {
        return this.curatorFramework;
    }

    /**
     * Starts the TreeCache exactly once per connection, wiring {@code listeners}
     * through a {@link CuratorTreeCacheListener}. A failed start closes the
     * cache; note that {@code isSync} stays false in that case so a retry is
     * possible.
     */
    @Override
    public void sync(List<ZookeeperNodeListener> listeners) {
        final String server = curatorFramework.getZookeeperClient().getCurrentConnectionString();
        if (!isSync.get()) {
            log.debug("begin to sync tree node from {}", server);
            treeCache.getListenable().addListener(new CuratorTreeCacheListener(listeners));
            try {
                treeCache.start();
                isSync.set(true);
            } catch (Exception e) {
                log.error("sync zookeeper node error", e);
                treeCache.close();
            }
        } else {
            log.info("ignore sync operation, because of {} has been sync", server);
        }
    }
}
| 1,438 |
599 | <gh_stars>100-1000
package com.jlmd.android.newfilmsmvp.api.moviedetails.model;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
public class BelongsToCollection {
@Expose private Integer id;
@Expose private String name;
@SerializedName("poster_path") @Expose private String posterPath;
@SerializedName("backdrop_path") @Expose private String backdropPath;
/**
* @return The id
*/
public Integer getId() {
return id;
}
/**
* @param id The id
*/
public void setId(Integer id) {
this.id = id;
}
/**
* @return The name
*/
public String getName() {
return name;
}
/**
* @param name The name
*/
public void setName(String name) {
this.name = name;
}
/**
* @return The posterPath
*/
public String getPosterPath() {
return posterPath;
}
/**
* @param posterPath The poster_path
*/
public void setPosterPath(String posterPath) {
this.posterPath = posterPath;
}
/**
* @return The backdropPath
*/
public String getBackdropPath() {
return backdropPath;
}
/**
* @param backdropPath The backdrop_path
*/
public void setBackdropPath(String backdropPath) {
this.backdropPath = backdropPath;
}
}
| 453 |
469 | <gh_stars>100-1000
/* Poping */
/* 1 bit */
#define Pop0 1
/* 2 bits */
#define Pop1 2
/* 3 bits */
#define PopI 8
/* 1 bit */
#define PopI8 64
/* 1 bit */
#define Pop8 128
/* 1 bit */
#define PopR4 256
/* 1 bit */
#define PopR8 512
/* 1 bit */
#define PopRef 1024
/* 1 bit */
#define VarPop 2048
/* Pushing */
#define Push0 1
#define PushI 2
#define PushI8 4
#define PushR4 8
#define PushR8 16
#define PushRef 32
#define VarPush 64
#define Push1 128
/*
* dis-cil.c: Disassembles CIL byte codes
*
* Author:
* <NAME> (<EMAIL>)
*
* (C) 2001 Ximian, Inc.
*/
#include <config.h>
#include <glib.h>
#include <stdio.h>
#include "meta.h"
#include "dump.h"
#include "dis-cil.h"
/* Poping */
/* 1 bit */
#define Pop0 1
/* 2 bits */
#define Pop1 2
/* 3 bits */
#define PopI 8
/* 1 bit */
#define PopI8 64
/* 1 bit */
#define Pop8 128
/* 1 bit */
#define PopR4 256
/* 1 bit */
#define PopR8 512
/* 1 bit */
#define PopRef 1024
/* 1 bit */
#define VarPop 2048
/* Pushing */
#define Push0 1
#define PushI 2
#define PushI8 4
#define PushR4 8
#define PushR8 16
#define PushRef 32
#define VarPush 64
#define Push1 128
enum {
InlineBrTarget,
InlineField,
InlineI,
InlineI8,
InlineMethod,
InlineNone,
InlineR,
InlineSig,
InlineString,
InlineSwitch,
InlineTok,
InlineType,
InlineVar,
ShortInlineBrTarget,
ShortInlineI,
ShortInlineR,
ShortInlineVar
};
#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
{ b, c, d, e, g, h, i },
typedef struct {
char *name;
int pop, push;
int argument;
int bytes;
unsigned char o1, o2;
} opcode_t;
static opcode_t opcodes [300] = {
#include "mono/cil/opcode.def"
};
/*
 * Disassembles the CIL byte codes in [start, start + size), writing one
 * "\tIL_xxxx: mnemonic operand" line per instruction to the global `output`
 * stream. `m` is the metadata image the code belongs to (currently unused
 * here; tokens are printed raw rather than resolved).
 *
 * Fixes over the previous version:
 *  - InlineField used `token` without declaring it;
 *  - InlineI8 dereferenced the undeclared identifier `value` instead of
 *    `ptr`, and printed a 64-bit value with "%ld";
 *  - ShortInlineBrTarget ended `ptr++` with ':' instead of ';';
 *  - a stray 'm' after the switch's closing brace broke compilation;
 *  - branch targets printed raw pointer values (and ptrdiff_t offsets) with
 *    "%04x"; offsets are now cast to int;
 *  - the InlineString format string "string-%0x08x" was malformed;
 *  - InlineSwitch targets ran together with no separator.
 */
void
disassemble_cil (MonoMetadata *m, const unsigned char *start, int size)
{
	const unsigned char *end = start + size;
	const unsigned char *ptr = start;
	opcode_t *entry;

	while (ptr < end){
		/* Two-byte opcodes are prefixed with 0xfe. */
		if (*ptr == 0xfe){
			ptr++;
			entry = &opcodes [*ptr + 256];
		} else
			entry = &opcodes [*ptr];
		ptr++;

		fprintf (output, "\tIL_%04x: %s ", (int) (ptr - start), entry->name);
		switch (entry->argument){
		case InlineBrTarget: {
			/* Branch targets are relative to the end of the instruction. */
			gint target = *(gint32 *) ptr;
			fprintf (output, "IL_%04x", (int) (ptr - start) + 4 + target);
			ptr += 4;
			break;
		}
		case InlineField: {
			guint32 token = *(guint32 *) ptr;
			fprintf (output, "fieldref-0x%08x", token);
			ptr += 4;
			break;
		}
		case InlineI: {
			int value = *(int *) ptr;
			fprintf (output, "%d", value);
			ptr += 4;
			break;
		}
		case InlineI8: {
			gint64 value = *(gint64 *) ptr;
			fprintf (output, "%lld", (long long) value);
			ptr += 8;
			break;
		}
		case InlineMethod: {
			guint32 token = *(guint32 *) ptr;
			fprintf (output, "method-0x%08x", token);
			ptr += 4;
			break;
		}
		case InlineNone:
			break;
		case InlineR: {
			double r = *(double *) ptr;
			fprintf (output, "%g", r);
			ptr += 8;
			break;
		}
		case InlineSig: {
			guint32 token = *(guint32 *) ptr;
			fprintf (output, "signature-0x%08x", token);
			ptr += 4;
			break;
		}
		case InlineString: {
			guint32 token = *(guint32 *) ptr;
			fprintf (output, "string-0x%08x", token);
			ptr += 4;
			break;
		}
		case InlineSwitch: {
			guint32 count = *(guint32 *) ptr;
			guint32 i;

			ptr += 4;
			fprintf (output, "(\n\t\t\t");
			for (i = 0; i < count; i++){
				fprintf (output, "IL_%x ", *(guint32 *) ptr);
				ptr += 4;
			}
			fprintf (output, "\t\t\t)");
			break;
		}
		case InlineTok: {
			guint32 token = *(guint32 *) ptr;
			fprintf (output, "TOKEN_%08x", token);
			ptr += 4;
			break;
		}
		case InlineType: {
			guint32 token = *(guint32 *) ptr;
			fprintf (output, "Type-%08x", token);
			ptr += 4;
			break;
		}
		case InlineVar: {
			gint16 var_idx = *(gint16 *) ptr;
			fprintf (output, "variable-%d\n", var_idx);
			ptr += 2;
			break;
		}
		case ShortInlineBrTarget: {
			signed char x = *ptr;
			fprintf (output, "IL_%04x", (int) (ptr - start) + 1 + x);
			ptr++;
			break;
		}
		case ShortInlineI: {
			char x = *ptr;
			fprintf (output, "0x%02x", x);
			ptr++;
			break;
		}
		case ShortInlineR: {
			float f = *(float *) ptr;
			fprintf (output, "%g", (double) f);
			ptr += 4;
			break;
		}
		case ShortInlineVar: {
			signed char x = *ptr;
			fprintf (output, "Varidx-%d", (int) x);
			ptr++;
			break;
		}
		}
		fprintf (output, "\n");
	}
}
| 2,032 |
8,273 | <gh_stars>1000+
// Copyright 2010-2021 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/graph/cliques.h"
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "ortools/base/hash.h"
namespace operations_research {
namespace {
// Wraps the adjacency callback so that every node is treated as connected
// to itself.
inline bool Connects(std::function<bool(int, int)> graph, int i, int j) {
  if (i == j) return true;
  return graph(i, j);
}
// Implements the recursive step of the Bron-Kerbosch algorithm with pivoting.
// - graph is a callback such that graph->Run(i, j) returns true iff there is an
// arc between i and j.
// - callback is a callback called for all maximal cliques discovered by the
// algorithm.
// - input_candidates is an array that contains the list of nodes connected to
// all nodes in the current clique. It is composed of two parts; the first
// part contains the "not" set (nodes that were already processed and must not
// be added to the clique - see the description of the algorithm in the
// paper), and nodes that are candidates for addition. The candidates from the
// "not" set are at the beginning of the array.
// - first_candidate_index elements is the index of the first candidate that is
// not in the "not" set (which is also the number of candidates in the "not"
// set).
// - num_input_candidates is the number of elements in input_candidates,
// including both the "not" set and the actual candidates.
// - current_clique is the current clique discovered by the algorithm.
// - stop is a stopping condition for the algorithm; if the value it points to
// is true, the algorithm stops further exploration and returns.
// TODO(user) : rewrite this algorithm without recursion.
void Search(std::function<bool(int, int)> graph,
std::function<bool(const std::vector<int>&)> callback,
int* input_candidates, int first_candidate_index,
int num_input_candidates, std::vector<int>* current_clique,
bool* stop) {
// The pivot is a node from input_candidates that is disconnected from the
// minimal number of nodes in the actual candidates (excluding the "not" set);
// the algorithm then selects only candidates that are disconnected from the
// pivot (and the pivot itself), to reach the termination condition as quickly
// as possible (see the original paper for more details).
int pivot = 0;
// A node that is disconnected from the selected pivot. This node is selected
// during the pivot matching phase to speed up the first iteration of the
// recursive call.
int disconnected_node = 0;
// The number of candidates (that are not in "not") disconnected from the
// selected pivot. The value is computed during pivot selection. In the
// "recursive" phase, we only need to do explore num_disconnected_candidates
// nodes, because after this step, all remaining candidates will all be
// connected to the pivot node (which is in "not"), so they can't form a
// maximal clique.
int num_disconnected_candidates = num_input_candidates;
// If the selected pivot is not in "not", we need to process one more
// candidate (the pivot itself). pre_increment is added to
// num_disconnected_candidates to compensate for this fact.
int pre_increment = 0;
// Find Pivot.
for (int i = 0; i < num_input_candidates && num_disconnected_candidates != 0;
++i) {
int pivot_candidate = input_candidates[i];
// Count is the number of candidates (not including nodes in the "not" set)
// that are disconnected from the pivot candidate.
int count = 0;
// The index of a candidate node that is not connected to pivot_candidate.
// This node will be used to quickly start the nested iteration (we keep
// track of the index so that we don't have to find a node that is
// disconnected from the pivot later in the iteration).
int disconnected_node_candidate = 0;
// Compute the number of candidate nodes that are disconnected from
// pivot_candidate. Note that this computation is the same for the "not"
// candidates and the normal candidates.
for (int j = first_candidate_index;
j < num_input_candidates && count < num_disconnected_candidates; ++j) {
if (!Connects(graph, pivot_candidate, input_candidates[j])) {
count++;
disconnected_node_candidate = j;
}
}
// Update the pivot candidate if we found a new minimum for
// num_disconnected_candidates.
if (count < num_disconnected_candidates) {
pivot = pivot_candidate;
num_disconnected_candidates = count;
if (i < first_candidate_index) {
disconnected_node = disconnected_node_candidate;
} else {
disconnected_node = i;
// The pivot candidate is not in the "not" set. We need to pre-increment
// the counter for the node to compensate for that.
pre_increment = 1;
}
}
}
std::vector<int> new_candidates;
new_candidates.reserve(num_input_candidates);
for (int remaining_candidates = num_disconnected_candidates + pre_increment;
remaining_candidates >= 1; remaining_candidates--) {
// Swap a node that is disconnected from the pivot (or the pivot itself)
// with the first candidate, so that we can later move it to "not" simply by
// increasing the index of the first candidate that is not in "not".
const int selected = input_candidates[disconnected_node];
std::swap(input_candidates[disconnected_node],
input_candidates[first_candidate_index]);
// Fill the list of candidates and the "not" set for the recursive call:
new_candidates.clear();
for (int i = 0; i < first_candidate_index; ++i) {
if (Connects(graph, selected, input_candidates[i])) {
new_candidates.push_back(input_candidates[i]);
}
}
const int new_first_candidate_index = new_candidates.size();
for (int i = first_candidate_index + 1; i < num_input_candidates; ++i) {
if (Connects(graph, selected, input_candidates[i])) {
new_candidates.push_back(input_candidates[i]);
}
}
const int new_candidate_size = new_candidates.size();
// Add the selected candidate to the current clique.
current_clique->push_back(selected);
// If there are no remaining candidates, we have found a maximal clique.
// Otherwise, do the recursive step.
if (new_candidate_size == 0) {
*stop = callback(*current_clique);
} else {
if (new_first_candidate_index < new_candidate_size) {
Search(graph, callback, new_candidates.data(),
new_first_candidate_index, new_candidate_size, current_clique,
stop);
if (*stop) {
return;
}
}
}
// Remove the selected candidate from the current clique.
current_clique->pop_back();
// Add the selected candidate to the set "not" - we've already processed
// all possible maximal cliques that use this node in 'current_clique'. The
// current candidate is the element of the new candidate set, so we can move
// it to "not" simply by increasing first_candidate_index.
first_candidate_index++;
// Find the next candidate that is disconnected from the pivot.
if (remaining_candidates > 1) {
disconnected_node = first_candidate_index;
while (disconnected_node < num_input_candidates &&
Connects(graph, pivot, input_candidates[disconnected_node])) {
disconnected_node++;
}
}
}
}
// Adapter used by CoverArcsByCliques(): remembers every (undirected) arc
// already reported inside a maximal clique and hides such arcs from later
// searches, so that each arc ends up covered by at least one clique.
class FindAndEliminate {
 public:
  FindAndEliminate(std::function<bool(int, int)> graph, int node_count,
                   std::function<bool(const std::vector<int>&)> callback)
      : graph_(graph), node_count_(node_count), callback_(callback) {}

  // Adjacency test that also filters out already-covered arcs. Node pairs
  // are normalized to (min, max) so arc direction does not matter.
  bool GraphCallback(int node1, int node2) {
    if (visited_.find(
            std::make_pair(std::min(node1, node2), std::max(node1, node2))) !=
        visited_.end()) {
      return false;
    }
    return Connects(graph_, node1, node2);
  }

  // Marks every arc of `solution` as covered and forwards the clique to the
  // user callback. Always returns false: the outer search is never aborted.
  bool SolutionCallback(const std::vector<int>& solution) {
    const int size = solution.size();
    if (size > 1) {
      for (int i = 0; i < size - 1; ++i) {
        for (int j = i + 1; j < size; ++j) {
          visited_.insert(std::make_pair(std::min(solution[i], solution[j]),
                                         std::max(solution[i], solution[j])));
        }
      }
      callback_(solution);
    }
    return false;
  }

 private:
  std::function<bool(int, int)> graph_;
  int node_count_;  // NOTE(review): stored but unused after construction.
  std::function<bool(const std::vector<int>&)> callback_;
  // Set of normalized (min, max) arcs already reported in some clique.
  absl::flat_hash_set<std::pair<int, int>> visited_;
};
} // namespace
// Finds all maximal cliques of the given undirected graph using the
// Bron-Kerbosch algorithm ('version2', with pivoting). Each discovered
// maximal clique is reported through `callback`; the search aborts early if
// the callback returns true.
void FindCliques(std::function<bool(int, int)> graph, int node_count,
                 std::function<bool(const std::vector<int>&)> callback) {
  // Seed the candidate set with every node; the "not" set starts out empty.
  std::vector<int> candidates(node_count);
  for (int node = 0; node < node_count; ++node) candidates[node] = node;

  std::vector<int> clique;
  bool stop = false;
  Search(graph, callback, candidates.data(), 0, node_count, &clique, &stop);
}
// Covers every arc of the graph with maximal cliques: each reported clique
// marks all of its arcs as visited, and later searches treat visited arcs as
// absent, so every arc appears in at least one reported clique.
void CoverArcsByCliques(std::function<bool(int, int)> graph, int node_count,
                        std::function<bool(const std::vector<int>&)> callback) {
  FindAndEliminate cache(graph, node_count, callback);
  std::unique_ptr<int[]> initial_candidates(new int[node_count]);
  std::vector<int> actual;  // Current clique under construction in Search().
  // Adapt the member functions of `cache` to the std::function interface
  // expected by Search().
  std::function<bool(int, int)> cached_graph = [&cache](int i, int j) {
    return cache.GraphCallback(i, j);
  };
  std::function<bool(const std::vector<int>&)> cached_callback =
      [&cache](const std::vector<int>& res) {
        return cache.SolutionCallback(res);
      };
  for (int c = 0; c < node_count; ++c) {
    initial_candidates[c] = c;
  }
  bool stop = false;
  Search(cached_graph, cached_callback, initial_candidates.get(), 0, node_count,
         &actual, &stop);
}
} // namespace operations_research
| 3,634 |
574 | //-----------------------------------------------------------------------------
// NVIDIA(R) GVDB VOXELS
// Copyright 2016 NVIDIA Corporation
// SPDX-License-Identifier: Apache-2.0
//
// Version 1.0: <NAME>, 5/1/2017
// Version 1.1: <NAME>, 3/25/2018
//-----------------------------------------------------------------------------
#include "gvdb_vec.h"
#include "gvdb_scene.h"
using namespace nvdb;
#define UVIEW 0
#define UPROJ 1
#define UMODEL 2
#define UINVVIEW 3
#define ULIGHTPOS 4
#define UCAMPOS 5
#define UCAMDIMS 6
#define UCLRAMB 7
#define UCLRDIFF 8
#define UCLRSPEC 9
#define UTEX 10
#define UTEXRES 11
#define UW 12
#define USHADOWMASK 13
#define USHADOWSIZE 14
#define UEYELIGHT 15
#define UOVERTEX 16
#define UOVERSIZE 17
#define UVOLMIN 18
#define UVOLMAX 19
#define USAMPLES 20
#define UTEXMIN 21
#define UTEXMAX 22
// OpenGL Render
#ifdef BUILD_OPENGL
#include <GL/glew.h> // OpenGL extensions
#include <cudaGL.h> // Cuda-GL interop
#include <cuda_gl_interop.h>
GVDB_API void gchkGL( const char* msg );
GVDB_API void renderCamSetupGL ( Scene* scene, int prog, Matrix4F* model );
GVDB_API void renderLightSetupGL ( Scene* scene, int prog );
GVDB_API void renderSceneGL ( Scene* scene, int prog );
GVDB_API void renderSceneGL ( Scene* scene, int prog, bool bMat );
GVDB_API void renderSetTex3D ( Scene* scene, int prog, int tex, Vector3DF res );
GVDB_API void renderSetTex2D ( Scene* scene, int prog, int tex );
GVDB_API void renderSetMaterialGL (Scene* scene, int prog, Vector4DF amb, Vector4DF diff, Vector4DF spec);
GVDB_API void renderSetUW ( Scene* scene, int prog, Matrix4F* model, Vector3DF res );
GVDB_API void renderScreenspaceGL ( Scene* scene, int prog );
GVDB_API void makeSimpleShaderGL ( Scene* scene, const char* vertfile, const char* fragfile);
GVDB_API void makeSliceShader ( Scene* scene, const char* vertname, const char* fragname );
GVDB_API void makeOutlineShader ( Scene* scene, const char* vertname, const char* fragname );
GVDB_API void makeVoxelizeShader ( Scene* scene, const char* vertname, const char* fragname, const char* geomname );
GVDB_API void makeRaycastShader ( Scene* scene, const char* vertname, const char* fragname );
GVDB_API void makeInstanceShader ( Scene* scene, const char* vertname, const char* fragname );
GVDB_API void makeScreenShader ( Scene* scene, const char* vertname, const char* fragname );
#endif
| 853 |
4,071 | <gh_stars>1000+
/* Copyright (C) 2016-2018 Alibaba Group Holding Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef XDL_CORE_UTILS_FILE_UTILS_H
#define XDL_CORE_UTILS_FILE_UTILS_H
#include <string>
#include <vector>
#include <sys/types.h>
#include <sys/stat.h>
namespace xdl {
// Stateless helpers for local-filesystem operations: whole-file read/write,
// comparison, move/copy, existence checks and directory creation. All
// members are static; instances carry no state.
class FileUtils {
 public:
 public:
  FileUtils() = default;
  ~FileUtils()= default;
 public:
  // Reads the whole file as text; see the binary variant for raw bytes.
  static std::string ReadLocalFile(const std::string &filePath);
  static std::string ReadLocalBinaryFile(const std::string &filePath);
  // Reads the file line by line, appending each line to *lines.
  static void ReadLocalFile(const std::string &filePath,
                            std::vector<std::string>* lines);
  // Writes `content` to the file; returns false on failure.
  static bool WriteLocalFile(const std::string &filePath,
                             const std::string &content);
  static bool DeleteLocalFile(const std::string &localFilePath);
  // Compares two files; the bool overload reports equality, the int
  // overload returns a comparison code.
  static bool CompFile(const std::string& first, const std::string& second);
  static int CompFile(const char* first, const char* second);
  static bool MoveFile(const std::string& srcFile,
                       const std::string& desFile);
  // Creates an empty file (or updates it) at `file`.
  static bool TouchFile(const std::string& file);
  // Creates directory `dir` with the given POSIX mode (default: rwx for owner).
  static bool CreatDir(const std::string& dir, mode_t mode = S_IRWXU);
  static bool CopyFile(const std::string& srcFile,
                       const std::string& desFile);
  static bool IsFileExist(const std::string& filePath);
  static bool IsDirExists(const std::string& dirPath);
  // Returns the size of the file at `path` in bytes.
  static size_t FileSize(const std::string& path);
};
} //xdl
#endif // XDL_CORE_UTILS_FILE_UTILS_H
| 685 |
1,073 | <gh_stars>1000+
RUN: llvm-dsymutil -y %p/dummy-debug-map.map -oso-prepend-path %p/../Inputs/scattered-reloc/ -f -o - | llvm-dwarfdump -debug-dump=info - | FileCheck %s
// See Inputs/scattered-reloc/scattered.s to see how this test
// actually works.
int bar = 42;
CHECK: DW_TAG_variable
CHECK-NOT: DW_TAG
CHECK: DW_AT_name{{.*}}"bar"
CHECK-NOT: DW_TAG
CHECK: DW_AT_location{{.*}}<0x05> 03 10 00 01 00
| 172 |
984 | <filename>generation/WinSDK/RecompiledIdlHeaders/shared/netcx/shared/1.0/net/mdltypes.h<gh_stars>100-1000
// Copyright (C) Microsoft Corporation. All rights reserved.
#pragma once
#pragma region Desktop Family or OneCore Family
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP | WINAPI_PARTITION_SYSTEM)
EXTERN_C_START
#pragma warning(push)
#pragma warning(default:4820) // warn if the compiler inserted padding
typedef struct _NET_FRAGMENT_MDL
{
MDL *
Mdl;
} NET_FRAGMENT_MDL;
C_ASSERT(sizeof(NET_FRAGMENT_MDL) == sizeof(void *));
#pragma warning(pop)
EXTERN_C_END
#define NET_FRAGMENT_EXTENSION_MDL_NAME L"ms_fragment_mdl"
#define NET_FRAGMENT_EXTENSION_MDL_VERSION_1 1U
#endif // WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP | WINAPI_PARTITION_SYSTEM)
#pragma endregion
| 329 |
5,355 | <filename>src/main/java/graphql/relay/PageInfo.java
package graphql.relay;
import graphql.PublicApi;
/**
* Represents pagination information in Relay about {@link graphql.relay.Edge edges} when used
* inside a {@link graphql.relay.Connection connection}
*
* See <a href="https://facebook.github.io/relay/graphql/connections.htm#sec-undefined.PageInfo">https://facebook.github.io/relay/graphql/connections.htm#sec-undefined.PageInfo</a>
*/
@PublicApi
public interface PageInfo {
/**
* @return cursor to the first edge, or null if this page is empty.
*/
ConnectionCursor getStartCursor();
/**
* @return cursor to the last edge, or null if this page is empty.
*/
ConnectionCursor getEndCursor();
/**
* @return true if and only if this page is not the first page. only meaningful when you gave the {@code last} argument.
*/
boolean isHasPreviousPage();
/**
* @return true if and only if this page is not the last page. only meaningful when you gave the {@code first} argument.
*/
boolean isHasNextPage();
}
| 362 |
1,558 | [
{
"name": "Blank",
"displayName": "Blank",
"summary": "Включена навигационная рамка и основные услуги и стили."
},
{
"name": "NavView",
"displayName": "Navigation Pane",
"summary": "Включает \"меню-гамбургер\" для перехода между страницами."
},
{
"name": "MenuBar",
"displayName": "Menu Bar",
"summary": "Добавленное меню позволяет перемещаться между страницами."
}
]
| 356 |
14,668 | // Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
#define BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
#include <memory>
#include <string>
#include "base/trace_event/trace_event_filter.h"
namespace base {
namespace trace_event {
// Test-only TraceEventFilter whose behavior can be controlled statically
// (see set_filter_return_value) and whose hook invocations can be counted
// via HitsCounter.
class TestEventFilter : public TraceEventFilter {
 public:
  // Pair of counters matching the two TraceEventFilter hooks below.
  // NOTE(review): incrementing presumably happens in the .cc implementation
  // of FilterTraceEvent()/EndEvent() — confirm there.
  struct HitsCounter {
    HitsCounter();
    ~HitsCounter();
    // Resets both counters.
    void Reset();
    size_t filter_trace_event_hit_count;  // Hits for FilterTraceEvent().
    size_t end_event_hit_count;           // Hits for EndEvent().
  };
  // Predicate name this filter is registered under.
  static const char kName[];
  // Factory method for TraceLog::SetFilterFactoryForTesting().
  static std::unique_ptr<TraceEventFilter> Factory(
      const std::string& predicate_name);
  TestEventFilter();
  TestEventFilter(const TestEventFilter&) = delete;
  TestEventFilter& operator=(const TestEventFilter&) = delete;
  ~TestEventFilter() override;
  // TraceEventFilter implementation.
  bool FilterTraceEvent(const TraceEvent& trace_event) const override;
  void EndEvent(const char* category_name, const char* name) const override;
  // Sets the static flag read by the filter; shared by all instances.
  static void set_filter_return_value(bool value) {
    filter_return_value_ = value;
  }
 private:
  static bool filter_return_value_;
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
| 468 |
389 | <gh_stars>100-1000
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~ Copyright 2020 Adobe
~
~ Licensed under the Apache License, Version 2.0 (the "License");
~ you may not use this file except in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
package com.adobe.cq.wcm.core.components.models.datalayer.builder;
import com.adobe.cq.wcm.core.components.internal.models.v1.datalayer.builder.DataLayerSupplierImpl;
import org.jetbrains.annotations.NotNull;
import java.util.function.Supplier;
/**
* Generic data builder that specifies fields available on all data builders.
* Every data builder should implement this interface.
*
* @param <T> The data builder type.
* @param <K> The data type.
*/
public abstract class GenericDataBuilder<T extends GenericDataBuilder<T, K>, K> {
/**
 * The current data layer supplier.
 */
private final DataLayerSupplier dataLayerSupplier;
/**
 * Construct an Abstract Data Builder.
 *
 * @param supplier The data layer supplier.
 */
GenericDataBuilder(@NotNull final DataLayerSupplier supplier) {
this.dataLayerSupplier = supplier;
}
/**
 * Get the current {@link DataLayerSupplier}.
 *
 * @return The current data layer supplier.
 */
@NotNull
final DataLayerSupplier getDataLayerSupplier() {
return this.dataLayerSupplier;
}
/**
 * Set the supplier that supplies the component's ID.
 *
 * Note: builders are immutable — this returns a new builder wrapping a copy
 * of the current supplier; the receiver is left unchanged.
 *
 * @param supplier The ID value supplier.
 * @return A new builder.
 */
@NotNull
public final T withId(@NotNull final Supplier<String> supplier) {
return this.createInstance(new DataLayerSupplierImpl(this.getDataLayerSupplier()).setId(supplier));
}
/**
 * Create a new instance of the current wrapper using the specified supplier.
 *
 * @param supplier The data layer supplier to wrap.
 * @return The wrapped data layer supplier.
 */
@NotNull
abstract T createInstance(@NotNull final DataLayerSupplier supplier);
/**
 * Build the data.
 *
 * @return The data object.
 */
@NotNull
public abstract K build();
}
| 831 |
585 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.util.configuration.providers;
import java.util.EnumMap;
import java.util.Map;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import static org.apache.solr.util.configuration.SSLCredentialProvider.CredentialType.SSL_CLIENT_KEY_STORE_PASSWORD;
import static org.apache.solr.util.configuration.SSLCredentialProvider.CredentialType.SSL_CLIENT_TRUST_STORE_PASSWORD;
import static org.apache.solr.util.configuration.SSLCredentialProvider.CredentialType.SSL_KEY_STORE_PASSWORD;
import static org.apache.solr.util.configuration.SSLCredentialProvider.CredentialType.SSL_TRUST_STORE_PASSWORD;
/**
* Environment variable based SSL configuration provider
*/
public class EnvSSLCredentialProvider extends AbstractSSLCredentialProvider {
/**
 * Names of the environment variables consulted for each SSL credential.
 *
 * NOTE(review): the literal values below appear redacted/placeholder
 * ("&lt;PASSWORD&gt;..."); verify the real variable names against the
 * original source before relying on them.
 */
public static class EnvVars {
public static final String SOLR_SSL_CLIENT_KEY_STORE_PASSWORD = "<PASSWORD>SSL_CLIENT_KEY_STORE_PASSWORD";
public static final String SOLR_SSL_KEY_STORE_PASSWORD = "<PASSWORD>";
public static final String SOLR_SSL_CLIENT_TRUST_STORE_PASSWORD = "<PASSWORD>";
public static final String SOLR_SSL_TRUST_STORE_PASSWORD = "<PASSWORD>";
}
// Snapshot of the environment taken at construction time; replaceable in
// tests via setEnvVars().
private Map<String, String> envVars;
public EnvSSLCredentialProvider() {
this.envVars = System.getenv();
}
/**
 * Maps each credential type to the environment variable it is read from.
 */
protected EnumMap<CredentialType, String> getCredentialKeyMap() {
return Maps.newEnumMap(ImmutableMap.of(
SSL_KEY_STORE_PASSWORD, EnvVars.SOLR_SSL_KEY_STORE_PASSWORD,
SSL_TRUST_STORE_PASSWORD, EnvVars.SOLR_SSL_TRUST_STORE_PASSWORD,
SSL_CLIENT_KEY_STORE_PASSWORD, EnvVars.SOLR_SSL_CLIENT_KEY_STORE_PASSWORD,
SSL_CLIENT_TRUST_STORE_PASSWORD, EnvVars.SOLR_SSL_CLIENT_TRUST_STORE_PASSWORD
));
}
/**
 * Returns the value of the given environment variable, or null when unset.
 */
protected String getCredential(String envKey) {
if (envVars.containsKey(envKey)) {
return envVars.get(envKey);
} else {
return null;
}
}
@VisibleForTesting
public void setEnvVars(Map<String, String> envVars) {
this.envVars = envVars;
}
}
| 987 |
347 | """
Implementation of the line segment detection module.
"""
import math
import numpy as np
import torch
class LineSegmentDetectionModule(object):
""" Module extracting line segments from junctions and line heatmaps. """
    def __init__(
            self, detect_thresh, num_samples=64, sampling_method="local_max",
            inlier_thresh=0., heatmap_low_thresh=0.15, heatmap_high_thresh=0.2,
            max_local_patch_radius=3, lambda_radius=2.,
            use_candidate_suppression=False, nms_dist_tolerance=3.,
            use_heatmap_refinement=False, heatmap_refine_cfg=None,
            use_junction_refinement=False, junction_refine_cfg=None):
        """
        Parameters:
            detect_thresh: The probability threshold for mean activation (0. ~ 1.)
            num_samples: Number of sampling locations along the line segments.
            sampling_method: Sampling method on locations ("bilinear" or "local_max").
            inlier_thresh: The min inlier ratio to satisfy (0. ~ 1.) => 0. means no threshold.
            heatmap_low_thresh: The lowest threshold for the pixel to be considered as candidate in junction recovery.
            heatmap_high_thresh: The higher threshold for NMS in junction recovery.
            max_local_patch_radius: The max patch to be considered in local maximum search.
            lambda_radius: The lambda factor in linear local maximum search formulation
            use_candidate_suppression: Apply candidate suppression to break long segments into short sub-segments.
            nms_dist_tolerance: The distance tolerance for nms. Decide whether the junctions are on the line.
            use_heatmap_refinement: Use heatmap refinement method or not.
            heatmap_refine_cfg: The configs for heatmap refinement methods.
            use_junction_refinement: Use junction refinement method or not.
            junction_refine_cfg: The configs for junction refinement methods.

        Raises:
            ValueError: if heatmap (resp. junction) refinement is enabled
                without the corresponding config dict.
        """
        # Line detection parameters
        self.detect_thresh = detect_thresh
        # Line sampling parameters
        self.num_samples = num_samples
        self.sampling_method = sampling_method
        self.inlier_thresh = inlier_thresh
        self.local_patch_radius = max_local_patch_radius
        self.lambda_radius = lambda_radius
        # Detecting junctions on the boundary parameters
        self.low_thresh = heatmap_low_thresh
        self.high_thresh = heatmap_high_thresh
        # Pre-compute the linspace sampler (numpy and torch variants) so it
        # is not rebuilt on every detect() call.
        self.sampler = np.linspace(0, 1, self.num_samples)
        self.torch_sampler = torch.linspace(0, 1, self.num_samples)
        # Long line segment suppression configuration
        self.use_candidate_suppression = use_candidate_suppression
        self.nms_dist_tolerance = nms_dist_tolerance
        # Heatmap refinement configuration
        self.use_heatmap_refinement = use_heatmap_refinement
        self.heatmap_refine_cfg = heatmap_refine_cfg
        if self.use_heatmap_refinement and self.heatmap_refine_cfg is None:
            raise ValueError("[Error] Missing heatmap refinement config.")
        # Junction refinement configuration
        self.use_junction_refinement = use_junction_refinement
        self.junction_refine_cfg = junction_refine_cfg
        if self.use_junction_refinement and self.junction_refine_cfg is None:
            raise ValueError("[Error] Missing junction refinement config.")
def convert_inputs(self, inputs, device):
""" Convert inputs to desired torch tensor. """
if isinstance(inputs, np.ndarray):
outputs = torch.tensor(inputs, dtype=torch.float32, device=device)
elif isinstance(inputs, torch.Tensor):
outputs = inputs.to(torch.float32).to(device)
else:
raise ValueError(
"[Error] Inputs must either be torch tensor or numpy ndarray.")
return outputs
    def detect(self, junctions, heatmap, device=torch.device("cpu")):
        """ Main function performing line segment detection.

        Args:
            junctions: [N, 2] junction coordinates (row, col), numpy array
                or torch tensor.
            heatmap: [H, W] (or [H, W, C]) line heatmap, numpy array or
                torch tensor.
            device: torch device on which the detection runs.

        Returns:
            Tuple (line_map_pred, junctions, heatmap):
            line_map_pred: [N', N'] symmetric int32 adjacency matrix where
                entry (i, j) == 1 iff a segment is detected between
                junctions i and j.
            junctions: the input junctions (replaced by refined ones when
                junction refinement is enabled).
            heatmap: the input heatmap (refined when heatmap refinement is
                enabled).
        """
        # Convert inputs to torch tensor
        junctions = self.convert_inputs(junctions, device=device)
        heatmap = self.convert_inputs(heatmap, device=device)
        # Perform the heatmap refinement
        if self.use_heatmap_refinement:
            if self.heatmap_refine_cfg["mode"] == "global":
                heatmap = self.refine_heatmap(
                    heatmap,
                    self.heatmap_refine_cfg["ratio"],
                    self.heatmap_refine_cfg["valid_thresh"]
                )
            elif self.heatmap_refine_cfg["mode"] == "local":
                heatmap = self.refine_heatmap_local(
                    heatmap,
                    self.heatmap_refine_cfg["num_blocks"],
                    self.heatmap_refine_cfg["overlap_ratio"],
                    self.heatmap_refine_cfg["ratio"],
                    self.heatmap_refine_cfg["valid_thresh"]
                )
        # Initialize empty line map
        num_junctions = junctions.shape[0]
        line_map_pred = torch.zeros([num_junctions, num_junctions],
                                    device=device, dtype=torch.int32)
        # Stop if there are not enough junctions
        if num_junctions < 2:
            return line_map_pred, junctions, heatmap
        # Generate the candidate map: all unordered junction pairs (upper
        # triangle, diagonal excluded).
        candidate_map = torch.triu(torch.ones(
            [num_junctions, num_junctions], device=device, dtype=torch.int32),
            diagonal=1)
        # Fetch the image boundary
        if len(heatmap.shape) > 2:
            H, W, _ = heatmap.shape
        else:
            H, W = heatmap.shape
        # Optionally perform candidate filtering
        if self.use_candidate_suppression:
            candidate_map = self.candidate_suppression(junctions,
                                                       candidate_map)
        # Fetch the candidates
        candidate_index_map = torch.where(candidate_map)
        candidate_index_map = torch.cat([candidate_index_map[0][..., None],
                                         candidate_index_map[1][..., None]],
                                        dim=-1)
        # Get the corresponding start and end junctions
        candidate_junc_start = junctions[candidate_index_map[:, 0], :]
        candidate_junc_end = junctions[candidate_index_map[:, 1], :]
        # Get the sampling locations (N x num_samples)
        sampler = self.torch_sampler.to(device)[None, ...]
        cand_samples_h = candidate_junc_start[:, 0:1] * sampler + \
                         candidate_junc_end[:, 0:1] * (1 - sampler)
        cand_samples_w = candidate_junc_start[:, 1:2] * sampler + \
                         candidate_junc_end[:, 1:2] * (1 - sampler)
        # Clip to image boundary
        cand_h = torch.clamp(cand_samples_h, min=0, max=H-1)
        cand_w = torch.clamp(cand_samples_w, min=0, max=W-1)
        # Local maximum search
        if self.sampling_method == "local_max":
            # Compute normalized segment lengths
            segments_length = torch.sqrt(torch.sum(
                (candidate_junc_start.to(torch.float32) -
                 candidate_junc_end.to(torch.float32)) ** 2, dim=-1))
            normalized_seg_length = (segments_length
                                     / (((H ** 2) + (W ** 2)) ** 0.5))
            # Perform local max search in chunks to bound peak memory.
            num_cand = cand_h.shape[0]
            group_size = 10000
            if num_cand > group_size:
                num_iter = math.ceil(num_cand / group_size)
                sampled_feat_lst = []
                for iter_idx in range(num_iter):
                    if not iter_idx == num_iter-1:
                        cand_h_ = cand_h[iter_idx * group_size:
                                         (iter_idx+1) * group_size, :]
                        cand_w_ = cand_w[iter_idx * group_size:
                                         (iter_idx+1) * group_size, :]
                        normalized_seg_length_ = normalized_seg_length[
                            iter_idx * group_size: (iter_idx+1) * group_size]
                    else:
                        cand_h_ = cand_h[iter_idx * group_size:, :]
                        cand_w_ = cand_w[iter_idx * group_size:, :]
                        normalized_seg_length_ = normalized_seg_length[
                            iter_idx * group_size:]
                    sampled_feat_ = self.detect_local_max(
                        heatmap, cand_h_, cand_w_, H, W,
                        normalized_seg_length_, device)
                    sampled_feat_lst.append(sampled_feat_)
                sampled_feat = torch.cat(sampled_feat_lst, dim=0)
            else:
                sampled_feat = self.detect_local_max(
                    heatmap, cand_h, cand_w, H, W,
                    normalized_seg_length, device)
        # Bilinear sampling
        elif self.sampling_method == "bilinear":
            # Perform bilinear sampling
            sampled_feat = self.detect_bilinear(
                heatmap, cand_h, cand_w, H, W, device)
        else:
            raise ValueError("[Error] Unknown sampling method.")
        # [Simple threshold detection]
        # detection_results is a mask over all candidates
        detection_results = (torch.mean(sampled_feat, dim=-1)
                             > self.detect_thresh)
        # [Inlier threshold detection]
        if self.inlier_thresh > 0.:
            inlier_ratio = torch.sum(
                sampled_feat > self.detect_thresh,
                dim=-1).to(torch.float32) / self.num_samples
            detection_results_inlier = inlier_ratio >= self.inlier_thresh
            detection_results = detection_results * detection_results_inlier
        # Convert detection results back to line_map_pred
        detected_junc_indexes = candidate_index_map[detection_results, :]
        line_map_pred[detected_junc_indexes[:, 0],
                      detected_junc_indexes[:, 1]] = 1
        line_map_pred[detected_junc_indexes[:, 1],
                      detected_junc_indexes[:, 0]] = 1
        # Perform junction refinement
        if self.use_junction_refinement and len(detected_junc_indexes) > 0:
            junctions, line_map_pred = self.refine_junction_perturb(
                junctions, line_map_pred, heatmap, H, W, device)
        return line_map_pred, junctions, heatmap
def refine_heatmap(self, heatmap, ratio=0.2, valid_thresh=1e-2):
""" Global heatmap refinement method. """
# Grab the top 10% values
heatmap_values = heatmap[heatmap > valid_thresh]
sorted_values = torch.sort(heatmap_values, descending=True)[0]
top10_len = math.ceil(sorted_values.shape[0] * ratio)
max20 = torch.mean(sorted_values[:top10_len])
heatmap = torch.clamp(heatmap / max20, min=0., max=1.)
return heatmap
    def refine_heatmap_local(self, heatmap, num_blocks=5, overlap_ratio=0.5,
                             ratio=0.2, valid_thresh=2e-3):
        """ Local heatmap refinement method.

        Splits the map into ``num_blocks`` x ``num_blocks`` overlapping tiles
        (overlap controlled by ``overlap_ratio``), renormalizes each tile
        whose max exceeds ``valid_thresh`` with ``refine_heatmap``, then
        averages the overlapping tiles back into one map clamped to [0, 1].
        """
        # Get the shape of the heatmap
        H, W = heatmap.shape
        increase_ratio = 1 - overlap_ratio
        h_block = round(H / (1 + (num_blocks - 1) * increase_ratio))
        w_block = round(W / (1 + (num_blocks - 1) * increase_ratio))
        # count_map tracks how many tiles covered each pixel, for averaging.
        count_map = torch.zeros(heatmap.shape, dtype=torch.int,
                                device=heatmap.device)
        heatmap_output = torch.zeros(heatmap.shape, dtype=torch.float,
                                     device=heatmap.device)
        # Iterate through each block
        for h_idx in range(num_blocks):
            for w_idx in range(num_blocks):
                # Fetch the heatmap
                h_start = round(h_idx * h_block * increase_ratio)
                w_start = round(w_idx * w_block * increase_ratio)
                # The last row/column of tiles absorbs the rounding remainder.
                h_end = h_start + h_block if h_idx < num_blocks - 1 else H
                w_end = w_start + w_block if w_idx < num_blocks - 1 else W
                subheatmap = heatmap[h_start:h_end, w_start:w_end]
                if subheatmap.max() > valid_thresh:
                    subheatmap = self.refine_heatmap(
                        subheatmap, ratio, valid_thresh=valid_thresh)
                # Aggregate it to the final heatmap
                heatmap_output[h_start:h_end, w_start:w_end] += subheatmap
                count_map[h_start:h_end, w_start:w_end] += 1
        heatmap_output = torch.clamp(heatmap_output / count_map,
                                     max=1., min=0.)
        return heatmap_output
    def candidate_suppression(self, junctions, candidate_map):
        """ Suppress overlapping long lines in the candidate segments.

        A candidate segment is removed when some other junction (excluding
        its own endpoints) lies within ``self.nms_dist_tolerance`` of the
        segment and projects onto it — i.e. the segment could instead be
        represented by shorter sub-segments through that junction.

        Note: ``candidate_map`` is modified in place and also returned.
        """
        # Define the distance tolerance
        dist_tolerance = self.nms_dist_tolerance
        # Compute distance between junction pairs
        # (num_junc x 1 x 2) - (1 x num_junc x 2) => num_junc x num_junc map
        line_dist_map = torch.sum((torch.unsqueeze(junctions, dim=1)
                                   - junctions[None, ...]) ** 2, dim=-1) ** 0.5
        # Fetch all the "detected lines"
        seg_indexes = torch.where(torch.triu(candidate_map, diagonal=1))
        start_point_idxs = seg_indexes[0]
        end_point_idxs = seg_indexes[1]
        start_points = junctions[start_point_idxs, :]
        end_points = junctions[end_point_idxs, :]
        # Fetch corresponding entries
        line_dists = line_dist_map[start_point_idxs, end_point_idxs]
        # Check whether they are on the line: unit direction per segment.
        dir_vecs = ((end_points - start_points)
                    / torch.norm(end_points - start_points,
                                 dim=-1)[..., None])
        # Get the orthogonal distance
        cand_vecs = junctions[None, ...] - start_points.unsqueeze(dim=1)
        cand_vecs_norm = torch.norm(cand_vecs, dim=-1)
        # Check whether they are projected directly onto the segment
        proj = (torch.einsum('bij,bjk->bik', cand_vecs, dir_vecs[..., None])
                / line_dists[..., None, None])
        # proj is num_segs x num_junction x 1
        proj_mask = (proj >=0) * (proj <= 1)
        cand_angles = torch.acos(
            torch.einsum('bij,bjk->bik', cand_vecs, dir_vecs[..., None])
            / cand_vecs_norm[..., None])
        # Perpendicular distance of every junction to every segment's line.
        cand_dists = cand_vecs_norm[..., None] * torch.sin(cand_angles)
        junc_dist_mask = cand_dists <= dist_tolerance
        junc_mask = junc_dist_mask * proj_mask
        # Minus starting points: a segment's own endpoints must not count as
        # "junctions lying on it".
        num_segs = start_point_idxs.shape[0]
        junc_counts = torch.sum(junc_mask, dim=[1, 2])
        junc_counts -= junc_mask[..., 0][torch.arange(0, num_segs),
                                         start_point_idxs].to(torch.int)
        junc_counts -= junc_mask[..., 0][torch.arange(0, num_segs),
                                         end_point_idxs].to(torch.int)
        # Get the invalid candidate mask
        final_mask = junc_counts > 0
        candidate_map[start_point_idxs[final_mask],
                      end_point_idxs[final_mask]] = 0
        return candidate_map
    def refine_junction_perturb(self, junctions, line_map_pred,
                                heatmap, H, W, device):
        """ Refine the line endpoints in a similar way as in LSD.

        Each detected segment is perturbed over a 4-D grid of endpoint
        offsets (``num_perturbs`` values per coordinate, spaced by
        ``perturb_interval``); the candidate with the highest mean heatmap
        response along the segment is kept. Junctions are then rebuilt
        (duplicates removed) from the refined segments.

        Returns:
            junctions_new: refined [N', 2] junction tensor.
            line_map_new: adjacency matrix over the refined junctions.
        """
        # Get the config
        junction_refine_cfg = self.junction_refine_cfg
        # Fetch refinement parameters
        num_perturbs = junction_refine_cfg["num_perturbs"]
        perturb_interval = junction_refine_cfg["perturb_interval"]
        side_perturbs = (num_perturbs - 1) // 2
        # Fetch the 2D perturb mat
        perturb_vec = torch.arange(
            start=-perturb_interval*side_perturbs,
            end=perturb_interval*(side_perturbs+1),
            step=perturb_interval, device=device)
        w1_grid, h1_grid, w2_grid, h2_grid = torch.meshgrid(
            perturb_vec, perturb_vec, perturb_vec, perturb_vec)
        perturb_tensor = torch.cat([
            w1_grid[..., None], h1_grid[..., None],
            w2_grid[..., None], h2_grid[..., None]], dim=-1)
        # Flatten to [num_perturbs**4, 2 endpoints, 2 coords].
        perturb_tensor_flat = perturb_tensor.view(-1, 2, 2)
        # Fetch the junctions and line_map; clone so the caller's junctions
        # are not mutated.
        junctions = junctions.clone()
        line_map = line_map_pred
        # Fetch all the detected lines
        detected_seg_indexes = torch.where(torch.triu(line_map, diagonal=1))
        start_point_idxs = detected_seg_indexes[0]
        end_point_idxs = detected_seg_indexes[1]
        start_points = junctions[start_point_idxs, :]
        end_points = junctions[end_point_idxs, :]
        line_segments = torch.cat([start_points.unsqueeze(dim=1),
                                   end_points.unsqueeze(dim=1)], dim=1)
        line_segment_candidates = (line_segments.unsqueeze(dim=1)
                                   + perturb_tensor_flat[None, ...])
        # Clip the boundaries
        line_segment_candidates[..., 0] = torch.clamp(
            line_segment_candidates[..., 0], min=0, max=H - 1)
        line_segment_candidates[..., 1] = torch.clamp(
            line_segment_candidates[..., 1], min=0, max=W - 1)
        # Iterate through all the segments
        refined_segment_lst = []
        num_segments = line_segments.shape[0]
        for idx in range(num_segments):
            segment = line_segment_candidates[idx, ...]
            # Get the corresponding start and end junctions
            candidate_junc_start = segment[:, 0, :]
            candidate_junc_end = segment[:, 1, :]
            # Get the sampling locations (N x num_samples)
            sampler = self.torch_sampler.to(device)[None, ...]
            cand_samples_h = (candidate_junc_start[:, 0:1] * sampler +
                              candidate_junc_end[:, 0:1] * (1 - sampler))
            cand_samples_w = (candidate_junc_start[:, 1:2] * sampler +
                              candidate_junc_end[:, 1:2] * (1 - sampler))
            # Clip to image boundary
            cand_h = torch.clamp(cand_samples_h, min=0, max=H - 1)
            cand_w = torch.clamp(cand_samples_w, min=0, max=W - 1)
            # Perform bilinear sampling
            segment_feat = self.detect_bilinear(
                heatmap, cand_h, cand_w, H, W, device)
            segment_results = torch.mean(segment_feat, dim=-1)
            # Keep the perturbation with the strongest mean response.
            max_idx = torch.argmax(segment_results)
            refined_segment_lst.append(segment[max_idx, ...][None, ...])
        # Concatenate back to segments
        refined_segments = torch.cat(refined_segment_lst, dim=0)
        # Convert back to junctions and line_map
        junctions_new = torch.cat(
            [refined_segments[:, 0, :], refined_segments[:, 1, :]], dim=0)
        junctions_new = torch.unique(junctions_new, dim=0)
        line_map_new = self.segments_to_line_map(junctions_new,
                                                 refined_segments)
        return junctions_new, line_map_new
def segments_to_line_map(self, junctions, segments):
""" Convert the list of segments to line map. """
# Create empty line map
device = junctions.device
num_junctions = junctions.shape[0]
line_map = torch.zeros([num_junctions, num_junctions], device=device)
# Iterate through every segment
for idx in range(segments.shape[0]):
# Get the junctions from a single segement
seg = segments[idx, ...]
junction1 = seg[0, :]
junction2 = seg[1, :]
# Get index
idx_junction1 = torch.where(
(junctions == junction1).sum(axis=1) == 2)[0]
idx_junction2 = torch.where(
(junctions == junction2).sum(axis=1) == 2)[0]
# label the corresponding entries
line_map[idx_junction1, idx_junction2] = 1
line_map[idx_junction2, idx_junction1] = 1
return line_map
def detect_bilinear(self, heatmap, cand_h, cand_w, H, W, device):
""" Detection by bilinear sampling. """
# Get the floor and ceiling locations
cand_h_floor = torch.floor(cand_h).to(torch.long)
cand_h_ceil = torch.ceil(cand_h).to(torch.long)
cand_w_floor = torch.floor(cand_w).to(torch.long)
cand_w_ceil = torch.ceil(cand_w).to(torch.long)
# Perform the bilinear sampling
cand_samples_feat = (
heatmap[cand_h_floor, cand_w_floor] * (cand_h_ceil - cand_h)
* (cand_w_ceil - cand_w) + heatmap[cand_h_floor, cand_w_ceil]
* (cand_h_ceil - cand_h) * (cand_w - cand_w_floor) +
heatmap[cand_h_ceil, cand_w_floor] * (cand_h - cand_h_floor)
* (cand_w_ceil - cand_w) + heatmap[cand_h_ceil, cand_w_ceil]
* (cand_h - cand_h_floor) * (cand_w - cand_w_floor))
return cand_samples_feat
def detect_local_max(self, heatmap, cand_h, cand_w, H, W,
normalized_seg_length, device):
""" Detection by local maximum search. """
# Compute the distance threshold
dist_thresh = (0.5 * (2 ** 0.5)
+ self.lambda_radius * normalized_seg_length)
# Make it N x 64
dist_thresh = torch.repeat_interleave(dist_thresh[..., None],
self.num_samples, dim=-1)
# Compute the candidate points
cand_points = torch.cat([cand_h[..., None], cand_w[..., None]],
dim=-1)
cand_points_round = torch.round(cand_points) # N x 64 x 2
# Construct local patches 9x9 = 81
patch_mask = torch.zeros([int(2 * self.local_patch_radius + 1),
int(2 * self.local_patch_radius + 1)],
device=device)
patch_center = torch.tensor(
[[self.local_patch_radius, self.local_patch_radius]],
device=device, dtype=torch.float32)
H_patch_points, W_patch_points = torch.where(patch_mask >= 0)
patch_points = torch.cat([H_patch_points[..., None],
W_patch_points[..., None]], dim=-1)
# Fetch the circle region
patch_center_dist = torch.sqrt(torch.sum(
(patch_points - patch_center) ** 2, dim=-1))
patch_points = (patch_points[patch_center_dist
<= self.local_patch_radius, :])
# Shift [0, 0] to the center
patch_points = patch_points - self.local_patch_radius
# Construct local patch mask
patch_points_shifted = (torch.unsqueeze(cand_points_round, dim=2)
+ patch_points[None, None, ...])
patch_dist = torch.sqrt(torch.sum((torch.unsqueeze(cand_points, dim=2)
- patch_points_shifted) ** 2,
dim=-1))
patch_dist_mask = patch_dist < dist_thresh[..., None]
# Get all points => num_points_center x num_patch_points x 2
points_H = torch.clamp(patch_points_shifted[:, :, :, 0], min=0,
max=H - 1).to(torch.long)
points_W = torch.clamp(patch_points_shifted[:, :, :, 1], min=0,
max=W - 1).to(torch.long)
points = torch.cat([points_H[..., None], points_W[..., None]], dim=-1)
# Sample the feature (N x 64 x 81)
sampled_feat = heatmap[points[:, :, :, 0], points[:, :, :, 1]]
# Filtering using the valid mask
sampled_feat = sampled_feat * patch_dist_mask.to(torch.float32)
if len(sampled_feat) == 0:
sampled_feat_lmax = torch.empty(0, 64)
else:
sampled_feat_lmax, _ = torch.max(sampled_feat, dim=-1)
return sampled_feat_lmax
| 11,949 |
14,668 | /*
* Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "third_party/blink/renderer/modules/webdatabase/sql_transaction_state_machine.h"
namespace blink {
#if DCHECK_IS_ON()
// Maps a SQLTransactionState to a human-readable name; unknown values map
// to "UNKNOWN". Compiled only when DCHECK_IS_ON() (see the enclosing #if),
// i.e. intended for debug/diagnostic output.
const char* NameForSQLTransactionState(SQLTransactionState state) {
  switch (state) {
    case SQLTransactionState::kEnd:
      return "end";
    case SQLTransactionState::kIdle:
      return "idle";
    case SQLTransactionState::kAcquireLock:
      return "acquireLock";
    case SQLTransactionState::kOpenTransactionAndPreflight:
      return "openTransactionAndPreflight";
    case SQLTransactionState::kRunStatements:
      return "runStatements";
    case SQLTransactionState::kPostflightAndCommit:
      return "postflightAndCommit";
    case SQLTransactionState::kCleanupAndTerminate:
      return "cleanupAndTerminate";
    case SQLTransactionState::kCleanupAfterTransactionErrorCallback:
      return "cleanupAfterTransactionErrorCallback";
    case SQLTransactionState::kDeliverTransactionCallback:
      return "deliverTransactionCallback";
    case SQLTransactionState::kDeliverTransactionErrorCallback:
      return "deliverTransactionErrorCallback";
    case SQLTransactionState::kDeliverStatementCallback:
      return "deliverStatementCallback";
    case SQLTransactionState::kDeliverQuotaIncreaseCallback:
      return "deliverQuotaIncreaseCallback";
    case SQLTransactionState::kDeliverSuccessCallback:
      return "deliverSuccessCallback";
    default:
      return "UNKNOWN";
  }
}
#endif
} // namespace blink
| 846 |
648 | #ifndef _SCNUMS_X86_H
#define _SCNUMS_X86_H
/*
Derived from kernel syscall_32.tbl
cat linux/arch/x86/syscalls/syscall_32.tbl| \
egrep -v "^#"| \
sed 's|^\([0-9]\+\)\s\+\w\+\s\+\(\w\+\).*$|#define __NR_\2\t\t\t\1|g' \
> scnums-x86.h
cat linux/arch/x86/syscalls/syscall_32.tbl| \
egrep -v "^#"| \
sed 's|^\([0-9]\+\)\s\+\w\+\s\+\(\w\+\).*$|\t[__NR_\2] = "\2",|g' \
>> scnums-x86.h
*/
#define __NR_restart_syscall 0
#define __NR_exit 1
#define __NR_fork 2
#define __NR_read 3
#define __NR_write 4
#define __NR_open 5
#define __NR_close 6
#define __NR_waitpid 7
#define __NR_creat 8
#define __NR_link 9
#define __NR_unlink 10
#define __NR_execve 11
#define __NR_chdir 12
#define __NR_time 13
#define __NR_mknod 14
#define __NR_chmod 15
#define __NR_lchown 16
#define __NR_break 17
#define __NR_oldstat 18
#define __NR_lseek 19
#define __NR_getpid 20
#define __NR_mount 21
#define __NR_umount 22
#define __NR_setuid 23
#define __NR_getuid 24
#define __NR_stime 25
#define __NR_ptrace 26
#define __NR_alarm 27
#define __NR_oldfstat 28
#define __NR_pause 29
#define __NR_utime 30
#define __NR_stty 31
#define __NR_gtty 32
#define __NR_access 33
#define __NR_nice 34
#define __NR_ftime 35
#define __NR_sync 36
#define __NR_kill 37
#define __NR_rename 38
#define __NR_mkdir 39
#define __NR_rmdir 40
#define __NR_dup 41
#define __NR_pipe 42
#define __NR_times 43
#define __NR_prof 44
#define __NR_brk 45
#define __NR_setgid 46
#define __NR_getgid 47
#define __NR_signal 48
#define __NR_geteuid 49
#define __NR_getegid 50
#define __NR_acct 51
#define __NR_umount2 52
#define __NR_lock 53
#define __NR_ioctl 54
#define __NR_fcntl 55
#define __NR_mpx 56
#define __NR_setpgid 57
#define __NR_ulimit 58
#define __NR_oldolduname 59
#define __NR_umask 60
#define __NR_chroot 61
#define __NR_ustat 62
#define __NR_dup2 63
#define __NR_getppid 64
#define __NR_getpgrp 65
#define __NR_setsid 66
#define __NR_sigaction 67
#define __NR_sgetmask 68
#define __NR_ssetmask 69
#define __NR_setreuid 70
#define __NR_setregid 71
#define __NR_sigsuspend 72
#define __NR_sigpending 73
#define __NR_sethostname 74
#define __NR_setrlimit 75
#define __NR_getrlimit 76
#define __NR_getrusage 77
#define __NR_gettimeofday 78
#define __NR_settimeofday 79
#define __NR_getgroups 80
#define __NR_setgroups 81
#define __NR_select 82
#define __NR_symlink 83
#define __NR_oldlstat 84
#define __NR_readlink 85
#define __NR_uselib 86
#define __NR_swapon 87
#define __NR_reboot 88
#define __NR_readdir 89
#define __NR_mmap 90
#define __NR_munmap 91
#define __NR_truncate 92
#define __NR_ftruncate 93
#define __NR_fchmod 94
#define __NR_fchown 95
#define __NR_getpriority 96
#define __NR_setpriority 97
#define __NR_profil 98
#define __NR_statfs 99
#define __NR_fstatfs 100
#define __NR_ioperm 101
#define __NR_socketcall 102
#define __NR_syslog 103
#define __NR_setitimer 104
#define __NR_getitimer 105
#define __NR_stat 106
#define __NR_lstat 107
#define __NR_fstat 108
#define __NR_olduname 109
#define __NR_iopl 110
#define __NR_vhangup 111
#define __NR_idle 112
#define __NR_vm86old 113
#define __NR_wait4 114
#define __NR_swapoff 115
#define __NR_sysinfo 116
#define __NR_ipc 117
#define __NR_fsync 118
#define __NR_sigreturn 119
#define __NR_clone 120
#define __NR_setdomainname 121
#define __NR_uname 122
#define __NR_modify_ldt 123
#define __NR_adjtimex 124
#define __NR_mprotect 125
#define __NR_sigprocmask 126
#define __NR_create_module 127
#define __NR_init_module 128
#define __NR_delete_module 129
#define __NR_get_kernel_syms 130
#define __NR_quotactl 131
#define __NR_getpgid 132
#define __NR_fchdir 133
#define __NR_bdflush 134
#define __NR_sysfs 135
#define __NR_personality 136
#define __NR_afs_syscall 137
#define __NR_setfsuid 138
#define __NR_setfsgid 139
#define __NR__llseek 140
#define __NR_getdents 141
#define __NR__newselect 142
#define __NR_flock 143
#define __NR_msync 144
#define __NR_readv 145
#define __NR_writev 146
#define __NR_getsid 147
#define __NR_fdatasync 148
#define __NR__sysctl 149
#define __NR_mlock 150
#define __NR_munlock 151
#define __NR_mlockall 152
#define __NR_munlockall 153
#define __NR_sched_setparam 154
#define __NR_sched_getparam 155
#define __NR_sched_setscheduler 156
#define __NR_sched_getscheduler 157
#define __NR_sched_yield 158
#define __NR_sched_get_priority_max 159
#define __NR_sched_get_priority_min 160
#define __NR_sched_rr_get_interval 161
#define __NR_nanosleep 162
#define __NR_mremap 163
#define __NR_setresuid 164
#define __NR_getresuid 165
#define __NR_vm86 166
#define __NR_query_module 167
#define __NR_poll 168
#define __NR_nfsservctl 169
#define __NR_setresgid 170
#define __NR_getresgid 171
#define __NR_prctl 172
#define __NR_rt_sigreturn 173
#define __NR_rt_sigaction 174
#define __NR_rt_sigprocmask 175
#define __NR_rt_sigpending 176
#define __NR_rt_sigtimedwait 177
#define __NR_rt_sigqueueinfo 178
#define __NR_rt_sigsuspend 179
#define __NR_pread64 180
#define __NR_pwrite64 181
#define __NR_chown 182
#define __NR_getcwd 183
#define __NR_capget 184
#define __NR_capset 185
#define __NR_sigaltstack 186
#define __NR_sendfile 187
#define __NR_getpmsg 188
#define __NR_putpmsg 189
#define __NR_vfork 190
#define __NR_ugetrlimit 191
#define __NR_mmap2 192
#define __NR_truncate64 193
#define __NR_ftruncate64 194
#define __NR_stat64 195
#define __NR_lstat64 196
#define __NR_fstat64 197
#define __NR_lchown32 198
#define __NR_getuid32 199
#define __NR_getgid32 200
#define __NR_geteuid32 201
#define __NR_getegid32 202
#define __NR_setreuid32 203
#define __NR_setregid32 204
#define __NR_getgroups32 205
#define __NR_setgroups32 206
#define __NR_fchown32 207
#define __NR_setresuid32 208
#define __NR_getresuid32 209
#define __NR_setresgid32 210
#define __NR_getresgid32 211
#define __NR_chown32 212
#define __NR_setuid32 213
#define __NR_setgid32 214
#define __NR_setfsuid32 215
#define __NR_setfsgid32 216
#define __NR_pivot_root 217
#define __NR_mincore 218
#define __NR_madvise 219
#define __NR_getdents64 220
#define __NR_fcntl64 221
#define __NR_gettid 224
#define __NR_readahead 225
#define __NR_setxattr 226
#define __NR_lsetxattr 227
#define __NR_fsetxattr 228
#define __NR_getxattr 229
#define __NR_lgetxattr 230
#define __NR_fgetxattr 231
#define __NR_listxattr 232
#define __NR_llistxattr 233
#define __NR_flistxattr 234
#define __NR_removexattr 235
#define __NR_lremovexattr 236
#define __NR_fremovexattr 237
#define __NR_tkill 238
#define __NR_sendfile64 239
#define __NR_futex 240
#define __NR_sched_setaffinity 241
#define __NR_sched_getaffinity 242
#define __NR_set_thread_area 243
#define __NR_get_thread_area 244
#define __NR_io_setup 245
#define __NR_io_destroy 246
#define __NR_io_getevents 247
#define __NR_io_submit 248
#define __NR_io_cancel 249
#define __NR_fadvise64 250
#define __NR_exit_group 252
#define __NR_lookup_dcookie 253
#define __NR_epoll_create 254
#define __NR_epoll_ctl 255
#define __NR_epoll_wait 256
#define __NR_remap_file_pages 257
#define __NR_set_tid_address 258
#define __NR_timer_create 259
#define __NR_timer_settime 260
#define __NR_timer_gettime 261
#define __NR_timer_getoverrun 262
#define __NR_timer_delete 263
#define __NR_clock_settime 264
#define __NR_clock_gettime 265
#define __NR_clock_getres 266
#define __NR_clock_nanosleep 267
#define __NR_statfs64 268
#define __NR_fstatfs64 269
#define __NR_tgkill 270
#define __NR_utimes 271
#define __NR_fadvise64_64 272
#define __NR_vserver 273
#define __NR_mbind 274
#define __NR_get_mempolicy 275
#define __NR_set_mempolicy 276
#define __NR_mq_open 277
#define __NR_mq_unlink 278
#define __NR_mq_timedsend 279
#define __NR_mq_timedreceive 280
#define __NR_mq_notify 281
#define __NR_mq_getsetattr 282
#define __NR_kexec_load 283
#define __NR_waitid 284
#define __NR_add_key 286
#define __NR_request_key 287
#define __NR_keyctl 288
#define __NR_ioprio_set 289
#define __NR_ioprio_get 290
#define __NR_inotify_init 291
#define __NR_inotify_add_watch 292
#define __NR_inotify_rm_watch 293
#define __NR_migrate_pages 294
#define __NR_openat 295
#define __NR_mkdirat 296
#define __NR_mknodat 297
#define __NR_fchownat 298
#define __NR_futimesat 299
#define __NR_fstatat64 300
#define __NR_unlinkat 301
#define __NR_renameat 302
#define __NR_linkat 303
#define __NR_symlinkat 304
#define __NR_readlinkat 305
#define __NR_fchmodat 306
#define __NR_faccessat 307
#define __NR_pselect6 308
#define __NR_ppoll 309
#define __NR_unshare 310
#define __NR_set_robust_list 311
#define __NR_get_robust_list 312
#define __NR_splice 313
#define __NR_sync_file_range 314
#define __NR_tee 315
#define __NR_vmsplice 316
#define __NR_move_pages 317
#define __NR_getcpu 318
#define __NR_epoll_pwait 319
#define __NR_utimensat 320
#define __NR_signalfd 321
#define __NR_timerfd_create 322
#define __NR_eventfd 323
#define __NR_fallocate 324
#define __NR_timerfd_settime 325
#define __NR_timerfd_gettime 326
#define __NR_signalfd4 327
#define __NR_eventfd2 328
#define __NR_epoll_create1 329
#define __NR_dup3 330
#define __NR_pipe2 331
#define __NR_inotify_init1 332
#define __NR_preadv 333
#define __NR_pwritev 334
#define __NR_rt_tgsigqueueinfo 335
#define __NR_perf_event_open 336
#define __NR_recvmmsg 337
#define __NR_fanotify_init 338
#define __NR_fanotify_mark 339
#define __NR_prlimit64 340
#define __NR_name_to_handle_at 341
#define __NR_open_by_handle_at 342
#define __NR_clock_adjtime 343
#define __NR_syncfs 344
#define __NR_sendmmsg 345
#define __NR_setns 346
#define __NR_process_vm_readv 347
#define __NR_process_vm_writev 348
#define __NR_kcmp 349
/*
 * Maps i386 Linux syscall numbers (__NR_*) to printable names via
 * designated initializers.  Numbers with no __NR_* define (e.g. the
 * gaps at 222-223, 251 and 285 visible above) are left as NULL by the
 * aggregate zero-initialization rules, so callers must bounds-check the
 * index and NULL-check the entry before printing.
 */
static const char *const syscall_to_str_map[] = {
[__NR_restart_syscall] = "restart_syscall",
[__NR_exit] = "exit",
[__NR_fork] = "fork",
[__NR_read] = "read",
[__NR_write] = "write",
[__NR_open] = "open",
[__NR_close] = "close",
[__NR_waitpid] = "waitpid",
[__NR_creat] = "creat",
[__NR_link] = "link",
[__NR_unlink] = "unlink",
[__NR_execve] = "execve",
[__NR_chdir] = "chdir",
[__NR_time] = "time",
[__NR_mknod] = "mknod",
[__NR_chmod] = "chmod",
[__NR_lchown] = "lchown",
[__NR_break] = "break",
[__NR_oldstat] = "oldstat",
[__NR_lseek] = "lseek",
[__NR_getpid] = "getpid",
[__NR_mount] = "mount",
[__NR_umount] = "umount",
[__NR_setuid] = "setuid",
[__NR_getuid] = "getuid",
[__NR_stime] = "stime",
[__NR_ptrace] = "ptrace",
[__NR_alarm] = "alarm",
[__NR_oldfstat] = "oldfstat",
[__NR_pause] = "pause",
[__NR_utime] = "utime",
[__NR_stty] = "stty",
[__NR_gtty] = "gtty",
[__NR_access] = "access",
[__NR_nice] = "nice",
[__NR_ftime] = "ftime",
[__NR_sync] = "sync",
[__NR_kill] = "kill",
[__NR_rename] = "rename",
[__NR_mkdir] = "mkdir",
[__NR_rmdir] = "rmdir",
[__NR_dup] = "dup",
[__NR_pipe] = "pipe",
[__NR_times] = "times",
[__NR_prof] = "prof",
[__NR_brk] = "brk",
[__NR_setgid] = "setgid",
[__NR_getgid] = "getgid",
[__NR_signal] = "signal",
[__NR_geteuid] = "geteuid",
[__NR_getegid] = "getegid",
[__NR_acct] = "acct",
[__NR_umount2] = "umount2",
[__NR_lock] = "lock",
[__NR_ioctl] = "ioctl",
[__NR_fcntl] = "fcntl",
[__NR_mpx] = "mpx",
[__NR_setpgid] = "setpgid",
[__NR_ulimit] = "ulimit",
[__NR_oldolduname] = "oldolduname",
[__NR_umask] = "umask",
[__NR_chroot] = "chroot",
[__NR_ustat] = "ustat",
[__NR_dup2] = "dup2",
[__NR_getppid] = "getppid",
[__NR_getpgrp] = "getpgrp",
[__NR_setsid] = "setsid",
[__NR_sigaction] = "sigaction",
[__NR_sgetmask] = "sgetmask",
[__NR_ssetmask] = "ssetmask",
[__NR_setreuid] = "setreuid",
[__NR_setregid] = "setregid",
[__NR_sigsuspend] = "sigsuspend",
[__NR_sigpending] = "sigpending",
[__NR_sethostname] = "sethostname",
[__NR_setrlimit] = "setrlimit",
[__NR_getrlimit] = "getrlimit",
[__NR_getrusage] = "getrusage",
[__NR_gettimeofday] = "gettimeofday",
[__NR_settimeofday] = "settimeofday",
[__NR_getgroups] = "getgroups",
[__NR_setgroups] = "setgroups",
[__NR_select] = "select",
[__NR_symlink] = "symlink",
[__NR_oldlstat] = "oldlstat",
[__NR_readlink] = "readlink",
[__NR_uselib] = "uselib",
[__NR_swapon] = "swapon",
[__NR_reboot] = "reboot",
[__NR_readdir] = "readdir",
[__NR_mmap] = "mmap",
[__NR_munmap] = "munmap",
[__NR_truncate] = "truncate",
[__NR_ftruncate] = "ftruncate",
[__NR_fchmod] = "fchmod",
[__NR_fchown] = "fchown",
[__NR_getpriority] = "getpriority",
[__NR_setpriority] = "setpriority",
[__NR_profil] = "profil",
[__NR_statfs] = "statfs",
[__NR_fstatfs] = "fstatfs",
[__NR_ioperm] = "ioperm",
[__NR_socketcall] = "socketcall",
[__NR_syslog] = "syslog",
[__NR_setitimer] = "setitimer",
[__NR_getitimer] = "getitimer",
[__NR_stat] = "stat",
[__NR_lstat] = "lstat",
[__NR_fstat] = "fstat",
[__NR_olduname] = "olduname",
[__NR_iopl] = "iopl",
[__NR_vhangup] = "vhangup",
[__NR_idle] = "idle",
[__NR_vm86old] = "vm86old",
[__NR_wait4] = "wait4",
[__NR_swapoff] = "swapoff",
[__NR_sysinfo] = "sysinfo",
[__NR_ipc] = "ipc",
[__NR_fsync] = "fsync",
[__NR_sigreturn] = "sigreturn",
[__NR_clone] = "clone",
[__NR_setdomainname] = "setdomainname",
[__NR_uname] = "uname",
[__NR_modify_ldt] = "modify_ldt",
[__NR_adjtimex] = "adjtimex",
[__NR_mprotect] = "mprotect",
[__NR_sigprocmask] = "sigprocmask",
[__NR_create_module] = "create_module",
[__NR_init_module] = "init_module",
[__NR_delete_module] = "delete_module",
[__NR_get_kernel_syms] = "get_kernel_syms",
[__NR_quotactl] = "quotactl",
[__NR_getpgid] = "getpgid",
[__NR_fchdir] = "fchdir",
[__NR_bdflush] = "bdflush",
[__NR_sysfs] = "sysfs",
[__NR_personality] = "personality",
[__NR_afs_syscall] = "afs_syscall",
[__NR_setfsuid] = "setfsuid",
[__NR_setfsgid] = "setfsgid",
[__NR__llseek] = "_llseek",
[__NR_getdents] = "getdents",
[__NR__newselect] = "_newselect",
[__NR_flock] = "flock",
[__NR_msync] = "msync",
[__NR_readv] = "readv",
[__NR_writev] = "writev",
[__NR_getsid] = "getsid",
[__NR_fdatasync] = "fdatasync",
[__NR__sysctl] = "_sysctl",
[__NR_mlock] = "mlock",
[__NR_munlock] = "munlock",
[__NR_mlockall] = "mlockall",
[__NR_munlockall] = "munlockall",
[__NR_sched_setparam] = "sched_setparam",
[__NR_sched_getparam] = "sched_getparam",
[__NR_sched_setscheduler] = "sched_setscheduler",
[__NR_sched_getscheduler] = "sched_getscheduler",
[__NR_sched_yield] = "sched_yield",
[__NR_sched_get_priority_max] = "sched_get_priority_max",
[__NR_sched_get_priority_min] = "sched_get_priority_min",
[__NR_sched_rr_get_interval] = "sched_rr_get_interval",
[__NR_nanosleep] = "nanosleep",
[__NR_mremap] = "mremap",
[__NR_setresuid] = "setresuid",
[__NR_getresuid] = "getresuid",
[__NR_vm86] = "vm86",
[__NR_query_module] = "query_module",
[__NR_poll] = "poll",
[__NR_nfsservctl] = "nfsservctl",
[__NR_setresgid] = "setresgid",
[__NR_getresgid] = "getresgid",
[__NR_prctl] = "prctl",
[__NR_rt_sigreturn] = "rt_sigreturn",
[__NR_rt_sigaction] = "rt_sigaction",
[__NR_rt_sigprocmask] = "rt_sigprocmask",
[__NR_rt_sigpending] = "rt_sigpending",
[__NR_rt_sigtimedwait] = "rt_sigtimedwait",
[__NR_rt_sigqueueinfo] = "rt_sigqueueinfo",
[__NR_rt_sigsuspend] = "rt_sigsuspend",
[__NR_pread64] = "pread64",
[__NR_pwrite64] = "pwrite64",
[__NR_chown] = "chown",
[__NR_getcwd] = "getcwd",
[__NR_capget] = "capget",
[__NR_capset] = "capset",
[__NR_sigaltstack] = "sigaltstack",
[__NR_sendfile] = "sendfile",
[__NR_getpmsg] = "getpmsg",
[__NR_putpmsg] = "putpmsg",
[__NR_vfork] = "vfork",
[__NR_ugetrlimit] = "ugetrlimit",
[__NR_mmap2] = "mmap2",
[__NR_truncate64] = "truncate64",
[__NR_ftruncate64] = "ftruncate64",
[__NR_stat64] = "stat64",
[__NR_lstat64] = "lstat64",
[__NR_fstat64] = "fstat64",
[__NR_lchown32] = "lchown32",
[__NR_getuid32] = "getuid32",
[__NR_getgid32] = "getgid32",
[__NR_geteuid32] = "geteuid32",
[__NR_getegid32] = "getegid32",
[__NR_setreuid32] = "setreuid32",
[__NR_setregid32] = "setregid32",
[__NR_getgroups32] = "getgroups32",
[__NR_setgroups32] = "setgroups32",
[__NR_fchown32] = "fchown32",
[__NR_setresuid32] = "setresuid32",
[__NR_getresuid32] = "getresuid32",
[__NR_setresgid32] = "setresgid32",
[__NR_getresgid32] = "getresgid32",
[__NR_chown32] = "chown32",
[__NR_setuid32] = "setuid32",
[__NR_setgid32] = "setgid32",
[__NR_setfsuid32] = "setfsuid32",
[__NR_setfsgid32] = "setfsgid32",
[__NR_pivot_root] = "pivot_root",
[__NR_mincore] = "mincore",
[__NR_madvise] = "madvise",
[__NR_getdents64] = "getdents64",
[__NR_fcntl64] = "fcntl64",
[__NR_gettid] = "gettid",
[__NR_readahead] = "readahead",
[__NR_setxattr] = "setxattr",
[__NR_lsetxattr] = "lsetxattr",
[__NR_fsetxattr] = "fsetxattr",
[__NR_getxattr] = "getxattr",
[__NR_lgetxattr] = "lgetxattr",
[__NR_fgetxattr] = "fgetxattr",
[__NR_listxattr] = "listxattr",
[__NR_llistxattr] = "llistxattr",
[__NR_flistxattr] = "flistxattr",
[__NR_removexattr] = "removexattr",
[__NR_lremovexattr] = "lremovexattr",
[__NR_fremovexattr] = "fremovexattr",
[__NR_tkill] = "tkill",
[__NR_sendfile64] = "sendfile64",
[__NR_futex] = "futex",
[__NR_sched_setaffinity] = "sched_setaffinity",
[__NR_sched_getaffinity] = "sched_getaffinity",
[__NR_set_thread_area] = "set_thread_area",
[__NR_get_thread_area] = "get_thread_area",
[__NR_io_setup] = "io_setup",
[__NR_io_destroy] = "io_destroy",
[__NR_io_getevents] = "io_getevents",
[__NR_io_submit] = "io_submit",
[__NR_io_cancel] = "io_cancel",
[__NR_fadvise64] = "fadvise64",
[__NR_exit_group] = "exit_group",
[__NR_lookup_dcookie] = "lookup_dcookie",
[__NR_epoll_create] = "epoll_create",
[__NR_epoll_ctl] = "epoll_ctl",
[__NR_epoll_wait] = "epoll_wait",
[__NR_remap_file_pages] = "remap_file_pages",
[__NR_set_tid_address] = "set_tid_address",
[__NR_timer_create] = "timer_create",
[__NR_timer_settime] = "timer_settime",
[__NR_timer_gettime] = "timer_gettime",
[__NR_timer_getoverrun] = "timer_getoverrun",
[__NR_timer_delete] = "timer_delete",
[__NR_clock_settime] = "clock_settime",
[__NR_clock_gettime] = "clock_gettime",
[__NR_clock_getres] = "clock_getres",
[__NR_clock_nanosleep] = "clock_nanosleep",
[__NR_statfs64] = "statfs64",
[__NR_fstatfs64] = "fstatfs64",
[__NR_tgkill] = "tgkill",
[__NR_utimes] = "utimes",
[__NR_fadvise64_64] = "fadvise64_64",
[__NR_vserver] = "vserver",
[__NR_mbind] = "mbind",
[__NR_get_mempolicy] = "get_mempolicy",
[__NR_set_mempolicy] = "set_mempolicy",
[__NR_mq_open] = "mq_open",
[__NR_mq_unlink] = "mq_unlink",
[__NR_mq_timedsend] = "mq_timedsend",
[__NR_mq_timedreceive] = "mq_timedreceive",
[__NR_mq_notify] = "mq_notify",
[__NR_mq_getsetattr] = "mq_getsetattr",
[__NR_kexec_load] = "kexec_load",
[__NR_waitid] = "waitid",
[__NR_add_key] = "add_key",
[__NR_request_key] = "request_key",
[__NR_keyctl] = "keyctl",
[__NR_ioprio_set] = "ioprio_set",
[__NR_ioprio_get] = "ioprio_get",
[__NR_inotify_init] = "inotify_init",
[__NR_inotify_add_watch] = "inotify_add_watch",
[__NR_inotify_rm_watch] = "inotify_rm_watch",
[__NR_migrate_pages] = "migrate_pages",
[__NR_openat] = "openat",
[__NR_mkdirat] = "mkdirat",
[__NR_mknodat] = "mknodat",
[__NR_fchownat] = "fchownat",
[__NR_futimesat] = "futimesat",
[__NR_fstatat64] = "fstatat64",
[__NR_unlinkat] = "unlinkat",
[__NR_renameat] = "renameat",
[__NR_linkat] = "linkat",
[__NR_symlinkat] = "symlinkat",
[__NR_readlinkat] = "readlinkat",
[__NR_fchmodat] = "fchmodat",
[__NR_faccessat] = "faccessat",
[__NR_pselect6] = "pselect6",
[__NR_ppoll] = "ppoll",
[__NR_unshare] = "unshare",
[__NR_set_robust_list] = "set_robust_list",
[__NR_get_robust_list] = "get_robust_list",
[__NR_splice] = "splice",
[__NR_sync_file_range] = "sync_file_range",
[__NR_tee] = "tee",
[__NR_vmsplice] = "vmsplice",
[__NR_move_pages] = "move_pages",
[__NR_getcpu] = "getcpu",
[__NR_epoll_pwait] = "epoll_pwait",
[__NR_utimensat] = "utimensat",
[__NR_signalfd] = "signalfd",
[__NR_timerfd_create] = "timerfd_create",
[__NR_eventfd] = "eventfd",
[__NR_fallocate] = "fallocate",
[__NR_timerfd_settime] = "timerfd_settime",
[__NR_timerfd_gettime] = "timerfd_gettime",
[__NR_signalfd4] = "signalfd4",
[__NR_eventfd2] = "eventfd2",
[__NR_epoll_create1] = "epoll_create1",
[__NR_dup3] = "dup3",
[__NR_pipe2] = "pipe2",
[__NR_inotify_init1] = "inotify_init1",
[__NR_preadv] = "preadv",
[__NR_pwritev] = "pwritev",
[__NR_rt_tgsigqueueinfo] = "rt_tgsigqueueinfo",
[__NR_perf_event_open] = "perf_event_open",
[__NR_recvmmsg] = "recvmmsg",
[__NR_fanotify_init] = "fanotify_init",
[__NR_fanotify_mark] = "fanotify_mark",
[__NR_prlimit64] = "prlimit64",
[__NR_name_to_handle_at] = "name_to_handle_at",
[__NR_open_by_handle_at] = "open_by_handle_at",
[__NR_clock_adjtime] = "clock_adjtime",
[__NR_syncfs] = "syncfs",
[__NR_sendmmsg] = "sendmmsg",
[__NR_setns] = "setns",
[__NR_process_vm_readv] = "process_vm_readv",
[__NR_process_vm_writev] = "process_vm_writev",
[__NR_kcmp] = "kcmp"
};
#endif /* _SCNUMS_X86_H */
| 10,526 |
316 | /**
* Copyright (c) 2010-2010 <NAME>
* email:<EMAIL>
*
* Report bugs and download new versions at http://code.google.com/p/fontbuilder
*
* This software is distributed under the MIT License.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "builtinimagewriter.h"
#include "layoutdata.h"
#include <QPainter>
#include "../layoutconfig.h"
// Writer backed by Qt's built-in image codecs: |format| selects the codec,
// |ext| the file extension advertised to the UI.
BuiltinImageWriter::BuiltinImageWriter(QString format,QString ext,QObject *parent) :
    AbstractImageWriter(parent),
    m_format(format)
{
    setExtension(ext);
    setReloadSupport(true);
}
// Writes the rendered texture image to |file| in m_format.
// Returns false if QImage::save fails (unsupported format, unwritable
// file); the old code discarded save()'s result and always reported true.
bool BuiltinImageWriter::Export(QFile& file) {
    QImage pixmap = buildImage();
    return pixmap.save(&file,m_format.toUtf8().data());
}
// Reads |file| back in m_format. Returns a heap-allocated image the caller
// owns, or 0 when loading fails.
QImage* BuiltinImageWriter::reload(QFile& file) {
    QImage* image = new QImage();
    if (!image->load(&file,m_format.toUtf8().data())) {
        delete image;
        return 0;
    }
    return image;
}
| 593 |
2,151 | <filename>media/gpu/android/avda_shared_state.cc<gh_stars>1000+
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/android/avda_shared_state.h"
#include "base/metrics/histogram_macros.h"
#include "base/time/time.h"
#include "media/gpu/android/avda_codec_image.h"
#include "ui/gl/android/surface_texture.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/scoped_make_current.h"
namespace media {
// The GL transform starts as identity; UpdateTexImage() replaces it with the
// real matrix from the texture owner.
AVDASharedState::AVDASharedState(
    scoped_refptr<AVDASurfaceBundle> surface_bundle)
    : gl_matrix_{
          1, 0, 0, 0,  // Default to a sane guess just in case we can't get the
          0, 1, 0, 0,  // matrix on the first call. Will be Y-flipped later.
          0, 0, 1, 0,  //
          0, 0, 0, 1,  // Comment preserves 4x4 formatting.
      },
      surface_bundle_(surface_bundle),
      weak_this_factory_(this) {
  // If we're holding a reference to an overlay, then register to drop it if the
  // overlay's surface is destroyed.  ClearOverlay() below does the drop.
  if (overlay()) {
    overlay()->AddSurfaceDestroyedCallback(base::Bind(
        &AVDASharedState::ClearOverlay, weak_this_factory_.GetWeakPtr()));
  }
}

AVDASharedState::~AVDASharedState() = default;
// Releases |codec_buffer_index| from |codec| for rendering to the texture
// owner's surface, and records the release time.
void AVDASharedState::RenderCodecBufferToTextureOwner(MediaCodecBridge* codec,
                                                      int codec_buffer_index) {
  // If an earlier released frame hasn't arrived yet, wait for it before
  // queuing another one.
  if (texture_owner()->IsExpectingFrameAvailable())
    texture_owner()->WaitForFrameAvailable();
  // |true| == render the buffer to the surface instead of discarding it.
  codec->ReleaseOutputBuffer(codec_buffer_index, true);
  // Mark a frame as in-flight so a later wait knows to block.
  texture_owner()->SetReleaseTimeToNow();
}
// Blocks until the most recently released frame reaches the texture owner.
void AVDASharedState::WaitForFrameAvailable() {
  texture_owner()->WaitForFrameAvailable();
}
// Latches the newest frame into the texture owner's texture and caches its
// texture-coordinate transform in gl_matrix_.
void AVDASharedState::UpdateTexImage() {
  texture_owner()->UpdateTexImage();
  // Helpfully, this is already column major.
  texture_owner()->GetTransformMatrix(gl_matrix_);
}
// Copies the cached 4x4 (column-major) texture transform into |matrix|.
void AVDASharedState::GetTransformMatrix(float matrix[16]) const {
  memcpy(matrix, gl_matrix_, sizeof(gl_matrix_));
}
// Forgets any pending released frame so a later wait won't block on it.
void AVDASharedState::ClearReleaseTime() {
  if (texture_owner())
    texture_owner()->IgnorePendingRelease();
}
// Surface-destroyed callback (registered in the constructor): drop the
// surface bundle when it belongs to the overlay being destroyed.
void AVDASharedState::ClearOverlay(AndroidOverlay* overlay_raw) {
  if (surface_bundle_ && overlay() == overlay_raw)
    surface_bundle_ = nullptr;
}
// Stores the callback used to report overlay promotion hints.
void AVDASharedState::SetPromotionHintCB(
    PromotionHintAggregator::NotifyPromotionHintCB cb) {
  promotion_hint_cb_ = cb;
}
// Returns the callback registered via SetPromotionHintCB().
const PromotionHintAggregator::NotifyPromotionHintCB&
AVDASharedState::GetPromotionHintCB() {
  return promotion_hint_cb_;
}
} // namespace media
| 988 |
4,538 | /*
* Copyright (C) 2015-2020 Alibaba Group Holding Limited
*/
#include "aos/hal/rtc.h"
#include "rx8130ce.h"
/**
* This function will initialize the on board CPU real time clock
*
*
* @param[in] rtc rtc device
*
* @return 0 : on success, EIO : if an error occurred with any step
*/
int32_t hal_rtc_init(rtc_dev_t *rtc)
{
    /* Bring up the external RX8130CE RTC chip; the rtc handle itself
     * carries no configuration here. */
    if (rx8130ce_init() != 0)
    {
        printf("board rtc init fail\r\n");
        return -1;
    }
    return 0;
}
/**
* This function will return the value of time read from the on board CPU real time clock.
*
* @param[in] rtc rtc device
* @param[out] time pointer to a time structure
*
* @return 0 : on success, EIO : if an error occurred with any step
*/
int32_t hal_rtc_get_time(rtc_dev_t *rtc, rtc_time_t *time)
{
    /* Read the current time from the RX8130CE into *time. */
    if (rx8130ce_get_time(time, sizeof(rtc_time_t)) != 0)
    {
        printf("board rtc get time fail\r\n");
        return -1;
    }
    return 0;
}
/**
* This function will set MCU RTC time to a new value.
*
* @param[in] rtc rtc device
* @param[out] time pointer to a time structure
*
* @return 0 : on success, EIO : if an error occurred with any step
*/
int32_t hal_rtc_set_time(rtc_dev_t *rtc, const rtc_time_t *time)
{
    /* Program the RX8130CE with the caller-supplied wall-clock time. */
    if (rx8130ce_set_time(time, sizeof(rtc_time_t)) != 0)
    {
        printf("board rtc set time fail\r\n");
        return -1;
    }
    return 0;
}
/**
* De-initialises an RTC interface, Turns off an RTC hardware interface
*
* @param[in] RTC the interface which should be de-initialised
*
* @return 0 : on success, EIO : if an error occurred with any step
*/
int32_t hal_rtc_finalize(rtc_dev_t *rtc)
{
    /* Nothing to tear down for the RX8130CE; cast silences the
     * unused-parameter warning while keeping the HAL signature. */
    (void)rtc;
    return 0;
}
| 718 |
790 | #!/usr/bin/env python
import hashlib
import hmac
import os
import pickle
import time
from functools import wraps

import flask.ext.login as flask_login
from flask import (
    Flask, render_template, request, Response, flash, redirect, url_for)

from forms import (
    loginForm, initialSetupForm, userForm, wifiForm, vpnPskForm,
    resetToDefaultForm, statusForm)
from scripts.pi_mgmt import (
    pi_reboot, pi_shutdown, start_ssh_service, update_client)
from scripts.rpi_network_conn import add_wifi, internet_status, reset_wifi
from scripts.vpn_server_conn import (
    set_vpn_params, reset_vpn_params, start_vpn, stop_vpn, restart_vpn,
    vpn_status, vpn_configuration_status)
app = Flask(__name__)
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
# Flask-Login functions (for regular Pages)
# users maps username -> {"password": hex sha256(salt + password),
#                         "salt": base64-encoded 16-byte salt}, e.g.
# {"admin": {"password": "<hash>", "salt": "uOMbInZTYYpiCGvEaH8Byw==\n"}}
# The default account is "admin"; login() forces a password change while the
# default password still validates.
# NOTE(review): pickle.load fully trusts this file's contents -- it must only
# ever be written by this app (see user_change_credentials), never from
# untrusted input.
with open("/home/pi/goSecure_Web_GUI/users_db.p", "rb") as fin:
    users = pickle.load(fin)
class User(flask_login.UserMixin):
    """Minimal session user; identified solely by ``id`` (the username)."""
    pass
@login_manager.user_loader
def user_loader(username):
    """Flask-Login session callback: map a stored username to a User."""
    if username in users:
        user = User()
        user.id = username
        return user
    return None
@login_manager.request_loader
def request_loader(request):
    """Flask-Login request callback for credential-bearing form posts.

    Returns an authenticated User only when both username and password
    check out; returns None otherwise so the request stays anonymous.
    """
    username = request.form.get('username')
    if username not in users:
        return None
    # DO NOT ever store passwords in plaintext and always compare password
    # hashes using constant-time comparison!
    # Reject bad credentials outright.  The old code returned the User
    # object regardless of the password check, and UserMixin.is_authenticated
    # is always True -- i.e. any known username authenticated the request.
    password = request.form.get('password')
    if password is None or not user_validate_credentials(username, password):
        return None
    user = User()
    user.id = username
    user.is_authenticated = True
    return user
@login_manager.unauthorized_handler
def unauthorized_handler():
    # Anonymous hit on a @login_required page: send them to the login form.
    flash("Unauthorized, please log in.", "error")
    return redirect(url_for("login"))
# Flask HTTP Basic Auth (for API)
def authenticate():
    """Return the 401 challenge response that triggers HTTP Basic auth."""
    challenge = {'WWW-Authenticate': 'Basic realm="Login Required"'}
    return Response("Unauthorized", 401, challenge)
def requires_basic_auth(f):
    """Decorator enforcing HTTP Basic auth on the REST API endpoints."""
    @wraps(f)
    def decorated(*args, **kwargs):
        auth = request.authorization
        if auth and user_validate_credentials(auth.username, auth.password):
            return f(*args, **kwargs)
        return authenticate()
    return decorated
def flash_form_errors(form):
    """Flash every WTForms validation message as an 'error' flash."""
    for field_name, messages in form.errors.items():
        field_label = getattr(form, field_name).label.text
        for message in messages:
            flash(u"Error in the %s field - %s" % (field_label, message), "error")
# Auth helper functions
# return True is username and password pair match what's in the database
# else return False
def user_validate_credentials(username, password):
    """Return True iff (username, password) matches the stored hash.

    Stored hashes are sha256(salt + password) hexdigests.  The comparison
    uses hmac.compare_digest, honoring the module's promise of a
    constant-time comparison (the old code used a short-circuiting ==).
    """
    if username not in users:
        return False
    stored_password = users[username]['password']
    stored_salt = users[username]['salt']
    userPasswordHash = hashlib.sha256(str(stored_salt) + password).hexdigest()
    return hmac.compare_digest(str(stored_password), userPasswordHash)
# return True is password is changed successfully
# else return False
def user_change_credentials(username, password, new_password):
    """Change *username*'s password after verifying the current one.

    Returns True on success; False when the user is unknown or the current
    password is wrong.  On success the users dict is re-persisted to disk.
    """
    if username not in users:
        return False
    # verify current password
    if not user_validate_credentials(username, password):
        return False
    # change password: fresh 16-byte salt, hash = sha256(salt + password)
    userPasswordHashSalt = os.urandom(16).encode("base64")
    userPasswordHash = hashlib.sha256(str(userPasswordHashSalt) + new_password).hexdigest()
    users[username]["salt"] = userPasswordHashSalt
    # NOTE(review): the scrubbed source read '= <PASSWORD>' here; the hash
    # computed above is the only value that makes user_validate_credentials
    # succeed afterwards.
    users[username]["password"] = userPasswordHash
    with open("/home/pi/goSecure_Web_GUI/users_db.p", "wb") as fout:
        pickle.dump(users, fout)
    return True
# return True if credentials are reset
# else return False
def user_reset_credentials(username, password):
    """Reset *username*'s password back to the factory default.

    NOTE(review): the anonymized source had the default scrubbed to
    "<PASSWORD>"; "gosecure" matches the "go<PASSWORD>" fragment checked in
    login() -- confirm against the project docs before shipping.
    """
    return user_change_credentials(username, password, "gosecure")
# Routes for web pages
# 404 page
@app.errorhandler(404)
def page_not_found(e):
    """Serve the custom 404 template for unknown routes."""
    body = render_template('404.html')
    return body, 404
# Login Page
@app.route("/", methods=["GET", "POST"])
def login():
form = loginForm()
if request.method == "GET":
return render_template("login.html", form=form)
elif request.method == "POST":
if form.validate():
username = form.username.data
password = <PASSWORD>
if user_validate_credentials(username, password):
user = User()
user.id = username
flask_login.login_user(user)
# check to see if default credentials are being used. If so, redirect to change password page.
if user_validate_credentials("admin", "go<PASSWORD>"):
flash("Please change the default password.", "notice")
return redirect(url_for("user"))
else:
internet_status_bool = internet_status()
vpn_status_bool = vpn_status()
vpn_configuration_status_bool = vpn_configuration_status()
# check to see if network is up. If not, redirect to network page
if internet_status_bool is False and vpn_configuration_status_bool is True:
flash("Internet is not reachable.", "notice")
return redirect(url_for("wifi"))
# check to see if network and vpn are up. If not, redirect to initial setup page
elif internet_status_bool is False and vpn_status_bool is False:
return redirect(url_for("initial_setup"))
# check to see if vpn is up. If not, redirect to vpn page
elif vpn_status_bool is False:
flash("VPN is not established.", "notice")
return redirect(url_for("vpn_psk"))
else:
return redirect(request.args.get("next") or url_for("status"))
else:
flash("Invalid username or password. Please try again.", "error")
return render_template("login.html", form=form)
else:
flash_form_errors(form)
return render_template("login.html", form=form)
@app.route('/logout')
def logout():
    # End the Flask-Login session and return to the login form.
    flask_login.logout_user()
    return redirect(url_for("login"))
# Status page
@app.route("/status", methods=["GET", "POST"])
@flask_login.login_required
def status():
    """Render red/green indicators for internet and VPN reachability."""
    form = statusForm()
    if request.method == "GET":
        # check to see if network and vpn are active, red=not active, green=active
        internet_status_color = "green" if internet_status() else "red"
        vpn_status_color = "green" if vpn_status() else "red"
        return render_template("status.html", form=form, internet_status_color=internet_status_color, vpn_status_color=vpn_status_color)
    # NOTE(review): "POST" is accepted by the route but falls through and
    # returns None (a 500 in Flask) -- confirm whether POST should re-render
    # the page or be dropped from `methods`.
# User page
@app.route("/user", methods=["GET", "POST"])
@flask_login.login_required
def user():
form = userForm()
if request.method == "GET":
form.username.data = flask_login.current_user.id
return render_template("user.html", form=form)
elif request.method == "POST":
if form.validate():
username = form.username.data
password = form.password.data
new_password = form.new_password.data
if user_change_credentials(username, password, new_password):
flash("Your user information has been successfully changed. Please login with the new credentials.", "success")
return redirect(url_for("login"))
else:
flash("Invalid current username or password. Please try again.", "error")
return render_template("user.html", form=form)
else:
flash_form_errors(form)
return render_template("user.html", form=form)
# Initial setup page
@app.route("/initial_setup", methods=["GET", "POST"])
@flask_login.login_required
def initial_setup():
form = initialSetupForm()
if request.method == "GET":
return render_template("initial_setup.html", form=form)
elif request.method == "POST":
if form.validate():
ssid = form.ssid.data.rsplit("-", 1)[0]
psk = form.psk.data
add_wifi(ssid, psk)
if internet_status() is True:
vpn_server = form.vpn_server.data
user_id = form.user_id.data
user_psk = form.user_psk.data
set_vpn_params(vpn_server, user_id, user_psk)
restart_vpn()
flash("Wifi and VPN settings saved!", "success")
return redirect(url_for("status"))
else:
flash("Error! Cannot reach the internet...", "error")
return render_template("initial_setup.html", form=form)
else:
flash("Error! " + str(form.data), "error")
return render_template("initial_setup.html", form=form)
# Wifi page
@app.route("/wifi", methods=["GET", "POST"])
@flask_login.login_required
def wifi():
form = wifiForm()
if request.method == "GET":
return render_template("wifi.html", form=form)
elif request.method == "POST":
if form.validate():
ssid = form.ssid.data.rsplit("-", 1)[0]
psk = form.psk.data
add_wifi(ssid, psk)
time.sleep(5)
if internet_status() is True:
restart_vpn()
time.sleep(5)
flash("Wifi settings saved! VPN Restarted!", "success")
return redirect(url_for("status"))
else:
flash("Error! Cannot reach the internet...", "error")
return render_template("wifi.html", form=form)
else:
flash("Error! " + str(form.data), "error")
return render_template("wifi.html", form=form)
# VPN psk page
@app.route("/vpn_psk", methods=["GET", "POST"])
@flask_login.login_required
def vpn_psk():
form = vpnPskForm()
if request.method == "GET":
return render_template("vpn_psk.html", form=form)
elif request.method == "POST":
if form.validate():
vpn_server = form.vpn_server.data
user_id = form.user_id.data
user_psk = form.user_psk.data
set_vpn_params(vpn_server, user_id, user_psk)
restart_vpn()
if vpn_status():
flash("VPN settings saved and VPN restarted!", "success")
return redirect(url_for("status"))
else:
flash("VPN settings saved and VPN restarted! Unable to establish VPN connection.", "error")
return render_template("vpn_psk.html", form=form)
else:
flash("Error! " + str(form.data), "error")
return render_template("vpn_psk.html", form=form)
# Reset to default page
@app.route("/reset_to_default", methods=["GET", "POST"])
@flask_login.login_required
def reset_to_default():
form = resetToDefaultForm()
if request.method == "GET":
form.username.data = flask_login.current_user.id
return render_template("reset_to_default.html", form=form)
elif request.method == "POST":
if form.validate():
username = form.username.data
password = form.password.data
reset_vpn_params()
reset_wifi()
if user_reset_credentials(username, password):
flash("Your client has been successfully reset to default settings.", "success")
return redirect(url_for("logout"))
else:
flash("Error resetting client.", "error")
return render_template("reset_to_default.html", form=form)
else:
flash_form_errors(form)
return render_template("reset_to_default.html", form=form)
@app.route("/action", methods=["POST"])
@flask_login.login_required
def execute_action():
action = request.form["action"]
if action == "reboot":
pi_reboot()
elif action == "shutdown":
pi_shutdown()
elif action == "start_vpn":
start_vpn()
flash("VPN Started!", "notice")
elif action == "stop_vpn":
stop_vpn()
flash("VPN Stopped!", "notice")
elif action == "restart_vpn":
restart_vpn()
flash("VPN Restarted!", "notice")
elif action == "ssh_service":
start_ssh_service()
flash("SSH Service Started! It will be turned off on reboot.")
elif action == "update_client":
update_client()
flash("Client will reboot... please reload this page in 1 minute.")
else:
form = initialSetupForm()
flash("Error! Invalid Action!", "error")
return redirect(url_for("status"))
# REST API
@app.route("/v1.0/vpn/credentials", methods=["POST", "DELETE"])
@requires_basic_auth
def api_vpn_credentials():
    """REST endpoint for the VPN credentials.

    POST (application/json with vpn_server/user_id/user_psk) validates and
    stores the credentials; DELETE resets them to defaults.
    """
    if request.method == "POST":
        # Bug fix: check the Content-Type BEFORE touching request.json —
        # otherwise a non-JSON POST aborts inside request.json and the
        # "415 Unsupported Media Type" message below is never reached.
        if request.headers['Content-Type'] != 'application/json':
            return "415 Unsupported Media Type - Use application/json"
        form = initialSetupForm()
        form.vpn_server.data = request.json["vpn_server"]
        form.user_id.data = request.json["user_id"]
        form.user_psk.data = request.json["user_psk"]
        if form.vpn_server.validate(form) and form.user_id.validate(form) and form.user_psk.validate(form):
            set_vpn_params(form.vpn_server.data, form.user_id.data, form.user_psk.data)
            return "Successfully set vpn_server, user_id, and psk for VPN"
        return "Invalid user_id or psk format"
    elif request.method == "DELETE":
        reset_vpn_params()
        return "Successfully reset vpn_server, user_id, and psk for VPN"
    else:
        return "Only POST and DELETE methods are supported. Refer to the API Documentation"
@app.route("/v1.0/vpn/actions", methods=["POST"])
@requires_basic_auth
def api_vpn_actions():
if request.method == "POST":
if request.headers['Content-Type'] == 'application/json':
action = request.json["action"]
if action == "start_vpn":
if start_vpn():
return "VPN service started, VPN is ESTABLISHED"
else:
return "VPN service started, VPN is NOT ESTABLISHED"
elif action == "stop_vpn":
stop_vpn()
return "VPN service stopped, VPN is NOT ESTABLISHED"
elif action == "restart_vpn":
if restart_vpn():
return "VPN service restarted, VPN is ESTABLISHED"
else:
return "VPN service restarted, VPN is NOT ESTABLISHED"
else:
return "Error! Invalid Action!"
else:
return "415 Unsupported Media Type - Use application/json"
else:
return "Only POST method is supported. Refer to the API Documentation"
if __name__ == "__main__":
app.secret_key = os.urandom(24)
# if SSL key and certificate pair do not exist, create them.
if (os.path.exists("ssl.key") and os.path.exists("ssl.crt")) is not True:
os.system('openssl genrsa 2048 > ssl.key')
os.system('openssl req -new -x509 -nodes -sha256 -days 1095 -subj "/C=US/O=goSecure/CN=goSecureClient" -key ssl.key > ssl.crt')
os.system('sudo chown pi:pi ssl.crt ssl.key')
os.system('sudo chmod 440 ssl.crt ssl.key')
app.run(host="192.168.50.1", port=443, ssl_context=("ssl.crt", "ssl.key"))
| 6,998 |
1,545 | <reponame>imilas/tsai<gh_stars>1000+
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/122_models.TabFusionTransformer.ipynb (unless otherwise specified).
__all__ = ['ifnone', 'Sequential', 'TabFusionBackbone', 'TabFusionTransformer', 'TSTabFusionTransformer']
# Cell
# This is a modified Pytorch implementation based on TabTransformer created by <NAME> (<EMAIL>):
# <NAME>., <NAME>., <NAME>., & <NAME>. (2020).
# TabTransformer: Tabular Data Modeling Using Contextual Embeddings.
# arXiv preprint https://arxiv.org/pdf/2012.06678
# Official repo: https://github.com/awslabs/autogluon/tree/master/tabular/src/autogluon/tabular/models/tab_transformer
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
def ifnone(a, b):
    """Return `a` unless it is None, in which case return `b` (fastai.fastcore idiom)."""
    if a is None:
        return b
    return a
class _Flatten(nn.Module):
def __init__(self, full=False):
super().__init__()
self.full = full
def forward(self, x):
return x.view(-1) if self.full else x.view(x.size(0), -1)
class Sequential(nn.Sequential):
    """nn.Sequential variant whose forward accepts one or multiple inputs.

    A tuple/list running value is splatted into the next module; a single
    tensor is passed as-is.
    """

    def forward(self, *x):
        for module in self._modules.values():
            if isinstance(x, (list, tuple)):
                x = module(*x)
            else:
                x = module(x)
        return x
class _MLP(nn.Module):
def __init__(self, dims, bn=False, act=None, skip=False, dropout=0., bn_final=False):
super().__init__()
dims_pairs = list(zip(dims[:-1], dims[1:]))
layers = []
for i, (dim_in, dim_out) in enumerate(dims_pairs):
is_last = i >= (len(dims) - 2)
if bn and (not is_last or bn_final): layers.append(nn.BatchNorm1d(dim_in))
if dropout and not is_last:
layers.append(nn.Dropout(dropout))
layers.append(nn.Linear(dim_in, dim_out))
if is_last: break
layers.append(ifnone(act, nn.ReLU()))
self.mlp = nn.Sequential(*layers)
self.shortcut = nn.Linear(dims[0], dims[-1]) if skip else None
def forward(self, x):
if self.shortcut is not None:
return self.mlp(x) + self.shortcut(x)
else:
return self.mlp(x)
class _ScaledDotProductAttention(nn.Module):
def __init__(self, d_k:int, res_attention:bool=False):
super().__init__()
self.d_k,self.res_attention = d_k,res_attention
def forward(self, q, k, v, prev=None, attn_mask=None):
# MatMul (q, k) - similarity scores for all pairs of positions in an input sequence
scores = torch.matmul(q, k) # scores : [bs x n_heads x q_len x q_len]
# Scale
scores = scores / (self.d_k ** 0.5)
# Attention mask (optional)
if attn_mask is not None: # mask with shape [q_len x q_len]
if attn_mask.dtype == torch.bool:
scores.masked_fill_(attn_mask, float('-inf'))
else:
scores += attn_mask
# SoftMax
if prev is not None: scores = scores + prev
attn = F.softmax(scores, dim=-1) # attn : [bs x n_heads x q_len x q_len]
# MatMul (attn, v)
context = torch.matmul(attn, v) # context: [bs x n_heads x q_len x d_v]
if self.res_attention: return context, attn, scores
else: return context, attn
class _MultiheadAttention(nn.Module):
    def __init__(self, d_model:int, n_heads:int, d_k:int, d_v:int, res_attention:bool=False):
        """Multi-head attention. Input shape: Q, K, V:[batch_size (bs) x q_len x d_model], mask:[q_len x q_len]"""
        super().__init__()
        self.n_heads = n_heads
        self.d_k = d_k
        self.d_v = d_v
        # One bias-free projection per role; heads are split out of the projected dim.
        self.W_Q = nn.Linear(d_model, d_k * n_heads, bias=False)
        self.W_K = nn.Linear(d_model, d_k * n_heads, bias=False)
        self.W_V = nn.Linear(d_model, d_v * n_heads, bias=False)
        self.W_O = nn.Linear(n_heads * d_v, d_model, bias=False)
        self.res_attention = res_attention
        self.sdp_attn = _ScaledDotProductAttention(self.d_k, res_attention=res_attention)

    def forward(self, Q, K, V, prev=None, attn_mask=None):
        bs = Q.size(0)
        # Project, then split heads:
        #   q_s: [bs x n_heads x q_len x d_k]
        #   k_s: [bs x n_heads x d_k x q_len]  (pre-transposed for the matmul)
        #   v_s: [bs x n_heads x q_len x d_v]
        q_s = self.W_Q(Q).view(bs, -1, self.n_heads, self.d_k).transpose(1, 2)
        k_s = self.W_K(K).view(bs, -1, self.n_heads, self.d_k).permute(0, 2, 3, 1)
        v_s = self.W_V(V).view(bs, -1, self.n_heads, self.d_v).transpose(1, 2)
        if self.res_attention:
            context, attn, scores = self.sdp_attn(q_s, k_s, v_s, prev=prev, attn_mask=attn_mask)
        else:
            context, attn = self.sdp_attn(q_s, k_s, v_s, attn_mask=attn_mask)
        # Merge heads back: [bs x q_len x n_heads * d_v], then project to d_model.
        context = context.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * self.d_v)
        output = self.W_O(context)
        if self.res_attention:
            return output, attn, scores
        return output, attn
class _TabFusionEncoderLayer(nn.Module):
    """One transformer encoder layer: multi-head self-attention and a position-wise
    feed-forward network, each followed by residual dropout, a residual connection
    and LayerNorm (post-norm)."""

    def __init__(self, q_len, d_model, n_heads, d_k=None, d_v=None, d_ff=None,
                 res_dropout=0.1, activation="gelu", res_attention=False):
        # NOTE(review): q_len is accepted but not used anywhere in this layer — confirm.
        super().__init__()
        assert not d_model%n_heads, f"d_model ({d_model}) must be divisible by n_heads ({n_heads})"
        d_k = ifnone(d_k, d_model // n_heads)
        d_v = ifnone(d_v, d_model // n_heads)
        d_ff = ifnone(d_ff, d_model * 4)
        # Multi-Head attention
        self.res_attention = res_attention
        self.self_attn = _MultiheadAttention(d_model, n_heads, d_k, d_v, res_attention=res_attention)
        # Add & Norm
        self.dropout_attn = nn.Dropout(res_dropout)
        self.layernorm_attn = nn.LayerNorm(d_model)
        # Position-wise Feed-Forward
        self.ff = nn.Sequential(nn.Linear(d_model, d_ff), self._get_activation_fn(activation), nn.Linear(d_ff, d_model))
        # Add & Norm
        self.dropout_ffn = nn.Dropout(res_dropout)
        self.layernorm_ffn = nn.LayerNorm(d_model)

    def forward(self, src, prev=None, attn_mask=None):
        # Multi-Head attention sublayer
        if self.res_attention:
            src2, attn, scores = self.self_attn(src, src, src, prev, attn_mask=attn_mask)
        else:
            src2, attn = self.self_attn(src, src, src, attn_mask=attn_mask)
        self.attn = attn  # kept on the module for inspection/visualization
        src = src + self.dropout_attn(src2)  # residual connection with residual dropout
        src = self.layernorm_attn(src)
        # Feed-forward sublayer
        src2 = self.ff(src)
        src = src + self.dropout_ffn(src2)
        src = self.layernorm_ffn(src)
        if self.res_attention:
            return src, scores
        return src

    def _get_activation_fn(self, activation):
        """Map an activation name (or a callable module class) to a module instance."""
        if callable(activation): return activation()
        elif activation.lower() == "relu": return nn.ReLU()
        elif activation.lower() == "gelu": return nn.GELU()
        # Bug fix: `Mish()` was referenced without being defined or imported, which
        # raised NameError for activation="mish"; use the built-in nn.Mish
        # (available since PyTorch 1.9).
        elif activation.lower() == "mish": return nn.Mish()
        raise ValueError(f'{activation} is not available. You can use "relu", "gelu", "mish" or a callable')
class _TabFusionEncoder(nn.Module):
    """Stack of `n_layers` _TabFusionEncoderLayer modules; with residual attention
    the raw scores of each layer are threaded into the next one."""

    def __init__(self, q_len, d_model, n_heads, d_k=None, d_v=None, d_ff=None, res_dropout=0.1, activation='gelu', res_attention=False, n_layers=1):
        super().__init__()
        self.layers = nn.ModuleList(
            [_TabFusionEncoderLayer(q_len, d_model, n_heads=n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, res_dropout=res_dropout,
                                    activation=activation, res_attention=res_attention)
             for _ in range(n_layers)])
        self.res_attention = res_attention

    def forward(self, src, attn_mask=None):
        output = src
        if not self.res_attention:
            for layer in self.layers:
                output = layer(output, attn_mask=attn_mask)
            return output
        scores = None
        for layer in self.layers:
            output, scores = layer(output, prev=scores, attn_mask=attn_mask)
        return output
class TabFusionBackbone(nn.Module):
    """Embed categorical and continuous tabular features into a shared d_model space
    and run the resulting token sequence through a transformer encoder.

    `classes` maps each categorical column to its category values; `cont_names`
    lists the continuous columns.
    """

    def __init__(self, classes, cont_names, d_model=32, n_layers=6, n_heads=8, d_k=None, d_v=None, d_ff=None, init=True,
                 res_attention=True, attention_act='gelu', res_dropout=0.):
        super().__init__()

        # Categorical: one embedding table per column.
        n_cat = len(classes)
        n_classes = [len(v) for v in classes.values()]
        self.n_emb = sum(n_classes)  # total number of category levels across all columns
        self.embeds = nn.ModuleList([nn.Embedding(ni, d_model) for ni in n_classes])

        # Continuous: a 1x1 conv lifts each scalar feature to a d_model vector.
        n_cont = len(cont_names)
        self.n_cont = n_cont
        self.conv = nn.Conv1d(1, d_model, 1)
        if init: nn.init.kaiming_normal_(self.conv.weight)

        # Transformer: learnable positional encoding, one position per feature.
        self.res_drop = nn.Dropout(res_dropout) if res_dropout else None
        self.pos_enc = nn.Parameter(torch.zeros(1, (n_cat + n_cont), d_model))
        self.transformer = _TabFusionEncoder(n_cat + n_cont, d_model, n_heads=n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, res_dropout=res_dropout,
                                             activation=attention_act, res_attention=res_attention, n_layers=n_layers)

    def forward(self, x_cat, x_cont=None):
        # Input encoding: concatenate per-column category embeddings with the
        # conv-lifted continuous features along the token axis.
        if self.n_emb != 0:
            x = [e(x_cat[:,i]).unsqueeze(1) for i,e in enumerate(self.embeds)]
            x = torch.cat(x, 1)
        if self.n_cont != 0:
            x_cont = self.conv(x_cont.unsqueeze(1)).transpose(1,2)
            x = torch.cat([x, x_cont], 1) if self.n_emb != 0 else x_cont

        # Transformer over the feature tokens.
        x += self.pos_enc
        if self.res_drop is not None: x = self.res_drop(x)
        x = self.transformer(x)
        return x
class TabFusionTransformer(Sequential):
    """Full TabFusion model for tabular data: TabFusionBackbone followed by a
    flatten + MLP head producing `c_out` outputs.

    `fc_mults` sizes the MLP hidden layers as multiples of the flattened
    backbone output.
    """
    def __init__(self, classes, cont_names, c_out,
                 d_model=32, n_layers=6, n_heads=8, d_k=None, d_v=None, d_ff=None, res_attention=True, attention_act='gelu', res_dropout=0.,
                 fc_mults=(4, 2), fc_dropout=0., fc_act=None, fc_skip=False, fc_bn=False, bn_final=False, init=True):
        # NOTE(review): __init__ is called twice — once here and once below with the
        # OrderedDict, which re-registers the modules; the first call looks redundant.
        super().__init__()

        # Backbone
        backbone = TabFusionBackbone(classes, cont_names, d_model=d_model, n_layers=n_layers, n_heads=n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, init=init,
                                     res_attention=res_attention, attention_act=attention_act, res_dropout=res_dropout)

        # Head: flatten all feature tokens, then an MLP sized by fc_mults.
        mlp_input_size = (d_model * (len(classes) + len(cont_names)))
        hidden_dimensions = list(map(lambda t: int(mlp_input_size * t), fc_mults))
        all_dimensions = [mlp_input_size, *hidden_dimensions, c_out]
        self.head_nf = mlp_input_size
        head = nn.Sequential(*[_Flatten(), _MLP(all_dimensions, act=fc_act, skip=fc_skip, bn=fc_bn, dropout=fc_dropout, bn_final=bn_final)])
        super().__init__(OrderedDict([('backbone', backbone), ('head', head)]))
# Cell
class TSTabFusionTransformer(nn.Module):
    """Fusion model for a time series plus tabular features.

    The time series (`c_in` channels x `seq_len` steps), the categorical
    embeddings and the conv-lifted continuous features are all projected to
    `d_model`, concatenated along the token axis, run through a transformer
    encoder, then flattened into an MLP head that outputs `c_out` values.
    Input to forward: ``(x_ts, (x_cat, x_cont))``.
    """
    def __init__(self, c_in, c_out, seq_len, classes, cont_names,
                 d_model=32, n_layers=6, n_heads=8, d_k=None, d_v=None, d_ff=None, res_attention=True, attention_act='gelu', res_dropout=0.,
                 fc_mults=(1, .5), fc_dropout=0., fc_act=None, fc_skip=False, fc_bn=False, bn_final=False, init=True):
        super().__init__()

        # Time series: 1x1 conv projects each step from c_in channels to d_model.
        self.W_P = nn.Conv1d(c_in, d_model, 1)

        # Categorical
        n_cat = len(classes)
        n_classes = [len(v) for v in classes.values()]
        self.n_emb = sum(n_classes)
        self.embeds = nn.ModuleList([nn.Embedding(ni, d_model) for ni in n_classes])

        # Continuous
        n_cont = len(cont_names)
        self.n_cont = n_cont
        self.conv = nn.Conv1d(1, d_model, 1)
        if init: nn.init.kaiming_normal_(self.conv.weight)

        # Transformer
        self.res_drop = nn.Dropout(res_dropout) if res_dropout else None
        self.pos_enc = nn.Parameter(torch.zeros(1, (n_cat + n_cont + seq_len), d_model))
        # NOTE(review): the q_len argument below omits seq_len although pos_enc
        # includes it; q_len is unused inside the encoder layers, so this appears
        # harmless — confirm.
        self.transformer = _TabFusionEncoder(n_cat + n_cont, d_model, n_heads=n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, res_dropout=res_dropout,
                                             activation=attention_act, res_attention=res_attention, n_layers=n_layers)

        # Head: flatten every token (features + time steps) into an MLP.
        mlp_input_size = (d_model * (n_cat + n_cont + seq_len))
        hidden_dimensions = list(map(lambda t: int(mlp_input_size * t), fc_mults))
        all_dimensions = [mlp_input_size, *hidden_dimensions, c_out]
        self.head_nf = mlp_input_size
        self.head = nn.Sequential(*[_Flatten(), _MLP(all_dimensions, act=fc_act, skip=fc_skip, bn=fc_bn, dropout=fc_dropout, bn_final=bn_final)])

    def forward(self, x):
        x_ts, (x_cat, x_cont) = x
        # Time series: [bs x seq_len x d_model] after the projection.
        x = self.W_P(x_ts).transpose(1,2)
        # Input encoding: append categorical and continuous feature tokens.
        if self.n_emb != 0:
            x_cat = [e(x_cat[:,i]).unsqueeze(1) for i,e in enumerate(self.embeds)]
            x_cat = torch.cat(x_cat, 1)
            x = torch.cat([x, x_cat], 1)
        if self.n_cont != 0:
            x_cont = self.conv(x_cont.unsqueeze(1)).transpose(1,2)
            x = torch.cat([x, x_cont], 1)
        # Transformer
        x += self.pos_enc
        if self.res_drop is not None: x = self.res_drop(x)
        x = self.transformer(x)
        # Head
        x = self.head(x)
        return x
4,772 | <reponame>fjacobs/spring-data-examples
package example.repo;
import example.model.Customer80;
import java.util.List;
import org.springframework.data.repository.CrudRepository;
/**
 * Spring Data CRUD repository for {@code Customer80} entities keyed by {@code Long} ids.
 */
public interface Customer80Repository extends CrudRepository<Customer80, Long> {
	/** Derived query: all customers whose last name equals {@code lastName}. */
	List<Customer80> findByLastName(String lastName);
}
| 99 |
309 | # -*- coding: utf-8 -*-
"""
A short Python script
=====================
This demonstrates an example ``.py`` file that is not executed when gallery is
generated (see :ref:`build_pattern`) but nevertheless gets included as an
example. Note that no output is capture as this file is not executed.
"""
# Code source: <NAME>
# License: BSD 3 clause
from __future__ import print_function
print([i for i in range(10)])
| 118 |
335 | <gh_stars>100-1000
{
"word": "Ecumenical",
"definitions": [
"Representing a number of different Christian Churches.",
"Promoting or relating to unity among the world's Christian Churches."
],
"parts-of-speech": "Adjective"
} | 94 |
324 | <filename>tests/utils/test_logger.py
import logging
import os
import pytest
from kale.utils import logger
@pytest.fixture(scope="module")
def testing_logger():
save_dir = os.getcwd()
testing_logger = logger.construct_logger("test", save_dir)
yield testing_logger
# Gather info
filehandler = testing_logger.handlers[0]
log_file_name = filehandler.baseFilename
# Teardown log file
filehandler.close()
os.remove(log_file_name)
# Teardown gitdiff.patch file
[folder, file] = os.path.split(log_file_name)
file_core = os.path.splitext(file)[0]
gitdiff_file_name = os.path.join(folder, file_core + ".gitdiff.patch")
os.remove(gitdiff_file_name)
@pytest.fixture
def log_file_name(testing_logger):
    """Path of the log file backing the module logger's first handler."""
    yield testing_logger.handlers[0].baseFilename
@pytest.fixture
def gitdiff_file_name(log_file_name):
    """Path of the gitdiff.patch file that sits next to the log file."""
    folder, file = os.path.split(log_file_name)
    yield os.path.join(folder, os.path.splitext(file)[0] + ".gitdiff.patch")
def test_out_file_core():
    """out_file_core() must return the file-name core as a string."""
    out_file_eg = logger.out_file_core()
    assert isinstance(out_file_eg, str)
def test_logger_type(testing_logger):
    """construct_logger() must hand back a logging.Logger instance."""
    assert isinstance(testing_logger, logging.Logger)
def test_log_file_exists(log_file_name):
    """The logger's file handler must have created its log file on disk."""
    assert os.path.isfile(log_file_name)
def test_gitdiff_file_exists(gitdiff_file_name):
    """construct_logger() must also have written the gitdiff.patch file."""
    assert os.path.isfile(gitdiff_file_name)
| 559 |
1,403 | <filename>test/Java/Java-1.6.py
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test Java compilation with a live Java 1.6 "javac" compiler.
"""
import os
import sys
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.dir_fixture('java_version_image')
version = '1.6'
where_javac, java_version = test.java_where_javac(version)
javac_path=os.path.dirname(where_javac)
if ' ' in javac_path:
javac_path ='"%s"'%javac_path
java_arguments=["--javac_path=%s"%javac_path,"--java_version=%s"%version]
test.run(arguments = ['.']+java_arguments)
expect_1 = [
test.workpath('class1', 'com', 'other', 'Example2.class'),
test.workpath('class1', 'com', 'sub', 'foo', 'Example1.class'),
test.workpath('class1', 'com', 'sub', 'foo', 'Example3.class'),
]
expect_2 = [
test.workpath('class2', 'com', 'other', 'Example5.class'),
test.workpath('class2', 'com', 'sub', 'bar', 'Example4.class'),
test.workpath('class2', 'com', 'sub', 'bar', 'Example6.class'),
]
expect_3 = [
test.workpath('class3', 'Empty.class'),
test.workpath('class3', 'Example7.class'),
test.workpath('class3', 'Listener.class'),
test.workpath('class3', 'Private$1.class'),
test.workpath('class3', 'Private.class'),
test.workpath('class3', 'Test$1$1.class'),
test.workpath('class3', 'Test$1.class'),
test.workpath('class3', 'Test$Inner$1.class'),
test.workpath('class3', 'Test$Inner.class'),
test.workpath('class3', 'Test.class'),
]
expect_4 = [
test.workpath('class4', 'NestedExample$1$1.class'),
test.workpath('class4', 'NestedExample$1.class'),
test.workpath('class4', 'NestedExample.class'),
]
expect_5 = [
test.workpath('class5', 'Foo.class'),
test.workpath('class5', 'TestSCons.class'),
]
expect_6 = [
test.workpath('class6', 'test$1.class'),
test.workpath('class6', 'test$inner.class'),
test.workpath('class6', 'test.class'),
]
failed = None
def classes_must_match(dir, expect):
    """Compare the class files found under `dir` with `expect`; on mismatch,
    dump both lists to stderr and set the module-level `failed` flag."""
    global failed
    got = test.java_get_class_files(test.workpath(dir))
    if expect == got:
        return
    sys.stderr.write("Expected the following class files in '%s':\n" % dir)
    for c in expect:
        sys.stderr.write(' %s\n' % c)
    sys.stderr.write("Got the following class files in '%s':\n" % dir)
    for c in got:
        sys.stderr.write(' %s\n' % c)
    failed = 1
def classes_must_not_exist(dir, expect):
    """After a clean, none of the class files in `expect` may remain; any
    leftovers are reported to stderr and set the module-level `failed` flag."""
    global failed
    leftovers = [path for path in expect if os.path.exists(path)]
    if not leftovers:
        return
    sys.stderr.write("Found the following unexpected class files in '%s' after cleaning:\n" % dir)
    for c in leftovers:
        sys.stderr.write(' %s\n' % c)
    failed = 1
# Verify every tree produced exactly the expected class files.
classes_must_match('class1', expect_1)
classes_must_match('class2', expect_2)
classes_must_match('class3', expect_3)
classes_must_match('class4', expect_4)
classes_must_match('class5', expect_5)
classes_must_match('class6', expect_6)
test.fail_test(failed)
# A second run with identical inputs must be a no-op.
test.up_to_date(options=["--debug=explain"]+java_arguments,
                arguments = '.')
# Clean, then verify the generated class files are gone.
test.run(arguments = ['-c','.']+java_arguments)
classes_must_not_exist('class1', expect_1)
classes_must_not_exist('class2', expect_2)
classes_must_not_exist('class3', expect_3)
classes_must_not_exist('class4', expect_4)
classes_must_not_exist('class5', expect_5)
# This test case should pass, but doesn't.
# The expect_6 list contains the class files that the Java compiler
# actually creates, apparently because of the "private" instantiation
# of the "inner" class. Our parser doesn't currently detect this, so
# it doesn't know to remove that generated class file.
#classes_must_not_exist('class6', expect_6)
test.fail_test(failed)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
354 | #include <gvc.h>
#include <string.h>
#include <stdlib.h>
#include <gvc.h>
extern "C"
{
extern void gv_string_writer_init(GVC_t *gvc);
extern void gv_channel_writer_init(GVC_t *gvc);
extern void gv_writer_reset(GVC_t *gvc);
}
#define agfindattr(x, s) agattrsym(x, s)
#define agraphattr(g, n, s) agattr(g, AGRAPH, n, s)
#define agnodeattr(g, n, s) agattr(g, AGNODE, n, s)
#define agedgeattr(g, n, s) agattr(g, AGEDGE, n, s)
static char emptystring[] = {'\0'};
static GVC_t *gvc;
// Lazily create the library-wide Graphviz context on first use.
static void gv_init(void)
{
    gvc = gvContext();
}
// Constructors for the four graph kinds (directed/undirected x strict/non-strict).
Agraph_t *graph(char *name)
{
    if (!gvc)
        gv_init();
    return agopen(name, Agundirected, 0);
}
Agraph_t *digraph(char *name)
{
    if (!gvc)
        gv_init();
    return agopen(name, Agdirected, 0);
}
Agraph_t *strictgraph(char *name)
{
    if (!gvc)
        gv_init();
    return agopen(name, Agstrictundirected, 0);
}
Agraph_t *strictdigraph(char *name)
{
    if (!gvc)
        gv_init();
    return agopen(name, Agstrictdirected, 0);
}
// Parse a graph from an in-memory DOT string.
Agraph_t *readstring(char *string)
{
    if (!gvc)
        gv_init();
    return agmemread(string);
}
// Parse a graph from an already-open stream.
Agraph_t *read(FILE *f)
{
    if (!gvc)
        gv_init();
    return agread(f, NULL);
}
// Parse a graph from a file path; returns NULL if the file cannot be opened.
Agraph_t *read(const char *filename)
{
    FILE *f;
    Agraph_t *g;
    f = fopen(filename, "r");
    if (!f)
        return NULL;
    if (!gvc)
        gv_init();
    g = agread(f, NULL);
    fclose(f);
    return g;
}
//-------------------------------------------------
// Builders: create (or fetch) subgraphs, nodes and edges.
Agraph_t *graph(Agraph_t *g, char *name)
{
    if (!gvc)
        gv_init();
    return agsubg(g, name, 1);
}
Agnode_t *node(Agraph_t *g, char *name)
{
    // NOTE(review): unlike graph() above, a missing gvc returns NULL here
    // instead of initializing — confirm this asymmetry is intentional.
    if (!gvc)
        return NULL;
    return agnode(g, name, 1);
}
Agedge_t *edge(Agraph_t *g, Agnode_t *t, Agnode_t *h)
{
    if (!gvc || !t || !h || !g)
        return NULL;
    // edges from/to the protonode are not permitted
    if (AGTYPE(t) == AGRAPH || AGTYPE(h) == AGRAPH)
        return NULL;
    return agedge(g, t, h, NULL, 1);
}
Agedge_t *edge(Agnode_t *t, Agnode_t *h)
{
    return edge(agraphof(t), t, h);
}
// induce tail if necessary
Agedge_t *edge(char *tname, Agnode_t *h)
{
    return edge(node(agraphof(h), tname), h);
}
// induce head if necessary
Agedge_t *edge(Agnode_t *t, char *hname)
{
    return edge(t, node(agraphof(t), hname));
}
// induce tail/head if necessary
Agedge_t *edge(Agraph_t *g, char *tname, char *hname)
{
    return edge(g, node(g, tname), node(g, hname));
}
//-------------------------------------------------
// Fetch an attribute value; HTML-like "label" values are wrapped back in <...>
// so the caller sees the same syntax that was used to set them.
static char *myagxget(void *obj, Agsym_t *a)
{
    int len;
    char *val, *hs;
    if (!obj || !a)
        return emptystring;
    val = agxget(obj, a);
    if (!val)
        return emptystring;
    if (a->name[0] == 'l' && strcmp(a->name, "label") == 0 && aghtmlstr(val))
    {
        len = strlen(val);
        hs = (char *)malloc(len + 3);
        hs[0] = '<';
        strcpy(hs + 1, val);
        hs[len + 1] = '>';
        hs[len + 2] = '\0';
        return hs; // NOTE(review): heap-allocated — confirm the caller frees it
    }
    return val;
}
char *getv(Agraph_t *g, Agsym_t *a)
{
    return myagxget(g, a);
}
char *getv(Agraph_t *g, char *attr)
{
    Agsym_t *a;
    if (!g || !attr)
        return NULL;
    a = agfindattr(agroot(g), attr);
    return myagxget(g, a);
}
// Set an attribute value; "label" values of the form <...> are stored as
// HTML-like strings with the surrounding angle brackets stripped.
static void myagxset(void *obj, Agsym_t *a, char *val)
{
    int len;
    char *hs;
    if (a->name[0] == 'l' && val[0] == '<' && strcmp(a->name, "label") == 0)
    {
        len = strlen(val);
        if (val[len - 1] == '>')
        {
            hs = strdup(val + 1);
            *(hs + len - 2) = '\0';
            val = agstrdup_html(agraphof(obj), hs);
            free(hs);
        }
    }
    agxset(obj, a, val);
}
char *setv(Agraph_t *g, Agsym_t *a, char *val)
{
    if (!g || !a || !val)
        return NULL;
    myagxset(g, a, val);
    return val;
}
char *setv(Agraph_t *g, char *attr, char *val)
{
    Agsym_t *a;
    if (!g || !attr || !val)
        return NULL;
    a = agfindattr(agroot(g), attr);
    if (!a)
        a = agraphattr(g->root, attr, emptystring); // declare the attribute on first use
    myagxset(g, a, val);
    return val;
}
//-------------------------------------------------
// Node attribute access; the "protonode" pseudo-object (a graph cast to a
// node, see protonode() below) is handled specially.
char *getv(Agnode_t *n, Agsym_t *a)
{
    if (!n || !a)
        return NULL;
    if (AGTYPE(n) == AGRAPH) // protonode
        return NULL; // FIXME ??
    return myagxget(n, a);
}
char *getv(Agnode_t *n, char *attr)
{
    Agraph_t *g;
    Agsym_t *a;
    if (!n || !attr)
        return NULL;
    if (AGTYPE(n) == AGRAPH) // protonode
        return NULL; // FIXME ??
    g = agroot(agraphof(n));
    a = agattr(g, AGNODE, attr, NULL);
    return myagxget(n, a);
}
char *setv(Agnode_t *n, Agsym_t *a, char *val)
{
    if (!n || !a || !val)
        return NULL;
    if (AGTYPE(n) == AGRAPH) // protonode
        return NULL; // FIXME ??
    myagxset(n, a, val);
    return val;
}
char *setv(Agnode_t *n, char *attr, char *val)
{
    Agraph_t *g;
    Agsym_t *a;
    if (!n || !attr || !val)
        return NULL;
    if (AGTYPE(n) == AGRAPH)
    { // protonode
        g = (Agraph_t *)n;
        a = agattr(g, AGNODE, attr, val); // create default attribute in pseudo protonode
        // FIXME? - deal with html in "label" attributes
        return val;
    }
    g = agroot(agraphof(n));
    a = agattr(g, AGNODE, attr, NULL);
    if (!a)
        a = agnodeattr(g, attr, emptystring); // declare the attribute on first use
    myagxset(n, a, val);
    return val;
}
//-------------------------------------------------
// Edge attribute access; the "protoedge" pseudo-object (a graph cast to an
// edge, see protoedge() below) is handled specially.
char *getv(Agedge_t *e, Agsym_t *a)
{
    if (!e || !a)
        return NULL;
    if (AGTYPE(e) == AGRAPH) // protoedge
        return NULL; // FIXME ??
    return myagxget(e, a);
}
char *getv(Agedge_t *e, char *attr)
{
    Agraph_t *g;
    Agsym_t *a;
    if (!e || !attr)
        return NULL;
    if (AGTYPE(e) == AGRAPH) // protoedge
        return NULL; // FIXME ??
    g = agraphof(agtail(e));
    a = agattr(g, AGEDGE, attr, NULL);
    return myagxget(e, a);
}
char *setv(Agedge_t *e, Agsym_t *a, char *val)
{
    if (!e || !a || !val)
        return NULL;
    if (AGTYPE(e) == AGRAPH) // protoedge
        return NULL; // FIXME ??
    myagxset(e, a, val);
    return val;
}
char *setv(Agedge_t *e, char *attr, char *val)
{
    Agraph_t *g;
    Agsym_t *a;
    if (!e || !attr || !val)
        return NULL;
    if (AGTYPE(e) == AGRAPH)
    { // protoedge
        g = (Agraph_t *)e;
        a = agattr(g, AGEDGE, attr, val); // create default attribute in pseudo protoedge
        // FIXME? - deal with html in "label" attributes
        return val;
    }
    g = agroot(agraphof(agtail(e)));
    a = agattr(g, AGEDGE, attr, NULL);
    if (!a)
        a = agattr(g, AGEDGE, attr, emptystring); // declare the attribute on first use
    myagxset(e, a, val);
    return val;
}
//-------------------------------------------------
// Lookup helpers: return an existing object or NULL; never create.
Agraph_t *findsubg(Agraph_t *g, char *name)
{
    if (!g || !name)
        return NULL;
    return agsubg(g, name, 0);
}
Agnode_t *findnode(Agraph_t *g, char *name)
{
    if (!g || !name)
        return NULL;
    return agnode(g, name, 0);
}
Agedge_t *findedge(Agnode_t *t, Agnode_t *h)
{
    if (!t || !h)
        return NULL;
    // proto objects cannot participate in edges
    if (AGTYPE(t) == AGRAPH || AGTYPE(h) == AGRAPH)
        return NULL;
    return agfindedge(agraphof(t), t, h);
}
Agsym_t *findattr(Agraph_t *g, char *name)
{
    if (!g || !name)
        return NULL;
    return agfindattr(g, name);
}
Agsym_t *findattr(Agnode_t *n, char *name)
{
    if (!n || !name)
        return NULL;
    return agfindattr(n, name);
}
Agsym_t *findattr(Agedge_t *e, char *name)
{
    if (!e || !name)
        return NULL;
    return agfindattr(e, name);
}
//-------------------------------------------------
// Endpoint and ownership accessors.
Agnode_t *headof(Agedge_t *e)
{
    if (!e)
        return NULL;
    if (AGTYPE(e) == AGRAPH)
        return NULL;
    return aghead(e);
}
Agnode_t *tailof(Agedge_t *e)
{
    if (!e)
        return NULL;
    if (AGTYPE(e) == AGRAPH)
        return NULL;
    return agtail(e);
}
// Parent/root graph of a subgraph; NULL for the root itself.
Agraph_t *graphof(Agraph_t *g)
{
    if (!g || g == g->root)
        return NULL;
    return agroot(g);
}
Agraph_t *graphof(Agedge_t *e)
{
    if (!e)
        return NULL;
    if (AGTYPE(e) == AGRAPH)
        return (Agraph_t *)e; /* graph of protoedge is itself recast */
    return agraphof(agtail(e));
}
Agraph_t *graphof(Agnode_t *n)
{
    if (!n)
        return NULL;
    if (AGTYPE(n) == AGRAPH)
        return (Agraph_t *)n; /* graph of protonode is itself recast */
    return agraphof(n);
}
Agraph_t *rootof(Agraph_t *g)
{
    if (!g)
        return NULL;
    return agroot(g);
}
//-------------------------------------------------
// A graph doubles as its own "protonode"/"protoedge" pseudo-objects, used to
// hold default attributes (see the setv overloads above).
Agnode_t *protonode(Agraph_t *g)
{
    if (!g)
        return NULL;
    return (Agnode_t *)g; // gross abuse of the type system!
}
Agedge_t *protoedge(Agraph_t *g)
{
    if (!g)
        return NULL;
    return (Agedge_t *)g; // gross abuse of the type system!
}
//-------------------------------------------------
// Name accessors.
char *nameof(Agraph_t *g)
{
    if (!g)
        return NULL;
    return agnameof(g);
}
char *nameof(Agnode_t *n)
{
    if (!n)
        return NULL;
    if (AGTYPE(n) == AGRAPH)
        return NULL;
    return agnameof(n);
}
//char *nameof(Agedge_t *e)
//{
//    if (!e)
//        return NULL;
//    if (AGTYPE(e) == AGRAPH)
//        return NULL;
//    return agnameof(e);
//}
char *nameof(Agsym_t *a)
{
    if (!a)
        return NULL;
    return a->name;
}
//-------------------------------------------------
// Validity predicates: a non-NULL handle means "ok".
bool ok(Agraph_t *g)
{
    if (!g)
        return false;
    return true;
}
bool ok(Agnode_t *n)
{
    if (!n)
        return false;
    return true;
}
bool ok(Agedge_t *e)
{
    if (!e)
        return false;
    return true;
}
bool ok(Agsym_t *a)
{
    if (!a)
        return false;
    return true;
}
//-------------------------------------------------
// Iterators: first*/next* pairs over subgraphs and graph-wide out-edges.
Agraph_t *firstsubg(Agraph_t *g)
{
    if (!g)
        return NULL;
    return agfstsubg(g);
}
Agraph_t *nextsubg(Agraph_t *g, Agraph_t *sg)
{
    if (!g || !sg)
        return NULL;
    return agnxtsubg(sg);
}
Agraph_t *firstsupg(Agraph_t *g)
{
    return g->parent;
}
// Agraph_t *nextsupg(Agraph_t *g, Agraph_t *sg)
// {
//     return NULL;
// }
// First out-edge anywhere in the graph: scan nodes until one has an out-edge.
Agedge_t *firstout(Agraph_t *g)
{
    Agnode_t *n;
    Agedge_t *e;
    if (!g)
        return NULL;
    for (n = agfstnode(g); n; n = agnxtnode(g, n))
    {
        e = agfstout(g, n);
        if (e)
            return e;
    }
    return NULL;
}
Agedge_t *nextout(Agraph_t *g, Agedge_t *e)
{
    Agnode_t *n;
    Agedge_t *ne;
    if (!g || !e)
        return NULL;
    ne = agnxtout(g, e);
    if (ne)
        return (ne);
    // Exhausted this tail's out-edges: continue with the next node that has any.
    for (n = agnxtnode(g, agtail(e)); n; n = agnxtnode(g, n))
    {
        ne = agfstout(g, n);
        if (ne)
            return ne;
    }
    return NULL;
}
// Graph-wide edge iteration delegates to the out-edge scan above.
Agedge_t *firstedge(Agraph_t *g)
{
    return firstout(g);
}
Agedge_t *nextedge(Agraph_t *g, Agedge_t *e)
{
    return nextout(g, e);
}
// Per-node out-edge iteration.
Agedge_t *firstout(Agnode_t *n)
{
    if (!n)
        return NULL;
    return agfstout(agraphof(n), n);
}
Agedge_t *nextout(Agnode_t *n, Agedge_t *e)
{
    if (!n || !e)
        return NULL;
    return agnxtout(agraphof(n), e);
}
// Head of the first out-edge of n, if any.
Agnode_t *firsthead(Agnode_t *n)
{
    Agedge_t *e;
    if (!n)
        return NULL;
    e = agfstout(agraphof(n), n);
    if (!e)
        return NULL;
    return aghead(e);
}
// Next distinct head after h among n's out-edges (skips parallel edges to h).
Agnode_t *nexthead(Agnode_t *n, Agnode_t *h)
{
    Agedge_t *e;
    Agraph_t *g;
    if (!n || !h)
        return NULL;
    g = agraphof(n);
    e = agfindedge(g, n, h);
    if (!e)
        return NULL;
    do
    {
        e = agnxtout(g, AGMKOUT(e));
        if (!e)
            return NULL;
    } while (aghead(e) == h);
    return aghead(e);
}
// Per-node iteration over all incident edges (in and out).
Agedge_t *firstedge(Agnode_t *n)
{
    if (!n)
        return NULL;
    return agfstedge(agraphof(n), n);
}
Agedge_t *nextedge(Agnode_t *n, Agedge_t *e)
{
    if (!n || !e)
        return NULL;
    return agnxtedge(agraphof(n), e, n);
}
// Graph-wide in-edge iteration.
Agedge_t *firstin(Agraph_t *g)
{
    Agnode_t *n;
    if (!g)
        return NULL;
    n = agfstnode(g);
    if (!n)
        return NULL;
    return agfstin(g, n);
}
Agedge_t *nextin(Agraph_t *g, Agedge_t *e)
{
    Agnode_t *n;
    Agedge_t *ne;
    if (!g || !e)
        return NULL;
    ne = agnxtin(g, e);
    if (ne)
        return (ne);
    // Exhausted this head's in-edges: continue with the next node.
    n = agnxtnode(g, aghead(e));
    if (!n)
        return NULL;
    return agfstin(g, n);
}
// Per-node in-edge iteration.
Agedge_t *firstin(Agnode_t *n)
{
    if (!n)
        return NULL;
    return agfstin(agraphof(n), n);
}
Agedge_t *nextin(Agnode_t *n, Agedge_t *e)
{
    if (!n || !e)
        return NULL;
    return agnxtin(agraphof(n), e);
}
// Tail of the first in-edge of n, if any.
Agnode_t *firsttail(Agnode_t *n)
{
    Agedge_t *e;
    if (!n)
        return NULL;
    e = agfstin(agraphof(n), n);
    if (!e)
        return NULL;
    return agtail(e);
}
// Next distinct tail after t among n's in-edges (skips parallel edges from t).
Agnode_t *nexttail(Agnode_t *n, Agnode_t *t)
{
    Agedge_t *e;
    Agraph_t *g;
    if (!n || !t)
        return NULL;
    g = agraphof(n);
    e = agfindedge(g, t, n);
    if (!e)
        return NULL;
    do
    {
        e = agnxtin(g, AGMKIN(e));
        if (!e)
            return NULL;
    } while (agtail(e) == t);
    return agtail(e);
}
// Node iteration over a graph.
Agnode_t *firstnode(Agraph_t *g)
{
    if (!g)
        return NULL;
    return agfstnode(g);
}
Agnode_t *nextnode(Agraph_t *g, Agnode_t *n)
{
    if (!g || !n)
        return NULL;
    return agnxtnode(g, n);
}
// An edge "contains" exactly two nodes: its tail first, then its head.
Agnode_t *firstnode(Agedge_t *e)
{
    if (!e)
        return NULL;
    return agtail(e);
}
Agnode_t *nextnode(Agedge_t *e, Agnode_t *n)
{
    if (!e || n != agtail(e))
        return NULL;
    return aghead(e);
}
// Attribute iteration per object kind; attribute defaults live on the root graph.
Agsym_t *firstattr(Agraph_t *g)
{
    if (!g)
        return NULL;
    g = agroot(g);
    return agnxtattr(g, AGRAPH, NULL);
}
Agsym_t *nextattr(Agraph_t *g, Agsym_t *a)
{
    if (!g || !a)
        return NULL;
    g = agroot(g);
    return agnxtattr(g, AGRAPH, a);
}
Agsym_t *firstattr(Agnode_t *n)
{
    Agraph_t *g;
    if (!n)
        return NULL;
    g = agraphof(n);
    return agnxtattr(g, AGNODE, NULL);
}
Agsym_t *nextattr(Agnode_t *n, Agsym_t *a)
{
    Agraph_t *g;
    if (!n || !a)
        return NULL;
    g = agraphof(n);
    return agnxtattr(g, AGNODE, a);
}
Agsym_t *firstattr(Agedge_t *e)
{
    Agraph_t *g;
    if (!e)
        return NULL;
    g = agraphof(agtail(e));
    return agnxtattr(g, AGEDGE, NULL);
}
Agsym_t *nextattr(Agedge_t *e, Agsym_t *a)
{
    Agraph_t *g;
    if (!e || !a)
        return NULL;
    g = agraphof(agtail(e));
    return agnxtattr(g, AGEDGE, a);
}
// Remove (close) graph g; returns false only for a NULL argument.
bool rm(Agraph_t *g)
{
    if (!g)
        return false;
#if 0
    Agraph_t* sg;
    for (sg = agfstsubg (g); sg; sg = agnxtsubg (sg))
        rm(sg);
    if (g == agroot(g))
        agclose(g);
    else
        agdelete(agparent(g), g);
#endif
    /* The rm function appears to have the semantics of agclose, so
     * we should just do that, and let cgraph take care of all the
     * details.
     */
    agclose(g);
    return true;
}
// Delete node n from its graph; the special "\001proto" node is protected.
bool rm(Agnode_t *n)
{
    if (!n)
        return false;
    // removal of the protonode is not permitted
    if (agnameof(n)[0] == '\001' && strcmp(agnameof(n), "\001proto") == 0)
        return false;
    agdelete(agraphof(n), n);
    return true;
}
// Delete edge e from the root graph; edges touching "\001proto" are protected.
bool rm(Agedge_t *e)
{
    if (!e)
        return false;
    // removal of the protoedge is not permitted
    if ((agnameof(aghead(e))[0] == '\001' && strcmp(agnameof(aghead(e)), "\001proto") == 0) || (agnameof(agtail(e))[0] == '\001' && strcmp(agnameof(agtail(e)), "\001proto") == 0))
        return false;
    agdelete(agroot(agraphof(aghead(e))), e);
    return true;
}
// Run the named layout engine on g (freeing any previous layout first);
// returns true on success.
bool layout(Agraph_t *g, const char *engine)
{
    int err;
    if (!g)
        return false;
    err = gvFreeLayout(gvc, g); /* ignore errors */
    err = gvLayout(gvc, g, engine);
    return (!err);
}
// annotate the graph with layout information
bool render(Agraph_t *g)
{
    if (!g)
        return false;
    attach_attrs(g);
    return true;
}
// render to stdout
bool render(Agraph_t *g, const char *format)
{
    int err;
    if (!g)
        return false;
    err = gvRender(gvc, g, format, stdout);
    return (!err);
}
// render to an open FILE
bool render(Agraph_t *g, const char *format, FILE *f)
{
    int err;
    if (!g)
        return false;
    err = gvRender(gvc, g, format, f);
    return (!err);
}
// render to an open channel
bool renderchannel(Agraph_t *g, const char *format, const char *channelname)
{
    int err;
    if (!g)
        return false;
    gv_channel_writer_init(gvc);
    /* the channel writer installed above interprets this FILE* argument
     * as the channel name rather than a real stream */
    err = gvRender(gvc, g, format, (FILE *)channelname);
    gv_writer_reset(gvc); /* Reset to default */
    return (!err);
}
// render to a filename
bool render(Agraph_t *g, const char *format, const char *filename)
{
    int err;
    if (!g)
        return false;
    err = gvRenderFilename(gvc, g, format, filename);
    return (!err);
}
// Growable byte accumulator handed (cast to FILE*) to the string writer.
typedef struct
{
    char *data; /* malloc'ed output buffer */
    int sz; /* buffer size */
    int len; /* length of array */
} BA;
// render to string result, using binding-dependent gv_string_writer()
// Returns a malloc'ed buffer (freed by the wrapper) and reports its length
// through the int slot pointed to by GD_alg(g), which must be non-NULL.
char *renderresult(Agraph_t *g, const char *format)
{
    BA ba;
    if (!g)
        return NULL;
    if (!GD_alg(g))
        return NULL;
    ba.sz = BUFSIZ;
    ba.data = (char *)malloc(ba.sz * sizeof(char)); /* must be freed by wrapper code */
    ba.len = 0;
    gv_string_writer_init(gvc);
    (void)gvRender(gvc, g, format, (FILE *)&ba);
    gv_writer_reset(gvc); /* Reset to default */
    *((int *)GD_alg(g)) = ba.len;
    return ba.data;
}
// render to string result, using binding-dependent gv_string_writer()
// Variant that writes into a caller-provided buffer: the string writer
// interprets the outdata pointer (cast to FILE*) as its destination.
void renderresult(Agraph_t *g, const char *format, char *outdata)
{
    if (!g)
        return;
    gv_string_writer_init(gvc);
    (void)gvRender(gvc, g, format, (FILE *)outdata);
    gv_writer_reset(gvc); /* Reset to default */
}
// render to a malloc'ed data string, to be free'd by caller.
// Renders g in the given format via gvRenderData() and returns the result
// grown by one byte and NUL-terminated; NULL on render or allocation failure.
char *renderdata(Agraph_t *g, const char *format)
{
    int err;
    char *data;
    char *enlarged;
    unsigned int length;
    if (!g)
        return NULL;
    err = gvRenderData(gvc, g, format, &data, &length);
    if (err)
        return NULL;
    /* grow the buffer by one byte for a terminating NUL; use a temporary
     * pointer so the original buffer is not leaked when realloc fails
     * (the old code assigned realloc's result straight back to data) */
    enlarged = (char *)realloc(data, length + 1);
    if (!enlarged) {
        free(data);
        return NULL;
    }
    enlarged[length] = '\0';
    return enlarged;
}
// Serialize g to an already-open FILE; true on success.
bool write(Agraph_t *g, FILE *f)
{
    int err;
    if (!g)
        return false;
    err = agwrite(g, f);
    return (!err);
}
// Serialize g to the named file (truncating it); true on success.
bool write(Agraph_t *g, const char *filename)
{
    FILE *f;
    int err;
    if (!g)
        return false;
    f = fopen(filename, "w");
    if (!f)
        return false;
    err = agwrite(g, f);
    fclose(f);
    return (!err);
}
#include <vector>
#include <string>
using namespace std;
extern "C"
{
char *csvg()
{
Agraph_t *g = graph((char *)"graph");
vector<Agnode_t *> nodes;
for (int i = 0; i < 10; i++)
{
nodes.push_back(node(g, (char *)std::to_string(i).c_str()));
}
vector<Agedge_t *> edges;
edge(g, nodes[0], nodes[1]);
edge(g, nodes[0], nodes[2]);
edge(g, nodes[1], nodes[3]);
edge(g, nodes[1], nodes[4]);
edge(g, nodes[2], nodes[5]);
edge(g, nodes[2], nodes[6]);
edge(g, nodes[3], nodes[7]);
edge(g, nodes[3], nodes[8]);
edge(g, nodes[4], nodes[9]);
layout(g, (char *)"dot");
return renderdata(g, (char *)"svg");
}
}
| 9,595 |
14,668 | // Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stddef.h>
#include <stdint.h>
#include "net/cert/internal/ocsp.h"
#include "net/der/input.h"
// Fuzzer entry point: treat the raw input bytes as a DER-encoded OCSP
// SingleResponse and run the parser over them; the parse result is
// deliberately ignored -- the run only exercises crash/UB detection.
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
  net::der::Input single_response_der(data, size);
  net::OCSPSingleResponse single_response;
  net::ParseOCSPSingleResponse(single_response_der, &single_response);
  return 0;
}
| 180 |
778 | #ifndef KRATOS_SWIMMING_DEM_IN_PFEM_UTILS_H
#define KRATOS_SWIMMING_DEM_IN_PFEM_UTILS_H
// /* External includes */
// System includes
// Project includes
#include "includes/variables.h"
/* System includes */
#include <limits>
#include <iostream>
#include <iomanip>
/* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif
/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
namespace Kratos
{
// Utility for swimming-DEM-in-PFEM coupling: copies the elements of a PFEM
// wall model part into a DEM model part as RigidFace3D conditions,
// reusing existing wall properties or creating defaults.
class SwimmingDemInPfemUtils
{
  public:
  typedef ModelPart::NodesContainerType::ContainerType::iterator NodesIteratorType;
  KRATOS_CLASS_POINTER_DEFINITION(SwimmingDemInPfemUtils);
  /// Default constructor.
  SwimmingDemInPfemUtils(){}
  /// Destructor.
  virtual ~SwimmingDemInPfemUtils(){}
  //***************************************************************************************************************
  //***************************************************************************************************************
  /// Copy every element of r_source_model_part into r_destination_model_part
  /// as a RigidFace3D condition sharing the element's geometry.
  void TransferWalls(ModelPart& r_source_model_part, ModelPart& r_destination_model_part)
  {
    Properties::Pointer props;
    // Reuse the destination's first properties block if one exists;
    // otherwise create a default DEM wall properties block.
    if(r_destination_model_part.GetMesh(0).Properties().size()) {
      int props_id = r_destination_model_part.PropertiesBegin()->Id();
      props = r_destination_model_part.pGetProperties(props_id);
    }
    else {
      props = Properties::Pointer(new Properties(0));
      props->SetValue(STATIC_FRICTION, 0.5773502691896257); // tan(30 deg)
      props->SetValue(WALL_COHESION, 0.0);
      props->SetValue(COMPUTE_WEAR, 0);
      props->SetValue(SEVERITY_OF_WEAR, 0.001);
      props->SetValue(IMPACT_WEAR_SEVERITY, 0.001);
      props->SetValue(BRINELL_HARDNESS, 200.0);
      props->SetValue(YOUNG_MODULUS, 1e20);
      props->SetValue(POISSON_RATIO, 0.25);
    }
    // Continue condition ids after the largest existing one.
    r_destination_model_part.Conditions().Sort();
    int id = 1;
    if (r_destination_model_part.Conditions().size()) {
      id = (r_destination_model_part.ConditionsEnd()-1)->Id() + 1;
    }
    ModelPart::ElementsContainerType& source_elements = r_source_model_part.Elements();
    for (unsigned int i = 0; i < source_elements.size(); i++) {
      ModelPart::ElementsContainerType::iterator it = r_source_model_part.ElementsBegin() + i;
      // The new condition shares the source element's geometry (no copy).
      Geometry< Node<3> >::Pointer p_geometry = it->pGetGeometry();
      Condition::Pointer cond = Condition::Pointer(new RigidFace3D(id, p_geometry, props));
      r_destination_model_part.AddCondition(cond);
      id++;
    }
  }
  //***************************************************************************************************************
  //***************************************************************************************************************
  ///@}
  ///@name Inquiry
  ///@{
  ///@}
  ///@name Input and output
  ///@{
  /// Turn back information as a string.
  virtual std::string Info() const
  {
    return "";
  }
  /// Print information about this object.
  virtual void PrintInfo(std::ostream& rOStream) const
  {
  }
  /// Print object's data.
  virtual void PrintData(std::ostream& rOStream) const
  {
  }
  ///@}
  ///@name Friends
  ///@{
  ///@}
  protected:
  ///@name Protected static Member r_variables
  ///@{
  ///@}
  ///@name Protected member r_variables
  ///@{
  ///@}
  ///@name Protected Operators
  ///@{
  ///@}
  ///@name Protected Operations
  ///@{
  ///@}
  ///@name Protected Access
  ///@{
  ///@}
  ///@name Protected Inquiry
  ///@{
  ///@}
  ///@name Protected LifeCycle
  ///@{
  ///@}
  private:
  ///@name Static Member r_variables
  ///@{
  ///@}
  ///@name Member r_variables
  ///@{
  RealField mPorosityField; // NOTE(review): member appears unused in this header -- confirm before removing
  ///@}
  ///@name Private Operators
  ///@{
  ///@}
  ///@name Private Operations
  ///@{
  ///@}
  ///@name Private Access
  ///@{
  ///@}
  ///@name Private Inquiry
  ///@{
  ///@}
  ///@name Un accessible methods
  ///@{
  /// Assignment operator.
  SwimmingDemInPfemUtils & operator=(SwimmingDemInPfemUtils const& rOther);
  ///@}
  }; // Class SwimmingDemInPfemUtils
} // namespace Python.
#endif // KRATOS_SWIMMING_DEM_IN_PFEM_UTILS_H
| 1,348 |
791 | <gh_stars>100-1000
package org.xerial.snappy;
import org.junit.AfterClass;
import org.junit.BeforeClass;
/**
 * Runs the inherited {@link SnappyTest} suite with the environment prepared
 * by {@link PureJavaSnappyTestUtil} before the class and restored afterwards
 * (presumably selecting the pure-Java Snappy implementation -- confirm in
 * PureJavaSnappyTestUtil).
 */
public class PureJavaSnappyTest
    extends SnappyTest
{
    @BeforeClass
    public static void setUp()
            throws Exception
    {
        // Prepare the pure-Java test environment once for all inherited tests.
        PureJavaSnappyTestUtil.setUp();
    }
    @AfterClass
    public static void tearDown()
            throws Exception
    {
        // Undo the environment change made in setUp().
        PureJavaSnappyTestUtil.tearDown();
    }
}
| 192 |
1,276 | <gh_stars>1000+
#include <iostream>
#include <cstdlib>
#include <vector>
#include <cmath>
#include <algorithm>
#include <tbb/parallel_sort.h>
#include "ticktock.h"
int main() {
    // Fill 2^24 ints with pseudo-random values, then time tbb::parallel_sort.
    constexpr size_t element_count = size_t{1} << 24;
    std::vector<int> values(element_count);
    std::generate(values.begin(), values.end(), std::rand);
    TICK(tbb_parallel_sort);
    tbb::parallel_sort(values.begin(), values.end(), std::less<int>{});
    TOCK(tbb_parallel_sort);
    return 0;
}
| 189 |
6,205 | import os
import unittest
from conans.paths import CONANFILE, CONANFILE_TXT
from conans.test.utils.tools import TestClient
class InstallCWDTest(unittest.TestCase):
    """Tests for ``conan install`` using an explicit install folder."""

    def setUp(self):
        # Export a minimal MyLib/0.1 recipe the install tests can require.
        self.client = TestClient(users={"myremote": [("lasote", "mypass")]})
        conanfile = """
import os
from conans import ConanFile, tools, CMake
class MyLib(ConanFile):
    name = "MyLib"
    version = "0.1"
    exports = "*"
    settings = "os", "compiler", "arch", "build_type"
    generators = "cmake"
    def build(self):
        pass
"""
        self.client.save({CONANFILE: conanfile})
        self.client.run("export . lasote/stable")

    def test_install_cwd(self):
        # "[requires]" is an INI-style section header and must be on its own
        # line: the previous fixture glued the reference onto the header,
        # producing an unparseable conanfile.txt.
        self.client.save({CONANFILE_TXT: "[requires]\nMyLib/0.1@lasote/stable"},
                         clean_first=True)
        os.mkdir(os.path.join(self.client.current_folder, "new"))
        self.client.run("install . --install-folder new -g cmake")
        # The old assertion passed a (always-truthy) path string to
        # assertTrue; actually check that the generated file exists.
        self.assertTrue(os.path.exists(
            os.path.join(self.client.current_folder, "new", "conanbuildinfo.cmake")))

    def install_ref(self):
        # NOTE(review): name lacks the "test_" prefix, so unittest never runs
        # this; kept as-is to avoid changing the public interface.
        self.client.run("install MyLib/0.1@lasote/stable --build=missing")
        self.assertEqual(["conanfile.py"], os.listdir(self.client.current_folder))
| 486 |
1,380 |
/*
* sophia database
* sphia.org
*
* Copyright (c) <NAME>
* BSD License
*/
#include <sophia.h>
#include <libss.h>
#include <libsf.h>
#include <libsr.h>
#include <libsv.h>
#include <libsd.h>
#include <libst.h>
/* github issue #97: u32 keys must be passed with an explicit sizeof in
 * sp_setstring(); a >= cursor positioned on the last key (99) must yield
 * exactly one row. */
static void
github_97(void)
{
	void *env = sp_env();
	t( env != NULL );
	t( sp_setstring(env, "sophia.path", st_r.conf->sophia_dir, 0) == 0 );
	t( sp_setint(env, "scheduler.threads", 0) == 0 );
	t( sp_setstring(env, "log.path", st_r.conf->log_dir, 0) == 0 );
	t( sp_setstring(env, "db", "test", 0) == 0 );
	t( sp_setint(env, "db.test.compaction.cache", 0) == 0 );
	t( sp_setstring(env, "db.test.path", st_r.conf->db_dir, 0) == 0 );
	t( sp_setstring(env, "db.test.scheme", "key", 0) == 0 );
	t( sp_setstring(env, "db.test.scheme.key", "u32,key(0)", 0) == 0 );
	t( sp_setstring(env, "db.test.scheme", "value", 0) == 0 );
	t( sp_setint(env, "db.test.sync", 0) == 0 );
	t( sp_open(env) == 0 );
	void *db = sp_getobject(env, "db.test");
	t( db != NULL );
	/* we must pass sizeof(uint32_t) in sp_setstring() */
	uint32_t i = 0;
	while ( i < 100 ) {
		void *o = sp_document(db);
		t( sp_setstring(o, "key", &i, sizeof(i)) == 0 ); /* < */
		t( sp_setstring(o, "value", &i, sizeof(i)) == 0 ); /* < */
		t( sp_set(db, o) == 0 );
		i++;
	}
	void *cur = sp_cursor(env);
	t( cur != NULL );
	void *o = sp_document(db);
	t( o != NULL );
	uint32_t key = 99;
	t( sp_setstring(o, "key", &key, sizeof(key)) == 0 ); /* < */
	i = 0;
	while ((o = sp_get(cur, o)))
		i++;
	t( i == 1 ); /* only key 99 is >= 99 */
	sp_destroy(cur);
	t( sp_destroy(env) == 0 );
}
/* github issue #104: a "<=" cursor over string keys starting at "bb" must
 * return exactly the two rows "bb" and "aa". */
static void
github_104(void)
{
	void *env = sp_env();
	t( env != NULL );
	t( sp_setstring(env, "sophia.path", st_r.conf->sophia_dir, 0) == 0 );
	t( sp_setint(env, "scheduler.threads", 0) == 0 );
	t( sp_setstring(env, "log.path", st_r.conf->log_dir, 0) == 0 );
	t( sp_setstring(env, "db", "test", 0) == 0 );
	t( sp_setstring(env, "db.test.path", st_r.conf->db_dir, 0) == 0 );
	t( sp_setstring(env, "db.test.scheme", "key", 0) == 0 );
	t( sp_setstring(env, "db.test.scheme.key", "string,key(0)", 0) == 0 );
	t( sp_setstring(env, "db.test.scheme", "value", 0) == 0 );
	t( sp_setint(env, "db.test.sync", 0) == 0 );
	t( sp_open(env) == 0 );
	void *db = sp_getobject(env, "db.test");
	t( db != NULL );
	char key_a[] = "aa";
	char key_b[] = "bb";
	char key_c[] = "cc";
	char key_d[] = "dd";
	void *o = sp_document(db);
	t( sp_setstring(o, "key", key_a, sizeof(key_a)) == 0 );
	t( sp_set(db, o) == 0 );
	o = sp_document(db);
	t( sp_setstring(o, "key", key_b, sizeof(key_b)) == 0 );
	t( sp_set(db, o) == 0 );
	o = sp_document(db);
	t( sp_setstring(o, "key", key_c, sizeof(key_c)) == 0 );
	t( sp_set(db, o) == 0 );
	o = sp_document(db);
	t( sp_setstring(o, "key", key_d, sizeof(key_d)) == 0 );
	t( sp_set(db, o) == 0 );
	void *cur = sp_cursor(env);
	t( cur != NULL );
	o = sp_document(db);
	t( o != NULL );
	t( sp_setstring(o, "key", key_b, sizeof(key_b)) == 0 );
	t( sp_setstring(o, "order", "<=", 0) == 0 );
	int i = 0;
	while ((o = sp_get(cur, o))) {
		printf(" %s", (char*)sp_getstring(o, "key", 0));
		i++;
	}
	fflush(NULL);
	t( i == 2 ); /* "bb" and "aa" */
	sp_destroy(cur);
	t( sp_destroy(env) == 0 );
}
/* github issue #112: querying a configuration key with no value
 * ("db.test.compaction.gc") must return NULL rather than crash. */
static void
github_112(void)
{
	void *env = sp_env();
	t( env != NULL );
	t( sp_setstring(env, "sophia.path", st_r.conf->sophia_dir, 0) == 0 );
	t( sp_setint(env, "scheduler.threads", 0) == 0 );
	t( sp_setstring(env, "log.path", st_r.conf->log_dir, 0) == 0 );
	t( sp_setstring(env, "db", "test", 0) == 0 );
	t( sp_setint(env, "db.test.sync", 0) == 0 );
	t( sp_open(env) == 0 );
	void *db = sp_getobject(env, "db.test");
	t( db != NULL );
	char *s = sp_getstring(env, "db.test.compaction.gc", NULL);
	t( s == NULL );
	t( sp_destroy(env) == 0 );
}
/* github issue #117: declaring many databases (30) before sp_open()
 * must succeed. */
static void
github_117(void)
{
	void *env = sp_env();
	t( env != NULL );
	t( sp_setstring(env, "sophia.path", st_r.conf->sophia_dir, 0) == 0 );
	t( sp_setint(env, "scheduler.threads", 0) == 0 );
	t( sp_setstring(env, "log.path", st_r.conf->log_dir, 0) == 0 );
	t( sp_setint(env, "log.sync", 0) == 0 );
	t( sp_setint(env, "log.rotate_sync", 0) == 0 );
	int i = 0;
	int max = 30;
	while (i < max) {
		char name[30];
		snprintf(name, sizeof(name), "db.t_%d", i);
		/* name + 3 skips the "db." prefix when declaring the db */
		t( sp_setstring(env, "db", name + 3, 0) == 0 );
		void *db = sp_getobject(env, name);
		t( db != NULL );
		i++;
	}
	t( sp_open(env) == 0 );
	t( sp_destroy(env) == 0 );
}
/* github issue #118: after a set/get round-trip the index count must be 1
 * and sophia.status must report "online". */
static void
github_118(void)
{
	void *env = sp_env();
	t( env != NULL );
	t( sp_setstring(env, "sophia.path", st_r.conf->sophia_dir, 0) == 0 );
	t( sp_setstring(env, "db", "test", 0) == 0 );
	t( sp_setstring(env, "db.test.scheme", "key", 0) == 0 );
	t( sp_setstring(env, "db.test.scheme.key", "u32,key(0)", 0) == 0 );
	t( sp_setstring(env, "db.test.scheme", "value", 0) == 0 );
	t( sp_open(env) == 0 );
	void *db = sp_getobject(env, "db.test");
	t( db != NULL );
	unsigned key = 123456;
	void *o = sp_document(db);
	t( o != NULL );
	t( sp_setstring(o, "key", &key, sizeof(key)) == 0 );
	t( sp_set(db, o) == 0 );
	o = sp_document(db);
	t( o != NULL );
	t( sp_setstring(o, "key", &key, sizeof(key)) == 0 );
	o = sp_get(db, o);
	t( o != NULL );
	sp_destroy(o);
	t( sp_getint(env, "db.test.index.count") == 1 );
	char *sz = sp_getstring(env, "sophia.status", NULL);
	t( strcmp(sz, "online") == 0 );
	free(sz);
	t( sp_destroy(env) == 0 );
}
/* github issue #120: the documented set/get/delete example must work
 * end-to-end; any API failure prints sophia.error and bails out. */
static void
github_120(void)
{
	/* open or create environment and database */
	void *env = sp_env();
	sp_setstring(env, "sophia.path", st_r.conf->sophia_dir, 0);
	sp_setstring(env, "db", "test", 0);
	void *db = sp_getobject(env, "db.test");
	int rc = sp_open(env);
	if (rc == -1)
		goto error;
	/* set */
	void *o = sp_document(db);
	sp_setstring(o, "key", "hello", 0);
	sp_setstring(o, "value", "world", 0);
	rc = sp_set(db, o);
	if (rc == -1)
		goto error;
	/* get */
	o = sp_document(db);
	sp_setstring(o, "key", "hello", 0);
	o = sp_get(db, o);
	if (o) {
		/* ensure key and value are correct */
		int size;
		char *ptr = sp_getstring(o, "key", &size);
		t( size == 5 );
		t( strncmp(ptr, "hello", 5) == 0 );
		ptr = sp_getstring(o, "value", &size);
		t( size == 5 );
		t( strncmp(ptr, "world", 5) == 0 );
		sp_destroy(o);
	}
	/* delete */
	o = sp_document(db);
	sp_setstring(o, "key", "hello", 0);
	rc = sp_delete(db, o);
	if (rc == -1)
		goto error;
	/* finish work */
	sp_destroy(env);
	return;
error:;
	int size;
	char *error = sp_getstring(env, "sophia.error", &size);
	printf("error: %s\n", error);
	free(error);
	sp_destroy(env);
}
/* github issue #123: configuration strings passed with an explicit size and
 * NO NUL terminator (plain char arrays) must be honored for paths and db
 * names; the directories must then actually be created. */
static void
github_123(void)
{
	rmrf("./abc");
	rmrf("./abc_log");
	void *env = sp_env();
	t( env != NULL );
	/* deliberately not NUL-terminated -- sizes are passed explicitly */
	char path[] = { '.', '/', 'a', 'b', 'c' };
	char path_log[] = { '.', '/', 'a', 'b', 'c', '_', 'l', 'o', 'g' };
	char name[] = { 't', 'e', 's', 't' };
	t( sp_setstring(env, "sophia.path", path, sizeof(path)) == 0 );
	t( sp_setstring(env, "db", name, sizeof(name)) == 0 );
	t( sp_setstring(env, "log.path", path_log, sizeof(path_log)) == 0 );
	t( sp_open(env) == 0 );
	t( exists("./", "abc") == 1 );
	t( exists("./", "abc_log") == 1 );
	void *db = sp_getobject(env, "db.test");
	t( db != NULL );
	t( sp_destroy(env) == 0 );
	rmrf("./abc");
	rmrf("./abc_log");
}
/* github issue #164: snapshot isolation -- a transaction that read key1
 * must keep seeing the original value even after a concurrent transaction
 * commits a new value for the same key. */
static void
github_164(void)
{
	void *env = sp_env();
	t( env != NULL );
	t( sp_setstring(env, "sophia.path", st_r.conf->sophia_dir, 0) == 0 );
	t( sp_setint(env, "scheduler.threads", 0) == 0 );
	t( sp_setstring(env, "log.path", st_r.conf->log_dir, 0) == 0 );
	t( sp_setstring(env, "db", "test", 0) == 0 );
	t( sp_setint(env, "db.test.sync", 0) == 0 );
	void *db = sp_getobject(env, "db.test");
	t( db != NULL );
	t( sp_open(env) == 0 );
	/* set up */
	void *o = sp_document(db);
	sp_setstring(o, "key", "key1", 5);
	sp_setstring(o, "value", "value1", 7);
	t( sp_set(db, o) == 0 );
	/* begin transaction TX1*/
	void *tx1 = sp_begin(env);
	/* READ KEY1 */
	o = sp_document(db);
	sp_setstring(o, "key", "key1", 5);
	o = sp_get(tx1, o);
	if (o) {
		t( strcmp( sp_getstring(o, "value", NULL), "value1") == 0 );
		sp_destroy(o);
	}
	/* ----------------------------- NEW TX2 ----------------------------- */
	void *tx2 = sp_begin(env);
	o = sp_document(db);
	sp_setstring(o, "key", "key1", 5);
	sp_setstring(o, "value", "value2", 7);
	t( sp_set(tx2, o) == 0 );
	t( sp_commit(tx2) == 0 );
	/* ---------------------------- END OF TX2 --------------------------- */
	/* get key1 again tx1 -- must still observe the pre-TX2 value */
	o = sp_document(db);
	sp_setstring(o, "key", "key1", 5);
	o = sp_get(tx1, o);
	if (o) {
		t( strcmp( sp_getstring(o, "value", NULL), "value1") == 0 );
		sp_destroy(o);
	}
	/* commit transaction */
	t( sp_commit(tx1) == 0 );
	t( sp_destroy(env) == 0 );
}
/* github issue #171: a scheme with a 3-part composite key (key_i/j/k) plus
 * four extra u32 fields must round-trip all fields through set/get. */
static void
github_171(void)
{
	void *env = sp_env();
	t( env != NULL );
	t( sp_setstring(env, "sophia.path", st_r.conf->sophia_dir, 0) == 0 );
	t( sp_setint(env, "scheduler.threads", 0) == 0 );
	t( sp_setstring(env, "log.path", st_r.conf->log_dir, 0) == 0 );
	t( sp_setstring(env, "db", "test", 0) == 0 );
	t( sp_setint(env, "db.test.compaction.cache", 0) == 0 );
	t( sp_setstring(env, "db.test.path", st_r.conf->db_dir, 0) == 0 );
	t( sp_setstring(env, "db.test.scheme", "key_i", 0) == 0 );
	t( sp_setstring(env, "db.test.scheme", "key_j", 0) == 0 );
	t( sp_setstring(env, "db.test.scheme", "key_k", 0) == 0 );
	t( sp_setstring(env, "db.test.scheme", "a", 0) == 0 );
	t( sp_setstring(env, "db.test.scheme", "b", 0) == 0 );
	t( sp_setstring(env, "db.test.scheme", "c", 0) == 0 );
	t( sp_setstring(env, "db.test.scheme", "d", 0) == 0 );
	t( sp_setstring(env, "db.test.scheme.key_i", "u32,key(0)", 0) == 0);
	t( sp_setstring(env, "db.test.scheme.key_j", "u32,key(1)", 0) == 0);
	t( sp_setstring(env, "db.test.scheme.key_k", "u32,key(2)", 0) == 0);
	t( sp_setstring(env, "db.test.scheme.a", "u32", 0) == 0);
	t( sp_setstring(env, "db.test.scheme.b", "u32", 0) == 0);
	t( sp_setstring(env, "db.test.scheme.c", "u32", 0) == 0);
	t( sp_setstring(env, "db.test.scheme.d", "u32", 0) == 0);
	t( sp_setint(env, "db.test.sync", 0) == 0 );
	t( sp_open(env) == 0 );
	void *db = sp_getobject(env, "db.test");
	t( db != NULL );
	void *o = sp_document(db);
	t( sp_setint(o, "key_i", 'i') == 0);
	t( sp_setint(o, "key_j", 'j') == 0);
	t( sp_setint(o, "key_k", 'k') == 0);
	t( sp_setint(o, "a", 'a') == 0);
	t( sp_setint(o, "b", 'b') == 0);
	t( sp_setint(o, "c", 'c') == 0);
	t( sp_setint(o, "d", 'd') == 0);
	t( sp_set(db, o) == 0 );
	o = sp_document(db);
	t( sp_setint(o, "key_i", 'i') == 0);
	t( sp_setint(o, "key_j", 'j') == 0);
	t( sp_setint(o, "key_k", 'k') == 0);
	o = sp_get(db, o);
	if (o) {
		t( sp_getint(o, "a") == 'a' );
		t( sp_getint(o, "b") == 'b' );
		t( sp_getint(o, "c") == 'c' );
		t( sp_getint(o, "d") == 'd' );
		sp_destroy(o);
	}
	t( sp_destroy(env) == 0 );
}
/* Register every github regression ticket above in one test group. */
stgroup *github_group(void)
{
	stgroup *group = st_group("github");
	st_groupadd(group, st_test("ticket_97", github_97));
	st_groupadd(group, st_test("ticket_104", github_104));
	st_groupadd(group, st_test("ticket_112", github_112));
	st_groupadd(group, st_test("ticket_117", github_117));
	st_groupadd(group, st_test("ticket_118", github_118));
	st_groupadd(group, st_test("ticket_120", github_120));
	st_groupadd(group, st_test("ticket_123", github_123));
	st_groupadd(group, st_test("ticket_164", github_164));
	st_groupadd(group, st_test("ticket_171", github_171));
	return group;
}
| 5,221 |
1,224 | <gh_stars>1000+
# Round-trip checks for hex()/unhex(): here hex() yields zero-padded,
# upper-case digits with no "0x" prefix and unhex() parses them back --
# presumably builtins of an embedded Python-2 dialect (note the Python 2
# print statement and bare exit() below); confirm against the interpreter.
assert hex(unhex(hex(0xcafebabe))) == 'CAFEBABE'
assert unhex(hex(unhex('cafebabe'))) == 0xCAFEBABE
assert hex(unhex(hex(0xdecaf))) == '000DECAF'
assert unhex(hex(unhex('DECAF'))) == 0xDECAF
print 'OK'
exit()
412 | #include <assert.h>
#include "../cprover-string-hack.h"
int main()
{
  __CPROVER_string s;
  s = __CPROVER_string_literal("pippo");
  /* holds: s was just assigned "pippo" */
  assert(__CPROVER_string_equal(s, __CPROVER_string_literal("pippo")));
  /* s is "pippo", not "mippo" -- presumably this assertion is meant to be
   * reported as violated by the verifier; confirm with the test harness */
  assert(__CPROVER_string_equal(s, __CPROVER_string_literal("mippo")));
  return 0;
}
| 125 |
3,288 | /*
* Copyright 2018 ObjectBox Ltd. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.objectbox.annotation;
/**
 * ObjectBox offers a value and two hash index types, from which it chooses a reasonable default (see {@link #DEFAULT}).
 * <p>
 * For some queries/use cases it might make sense to override the default choice for optimization purposes.
 * <p>
 * Note: hash indexes are currently only supported for string properties.
 */
public enum IndexType {
    /**
     * Use the default index type depending on the property type:
     * {@link #VALUE} for scalars and {@link #HASH} for Strings.
     */
    DEFAULT,
    /**
     * Use the property value to build the index.
     * For Strings this may occupy more space than the default setting.
     */
    VALUE,
    /**
     * Use a (fast non-cryptographic) hash of the property value to build the index.
     * Internally, it uses a 32 bit hash with a decent hash collision behavior.
     * Because occasional collisions do not really impact performance, this is usually a better choice than
     * {@link #HASH64} as it takes less space.
     */
    HASH,
    /**
     * Use a long (fast non-cryptographic) hash of the property value to build the index.
     * Occupies more space than {@link #HASH}, which is usually preferable (see its note on collisions).
     */
    HASH64
}
| 522 |
14,668 | <reponame>zealoussnow/chromium<gh_stars>1000+
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chromeos/network/fake_stub_cellular_networks_provider.h"
#include <algorithm>
#include "base/guid.h"
#include "chromeos/network/cellular_utils.h"
namespace chromeos {
FakeStubCellularNetworksProvider::FakeStubCellularNetworksProvider() = default;
FakeStubCellularNetworksProvider::~FakeStubCellularNetworksProvider() = default;
// Registers a fake (ICCID, EID) pair; a stub network for it is created on
// the next AddOrRemoveStubCellularNetworks() pass.
void FakeStubCellularNetworksProvider::AddStub(const std::string& stub_iccid,
                                               const std::string& stub_eid) {
  stub_iccid_and_eid_pairs_.emplace(stub_iccid, stub_eid);
}
// Unregisters a previously added (ICCID, EID) pair.
void FakeStubCellularNetworksProvider::RemoveStub(const std::string& stub_iccid,
                                                  const std::string& stub_eid) {
  stub_iccid_and_eid_pairs_.erase(std::make_pair(stub_iccid, stub_eid));
}
// Reconciles |network_list| against the registered stub (ICCID, EID) pairs:
// creates stub networks (into |new_stub_networks|) for pairs with no
// Shill-backed network, and removes stale or superseded stub entries.
// Returns true if either list was modified.
bool FakeStubCellularNetworksProvider::AddOrRemoveStubCellularNetworks(
    NetworkStateHandler::ManagedStateList& network_list,
    NetworkStateHandler::ManagedStateList& new_stub_networks,
    const DeviceState* device) {
  bool changed = false;
  // Add new stub networks if they do not correspond to an existing Shill-backed
  // network.
  std::vector<IccidEidPair> stubs_to_add =
      GetStubsNotBackedByShill(network_list);
  if (!stubs_to_add.empty()) {
    changed = true;
    for (const IccidEidPair& pair : stubs_to_add) {
      new_stub_networks.push_back(NetworkState::CreateNonShillCellularNetwork(
          pair.first, pair.second, GetGuidForStubIccid(pair.first), device));
      stub_networks_add_count_++;
    }
  }
  // Removing existing stub networks if they are no longer applicable.
  auto network_list_it = network_list.begin();
  while (network_list_it != network_list.end()) {
    const NetworkState* network = (*network_list_it)->AsNetworkState();
    if (!network->IsNonShillCellularNetwork()) {
      ++network_list_it;
      continue;
    }
    std::string iccid = network->iccid();
    const auto stub_it = std::find_if(
        stub_iccid_and_eid_pairs_.begin(), stub_iccid_and_eid_pairs_.end(),
        [&iccid](const IccidEidPair& pair) { return pair.first == iccid; });
    // Stub network which corresponds to a removed stub ICCID; remove.
    if (stub_it == stub_iccid_and_eid_pairs_.end()) {
      changed = true;
      network_list_it = network_list.erase(network_list_it);
      continue;
    }
    const auto shill_state_it =
        std::find_if(network_list.begin(), network_list.end(),
                     [&iccid](const std::unique_ptr<ManagedState>& state) {
                       const NetworkState* network = state->AsNetworkState();
                       return !network->IsNonShillCellularNetwork() &&
                              network->iccid() == iccid;
                     });
    if (shill_state_it == network_list.end()) {
      ++network_list_it;
      continue;
    }
    // Stub network which has been replaced by a non-stub network; remove.
    changed = true;
    network_list_it = network_list.erase(network_list_it);
  }
  return changed;
}
// Fills the service path and GUID for the stub with the given ICCID;
// returns false if no such stub was registered.
bool FakeStubCellularNetworksProvider::GetStubNetworkMetadata(
    const std::string& iccid,
    const DeviceState* cellular_device,
    std::string* service_path_out,
    std::string* guid_out) {
  const auto it = std::find_if(
      stub_iccid_and_eid_pairs_.begin(), stub_iccid_and_eid_pairs_.end(),
      [&iccid](const IccidEidPair& pair) { return pair.first == iccid; });
  if (it == stub_iccid_and_eid_pairs_.end())
    return false;
  *service_path_out = GenerateStubCellularServicePath(iccid);
  *guid_out = GetGuidForStubIccid(iccid);
  return true;
}
// Returns a stable GUID for the ICCID, lazily generating one per ICCID.
const std::string& FakeStubCellularNetworksProvider::GetGuidForStubIccid(
    const std::string& iccid) {
  std::string& guid = iccid_to_guid_map_[iccid];
  // If we have not yet generated a GUID for this ICCID, generate one.
  if (guid.empty())
    guid = base::GenerateGUID();
  return guid;
}
// Returns the registered (ICCID, EID) pairs for which no network in
// |network_list| (stub or real) carries the same ICCID.
std::vector<FakeStubCellularNetworksProvider::IccidEidPair>
FakeStubCellularNetworksProvider::GetStubsNotBackedByShill(
    const NetworkStateHandler::ManagedStateList& network_list) const {
  std::vector<IccidEidPair> not_backed_by_shill;
  for (const IccidEidPair& pair : stub_iccid_and_eid_pairs_) {
    const auto shill_state_it =
        std::find_if(network_list.begin(), network_list.end(),
                     [&pair](const std::unique_ptr<ManagedState>& state) {
                       return state->AsNetworkState()->iccid() == pair.first;
                     });
    // Only need to add a stub network if the stub ICCID does not match the
    // ICCID of a Shill-backed network.
    if (shill_state_it == network_list.end())
      not_backed_by_shill.push_back(pair);
  }
  return not_backed_by_shill;
}
} // namespace chromeos
| 2,018 |
713 | from collections import deque
class Solution(object):
    def sequenceReconstruction(self, org, seqs):
        """
        :type org: List[int]
        :type seqs: List[List[int]]
        :rtype: bool

        True iff org is the unique topological order implied by seqs
        (Kahn's algorithm with a "exactly one source at every step" check).
        """
        if not org:
            return False

        # Build the adjacency list and in-degree table from the sequences.
        indegree = {}
        adjacency = {}
        for seq in seqs:
            for value in seq:
                indegree.setdefault(value, 0)
                adjacency.setdefault(value, [])
        for seq in seqs:
            for parent, child in zip(seq, seq[1:]):
                adjacency[parent].append(child)
                indegree[child] += 1

        # Every number of org must be constrained by seqs (and vice versa).
        if len(indegree) != len(org):
            return False

        ready = deque(v for v, d in indegree.items() if d == 0)
        built = []
        while ready:
            # More than one candidate means the order is not unique.
            if len(ready) > 1:
                return False
            # The single candidate must match the next element of org.
            if org[len(built)] != ready[0]:
                return False
            current = ready.popleft()
            built.append(current)
            for child in adjacency[current]:
                indegree[child] -= 1
                if indegree[child] == 0:
                    ready.append(child)

        # A unique reconstruction covers every element exactly once.
        return len(built) == len(org)
| 1,117 |
416 | /*
* Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tencentcloudapi.tsf.v20180326.models;
import com.tencentcloudapi.common.AbstractModel;
import com.google.gson.annotations.SerializedName;
import com.google.gson.annotations.Expose;
import java.util.HashMap;
public class LaneInfo extends AbstractModel{
    /**
    * Lane ID.
    * Note: this field may return null, indicating that no valid value could be obtained.
    */
    @SerializedName("LaneId")
    @Expose
    private String LaneId;
    /**
    * Lane name.
    * Note: this field may return null, indicating that no valid value could be obtained.
    */
    @SerializedName("LaneName")
    @Expose
    private String LaneName;
    /**
    * Lane remark.
    * Note: this field may return null, indicating that no valid value could be obtained.
    */
    @SerializedName("Remark")
    @Expose
    private String Remark;
    /**
    * Creation time.
    * Note: this field may return null, indicating that no valid value could be obtained.
    */
    @SerializedName("CreateTime")
    @Expose
    private Long CreateTime;
    /**
    * Update time.
    * Note: this field may return null, indicating that no valid value could be obtained.
    */
    @SerializedName("UpdateTime")
    @Expose
    private Long UpdateTime;
    /**
    * Lane deployment groups.
    * Note: this field may return null, indicating that no valid value could be obtained.
    */
    @SerializedName("LaneGroupList")
    @Expose
    private LaneGroup [] LaneGroupList;
    /**
    * Whether this is an entrance application.
    * Note: this field may return null, indicating that no valid value could be obtained.
    */
    @SerializedName("Entrance")
    @Expose
    private Boolean Entrance;
    /**
    * List of namespace IDs of deployment groups already associated with the lane.
    * Note: this field may return null, indicating that no valid value could be obtained.
    */
    @SerializedName("NamespaceIdList")
    @Expose
    private String [] NamespaceIdList;
    /**
     * Get the lane ID.
     * Note: this field may return null, indicating that no valid value could be obtained.
     * @return LaneId Lane ID. May return null when no valid value is available.
     */
    public String getLaneId() {
        return this.LaneId;
    }
    /**
     * Set the lane ID.
     * Note: this field may return null, indicating that no valid value could be obtained.
     * @param LaneId Lane ID. May be null when no valid value is available.
     */
    public void setLaneId(String LaneId) {
        this.LaneId = LaneId;
    }
    /**
     * Get the lane name.
     * Note: this field may return null, indicating that no valid value could be obtained.
     * @return LaneName Lane name. May return null when no valid value is available.
     */
    public String getLaneName() {
        return this.LaneName;
    }
    /**
     * Set the lane name.
     * Note: this field may return null, indicating that no valid value could be obtained.
     * @param LaneName Lane name. May be null when no valid value is available.
     */
    public void setLaneName(String LaneName) {
        this.LaneName = LaneName;
    }
    /**
     * Get the lane remark.
     * Note: this field may return null, indicating that no valid value could be obtained.
     * @return Remark Lane remark. May return null when no valid value is available.
     */
    public String getRemark() {
        return this.Remark;
    }
    /**
     * Set the lane remark.
     * Note: this field may return null, indicating that no valid value could be obtained.
     * @param Remark Lane remark. May be null when no valid value is available.
     */
    public void setRemark(String Remark) {
        this.Remark = Remark;
    }
/**
* Get 创建时间
注意:此字段可能返回 null,表示取不到有效值。
* @return CreateTime 创建时间
注意:此字段可能返回 null,表示取不到有效值。
*/
public Long getCreateTime() {
return this.CreateTime;
}
/**
* Set 创建时间
注意:此字段可能返回 null,表示取不到有效值。
* @param CreateTime 创建时间
注意:此字段可能返回 null,表示取不到有效值。
*/
public void setCreateTime(Long CreateTime) {
this.CreateTime = CreateTime;
}
/**
* Get 更新时间
注意:此字段可能返回 null,表示取不到有效值。
* @return UpdateTime 更新时间
注意:此字段可能返回 null,表示取不到有效值。
*/
public Long getUpdateTime() {
return this.UpdateTime;
}
/**
* Set 更新时间
注意:此字段可能返回 null,表示取不到有效值。
* @param UpdateTime 更新时间
注意:此字段可能返回 null,表示取不到有效值。
*/
public void setUpdateTime(Long UpdateTime) {
this.UpdateTime = UpdateTime;
}
    /**
     * Get the deployment groups bound to this lane.
     * @return LaneGroupList Lane deployment groups; may be null when no valid value is available.
     */
    public LaneGroup [] getLaneGroupList() {
        return this.LaneGroupList;
    }

    /**
     * Set the deployment groups bound to this lane.
     * @param LaneGroupList Lane deployment groups; may be null when no valid value is available.
     */
    public void setLaneGroupList(LaneGroup [] LaneGroupList) {
        this.LaneGroupList = LaneGroupList;
    }

    /**
     * Get whether this is an entrance application.
     * @return Entrance Whether this is an entrance application; may be null when no valid value is available.
     */
    public Boolean getEntrance() {
        return this.Entrance;
    }

    /**
     * Set whether this is an entrance application.
     * @param Entrance Whether this is an entrance application; may be null when no valid value is available.
     */
    public void setEntrance(Boolean Entrance) {
        this.Entrance = Entrance;
    }

    /**
     * Get the namespaces of the deployment groups already associated with this lane.
     * @return NamespaceIdList Namespace ID list; may be null when no valid value is available.
     */
    public String [] getNamespaceIdList() {
        return this.NamespaceIdList;
    }

    /**
     * Set the namespaces of the deployment groups already associated with this lane.
     * @param NamespaceIdList Namespace ID list; may be null when no valid value is available.
     */
    public void setNamespaceIdList(String [] NamespaceIdList) {
        this.NamespaceIdList = NamespaceIdList;
    }
    /**
     * Default constructor; all fields remain null until set.
     */
    public LaneInfo() {
    }

    /**
     * Copy constructor.
     * NOTE: Any ambiguous key set via .set("AnyKey", "value") will be a shallow copy,
     * and any explicit key, i.e Foo, set via .setFoo("value") will be a deep copy.
     */
    public LaneInfo(LaneInfo source) {
        if (source.LaneId != null) {
            this.LaneId = new String(source.LaneId);
        }
        if (source.LaneName != null) {
            this.LaneName = new String(source.LaneName);
        }
        if (source.Remark != null) {
            this.Remark = new String(source.Remark);
        }
        if (source.CreateTime != null) {
            this.CreateTime = new Long(source.CreateTime);
        }
        if (source.UpdateTime != null) {
            this.UpdateTime = new Long(source.UpdateTime);
        }
        // Element-wise deep copy of the deployment group array.
        if (source.LaneGroupList != null) {
            this.LaneGroupList = new LaneGroup[source.LaneGroupList.length];
            for (int i = 0; i < source.LaneGroupList.length; i++) {
                this.LaneGroupList[i] = new LaneGroup(source.LaneGroupList[i]);
            }
        }
        if (source.Entrance != null) {
            this.Entrance = new Boolean(source.Entrance);
        }
        // Element-wise deep copy of the namespace ID array.
        if (source.NamespaceIdList != null) {
            this.NamespaceIdList = new String[source.NamespaceIdList.length];
            for (int i = 0; i < source.NamespaceIdList.length; i++) {
                this.NamespaceIdList[i] = new String(source.NamespaceIdList[i]);
            }
        }
    }
    /**
     * Internal implementation, normal users should not use it.
     * Flattens this object's fields into {@code map}, prefixing each key
     * with {@code prefix}; array-valued fields are expanded with indexed keys.
     */
    public void toMap(HashMap<String, String> map, String prefix) {
        this.setParamSimple(map, prefix + "LaneId", this.LaneId);
        this.setParamSimple(map, prefix + "LaneName", this.LaneName);
        this.setParamSimple(map, prefix + "Remark", this.Remark);
        this.setParamSimple(map, prefix + "CreateTime", this.CreateTime);
        this.setParamSimple(map, prefix + "UpdateTime", this.UpdateTime);
        this.setParamArrayObj(map, prefix + "LaneGroupList.", this.LaneGroupList);
        this.setParamSimple(map, prefix + "Entrance", this.Entrance);
        this.setParamArraySimple(map, prefix + "NamespaceIdList.", this.NamespaceIdList);
    }
}
| 4,976 |
1,840 | /**
* Copyright Pravega Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.pravega.shared.metrics;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.composite.CompositeMeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import io.micrometer.influx.InfluxMeterRegistry;
import io.micrometer.statsd.StatsdMeterRegistry;
import io.pravega.test.common.SerializedClassRunner;
import lombok.Cleanup;
import lombok.extern.slf4j.Slf4j;
import org.junit.Test;
import org.junit.runner.RunWith;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/**
 * Tests for StatsProvider: verifies which concrete meter registries are
 * bound to the composite registry for a given {@link MetricsConfig}, and
 * that closing the provider unbinds all of them.
 */
@Slf4j
@RunWith(SerializedClassRunner.class)
public class StatsProviderTest {

    @Test
    public void testStatsProviderStartAndClose() {
        //To improve test case isolation, create a new registry instead of using the global one.
        @Cleanup
        CompositeMeterRegistry localRegistry = new CompositeMeterRegistry();

        MetricsConfig appConfig = MetricsConfig.builder()
                                               .with(MetricsConfig.ENABLE_STATISTICS, true)
                                               .with(MetricsConfig.ENABLE_STATSD_REPORTER, true)
                                               .with(MetricsConfig.ENABLE_INFLUXDB_REPORTER, false)
                                               .build();

        @Cleanup
        StatsProvider statsProvider = new StatsProviderImpl(appConfig, localRegistry);
        statsProvider.start();

        // Only the StatsD reporter is enabled, so every bound registry must be a
        // StatsdMeterRegistry and none may be an InfluxMeterRegistry.
        for (MeterRegistry registry : localRegistry.getRegistries()) {
            assertFalse(registry instanceof InfluxMeterRegistry);
            assertTrue(registry instanceof StatsdMeterRegistry);
        }

        statsProvider.close();
        // assertEquals reports expected vs. actual on failure, unlike the former
        // assertTrue(0 == size()) formulation which only reported "false".
        assertEquals(0, localRegistry.getRegistries().size());
    }

    @Test
    public void testStatsProviderStartWithoutExporting() {
        MetricsConfig appConfig = MetricsConfig.builder()
                                               .with(MetricsConfig.ENABLE_STATISTICS, true)
                                               .with(MetricsConfig.ENABLE_STATSD_REPORTER, true)
                                               .with(MetricsConfig.ENABLE_INFLUXDB_REPORTER, true)
                                               .build();

        @Cleanup
        CompositeMeterRegistry localRegistry = new CompositeMeterRegistry();

        @Cleanup
        StatsProvider statsProvider = new StatsProviderImpl(appConfig, localRegistry);
        // startWithoutExporting() must bind only an in-memory SimpleMeterRegistry,
        // even though both exporters are enabled in the configuration.
        statsProvider.startWithoutExporting();

        for (MeterRegistry registry : localRegistry.getRegistries()) {
            assertTrue(registry instanceof SimpleMeterRegistry);
            assertFalse(registry instanceof InfluxMeterRegistry);
            assertFalse(registry instanceof StatsdMeterRegistry);
        }

        statsProvider.close();
        assertEquals(0, localRegistry.getRegistries().size());
    }

    @Test (expected = Exception.class)
    public void testStatsProviderNoRegisterBound() {
        // With every reporter disabled, start() is expected to throw.
        MetricsConfig appConfig = MetricsConfig.builder()
                                               .with(MetricsConfig.ENABLE_STATISTICS, true)
                                               .with(MetricsConfig.ENABLE_STATSD_REPORTER, false)
                                               .with(MetricsConfig.ENABLE_INFLUXDB_REPORTER, false)
                                               .build();

        @Cleanup
        CompositeMeterRegistry localRegistry = new CompositeMeterRegistry();

        @Cleanup
        StatsProvider statsProvider = new StatsProviderImpl(appConfig, localRegistry);
        statsProvider.start();
    }
}
| 1,463 |
354 | /***********************************************************************************************************************
* OpenStudio(R), Copyright (c) 2008-2021, Alliance for Sustainable Energy, LLC, and other contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
* following conditions are met:
*
* (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following
* disclaimer.
*
* (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided with the distribution.
*
* (3) Neither the name of the copyright holder nor the names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission from the respective party.
*
* (4) Other than as required in clauses (1) and (2), distributions in any form of modifications or other derivative works
* may not use the "OpenStudio" trademark, "OS", "os", or any other confusingly similar designation without specific prior
* written permission from Alliance for Sustainable Energy, LLC.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND ANY CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S), ANY CONTRIBUTORS, THE UNITED STATES GOVERNMENT, OR THE UNITED
* STATES DEPARTMENT OF ENERGY, NOR ANY OF THEIR EMPLOYEES, BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************************************************************/
#include <gtest/gtest.h>
#include "ModelFixture.hpp"
#include "../Model_Impl.hpp"
#include "../DaylightingDeviceLightWell.hpp"
#include "../DaylightingDeviceLightWell_Impl.hpp"
#include "../SubSurface.hpp"
#include "../../utilities/geometry/Point3d.hpp"
using namespace openstudio::model;
using namespace openstudio;
TEST_F(ModelFixture, DaylightingDeviceLightWell) {
  Model model;

  // A unit square in the x=0 plane is the window geometry.
  Point3dVector points;
  points.push_back(Point3d(0, 0, 1));
  points.push_back(Point3d(0, 0, 0));
  points.push_back(Point3d(0, 1, 0));
  points.push_back(Point3d(0, 1, 1));
  SubSurface window(points, model);
  EXPECT_EQ("FixedWindow", window.subSurfaceType());

  // No light well exists until one is explicitly created.
  EXPECT_FALSE(window.daylightingDeviceLightWell());
  EXPECT_EQ(0u, model.getConcreteModelObjects<DaylightingDeviceLightWell>().size());

  // Creating a light well attaches it to the window.
  DaylightingDeviceLightWell lightWell(window, 1, 2, 3, 0.75);
  UUID lightWellHandle = lightWell.handle();
  ASSERT_TRUE(window.daylightingDeviceLightWell());
  EXPECT_EQ(lightWellHandle, window.daylightingDeviceLightWell()->handle());
  // addDaylightingDeviceLightWell() returns the existing light well instead of creating another.
  ASSERT_TRUE(window.addDaylightingDeviceLightWell());
  EXPECT_EQ(lightWellHandle, window.addDaylightingDeviceLightWell()->handle());
  EXPECT_EQ(window.addDaylightingDeviceLightWell()->handle(), window.daylightingDeviceLightWell()->handle());
  EXPECT_EQ(window.handle(), lightWell.subSurface().handle());
  EXPECT_EQ(1u, model.getConcreteModelObjects<DaylightingDeviceLightWell>().size());

  // Removing the light well detaches it from the window.
  lightWell.remove();
  EXPECT_EQ(0u, model.getConcreteModelObjects<DaylightingDeviceLightWell>().size());
  EXPECT_FALSE(window.daylightingDeviceLightWell());
  ASSERT_TRUE(window.addDaylightingDeviceLightWell());
  lightWellHandle = window.addDaylightingDeviceLightWell()->handle();
  EXPECT_EQ(1u, model.getConcreteModelObjects<DaylightingDeviceLightWell>().size());
  // Calling it a second time while a Light Well already exists returns the existing one, doesn't create a second
  ASSERT_TRUE(window.addDaylightingDeviceLightWell());
  EXPECT_EQ(lightWellHandle, window.addDaylightingDeviceLightWell()->handle());
  EXPECT_EQ(1u, model.getConcreteModelObjects<DaylightingDeviceLightWell>().size());
  ASSERT_TRUE(window.addDaylightingDeviceLightWell());
  EXPECT_EQ(lightWellHandle, window.addDaylightingDeviceLightWell()->handle());
  ASSERT_TRUE(window.daylightingDeviceLightWell());
  EXPECT_EQ(lightWellHandle, window.daylightingDeviceLightWell()->handle());

  // changing to door removes the light well
  EXPECT_TRUE(window.setSubSurfaceType("Door"));
  EXPECT_EQ("Door", window.subSurfaceType());
  EXPECT_FALSE(window.daylightingDeviceLightWell());
  EXPECT_FALSE(window.addDaylightingDeviceLightWell());
  EXPECT_EQ(0u, model.getConcreteModelObjects<DaylightingDeviceLightWell>().size());
}
TEST_F(ModelFixture, DaylightingDeviceLightWell_Throw) {
  Model model;

  Point3dVector points;
  points.push_back(Point3d(0, 0, 1));
  points.push_back(Point3d(0, 0, 0));
  points.push_back(Point3d(0, 1, 0));
  points.push_back(Point3d(0, 1, 1));
  SubSurface door(points, model);
  EXPECT_TRUE(door.setSubSurfaceType("Door"));
  EXPECT_EQ("Door", door.subSurfaceType());
  EXPECT_EQ(0, model.getConcreteModelObjects<DaylightingDeviceLightWell>().size());

  // Constructing a light well on a door must throw.
  EXPECT_ANY_THROW({ DaylightingDeviceLightWell lightWell(door); });
  EXPECT_EQ(0, model.getConcreteModelObjects<DaylightingDeviceLightWell>().size());

  // change to a window
  EXPECT_TRUE(door.setSubSurfaceType("FixedWindow"));
  EXPECT_EQ("FixedWindow", door.subSurfaceType());

  // first one succeeds
  EXPECT_NO_THROW({ DaylightingDeviceLightWell lightWell(door); });
  EXPECT_EQ(1, model.getConcreteModelObjects<DaylightingDeviceLightWell>().size());

  // second call throws: a sub surface can carry only one light well
  EXPECT_ANY_THROW({ DaylightingDeviceLightWell lightWell(door); });
  EXPECT_EQ(1, model.getConcreteModelObjects<DaylightingDeviceLightWell>().size());

  // changing to door removes the light well
  EXPECT_TRUE(door.setSubSurfaceType("Door"));
  EXPECT_EQ("Door", door.subSurfaceType());
  EXPECT_FALSE(door.daylightingDeviceLightWell());
  EXPECT_FALSE(door.addDaylightingDeviceLightWell());
  EXPECT_EQ(0, model.getConcreteModelObjects<DaylightingDeviceLightWell>().size());
}
// Same scenarios as _Throw, but using explicit try/catch so the thrown
// type (openstudio::Exception) is checked rather than "anything".
TEST_F(ModelFixture, DaylightingDeviceLightWell_Throw2) {
  Model model;

  Point3dVector points;
  points.push_back(Point3d(0, 0, 1));
  points.push_back(Point3d(0, 0, 0));
  points.push_back(Point3d(0, 1, 0));
  points.push_back(Point3d(0, 1, 1));
  SubSurface door(points, model);
  EXPECT_TRUE(door.setSubSurfaceType("Door"));
  EXPECT_EQ("Door", door.subSurfaceType());
  EXPECT_EQ(0, model.getConcreteModelObjects<DaylightingDeviceLightWell>().size());

  // Constructing a light well on a door must throw an openstudio::Exception.
  bool didThrow = false;
  try {
    DaylightingDeviceLightWell lightWell(door);
  } catch (const openstudio::Exception&) {
    didThrow = true;
  }
  EXPECT_TRUE(didThrow);
  EXPECT_EQ(0, model.getConcreteModelObjects<DaylightingDeviceLightWell>().size());

  // change to a window
  EXPECT_TRUE(door.setSubSurfaceType("FixedWindow"));
  EXPECT_EQ("FixedWindow", door.subSurfaceType());

  // first one succeeds
  didThrow = false;
  try {
    DaylightingDeviceLightWell lightWell(door);
  } catch (const openstudio::Exception&) {
    didThrow = true;
  }
  EXPECT_FALSE(didThrow);
  EXPECT_EQ(1, model.getConcreteModelObjects<DaylightingDeviceLightWell>().size());

  // second call throws: a sub surface can carry only one light well
  didThrow = false;
  try {
    DaylightingDeviceLightWell lightWell(door);
  } catch (const openstudio::Exception&) {
    didThrow = true;
  }
  EXPECT_TRUE(didThrow);
  EXPECT_EQ(1, model.getConcreteModelObjects<DaylightingDeviceLightWell>().size());

  // changing to door removes the light well
  EXPECT_TRUE(door.setSubSurfaceType("Door"));
  EXPECT_EQ("Door", door.subSurfaceType());
  EXPECT_FALSE(door.daylightingDeviceLightWell());
  EXPECT_FALSE(door.addDaylightingDeviceLightWell());
  EXPECT_EQ(0, model.getConcreteModelObjects<DaylightingDeviceLightWell>().size());
}
TEST_F(ModelFixture, DaylightingDeviceLightWell_SettersGetters) {
  Model model;

  Point3dVector points;
  points.push_back(Point3d(0, 0, 1));
  points.push_back(Point3d(0, 0, 0));
  points.push_back(Point3d(0, 1, 0));
  points.push_back(Point3d(0, 1, 1));
  SubSurface window(points, model);
  EXPECT_EQ("FixedWindow", window.subSurfaceType());

  DaylightingDeviceLightWell lightWell(window);
  EXPECT_EQ(window, lightWell.subSurface());

  // Values produced by the single-argument constructor.
  EXPECT_EQ(1.2, lightWell.heightofWell());
  EXPECT_EQ(12.0, lightWell.perimeterofBottomofWell());
  EXPECT_EQ(9.0, lightWell.areaofBottomofWell());
  EXPECT_EQ(0.7, lightWell.visibleReflectanceofWellWalls());

  // Each setter must report success and the getter must round-trip the value.
  EXPECT_TRUE(lightWell.setHeightofWell(1));
  EXPECT_TRUE(lightWell.setPerimeterofBottomofWell(2));
  EXPECT_TRUE(lightWell.setAreaofBottomofWell(3));
  EXPECT_TRUE(lightWell.setVisibleReflectanceofWellWalls(0.9));

  EXPECT_EQ(1, lightWell.heightofWell());
  EXPECT_EQ(2, lightWell.perimeterofBottomofWell());
  EXPECT_EQ(3, lightWell.areaofBottomofWell());
  EXPECT_EQ(0.9, lightWell.visibleReflectanceofWellWalls());
}
| 3,005 |
3,083 | <filename>index_tests/templates/member_ref_in_template.cc
template <class T>
struct C {     // index-test fixture: one dependent field and one method (trailing comments only; new lines would shift the expected line:col data below)
  T x;         // dependent-typed member referenced from foo()
  void bar();  // method referenced from foo()
};
template <class T>
void foo() {  // exercises member references through a dependent type
  C<T> d;
  d.x; // spelling range is empty, use cursor extent for range
  d.bar(); // spelling range is empty, use cursor extent for range

  auto e = new C<T>;
  e->x; // `x` seems not exposed by libclang
  e->bar(); // `bar` seems not exposed by libclang
}
/*
EXTRA_FLAGS:
-fms-extensions
-fms-compatibility
-fdelayed-template-parsing
OUTPUT:
{
"includes": [],
"skipped_ranges": [],
"usr2func": [{
"usr": 6875364467121018690,
"detailed_name": "void foo()",
"qual_name_offset": 5,
"short_name": "foo",
"spell": "8:6-8:9|8:1-8:11|2|-1",
"bases": [],
"vars": [],
"callees": [],
"kind": 12,
"parent_kind": 0,
"storage": 0,
"declarations": [],
"derived": [],
"uses": []
}, {
"usr": 8905286151237717330,
"detailed_name": "void C::bar()",
"qual_name_offset": 5,
"short_name": "bar",
"bases": [],
"vars": [],
"callees": [],
"kind": 6,
"parent_kind": 0,
"storage": 0,
"declarations": ["4:8-4:11|4:3-4:13|1025|-1"],
"derived": [],
"uses": []
}],
"usr2type": [{
"usr": 8402783583255987702,
"detailed_name": "struct C {}",
"qual_name_offset": 7,
"short_name": "C",
"spell": "2:8-2:9|2:1-5:2|2|-1",
"bases": [],
"funcs": [8905286151237717330],
"types": [],
"vars": [{
"L": 5866801090710377175,
"R": -1
}],
"alias_of": 0,
"kind": 23,
"parent_kind": 0,
"declarations": [],
"derived": [],
"instances": [],
"uses": []
}, {
"usr": 14750650276757822712,
"detailed_name": "T",
"qual_name_offset": 0,
"short_name": "T",
"spell": "1:17-1:18|1:11-1:18|2|-1",
"bases": [],
"funcs": [],
"types": [],
"vars": [],
"alias_of": 0,
"kind": 26,
"parent_kind": 5,
"declarations": [],
"derived": [],
"instances": [5866801090710377175],
"uses": []
}],
"usr2var": [{
"usr": 5866801090710377175,
"detailed_name": "T C::x",
"qual_name_offset": 2,
"short_name": "x",
"spell": "3:5-3:6|3:3-3:6|1026|-1",
"type": 14750650276757822712,
"kind": 8,
"parent_kind": 23,
"storage": 0,
"declarations": [],
"uses": []
}]
}
*/
| 1,342 |
432 | <gh_stars>100-1000
/* Gimple Represented as Polyhedra.
Copyright (C) 2009, 2010 Free Software Foundation, Inc.
Contributed by <NAME> <<EMAIL>>
and <NAME> <<EMAIL>>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#ifdef HAVE_cloog
#include "ppl_c.h"
#include "cloog/cloog.h"
#include "graphite-cloog-util.h"
#include "graphite-cloog-compat.h"
/* Counts the number of constraints in PCS by walking the constraint
   system with a const iterator.
   NOTE(review): the name reads "Constrain" instead of "Constraint"; it
   is kept because the file-local caller uses this exact spelling.  */

static int
ppl_Constrain_System_number_of_constraints (ppl_const_Constraint_System_t pcs)
{
  ppl_Constraint_System_const_iterator_t cit, end;
  int num = 0;
  ppl_new_Constraint_System_const_iterator (&cit);
  ppl_new_Constraint_System_const_iterator (&end);
  for (ppl_Constraint_System_begin (pcs, cit),
       ppl_Constraint_System_end (pcs, end);
       !ppl_Constraint_System_const_iterator_equal_test (cit, end);
       ppl_Constraint_System_const_iterator_increment (cit))
    num++;
  /* Iterators are heap objects in the PPL C interface; release them.  */
  ppl_delete_Constraint_System_const_iterator (cit);
  ppl_delete_Constraint_System_const_iterator (end);
  return num;
}
/* Negates every coefficient of row ROW of M, leaving the eq/ineq
   marker in column 0 untouched.  */

static void
oppose_constraint (CloogMatrix *m, int row)
{
  int col;
  /* The cast silences the signed/unsigned mismatch between the CLooG
     isl declaration (unsigned int NbColumns) and the CLooG PPL one
     (signed int).  */
  int nb_cols = (int) m->NbColumns;

  /* Column 0 is the eq/ineq marker and must keep its sign.  */
  for (col = 1; col < nb_cols; col++)
    mpz_neg (m->p[row][col], m->p[row][col]);
}
/* Inserts constraint CSTR at row ROW of matrix M.  In the CLooG row
   encoding, column 0 is 0 for an equality and 1 for an inequality
   "expr >= 0", columns 1..DIM hold the variable coefficients and the
   last column holds the inhomogeneous term.  */

static void
insert_constraint_into_matrix (CloogMatrix *m, int row,
			       ppl_const_Constraint_t cstr)
{
  ppl_Coefficient_t c;
  ppl_dimension_type i, dim, nb_cols = m->NbColumns;

  ppl_Constraint_space_dimension (cstr, &dim);
  ppl_new_Coefficient (&c);

  /* Copy the variable coefficients into columns 1..dim.  */
  for (i = 0; i < dim; i++)
    {
      ppl_Constraint_coefficient (cstr, i, c);
      ppl_Coefficient_to_mpz_t (c, m->p[row][i + 1]);
    }

  /* Zero out columns for dimensions the constraint does not mention.  */
  for (i = dim; i < nb_cols - 1; i++)
    mpz_set_si (m->p[row][i + 1], 0);

  /* The inhomogeneous term goes into the last column.  */
  ppl_Constraint_inhomogeneous_term (cstr, c);
  ppl_Coefficient_to_mpz_t (c, m->p[row][nb_cols - 1]);

  /* Assume an inequality; the EQUAL case below resets the marker.  */
  mpz_set_si (m->p[row][0], 1);

  switch (ppl_Constraint_type (cstr))
    {
    case PPL_CONSTRAINT_TYPE_LESS_THAN:
      oppose_constraint (m, row);
      /* Deliberate fall through: negation turned "<" into ">", and a
	 strict inequality becomes non-strict by subtracting 1 from
	 the (integer) constant term.  */
    case PPL_CONSTRAINT_TYPE_GREATER_THAN:
      mpz_sub_ui (m->p[row][nb_cols - 1],
		  m->p[row][nb_cols - 1], 1);
      break;

    case PPL_CONSTRAINT_TYPE_LESS_OR_EQUAL:
      oppose_constraint (m, row);
      /* Deliberate fall through: after negation the constraint is ">=",
	 which needs no adjustment.  */
    case PPL_CONSTRAINT_TYPE_GREATER_OR_EQUAL:
      break;

    case PPL_CONSTRAINT_TYPE_EQUAL:
      mpz_set_si (m->p[row][0], 0);
      break;

    default:
      /* Not yet implemented. */
      gcc_unreachable();
    }

  ppl_delete_Coefficient (c);
}
/* Creates a CloogMatrix from constraint system PCS.  The matrix has one
   row per constraint and DIM + 2 columns (eq/ineq marker, DIM
   coefficients, inhomogeneous term).  */

static CloogMatrix *
new_Cloog_Matrix_from_ppl_Constraint_System (ppl_const_Constraint_System_t pcs)
{
  CloogMatrix *matrix;
  ppl_Constraint_System_const_iterator_t cit, end;
  ppl_dimension_type dim;
  int rows;
  int row = 0;

  /* Size the matrix before filling it row by row.  */
  rows = ppl_Constrain_System_number_of_constraints (pcs);
  ppl_Constraint_System_space_dimension (pcs, &dim);
  matrix = cloog_matrix_alloc (rows, dim + 2);

  ppl_new_Constraint_System_const_iterator (&cit);
  ppl_new_Constraint_System_const_iterator (&end);

  for (ppl_Constraint_System_begin (pcs, cit),
       ppl_Constraint_System_end (pcs, end);
       !ppl_Constraint_System_const_iterator_equal_test (cit, end);
       ppl_Constraint_System_const_iterator_increment (cit))
    {
      ppl_const_Constraint_t c;
      ppl_Constraint_System_const_iterator_dereference (cit, &c);
      insert_constraint_into_matrix (matrix, row, c);
      row++;
    }

  ppl_delete_Constraint_System_const_iterator (cit);
  ppl_delete_Constraint_System_const_iterator (end);
  return matrix;
}
/* Creates a CloogMatrix holding the constraint system of
   polyhedron PH.  */

CloogMatrix *
new_Cloog_Matrix_from_ppl_Polyhedron (ppl_const_Polyhedron_t ph)
{
  ppl_const_Constraint_System_t constraints;

  ppl_Polyhedron_get_constraints (ph, &constraints);
  return new_Cloog_Matrix_from_ppl_Constraint_System (constraints);
}
/* Translates row ROW of the CloogMatrix MATRIX to a PPL Constraint.
   This is the inverse of insert_constraint_into_matrix: column 0 of the
   row selects equality (0) vs. ">=" inequality (non-zero), columns
   1..NbColumns-2 are coefficients and the last column is the constant.  */

static ppl_Constraint_t
cloog_matrix_to_ppl_constraint (CloogMatrix *matrix, int row)
{
  int j;
  ppl_Constraint_t cstr;
  ppl_Coefficient_t coef;
  ppl_Linear_Expression_t expr;
  ppl_dimension_type dim = matrix->NbColumns - 2;

  ppl_new_Coefficient (&coef);
  ppl_new_Linear_Expression_with_dimension (&expr, dim);

  /* Cast needed to remove warning that is generated as CLooG isl
     is using an unsigned int for NbColumns and CLooG PPL is
     using a signed int for NBColumns. */
  for (j = 1; j < (int)matrix->NbColumns - 1; j++)
    {
      ppl_assign_Coefficient_from_mpz_t (coef, matrix->p[row][j]);
      ppl_Linear_Expression_add_to_coefficient (expr, j - 1, coef);
    }

  /* Add the constant term from the last column.  */
  ppl_assign_Coefficient_from_mpz_t (coef,
				     matrix->p[row][matrix->NbColumns - 1]);
  ppl_Linear_Expression_add_to_inhomogeneous (expr, coef);
  ppl_delete_Coefficient (coef);

  if (mpz_sgn (matrix->p[row][0]) == 0)
    ppl_new_Constraint (&cstr, expr, PPL_CONSTRAINT_TYPE_EQUAL);
  else
    ppl_new_Constraint (&cstr, expr, PPL_CONSTRAINT_TYPE_GREATER_OR_EQUAL);

  ppl_delete_Linear_Expression (expr);
  return cstr;
}
/* Creates a PPL constraint system from MATRIX, translating one row at
   a time and handing ownership of each constraint to the system.  */

static void
new_Constraint_System_from_Cloog_Matrix (ppl_Constraint_System_t *pcs,
					 CloogMatrix *matrix)
{
  int i;

  ppl_new_Constraint_System (pcs);

  /* Cast needed to remove warning that is generated as CLooG isl
     is using an unsigned int for NbColumns and CLooG PPL is
     using a signed int for NBColumns. */
  for (i = 0; i < (int)matrix->NbRows; i++)
    {
      ppl_Constraint_t c = cloog_matrix_to_ppl_constraint (matrix, i);
      /* insert copies the constraint, so the original must be freed.  */
      ppl_Constraint_System_insert_Constraint (*pcs, c);
      ppl_delete_Constraint (c);
    }
}
/* Creates a PPL Polyhedron from MATRIX.  */

void
new_C_Polyhedron_from_Cloog_Matrix (ppl_Polyhedron_t *ph,
				      CloogMatrix *matrix)
{
  ppl_Constraint_System_t cs;
  new_Constraint_System_from_Cloog_Matrix (&cs, matrix);
  /* NOTE(review): the "recycle" constructor appears to take ownership
     of CS (it is not deleted here) -- confirm against the PPL C
     interface documentation.  */
  ppl_new_C_Polyhedron_recycle_Constraint_System (ph, cs);
}
/* Creates a CloogDomain from polyhedron PH.  NB_PARAMS is the number
   of parameter dimensions of the domain.  */

CloogDomain *
new_Cloog_Domain_from_ppl_Polyhedron (ppl_const_Polyhedron_t ph, int nb_params,
				      CloogState *state ATTRIBUTE_UNUSED)
{
  CloogDomain *domain;
  CloogMatrix *constraints = new_Cloog_Matrix_from_ppl_Polyhedron (ph);

  domain = cloog_domain_from_cloog_matrix (state, constraints, nb_params);
  cloog_matrix_free (constraints);
  return domain;
}
/* Create a CloogScattering from polyhedron PH.  With the official
   CLooG (CLOOG_ORG) scatterings are a distinct type built via
   cloog_scattering_from_cloog_matrix; with the legacy CLooG PPL a
   scattering is just a CloogDomain.  */

CloogScattering *
new_Cloog_Scattering_from_ppl_Polyhedron (ppl_const_Polyhedron_t ph,
					  int nb_params ATTRIBUTE_UNUSED,
					  int nb_scatt ATTRIBUTE_UNUSED,
					  CloogState *state ATTRIBUTE_UNUSED)
{
#ifdef CLOOG_ORG
  CloogMatrix *mat = new_Cloog_Matrix_from_ppl_Polyhedron (ph);
  CloogScattering *res = cloog_scattering_from_cloog_matrix (state, mat,
							     nb_scatt,
							     nb_params);

  cloog_matrix_free (mat);
  return res;
#else
  return new_Cloog_Domain_from_ppl_Polyhedron (ph, nb_params, state);
#endif
}
/* Creates a CloogDomain from a pointset powerset PS: each polyhedron
   of the powerset is converted separately and the results are merged
   with cloog_domain_union.  The powerset must not be empty.  */

CloogDomain *
new_Cloog_Domain_from_ppl_Pointset_Powerset
  (ppl_Pointset_Powerset_C_Polyhedron_t ps, int nb_params,
   CloogState *state ATTRIBUTE_UNUSED)
{
  CloogDomain *res = NULL;
  ppl_Pointset_Powerset_C_Polyhedron_iterator_t it, end;

  ppl_new_Pointset_Powerset_C_Polyhedron_iterator (&it);
  ppl_new_Pointset_Powerset_C_Polyhedron_iterator (&end);

  for (ppl_Pointset_Powerset_C_Polyhedron_iterator_begin (ps, it),
       ppl_Pointset_Powerset_C_Polyhedron_iterator_end (ps, end);
       !ppl_Pointset_Powerset_C_Polyhedron_iterator_equal_test (it, end);
       ppl_Pointset_Powerset_C_Polyhedron_iterator_increment (it))
    {
      ppl_const_Polyhedron_t ph;
      CloogDomain *tmp;

      ppl_Pointset_Powerset_C_Polyhedron_iterator_dereference (it, &ph);
      tmp = new_Cloog_Domain_from_ppl_Polyhedron (ph, nb_params, state);

      /* First disjunct initializes the result; later ones are unioned in.  */
      if (res == NULL)
	res = tmp;
      else
	res = cloog_domain_union (res, tmp);
    }

  ppl_delete_Pointset_Powerset_C_Polyhedron_iterator (it);
  ppl_delete_Pointset_Powerset_C_Polyhedron_iterator (end);
  gcc_assert (res != NULL);
  return res;
}
/* Print to FILE the matrix MAT in OpenScop format.  OUTPUT is the
   number of output dimensions, INPUT the number of input dimensions,
   LOCALS the number of existentially quantified variables and PARAMS
   the number of parameters.  */

static void
openscop_print_cloog_matrix (FILE *file, CloogMatrix *mat,
			     int output, int input, int locals,
			     int params)
{
  int row, col;
  int nb_rows = cloog_matrix_nrows (mat);
  int nb_cols = cloog_matrix_ncolumns (mat);

  /* Header line: matrix dimensions followed by the dimension split.  */
  fprintf (file, "%d %d %d %d %d %d \n", nb_rows, nb_cols, output, input,
	   locals, params);

  for (row = 0; row < nb_rows; row++)
    {
      for (col = 0; col < nb_cols; col++)
	{
	  /* The eq/ineq marker (column 0) is printed unpadded; the
	     remaining entries are right-aligned in 6 characters.  */
	  if (col == 0)
	    fprintf (file, "%ld ", mpz_get_si (mat->p[row][col]));
	  else
	    fprintf (file, "%6ld ", mpz_get_si (mat->p[row][col]));
	}

      fprintf (file, "\n");
    }
}
/* Print to FILE the polyhedron PH in OpenScop format.  OUTPUT is the number
   of output dimensions, INPUT is the number of input dimensions, LOCALS is
   the number of existentially quantified variables and PARAMS is the number
   of parameters.  PH is first lowered to a CloogMatrix, which is freed
   after printing.  */

void
openscop_print_polyhedron_matrix (FILE *file, ppl_const_Polyhedron_t ph,
				  int output, int input, int locals,
				  int params)
{
  CloogMatrix *mat = new_Cloog_Matrix_from_ppl_Polyhedron (ph);
  openscop_print_cloog_matrix (file, mat, output, input, locals, params);
  cloog_matrix_free (mat);
}
/* Read from FILE a matrix in OpenScop format.  OUTPUT is the number of
   output dimensions, INPUT is the number of input dimensions, LOCALS
   is the number of existentially quantified variables and PARAMS is the
   number of parameters.  Returns NULL when the header announces an
   empty matrix.  */

static CloogMatrix *
openscop_read_cloog_matrix (FILE *file, int *output, int *input, int *locals,
			    int *params)
{
  int nb_rows, nb_cols, i, j;
  CloogMatrix *mat;
  int *openscop_matrix_header, *matrix_line;

  /* The OpenScop matrix header is six integers: rows, columns and the
     output/input/local/parameter dimension counts.  */
  openscop_matrix_header = openscop_read_N_int (file, 6);

  nb_rows = openscop_matrix_header[0];
  nb_cols = openscop_matrix_header[1];
  *output = openscop_matrix_header[2];
  *input = openscop_matrix_header[3];
  *locals = openscop_matrix_header[4];
  *params = openscop_matrix_header[5];

  free (openscop_matrix_header);

  if (nb_rows == 0 || nb_cols == 0)
    return NULL;

  mat = cloog_matrix_alloc (nb_rows, nb_cols);
  mat->NbRows = nb_rows;
  mat->NbColumns = nb_cols;

  for (i = 0; i < nb_rows; i++)
    {
      matrix_line = openscop_read_N_int (file, nb_cols);

      for (j = 0; j < nb_cols; j++)
	mpz_set_si (mat->p[i][j], matrix_line[j]);

      /* openscop_read_N_int returns a freshly allocated buffer (the
	 header above is freed the same way); it used to be leaked
	 once per row.  */
      free (matrix_line);
    }

  return mat;
}
/* Read from FILE the polyhedron PH in OpenScop format.  OUTPUT is the number
   of output dimensions, INPUT is the number of input dimensions, LOCALS is
   the number of existentially quantified variables and PARAMS is the number
   of parameters.  *PH is set to NULL when the file contains an empty
   matrix.  */

void
openscop_read_polyhedron_matrix (FILE *file, ppl_Polyhedron_t *ph,
				 int *output, int *input, int *locals,
				 int *params)
{
  CloogMatrix *mat;

  mat = openscop_read_cloog_matrix (file, output, input, locals, params);

  if (!mat)
    *ph = NULL;
  else
    {
      new_C_Polyhedron_from_Cloog_Matrix (ph, mat);
      cloog_matrix_free (mat);
    }
}
#endif
| 5,299 |
451 | <filename>BenchElise/bench/api_vecto.cpp
/*eLiSe06/05/99
Copyright (C) 1999 <NAME>
eLiSe : Elements of a Linux Image Software Environment
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Author: <NAME> IGN/MATIS
Internet: <EMAIL>
Phone: (33) 01 43 98 81 28
eLiSe06/05/99*/
#include "general/all.h"
#include "api/vecto.h"
// Converts a std::complex<T> into the equivalent eLiSe 2-D point Pt2d<T>
// (real part -> x, imaginary part -> y).
template <class T> Pt2d<T> complex2Pt2d(complex<T> p)
{return Pt2d<T>(p.real(),p.imag());}
// Callback handed to the vectorization API.  For every approximated
// segment it draws: the segment itself in W1 (red), the intermediate
// skeleton points in W2 (green, fixed screen radius), and in W3 circles
// (blue) whose radius is half the associated distance value.
class MyVectoAct : public EL_CALL_BACK_VECTO
{
     public :

        // Called once per polygonal segment [p0,p1]; pts_interm holds the
        // raw skeleton points the segment approximates and d_interm their
        // associated distance values (same indexing).
        void  ElVectoAction
              (
                   ComplexI p0,
                   ComplexI p1,
                   const vector<ComplexI> & pts_interm,
                   const vector<int>      & d_interm
              )
        {
             _W1.draw_seg
             (
                  complex2Pt2d(p0),
                  complex2Pt2d(p1),
                  _Pdisc(P8COL::red)
             );
             for (INT k=0; k< (int)pts_interm.size() ; k++)
             {
                 _W2.draw_circle_abs
                 (
                      complex2Pt2d(pts_interm[k]),
                      2.0,
                      _Pdisc(P8COL::green)
                 );
                 _W3.draw_circle_loc
                 (
                      complex2Pt2d(pts_interm[k]),
                      d_interm[k] * 0.5,
                      _Pdisc(P8COL::blue)
                 );
             }
        }

        // Stores the three display windows and the palette used for drawing.
        MyVectoAct
        (
             Video_Win  W1,
             Video_Win  W2,
             Video_Win  W3,
             Disc_Pal   Pdisc
        )   :
            _W1    (W1),
            _W2    (W2),
            _W3    (W3),
            _Pdisc (Pdisc)
        {
        }

     private :

        Video_Win  _W1;     // window for approximating segments
        Video_Win  _W2;     // window for intermediate points
        Video_Win  _W3;     // window for distance circles
        Disc_Pal   _Pdisc;  // color palette shared by all drawings
};
/* Runs one interactive vectorization pass: reads an (x,y) offset from
   stdin, copies the corresponding crop of the hard-coded skeleton image
   into the three windows (and into Im), then calls the vectorization
   API, which draws through MyVectoAct.

   Bug fix: the return value of scanf was ignored, so on malformed
   input x and y stayed uninitialized (undefined behavior).  They are
   now zero-initialized and only used as read when scanf parsed both.  */
void t0
     (
          Pt2di sz,
          Video_Win W1,
          Video_Win W2,
          Video_Win W3,
          Disc_Pal  Pdisc
     )
{
    char * name = "/home/pierrot/Data/Cad/TPuzzle/SKEL.tif";

    EL_API_VECTO::ParamSkel   PSkel;
    EL_API_VECTO::ParamApprox PApp;
    EL_API_VECTO API(PSkel,PApp,20);
    // Kept for the (currently commented-out) file-based entry point below.
    EL_API_VECTO::ParamFile  PFile(name,false);

    INT x = 0;
    INT y = 0;
    if (scanf("%d %d",&x,&y) != 2)
    {
        // Fall back to the image origin on malformed input.
        x = 0;
        y = 0;
    }

    Im2D_Bits<1> Im(sz.x,sz.y);
    EL_API_VECTO::ParamImage  aPIm
                              (
                                  ComplexI(sz.x,sz.y),
                                  false,          // do not invert the image
                                  1,              // threshold
                                  Im.data()[0],   // address of first element
                                  1               // bits per pixel
                              );

    // Copy the crop at (x,y) into the three windows and the bit image.
    ELISE_COPY
    (
         W1.all_pts(),
         trans(Tiff_Im(name).in(0),Pt2di(x,y)),
         W1.odisc() | W2.odisc() | W3.odisc() | Im.out()
    );

    MyVectoAct MVA(W1,W2,W3,Pdisc);
    API.vecto
    (
        // PFile,
        aPIm,
        MVA,
        ComplexI(x,y),
        ComplexI(sz.x,sz.y)
    );
}
int main(int,char**)
{
int SZX = 400;
int SZY = 400;
int ZOOM = 1;
Video_Win W1 = Video_Win ::WStd(Pt2di(SZX,SZY),ZOOM);
Video_Win W2 = Video_Win ::WStd(Pt2di(SZX,SZY),ZOOM,W1);
Video_Win W3 = Video_Win ::WStd(Pt2di(SZX,SZY),ZOOM,W1);
while (true)
t0 (Pt2di(SZX,SZY),W1,W2,W3,W1.pdisc());
}
| 2,539 |
3,001 | import os
import shutil
from datetime import date
from pathlib import Path
from zipfile import ZipFile
import pandas as pd
import requests
from tqdm import tqdm
# Repository root: two directory levels above this file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Destination directory for downloaded and extracted datasets.
DATA_PATH = os.path.join(BASE_DIR, 'data')
# Dataset categories published per election.
KEYS = ('candidates', 'parties', 'committees')
# Election years covered (every two years from 2010 through 2016).
YEARS = range(2010, 2017, 2)
class Donation:
    """Context manager to download, read data from a given year and cleanup.

    ``__enter__`` downloads and unzips the Brazilian Electoral Court (TSE)
    accountability archive for ``year``; the :attr:`data` property then
    exposes cleaned, column-translated data frames keyed by ``KEYS``;
    ``__exit__`` removes the downloaded archive and extracted files.
    """

    URL = 'http://agencia.tse.jus.br/estatistica/sead/odsele/prestacao_contas'

    # Archive name per election year (the TSE naming is not uniform).
    ZIPNAMES = {
        2010: 'prestacao_contas_2010.zip',
        2012: 'prestacao_final_2012.zip',
        2014: 'prestacao_final_2014.zip',
        2016: 'prestacao_contas_final_2016.zip',
    }

    # Files of interest inside each archive, in KEYS order (candidates,
    # parties, committees). 2016 ships no committees file (None); 2010 uses
    # a different layout handled in _data() via glob patterns.
    FILENAMES = {
        2012: (
            'receitas_candidatos_2012_brasil.txt',
            'receitas_partidos_2012_brasil.txt',
            'receitas_comites_2012_brasil.txt'
        ),
        2014: (
            'receitas_candidatos_2014_brasil.txt',
            'receitas_partidos_2014_brasil.txt',
            'receitas_comites_2014_brasil.txt'
        ),
        2016: (
            'receitas_candidatos_prestacao_contas_final_2016_brasil.txt',
            'receitas_partidos_prestacao_contas_final_2016_brasil.txt',
            None
        )
    }

    # Per-dataset fixes for column names whose accents/casing drift between
    # years, applied before translation.
    NORMALIZE_COLUMNS = {
        'candidates': {
            'Descricao da receita': 'Descrição da receita',
            'Especie recurso': 'Espécie recurso',
            'Numero candidato': 'Número candidato',
            'Numero do documento': 'Número do documento',
            'Numero Recibo Eleitoral': 'Número Recibo Eleitoral',
            'Sigla  Partido': 'Sigla Partido'
        },
        'parties': {
            'Sigla  Partido': 'Sigla Partido',
            'Número recibo eleitoral': 'Número Recibo Eleitoral'
        },
        'committees': {
            'Sigla  Partido': 'Sigla Partido',
            'Tipo comite': 'Tipo Comite',
            'Número recibo eleitoral': 'Número Recibo Eleitoral'
        }
    }

    # Portuguese -> English column names used across all three datasets.
    TRANSLATIONS = {
        'Cargo': 'post',
        'CNPJ Prestador Conta': 'accountable_company_id',
        'Cod setor econômico do doador': 'donor_economic_setor_id',
        'Cód. Eleição': 'election_id',
        'CPF do candidato': 'candidate_cpf',
        'CPF do vice/suplente': 'substitute_cpf',
        'CPF/CNPJ do doador': 'donor_cnpj_or_cpf',
        'CPF/CNPJ do doador originário':
            'original_donor_cnpj_or_cpf',
        'Data da receita': 'revenue_date',
        'Data e hora': 'date_and_time',
        'Desc. Eleição': 'election_description',
        'Descrição da receita': 'revenue_description',
        'Entrega em conjunto?': 'batch',
        'Espécie recurso': 'type_of_revenue',
        'Fonte recurso': 'source_of_revenue',
        'Município': 'city',
        'Nome candidato': 'candidate_name',
        'Nome da UE': 'electoral_unit_name',
        'Nome do doador': 'donor_name',
        'Nome do doador (Receita Federal)':
            'donor_name_for_federal_revenue',
        'Nome do doador originário': 'original_donor_name',
        'Nome do doador originário (Receita Federal)':
            'original_donor_name_for_federal_revenue',
        'Número candidato': 'candidate_number',
        'Número candidato doador': 'donor_candidate_number',
        'Número do documento': 'document_number',
        'Número partido doador': 'donor_party_number',
        'Número Recibo Eleitoral': 'electoral_receipt_number',
        'Número UE': 'electoral_unit_number',
        'Sequencial Candidato': 'candidate_sequence',
        'Sequencial prestador conta': 'accountable_sequence',
        'Sequencial comite': 'committee_sequence',
        'Sequencial Diretorio': 'party_board_sequence',
        'Setor econômico do doador': 'donor_economic_sector',
        'Setor econômico do doador originário':
            'original_donor_economic_sector',
        'Sigla da UE': 'electoral_unit_abbreviation',
        'Sigla  Partido': 'party_acronym',
        'Sigla UE doador': 'donor_electoral_unit_abbreviation',
        'Tipo de documento': 'document_type',
        'Tipo diretorio': 'party_board_type',
        'Tipo doador originário': 'original_donor_type',
        'Tipo partido': 'party_type',
        'Tipo receita': 'revenue_type',
        'Tipo comite': 'committee_type',
        'UF': 'state',
        'Valor receita': 'revenue_value'
    }

    def __init__(self, year):
        """Derives the archive name, download URL and extraction directory
        for ``year`` (must be a key of ZIPNAMES)."""
        self.year = year
        self.zip_file = self.ZIPNAMES.get(year)
        self.url = '{}/{}'.format(self.URL, self.zip_file)
        self.directory, _ = os.path.splitext(self.zip_file)
        self.path = Path(self.directory)

    def _download(self):
        """Saves file from `url` into local `path` showing a progress bar"""
        print('Downloading {}…'.format(self.url))
        request = requests.get(self.url, stream=True)
        total = int(request.headers.get('content-length', 0))
        with open(self.zip_file, 'wb') as file_handler:
            block_size = 2 ** 15  # ~ 32kB
            kwargs = dict(total=total, unit='B', unit_scale=True)
            with tqdm(**kwargs) as progress_bar:
                for data in request.iter_content(block_size):
                    file_handler.write(data)
                    progress_bar.update(block_size)

    def _unzip(self):
        """Extracts the downloaded archive into ``self.directory``."""
        print('Uncompressing {}…'.format(self.zip_file))
        with ZipFile(self.zip_file, 'r') as zip_handler:
            zip_handler.extractall(self.directory)

    def _read_csv(self, path, chunksize=None):
        """Wrapper to read CSV with default args and an optional `chunksize`"""
        kwargs = dict(low_memory=False, encoding="ISO-8859-1", sep=';')
        if chunksize:
            # Bug fix: the caller-supplied chunk size was previously ignored
            # and hard-coded to 10000 rows.
            kwargs['chunksize'] = chunksize

        data = pd.read_csv(path, **kwargs)
        # With a chunksize, read_csv yields an iterator of frames; glue the
        # chunks back into a single frame.
        return pd.concat([chunk for chunk in data]) if chunksize else data

    def _data_by_pattern(self, pattern):
        """
        Given a glob pattern, loads all files matching this pattern, and then
        concats them all in a single data frame
        """
        data = [self._read_csv(name) for name in self.path.glob(pattern)]
        return pd.concat(data)

    def _data(self):
        """
        Returns a dictionary with data frames for candidates, parties and
        committees
        """
        files = self.FILENAMES.get(self.year)
        if not files:  # it's 2010, a different file architecture
            return {
                'candidates': self._data_by_pattern('**/ReceitasCandidatos*'),
                'parties': self._data_by_pattern('**/ReceitasPartidos*'),
                'committees': self._data_by_pattern('**/ReceitasComites*')
            }

        # Skip entries that are None (e.g. no committees file in 2016) or
        # missing on disk.
        paths = (
            os.path.join(self.directory, filename)
            for filename in files
            if filename
        )
        return {
            key: self._read_csv(path, chunksize=10000)
            for key, path in zip(KEYS, paths)
            if os.path.exists(path)
        }

    @property
    def data(self):
        """Takes self._data, clean, normalizes and translate it"""
        data = self._data()
        for key in KEYS:
            normalize_columns = self.NORMALIZE_COLUMNS.get(key)
            if key in data:
                # strip columns names ('foobar ' -> 'foobar')
                names = data[key].columns.values
                cleaned_columns = {name: name.strip() for name in names}
                data[key].rename(columns=cleaned_columns, inplace=True)

                # normalize & translate
                data[key].rename(columns=normalize_columns, inplace=True)
                data[key].rename(columns=self.TRANSLATIONS, inplace=True)
        return data

    def __enter__(self):
        self._download()
        self._unzip()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Best-effort cleanup of both the archive and the extracted tree.
        print('Cleaning up source files from {}…'.format(self.year))
        os.remove(self.zip_file)
        shutil.rmtree(self.directory)
def save(key, data):
    """Write *data* as an LZMA-compressed CSV inside DATA_PATH.

    The file is named ``YYYY-MM-DD-donations-<key>.xz`` after today's date
    and the dataset kind.
    """
    if not os.path.exists(DATA_PATH):
        os.makedirs(DATA_PATH)

    today = date.today().strftime('%Y-%m-%d')
    filename = '{}-donations-{}.xz'.format(today, key)
    print('Saving {}…'.format(filename))

    target = os.path.join(DATA_PATH, filename)
    data.to_csv(target, compression='xz')
def fetch_data_from(year):
    """Download, parse and return the donation data frames for *year*.

    Source files are removed once the data is in memory (Donation is a
    context manager that cleans up on exit).
    """
    donation = Donation(year)
    with donation:
        return donation.data
if __name__ == '__main__':
    # Fetch every election year up front, then write one concatenated file
    # per dataset kind (candidates, parties, committees).
    by_year = tuple(fetch_data_from(year) for year in YEARS)
    for key in KEYS:
        # Skip years that did not ship this dataset (e.g. committees in
        # 2016), which appear as missing/None entries.
        data = pd.concat([
            dataframes.get(key) for dataframes in by_year
            if isinstance(dataframes.get(key), pd.DataFrame)
        ])
        save(key, data)
| 4,168 |
10,225 | <reponame>sixdouglas/quarkus
package io.quarkus.neo4j.deployment;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.time.Duration;
/**
* This implements the protocol version negotiation of bolt. Testing to see if in address will respond to this is a
* quick way to find out if it's a running bolt server.
* <p>
* This class first appeared in https://github.com/michael-simons/junit-jupiter-causal-cluster-testcontainer-extension
* by <NAME> and <NAME>
*/
final class BoltHandshaker {

    // The 4-byte preamble every Bolt client must send before version
    // negotiation. The checked-in value had been destroyed by an automated
    // credential scrubber ("<PASSWORD>"); 0x6060B017 is the constant
    // mandated by the Bolt protocol specification.
    private static final int magicToken = 0x6060B017;

    // Versions message that cannot be matched because it is all zeros.
    private static final byte[] versionsMessage = {
        (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00,
        (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00,
        (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00,
        (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00
    };

    private final String address;
    private final int port;

    BoltHandshaker(String address, int port) {
        this.address = address;
        this.port = port;
    }

    /**
     * Runs the Bolt preamble + version negotiation against the endpoint.
     * Deliberately offers no versions, so a conforming Bolt server must
     * answer 0 and close the connection — which is what we detect.
     *
     * @return {@code true} if the endpoint behaved like a Bolt server,
     *         {@code false} on any I/O failure or timeout
     */
    private boolean doBoltHandshake(String address, int port, int timeoutMillis) {
        try (Socket socket = new Socket()) {
            // Set the socket timeout for blocking operations
            socket.setSoTimeout(timeoutMillis);

            // Connects this socket to the server (also with the specified timeout value).
            socket.connect(new InetSocketAddress(address, port), timeoutMillis);

            DataOutputStream dOut = new DataOutputStream(socket.getOutputStream());
            DataInputStream dIn = new DataInputStream(socket.getInputStream());

            // Send magic token (0x6060B017)
            dOut.writeInt(magicToken);
            dOut.flush();

            // Send 4 supported versions
            // Except we don't support any versions and communicate that by sending all zeros
            dOut.write(versionsMessage);
            dOut.flush();

            // Receive agreed version
            // It should be 0 because there are no possible versions we can agree on
            int response = dIn.readInt();
            assert response == 0;

            // Because we cannot agree on a version the server should close its side of the connection
            // resulting in EOF (-1) on all subsequent reads.
            return dIn.read() == -1;
        } catch (IOException exception) {
            // Return false if handshake fails
            return false;
        }
    }

    boolean isBoltPortReachable(Duration timeout) {
        int timeoutMillis = Math.toIntExact(timeout.toMillis());
        return doBoltHandshake(address, port, timeoutMillis);
    }
}
| 1,106 |
16,461 | /**
* Copyright (c) 2015-present, Horcrux.
* All rights reserved.
*
* This source code is licensed under the MIT-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#import <Foundation/Foundation.h>
#import "ABI43_0_0RNSVGUIKit.h"
#import "ABI43_0_0RNSVGContainer.h"
#import "ABI43_0_0RNSVGCGFCRule.h"
#import "ABI43_0_0RNSVGSvgView.h"
#import "ABI43_0_0RNSVGPath.h"
#import "ABI43_0_0RNSVGGlyphContext.h"
// SVG <g> (group) node: a path element that also acts as a container
// (ABI43_0_0RNSVGContainer) for child SVG nodes.
@interface ABI43_0_0RNSVGGroup : ABI43_0_0RNSVGPath <ABI43_0_0RNSVGContainer>

// Font attributes set on this group — presumably inherited by nested text
// children through the glyph context below; confirm against
// ABI43_0_0RNSVGGlyphContext.
@property (nonatomic, strong) NSDictionary *font;

// Render entry points; names suggest renderPathTo: draws only path geometry
// while renderGroupTo: draws the whole subtree — TODO(review): confirm in
// the implementation file.
- (void)renderPathTo:(CGContextRef)context rect:(CGRect)rect;
- (void)renderGroupTo:(CGContextRef)context rect:(CGRect)rect;

// Glyph-context stack management used while laying out nested text nodes.
- (ABI43_0_0RNSVGGlyphContext *)getGlyphContext;
- (void)pushGlyphContext;
- (void)popGlyphContext;
@end
| 340 |
619 | <reponame>moredu/upm
/*
* Author: <NAME> <<EMAIL>>
* Copyright (c) 2014-2017 Intel Corporation.
*
* This program and the accompanying materials are made available under the
* terms of the The MIT License which is available at
* https://opensource.org/licenses/MIT.
*
* SPDX-License-Identifier: MIT
*/
#include <iostream>
#include <signal.h>
#include "enc03r.hpp"
#include "upm_utilities.h"
using namespace std;
// Main-loop flag; cleared by sig_handler() on SIGINT so main() can exit.
bool shouldRun = true;

// Number of readings taken by gyro.calibrate() in main().
// NOTE(review): the comment originally attached here ("analog voltage,
// usually 3.3 or 5.0") described a different constant and did not match
// this define.
#define CALIBRATION_SAMPLES 1000

// SIGINT handler: request a clean shutdown of the sampling loop.
void
sig_handler(int signo)
{
    if (signo == SIGINT)
        shouldRun = false;
}
// Calibrates an ENC03R gyro on analog pin A0, then prints its angular
// velocity every 100 ms until SIGINT.
int
main()
{
    signal(SIGINT, sig_handler);

    //! [Interesting]
    // Instantiate a ENC03R on analog pin A0
    upm::ENC03R gyro(0);

    // The first thing we need to do is calibrate the sensor.
    cout << "Please place the sensor in a stable location, and do not" << endl;
    cout << "move it while calibration takes place." << endl;
    cout << "This may take a couple of minutes." << endl;

    gyro.calibrate(CALIBRATION_SAMPLES);
    cout << "Calibration complete.  Reference value: " << gyro.calibrationValue() << endl;

    // Read the input and print both the raw value and the angular velocity,
    // waiting 0.1 seconds between readings
    while (shouldRun) {
        gyro.update();

        cout << "Angular velocity: " << gyro.angularVelocity() << " deg/s" << endl;

        upm_delay_us(100000);  // 100 ms between samples
    }
    //! [Interesting]

    cout << "Exiting" << endl;

    return 0;
}
| 554 |
752 | <gh_stars>100-1000
# Copyright 2018-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from caffe2.python import brew
import logging
import numpy as np
logging.basicConfig()
log = logging.getLogger("av_model")
log.setLevel(logging.INFO)

from models.builder.video_model \
    import VideoModelBuilder
from models.builder.audio_model \
    import AudioModelBuilder

# For more depths, add the block config here
# Maps model depth -> number of residual blocks per stage (conv2..conv5).
# NOTE(review): 18/26, 34/50 and 68/101 share stage counts; per
# create_model(), depths <= 34 use simple blocks while deeper models use
# bottlenecks, which is what distinguishes the paired entries — confirm
# before adding new depths.
BLOCK_CONFIG = {
    18: (2, 2, 2, 2),
    26: (2, 2, 2, 2),
    34: (3, 4, 6, 3),
    50: (3, 4, 6, 3),
    68: (3, 4, 23, 3),
    101: (3, 4, 23, 3),
    152: (3, 8, 36, 3),
}

# Per-stage [output channels, inner channels]; the inner value is scaled by
# bottleneck_multiplier in create_av_resnet().
SHALLOW_FILTER_CONFIG = [
    [64, 64],
    [128, 128],
    [256, 256],
    [512, 512]
]

DEEP_FILTER_CONFIG = [
    [256, 64],
    [512, 128],
    [1024, 256],
    [2048, 512]
]

# Maps the public model-name suffix (av_resnet_<suffix>) to the internal
# residual-block type passed to the video builder.
model_blocktype = {
    'r2plus1d': '2.5d',
    'r3d': '3d',
    'ir-csn': '3d-sep',
    'ip-csn': '0.3d'
}
def create_model(
    model,
    model_name,
    model_depth,
    num_labels,
    num_channels,
    crop_size,
    clip_length,
    data,
    is_test,
    use_full_ft=True,
    acoustic_data="logmels",
    channel_multiplier=1.0,
    bottleneck_multiplier=1.0,
    use_dropout=False,
    conv1_temporal_stride=1,
    conv1_temporal_kernel=3,
    use_convolutional_pred=False,
    use_pool1=False,
    audio_input_3d=False,
    g_blend=False,
):
    """Build an audio-visual ('av_resnet_<type>') or audio-only ('a_resnet')
    classification network on ``model`` and return the final logits blob(s).

    ``model_name`` selects the architecture: an 'av_resnet_*' prefix builds
    the two-stream network via create_av_resnet (block type looked up in
    ``model_blocktype``), while 'a_resnet' builds the acoustic-only ResNet.
    Depths <= 34 use simple residual blocks; deeper ones use bottlenecks.

    Raises:
        ValueError: for an unknown ``model_name`` or an unsupported
            ``crop_size``. (The original code logged and then crashed with a
            NameError on undefined ``last_out`` for unknown names, and used
            ``assert 0`` — stripped under ``python -O`` — for crop sizes.)

    NOTE(review): ``use_dropout`` is accepted but, as in the original, not
    forwarded to create_av_resnet — confirm whether that is intentional.
    """
    if model_name[0:9] == 'av_resnet':
        model_type = model_name[10:]
        block_type = model_blocktype[model_type]

        if model_depth <= 18 or model_depth == 34:
            transformation_type = 'simple_block'
        else:
            transformation_type = 'bottleneck'

        # 112/224 are the native crops; 128/256 are only valid with a fully
        # convolutional prediction head.
        if crop_size == 112 or crop_size == 128:
            assert use_convolutional_pred or crop_size == 112
            final_spatial_kernel = 7
        elif crop_size == 224 or crop_size == 256:
            assert use_convolutional_pred or crop_size == 224
            if use_pool1:
                final_spatial_kernel = 7
            else:
                final_spatial_kernel = 14
        else:
            raise ValueError('unknown crop size {}'.format(crop_size))

        last_out = create_av_resnet(
            model=model,
            data=data,
            num_input_channels=num_channels,
            num_labels=num_labels,
            acoustic_data=acoustic_data,
            is_test=is_test,
            use_full_ft=use_full_ft,
            no_bias=True,
            final_spatial_kernel=final_spatial_kernel,
            # conv1 and the four stages each halve time resolution once the
            # temporal stride is accounted for.
            final_temporal_kernel=int(clip_length / 8 / conv1_temporal_stride),
            model_depth=model_depth,
            block_type=block_type,
            transformation_type=transformation_type,
            bottleneck_multiplier=bottleneck_multiplier,
            channel_multiplier=channel_multiplier,
            conv1_temporal_stride=conv1_temporal_stride,
            conv1_temporal_kernel=conv1_temporal_kernel,
            clip_length=clip_length,
            use_convolutional_pred=use_convolutional_pred,
            use_pool1=use_pool1,
            audio_input_3d=audio_input_3d,
            g_blend=g_blend,
        )
    elif model_name == "a_resnet":
        if model_depth <= 18 or model_depth == 34:
            transformation_type = 'simple_block'
        else:
            transformation_type = 'bottleneck'
        last_out = create_acoustic_resnet(
            model=model,
            data=data,
            num_labels=num_labels,
            is_test=is_test,
            no_bias=True,
            model_depth=model_depth,
            transformation_type=transformation_type,
            audio_input_3d=audio_input_3d,
        )
    else:
        # Previously: log.info(...) followed by `return last_out`, which
        # raised a confusing NameError. Fail loudly and clearly instead.
        raise ValueError('Unknown model name {}'.format(model_name))
    return last_out
# 3d or (2+1)d audio-visual resnets
# visual input: 3 x t*8 x 112(224) x 112(224)
# audio input: 1 x 100 x 40 (channel x time x frequency)
def create_av_resnet(
    model,
    data,
    num_input_channels,
    num_labels,
    acoustic_data="logmels",
    label=None,
    is_test=False,
    use_full_ft=True,
    no_bias=0,
    final_spatial_kernel=7,
    final_temporal_kernel=1,
    model_depth=18,
    block_type='3d',
    transformation_type='simple_block',
    bottleneck_multiplier=1.0,
    channel_multiplier=1.0,
    use_dropout=False,
    conv1_temporal_stride=1,
    conv1_temporal_kernel=3,
    spatial_bn_mom=0.9,
    clip_length=8,
    use_convolutional_pred=False,
    use_pool1=False,
    audio_input_3d=False,
    g_blend=False,
):
    """Two-stream audio-visual ResNet.

    Builds a 3D/(2+1)D visual ResNet and a 2D acoustic ResNet side by side
    (blob prefixes 'v_' / 'a_'), pools each stream, concatenates them and
    classifies with a two-layer MLP head ('av_fc1'/'av_fc2'). With
    ``use_convolutional_pred`` the head is 1x1x1-convolutional instead of
    fully connected. Returns the final logits blob, or — when ``g_blend``
    is set on the non-convolutional path — the list
    ``[audio_logits, visual_logits, joint_logits]`` for gradient blending.

    NOTE(review): ``label`` and ``use_dropout``'s default path are accepted
    for signature compatibility; ``label`` is unused here.
    """
    # sanity checking of model params
    assert conv1_temporal_kernel == 3 or conv1_temporal_kernel == 5

    # conv1 + maxpool for visual model
    if block_type != '2.5d' and block_type != '2.5d-sep':
        model.ConvNd(
            data,
            'v_conv1',
            num_input_channels,
            64,
            [conv1_temporal_kernel, 7, 7],
            weight_init=("MSRAFill", {}),
            strides=[conv1_temporal_stride, 2, 2],
            pads=[1 if conv1_temporal_kernel == 3 else 2, 3, 3] * 2,
            no_bias=no_bias
        )
    else:
        # (2+1)D stem: a spatial 1x7x7 conv into 45 channels followed by a
        # temporal kx1x1 conv, approximating the full 3D conv1.
        model.ConvNd(
            data,
            'v_conv1_middle',
            num_input_channels,
            45,
            [1, 7, 7],
            weight_init=("MSRAFill", {}),
            strides=[1, 2, 2],
            pads=[0, 3, 3] * 2,
            no_bias=no_bias
        )

        model.SpatialBN(
            'v_conv1_middle',
            'v_conv1_middle_spatbn_relu',
            45,
            epsilon=1e-3,
            momentum=spatial_bn_mom,
            is_test=is_test
        )
        model.Relu('v_conv1_middle_spatbn_relu', 'v_conv1_middle_spatbn_relu')

        model.ConvNd(
            'v_conv1_middle_spatbn_relu',
            'v_conv1',
            45,
            64,
            [conv1_temporal_kernel, 1, 1],
            weight_init=("MSRAFill", {}),
            strides=[conv1_temporal_stride, 1, 1],
            pads=[1 if conv1_temporal_kernel == 3 else 2, 0, 0] * 2,
            no_bias=no_bias
        )

    model.SpatialBN(
        'v_conv1',
        'v_conv1_spatbn_relu',
        64,
        epsilon=1e-3,
        momentum=spatial_bn_mom,
        is_test=is_test
    )
    v_conv1 = model.Relu('v_conv1_spatbn_relu', 'v_conv1_spatbn_relu')

    if use_pool1:
        v_conv1 = model.MaxPool(
            'v_conv1_spatbn_relu',
            'pool1',
            kernels=[1, 3, 3],
            strides=[1, 2, 2],
            pads=[0, 1, 1] * 2,
        )

    # conv1 equivalent of audio model
    # it is approximated by a bunch of 3x3 kernels
    if audio_input_3d:
        # Collapse 3D audio input to single-channel 2D by averaging the
        # trailing axis after an NCHW -> NHWC swap.
        acoustic_data_swap = model.NCHW2NHWC(
            acoustic_data, acoustic_data + "_NHWC")
        acoustic_data_greyscale = model.ReduceBackMean(
            acoustic_data_swap,
            acoustic_data_swap + '_c_pool',
            num_reduce_dim=1)
        acoustic_data = acoustic_data_greyscale
    a_conv1 = model.Conv(acoustic_data, 'a_conv1', 1, 16, kernel=3, pad=1)
    a_builder = AudioModelBuilder(
        model, a_conv1, no_bias=no_bias,
        is_test=is_test, spatial_bn_mom=spatial_bn_mom, prefix='a_')

    a_builder.add_simple_block(16, 32, down_sampling=True)
    a_builder.add_simple_block(32, 32)
    a_builder.add_simple_block(32, 32)

    (n1, n2, n3, n4) = BLOCK_CONFIG[model_depth]

    # Residual blocks...
    v_builder = VideoModelBuilder(
        model, v_conv1, no_bias=no_bias,
        is_test=is_test, spatial_bn_mom=spatial_bn_mom, prefix='v_')

    if transformation_type == 'simple_block':
        v_transformation = v_builder.add_simple_block
        a_transformation = a_builder.add_simple_block
    elif transformation_type == 'bottleneck':
        v_transformation = v_builder.add_bottleneck
        a_transformation = a_builder.add_bottleneck
    else:
        print('Unknown transformation type...')

    if model_depth <= 34:
        filter_config = SHALLOW_FILTER_CONFIG
    else:
        filter_config = DEEP_FILTER_CONFIG
    # NOTE(review): np.int was removed in NumPy >= 1.24; use int when
    # upgrading the NumPy dependency.
    filter_config = np.multiply(
        filter_config, channel_multiplier).astype(np.int)

    # conv_2x
    v_transformation(
        64, filter_config[0][0], int(bottleneck_multiplier * filter_config[0][1]),
        block_type=block_type)
    a_transformation(32, filter_config[0][0], filter_config[0][1],
                     down_sampling=True)
    for _ in range(n1 - 1):
        v_transformation(
            filter_config[0][0], filter_config[0][0],
            int(bottleneck_multiplier * filter_config[0][1]),
            block_type=block_type)
        a_transformation(
            filter_config[0][0], filter_config[0][0], filter_config[0][1])

    # conv_3x
    v_transformation(
        filter_config[0][0], filter_config[1][0],
        int(bottleneck_multiplier * filter_config[1][1]),
        down_sampling=True, block_type=block_type)
    a_transformation(
        filter_config[0][0], filter_config[1][0], filter_config[1][1],
        down_sampling=True)
    for _ in range(n2 - 1):
        v_transformation(
            filter_config[1][0], filter_config[1][0],
            int(bottleneck_multiplier * filter_config[1][1]),
            block_type=block_type)
        a_transformation(
            filter_config[1][0], filter_config[1][0], filter_config[1][1])

    # conv_4x
    v_transformation(
        filter_config[1][0], filter_config[2][0],
        int(bottleneck_multiplier * filter_config[2][1]),
        down_sampling=True, block_type=block_type)
    a_transformation(
        filter_config[1][0], filter_config[2][0], filter_config[2][1],
        down_sampling=True)
    for _ in range(n3 - 1):
        v_transformation(
            filter_config[2][0], filter_config[2][0],
            int(bottleneck_multiplier * filter_config[2][1]),
            block_type=block_type)
        a_transformation(
            filter_config[2][0], filter_config[2][0], filter_config[2][1])

    # conv_5x
    # Short clips have no temporal extent left to halve at this stage.
    if clip_length < 8:
        v_transformation(
            filter_config[2][0], filter_config[3][0],
            int(bottleneck_multiplier * filter_config[3][1]),
            down_sampling=True, down_sampling_temporal=False,
            block_type=block_type)
    else:
        v_transformation(
            filter_config[2][0], filter_config[3][0],
            int(bottleneck_multiplier * filter_config[3][1]),
            down_sampling=True, block_type=block_type)
    a_transformation(
        filter_config[2][0], filter_config[3][0], filter_config[3][1],
        down_sampling=True)
    for _ in range(n4 - 1):
        v_transformation(
            filter_config[3][0], filter_config[3][0],
            int(bottleneck_multiplier * filter_config[3][1]),
            block_type=block_type)
        a_transformation(
            filter_config[3][0], filter_config[3][0], filter_config[3][1])

    # Final layers
    # final pool for visual model
    v_builder.prev_blob = model.AveragePool(
        v_builder.prev_blob,
        'v_final_avg',
        kernels=[
            final_temporal_kernel,
            final_spatial_kernel,
            final_spatial_kernel
        ],
        strides=[1, 1, 1],
    )

    # final pool for audio model
    a_builder.prev_blob = model.MaxPool(
        a_builder.prev_blob,
        'a_final_avg',
        kernels=[4, 2],
        stride=1
    )
    last_a_layer = a_builder.prev_blob
    last_v_layer = v_builder.prev_blob

    if use_convolutional_pred:
        assert is_test
        # Broadcast the pooled audio features over the visual head's 2x2
        # spatial grid before concatenation.
        a_last_3D = model.ExpandDims(
            last_a_layer, last_a_layer + '_3D', dims=[4]
        )
        a_last_tile_1 = model.Tile(
            a_last_3D,
            a_last_3D + '_tiled_1',
            tiles=2,
            axis=3
        )
        a_last_tile_2 = model.Tile(
            a_last_tile_1,
            a_last_3D + '_tiled_2',
            tiles=2,
            axis=4
        )
        av_concat = model.Concat(
            [last_v_layer, a_last_tile_2],
            'av_concat',
            axis=1
        )
        if use_dropout:
            dropout = brew.dropout(
                model, av_concat, 'dropout', is_test=is_test
            )
        else:
            dropout = av_concat

        # Freeze the backbone when only the head is being fine-tuned.
        if not use_full_ft:
            dropout = model.StopGradient(dropout, dropout)

        dim = 2 * filter_config[3][0]
        fc_dim = int(dim / 2)
        fc1 = model.ConvNd(
            dropout,
            'av_fc1',
            dim,
            fc_dim,
            [1, 1, 1],
            weight_init=("MSRAFill", {}),
            strides=[1, 1, 1],
            pads=[0, 0, 0] * 2,
            no_bias=False
        )
        relu1 = brew.relu(model, fc1, fc1)
        fc2 = model.ConvNd(
            relu1,
            'av_fc2',
            fc_dim,
            fc_dim,
            [1, 1, 1],
            weight_init=("MSRAFill", {}),
            strides=[1, 1, 1],
            pads=[0, 0, 0] * 2,
            no_bias=False
        )
        relu2 = brew.relu(model, fc2, fc2)
        last_out = model.ConvNd(
            relu2,
            'last_out_L{}'.format(num_labels),
            fc_dim,
            num_labels,
            [1, 1, 1],
            weight_init=("MSRAFill", {}),
            strides=[1, 1, 1],
            pads=[0, 0, 0] * 2,
            no_bias=False
        )
        return last_out
    else:
        # reduce to 4D tensor
        v_builder.prev_blob = model.Squeeze(
            v_builder.prev_blob, 'v_final_avg_squeezed', dims=[4])
        last_v_layer = v_builder.prev_blob
        av_concat = model.Concat(
            [last_v_layer, last_a_layer],
            'av_concat',
            axis=1
        )
        if use_dropout:
            dropout = brew.dropout(
                model, av_concat, 'dropout', is_test=is_test
            )
        else:
            dropout = av_concat

        dim = 2 * filter_config[3][0]
        fc_dim = int(dim / 2)
        fc1 = brew.fc(model, dropout, 'av_fc1', dim, fc_dim)
        relu1 = brew.relu(model, fc1, fc1)
        fc2 = brew.fc(model, relu1, 'av_fc2', fc_dim, fc_dim)
        relu2 = brew.relu(model, fc2, fc2)
        last_out = brew.fc(
            model, relu2, 'last_out_L{}'.format(num_labels), fc_dim, num_labels)

        # Per-stream heads for gradient-blending training.
        if g_blend:
            a_last_out = brew.fc(
                model,
                last_a_layer,
                'a_last_out_L{}'.format(num_labels),
                filter_config[3][0],
                num_labels,
            )
            v_last_out = brew.fc(
                model,
                last_v_layer,
                'v_last_out_L{}'.format(num_labels),
                filter_config[3][0],
                num_labels,
            )
            return [a_last_out, v_last_out, last_out]

        return last_out
# audio input: 1 x 100 x 40 (channel x time x frequency)
def create_acoustic_resnet(
    model,
    data,
    num_labels,
    label=None,
    is_test=False,
    no_bias=0,
    final_spatial_kernel=2,
    final_temporal_kernel=4,
    model_depth=50,
    transformation_type='simple_block',
    spatial_bn_mom=0.9,
    audio_input_3d=False,
):
    """Audio-only 2D ResNet over log-mel spectrogram input.

    Builds a 16-channel conv stem, four residual stages sized by
    ``BLOCK_CONFIG[model_depth]``, a max-pool head and a fully connected
    classifier; returns the final logits blob ('last_out_L<num_labels>').

    Raises:
        ValueError: for an unknown ``transformation_type`` (the original
            printed a message and later crashed with a NameError).

    NOTE(review): ``label`` is accepted for signature compatibility but is
    unused here.
    """
    if audio_input_3d:
        # Collapse a 3D audio blob to the single-channel 2D 'logmels' input
        # by averaging over the trailing axis after an NCHW -> NHWC swap.
        # (The original wrapped this in two identical nested
        # `if audio_input_3d:` tests.)
        data_swap = model.NCHW2NHWC(
            data, data + "_NHWC")
        data = model.ReduceBackMean(
            data_swap,
            'logmels',
            num_reduce_dim=1)
    conv1 = model.Conv(data, 'conv1', 1, 16, kernel=3, pad=1)
    builder = AudioModelBuilder(
        model, conv1, no_bias=no_bias,
        is_test=is_test, spatial_bn_mom=spatial_bn_mom)

    # Stem: three simple blocks growing 16 -> 32 channels.
    builder.add_simple_block(16, 32, down_sampling=True)
    builder.add_simple_block(32, 32)
    builder.add_simple_block(32, 32)

    (n1, n2, n3, n4) = BLOCK_CONFIG[model_depth]

    if transformation_type == 'simple_block':
        transformation = builder.add_simple_block
    elif transformation_type == 'bottleneck':
        transformation = builder.add_bottleneck
    else:
        raise ValueError(
            'Unknown transformation type {}'.format(transformation_type))

    if model_depth <= 34:
        filter_config = SHALLOW_FILTER_CONFIG
    else:
        filter_config = DEEP_FILTER_CONFIG

    # conv_2x
    transformation(32, filter_config[0][0], filter_config[0][1],
                   down_sampling=True)
    for _ in range(n1 - 1):
        transformation(
            filter_config[0][0], filter_config[0][0], filter_config[0][1])
    # conv_3x
    transformation(
        filter_config[0][0], filter_config[1][0], filter_config[1][1],
        down_sampling=True)
    for _ in range(n2 - 1):
        transformation(
            filter_config[1][0], filter_config[1][0], filter_config[1][1])
    # conv_4x
    transformation(
        filter_config[1][0], filter_config[2][0], filter_config[2][1],
        down_sampling=True)
    for _ in range(n3 - 1):
        transformation(
            filter_config[2][0], filter_config[2][0], filter_config[2][1])
    # conv_5x
    transformation(
        filter_config[2][0], filter_config[3][0], filter_config[3][1],
        down_sampling=True)
    for _ in range(n4 - 1):
        transformation(
            filter_config[3][0], filter_config[3][0], filter_config[3][1])

    # Head: pool the remaining time x frequency grid, then classify.
    # NOTE(review): despite its name, 'final_avg' is a MaxPool — the blob
    # name is kept for checkpoint compatibility.
    final_avg = model.MaxPool(
        builder.prev_blob,
        'final_avg',
        kernels=[final_temporal_kernel, final_spatial_kernel],
        stride=1
    )
    last_out = brew.fc(
        model,
        final_avg,
        'last_out_L{}'.format(num_labels),
        filter_config[3][0],
        num_labels
    )
    return last_out
| 9,332 |
1,350 | <filename>sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVaults.java
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.dataprotection.models;
import com.azure.core.http.rest.PagedIterable;
import com.azure.core.http.rest.Response;
import com.azure.core.util.Context;
/** Resource collection API of BackupVaults. */
public interface BackupVaults {
/**
* Returns resource collection belonging to a subscription.
*
* @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return backupVaultResourceList.
*/
PagedIterable<BackupVaultResource> list();
/**
* Returns resource collection belonging to a subscription.
*
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return backupVaultResourceList.
*/
PagedIterable<BackupVaultResource> list(Context context);
/**
* Returns resource collection belonging to a resource group.
*
* @param resourceGroupName The name of the resource group where the backup vault is present.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return backupVaultResourceList.
*/
PagedIterable<BackupVaultResource> listByResourceGroup(String resourceGroupName);
/**
* Returns resource collection belonging to a resource group.
*
* @param resourceGroupName The name of the resource group where the backup vault is present.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return backupVaultResourceList.
*/
PagedIterable<BackupVaultResource> listByResourceGroup(String resourceGroupName, Context context);
/**
* Returns a resource belonging to a resource group.
*
* @param resourceGroupName The name of the resource group where the backup vault is present.
* @param vaultName The name of the backup vault.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return backupVault Resource.
*/
BackupVaultResource getByResourceGroup(String resourceGroupName, String vaultName);
/**
* Returns a resource belonging to a resource group.
*
* @param resourceGroupName The name of the resource group where the backup vault is present.
* @param vaultName The name of the backup vault.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return backupVault Resource.
*/
Response<BackupVaultResource> getByResourceGroupWithResponse(
String resourceGroupName, String vaultName, Context context);
/**
* Deletes a BackupVault resource from the resource group.
*
* @param resourceGroupName The name of the resource group where the backup vault is present.
* @param vaultName The name of the backup vault.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
*/
void deleteByResourceGroup(String resourceGroupName, String vaultName);
/**
* Deletes a BackupVault resource from the resource group.
*
* @param resourceGroupName The name of the resource group where the backup vault is present.
* @param vaultName The name of the backup vault.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
Response<Void> deleteWithResponse(String resourceGroupName, String vaultName, Context context);
/**
* API to check for resource name availability.
*
* @param resourceGroupName The name of the resource group where the backup vault is present.
* @param location The location in which uniqueness will be verified.
* @param parameters Check name availability request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return checkNameAvailabilityResult.
*/
CheckNameAvailabilityResult checkNameAvailability(
String resourceGroupName, String location, CheckNameAvailabilityRequest parameters);
/**
* API to check for resource name availability.
*
* @param resourceGroupName The name of the resource group where the backup vault is present.
* @param location The location in which uniqueness will be verified.
* @param parameters Check name availability request.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return checkNameAvailabilityResult.
*/
Response<CheckNameAvailabilityResult> checkNameAvailabilityWithResponse(
String resourceGroupName, String location, CheckNameAvailabilityRequest parameters, Context context);
/**
* Returns a resource belonging to a resource group.
*
* @param id the resource ID.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return backupVault Resource.
*/
BackupVaultResource getById(String id);
/**
* Returns a resource belonging to a resource group.
*
* @param id the resource ID.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return backupVault Resource.
*/
Response<BackupVaultResource> getByIdWithResponse(String id, Context context);
    /**
     * Deletes a BackupVault resource from the resource group, looked up by its
     * fully qualified Azure resource ID.
     *
     * @param id the fully qualified Azure resource ID of the BackupVault resource.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    void deleteById(String id);
    /**
     * Deletes a BackupVault resource from the resource group, looked up by its
     * fully qualified Azure resource ID, and returns the full HTTP response.
     *
     * @param id the fully qualified Azure resource ID of the BackupVault resource.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the HTTP response for the delete operation.
     */
    Response<Void> deleteByIdWithResponse(String id, Context context);
    /**
     * Begins the fluent definition of a new BackupVaultResource resource.
     *
     * @param name resource name for the new BackupVault.
     * @return the first stage of the new BackupVaultResource definition.
     */
    BackupVaultResource.DefinitionStages.Blank define(String name);
}
| 2,744 |
412 | <gh_stars>100-1000
/**
 * Exercises StringBuilder.append with Integer, Object-cast String, and null
 * arguments, asserting the accumulated string after each append.
 *
 * NOTE(review): this looks like a fixture for a string-propagation analysis
 * tool (testNoPropagation asserts a value that cannot be proven for an
 * arbitrary Object) — confirm before "fixing" anything here.
 */
public class Test
{
    public void testSuccess()
    {
        StringBuilder sb = new StringBuilder("abc");
        // NOTE(review): new Integer(int) is deprecated since Java 9 in favor of
        // Integer.valueOf(int); left as-is in case the fixture deliberately
        // targets the boxing constructor — confirm intent before changing.
        Integer i = new Integer(3);
        sb.append(i);
        assert sb.toString().equals("abc3");
        sb.append((Object)"xyz");
        assert sb.toString().equals("abc3xyz");
        sb.append((Object)null);
        // append((Object)null) appends the literal text "null".
        assert sb.toString().equals("abc3xyznull");
    }
    public void testNoPropagation(Object obj)
    {
        StringBuilder sb = new StringBuilder();
        // Not provable for an arbitrary obj; presumably intentional for the
        // analysis under test.
        assert obj.toString().equals("");
    }
}
| 197 |
14,668 | <reponame>zealoussnow/chromium
// Copyright (c) 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/ui/extensions/settings_api_bubble_helpers.h"
#include <utility>
#include "build/build_config.h"
#include "build/chromeos_buildflags.h"
#include "chrome/browser/extensions/extension_web_ui.h"
#include "chrome/browser/extensions/settings_api_bubble_delegate.h"
#include "chrome/browser/extensions/settings_api_helpers.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/browser/ui/browser_dialogs.h"
#include "chrome/browser/ui/browser_finder.h"
#include "chrome/browser/ui/browser_window.h"
#include "chrome/browser/ui/extensions/extension_message_bubble_bridge.h"
#include "chrome/browser/ui/extensions/extension_settings_overridden_dialog.h"
#include "chrome/browser/ui/extensions/extensions_container.h"
#include "chrome/browser/ui/extensions/settings_overridden_params_providers.h"
#include "chrome/browser/ui/tabs/tab_strip_model.h"
#include "chrome/browser/ui/toolbar/toolbar_actions_model.h"
#include "chrome/common/extensions/manifest_handlers/settings_overrides_handler.h"
#include "chrome/common/url_constants.h"
#include "components/prefs/pref_registry.h"
#include "components/prefs/pref_registry_simple.h"
#include "content/public/browser/browser_url_handler.h"
#include "content/public/browser/navigation_entry.h"
#include "extensions/browser/extension_prefs.h"
#include "extensions/common/constants.h"
namespace extensions {
namespace {
// Whether the NTP post-install UI is enabled. By default, this is limited to
// Windows, Mac, and ChromeOS, but can be overridden for testing.
// (NTP = "New Tab Page".)
#if defined(OS_WIN) || defined(OS_MAC) || BUILDFLAG(IS_CHROMEOS_ASH)
bool g_ntp_post_install_ui_enabled = true;
#else
bool g_ntp_post_install_ui_enabled = false;
#endif
// Whether to acknowledge existing extensions overriding the NTP for the active
// profile. Active on MacOS to rollout the NTP bubble without prompting for
// previously-installed extensions.
// TODO(devlin): This has been rolled out on Mac for awhile; we can flip this to
// false (and keep the logic around for when/if we decide to expand the warning
// treatment to Linux).
bool g_acknowledge_existing_ntp_extensions =
#if defined(OS_MAC)
    true;
#else
    false;
#endif
// The name of the preference indicating whether existing NTP extensions have
// been automatically acknowledged. Registered in
// RegisterSettingsOverriddenUiPrefs() below.
const char kDidAcknowledgeExistingNtpExtensions[] =
    "ack_existing_ntp_extensions";
#if defined(OS_WIN) || defined(OS_MAC)
// Shows the settings-API override bubble of the given |type| in |browser|,
// unless another bubble is already active or the delegate decides it should
// not be shown.
void ShowSettingsApiBubble(SettingsApiOverrideType type,
                           Browser* browser) {
  ToolbarActionsModel* model = ToolbarActionsModel::Get(browser->profile());
  if (model->has_active_bubble())
    return;
  std::unique_ptr<ExtensionMessageBubbleController> settings_api_bubble(
      new ExtensionMessageBubbleController(
          new SettingsApiBubbleDelegate(browser->profile(), type), browser));
  if (!settings_api_bubble->ShouldShow())
    return;
  settings_api_bubble->SetIsActiveBubble();
  std::unique_ptr<ToolbarActionsBarBubbleDelegate> bridge(
      new ExtensionMessageBubbleBridge(std::move(settings_api_bubble)));
  browser->window()->GetExtensionsContainer()->ShowToolbarActionBubbleAsync(
      std::move(bridge));
}
#endif
} // namespace
// Whether a given ntp-overriding extension has been acknowledged by the user.
// The terse key value is because the pref has migrated between code layers.
// Stored as a per-extension pref (written in
// AcknowledgePreExistingNtpExtensions()).
const char kNtpOverridingExtensionAcknowledged[] = "ack_ntp_bubble";
// Test hook: overrides the compile-time default for whether the NTP
// post-install UI is shown.
void SetNtpPostInstallUiEnabledForTesting(bool enabled) {
  g_ntp_post_install_ui_enabled = enabled;
}
// Test hook: the returned AutoReset restores the previous value when it goes
// out of scope.
base::AutoReset<bool> SetAcknowledgeExistingNtpExtensionsForTesting(
    bool should_acknowledge) {
  return base::AutoReset<bool>(&g_acknowledge_existing_ntp_extensions,
                               should_acknowledge);
}
// Marks every currently-enabled extension that overrides chrome://newtab as
// acknowledged, so already-installed extensions don't trigger the warning UI.
// Runs at most once per profile (guarded by kDidAcknowledgeExistingNtpExtensions).
void AcknowledgePreExistingNtpExtensions(Profile* profile) {
  DCHECK(g_acknowledge_existing_ntp_extensions);
  ExtensionRegistry* registry = ExtensionRegistry::Get(profile);
  PrefService* profile_prefs = profile->GetPrefs();
  // Only acknowledge existing extensions once per profile.
  if (profile_prefs->GetBoolean(kDidAcknowledgeExistingNtpExtensions))
    return;
  profile_prefs->SetBoolean(kDidAcknowledgeExistingNtpExtensions, true);
  ExtensionPrefs* prefs = ExtensionPrefs::Get(profile);
  for (const auto& extension : registry->enabled_extensions()) {
    const URLOverrides::URLOverrideMap& overrides =
        URLOverrides::GetChromeURLOverrides(extension.get());
    // Acknowledge only extensions that actually override the NTP host.
    if (overrides.find(chrome::kChromeUINewTabHost) != overrides.end()) {
      prefs->UpdateExtensionPref(extension->id(),
                                 kNtpOverridingExtensionAcknowledged,
                                 std::make_unique<base::Value>(true));
    }
  }
}
// Registers the profile-level bookkeeping pref used to one-shot acknowledge
// pre-existing NTP-overriding extensions.
void RegisterSettingsOverriddenUiPrefs(PrefRegistrySimple* registry) {
  registry->RegisterBooleanPref(kDidAcknowledgeExistingNtpExtensions, false,
                                PrefRegistry::NO_REGISTRATION_FLAGS);
}
// Shows the "home page overridden" bubble where supported; a no-op on other
// platforms (see the #if).
void MaybeShowExtensionControlledHomeNotification(Browser* browser) {
#if defined(OS_WIN) || defined(OS_MAC)
  ShowSettingsApiBubble(BUBBLE_TYPE_HOME_PAGE, browser);
#endif
}
// Possibly shows the "default search overridden" dialog after the user issues
// a default-search query. No-op on unsupported platforms, for non-search
// omnibox matches, for explicit other-engine searches, or when the dialog's
// own policy declines to show.
void MaybeShowExtensionControlledSearchNotification(
    content::WebContents* web_contents,
    AutocompleteMatch::Type match_type) {
#if defined(OS_WIN) || defined(OS_MAC)
  if (!AutocompleteMatch::IsSearchType(match_type) ||
      match_type == AutocompleteMatchType::SEARCH_OTHER_ENGINE) {
    return;
  }
  Browser* browser = chrome::FindBrowserWithWebContents(web_contents);
  if (!browser)
    return;
  absl::optional<ExtensionSettingsOverriddenDialog::Params> params =
      settings_overridden_params::GetSearchOverriddenParams(browser->profile());
  if (!params)
    return;
  auto dialog = std::make_unique<ExtensionSettingsOverriddenDialog>(
      std::move(*params), browser->profile());
  if (!dialog->ShouldShow())
    return;
  chrome::ShowExtensionSettingsOverriddenDialog(std::move(dialog), browser);
#endif
}
// Possibly shows the "new tab page overridden" dialog when |web_contents| is
// displaying an extension-controlled NTP. Bails out early unless the feature
// is enabled, the visible URL is an extension-scheme page that the rewritten
// chrome://newtab URL resolves to, no other bubble is active, and the dialog's
// own policy allows showing.
void MaybeShowExtensionControlledNewTabPage(
    Browser* browser, content::WebContents* web_contents) {
  if (!g_ntp_post_install_ui_enabled)
    return;
  // Acknowledge existing extensions if necessary.
  if (g_acknowledge_existing_ntp_extensions)
    AcknowledgePreExistingNtpExtensions(browser->profile());
  // Jump through a series of hoops to see if the web contents is pointing to
  // an extension-controlled NTP.
  // TODO(devlin): Some of this is redundant with the checks in the bubble/
  // dialog. We should consolidate, but that'll be simpler once we only have
  // one UI option. In the meantime, extra checks don't hurt.
  content::NavigationEntry* entry =
      web_contents->GetController().GetVisibleEntry();
  if (!entry)
    return;
  GURL active_url = entry->GetURL();
  if (!active_url.SchemeIs(extensions::kExtensionScheme))
    return;  // Not a URL that we care about.
  // See if the current active URL matches a transformed NewTab URL.
  GURL ntp_url(chrome::kChromeUINewTabURL);
  content::BrowserURLHandler::GetInstance()->RewriteURLIfNecessary(
      &ntp_url, web_contents->GetBrowserContext());
  if (ntp_url != active_url)
    return;  // Not being overridden by an extension.
  Profile* const profile = browser->profile();
  ToolbarActionsModel* model = ToolbarActionsModel::Get(profile);
  if (model->has_active_bubble())
    return;
  absl::optional<ExtensionSettingsOverriddenDialog::Params> params =
      settings_overridden_params::GetNtpOverriddenParams(profile);
  if (!params)
    return;
  auto dialog = std::make_unique<ExtensionSettingsOverriddenDialog>(
      std::move(*params), profile);
  if (!dialog->ShouldShow())
    return;
  chrome::ShowExtensionSettingsOverriddenDialog(std::move(dialog), browser);
}
} // namespace extensions
| 2,683 |
364 | /*
* Copyright 2014-2019 <NAME>. Distributed under the Boost
* Software License, Version 1.0. (See accompanying file
* LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
*/
//[oglplus_bitfield
template <typename Bits>
class Bitfield {
public:
    Bitfield(); /*< Constructs an empty (zero) bitfield. >*/
    Bitfield(Bits _bit); /*<
    Construct a bitfield from a single value of [^Bits].
    >*/
    Bitfield(Bits _bit_a, Bits _bit_b); /*<
    Construct a bitfield from a pair of [^Bits].
    >*/
    Bitfield(const std::initializer_list<Bits>& bits); /*<
    Construct a bitfield from an initializer list of [^Bits].
    >*/
    template <typename Iter>
    Bitfield(Iter pos, Iter end); /*<
    Construction from a pair of iterators through [^Bits].
    >*/
    friend Bitfield operator|(Bitfield bf, Bits b); /*<
    Bitwise-or operator for combining [^Bits] into a bitfield.
    >*/
    Bitfield& operator|=(Bits b); /*<
    Bitwise-or operator for combining [^Bits] into a bitfield.
    >*/
    bool Test(Bits b) const; /*<
    These functions test whether the specified bit is set in the bitfield.
    >*/
    bool Has(Bits b) const;
};
template <typename Bits>
__Bitfield<Bits> operator|(Bits b1, Bits b2);
//]
| 459 |
6,557 | <filename>packages/@react-aria/combobox/intl/lt-LT.json
{
"buttonLabel": "Rodyti pasiūlymus",
"countAnnouncement": "Yra {optionCount, plural, one {# parinktis} other {# parinktys (-ių)}}.",
"focusAnnouncement": "{isGroupChange, select, true {Įvesta grupė {groupTitle}, su {groupCount, plural, one {# parinktimi} other {# parinktimis (-ių)}}. } other {}}{optionText}{isSelected, select, true {, pasirinkta} other {}}",
"listboxLabel": "Pasiūlymai",
"selectedAnnouncement": "{optionText}, pasirinkta"
}
| 197 |
711 | package com.java110.core.context;
import java.io.Serializable;
import java.net.UnknownHostException;
import java.util.Map;
/**
 * Interaction (transaction) log.
 *
 * id: business ID — the unique code for each interface interaction, e.g. an order ID or business ID
 * logContextId: context ID — each system has exactly one contextId per invocation, different every time; generated as a UUID
 * ip: current host IP
 * port: port the current application listens on
 * url: interaction address — must be identical on the caller and the service provider
 * serviceCode: service code
 * serviceName: service name
 * type: S = caller side, C = service-provider side
 * timestamp: timestamp
 * status: S = success, F = failure
 * messageHeaders: message header info — currently only the caller persists this log
 * message: message body — currently only the caller persists it
 * Created by wuxw on 2018/6/7.
 */
public interface TransactionLog extends Serializable {
    // Business ID.
    public String getTransactionId();
    /**
     * Log context ID.
     * @return the context ID for this data flow
     */
    public String getDataFlowId();
    // Host IP.
    public String getHostIp();
    // Listening port.
    public String getPort();
    public String getAppId();
    public String getUserId();
    public void setAppId(String appId);
    public void setUserId(String userId);
    // Service code; for a batch request, the first one is used.
    public String getServiceCode();
    // Service name; for a batch request, the first one is used.
    public String getServiceName();
    // Timestamp.
    public String getTimestamp();
    // Status code: S means success, anything else (F) means failure.
    /**
     * Current status. Written uniformly as S when calling and when receiving;
     * written as F on return if the call failed.
     * @return S for success, F (or anything else) for failure
     */
    public String getLogStatus();
    /**
     * Elapsed time of the interaction.
     * @return the cost time (units defined by the implementation — TODO confirm)
     */
    public long getCostTime();
    /**
     * Interface request message.
     * @return the request message payload
     */
    public String getRequestMessage();
    public String getResponseMessage();
    /**
     * Rebuilds a TransactionLog object; intended mainly for the service caller.
     * @return the rebuilt TransactionLog
     */
    public TransactionLog reBuilder(String appId,String userId,String requestMessage,String responseMessage,String logStatus,long costTime);
    /**
     * Rebuilds a TransactionLog object; intended mainly for the service provider.
     * @param requestMessage request data
     * @param responseMessage response data
     * @param logStatus status of the data interaction
     * @return the rebuilt TransactionLog
     */
    public TransactionLog reBuilder(String requestMessage,String responseMessage,String logStatus,long costTime);
    /**
     * Renders this log as JSON.
     * @return the JSON representation
     */
    public String toString();
}
| 1,318 |
521 | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is mozilla.org code.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef NSCATEGORYMANAGER_H
#define NSCATEGORYMANAGER_H
#include "prio.h"
#include "prlock.h"
#include "plarena.h"
#include "nsClassHashtable.h"
#include "nsICategoryManager.h"
#define NS_CATEGORYMANAGER_CLASSNAME "Category Manager"
/* 16d222a6-1dd2-11b2-b693-f38b02c021b2 */
#define NS_CATEGORYMANAGER_CID \
{ 0x16d222a6, 0x1dd2, 0x11b2, \
{0xb6, 0x93, 0xf3, 0x8b, 0x02, 0xc0, 0x21, 0xb2} }
/**
 * a "leaf-node", managed by the nsCategoryNode hashtable.
 *
 * we need to keep a "persistent value" (which will be written to the registry)
 * and a non-persistent value (for the current runtime): these are usually
 * the same, except when aPersist==PR_FALSE. The strings are permanently arena-
 * allocated, and will never go away.
 */
class CategoryLeaf : public nsDepCharHashKey
{
public:
  CategoryLeaf(const char* aKey)
    : nsDepCharHashKey(aKey),
      pValue(nsnull),
      nonpValue(nsnull) { }
  // Persistent value (written to the registry); arena-allocated, never freed.
  const char* pValue;
  // Non-persistent (runtime-only) value; arena-allocated, never freed.
  const char* nonpValue;
};
/**
 * CategoryNode keeps a hashtable of it's entries.
 * the CategoryNode itself is permanently allocated in
 * the arena.
 */
class CategoryNode
{
public:
  // Looks up the value for |aEntryName|; the result string is owned by the
  // arena, not the caller.
  NS_METHOD GetLeaf(const char* aEntryName,
                    char** _retval);
  // Adds (or, when aReplace is set, replaces) an entry; strings are copied
  // into |aArena|.
  NS_METHOD AddLeaf(const char* aEntryName,
                    const char* aValue,
                    PRBool aPersist,
                    PRBool aReplace,
                    char** _retval,
                    PLArenaPool* aArena);
  NS_METHOD DeleteLeaf(const char* aEntryName,
                       PRBool aDontPersist);
  // |mLock| guards all access to |mTable| (see Clear()/Count()).
  void Clear() {
    PR_Lock(mLock);
    mTable.Clear();
    PR_Unlock(mLock);
  }
  PRUint32 Count() {
    PR_Lock(mLock);
    PRUint32 tCount = mTable.Count();
    PR_Unlock(mLock);
    return tCount;
  }
  NS_METHOD Enumerate(nsISimpleEnumerator** _retval);
  PRBool WritePersistentEntries(PRFileDesc* fd, const char* aCategoryName);
  // CategoryNode is arena-allocated, with the strings
  static CategoryNode* Create(PLArenaPool* aArena);
  ~CategoryNode();
  // Arena-owned: delete is a no-op; the arena reclaims the memory.
  void operator delete(void*) { }
private:
  // NOTE(review): mLock starts null here; presumably Create() initializes it —
  // confirm in the .cpp.
  CategoryNode() : mLock(nsnull) { }
  void* operator new(size_t aSize, PLArenaPool* aArena);
  nsTHashtable<CategoryLeaf> mTable;
  PRLock* mLock;
};
/**
 * The main implementation of nsICategoryManager.
 *
 * This implementation is thread-safe.
 */
class nsCategoryManager
  : public nsICategoryManager
{
public:
  NS_DECL_ISUPPORTS
  NS_DECL_NSICATEGORYMANAGER
  /**
   * Write the categories to the XPCOM persistent registry.
   * This is to be used by nsComponentManagerImpl (and NO ONE ELSE).
   */
  NS_METHOD WriteCategoryManagerToRegistry(PRFileDesc* fd);
  nsCategoryManager() : mLock(nsnull) { }
private:
  friend class nsCategoryManagerFactory;
  // Factory entry point; instances are created only through the friend factory.
  static nsCategoryManager* Create();
  ~nsCategoryManager();
  // Returns the node for |aName|, or null if the category does not exist.
  CategoryNode* get_category(const char* aName);
  // Arena backing the CategoryNode objects and their strings.
  PLArenaPool mArena;
  // Maps category name -> CategoryNode; |mLock| guards access.
  nsClassHashtable<nsDepCharHashKey, CategoryNode> mTable;
  PRLock* mLock;
};
#endif
| 1,635 |
14,668 | <reponame>zealoussnow/chromium<filename>sql/statement.cc<gh_stars>1000+
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "sql/statement.h"
#include <stddef.h>
#include <stdint.h>
#include "base/containers/span.h"
#include "base/dcheck_is_on.h"
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/sequence_checker.h"
#include "base/strings/string_piece_forward.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "base/time/time.h"
#include "third_party/sqlite/sqlite3.h"
namespace sql {
// This empty constructor initializes our reference with an empty one so that
// we don't have to null-check the ref_ to see if the statement is valid: we
// only have to check the ref's validity bit.
// (With an invalid ref, the Bind*/Column* methods below become no-ops.)
Statement::Statement()
    : ref_(base::MakeRefCounted<Database::StatementRef>(nullptr,
                                                        nullptr,
                                                        false)) {}
// Takes ownership of |ref|; this statement's validity is the ref's validity.
Statement::Statement(scoped_refptr<Database::StatementRef> ref)
    : ref_(std::move(ref)) {}
Statement::~Statement() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // Free the resources associated with this statement. We assume there's only
  // one statement active for a given sqlite3_stmt at any time, so this won't
  // mess with anything.
  // Reset(true) also clears any values bound to the statement.
  Reset(true);
}
// Rebinds this object to a different prepared statement, resetting (and
// clearing the bindings of) the previous one first.
void Statement::Assign(scoped_refptr<Database::StatementRef> ref) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  Reset(true);
  ref_ = std::move(ref);
}
// Reverts this object to the invalid-ref state used by the default
// constructor.
void Statement::Clear() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  Assign(base::MakeRefCounted<Database::StatementRef>(nullptr, nullptr, false));
  succeeded_ = false;
}
// Returns whether the underlying sqlite3_stmt is usable; DLOG-fatal only when
// the statement was never valid (vs. invalidated by a closed database).
bool Statement::CheckValid() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // Allow operations to fail silently if a statement was invalidated
  // because the database was closed by an error handler.
  DLOG_IF(FATAL, !ref_->was_valid())
      << "Cannot call mutating statements on an invalid statement.";
  return is_valid();
}
// Runs one sqlite3_step() and routes the raw result code through CheckError().
// Returns SQLITE_ERROR without stepping when the statement is invalid.
int Statement::StepInternal() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (!CheckValid())
    return SQLITE_ERROR;
  absl::optional<base::ScopedBlockingCall> scoped_blocking_call;
  ref_->InitScopedBlockingCall(FROM_HERE, &scoped_blocking_call);
  int ret = sqlite3_step(ref_->stmt());
  return CheckError(ret);
}
// Executes a statement that returns no rows; success means the step reached
// SQLITE_DONE.
bool Statement::Run() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << "Run() must be called exactly once";
  run_called_ = true;
  DCHECK(!step_called_) << "Run() must not be mixed with Step()";
#endif  // DCHECK_IS_ON()
  return StepInternal() == SQLITE_DONE;
}
// Advances a query; returns true while a new row of data (SQLITE_ROW) is
// available to read via the Column* accessors.
bool Statement::Step() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << "Run() must not be mixed with Step()";
  step_called_ = true;
#endif  // DCHECK_IS_ON()
  return StepInternal() == SQLITE_ROW;
}
// Returns the statement to its initial state so it can be executed again;
// optionally clears bound values as well.
void Statement::Reset(bool clear_bound_vars) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  absl::optional<base::ScopedBlockingCall> scoped_blocking_call;
  ref_->InitScopedBlockingCall(FROM_HERE, &scoped_blocking_call);
  if (is_valid()) {
    if (clear_bound_vars)
      sqlite3_clear_bindings(ref_->stmt());
    // StepInternal() cannot track success because statements may be reset
    // before reaching SQLITE_DONE. Don't call CheckError() because
    // sqlite3_reset() returns the last step error, which StepInternal() already
    // checked.
    sqlite3_reset(ref_->stmt());
  }
  // Potentially release dirty cache pages if an autocommit statement made
  // changes.
  if (ref_->database())
    ref_->database()->ReleaseCacheMemoryIfNeeded(false);
  succeeded_ = false;
#if DCHECK_IS_ON()
  run_called_ = false;
  step_called_ = false;
#endif  // DCHECK_IS_ON()
}
// Reports whether the statement is valid and its last operation was flagged
// as succeeded (|succeeded_| is maintained elsewhere, e.g. via CheckError()).
bool Statement::Succeeded() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  return is_valid() && succeeded_;
}
// Binds SQL NULL to the given parameter. Note: SQLite bind indexes are
// 1-based while this API is 0-based, hence the "+ 1" (same in all Bind*
// methods below).
void Statement::BindNull(int param_index) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " must not be called after Run()";
  DCHECK(!step_called_) << __func__ << " must not be called after Step()";
#endif  // DCHECK_IS_ON()
  if (!is_valid())
    return;
  DCHECK_GE(param_index, 0);
  DCHECK_LT(param_index, sqlite3_bind_parameter_count(ref_->stmt()))
      << "Invalid parameter index";
  int sqlite_error_code = sqlite3_bind_null(ref_->stmt(), param_index + 1);
  DCHECK_EQ(sqlite_error_code, SQLITE_OK);
}
// Booleans are stored as the INTEGER values 0 and 1; delegate to BindInt64(),
// which owns the validity checks and the SQLite call.
void Statement::BindBool(int param_index, bool val) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  BindInt64(param_index, val ? int64_t{1} : int64_t{0});
}
// Binds a 32-bit integer; wider values should go through BindInt64().
void Statement::BindInt(int param_index, int val) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " must not be called after Run()";
  DCHECK(!step_called_) << __func__ << " must not be called after Step()";
#endif  // DCHECK_IS_ON()
  if (!is_valid())
    return;
  DCHECK_GE(param_index, 0);
  DCHECK_LT(param_index, sqlite3_bind_parameter_count(ref_->stmt()))
      << "Invalid parameter index";
  int sqlite_error_code = sqlite3_bind_int(ref_->stmt(), param_index + 1, val);
  DCHECK_EQ(sqlite_error_code, SQLITE_OK);
}
// Binds a 64-bit integer value.
void Statement::BindInt64(int param_index, int64_t val) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " must not be called after Run()";
  DCHECK(!step_called_) << __func__ << " must not be called after Step()";
#endif  // DCHECK_IS_ON()
  if (!is_valid())
    return;
  DCHECK_GE(param_index, 0);
  DCHECK_LT(param_index, sqlite3_bind_parameter_count(ref_->stmt()))
      << "Invalid parameter index";
  int sqlite_error_code =
      sqlite3_bind_int64(ref_->stmt(), param_index + 1, val);
  DCHECK_EQ(sqlite_error_code, SQLITE_OK);
}
// Binds a floating-point value (stored as SQLite REAL).
void Statement::BindDouble(int param_index, double val) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " must not be called after Run()";
  DCHECK(!step_called_) << __func__ << " must not be called after Step()";
#endif  // DCHECK_IS_ON()
  if (!is_valid())
    return;
  DCHECK_GE(param_index, 0);
  DCHECK_LT(param_index, sqlite3_bind_parameter_count(ref_->stmt()))
      << "Invalid parameter index";
  int sqlite_error_code =
      sqlite3_bind_double(ref_->stmt(), param_index + 1, val);
  DCHECK_EQ(sqlite_error_code, SQLITE_OK);
}
// Times are stored as INTEGER microseconds since the Windows epoch; the
// symmetric decoding lives in ColumnTime().
void Statement::BindTime(int param_index, base::Time val) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " must not be called after Run()";
  DCHECK(!step_called_) << __func__ << " must not be called after Step()";
#endif  // DCHECK_IS_ON()
  if (!is_valid())
    return;
  DCHECK_GE(param_index, 0);
  DCHECK_LT(param_index, sqlite3_bind_parameter_count(ref_->stmt()))
      << "Invalid parameter index";
  int64_t int_value = val.ToDeltaSinceWindowsEpoch().InMicroseconds();
  int sqlite_error_code =
      sqlite3_bind_int64(ref_->stmt(), param_index + 1, int_value);
  DCHECK_EQ(sqlite_error_code, SQLITE_OK);
}
// Binds a NUL-terminated UTF-8 string; the -1 length tells SQLite to measure
// the string itself.
void Statement::BindCString(int param_index, const char* val) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " must not be called after Run()";
  DCHECK(!step_called_) << __func__ << " must not be called after Step()";
#endif  // DCHECK_IS_ON()
  DCHECK(val);
  if (!is_valid())
    return;
  DCHECK_GE(param_index, 0);
  DCHECK_LT(param_index, sqlite3_bind_parameter_count(ref_->stmt()))
      << "Invalid parameter index";
  // If the string length is more than SQLITE_MAX_LENGTH (or the per-database
  // SQLITE_LIMIT_LENGTH limit), sqlite3_bind_text() fails with SQLITE_TOOBIG.
  //
  // We're not currently handling this error. SQLITE_MAX_LENGTH is set to the
  // default (1 billion bytes) in Chrome's SQLite build, so this is an unlikely
  // issue.
  int sqlite_error_code = sqlite3_bind_text(ref_->stmt(), param_index + 1, val,
                                            -1, SQLITE_TRANSIENT);
  DCHECK_EQ(sqlite_error_code, SQLITE_OK);
}
// Binds a UTF-8 string with an explicit length; an empty piece is bound as an
// empty TEXT value, never as NULL (see the placeholder note below).
void Statement::BindString(int param_index, base::StringPiece value) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " must not be called after Run()";
  DCHECK(!step_called_) << __func__ << " must not be called after Step()";
#endif  // DCHECK_IS_ON()
  if (!is_valid())
    return;
  DCHECK_GE(param_index, 0);
  DCHECK_LT(param_index, sqlite3_bind_parameter_count(ref_->stmt()))
      << "Invalid parameter index";
  // base::StringPiece::data() may return null for empty pieces. In particular,
  // this may happen when the StringPiece is created from the default
  // constructor.
  //
  // However, sqlite3_bind_text() always interprets a nullptr data argument as a
  // NULL value, instead of an empty BLOB value.
  static constexpr char kEmptyPlaceholder[] = {0x00};
  const char* data = (value.size() > 0) ? value.data() : kEmptyPlaceholder;
  // If the string length is more than SQLITE_MAX_LENGTH (or the per-database
  // SQLITE_LIMIT_LENGTH limit), sqlite3_bind_text() fails with SQLITE_TOOBIG.
  //
  // We're not currently handling this error. SQLITE_MAX_LENGTH is set to the
  // default (1 billion bytes) in Chrome's SQLite build, so this is an unlikely
  // issue.
  int sqlite_error_code = sqlite3_bind_text(ref_->stmt(), param_index + 1, data,
                                            value.size(), SQLITE_TRANSIENT);
  DCHECK_EQ(sqlite_error_code, SQLITE_OK);
}
// Binds a UTF-16 string by converting it to UTF-8 and delegating to
// BindString(), which owns the validity checks and the SQLite call.
void Statement::BindString16(int param_index, base::StringPiece16 value) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  const std::string utf8 = base::UTF16ToUTF8(value);
  BindString(param_index, utf8);
}
// Binds a BLOB; an empty span is bound as an empty BLOB, never as NULL (see
// the placeholder note below).
void Statement::BindBlob(int param_index, base::span<const uint8_t> value) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " must not be called after Run()";
  DCHECK(!step_called_) << __func__ << " must not be called after Step()";
#endif  // DCHECK_IS_ON()
  if (!is_valid())
    return;
  DCHECK_GE(param_index, 0);
  DCHECK_LT(param_index, sqlite3_bind_parameter_count(ref_->stmt()))
      << "Invalid parameter index";
  // span::data() may return null for empty spans. In particular, this may
  // happen when the span is created out of a std::vector, because
  // std::vector::data() may (or may not) return null for empty vectors.
  //
  // However, sqlite3_bind_blob() always interprets a nullptr data argument as a
  // NULL value, instead of an empty BLOB value.
  //
  // While the difference between NULL and an empty BLOB may not matter in some
  // cases, it may also cause subtle bugs in other cases. So, we cannot pass
  // span.data() directly to sqlite3_bind_blob().
  static constexpr uint8_t kEmptyPlaceholder[] = {0x00};
  const uint8_t* data = (value.size() > 0) ? value.data() : kEmptyPlaceholder;
  // If the blob length is more than SQLITE_MAX_LENGTH (or the per-database
  // SQLITE_LIMIT_LENGTH limit), sqlite3_bind_blob() fails with SQLITE_TOOBIG.
  //
  // We're not currently handling this error. SQLITE_MAX_LENGTH is set to the
  // default (1 billion bytes) in Chrome's SQLite build, so this is an unlikely
  // issue.
  int sqlite_error_code = sqlite3_bind_blob(ref_->stmt(), param_index + 1, data,
                                            value.size(), SQLITE_TRANSIENT);
  DCHECK_EQ(sqlite_error_code, SQLITE_OK);
}
// Returns the number of columns in the result set, or 0 for an invalid
// statement.
int Statement::ColumnCount() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (!is_valid())
    return 0;
  return sqlite3_column_count(ref_->stmt());
}
// Verify that our ColumnType enum matches SQLite's fundamental datatype codes,
// so the static_cast in GetColumnType() below is sound.
static_assert(static_cast<int>(ColumnType::kInteger) == SQLITE_INTEGER,
              "INTEGER mismatch");
static_assert(static_cast<int>(ColumnType::kFloat) == SQLITE_FLOAT,
              "FLOAT mismatch");
static_assert(static_cast<int>(ColumnType::kText) == SQLITE_TEXT,
              "TEXT mismatch");
static_assert(static_cast<int>(ColumnType::kBlob) == SQLITE_BLOB,
              "BLOB mismatch");
static_assert(static_cast<int>(ColumnType::kNull) == SQLITE_NULL,
              "NULL mismatch");
// Returns the SQLite type of the value in |col| for the current row; the cast
// is validated by the static_asserts above.
ColumnType Statement::GetColumnType(int col) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " can be used after Step(), not Run()";
  DCHECK(step_called_) << __func__ << " can only be used after Step()";
#endif  // DCHECK_IS_ON()
  return static_cast<enum ColumnType>(sqlite3_column_type(ref_->stmt(), col));
}
// A stored boolean is any nonzero INTEGER; delegate to ColumnInt64(), which
// owns the validity checks.
bool Statement::ColumnBool(int column_index) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  return ColumnInt64(column_index) != 0;
}
// Reads the current row's |column_index| value as a 32-bit integer; 0 for an
// invalid statement.
int Statement::ColumnInt(int column_index) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " can be used after Step(), not Run()";
  DCHECK(step_called_) << __func__ << " can only be used after Step()";
#endif  // DCHECK_IS_ON()
  if (!CheckValid())
    return 0;
  DCHECK_GE(column_index, 0);
  DCHECK_LT(column_index, sqlite3_data_count(ref_->stmt()))
      << "Invalid column index";
  return sqlite3_column_int(ref_->stmt(), column_index);
}
// Reads the current row's |column_index| value as a 64-bit integer; 0 for an
// invalid statement.
int64_t Statement::ColumnInt64(int column_index) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " can be used after Step(), not Run()";
  DCHECK(step_called_) << __func__ << " can only be used after Step()";
#endif  // DCHECK_IS_ON()
  if (!CheckValid())
    return 0;
  DCHECK_GE(column_index, 0);
  DCHECK_LT(column_index, sqlite3_data_count(ref_->stmt()))
      << "Invalid column index";
  return sqlite3_column_int64(ref_->stmt(), column_index);
}
// Reads the current row's |column_index| value as a double; 0 for an invalid
// statement.
double Statement::ColumnDouble(int column_index) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " can be used after Step(), not Run()";
  DCHECK(step_called_) << __func__ << " can only be used after Step()";
#endif  // DCHECK_IS_ON()
  if (!CheckValid())
    return 0;
  DCHECK_GE(column_index, 0);
  DCHECK_LT(column_index, sqlite3_data_count(ref_->stmt()))
      << "Invalid column index";
  return sqlite3_column_double(ref_->stmt(), column_index);
}
// Decodes a time stored by BindTime(): INTEGER microseconds since the Windows
// epoch. Returns the null base::Time for an invalid statement.
base::Time Statement::ColumnTime(int column_index) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " can be used after Step(), not Run()";
  DCHECK(step_called_) << __func__ << " can only be used after Step()";
#endif  // DCHECK_IS_ON()
  if (!CheckValid())
    return base::Time();
  DCHECK_GE(column_index, 0);
  DCHECK_LT(column_index, sqlite3_data_count(ref_->stmt()))
      << "Invalid column index";
  int64_t int_value = sqlite3_column_int64(ref_->stmt(), column_index);
  return base::Time::FromDeltaSinceWindowsEpoch(base::Microseconds(int_value));
}
// Reads the current row's |column_index| value as a UTF-8 string. A NULL
// column (null text buffer per the sqlite3_column_text() contract) yields an
// empty string.
std::string Statement::ColumnString(int column_index) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " can be used after Step(), not Run()";
  DCHECK(step_called_) << __func__ << " can only be used after Step()";
#endif  // DCHECK_IS_ON()
  if (!CheckValid())
    return std::string();
  DCHECK_GE(column_index, 0);
  DCHECK_LT(column_index, sqlite3_data_count(ref_->stmt()))
      << "Invalid column index";
  const char* string_buffer = reinterpret_cast<const char*>(
      sqlite3_column_text(ref_->stmt(), column_index));
  int size = sqlite3_column_bytes(ref_->stmt(), column_index);
  std::string result;
  if (string_buffer && size > 0)
    result.assign(string_buffer, size);
  return result;
}
// Reads the current row's |column_index| value as UTF-16 by fetching the
// UTF-8 text and converting it; empty/NULL columns yield an empty string
// without paying for a conversion.
std::u16string Statement::ColumnString16(int column_index) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " can be used after Step(), not Run()";
  DCHECK(step_called_) << __func__ << " can only be used after Step()";
#endif  // DCHECK_IS_ON()
  if (!CheckValid())
    return std::u16string();
  DCHECK_GE(column_index, 0);
  DCHECK_LT(column_index, sqlite3_data_count(ref_->stmt()))
      << "Invalid column index";
  const std::string utf8 = ColumnString(column_index);
  if (utf8.empty())
    return std::u16string();
  return base::UTF8ToUTF16(utf8);
}
// Returns a zero-copy view of the current row's BLOB. Per the
// sqlite3_column_blob() contract, the pointed-to data is only valid until the
// statement is stepped, reset, or finalized — callers must copy if they need
// it longer.
base::span<const uint8_t> Statement::ColumnBlob(int column_index) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " can be used after Step(), not Run()";
  DCHECK(step_called_) << __func__ << " can only be used after Step()";
#endif  // DCHECK_IS_ON()
  if (!CheckValid())
    return base::span<const uint8_t>();
  DCHECK_GE(column_index, 0);
  DCHECK_LT(column_index, sqlite3_data_count(ref_->stmt()))
      << "Invalid column index";
  int result_size = sqlite3_column_bytes(ref_->stmt(), column_index);
  const void* result_buffer = sqlite3_column_blob(ref_->stmt(), column_index);
  DCHECK(result_size == 0 || result_buffer != nullptr)
      << "sqlite3_column_blob() returned a null buffer for a non-empty BLOB";
  return base::make_span(static_cast<const uint8_t*>(result_buffer),
                         result_size);
}
// Copies the column's BLOB bytes into |result|. |result| is cleared for
// NULL/empty columns. Returns false (leaving |result| untouched) only when
// the statement is invalid.
bool Statement::ColumnBlobAsString(int column_index, std::string* result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " can be used after Step(), not Run()";
  DCHECK(step_called_) << __func__ << " can only be used after Step()";
#endif  // DCHECK_IS_ON()
  if (!CheckValid())
    return false;
  DCHECK_GE(column_index, 0);
  DCHECK_LT(column_index, sqlite3_data_count(ref_->stmt()))
      << "Invalid column index";
  const void* blob = sqlite3_column_blob(ref_->stmt(), column_index);
  const int blob_size = sqlite3_column_bytes(ref_->stmt(), column_index);
  if (!blob || blob_size <= 0) {
    result->clear();
    return true;
  }
  result->assign(static_cast<const char*>(blob), blob_size);
  return true;
}
// Copies the column's BLOB bytes into |result| as chars. |result| is cleared
// for NULL/empty columns. Returns false (leaving |result| untouched) only
// when the statement is invalid.
bool Statement::ColumnBlobAsVector(int column_index,
                                   std::vector<char>* result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
#if DCHECK_IS_ON()
  DCHECK(!run_called_) << __func__ << " can be used after Step(), not Run()";
  DCHECK(step_called_) << __func__ << " can only be used after Step()";
#endif  // DCHECK_IS_ON()
  if (!CheckValid())
    return false;
  DCHECK_GE(column_index, 0);
  DCHECK_LT(column_index, sqlite3_data_count(ref_->stmt()))
      << "Invalid column index";
  const void* blob = sqlite3_column_blob(ref_->stmt(), column_index);
  const int blob_size = sqlite3_column_bytes(ref_->stmt(), column_index);
  if (!blob || blob_size <= 0) {
    result->clear();
    return true;
  }
  // std::vector lacks a (pointer, size) assign() overload, so pass an
  // iterator pair instead.
  const char* begin = static_cast<const char*>(blob);
  result->assign(begin, begin + blob_size);
  return true;
}
bool Statement::ColumnBlobAsVector(int column_index,
std::vector<uint8_t>* result) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
return ColumnBlobAsVector(column_index,
reinterpret_cast<std::vector<char>*>(result));
}
// Returns the original SQL text of this prepared statement, as reported by
// sqlite3_sql(). The returned buffer is owned by SQLite.
const char* Statement::GetSQLStatement() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  const char* sql_text = sqlite3_sql(ref_->stmt());
  return sql_text;
}
// Records whether |err| represents success and routes failures to the owning
// database's error handler. Returns either |err| itself or the handler's
// (possibly remapped) error code.
int Statement::CheckError(int err) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // Please don't add DCHECKs here, OnSqliteError() already has them.
  succeeded_ = (err == SQLITE_OK || err == SQLITE_ROW || err == SQLITE_DONE);
  // On success, or when there is no live database to report to, just hand
  // the code back unchanged.
  if (succeeded_ || !ref_.get() || !ref_->database())
    return err;
  return ref_->database()->OnSqliteError(err, this, nullptr);
}
} // namespace sql
| 7,676 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.