max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
322 | /*****************************************************************************
* Copyright [2019]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include <Common/msgq.h>
#include <Common/logger.h>
#include <Common/config.h>
#include <Common/util.h>
#include <Common/datastruct.h>
#include <string.h>
#ifdef _WIN32
#include <nanomsg/src/nn.h>
#include <nanomsg/src/pair.h>
#include <nanomsg/src/pubsub.h>
#include <nanomsg/src/pipeline.h>
#else
#include <nanomsg/nn.h>
#include <nanomsg/pair.h>
#include <nanomsg/pubsub.h>
#include <nanomsg/pipeline.h>
// #include <zmq.h>
#endif
#include <iostream>
#include <cassert>
namespace StarQuant {
// Base message-queue wrapper: records the protocol/endpoint pair for the
// concrete transport and obtains the shared "SYS" logger.
CMsgq::CMsgq(MSGQ_PROTOCOL protocol, string url) {
    logger = SQLogger::getLogger("SYS");
    protocol_ = protocol;
    url_ = url;
}
// Messenger base: all messengers log through the shared "SYS" logger.
IMessenger::IMessenger() { logger = SQLogger::getLogger("SYS"); }
// Create a nanomsg socket for the requested protocol and bind/connect it to
// `url`. PAIR honours `binding`; PUB/PULL always bind, SUB/PUSH always connect.
//
// BUG FIX: the nn_setsockopt() calls used to be wrapped in assert(...), so in
// NDEBUG (release) builds the calls were compiled out entirely and the
// receive timeouts / send buffer were never configured. The options are now
// set unconditionally and failures are logged.
CMsgqNanomsg::CMsgqNanomsg(MSGQ_PROTOCOL protocol, string url, bool binding)
    : CMsgq(protocol, url) {
    endpoint_ = url;
    if (protocol_ == MSGQ_PROTOCOL::PAIR) {
        sock_ = nn_socket(AF_SP, NN_PAIR);
        assert(sock_ >= 0);
        int32_t to = 100;  // receive timeout in ms so recmsg() cannot block forever
        if (nn_setsockopt(sock_, NN_SOL_SOCKET, NN_RCVTIMEO, &to, sizeof(to)) < 0) {
            LOG_ERROR(logger, "Nanomsg set NN_RCVTIMEO on " << endpoint_ << " error");
        }
        if (binding) {
            eid_ = nn_bind(sock_, endpoint_.c_str());
        } else {
            eid_ = nn_connect(sock_, endpoint_.c_str());
        }
    } else if (protocol_ == MSGQ_PROTOCOL::PUB) {
        sock_ = nn_socket(AF_SP, NN_PUB);
        assert(sock_ >= 0);
        int32_t sndbuff = 1024 * 1024 * 256;  // large send buffer for bursty market data
        if (nn_setsockopt(sock_, NN_SOL_SOCKET, NN_SNDBUF, &sndbuff, sizeof(sndbuff)) < 0) {
            LOG_ERROR(logger, "Nanomsg set NN_SNDBUF on " << endpoint_ << " error");
        }
        eid_ = nn_bind(sock_, endpoint_.c_str());
    } else if (protocol_ == MSGQ_PROTOCOL::SUB) {
        sock_ = nn_socket(AF_SP, NN_SUB);
        assert(sock_ >= 0);
        int32_t to = 100;
        if (nn_setsockopt(sock_, NN_SOL_SOCKET, NN_RCVTIMEO, &to, sizeof(to)) < 0) {
            LOG_ERROR(logger, "Nanomsg set NN_RCVTIMEO on " << endpoint_ << " error");
        }
        nn_setsockopt(sock_, NN_SUB, NN_SUB_SUBSCRIBE, "", 0);  // subscribe to all topics
        eid_ = nn_connect(sock_, endpoint_.c_str());
    } else if (protocol_ == MSGQ_PROTOCOL::PUSH) {
        sock_ = nn_socket(AF_SP, NN_PUSH);
        assert(sock_ >= 0);
        eid_ = nn_connect(sock_, endpoint_.c_str());
    } else if (protocol_ == MSGQ_PROTOCOL::PULL) {
        sock_ = nn_socket(AF_SP, NN_PULL);
        assert(sock_ >= 0);
        eid_ = nn_bind(sock_, endpoint_.c_str());
    }
    if (eid_ < 0 || sock_ < 0) {
        LOG_ERROR(logger, "Nanomsg connect sock " << endpoint_ << "error");
    }
}
std::mutex CMsgqEMessenger::sendlock_;
std::unique_ptr<CMsgq> CMsgqEMessenger::msgq_send_;
// Named engine messenger; lazily creates the process-wide SUB receive queue
// on first construction (it is shared, so later instances reuse it).
CMsgqEMessenger::CMsgqEMessenger(string name, string url_send) {
    name_ = name;
    if (!msgq_recv_) {
        msgq_recv_ = std::make_unique<CMsgqNanomsg>(MSGQ_PROTOCOL::SUB, url_send);
    }
}
// Anonymous engine messenger; same lazy shared-receive-queue setup as the
// named constructor.
CMsgqEMessenger::CMsgqEMessenger(string url_send) {
    if (!msgq_recv_) {
        msgq_recv_ = std::make_unique<CMsgqNanomsg>(MSGQ_PROTOCOL::SUB, url_send);
    }
}
// Serialize the message and publish it on the shared send queue.
// Serialization happens outside the lock so the critical section only
// covers the actual socket send.
void CMsgqEMessenger::send(shared_ptr<MsgHeader> Msg, int32_t mode) {
    const string payload = Msg->serialize();
    lock_guard<std::mutex> guard(sendlock_);
    msgq_send_->sendmsg(payload, mode);
}
// Receive one serialized message (non-blocking per `mode`/socket timeout) and
// deserialize it into the concrete MsgHeader subclass selected by its type
// field. Returns nullptr when nothing was received or parsing failed.
//
// BUG FIX: the error log said "orignial msg" — corrected to "original msg".
shared_ptr<MsgHeader> CMsgqEMessenger::recv(int32_t mode) {
    string msgin = msgq_recv_->recmsg(mode);
    if (msgin.empty())
        return nullptr;
    // LOG_DEBUG(logger, name_ <<" recv msg:"<<msgin);
    try {
        // Wire format: destination | source | msgtype | payload...,
        // fields separated by SERIALIZATION_SEPARATOR.
        string des;
        string src;
        string stype;
        stringstream ss(msgin);
        getline(ss, des, SERIALIZATION_SEPARATOR);
        getline(ss, src, SERIALIZATION_SEPARATOR);
        getline(ss, stype, SERIALIZATION_SEPARATOR);
        MSG_TYPE mtype = MSG_TYPE(stoi(stype));
        std::shared_ptr<MsgHeader> pheader;
        switch (mtype) {
            case MSG_TYPE_ORDER:
                pheader = make_shared<OrderMsg>();
                // NOTE(review): only this case sets msgtype_ explicitly; the
                // other messages presumably set it in their constructors or
                // deserialize() — confirm against datastruct.h.
                pheader->msgtype_ = MSG_TYPE_ORDER;
                pheader->deserialize(msgin);
                break;
            case MSG_TYPE_ORDER_CTP:
                pheader = make_shared<CtpOrderMsg>();
                pheader->deserialize(msgin);
                break;
            case MSG_TYPE_ORDER_PAPER:
                pheader = make_shared<PaperOrderMsg>();
                pheader->deserialize(msgin);
                break;
            case MSG_TYPE_SUBSCRIBE_MARKET_DATA:
                pheader = make_shared<SubscribeMsg>(des, src);
                pheader->deserialize(msgin);
                break;
            case MSG_TYPE_UNSUBSCRIBE:
                pheader = make_shared<UnSubscribeMsg>(des, src);
                pheader->deserialize(msgin);
                break;
            case MSG_TYPE_ORDER_ACTION:
            case MSG_TYPE_CANCEL_ORDER:
                pheader = make_shared<OrderActionMsg>();
                pheader->deserialize(msgin);
                break;
            case MSG_TYPE_CANCEL_ALL:
                pheader = make_shared<CancelAllMsg>();
                pheader->deserialize(msgin);
                break;
            case MSG_TYPE_QRY_CONTRACT:
                pheader = make_shared<QryContractMsg>(des, src);
                pheader->deserialize(msgin);
                break;
            default:
                // Unrecognized payload types still deliver the bare header.
                pheader = make_shared<MsgHeader>(des, src, mtype);
                break;
        }
        return pheader;
    }
    catch (std::exception& e) {
        LOG_ERROR(logger, e.what() << " [original msg:" << msgin << "]");
        return nullptr;
    }
    catch (...) {
        LOG_ERROR(logger, "MSGQ cannot deserialize msg:" << msgin);
        return nullptr;
    }
}
std::mutex CMsgqRMessenger::sendlock_;
std::unique_ptr<CMsgq> CMsgqRMessenger::msgq_send_;
// Static convenience entry point: serialize Msg and publish it on the shared
// send queue under the class-wide send lock.
void CMsgqRMessenger::Send(shared_ptr<MsgHeader> Msg, int32_t mode) {
    const string payload = Msg->serialize();
    lock_guard<std::mutex> guard(sendlock_);
    msgq_send_->sendmsg(payload, mode);
}
// Router-side messenger: owns a PULL socket bound to url_recv for inbound
// messages. The shared send queue (msgq_send_) is created elsewhere; the
// commented line below is the original in-place initialization kept for
// reference.
CMsgqRMessenger::CMsgqRMessenger(string url_recv) {
    msgq_recv_ = std::make_unique<CMsgqNanomsg>(MSGQ_PROTOCOL::PULL, url_recv);
    // msgq_send_ = std::make_unique<CMsgqNanomsg>(MSGQ_PROTOCOL::PUB, url_send);
}
// Instance-level send; mirrors the static Send() and shares the same lock
// and queue, so both entry points serialize through one critical section.
void CMsgqRMessenger::send(shared_ptr<MsgHeader> Msg, int32_t mode) {
    const string payload = Msg->serialize();
    lock_guard<std::mutex> guard(CMsgqRMessenger::sendlock_);
    CMsgqRMessenger::msgq_send_->sendmsg(payload, mode);
}
// Pump one message from the PULL socket and route it: messages whose first
// byte is RELAY_DESTINATION go back out through the engine messenger's PUB
// socket (to the strategy process); everything else is forwarded to the
// engines through this class's send queue.
void CMsgqRMessenger::relay() {
    string msgpull = msgq_recv_->recmsg(0);
    if (msgpull.empty())
        return;
    // cout<<"recv msg at"<<ymdhmsf6();
    // Special marker: this message is destined for the strategy process.
    if (msgpull[0] == RELAY_DESTINATION) {
        lock_guard<std::mutex> g(CMsgqEMessenger::sendlock_);
        // Send the message back out so the strategy process receives it.
        CMsgqEMessenger::msgq_send_->sendmsg(msgpull);
    } else {
        lock_guard<std::mutex> g(CMsgqRMessenger::sendlock_);
        // Relay the message on to the individual engines.
        CMsgqRMessenger::msgq_send_->sendmsg(msgpull);
    }
}
// Release the endpoint first, then close the socket itself.
CMsgqNanomsg::~CMsgqNanomsg() {
    nn_shutdown(sock_, eid_);
    nn_close(sock_);
}
// Send the string plus its terminating NUL (the receiver strips it).
// Logs an error when the full payload was not sent.
void CMsgqNanomsg::sendmsg(const string& msg, int32_t dontwait) {
    const size_t expected = msg.size() + 1;
    const int32_t bytes = nn_send(sock_, msg.c_str(), expected, dontwait);
    // nn_send returns -1 on failure. Check the sign explicitly instead of
    // relying on `bytes != msg.size()+1`, which silently promoted the signed
    // -1 to a huge unsigned value in the comparison.
    if (bytes < 0 || static_cast<size_t>(bytes) != expected) {
        LOG_ERROR(logger, "Nanomsg " << endpoint_ << " send msg error, return:" << bytes);
    }
}
void CMsgqNanomsg::sendmsg(const char* msg, int32_t dontwait) {
int32_t bytes = nn_send(sock_, msg, strlen(msg) + 1, dontwait);
}
// Receive one message. nn_recv with NN_MSG allocates the buffer, which must
// be released with nn_freemsg. Returns an empty string on timeout/error.
// Note: the returned string keeps the trailing NUL byte that sendmsg()
// transmits, matching the original behaviour.
string CMsgqNanomsg::recmsg(int32_t dontwait) {
    char* buf = nullptr;
    const int32_t bytes = nn_recv(sock_, &buf, NN_MSG, dontwait);
    if (bytes <= 0 || buf == nullptr) {
        return string();
    }
    string msg(buf, bytes);
    // BUG FIX (readability): this was `buf != nullptr && nn_freemsg(buf);`,
    // a short-circuit expression used as a statement; the null check was
    // already guaranteed by the branch above.
    nn_freemsg(buf);
    return msg;
}
// ZeroMQ-based queue — currently disabled (see the commented-out zmq include
// at the top of the file). The body is kept as reference code for the
// PAIR/PUB/SUB socket setup; only the CMsgq base initialization runs today.
CMsgqZmq::CMsgqZmq(MSGQ_PROTOCOL protocol, string port, bool binding)
    : CMsgq(protocol, port) {
    // if (protocol_ == MSGQ_PROTOCOL::PAIR)
    // {
    // context_ = zmq_ctx_new();
    // socket_ = zmq_socket(context_, ZMQ_PAIR);
    // if (binding) {
    // endpoint_ = "tcp://*:" + port;
    // rc_ = zmq_bind(socket_, endpoint_.c_str());
    // }
    // else {
    // endpoint_ = "tcp://localhost:" + port;
    // rc_ = zmq_connect(socket_, endpoint_.c_str());
    // }
    // }
    // else if (protocol_ == MSGQ_PROTOCOL::PUB) {
    // context_ = zmq_ctx_new();
    // socket_ = zmq_socket(context_, ZMQ_PUB);
    // endpoint_ = "tcp://*:" + port;
    // rc_ = zmq_bind(socket_, endpoint_.c_str());
    // }
    // else if (protocol_ == MSGQ_PROTOCOL::SUB) {
    // context_ = zmq_ctx_new();
    // socket_ = zmq_socket(context_, ZMQ_SUB);
    // endpoint_ = "tcp://localhost:" + port;
    // rc_ = zmq_connect(socket_, endpoint_.c_str());
    // }
}
// No-op while ZMQ support is disabled; the teardown sequence is kept as
// reference for re-enabling.
CMsgqZmq::~CMsgqZmq() {
    // rc_ = zmq_unbind(socket_, endpoint_.c_str());
    // zmq_close(socket_);
    // zmq_ctx_shutdown(context_);
    // zmq_term(context_);
    // zmq_ctx_destroy(context_);
}
// zmq doesn't have global nn_term(), has to be set non-blocking, ZMQ_DONTWAIT
// No-op stub while ZMQ support is disabled.
void CMsgqZmq::sendmsg(const string& msg, int32_t dontwait) {
    // int32_t bytes = zmq_send(socket_, msg.c_str(), msg.size() + 1, 0);
}
// No-op stub while ZMQ support is disabled.
void CMsgqZmq::sendmsg(const char* msg, int32_t dontwait) {
    // int32_t bytes = zmq_send(socket_, msg, strlen(msg)+1, 0);
}
// Receive stub — ZMQ support is disabled, so this always returns an empty
// string. BUG FIX: the function previously fell off the end without a return
// statement, which is undefined behaviour for a value-returning function.
string CMsgqZmq::recmsg(int32_t blockingflags) {
    // int32_t bytes = zmq_recv(socket_, buf_, 1024, blockingflags);//ZMQ_NOBLOCK
    // if (bytes > 0) {
    // buf_[bytes] = '\0';
    // string msg(buf_);
    // return msg;
    // }
    // else {
    // return string();
    // }
    return string();
}
mutex MsgCenter::msglock_;
std::unique_ptr<CMsgq> MsgCenter:: msgq_pub_;
} // namespace StarQuant
| 5,023 |
892 | <gh_stars>100-1000
{
"schema_version": "1.2.0",
"id": "GHSA-7386-4wm9-v7xr",
"modified": "2022-02-12T00:01:05Z",
"published": "2022-02-09T00:00:30Z",
"aliases": [
"CVE-2022-21805"
],
"details": "Reflected cross-site scripting vulnerability in the attached file name of php_mailform versions prior to Version 1.40 allows a remote unauthenticated attacker to inject an arbitrary script via unspecified vectors.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2022-21805"
},
{
"type": "WEB",
"url": "https://github.com/econosys-system/php_mailform"
},
{
"type": "WEB",
"url": "https://jvn.jp/en/jp/JVN16690037/index.html"
}
],
"database_specific": {
"cwe_ids": [
"CWE-79"
],
"severity": "MODERATE",
"github_reviewed": false
}
} | 413 |
3,897 | /*
* Copyright (c) 2018-2019, Arm Limited and affiliates.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MBED_LINKED_LIST_H
#define MBED_LINKED_LIST_H
#include "LinkEntry.h"
#include "LinkedListBase.h"
/**
* \defgroup drivers_LinkedList LinkedList class
* \ingroup drivers-internal-api-usb
* @{
*/
/**
 * Type-safe wrapper over the intrusive LinkedListBase FIFO.
 *
 * T must be convertible to/from LinkEntry (NOTE(review): the static_casts
 * imply T derives from LinkEntry — confirm against LinkedListBase). The list
 * holds raw, non-owning pointers: destroying the list does not delete the
 * elements.
 */
template<class T>
class LinkedList: public LinkedListBase {
public:
    LinkedList() {}
    ~LinkedList() {}

    /**
     * Return the element at the head of the list
     *
     * @return The element at the head of the list or NULL if the list is empty
     */
    T *head()
    {
        return static_cast<T *>(LinkedListBase::head());
    }

    /**
     * Add an element to the tail of the list
     *
     * @param entry New element to add
     */
    void enqueue(T *entry)
    {
        LinkedListBase::enqueue(static_cast<LinkEntry *>(entry));
    }

    /**
     * Remove the element at the head of the list
     *
     * @return The element at the head of the list or NULL if the list is empty
     */
    T *dequeue()
    {
        return static_cast<T *>(LinkedListBase::dequeue());
    }
};
/** @}*/
#endif
| 600 |
348 | <reponame>chamberone/Leaflet.PixiOverlay<filename>docs/data/leg-t2/036/03602130.json
{"nom":"Montlevicq","circ":"2ème circonscription","dpt":"Indre","inscrits":103,"abs":37,"votants":66,"blancs":6,"nuls":1,"exp":59,"res":[{"nuance":"LR","nom":"M. <NAME>","voix":33},{"nuance":"MDM","nom":"<NAME>","voix":26}]} | 131 |
1,454 | /**
* The MIT License (MIT)
* Copyright (c) 2020 铭软科技(mingsoft.net)
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package net.mingsoft.cms.biz.impl;
import net.mingsoft.base.biz.impl.BaseBizImpl;
import net.mingsoft.base.dao.IBaseDao;
import net.mingsoft.cms.bean.CategoryBean;
import net.mingsoft.cms.bean.ContentBean;
import net.mingsoft.cms.biz.IContentBiz;
import net.mingsoft.cms.dao.ICategoryDao;
import net.mingsoft.cms.dao.IContentDao;
import net.mingsoft.cms.entity.ContentEntity;
import net.mingsoft.mdiy.entity.ModelEntity;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import java.util.List;
import java.util.Map;
/**
 * Article (CMS content) management business-layer implementation.
 * @author Mingsoft development team
 * Created: 2019-11-28 15:12:32<br/>
 * Revision history:<br/>
 */
@Service("cmscontentBizImpl")
public class ContentBizImpl extends BaseBizImpl<IContentDao, ContentEntity> implements IContentBiz {

    /*
     * Logger for this class.
     */
    protected final Logger LOG = LoggerFactory.getLogger(this.getClass());

    /** Content (article) DAO. */
    @Autowired
    private IContentDao contentDao;

    /**
     * Category management DAO.
     */
    @Autowired
    private ICategoryDao categoryDao;

    /** Output directory for generated static HTML (defaults to "html"). */
    @Value("${ms.html-dir:html}")
    private String htmlDir;

    @Override
    protected IBaseDao getDao() {
        return contentDao;
    }

    /** Resolve the category/article ids used by the template parser. */
    @Override
    public List<CategoryBean> queryIdsByCategoryIdForParser(ContentBean contentBean) {
        return this.contentDao.queryIdsByCategoryIdForParser(contentBean);
    }

    /**
     * Count search hits. When a custom content model is supplied, its table
     * is included in the query; otherwise only the base content table is used.
     */
    @Override
    public int getSearchCount(ModelEntity contentModel, List diyList, Map whereMap, int appId, String categoryIds) {
        if (contentModel != null) {
            return contentDao.getSearchCount(contentModel.getModelTableName(), diyList, whereMap, appId, categoryIds);
        }
        return contentDao.getSearchCount(null, null, whereMap, appId, categoryIds);
    }
}
| 1,018 |
1,968 | <filename>librtt/Rtt_PlatformVideoProvider.h<gh_stars>1000+
//////////////////////////////////////////////////////////////////////////////
//
// This file is part of the Corona game engine.
// For overview and more information on licensing please refer to README.md
// Home page: https://github.com/coronalabs/corona
// Contact: <EMAIL>
//
//////////////////////////////////////////////////////////////////////////////
#ifndef _Rtt_PlatformVideoProvider_H__
#define _Rtt_PlatformVideoProvider_H__
#include "Core/Rtt_Types.h"
#include "Rtt_PlatformMediaProviderBase.h"
// ----------------------------------------------------------------------------
namespace Rtt
{
// ----------------------------------------------------------------------------
// Abstract platform interface for picking/recording a video and reporting the
// result back to Lua.
class PlatformVideoProvider : public PlatformMediaProviderBase
{
    public:
        typedef PlatformMediaProviderBase Super;

        PlatformVideoProvider( const ResourceHandle<lua_State> & handle ) : PlatformMediaProviderBase( handle )
        {
        }

    public:
        // True when the given media source is available on this platform.
        virtual bool Supports( int source ) const = 0;
        // Show the platform picker/recorder; maxTime and quality bound the capture.
        virtual bool Show( int source, lua_State* L, int maxTime, int quality ) = 0;

    protected:
        // Result of a video selection, consumed by AddProperties().
        struct VideoProviderResult
        {
            // BUG FIX: Duration and Size were previously left uninitialized,
            // so readers could observe indeterminate values whenever a
            // platform implementation did not fill them in.
            VideoProviderResult()
            : SelectedVideoFileName( NULL )
            , Duration( 0 )
            , Size( 0 )
            { }

            const char* SelectedVideoFileName;
            int Duration;
            long Size;
        };

        static void AddProperties( lua_State *L, void* userdata );

// ----------------------------------------------------------------------------
};
} // namespace Rtt
// ----------------------------------------------------------------------------
#endif // _Rtt_PlatformVideoProvider_H__
| 412 |
4,403 | package cn.hutool.core.net;
import cn.hutool.core.util.CharsetUtil;
import org.junit.Assert;
import org.junit.Test;
public class UrlDecoderTest {

    @Test
    public void decodeForPathTest() {
        // Path decoding must leave '+' untouched: '+' only encodes a space
        // inside query strings, never in path segments.
        final String decoded = URLDecoder.decodeForPath("+", CharsetUtil.CHARSET_UTF_8);
        Assert.assertEquals("+", decoded);
    }
}
| 117 |
669 | <gh_stars>100-1000
from rbgt_pybind import (
Body,
Camera,
RegionModality,
RenderDataBody,
RendererGeometry,
# Tracker,
Viewer,
filesystem,
Transform3fA,
mat2t3fA,
t3fA2mat,
)
import os
import math
import time
import numpy as np
import cv2
class TestTracker(object):
    """Drives the RBGT tracking loop: owns region modalities, viewers,
    cameras and occlusion renderers, and runs the correspondence/update
    iterations for each tracking cycle.

    BUG FIX: set_n_update_iterations / set_visualization_time /
    set_viewer_time previously assigned to an attribute with the *method's
    own name* (e.g. ``self.set_viewer_time = ...``), which clobbered the
    method on first call and never updated the intended setting.
    """

    def __init__(self, image_handle):
        self.region_modalities = []
        self.viewers = []
        self.cameras = {}
        self.occlusion_renderers = {}
        self.n_corr_iterations = 7
        self.n_update_iterations = 2
        self.visualization_time = 0
        self.viewer_time = 1
        self.start_tracking = False
        self.tracking_started = False
        self.image_handle = image_handle
        self.image_cvMat = None

    def AddRegionModality(self, region_modality):
        self.region_modalities.append(region_modality)

    def AddViewer(self, viewer):
        self.viewers.append(viewer)

    def set_n_corr_iterations(self, n_corr_iterations):
        self.n_corr_iterations = n_corr_iterations

    def set_n_update_iterations(self, n_update_iterations):
        # Was: self.set_n_update_iterations = ... (clobbered this method).
        self.n_update_iterations = n_update_iterations

    def set_visualization_time(self, visualization_time):
        # Was: self.set_visualization_time = ... (clobbered this method).
        self.visualization_time = visualization_time

    def set_viewer_time(self, viewer_time):
        # Was: self.set_viewer_time = ... (clobbered this method).
        self.viewer_time = viewer_time

    def SetUpObjects(self):
        """Collect the unique cameras and occlusion renderers referenced by
        the registered region modalities and viewers, keyed by name."""
        self.cameras = {}
        self.occlusion_renderers = {}
        for region_modality in self.region_modalities:
            if region_modality.camera_ptr():
                if region_modality.camera_ptr().name() not in self.cameras.keys():
                    self.cameras[
                        region_modality.camera_ptr().name()
                    ] = region_modality.camera_ptr()
            if region_modality.occlusion_mask_renderer_ptr():
                if (
                    region_modality.occlusion_mask_renderer_ptr().name()
                    not in self.occlusion_renderers.keys()
                ):
                    self.occlusion_renderers[
                        region_modality.occlusion_mask_renderer_ptr().name()
                    ] = region_modality.occlusion_mask_renderer_ptr()
        for viewer in self.viewers:
            if viewer.camera_ptr():
                if viewer.camera_ptr().name() not in self.cameras.keys():
                    self.cameras[viewer.camera_ptr().name()] = viewer.camera_ptr()

    def CalculateBeforeCameraUpdate(self):
        # Returns False as soon as any modality fails its pre-update step.
        for region_modality in self.region_modalities:
            if not region_modality.CalculateBeforeCameraUpdate():
                return False
        return True

    def UpdateCameras(self):
        """Fetch a new frame from the image handle and push it to every
        camera; returns False when no frame is available."""
        self.image_cvMat = self.image_handle.get_image()
        if self.image_cvMat is None:
            return False
        for camera in self.cameras.values():
            if not camera.UpdateImage2(self.image_cvMat):
                return False
        return True

    def StartOcclusionMaskRendering(self):
        for occlusion_renderer in self.occlusion_renderers.values():
            if not occlusion_renderer.StartRendering():
                return False
        return True

    def CalculateCorrespondences(self, corr_iteration):
        for region_modality in self.region_modalities:
            if not region_modality.CalculateCorrespondences(corr_iteration):
                return False
        return True

    def CalculatePoseUpdate(self):
        for region_modality in self.region_modalities:
            if not region_modality.CalculatePoseUpdate():
                return False
        return True

    def UpdateViewers(self, save_idx):
        if self.viewers:
            for viewer in self.viewers:
                viewer.UpdateViewer(save_idx)
        return True

    def StartTracker(self, start_tracking):
        """Main loop: runs viewing cycles until tracking is requested, then
        switches to full tracking cycles. Never returns unless a cycle fails.
        """
        self.start_tracking = start_tracking
        self.SetUpObjects()
        i = 0
        while True:
            if self.start_tracking:
                for region_modality in self.region_modalities:
                    if not region_modality.StartModality():
                        return
                self.tracking_started = True
                self.start_tracking = False
            if self.tracking_started:
                if not self.ExecuteTrackingCycle(i):
                    return
            else:
                if not self.ExecuteViewingCycle(i):
                    return

    def ExecuteTrackingCycle(self, i):
        """One tracking iteration: pre-update, frame grab, then
        n_corr_iterations x n_update_iterations of correspondence/pose
        refinement, finishing with a viewer update."""
        if not self.CalculateBeforeCameraUpdate():
            return False
        if not self.UpdateCameras():
            return False
        for j in range(self.n_corr_iterations):
            # Index of this correspondence step across all cycles (kept for
            # parity with the C++ implementation's save indices).
            corr_save_idx = i * self.n_corr_iterations + j
            if not self.StartOcclusionMaskRendering():
                return False
            if not self.CalculateCorrespondences(j):
                return False
            for update_iter in range(self.n_update_iterations):
                update_save_idx = corr_save_idx * self.n_update_iterations + update_iter
                if not self.CalculatePoseUpdate():
                    return False
        if not self.UpdateViewers(i):
            return False
        return True

    def ExecuteViewingCycle(self, i):
        # While not tracking, just refresh the frame and the viewers.
        if not self.UpdateCameras():
            return False
        return self.UpdateViewers(i)
| 2,577 |
8,586 | <gh_stars>1000+
/* Copyright (c) 2020 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#ifndef COMMON_EXPRESSION_COLUMNEXPRESSION_H_
#define COMMON_EXPRESSION_COLUMNEXPRESSION_H_
#include "common/expression/Expression.h"
namespace nebula {
/**
* This is an internal type of expression
* It has no corresponding rules in parser.
* you can get the corresponding value by column index
*/
class ColumnExpression final : public Expression {
 public:
  // Assignment is disabled; instances are pool-owned and shared by pointer.
  ColumnExpression& operator=(const ColumnExpression& rhs) = delete;
  ColumnExpression& operator=(ColumnExpression&&) = delete;

  // Factory: the returned object is owned by `pool` (registered via
  // pool->add) and must not be deleted by the caller.
  static ColumnExpression* make(ObjectPool* pool, int32_t index = 0) {
    return pool->add(new ColumnExpression(pool, index));
  }

  // Evaluates to the value at column `index_` of the current row/record in
  // `ctx` (NOTE(review): exact lookup semantics live in the .cpp — confirm).
  const Value& eval(ExpressionContext& ctx) override;

  void accept(ExprVisitor* visitor) override;

  // Clone allocates from the same pool, preserving ownership semantics.
  Expression* clone() const override {
    return ColumnExpression::make(pool_, index_);
  }

  std::string toString() const override;

  bool operator==(const Expression& expr) const override;

 private:
  explicit ColumnExpression(ObjectPool* pool, int32_t index = 0)
      : Expression(pool, Kind::kColumn), index_(index) {}

  void writeTo(Encoder& encoder) const override;

  void resetFrom(Decoder&) override;

 private:
  // Cached evaluation result returned by reference from eval().
  Value result_;
  // Zero-based column index this expression reads.
  int32_t index_;
};
} // namespace nebula
#endif // COMMON_EXPRESSION_COLUMNEXPRESSION_H_
| 435 |
375 | /*
* The MIT License
*
* Copyright (c) 2017 CloudBees, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
package jenkins.plugins.git;
import hudson.model.Run;
import hudson.model.TaskListener;
import hudson.plugins.git.GitException;
import hudson.plugins.git.GitSCM;
import hudson.plugins.git.extensions.GitSCMExtension;
import java.io.IOException;
import java.util.List;
import jenkins.scm.api.mixin.TagSCMHead;
import org.eclipse.jgit.transport.RefSpec;
import org.eclipse.jgit.transport.RemoteConfig;
import org.jenkinsci.plugins.gitclient.CloneCommand;
import org.jenkinsci.plugins.gitclient.FetchCommand;
import org.jenkinsci.plugins.gitclient.GitClient;
/**
* Used to reset the default clone behaviour for {@link GitSCM} instances created by {@link GitSCMBuilder}.
* Does not have a descriptor as we do not expect this extension to be user-visible.
* With this extension, we anticipate:
* <ul>
* <li>tags will not be cloned or fetched</li>
* <li>refspecs will be honoured on clone</li>
* </ul>
*
* @since 3.4.0
*/
public class GitSCMSourceDefaults extends GitSCMExtension {
    /**
     * Determines whether tags should be fetched... only relevant if we implement support for {@link TagSCMHead}.
     */
    private final boolean includeTags;

    /**
     * Constructor.
     *
     * @param includeTags {@code true} to request fetching tags.
     */
    public GitSCMSourceDefaults(boolean includeTags) {
        this.includeTags = includeTags;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        GitSCMSourceDefaults that = (GitSCMSourceDefaults) o;
        return includeTags == that.includeTags;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int hashCode() {
        // Only field is the boolean flag, so two distinct hash values suffice.
        return (includeTags ? 1 : 0);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String toString() {
        return "GitSCMSourceDefaults{" +
                "includeTags=" + includeTags +
                '}';
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void decorateCloneCommand(GitSCM scm, Run<?, ?> build, GitClient git, TaskListener listener,
                                     CloneCommand cmd) throws IOException, InterruptedException, GitException {
        listener.getLogger()
                .printf("Cloning with configured refspecs honoured and %s tags%n", includeTags ? "with" : "without");
        // Apply the fetch refspecs of the first configured remote so the
        // clone only retrieves the refs the SCM source asked for.
        RemoteConfig rc = scm.getRepositories().get(0);
        List<RefSpec> refspecs = rc.getFetchRefSpecs();
        cmd.refspecs(refspecs);
        cmd.tags(includeTags);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    @Deprecated
    // Deprecated overload retained for compatibility with older git-plugin
    // cores that still invoke this signature.
    public void decorateFetchCommand(GitSCM scm, GitClient git, TaskListener listener, FetchCommand cmd)
            throws IOException, InterruptedException, GitException {
        listener.getLogger()
                .printf("Fetching %s tags%n", includeTags ? "with" : "without");
        cmd.tags(includeTags);
    }
}
| 1,544 |
335 | {
"word": "Brachistochrone",
"definitions": [
"A curve between two points along which a body can move under gravity in a shorter time than for any other curve."
],
"parts-of-speech": "Noun"
} | 78 |
384 | package yifeiyuan.practice.practicedemos.itemtouchhelper;
import android.os.Bundle;
import android.view.Menu;
import android.view.MenuItem;
import yifeiyuan.practice.practicedemos.R;
import yifeiyuan.practice.practicedemos.base.ToolbarActivity;
/**
 * Demo activity hosting an ItemTouchHelper swipe-to-dismiss sample.
 *
 * TODO: add sorting / drag-and-drop support.
 */
public class TouchHelperActivity extends ToolbarActivity {

    SimpleSwipedismissFragment simpleSwipedismissFragment;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_swipe_dismiss);
        // Attach the swipe-dismiss demo fragment into the activity container.
        simpleSwipedismissFragment = new SimpleSwipedismissFragment();
        getSupportFragmentManager().beginTransaction().add(R.id.container, simpleSwipedismissFragment).commit();
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        getMenuInflater().inflate(R.menu.menu_swipe_dismiss, menu);
        return true;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        // TODO: handle the remaining menu actions.
        int id = item.getItemId();
        if (id == R.id.action_settings) {
            return true;
        }
        return super.onOptionsItemSelected(item);
    }
}
| 458 |
507 | <reponame>mjuenema/python-terrascript
# terrascript/snowflake/r.py
# Automatically generated by tools/makecode.py ()
import warnings

warnings.warn(
    "using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2
)

import terrascript

# NOTE: every class below is an auto-generated, empty placeholder that maps a
# Snowflake Terraform resource type onto terrascript.Resource; the class
# *name* is what terrascript emits into the Terraform configuration.
# Do not edit by hand — regenerate with tools/makecode.py instead.


class snowflake_account_grant(terrascript.Resource):
    pass


class snowflake_api_integration(terrascript.Resource):
    pass


class snowflake_database(terrascript.Resource):
    pass


class snowflake_database_grant(terrascript.Resource):
    pass


class snowflake_external_function(terrascript.Resource):
    pass


class snowflake_external_table(terrascript.Resource):
    pass


class snowflake_external_table_grant(terrascript.Resource):
    pass


class snowflake_file_format(terrascript.Resource):
    pass


class snowflake_file_format_grant(terrascript.Resource):
    pass


class snowflake_function(terrascript.Resource):
    pass


class snowflake_function_grant(terrascript.Resource):
    pass


class snowflake_integration_grant(terrascript.Resource):
    pass


class snowflake_managed_account(terrascript.Resource):
    pass


class snowflake_masking_policy(terrascript.Resource):
    pass


class snowflake_masking_policy_grant(terrascript.Resource):
    pass


class snowflake_materialized_view(terrascript.Resource):
    pass


class snowflake_materialized_view_grant(terrascript.Resource):
    pass


class snowflake_network_policy(terrascript.Resource):
    pass


class snowflake_network_policy_attachment(terrascript.Resource):
    pass


class snowflake_notification_integration(terrascript.Resource):
    pass


class snowflake_pipe(terrascript.Resource):
    pass


class snowflake_pipe_grant(terrascript.Resource):
    pass


class snowflake_procedure(terrascript.Resource):
    pass


class snowflake_procedure_grant(terrascript.Resource):
    pass


class snowflake_resource_monitor(terrascript.Resource):
    pass


class snowflake_resource_monitor_grant(terrascript.Resource):
    pass


class snowflake_role(terrascript.Resource):
    pass


class snowflake_role_grants(terrascript.Resource):
    pass


class snowflake_row_access_policy(terrascript.Resource):
    pass


class snowflake_row_access_policy_grant(terrascript.Resource):
    pass


class snowflake_schema(terrascript.Resource):
    pass


class snowflake_schema_grant(terrascript.Resource):
    pass


class snowflake_scim_integration(terrascript.Resource):
    pass


class snowflake_sequence(terrascript.Resource):
    pass


class snowflake_sequence_grant(terrascript.Resource):
    pass


class snowflake_share(terrascript.Resource):
    pass


class snowflake_stage(terrascript.Resource):
    pass


class snowflake_stage_grant(terrascript.Resource):
    pass


class snowflake_storage_integration(terrascript.Resource):
    pass


class snowflake_stream(terrascript.Resource):
    pass


class snowflake_stream_grant(terrascript.Resource):
    pass


class snowflake_table(terrascript.Resource):
    pass


class snowflake_table_grant(terrascript.Resource):
    pass


class snowflake_task(terrascript.Resource):
    pass


class snowflake_task_grant(terrascript.Resource):
    pass


class snowflake_user(terrascript.Resource):
    pass


class snowflake_user_public_keys(terrascript.Resource):
    pass


class snowflake_view(terrascript.Resource):
    pass


class snowflake_view_grant(terrascript.Resource):
    pass


class snowflake_warehouse(terrascript.Resource):
    pass


class snowflake_warehouse_grant(terrascript.Resource):
    pass
| 1,184 |
303 | <filename>src/main/java/com/google/webauthn/gaedemo/objects/AttestationExtension.java<gh_stars>100-1000
package com.google.webauthn.gaedemo.objects;
/**
 * Marker interface for WebAuthn attestation-statement extensions; each
 * implementation reports its concrete kind via {@link #getType()}.
 */
public interface AttestationExtension {
  /** Supported attestation-extension kinds. */
  public enum Type {
    CABLE,
  }

  /** @return the concrete extension type of this instance. */
  public Type getType();
}
| 90 |
1,517 | <gh_stars>1000+
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.android.sqlite;
/**
* This error can occur if the application creates a SQLiteStatement object and allows multiple
* threads in the application use it at the same time.
* Sqlite returns this error if bind and execute methods on this object occur at the same time
* from multiple threads, like so:
* thread # 1: in execute() method of the SQLiteStatement object
* while thread # 2: is in bind..() on the same object.
*</p>
* FIX this by NEVER sharing the same SQLiteStatement object between threads.
* Create a local instance of the SQLiteStatement whenever it is needed, use it and close it ASAP.
* NEVER make it globally available.
*/
public class SQLiteMisuseException extends SQLiteException {
    /** Creates the exception with no detail message. */
    public SQLiteMisuseException() {}

    /**
     * Creates the exception with a detail message.
     *
     * @param error description of the misuse reported by the SQLite layer
     */
    public SQLiteMisuseException(String error) {
        super(error);
    }
}
| 397 |
/*
 * Loads a recognizable pattern of floating-point immediates into ARM VFP
 * registers (d0-d3 as doubles 0.5..3.5, s8-s11 as floats 4.5..7.5) and then
 * stops at a software breakpoint (bkpt) so the register state can be
 * inspected.
 * NOTE(review): presumably a debugger/register-read test fixture — confirm
 * with the harness that launches it.
 */
int main() {
  asm volatile(
      "vmov.f64 d0, #0.5\n\t"
      "vmov.f64 d1, #1.5\n\t"
      "vmov.f64 d2, #2.5\n\t"
      "vmov.f64 d3, #3.5\n\t"
      "vmov.f32 s8, #4.5\n\t"
      "vmov.f32 s9, #5.5\n\t"
      "vmov.f32 s10, #6.5\n\t"
      "vmov.f32 s11, #7.5\n\t"
      "\n\t"
      "bkpt #0\n\t"
      :
      :
      /* Clobber list keeps the compiler from reusing the loaded registers. */
      : "d0", "d1", "d2", "d3", "s8", "s9", "s10", "s11"
  );
  return 0;
}
| 278 |
9,402 | <filename>src/mono/dlls/mscordbi/cordb-thread.h<gh_stars>1000+
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: CORDB-THREAD.H
//
#ifndef __MONO_DEBUGGER_CORDB_THREAD_H__
#define __MONO_DEBUGGER_CORDB_THREAD_H__
#include <cordb.h>
// Represents one managed thread of the debuggee and implements the
// ICorDebugThread interface family for the Mono debugger shim (mscordbi).
class CordbThread : public CordbBaseMono,
                    public ICorDebugThread,
                    public ICorDebugThread2,
                    public ICorDebugThread3,
                    public ICorDebugThread4
{
    long m_threadId;                            // id of the wrapped runtime thread
    CordbProcess* m_pProcess;                   // process this thread belongs to
    CordbStepper* m_pStepper;                   // stepper associated with this thread
    CordbRegisterSet* m_pRegisterSet;           // register set, set via SetRegisterSet()
    CordbNativeFrame* m_pCurrentFrame;          // current frame wrapper
    CordbBlockingObjectEnum* m_pBlockingObject; // enum returned by GetBlockingObjects()
public:
    CordbThread(Connection* conn, CordbProcess* ppProcess, long thread_id);
    // COM-style refcounting is delegated to CordbBaseMono.
    ULONG STDMETHODCALLTYPE AddRef(void)
    {
        return (BaseAddRef());
    }
    ULONG STDMETHODCALLTYPE Release(void)
    {
        return (BaseRelease());
    }
    const char* GetClassName()
    {
        return "CordbThread";
    }
    ~CordbThread();
    void SetRegisterSet(CordbRegisterSet* rs);
    // ICorDebugThread / ICorDebugThread2..4 surface.
    HRESULT STDMETHODCALLTYPE HasUnhandledException(void);
    HRESULT STDMETHODCALLTYPE GetBlockingObjects(ICorDebugBlockingObjectEnum** ppBlockingObjectEnum);
    HRESULT STDMETHODCALLTYPE GetCurrentCustomDebuggerNotification(ICorDebugValue** ppNotificationObject);
    HRESULT STDMETHODCALLTYPE CreateStackWalk(ICorDebugStackWalk** ppStackWalk);
    HRESULT STDMETHODCALLTYPE GetActiveInternalFrames(ULONG32 cInternalFrames,
                                                      ULONG32* pcInternalFrames,
                                                      ICorDebugInternalFrame2* ppInternalFrames[]);
    HRESULT STDMETHODCALLTYPE GetActiveFunctions(ULONG32 cFunctions, ULONG32* pcFunctions, COR_ACTIVE_FUNCTION pFunctions[]);
    HRESULT STDMETHODCALLTYPE GetConnectionID(CONNID* pdwConnectionId);
    HRESULT STDMETHODCALLTYPE GetTaskID(TASKID* pTaskId);
    HRESULT STDMETHODCALLTYPE GetVolatileOSThreadID(DWORD* pdwTid);
    HRESULT STDMETHODCALLTYPE InterceptCurrentException(ICorDebugFrame* pFrame);
    HRESULT STDMETHODCALLTYPE GetProcess(ICorDebugProcess** ppProcess);
    HRESULT STDMETHODCALLTYPE GetID(DWORD* pdwThreadId);
    HRESULT STDMETHODCALLTYPE GetHandle(HTHREAD* phThreadHandle);
    HRESULT STDMETHODCALLTYPE GetAppDomain(ICorDebugAppDomain** ppAppDomain);
    HRESULT STDMETHODCALLTYPE SetDebugState(CorDebugThreadState state);
    HRESULT STDMETHODCALLTYPE GetDebugState(CorDebugThreadState* pState);
    HRESULT STDMETHODCALLTYPE GetUserState(CorDebugUserState* pState);
    HRESULT STDMETHODCALLTYPE GetCurrentException(ICorDebugValue** ppExceptionObject);
    HRESULT STDMETHODCALLTYPE ClearCurrentException(void);
    HRESULT STDMETHODCALLTYPE CreateStepper(ICorDebugStepper** ppStepper);
    HRESULT STDMETHODCALLTYPE EnumerateChains(ICorDebugChainEnum** ppChains);
    HRESULT STDMETHODCALLTYPE GetActiveChain(ICorDebugChain** ppChain);
    HRESULT STDMETHODCALLTYPE GetActiveFrame(ICorDebugFrame** ppFrame);
    HRESULT STDMETHODCALLTYPE GetRegisterSet(ICorDebugRegisterSet** ppRegisters);
    HRESULT STDMETHODCALLTYPE CreateEval(ICorDebugEval** ppEval);
    HRESULT STDMETHODCALLTYPE GetObject(ICorDebugValue** ppObject);
    HRESULT STDMETHODCALLTYPE QueryInterface(REFIID id, _COM_Outptr_ void __RPC_FAR* __RPC_FAR* pInterface);
    // Accessor for the raw runtime thread id.
    long GetThreadId() const
    {
        return m_threadId;
    }
};
// Enumerator over debuggee threads (ICorDebugThreadEnum implementation).
class CordbThreadEnum : public CordbBaseMono, public ICorDebugThreadEnum
{
public:
    CordbThreadEnum(Connection* conn);
    // COM-style refcounting is delegated to CordbBaseMono.
    ULONG STDMETHODCALLTYPE AddRef(void)
    {
        return (BaseAddRef());
    }
    ULONG STDMETHODCALLTYPE Release(void)
    {
        return (BaseRelease());
    }
    const char* GetClassName()
    {
        return "CordbThreadEnum";
    }
    // ICorDebugEnum / ICorDebugThreadEnum surface.
    HRESULT STDMETHODCALLTYPE Next(ULONG celt, ICorDebugThread* values[], ULONG* pceltFetched);
    HRESULT STDMETHODCALLTYPE Skip(ULONG celt);
    HRESULT STDMETHODCALLTYPE Reset(void);
    HRESULT STDMETHODCALLTYPE Clone(ICorDebugEnum** ppEnum);
    HRESULT STDMETHODCALLTYPE GetCount(ULONG* pcelt);
    HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, void** ppvObject);
};
#endif
| 1,767 |
2,338 | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon \
// RUN: -target-feature +sha3 -S -emit-llvm -o - %s \
// RUN: | FileCheck %s
// NOTE(review): this file is generated (see the update_cc_test_checks.py
// header above).  The "// CHECK" comments are FileCheck directives that
// must match the LLVM IR emitted by the RUN lines — do not edit them by
// hand; regenerate instead.  Each function exercises one ARMv8.2 SHA3
// intrinsic and exists only so codegen can be checked.
#include <arm_neon.h>
// CHECK-LABEL: @test_vsha512h(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.sha512h
//
void test_vsha512h(uint64x2_t hash_ed, uint64x2_t hash_gf, uint64x2_t kwh_kwh2) {
  uint64x2_t result = vsha512hq_u64(hash_ed, hash_gf, kwh_kwh2);
}
// CHECK-LABEL: @test_vsha512h2(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.sha512h2
//
void test_vsha512h2(uint64x2_t sum_ab, uint64x2_t hash_c_, uint64x2_t hash_ab) {
  uint64x2_t result = vsha512h2q_u64(sum_ab, hash_c_, hash_ab);
}
// CHECK-LABEL: @test_vsha512su0(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.sha512su0
//
void test_vsha512su0(uint64x2_t w0_1, uint64x2_t w2_) {
  uint64x2_t result = vsha512su0q_u64(w0_1, w2_);
}
// CHECK-LABEL: @test_vsha512su1(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.sha512su1
//
void test_vsha512su1(uint64x2_t s01_s02, uint64x2_t w14_15, uint64x2_t w9_10) {
  uint64x2_t result = vsha512su1q_u64(s01_s02, w14_15, w9_10);
}
// CHECK-LABEL: @test_vrax1(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.rax1
//
void test_vrax1(uint64x2_t a, uint64x2_t b) {
  uint64x2_t result = vrax1q_u64(a, b);
}
// CHECK-LABEL: @test_xar(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.xar
//
void test_xar(uint64x2_t a, uint64x2_t b) {
  uint64x2_t result = vxarq_u64(a, b, 10);
}
// CHECK-LABEL: @test_vbcax_u8(
// CHECK: call <16 x i8> @llvm.aarch64.crypto.bcaxu.v16i8
//
void test_vbcax_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) {
  uint8x16_t result = vbcaxq_u8(a, b, c);
}
// CHECK-LABEL: @test_vbcax_u16(
// CHECK: call <8 x i16> @llvm.aarch64.crypto.bcaxu.v8i16
//
void test_vbcax_u16(uint16x8_t a, uint16x8_t b, uint16x8_t c) {
  uint16x8_t result = vbcaxq_u16(a, b, c);
}
// CHECK-LABEL: @test_vbcax_u32(
// CHECK: call <4 x i32> @llvm.aarch64.crypto.bcaxu.v4i32
//
void test_vbcax_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) {
  uint32x4_t result = vbcaxq_u32(a, b, c);
}
// CHECK-LABEL: @test_vbcax_u64(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.bcaxu.v2i64
//
void test_vbcax_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) {
  uint64x2_t result = vbcaxq_u64(a, b, c);
}
// CHECK-LABEL: @test_vbcax_s8(
// CHECK: call <16 x i8> @llvm.aarch64.crypto.bcaxs.v16i8
//
void test_vbcax_s8(int8x16_t a, int8x16_t b, int8x16_t c) {
  int8x16_t result = vbcaxq_s8(a, b, c);
}
// CHECK-LABEL: @test_vbcax_s16(
// CHECK: call <8 x i16> @llvm.aarch64.crypto.bcaxs.v8i16
//
void test_vbcax_s16(int16x8_t a, int16x8_t b, int16x8_t c) {
  int16x8_t result = vbcaxq_s16(a, b, c);
}
// CHECK-LABEL: @test_vbcax_s32(
// CHECK: call <4 x i32> @llvm.aarch64.crypto.bcaxs.v4i32
//
void test_vbcax_s32(int32x4_t a, int32x4_t b, int32x4_t c) {
  int32x4_t result = vbcaxq_s32(a, b, c);
}
// CHECK-LABEL: @test_vbcax_s64(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.bcaxs.v2i64
//
void test_vbcax_s64(int64x2_t a, int64x2_t b, int64x2_t c) {
  int64x2_t result = vbcaxq_s64(a, b, c);
}
// CHECK-LABEL: @test_veor3_u8(
// CHECK: call <16 x i8> @llvm.aarch64.crypto.eor3u.v16i8
//
void test_veor3_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) {
  uint8x16_t result = veor3q_u8(a, b, c);
}
// CHECK-LABEL: @test_veor3_u16(
// CHECK: call <8 x i16> @llvm.aarch64.crypto.eor3u.v8i16
//
void test_veor3_u16(uint16x8_t a, uint16x8_t b, uint16x8_t c) {
  uint16x8_t result = veor3q_u16(a, b, c);
}
// CHECK-LABEL: @test_veor3_u32(
// CHECK: call <4 x i32> @llvm.aarch64.crypto.eor3u.v4i32
//
void test_veor3_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) {
  uint32x4_t result = veor3q_u32(a, b, c);
}
// CHECK-LABEL: @test_veor3_u64(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.eor3u.v2i64
//
void test_veor3_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) {
  uint64x2_t result = veor3q_u64(a, b, c);
}
// CHECK-LABEL: @test_veor3_s8(
// CHECK: call <16 x i8> @llvm.aarch64.crypto.eor3s.v16i8
//
void test_veor3_s8(int8x16_t a, int8x16_t b, int8x16_t c) {
  int8x16_t result = veor3q_s8(a, b, c);
}
// CHECK-LABEL: @test_veor3_s16(
// CHECK: call <8 x i16> @llvm.aarch64.crypto.eor3s.v8i16
//
void test_veor3_s16(int16x8_t a, int16x8_t b, int16x8_t c) {
  int16x8_t result = veor3q_s16(a, b, c);
}
// CHECK-LABEL: @test_veor3_s32(
// CHECK: call <4 x i32> @llvm.aarch64.crypto.eor3s.v4i32
//
void test_veor3_s32(int32x4_t a, int32x4_t b, int32x4_t c) {
  int32x4_t result = veor3q_s32(a, b, c);
}
// CHECK-LABEL: @test_veor3_s64(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.eor3s.v2i64
//
void test_veor3_s64(int64x2_t a, int64x2_t b, int64x2_t c) {
  int64x2_t result = veor3q_s64(a, b, c);
}
| 2,667 |
24,939 | <gh_stars>1000+
import asyncio
from mitmproxy import ctx
class KeepServing:
    """Stop the proxy master once replay/file-read activity has drained,
    unless the ``keepserving`` option asks it to stay up."""

    def load(self, loader):
        loader.add_option(
            "keepserving", bool, False,
            """
            Continue serving after client playback, server playback or file
            read. This option is ignored by interactive tools, which always keep
            serving.
            """
        )

    def keepgoing(self) -> bool:
        # Still busy if any reader/replayer reports outstanding work.
        probes = (
            "readfile.reading",
            "replay.client.count",
            "replay.server.count",
        )
        return any(ctx.master.commands.call(probe) for probe in probes)

    def shutdown(self):  # pragma: no cover
        ctx.master.shutdown()

    async def watch(self):
        # Poll periodically; once nothing is left to do, shut the master down.
        while True:
            await asyncio.sleep(0.1)
            if not self.keepgoing():
                self.shutdown()

    def running(self):
        # Only watch for completion when some playback/read source was given
        # and the user did not explicitly request to keep serving.
        triggers = (
            ctx.options.client_replay,
            ctx.options.server_replay,
            ctx.options.rfile,
        )
        if any(triggers) and not ctx.options.keepserving:
            asyncio.get_event_loop().create_task(self.watch())
| 548 |
695 | import time
import threading
class ProceedContainer:
    # Per-thread "may exit" flags.  A debugger under test (or
    # exit_while_loop below) flips an entry to True to release the
    # corresponding worker from its busy-wait loop.
    tid_to_proceed = {
        1: False,
        2: False,
    }
def exit_while_loop(tid):
    # Intended to be invoked by the debugger (e.g. via expression
    # evaluation) to let thread `tid` finish.
    ProceedContainer.tid_to_proceed[tid] = True
    return 'ok'
def thread_func(tid):
    # Spin until the flag for this thread flips to True.
    while not ProceedContainer.tid_to_proceed[tid]: # The debugger should change the proceed to True to exit the loop.
        time.sleep(.1)
if __name__ == '__main__':
    # Start two workers, wait for both, then print the marker the test
    # harness greps for (spelling kept as-is: it is a runtime string).
    threads = [
        threading.Thread(target=thread_func, args=(1,)),
        threading.Thread(target=thread_func, args=(2,)),
    ]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print('TEST SUCEEDED')
| 281 |
5,964 | <gh_stars>1000+
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// ES6 section 19.3 Boolean Objects
// ES6 #sec-boolean.prototype.tostring
TF_BUILTIN(BooleanPrototypeToString, CodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  // ToThisValue unwraps a Boolean wrapper object / validates the receiver,
  // throwing a TypeError (with the given method name) otherwise.
  Node* value = ToThisValue(context, receiver, PrimitiveType::kBoolean,
                            "Boolean.prototype.toString");
  // The true/false oddballs cache their string representation.
  Node* result = LoadObjectField(value, Oddball::kToStringOffset);
  Return(result);
}
// ES6 #sec-boolean.prototype.valueof
TF_BUILTIN(BooleanPrototypeValueOf, CodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  // Returns the primitive boolean backing the receiver; ToThisValue
  // throws a TypeError for non-Boolean receivers.
  Node* result = ToThisValue(context, receiver, PrimitiveType::kBoolean,
                             "Boolean.prototype.valueOf");
  Return(result);
}
} // namespace internal
} // namespace v8
| 438 |
3,651 | /*
*
* * Copyright 2015 OrientDB LTD (info(at)orientdb.com)
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* * For more information: http://www.orientdb.com
*
*/
package com.orientechnologies.orient.core.sql.parser;
import com.orientechnologies.orient.core.sql.OCommandSQLParsingException;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import org.junit.Assert;
import org.junit.Test;
/** Created by luigidellaquila on 02/07/15. */
public class OProjectionTest {

  /** A projection "is expand" only when expand() is used as the projection function. */
  @Test
  public void testIsExpand() throws ParseException {
    OrientSql parser = getParserFor("select expand(foo) from V");
    OSelectStatement stm = (OSelectStatement) parser.parse();
    Assert.assertTrue(stm.getProjection().isExpand());

    OrientSql parser2 = getParserFor("select foo from V");
    OSelectStatement stm2 = (OSelectStatement) parser2.parse();
    Assert.assertFalse(stm2.getProjection().isExpand());

    // "expand" used as a plain identifier is not the expand() function.
    OrientSql parser3 = getParserFor("select expand from V");
    OSelectStatement stm3 = (OSelectStatement) parser3.parse();
    Assert.assertFalse(stm3.getProjection().isExpand());
  }

  /** validate() must reject expand() combined with other projection items. */
  @Test
  public void testValidate() throws ParseException {
    OrientSql parser = getParserFor("select expand(foo) from V");
    OSelectStatement stm = (OSelectStatement) parser.parse();
    stm.getProjection().validate();

    try {
      // Removed a stray empty statement (";") that followed this call.
      getParserFor("select expand(foo), bar from V").parse();
      Assert.fail("expected OCommandSQLParsingException for expand() mixed with other projections");
    } catch (OCommandSQLParsingException ex) {
      // expected: expand() cannot be combined with other projections
    } catch (Exception x) {
      Assert.fail("unexpected exception type: " + x);
    }
  }

  /** Builds a parser over the given SQL text. */
  protected OrientSql getParserFor(String string) {
    InputStream is = new ByteArrayInputStream(string.getBytes());
    OrientSql osql = new OrientSql(is);
    return osql;
  }
}
| 770 |
1,375 | //
// VirtualBoxServiceProvider.h
// Vagrant Manager
//
// Copyright (c) 2014 Lanayo. All rights reserved.
//
#import "VirtualBoxMachineInfo.h"
#import "VirtualMachineServiceProvider.h"
// Adapter exposing VirtualBox machines through the generic
// VirtualMachineServiceProvider protocol (contract defined in the protocol).
@interface VirtualBoxServiceProvider : NSObject <VirtualMachineServiceProvider>
@end
711 | <gh_stars>100-1000
/*
* Copyright 2015 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.apiman.gateway.test.junit.servlet;
import io.apiman.test.common.resttest.IGatewayTestServer;
import io.apiman.test.common.resttest.IGatewayTestServerFactory;
/**
* Factory used to create the servlet version of the gateway for testing purposes.
*
* @author <EMAIL>
*/
public class ServletGatewayTestServerFactory implements IGatewayTestServerFactory {
    /**
     * Constructor.
     */
    public ServletGatewayTestServerFactory() {
    }
    /**
     * Creates a fresh servlet-hosted gateway server for a single test run.
     *
     * @see io.apiman.test.common.resttest.IGatewayTestServerFactory#createGatewayTestServer()
     */
    @Override
    public IGatewayTestServer createGatewayTestServer() {
        return new ServletGatewayTestServer();
    }
}
| 404 |
640 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- encoding: utf8 -*-
import os, sys
# Demo: fork a child that writes to a pipe; the parent reads it back.
print "I'm going to fork now - the child will write something to a pipe, and the parent will read it back"
r, w = os.pipe() # r, w are file descriptors, not file objects
pid = os.fork()
if pid:
    # parent process
    os.close(w) # close the unused write end
    r = os.fdopen(r) # wrap the read descriptor in a file object
    print "parent: reading"
    txt = r.read()
    os.waitpid(pid, 0) # make sure the child process is reaped
else:
    # child process
    os.close(r)
    w = os.fdopen(w, 'w')
    print "child: writing"
    w.write("here's some text from the child")
    w.close()
    print "child: closing"
    sys.exit(0)
print "parent: got it; text =", txt
| 393 |
367 | #include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <node_api.h>
#include "utils.h"
#define BUFFER_SIZE 512
#define VALUE_NAME_BUFFER_SIZE 64
#define OID_HEX_BUFFER_SIZE 64
#include "polodb.h"
#define STD_CALL(EXPR) \
ec = (EXPR); \
if (ec < 0) { \
napi_throw_type_error(env, NULL, PLDB_error_msg()); \
return NULL; \
}
#define countof(x) sizeof((x)) / sizeof((x)[0])
static napi_ref collection_object_ref;
static napi_ref objectid_ref;
// Returns nonzero when `value`'s JS type equals `expected`.
static int check_type(napi_env env, napi_value value, napi_valuetype expected) {
  napi_valuetype actual;
  napi_status status = napi_typeof(env, value, &actual);
  assert(status == napi_ok);
  return actual == expected ? 1 : 0;
}
// JS binding: returns the PoloDB library version string.
// NOTE(review): the static buffer is not reentrant; presumably fine since
// N-API callbacks run on the single JS thread — confirm if workers are added.
static napi_value db_version(napi_env env, napi_callback_info info) {
  static char buffer[BUFFER_SIZE];
  memset(buffer, 0, BUFFER_SIZE);
  PLDB_version(buffer, BUFFER_SIZE);
  napi_status status;
  napi_value world;
  status = napi_create_string_utf8(env, buffer, strlen(buffer), &world);
  assert(status == napi_ok);
  return world;
}
#define CHECK_STAT2(stat) \
if ((stat) != napi_ok) { \
goto err; \
}
#define DECLARE_NAPI_METHOD(name, func) \
{ name, 0, func, 0, 0, 0, napi_default, 0 }
// Native state attached to each JS Collection object via napi_wrap.
typedef struct {
  uint32_t id;            // mirrors the JS "__id" property
  uint32_t meta_version;  // mirrors the JS "__metaVersion" property
} InternalCollection;
// Allocates a zero-initialized InternalCollection.
// `db` is accepted for interface stability but is not used yet.
// Returns NULL on allocation failure (the old code memset a potentially
// NULL malloc result, crashing on OOM).
InternalCollection* NewInternalCollection(Database* db) {
  (void)db;
  // calloc zero-fills, covering the explicit id/meta_version = 0 the old
  // code performed by hand after a redundant memset.
  return (InternalCollection*)calloc(1, sizeof(InternalCollection));
}
// napi finalize callback: releases the InternalCollection attached to a
// Collection object when it is garbage collected.
void InternalCollection_finalizer(napi_env env, void* finalize_data, void* finalize_hint) {
  (void)env;
  (void)finalize_hint;
  free(finalize_data);
}
// JS: new Collection(db, name, id, metaVersion)
// Stores the arguments as properties on the new object and wraps a native
// InternalCollection carrying the numeric id / meta version.
static napi_value Collection_constructor(napi_env env, napi_callback_info info) {
  napi_status status;
  napi_value this_arg;
  size_t argc = 4;
  napi_value args[4];
  status = napi_get_cb_info(env, info, &argc, args, &this_arg, NULL);
  CHECK_STAT(status);
  if (!check_type(env, args[0], napi_object)) {
    napi_throw_type_error(env, NULL, "the first arg should be an object");
    return NULL;
  }
  if (!check_type(env, args[1], napi_string)) {
    // BUGFIX: message previously claimed an object was expected here.
    napi_throw_type_error(env, NULL, "the second arg should be a string");
    return NULL;
  }
  if (!check_type(env, args[2], napi_number)) {
    napi_throw_type_error(env, NULL, "the third arg should be a number");
    return NULL;
  }
  if (!check_type(env, args[3], napi_number)) {
    // BUGFIX: "forth" -> "fourth".
    napi_throw_type_error(env, NULL, "the fourth arg should be a number");
    return NULL;
  }
  Database* db = NULL;
  status = napi_unwrap(env, args[0], (void**)&db);
  CHECK_STAT(status);
  // Expose the constructor arguments as properties on `this`.
  napi_property_descriptor db_prop[] = {
    { "__db", 0, 0, 0, 0, args[0], napi_default, 0 },
    { "__name", 0, 0, 0, 0, args[1], napi_default, 0 },
    { "__id", 0, 0, 0, 0, args[2], napi_default, 0 },
    { "__metaVersion", 0, 0, 0, 0, args[3], napi_default, 0 },
    { NULL }
  };
  status = napi_define_properties(env, this_arg, 4, db_prop);
  CHECK_STAT(status);
  InternalCollection* internal_collection = NewInternalCollection(db);
  if (internal_collection == NULL) {
    // Defensive: allocation failure should surface as a JS error.
    napi_throw_error(env, NULL, "out of memory");
    return NULL;
  }
  status = napi_wrap(env, this_arg, internal_collection, InternalCollection_finalizer, 0, NULL);
  CHECK_STAT(status);
  status = napi_get_value_uint32(env, args[2], &internal_collection->id);
  CHECK_STAT(status);
  status = napi_get_value_uint32(env, args[3], &internal_collection->meta_version);
  CHECK_STAT(status);
  return this_arg;
}
static DbDocument* JsValueToDbDocument(napi_env env, napi_value value);
static DbArray* JsArrayValueToDbArray(napi_env env, napi_value value);
// Copies a JS string into slot `index` of `arr`.
// Uses the standard two-pass napi_get_value_string_utf8 protocol:
// first call measures, second call copies.
static napi_status JsArrayValueToDbArray_SetStringElement(napi_env env, DbArray* arr, unsigned int index, napi_value value) {
  napi_status status;
  size_t str_len = 0;
  status = napi_get_value_string_utf8(env, value, NULL, 0, &str_len);
  if (status != napi_ok) {
    return status;
  }
  char* buffer = malloc(str_len + 1);
  if (buffer == NULL) {
    // BUGFIX: previously memset would have dereferenced NULL on OOM.
    return napi_generic_failure;
  }
  memset(buffer, 0, str_len + 1);
  status = napi_get_value_string_utf8(env, value, buffer, str_len + 1, &str_len);
  if (status != napi_ok) {
    free(buffer);  // BUGFIX: buffer leaked on this error path
    return status;
  }
  PLDBValue tmpValue = {
    PLDB_VAL_STRING,
    {
      .str = buffer
    }
  };
  int ec = PLDB_arr_set(arr, index, tmpValue);
  if (ec < 0) {
    free(buffer);
    napi_throw_error(env, NULL, PLDB_error_msg());
    return napi_generic_failure;
  }
  // Freed on success too, so PLDB_arr_set presumably copies the string.
  free(buffer);
  return napi_ok;
}
// Recursively converts a nested JS array and stores it at arr[index].
static napi_status JsArrayValueToDbArray_SetArrayElement(napi_env env, DbArray* arr, unsigned int index, napi_value child_value) {
  DbArray* child_arr = JsArrayValueToDbArray(env, child_value);
  if (child_arr == NULL) {
    // Conversion failed; a JS exception is already pending.
    return napi_generic_failure;
  }
  napi_status result = napi_ok;
  PLDBValue tmp = {
    PLDB_VAL_ARRAY,
    {
      .arr = child_arr,
    }
  };
  if (PLDB_arr_set(arr, index, tmp) < 0) {
    result = napi_throw_error(env, NULL, PLDB_error_msg());
  }
  // Freed unconditionally — the parent array presumably keeps its own
  // reference after PLDB_arr_set; TODO confirm against the PoloDB C API.
  PLDB_free_arr(child_arr);
  return result;
}
// Stores a JS Date (read via JsGetUTCDateTime) at arr[index] as a PoloDB
// UTC datetime value.
// NOTE(review): CHECK_STAT is also used in napi_value-returning functions;
// here it would return NULL (== napi_ok) on failure — TODO confirm the
// macro's definition handles status-returning functions as intended.
static napi_status JsArrayValueToDbArray_SetUTCDateTime(napi_env env, DbArray* arr, unsigned int index, napi_value value) {
  napi_status status;
  int64_t utc_datetime = 0;
  status = JsGetUTCDateTime(env, value, &utc_datetime);
  CHECK_STAT(status);
  PLDBValue tmp = {
    PLDB_VAL_UTC_DATETIME,
    {
      .utc = utc_datetime,
    }
  };
  int ec = PLDB_arr_set(arr, index, tmp);
  if (ec < 0) {
    napi_throw_error(env, NULL, PLDB_error_msg());
    return napi_generic_failure;
  }
  return napi_ok;
}
// Converts one JS value and stores it at arr[index].
// Dispatches on the JS type; arrays/documents recurse via the helpers above.
static napi_status JsArrayValueToDbArray_SetElement(napi_env env, DbArray* arr, unsigned int index, napi_value value) {
  napi_status status;
  napi_valuetype ty;
  napi_value object_id_instance;
  status = napi_get_reference_value(env, objectid_ref, &object_id_instance);
  CHECK_STAT(status);
  status = napi_typeof(env, value, &ty);
  int64_t int_value = 0;
  double float_value = 0;
  bool bl_value = false;
  int ec = 0;
  switch (ty) {
    case napi_undefined:
    case napi_null: {
      PLDBValue tmp = PLDB_NULL;
      PLDB_arr_set(arr, index, tmp);
      break;
    }
    case napi_string:
      return JsArrayValueToDbArray_SetStringElement(env, arr, index, value);
    case napi_boolean: {
      status = napi_get_value_bool(env, value, &bl_value);
      if (status != napi_ok) {
        return status;
      }
      PLDBValue tmp = PLDB_BOOL(bl_value);
      PLDB_arr_set(arr, index, tmp);
      break;
    }
    case napi_number: {
      // Integral doubles are stored as INT, everything else as DOUBLE.
      ec = JsIsInteger(env, value);
      if (ec < 0) {
        napi_throw_error(env, NULL, PLDB_error_msg());
        return napi_generic_failure;
      } else if (ec) {
        status = napi_get_value_int64(env, value, &int_value);
        if (status != napi_ok) return status;
        PLDBValue tmp = PLDB_INT(int_value);
        PLDB_arr_set(arr, index, tmp);
      } else {
        status = napi_get_value_double(env, value, &float_value);
        if (status != napi_ok) return status;
        PLDBValue tmp = PLDB_DOUBLE(float_value);
        PLDB_arr_set(arr, index, tmp);
      }
      break;
    }
    case napi_object: {
      bool is_array = false;
      bool is_date = false;
      status = JsIsArray(env, value, &is_array);
      CHECK_STAT(status);
      if (is_array) {
        return JsArrayValueToDbArray_SetArrayElement(env, arr, index, value);
      }
      status = napi_is_date(env, value, &is_date);
      CHECK_STAT(status);
      if (is_date) {
        // BUGFIX: the date check was computed but never used, so Date
        // elements fell through to the generic document conversion
        // (unlike the document-property path, which handles dates).
        return JsArrayValueToDbArray_SetUTCDateTime(env, arr, index, value);
      }
      // BUGFIX: napi_instanceof returns a napi_status (napi_ok == 0);
      // the old code used the status itself as a boolean, which inverted
      // the ObjectId detection.  Check the out-parameter instead.
      status = napi_instanceof(env, value, object_id_instance, &bl_value);
      CHECK_STAT(status);
      if (bl_value) {
        DbObjectId* oid = NULL; // borrowed
        status = napi_unwrap(env, value, (void**)&oid);
        CHECK_STAT(status);
        PLDBValue tmp = {
          PLDB_VAL_OBJECT_ID,
          {
            .oid = oid
          }
        };
        if (PLDB_arr_set(arr, index, tmp) < 0) {
          napi_throw_error(env, NULL, PLDB_error_msg());
          return napi_generic_failure;
        }
      } else {
        DbDocument* child_doc = JsValueToDbDocument(env, value);
        if (child_doc == NULL) {
          return napi_generic_failure;
        }
        PLDBValue tmp = {
          PLDB_VAL_DOCUMENT,
          {
            .doc = child_doc
          }
        };
        if (PLDB_arr_set(arr, index, tmp) < 0) {
          napi_throw_error(env, NULL, PLDB_error_msg());
          PLDB_free_doc(child_doc);  // BUGFIX: child_doc leaked on this path
          return napi_generic_failure;
        }
        PLDB_free_doc(child_doc);
      }
      break;
    }
    default:
      napi_throw_type_error(env, NULL, "unsupport object type");
      return napi_generic_failure;
  }
  return napi_ok;
}
// Converts a JS array into a newly-allocated DbArray.
// The caller owns the result (release with PLDB_free_arr); returns NULL
// with a pending JS exception on failure.
static DbArray* JsArrayValueToDbArray(napi_env env, napi_value value) {
  napi_status status;
  // BUGFIX: `arr` is now declared and NULL-initialized before the first
  // CHECK_STAT2 — previously a failure there jumped to `err` and passed
  // an uninitialized pointer to PLDB_free_arr.
  DbArray* arr = NULL;
  uint32_t arr_len = 0;
  status = napi_get_array_length(env, value, &arr_len);
  CHECK_STAT2(status);
  arr = PLDB_mk_arr_with_size(arr_len);
  napi_value item_value;
  for (uint32_t i = 0; i < arr_len; i++) {
    status = napi_get_element(env, value, i, &item_value);
    CHECK_STAT2(status);
    status = JsArrayValueToDbArray_SetElement(env, arr, i, item_value);
    CHECK_STAT2(status);
  }
  return arr;
err:
  if (arr != NULL) {
    PLDB_free_arr(arr);
  }
  return NULL;
}
// Copies a JS string value into doc[key].
// Same two-pass napi_get_value_string_utf8 protocol as the array variant.
static napi_status JsStringValueToDbValue_SetStringProperty(napi_env env, DbDocument* doc, const char* key, napi_value value) {
  napi_status status;
  size_t str_len = 0;
  status = napi_get_value_string_utf8(env, value, NULL, 0, &str_len);
  if (status != napi_ok) {
    return status;
  }
  char* buffer = malloc(str_len + 1);
  if (buffer == NULL) {
    // BUGFIX: previously memset would have dereferenced NULL on OOM.
    return napi_generic_failure;
  }
  memset(buffer, 0, str_len + 1);
  status = napi_get_value_string_utf8(env, value, buffer, str_len + 1, &str_len);
  if (status != napi_ok) {
    free(buffer);  // BUGFIX: buffer leaked on this error path
    return status;
  }
  PLDBValue tmp = {
    PLDB_VAL_STRING,
    {
      .str = buffer
    }
  };
  int ec = PLDB_doc_set(doc, key, tmp);
  if (ec < 0) {
    free(buffer);
    napi_throw_error(env, NULL, PLDB_error_msg());
    return napi_generic_failure;
  }
  free(buffer);
  return napi_ok;
}
// Converts a JS array and stores it at doc[key].
static napi_status JsStringValueToDbValue_SetArrayProperty(napi_env env, DbDocument* doc, const char* key, napi_value value) {
  DbArray* arr = JsArrayValueToDbArray(env, value);
  if (arr == NULL) {
    return napi_generic_failure;
  }
  napi_status result = napi_ok;
  PLDBValue tmp = {
    PLDB_VAL_ARRAY,
    {
      .arr = arr
    }
  };
  if (PLDB_doc_set(doc, key, tmp) < 0) {
    result = napi_throw_error(env, NULL, PLDB_error_msg());
  }
  PLDB_free_arr(arr);
  // BUGFIX: previously `return napi_ok;`, which silently discarded
  // PLDB_doc_set failures (the array-element variant correctly returns
  // the accumulated result).
  return result;
}
// Stores a JS Date (read via JsGetUTCDateTime) at doc[key] as a PoloDB
// UTC datetime value.
// NOTE(review): CHECK_STAT in a napi_status-returning function presumably
// returns NULL (== napi_ok) on failure — TODO confirm the macro definition.
static napi_status JsStringValueToDbValue_SetUTCDateTime(napi_env env, DbDocument* doc, const char* key, napi_value value) {
  napi_status status;
  int64_t utc_datetime = 0;
  status = JsGetUTCDateTime(env, value, &utc_datetime);
  CHECK_STAT(status);
  PLDBValue tmp = {
    PLDB_VAL_UTC_DATETIME,
    {
      .utc = utc_datetime
    }
  };
  int ec = PLDB_doc_set(doc, key, tmp);
  if (ec < 0) {
    napi_throw_error(env, NULL, PLDB_error_msg());
    return napi_generic_failure;
  }
  return napi_ok;
}
// Converts one JS value and stores it at doc[key].
// Mirrors JsArrayValueToDbArray_SetElement, dispatching on the JS type.
static napi_status JsValueToDbDocument_SetProperty(napi_env env, DbDocument* doc, const char* key, napi_value value) {
  napi_status status;
  napi_valuetype ty;
  napi_value object_id_instance;
  status = napi_get_reference_value(env, objectid_ref, &object_id_instance);
  CHECK_STAT(status);
  status = napi_typeof(env, value, &ty);
  int64_t int_value = 0;
  double float_value = 0;
  bool bl_value = false;
  int ec = 0;
  switch (ty) {
    case napi_undefined:
    case napi_null: {
      PLDBValue tmp = PLDB_NULL;
      PLDB_doc_set(doc, key, tmp);
      break;
    }
    case napi_string:
      return JsStringValueToDbValue_SetStringProperty(env, doc, key, value);
    case napi_boolean: {
      status = napi_get_value_bool(env, value, &bl_value);
      if (status != napi_ok) {
        return status;
      }
      PLDBValue tmp = PLDB_BOOL(bl_value);
      PLDB_doc_set(doc, key, tmp);
      break;
    }
    case napi_number: {
      // Integral doubles are stored as INT, everything else as DOUBLE.
      ec = JsIsInteger(env, value);
      if (ec < 0) {
        napi_throw_error(env, NULL, PLDB_error_msg());
        return napi_generic_failure;
      } else if (ec) {
        status = napi_get_value_int64(env, value, &int_value);
        if (status != napi_ok) return status;
        PLDBValue tmp = PLDB_INT(int_value);
        PLDB_doc_set(doc, key, tmp);
      } else {
        status = napi_get_value_double(env, value, &float_value);
        if (status != napi_ok) return status;
        PLDBValue tmp = PLDB_DOUBLE(float_value);
        PLDB_doc_set(doc, key, tmp);
      }
      break;
    }
    case napi_object: {
      bool is_array = false;
      bool is_date = false;
      status = JsIsArray(env, value, &is_array);
      CHECK_STAT(status);
      if (is_array) {
        return JsStringValueToDbValue_SetArrayProperty(env, doc, key, value);
      }
      status = napi_is_date(env, value, &is_date);
      CHECK_STAT(status);
      if (is_date) {
        return JsStringValueToDbValue_SetUTCDateTime(env, doc, key, value);
      }
      // BUGFIX: napi_instanceof returns a napi_status (napi_ok == 0); the
      // old code used the status itself as the condition, which inverted
      // the ObjectId detection.  Check the out-parameter instead.
      status = napi_instanceof(env, value, object_id_instance, &bl_value);
      CHECK_STAT(status);
      if (bl_value) {
        DbObjectId* oid = NULL; // borrowed
        status = napi_unwrap(env, value, (void**)&oid);
        CHECK_STAT(status);
        PLDBValue tmp = {
          PLDB_VAL_OBJECT_ID,
          {
            .oid = oid
          }
        };
        if (PLDB_doc_set(doc, key, tmp) < 0) {
          napi_throw_error(env, NULL, PLDB_error_msg());
          return napi_generic_failure;
        }
      } else {
        DbDocument* child_doc = JsValueToDbDocument(env, value);
        if (child_doc == NULL) {
          return napi_generic_failure;
        }
        PLDBValue tmp = {
          PLDB_VAL_DOCUMENT,
          {
            .doc = child_doc
          }
        };
        if (PLDB_doc_set(doc, key, tmp) < 0) {
          napi_throw_error(env, NULL, PLDB_error_msg());
          PLDB_free_doc(child_doc);  // BUGFIX: child_doc leaked on this path
          return napi_generic_failure;
        }
        PLDB_free_doc(child_doc);
      }
      break;
    }
    default:
      napi_throw_type_error(env, NULL, "unsupport object type");
      return napi_generic_failure;
  }
  return napi_ok;
}
// Converts a plain JS object into a newly-allocated DbDocument.
// The caller owns the result (release with PLDB_free_doc); returns NULL
// with a pending JS exception on failure (CHECK_STAT2 jumps to `err`,
// which releases the partially-built document).
static DbDocument* JsValueToDbDocument(napi_env env, napi_value value) {
  napi_status status;
  if (!check_type(env, value, napi_object)) {
    napi_throw_type_error(env, NULL, "object expected");
    return NULL;
  }
  DbDocument* doc = PLDB_mk_doc();
  napi_value names_array;
  status = napi_get_property_names(env, value, &names_array);
  CHECK_STAT2(status);
  uint32_t arr_len = 0;
  status = napi_get_array_length(env, names_array, &arr_len);
  CHECK_STAT2(status);
  // NOTE(review): property names longer than 511 bytes are silently
  // truncated by napi_get_value_string_utf8 below — TODO confirm acceptable.
  char name_buffer[512];
  napi_value element_name;
  napi_value element_value;
  for (uint32_t i = 0; i < arr_len; i++) {
    status = napi_get_element(env, names_array, i, &element_name);
    CHECK_STAT2(status);
    status = napi_get_property(env, value, element_name, &element_value);
    CHECK_STAT2(status);
    memset(name_buffer, 0, 512);
    size_t size = 0;
    status = napi_get_value_string_utf8(env, element_name, name_buffer, 512, &size);
    CHECK_STAT2(status);
    status = JsValueToDbDocument_SetProperty(env, doc, name_buffer, element_value);
    if (status != napi_ok) {
      goto err;
    }
  }
  goto normal;
err:
  PLDB_free_doc(doc);
  return NULL;
normal:
  return doc;
}
static napi_value DbValueToJsValue(napi_env env, PLDBValue value);
// Converts a DbDocument into a plain JS object.
// Iterates with a DbDocumentIter, converting each value via
// DbValueToJsValue and defining it as an enumerable, writable property.
static napi_value DbDocumentToJsValue(napi_env env, DbDocument* doc) {
  napi_status status;
  napi_value result = 0;
  status = napi_create_object(env, &result);
  CHECK_STAT(status);
  int ec = 0;
  DbDocumentIter* iter = PLDB_doc_iter(doc);
  // NOTE(review): static key buffer — non-reentrant, and keys longer than
  // BUFFER_SIZE would be truncated by the iterator; TODO confirm acceptable.
  static char buffer[BUFFER_SIZE];
  memset(buffer, 0, BUFFER_SIZE);
  PLDBValue item;
  // PLDB_doc_iter_next fills `buffer` with the key and `item` with the
  // value; a zero return ends the iteration.
  ec = PLDB_doc_iter_next(iter, buffer, BUFFER_SIZE, &item);
  while (ec) {
    napi_value item_value = DbValueToJsValue(env, item);
    napi_property_descriptor prop = {
      buffer,
      NULL,
      0, 0, 0,
      item_value,
      napi_default | napi_enumerable | napi_writable,
      0
    };
    status = napi_define_properties(env, result, 1, &prop);
    if (status != napi_ok) {
      PLDB_free_value(item);
      goto err;
    }
    memset(buffer, 0, BUFFER_SIZE);
    PLDB_free_value(item);
    ec = PLDB_doc_iter_next(iter, buffer, BUFFER_SIZE, &item);
  }
  goto normal;
err:
  if (iter != NULL) {
    PLDB_free_doc_iter(iter);
    iter = NULL;
  }
  return NULL;
normal:
  PLDB_free_doc_iter(iter);
  return result;
}
// Converts a DbArray into a JS array, element by element.
// Returns NULL (with a pending JS error where applicable) on failure.
static napi_value DbArrayToJsValue(napi_env env, DbArray* arr) {
  napi_status status;
  napi_value result = 0;
  uint32_t len = PLDB_arr_len(arr);
  status = napi_create_array_with_length(env, len, &result);
  CHECK_STAT(status);
  PLDBValue value_item = PLDB_NULL;
  int ec = 0;
  for (uint32_t i = 0; i < len; i++) {
    ec = PLDB_arr_get(arr, i, &value_item);
    if (ec < 0) {
      napi_throw_error(env, NULL, "get element error #1");
      return NULL;
    }
    napi_value js_item = DbValueToJsValue(env, value_item);
    if (js_item == NULL) {
      PLDB_free_value(value_item);  // BUGFIX: value_item leaked here before
      return NULL;
    }
    status = napi_set_element(env, result, i, js_item);
    if (status != napi_ok) {
      PLDB_free_value(value_item);  // BUGFIX: leaked when napi_set_element failed
    }
    CHECK_STAT(status);
    PLDB_free_value(value_item);
  }
  return result;
}
// Converts a PLDBValue coming from the database into a JS value.
// Returns NULL (with a pending JS exception where applicable) on failure.
static napi_value DbValueToJsValue(napi_env env, PLDBValue value) {
  napi_status status;
  napi_value result = NULL;
  switch (value.tag) {
    case PLDB_VAL_NULL:
      // DB null surfaces as JS undefined, matching the forward mapping in
      // JsValueToDbDocument_SetProperty (undefined/null -> PLDB_VAL_NULL).
      status = napi_get_undefined(env, &result);
      CHECK_STAT(status);
      return result;
    case PLDB_VAL_DOUBL:
      status = napi_create_double(env, value.v.double_value, &result);
      CHECK_STAT(status);
      return result;
    case PLDB_VAL_BOOLEAN:
      status = napi_get_boolean(env, value.v.bool_value ? true : false, &result);
      CHECK_STAT(status);
      return result;
    case PLDB_VAL_INT:
      status = napi_create_int64(env, value.v.int_value, &result);
      CHECK_STAT(status);
      return result;
    case PLDB_VAL_STRING: {
      // BUGFIX: removed a dead `if (ec < 0)` check (`ec` was never
      // assigned in this function) and actually check the napi status.
      status = napi_create_string_utf8(env, value.v.str, NAPI_AUTO_LENGTH, &result);
      CHECK_STAT(status);
      return result;
    }
    case PLDB_VAL_DOCUMENT: {
      return DbDocumentToJsValue(env, value.v.doc);
    }
    case PLDB_VAL_ARRAY: {
      return DbArrayToJsValue(env, value.v.arr);
    }
    case PLDB_VAL_OBJECT_ID: {
      napi_value objectid_ctor;
      status = napi_get_reference_value(env, objectid_ref, &objectid_ctor);
      CHECK_STAT(status);
      // Clone the ObjectId: `value` keeps ownership of the original; the
      // clone is handed to the JS ObjectId wrapper via an external
      // (presumably released by the wrapper — TODO confirm).
      DbObjectId* clone = PLDB_dup_object_id(value.v.oid);
      napi_value oid_ext;
      status = napi_create_external(env, clone, NULL, NULL, &oid_ext);
      CHECK_STAT(status);
      size_t argc = 1;
      napi_value args[] = { oid_ext };
      status = napi_new_instance(env, objectid_ctor, argc, args, &result);
      CHECK_STAT(status);
      return result;
    }
    case PLDB_VAL_UTC_DATETIME : {
      return JsNewDate(env, value.v.utc);
    }
    default:
      napi_throw_error(env, NULL, "Unknown DbValue type");
      return NULL;
  }
}
#define CHECK_DB_ALIVE(stat) \
if ((stat) != napi_ok) { \
napi_throw_error(env, NULL, "db has been closed"); \
return NULL; \
}
// Fetches the native Database* stored on a JS collection's "__db"
// property (placed there by Collection_constructor).
static napi_status get_db_from_js_collection(napi_env env, napi_value collection, Database** db) {
  napi_value js_db;
  napi_status status = napi_get_named_property(env, collection, "__db", &js_db);
  if (status != napi_ok) {
    return status;
  }
  return napi_unwrap(env, js_db, (void**)db);
}
/* insert(doc): insert one document into this collection.
 * If the backend generates a fresh "_id" (PLDB_insert returns > 0), it is
 * copied back onto the caller's JS object. Always returns undefined. */
static napi_value Collection_insert(napi_env env, napi_callback_info info) {
  napi_status status;
  napi_value this_arg;
  size_t argc = 1;
  napi_value args[1];
  status = napi_get_cb_info(env, info, &argc, args, &this_arg, NULL);
  CHECK_STAT(status);
  Database* db;
  status = get_db_from_js_collection(env, this_arg, &db);
  CHECK_DB_ALIVE(status);
  InternalCollection* internal_collection;
  status = napi_unwrap(env, this_arg, (void**)&internal_collection);
  CHECK_STAT(status);
  DbDocument* doc = JsValueToDbDocument(env, args[0]);
  if (doc == NULL) {
    return NULL;  /* conversion already threw */
  }
  napi_value result = NULL;
  int ec = PLDB_insert(db, internal_collection->id, internal_collection->meta_version, doc);
  if (ec < 0) {
    napi_throw_error(env, NULL, PLDB_error_msg());
    goto clean;
  }
  if (ec > 0) {  /* ec > 0: the backend generated a new "_id" */
    PLDBValue new_id = PLDB_NULL;
    int ec2 = PLDB_doc_get(doc, "_id", &new_id);
    if (ec2 < 0) {  /* BUG FIX: previously tested `ec`, which is known >= 0 here */
      napi_throw_error(env, NULL, PLDB_error_msg());
      goto clean;
    }
    napi_value js_id = DbValueToJsValue(env, new_id);
    PLDB_free_value(new_id);  /* BUG FIX: was leaked on the error paths below */
    if (js_id == NULL) {
      goto clean;
    }
    status = napi_set_named_property(env, args[0], "_id", js_id);
    if (status != napi_ok) {
      napi_throw_error(env, NULL, "set '_id' for object failed");
      goto clean;
    }
  }
clean:
  PLDB_free_doc(doc);
  return result;
}
/* find(query): run a query and return all matching documents as a JS array.
 * `query` may be undefined (match everything) or a plain object. */
static napi_value Collection_find(napi_env env, napi_callback_info info) {
  napi_status status;
  napi_value this_arg;
  size_t argc = 1;
  napi_value args[1];
  status = napi_get_cb_info(env, info, &argc, args, &this_arg, NULL);
  CHECK_STAT(status);
  Database* db;
  status = get_db_from_js_collection(env, this_arg, &db);
  CHECK_DB_ALIVE(status);
  InternalCollection* internal_collection;
  status = napi_unwrap(env, this_arg, (void**)&internal_collection);
  CHECK_STAT(status);
  /* BUG FIX: query_doc was left uninitialized when args[0] was neither
   * undefined nor an object, and PLDB_find then read a garbage pointer. */
  DbDocument* query_doc = NULL;
  napi_valuetype arg1_ty;
  status = napi_typeof(env, args[0], &arg1_ty);
  assert(status == napi_ok);
  if (arg1_ty == napi_object) {
    query_doc = JsValueToDbDocument(env, args[0]);
    if (query_doc == NULL) {
      return NULL;  /* conversion already threw */
    }
  } else if (arg1_ty != napi_undefined) {
    napi_throw_type_error(env, NULL, "query should be an object or undefined");
    return NULL;
  }
  DbHandle* handle = NULL;
  int ec = PLDB_find(db,
    internal_collection->id,
    internal_collection->meta_version,
    query_doc,
    &handle
  );
  /* NOTE(review): query_doc is never freed here (unlike update/delete);
   * presumably PLDB_find only borrows it for the call — confirm ownership
   * against the PLDB C API before adding a PLDB_free_doc. */
  if (ec < 0) {
    napi_throw_error(env, NULL, PLDB_error_msg());
    return NULL;
  }
  napi_value result;
  status = napi_create_array(env, &result);
  CHECK_STAT(status);
  ec = PLDB_step(handle);
  if (ec < 0) {
    /* BUG FIX: previously returned without releasing `handle`. */
    napi_throw_error(env, NULL, PLDB_error_msg());
    goto err;
  }
  uint32_t counter = 0;
  int state = PLDB_handle_state(handle);
  PLDBValue item = PLDB_NULL;
  while (state == 2) {  /* state 2: a row is available */
    PLDB_handle_get(handle, &item);
    napi_value js_value = DbValueToJsValue(env, item);
    PLDB_free_value(item);
    if (js_value == NULL) {
      goto err;
    }
    status = napi_set_element(env, result, counter, js_value);
    if (status != napi_ok) {
      goto err;
    }
    counter++;
    ec = PLDB_step(handle);
    if (ec < 0) {
      napi_throw_error(env, NULL, PLDB_error_msg());
      goto err;
    }
    state = PLDB_handle_state(handle);
  }
  PLDB_free_handle(handle);
  return result;
err:
  PLDB_free_handle(handle);
  return NULL;
}
/* findOne(query): return the first matching document, or undefined if
 * nothing matches. `query` may be undefined or a plain object. */
static napi_value Collection_find_one(napi_env env, napi_callback_info info) {
  napi_status status;
  napi_value this_arg;
  size_t argc = 1;
  napi_value args[1];
  status = napi_get_cb_info(env, info, &argc, args, &this_arg, NULL);
  CHECK_STAT(status);
  Database* db;
  status = get_db_from_js_collection(env, this_arg, &db);
  CHECK_DB_ALIVE(status);
  InternalCollection* internal_collection;
  status = napi_unwrap(env, this_arg, (void**)&internal_collection);
  CHECK_STAT(status);
  /* BUG FIX: query_doc was uninitialized for argument types other than
   * undefined/object, letting PLDB_find read a garbage pointer. */
  DbDocument* query_doc = NULL;
  napi_valuetype arg1_ty;
  status = napi_typeof(env, args[0], &arg1_ty);
  assert(status == napi_ok);
  if (arg1_ty == napi_object) {
    query_doc = JsValueToDbDocument(env, args[0]);
    if (query_doc == NULL) {
      return NULL;  /* conversion already threw */
    }
  } else if (arg1_ty != napi_undefined) {
    napi_throw_type_error(env, NULL, "query should be an object or undefined");
    return NULL;
  }
  DbHandle* handle = NULL;
  int ec = PLDB_find(db,
    internal_collection->id,
    internal_collection->meta_version,
    query_doc,
    &handle
  );
  if (ec < 0) {
    napi_throw_error(env, NULL, PLDB_error_msg());
    return NULL;
  }
  napi_value result = NULL;
  ec = PLDB_step(handle);
  if (ec < 0) {
    /* BUG FIX: previously returned without closing/freeing the handle. */
    napi_throw_error(env, NULL, PLDB_error_msg());
    goto err;
  }
  PLDBValue item = PLDB_NULL;
  if (PLDB_handle_state(handle) == 2) {  /* state 2: a row is available */
    PLDB_handle_get(handle, &item);
    result = DbValueToJsValue(env, item);
    PLDB_free_value(item);
    if (result == NULL) {
      goto err;
    }
  }
  PLDB_close_and_free_handle(handle);
  return result;
err:
  PLDB_close_and_free_handle(handle);
  return NULL;
}
/* count(): return the number of documents in this collection as a JS number. */
static napi_value Collection_count(napi_env env, napi_callback_info info) {
  napi_value this_arg;
  napi_status status = napi_get_cb_info(env, info, NULL, NULL, &this_arg, NULL);
  CHECK_STAT(status);

  Database* db;
  status = get_db_from_js_collection(env, this_arg, &db);
  CHECK_DB_ALIVE(status);

  InternalCollection* col;
  status = napi_unwrap(env, this_arg, (void**)&col);
  CHECK_STAT(status);

  /* PLDB_count returns the document count, or a negative error code. */
  int64_t count = PLDB_count(db, col->id, col->meta_version);
  if (count < 0) {
    napi_throw_error(env, NULL, PLDB_error_msg());
    return NULL;
  }

  napi_value js_count;
  status = napi_create_int64(env, count, &js_count);
  CHECK_STAT(status);
  return js_count;
}
/* update(query, update): apply the `update` document to every document
 * matching `query`. Both arguments must be objects. Returns undefined. */
static napi_value Collection_update(napi_env env, napi_callback_info info) {
  napi_status status;
  napi_value this_arg;
  size_t argc = 2;
  napi_value args[2];
  status = napi_get_cb_info(env, info, &argc, args, &this_arg, NULL);
  CHECK_STAT(status);
  if (!check_type(env, args[0], napi_object)) {
    napi_throw_type_error(env, NULL, "the first arg should be an object");
    return NULL;
  }
  if (!check_type(env, args[1], napi_object)) {
    napi_throw_type_error(env, NULL, "the second arg should be an object");
    return NULL;
  }
  Database* db;
  status = get_db_from_js_collection(env, this_arg, &db);
  CHECK_DB_ALIVE(status);
  InternalCollection* internal_collection;
  status = napi_unwrap(env, this_arg, (void**)&internal_collection);
  CHECK_STAT(status);
  DbDocument* query_doc = NULL;
  DbDocument* update_doc = NULL;
  query_doc = JsValueToDbDocument(env, args[0]);
  if (query_doc == NULL) {
    goto ret;
  }
  update_doc = JsValueToDbDocument(env, args[1]);
  if (update_doc == NULL) {  /* BUG FIX: copy-paste re-tested query_doc here */
    goto ret;
  }
  if (PLDB_update(db, internal_collection->id, internal_collection->meta_version,
                  query_doc, update_doc) < 0) {
    napi_throw_error(env, NULL, PLDB_error_msg());
  }
ret:
  if (query_doc != NULL) {
    PLDB_free_doc(query_doc);
  }
  if (update_doc != NULL) {
    PLDB_free_doc(update_doc);
  }
  return NULL;
}
/* delete(query): remove every document matching `query` (must be an object).
 * Always returns undefined; errors are thrown as JS exceptions. */
static napi_value Collection_delete(napi_env env, napi_callback_info info) {
  napi_value this_arg;
  size_t argc = 1;
  napi_value args[1];
  napi_status status = napi_get_cb_info(env, info, &argc, args, &this_arg, NULL);
  CHECK_STAT(status);

  if (!check_type(env, args[0], napi_object)) {
    napi_throw_type_error(env, NULL, "the first arg should be an object");
    return NULL;
  }

  Database* db;
  status = get_db_from_js_collection(env, this_arg, &db);
  CHECK_DB_ALIVE(status);

  InternalCollection* col;
  status = napi_unwrap(env, this_arg, (void**)&col);
  CHECK_STAT(status);

  DbDocument* query = JsValueToDbDocument(env, args[0]);
  if (query == NULL) {
    return NULL;  /* conversion already threw */
  }

  if (PLDB_delete(db, col->id, col->meta_version, query) < 0) {
    napi_throw_error(env, NULL, PLDB_error_msg());
  }
  PLDB_free_doc(query);
  return NULL;
}
/* deleteAll(): remove every document from this collection. */
static napi_value Collection_delete_all(napi_env env, napi_callback_info info) {
  napi_value this_arg;
  napi_status status = napi_get_cb_info(env, info, NULL, NULL, &this_arg, NULL);
  CHECK_STAT(status);

  Database* db;
  status = get_db_from_js_collection(env, this_arg, &db);
  CHECK_DB_ALIVE(status);

  InternalCollection* col;
  status = napi_unwrap(env, this_arg, (void**)&col);
  CHECK_STAT(status);

  if (PLDB_delete_all(db, col->id, col->meta_version) < 0) {
    napi_throw_error(env, NULL, PLDB_error_msg());
  }
  return NULL;
}
/* drop(): drop this collection from the database. */
static napi_value Collection_drop(napi_env env, napi_callback_info info) {
  napi_value this_arg;
  napi_status status = napi_get_cb_info(env, info, NULL, NULL, &this_arg, NULL);
  CHECK_STAT(status);

  Database* db;
  status = get_db_from_js_collection(env, this_arg, &db);
  CHECK_DB_ALIVE(status);

  InternalCollection* col;
  status = napi_unwrap(env, this_arg, (void**)&col);
  CHECK_STAT(status);

  if (PLDB_drop(db, col->id, col->meta_version) < 0) {
    napi_throw_error(env, NULL, PLDB_error_msg());
  }
  return NULL;
}
/* createCollection(name): create a new collection and return a JS
 * Collection instance wrapping it. */
static napi_value Database_create_collection(napi_env env, napi_callback_info info) {
  napi_status status;
  napi_value this_arg;
  size_t argc = 1;
  napi_value args[1];
  status = napi_get_cb_info(env, info, &argc, args, &this_arg, NULL);
  CHECK_STAT(status);
  Database* db = NULL;
  status = napi_unwrap(env, this_arg, (void**)&db);  /* fixed cast: napi_unwrap expects void** */
  CHECK_STAT(status);
  if (!check_type(env, args[0], napi_string)) {
    napi_throw_type_error(env, NULL, "The first argument should be a string");
    return NULL;
  }
  /* NOTE(review): static buffer is not re-entrant; fine if all calls come
   * from the single JS thread — confirm. */
  static char path_buffer[BUFFER_SIZE];
  memset(path_buffer, 0, BUFFER_SIZE);
  size_t written_count = 0;
  status = napi_get_value_string_utf8(env, args[0], path_buffer, BUFFER_SIZE, &written_count);
  CHECK_STAT(status);  /* was assert(); throw instead of aborting in release */
  uint32_t col_id = 0;
  uint32_t meta_version = 0;
  STD_CALL(PLDB_create_collection(db, path_buffer, &col_id, &meta_version));
  napi_value collection_ctor;
  status = napi_get_reference_value(env, collection_object_ref, &collection_ctor);
  CHECK_STAT(status);
  napi_value js_col_id;
  napi_value js_meta_version;
  status = napi_create_uint32(env, col_id, &js_col_id);
  CHECK_STAT(status);
  status = napi_create_uint32(env, meta_version, &js_meta_version);
  CHECK_STAT(status);
  /* Collection(db, name, id, metaVersion) */
  napi_value pass_args[] = { this_arg, args[0], js_col_id, js_meta_version };
  napi_value result = NULL;  /* fixed stray ";;"; removed unused `ec` */
  status = napi_new_instance(env, collection_ctor, 4, pass_args, &result);
  if (status == napi_generic_failure) {
    return NULL;  /* an exception is already pending */
  }
  CHECK_STAT(status);
  return result;
}
/* collection(name): return a Collection handle for `name`, creating the
 * underlying collection on first use. */
static napi_value Database_collection(napi_env env, napi_callback_info info) {
  napi_status status;
  napi_value this_arg;
  size_t argc = 1;
  napi_value args[1];
  status = napi_get_cb_info(env, info, &argc, args, &this_arg, NULL);
  CHECK_STAT(status);
  Database* db = NULL;
  status = napi_unwrap(env, this_arg, (void**)&db);  /* fixed cast: napi_unwrap expects void** */
  CHECK_STAT(status);
  if (!check_type(env, args[0], napi_string)) {
    napi_throw_type_error(env, NULL, "The first argument should be a string");
    return NULL;
  }
  napi_value collection_ctor;
  status = napi_get_reference_value(env, collection_object_ref, &collection_ctor);
  CHECK_STAT(status);
  /* NOTE(review): static buffer is not re-entrant; fine if all calls come
   * from the single JS thread — confirm. */
  static char name_buffer[BUFFER_SIZE];
  memset(name_buffer, 0, BUFFER_SIZE);
  size_t value_len = 0;
  status = napi_get_value_string_utf8(env, args[0], name_buffer, BUFFER_SIZE, &value_len);
  CHECK_STAT(status);
  uint32_t col_id = 0;
  uint32_t meta_version = 0;
  int ec = PLDB_get_collection_meta_by_name(db, name_buffer, &col_id, &meta_version);
  if (ec < 0) {
    if (ec == PLDB_ERR_COLLECTION_NOT_FOUND) {
      /* Lazily create the collection on first access. */
      if (PLDB_create_collection(db, name_buffer, &col_id, &meta_version) < 0) {
        napi_throw_error(env, NULL, PLDB_error_msg());
        return NULL;
      }
    } else {
      napi_throw_error(env, NULL, PLDB_error_msg());
      return NULL;
    }
  }
  napi_value js_col_id;
  napi_value js_meta_version;
  status = napi_create_uint32(env, col_id, &js_col_id);
  CHECK_STAT(status);
  status = napi_create_uint32(env, meta_version, &js_meta_version);
  CHECK_STAT(status);
  /* Collection(db, name, id, metaVersion) */
  napi_value pass_args[] = { this_arg, args[0], js_col_id, js_meta_version };
  napi_value result = NULL;  /* fixed stray ";;" */
  status = napi_new_instance(env, collection_ctor, 4, pass_args, &result);
  if (status == napi_generic_failure) {  /* an error is already pending */
    return NULL;
  } else if (status != napi_ok) {
    napi_throw_error(env, NULL, "new collection failed");
    return NULL;
  }
  return result;
}
/* close(): detach and close the native database.
 * Removing the wrap makes any further use fail with "db has been closed". */
static napi_value Database_close(napi_env env, napi_callback_info info) {
  napi_value this_arg;
  napi_status status = napi_get_cb_info(env, info, NULL, NULL, &this_arg, NULL);
  CHECK_STAT(status);

  Database* db = NULL;
  if (napi_remove_wrap(env, this_arg, (void**)&db) != napi_ok) {
    napi_throw_error(env, NULL, "database has been closed");
    return NULL;
  }
  PLDB_close(db);
  return NULL;
}
/* startTransaction(): begin a transaction in AUTO mode. */
static napi_value Database_start_transaction(napi_env env, napi_callback_info info) {
  napi_value this_arg;
  napi_status status = napi_get_cb_info(env, info, NULL, NULL, &this_arg, NULL);
  CHECK_STAT(status);

  Database* db = NULL;
  status = napi_unwrap(env, this_arg, (void**)&db);
  CHECK_STAT(status);

  if (PLDB_start_transaction(db, PLDB_TRANS_AUTO) < 0) {
    napi_throw_error(env, NULL, PLDB_error_msg());
  }
  return NULL;
}
/* commit(): commit the current transaction. */
static napi_value Database_commit(napi_env env, napi_callback_info info) {
  napi_value this_arg;
  napi_status status = napi_get_cb_info(env, info, NULL, NULL, &this_arg, NULL);
  CHECK_STAT(status);

  Database* db = NULL;
  status = napi_unwrap(env, this_arg, (void**)&db);
  CHECK_STAT(status);

  if (PLDB_commit(db) < 0) {
    napi_throw_error(env, NULL, PLDB_error_msg());
  }
  return NULL;
}
/* rollback(): abort the current transaction. */
static napi_value Database_rollback(napi_env env, napi_callback_info info) {
  napi_value this_arg;
  napi_status status = napi_get_cb_info(env, info, NULL, NULL, &this_arg, NULL);
  CHECK_STAT(status);

  Database* db = NULL;
  status = napi_unwrap(env, this_arg, (void**)&db);
  CHECK_STAT(status);

  if (PLDB_rollback(db) < 0) {
    napi_throw_error(env, NULL, PLDB_error_msg());
  }
  return NULL;
}
/* napi finalizer: close the native database when the JS wrapper is GC'd
 * (a manual close() removes the wrap first, so this sees NULL then). */
static void Database_finalize(napi_env env, void* finalize_data, void* finalize_hint) {
  Database* db = (Database*)finalize_data;
  if (db != NULL) {
    PLDB_close(db);
  }
}
/* new Database(path): open the database file at `path` and wrap the native
 * handle on `this`.
 * NOTE(review): the "constuctor" typo is load-bearing — Database_Init
 * registers the class with this symbol — so the name is kept. */
static napi_value Database_constuctor(napi_env env, napi_callback_info info) {
  napi_status status;
  napi_value this_arg;
  size_t argc = 1;
  napi_value args[1];
  status = napi_get_cb_info(env, info, &argc, args, &this_arg, NULL);
  CHECK_STAT(status);
  if (!check_type(env, args[0], napi_string)) {
    napi_throw_type_error(env, NULL, "The first argument should be a string");
    return NULL;
  }
  static char path_buffer[BUFFER_SIZE];
  memset(path_buffer, 0, BUFFER_SIZE);
  size_t written_count = 0;
  status = napi_get_value_string_utf8(env, args[0], path_buffer, BUFFER_SIZE, &written_count);
  CHECK_STAT(status);  /* was assert(); throw instead of aborting in release */
  Database* db = PLDB_open(path_buffer);
  if (db == NULL) {
    napi_throw_type_error(env, NULL, PLDB_error_msg());
    return NULL;
  }
  status = napi_wrap(env, this_arg, db, Database_finalize, 0, NULL);
  if (status != napi_ok) {
    PLDB_close(db);  /* BUG FIX: don't leak the database if wrapping fails */
    return NULL;
  }
  return this_arg;
}
/* napi finalizer: release the DbObjectId owned by a JS ObjectId wrapper. */
static void ObjectId_finalize(napi_env env, void* finalize_data, void* finalize_hint) {
  PLDB_free_object_id((DbObjectId*)finalize_data);
}
/* new ObjectId(external): wrap a native DbObjectId passed as a JS external.
 * The wrap takes ownership; ObjectId_finalize frees it on GC. */
static napi_value ObjectId_constructor(napi_env env, napi_callback_info info) {
  napi_status status;
  napi_value this_arg;
  size_t argc = 1;
  napi_value args[1];
  status = napi_get_cb_info(env, info, &argc, args, &this_arg, NULL);
  CHECK_STAT(status);
  DbObjectId* oid = NULL;
  status = napi_get_value_external(env, args[0], (void**)&oid);
  CHECK_STAT(status);  /* BUG FIX: status was previously ignored */
  if (oid == NULL) {
    napi_throw_error(env, NULL, "internal error: data is NULL");
    return NULL;
  }
  status = napi_wrap(env, this_arg, oid, ObjectId_finalize, NULL, NULL);
  CHECK_STAT(status);
  return this_arg;
}
/* toString(): render the ObjectId as a hex string. */
static napi_value ObjectId_toString(napi_env env, napi_callback_info info) {
  napi_status status;
  napi_value this_arg;
  status = napi_get_cb_info(env, info, NULL, NULL, &this_arg, NULL);
  CHECK_STAT(status);
  DbObjectId* oid = NULL;
  status = napi_unwrap(env, this_arg, (void**)&oid);
  CHECK_STAT(status);
  /* BUG FIX (likely): a MongoDB-style 12-byte ObjectId renders as 24 hex
   * chars, which does not fit the previous 16-byte buffer. Use a roomier
   * stack buffer (also re-entrant, unlike the old static one).
   * TODO confirm the exact length PLDB_object_id_to_hex produces. */
  char buffer[64];
  memset(buffer, 0, sizeof(buffer));
  int ec = PLDB_object_id_to_hex(oid, buffer, sizeof(buffer));
  if (ec < 0) {
    napi_throw_error(env, NULL, PLDB_error_msg());
    return NULL;
  }
  napi_value result;
  status = napi_create_string_utf8(env, buffer, ec, &result);  /* ec = chars written */
  CHECK_STAT(status);
  return result;
}
/* Define a single named method `key` (backed by `cb`) on `exports`. */
static napi_status SetCallbackProp(napi_env env, napi_value exports, const char* key, napi_callback cb) {
  napi_property_descriptor desc = DECLARE_NAPI_METHOD(key, cb);
  return napi_define_properties(env, exports, 1, &desc);
}
/* Register the `Database` class (and its methods) on the module exports. */
static napi_value Database_Init(napi_env env, napi_value exports) {
  napi_status status;
  /* Removed dead code: an unused napi_create_int64(env, 100, &temp). */
  napi_property_descriptor db_props[] = {
    DECLARE_NAPI_METHOD("createCollection", Database_create_collection),
    DECLARE_NAPI_METHOD("collection", Database_collection),
    DECLARE_NAPI_METHOD("close", Database_close),
    DECLARE_NAPI_METHOD("startTransaction", Database_start_transaction),
    DECLARE_NAPI_METHOD("commit", Database_commit),
    DECLARE_NAPI_METHOD("rollback", Database_rollback),
  };
  size_t db_prop_size = countof(db_props);
  napi_value db_result;
  status = napi_define_class(
    env,
    "Database",
    NAPI_AUTO_LENGTH,
    Database_constuctor,
    NULL,
    db_prop_size,
    db_props,
    &db_result
  );
  CHECK_STAT(status);
  status = napi_set_named_property(env, exports, "Database", db_result);
  CHECK_STAT(status);
  return exports;
}
/* Define the Collection class and stash its constructor in
 * collection_object_ref so Database.collection()/createCollection() can
 * instantiate it later.
 * NOTE(review): unlike Database/ObjectId, Collection is not attached to
 * `exports` — presumably instances are only reachable through a Database;
 * confirm this is intentional. */
static napi_value Collection_Init(napi_env env, napi_value exports) {
  napi_status status;
  napi_property_descriptor collection_props[] = {
    DECLARE_NAPI_METHOD("insert", Collection_insert),
    DECLARE_NAPI_METHOD("find", Collection_find),
    DECLARE_NAPI_METHOD("findOne", Collection_find_one),
    DECLARE_NAPI_METHOD("count", Collection_count),
    DECLARE_NAPI_METHOD("update", Collection_update),
    DECLARE_NAPI_METHOD("delete", Collection_delete),
    DECLARE_NAPI_METHOD("deleteAll", Collection_delete_all),
    DECLARE_NAPI_METHOD("drop", Collection_drop),
  };
  size_t collection_prop_size = countof(collection_props);
  napi_value collection_result;
  status = napi_define_class(
    env,
    "Collection",
    NAPI_AUTO_LENGTH,
    Collection_constructor,
    NULL,
    collection_prop_size,
    collection_props,
    &collection_result
  );
  CHECK_STAT(status);
  /* Keep a persistent reference so the constructor survives GC. */
  status = napi_create_reference(
    env,
    collection_result,
    1,
    &collection_object_ref
  );
  CHECK_STAT(status);
  return exports;
}
/* Register the `ObjectId` class on the module exports and keep a persistent
 * reference to its constructor for DbValueToJsValue. */
static napi_value ObjectId_Init(napi_env env, napi_value exports) {
  napi_property_descriptor props[] = {
    DECLARE_NAPI_METHOD("toString", ObjectId_toString),
  };

  napi_value ctor;
  napi_status status = napi_define_class(
    env, "ObjectId", NAPI_AUTO_LENGTH,
    ObjectId_constructor, NULL,
    countof(props), props, &ctor
  );
  CHECK_STAT(status);

  status = napi_create_reference(env, ctor, 1, &objectid_ref);
  CHECK_STAT(status);

  status = napi_set_named_property(env, exports, "ObjectId", ctor);
  CHECK_STAT(status);
  return exports;
}
/* Module entry point: registers the version callback and the
 * ObjectId / Database / Collection classes on `exports`. */
static napi_value Init(napi_env env, napi_value exports) {
  napi_status status;
/* Convenience: define a named function property on exports and assert
 * success (registration failures are programming errors). */
#define REGISTER_CALLBACK(NAME, FUN) \
  status = SetCallbackProp(env, exports, NAME, FUN); \
  assert(status == napi_ok)
  REGISTER_CALLBACK("version", db_version);
  exports = ObjectId_Init(env, exports);
  if (exports == NULL) {
    return NULL;
  }
  exports = Database_Init(env, exports);
  if (exports == NULL) {
    return NULL;
  }
  exports = Collection_Init(env, exports);
  if (exports == NULL) {
    return NULL;
  }
  return exports;
}
NAPI_MODULE(polodb, Init)
| 16,985 |
631 | /*
Copyright 2009-(CURRENT YEAR) <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app.controllers;
import app.services.Greeter;
import app.services.Redirector;
import app.services.RedirectorImpl;
import com.google.inject.Injector;
import org.javalite.activeweb.AppIntegrationSpec;
import org.javalite.activeweb.mocks.GreeterMock;
import org.junit.Before;
import org.junit.Test;
/**
* @author <NAME>
*/
public class Injection2ControllerSpec extends AppIntegrationSpec {

    @Before
    public void before(){
        // Override the Greeter binding with a mock while keeping the real
        // Redirector implementation for this integration spec.
        injector().bind(Greeter.class).to(GreeterMock.class)
                .bind(Redirector.class).to(RedirectorImpl.class).create();
    }

    @Test
    public void shouldInjectRealService(){
        // The controller must render the mock greeter's message, proving the
        // overridden binding was injected into the controller.
        controller("injection").get("index");
        a(responseContent()).shouldBeEqual("The greeting is: Hello from mock greeter");
    }
}
| 440 |
1,313 | /*
* Copyright (c) 2021 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "hardware/flash.h"
#include "pico/unique_id.h"
static_assert(PICO_UNIQUE_BOARD_ID_SIZE_BYTES == FLASH_UNIQUE_ID_SIZE_BYTES, "Board ID size must match flash ID size");
static pico_unique_board_id_t retrieved_id;
/* Runs before main() (constructor attribute): read the flash unique ID once
 * and cache it in `retrieved_id` for the accessors below. */
static void __attribute__((constructor)) _retrieve_unique_id_on_boot(void) {
#if PICO_NO_FLASH
    // The hardware_flash call will panic() if called directly on a NO_FLASH
    // build. Since this constructor is pre-main it would be annoying to
    // debug, so just produce something well-defined and obviously wrong.
    for (int i = 0; i < PICO_UNIQUE_BOARD_ID_SIZE_BYTES; i++)
        retrieved_id.id[i] = 0xee;
#else
    flash_get_unique_id(retrieved_id.id);
#endif
}
/* Copy the board ID cached at boot into *id_out. */
void pico_get_unique_board_id(pico_unique_board_id_t *id_out) {
    *id_out = retrieved_id;
}
/* Render the cached board ID as an upper-case hex string into id_out.
 * At most len-1 characters are written and the result is always
 * NUL-terminated; output is truncated if len is too small. */
void pico_get_unique_board_id_string(char *id_out, uint len) {
    assert(len > 0);
    static const char hex_digits[] = "0123456789ABCDEF";
    const size_t max_chars = PICO_UNIQUE_BOARD_ID_SIZE_BYTES * 2;
    size_t pos = 0;
    // High nibble of each byte first, one hex digit per iteration.
    while (pos < len - 1 && pos < max_chars) {
        uint8_t byte = retrieved_id.id[pos / 2];
        uint8_t nibble = (pos & 1) ? (byte & 0xfu) : (uint8_t)(byte >> 4);
        id_out[pos++] = hex_digits[nibble];
    }
    id_out[pos] = 0;
}
| 551 |
2,151 | <reponame>rhencke/engine
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef IOS_CHROME_APP_APPLICATION_DELEGATE_APP_STATE_H_
#define IOS_CHROME_APP_APPLICATION_DELEGATE_APP_STATE_H_
#import <UIKit/UIKit.h>
@protocol AppNavigation;
@protocol BrowserLauncher;
@protocol BrowserViewInformation;
@protocol TabOpening;
@protocol TabSwitching;
@protocol StartupInformation;
@class DeviceSharingManager;
@class MainApplicationDelegate;
@class MemoryWarningHelper;
@class MetricsMediator;
@class TabModel;
// Represents the application state and responds to application state changes
// and system events.
@interface AppState : NSObject

// AppState must be created with its collaborators; plain init is forbidden.
- (instancetype)init NS_UNAVAILABLE;

// Designated initializer wiring the launcher, startup info and the owning
// application delegate.
- (instancetype)
initWithBrowserLauncher:(id<BrowserLauncher>)browserLauncher
     startupInformation:(id<StartupInformation>)startupInformation
    applicationDelegate:(MainApplicationDelegate*)applicationDelegate
    NS_DESIGNATED_INITIALIZER;

// YES if the user has ever interacted with the application. May be NO if the
// application has been woken up by the system for background work.
@property(nonatomic, readonly) BOOL userInteracted;

// Window for the application, it is not set during the initialization method.
// Set the property before calling methods related to it.
@property(nonatomic, weak) UIWindow* window;

// Saves the launchOptions to be used from -newTabFromLaunchOptions. If the
// application is in background, initialize the browser to basic. If not, launch
// the browser.
// Returns whether additional delegate handling should be performed (call to
// -performActionForShortcutItem or -openURL by the system for example)
- (BOOL)requiresHandlingAfterLaunchWithOptions:(NSDictionary*)launchOptions
                               stateBackground:(BOOL)stateBackground;

// Whether the application is in Safe Mode.
- (BOOL)isInSafeMode;

// Logs duration of the session in the main tab model and records that chrome is
// no longer in cold start.
- (void)willResignActiveTabModel;

// Called when the application is getting terminated. It stops all outgoing
// requests, config updates, clears the device sharing manager and stops the
// mainChrome instance.
- (void)applicationWillTerminate:(UIApplication*)application
           applicationNavigation:(id<AppNavigation>)appNavigation;

// Resumes the session: reinitializing metrics and opening new tab if necessary.
// User sessions are defined in terms of BecomeActive/ResignActive so that
// session boundaries include things like turning the screen off or getting a
// phone call, not just switching apps.
- (void)resumeSessionWithTabOpener:(id<TabOpening>)tabOpener
                       tabSwitcher:(id<TabSwitching>)tabSwitcher;

// Called when going into the background. iOS already broadcasts, so
// stakeholders can register for it directly.
- (void)applicationDidEnterBackground:(UIApplication*)application
                         memoryHelper:(MemoryWarningHelper*)memoryHelper
                  tabSwitcherIsActive:(BOOL)tabSwitcherIsActive;

// Called when returning to the foreground. Resets and uploads the metrics.
// Starts the browser to foreground if needed.
- (void)applicationWillEnterForeground:(UIApplication*)application
                       metricsMediator:(MetricsMediator*)metricsMediator
                          memoryHelper:(MemoryWarningHelper*)memoryHelper
                             tabOpener:(id<TabOpening>)tabOpener
                         appNavigation:(id<AppNavigation>)appNavigation;

// Sets the return value for -didFinishLaunchingWithOptions that determines if
// UIKit should make followup delegate calls such as
// -performActionForShortcutItem or -openURL.
- (void)launchFromURLHandled:(BOOL)URLHandled;

@end
#endif // IOS_CHROME_APP_APPLICATION_DELEGATE_APP_STATE_H_
| 1,210 |
1,738 | <reponame>brianherrera/lumberyard<gh_stars>1000+
/*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
#pragma once
#include <AzCore/std/smart_ptr/unique_ptr.h>
namespace physx
{
    /// Deleter for PhysX objects, which must be destroyed through their
    /// release() method rather than operator delete.
    template <typename T>
    struct PxPtrReleaser
    {
        void operator()(T* ptr)
        {
            if (ptr)
            {
                ptr->release();
            }
        }
    };

    /// unique_ptr alias that releases PhysX objects correctly on destruction.
    template <typename T>
    using unique_ptr = AZStd::unique_ptr<T, PxPtrReleaser<T>>;
}
| 359 |
809 | /**
* @file fpga.c
* @brief Load config to FPGA
* @author <NAME> <<EMAIL>>
* @version
* @date 28.01.2020
*/
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

#include <drivers/fpga.h>
/* Print command-line usage (argv[0] is the program name). */
static void print_help(char **argv) {
    printf("USAGE:\n");
    printf("%s [load file_name | tftp file_name]\n", argv[0]);
}
/* Only fpga0 is supported by this loader. */
#define FPGA_DEFAULT_ID 0
/* 16 MiB staging buffer for the bitstream image (static: too big for stack). */
#define BUF_SZ (16 * 1024 * 1024)
static uint8_t buf[BUF_SZ];
/* Load an FPGA configuration image from the named file onto fpga0.
 * Returns 0 on usage/lookup problems (matching the original behavior),
 * -1 on I/O or configuration errors. */
int main(int argc, char **argv) {
    struct fpga *fpga;
    struct stat st;
    int fd;
    int err;
    char *fname;

    if (argc != 3) {
        print_help(argv);
        return 0;
    }

    fpga = fpga_by_id(FPGA_DEFAULT_ID);
    if (fpga == NULL) {
        printf("fpga%d not found\n", FPGA_DEFAULT_ID);
        return 0;
    }

    fname = argv[2];
    fd = open(fname, O_RDONLY);
    if (fd < 0) {
        printf("Failed to open %s\n", fname);
        return -1;
    }

    err = fstat(fd, &st);
    if (err < 0) {
        printf("Failed to determine file size, exiting\n");
        close(fd); /* BUG FIX: descriptor used to leak on this path */
        return -1;
    } else if (st.st_size > BUF_SZ) {
        printf("File size is to large,"
               " need to be less than %d bytes"
               "(%s is %lld bytes)\n",
               BUF_SZ,
               fname,
               (long long) st.st_size); /* cast: off_t width varies */
        close(fd); /* BUG FIX: descriptor used to leak on this path */
        return -1;
    }

    err = read(fd, buf, BUF_SZ);
    if (err <= 0 || (long long) err != (long long) st.st_size) {
        /* BUG FIX: read errors return -1, which the old `err == 0` check
         * missed; also require the whole image since st.st_size bytes are
         * written to the FPGA below. */
        printf("Failed to read %s\n", fname);
        close(fd);
        return -1;
    }

    err = close(fd);
    if (err != 0) {
        printf("Warning: failed to close %s after read, "
               "try to write config anyway\n",
               fname);
    }

    err = fpga->ops->config_init(fpga);
    if (err != 0) {
        printf("FPGA config init failed with code %d! Exiting\n",
               err);
        return -1;
    }

    err = fpga->ops->config_write(fpga, buf, st.st_size);
    if (err != 0) {
        printf("FPGA config write failed with code %d! Exiting\n",
               err);
        return -1;
    }

    err = fpga->ops->config_complete(fpga);
    if (err != 0) {
        printf("FPGA config finish failed with code %d!\n",
               err);
        return -1;
    }

    return 0;
}
| 850 |
735 | <reponame>wilx/xstream
/*
* Copyright (C) 2004, 2005 <NAME>.
* Copyright (C) 2006, 2007, 2018 XStream Committers.
* All rights reserved.
*
* The software in this package is published under the terms of the BSD
* style license a copy of which has been included with this distribution in
* the LICENSE.txt file.
*
* Created on 03. September 2004 by <NAME>
*/
package com.thoughtworks.xstream.io.xml;
import nu.xom.Element;
public class XomWriterTest extends AbstractDocumentWriterTest<Element> {

    @Override
    protected void setUp() throws Exception {
        // Run the inherited document-writer test suite against a XOM-backed writer.
        super.setUp();
        writer = new XomWriter();
    }

    @Override
    protected DocumentReader createDocumentReaderFor(final Element node) {
        // Read back what was written so the base class can verify round-trips.
        return new XomReader(node);
    }

    // inherits tests from superclass
}
| 256 |
1,607 | /*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* Copyright 2012-2022 the original author or authors.
*/
package org.assertj.core.internal.bigdecimals;
import static org.assertj.core.api.BDDAssertions.then;
import static org.assertj.core.error.ShouldBeLessOrEqual.shouldBeLessOrEqual;
import static org.assertj.core.test.TestData.someInfo;
import static org.assertj.core.util.AssertionsUtil.expectAssertionError;
import java.math.BigDecimal;
import org.assertj.core.internal.BigDecimalsBaseTest;
import org.junit.jupiter.api.Test;
/**
* Tests for <code>{@link BigDecimals#assertIsNotPositive(AssertionInfo, BigDecimal))}</code>.
*
* @author <NAME>
*/
class BigDecimals_assertIsNotPositive_Test extends BigDecimalsBaseTest {

  @Test
  void should_succeed_since_actual_is_not_positive() {
    // Negative values satisfy "not positive".
    numbers.assertIsNotPositive(someInfo(), new BigDecimal(-6));
  }

  @Test
  void should_succeed_since_actual_is_zero() {
    // Zero is not positive either.
    numbers.assertIsNotPositive(someInfo(), BigDecimal.ZERO);
  }

  @Test
  void should_fail_since_actual_is_positive() {
    // WHEN
    AssertionError assertionError = expectAssertionError(() -> numbers.assertIsNotPositive(someInfo(), new BigDecimal(6)));
    // THEN
    then(assertionError).hasMessage(shouldBeLessOrEqual(new BigDecimal(6), BigDecimal.ZERO).create());
  }

  @Test
  void should_fail_since_actual_can_be_positive_according_to_custom_comparison_strategy() {
    // The absolute-value strategy treats -1 as |−1| = 1 > 0, so it fails.
    // WHEN
    AssertionError assertionError = expectAssertionError(() -> numbersWithAbsValueComparisonStrategy.assertIsNotPositive(someInfo(), new BigDecimal(-1)));
    // THEN
    then(assertionError).hasMessage(shouldBeLessOrEqual(new BigDecimal(-1), BigDecimal.ZERO, absValueComparisonStrategy).create());
  }

  @Test
  void should_fail_since_actual_is_positive_according_to_custom_comparison_strategy() {
    // WHEN
    AssertionError assertionError = expectAssertionError(() -> numbersWithAbsValueComparisonStrategy.assertIsNotPositive(someInfo(), BigDecimal.ONE));
    // THEN
    then(assertionError).hasMessage(shouldBeLessOrEqual(BigDecimal.ONE, BigDecimal.ZERO, absValueComparisonStrategy).create());
  }
}
| 849 |
335 | /*
* Copyright 2019-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.vividus.bdd.steps.ui.web;
import java.util.List;
import org.jbehave.core.annotations.When;
import org.openqa.selenium.WebElement;
import org.vividus.bdd.monitor.TakeScreenshotOnFailure;
import org.vividus.bdd.steps.ui.validation.IBaseValidations;
import org.vividus.selenium.element.Checkbox;
import org.vividus.ui.action.search.Locator;
import org.vividus.ui.web.action.CheckboxAction;
import org.vividus.ui.web.action.IMouseActions;
@TakeScreenshotOnFailure
public class CheckboxSteps
{
    private static final String CHECKBOX = "Checkbox";

    private final IBaseValidations baseValidations;
    private final IMouseActions mouseActions;

    public CheckboxSteps(IBaseValidations baseValidations, IMouseActions mouseActions)
    {
        this.baseValidations = baseValidations;
        this.mouseActions = mouseActions;
    }

    /**
     * Performs action on all the checkboxes found by locator
     * @param checkBoxAction Actions to be performed (CHECK, UNCHECK)
     * @param checkboxesLocator Locator to locate checkboxes
     */
    @When("I $checkboxAction all checkboxes located by `$checkboxesLocator`")
    public void changeStateOfAllCheckboxes(CheckboxAction checkBoxAction, Locator checkboxesLocator)
    {
        List<WebElement> elements = baseValidations.assertIfElementsExist("Checkboxes", checkboxesLocator);
        for (WebElement element : elements)
        {
            changeCheckboxState(createCheckbox(element), checkBoxAction);
        }
    }

    /**
     * Performs action on checkbox found by locator
     * @param checkBoxAction Actions to be performed (CHECK, UNCHECK)
     * @param checkboxLocator Locator to locate checkboxes
     */
    @When("I $checkBoxAction checkbox located by `$checkboxLocator`")
    public void changeStateOfCheckbox(CheckboxAction checkBoxAction, Locator checkboxLocator)
    {
        WebElement element = baseValidations.assertIfElementExists(CHECKBOX, checkboxLocator);
        changeCheckboxState(createCheckbox(element), checkBoxAction);
    }

    // Wraps a raw WebElement into a Checkbox; null and already-wrapped elements pass through unchanged.
    private Checkbox createCheckbox(WebElement checkbox)
    {
        if (checkbox == null || checkbox instanceof Checkbox)
        {
            return (Checkbox) checkbox;
        }
        return new Checkbox(checkbox);
    }

    // Clicks the checkbox (or its label when the box itself is hidden), but only
    // when its current state differs from the one the action requests.
    private void changeCheckboxState(Checkbox checkbox, CheckboxAction action)
    {
        if (checkbox == null || checkbox.getWrappedElement() == null)
        {
            return;
        }
        if (checkbox.isSelected() != action.isSelected())
        {
            mouseActions.click(getClickableElement(checkbox));
        }
    }

    private WebElement getClickableElement(Checkbox checkbox)
    {
        if (checkbox.isDisplayed())
        {
            return checkbox;
        }
        return checkbox.getLabelElement();
    }
}
| 1,130 |
435 | <filename>pycon-us-2016/videos/guillermo-perez-python-as-a-configuration-language-pycon-2016.json
{
"copyright_text": "Standard YouTube License",
"description": "Speaker: <NAME>\u00e9rez\n\nThe enormous size of Facebook's infrastructure and its \"\"Move Fast\"\" culture pose a challenge for the configuration framework, requiring a safe, fast and usable system to deploy settings to hundreds of thousands of servers.\n\nPython plays a special role in this framework, being the tool for generating the configuration, paired with thrift for enhanced schema validation and type-safety.\n\nSlides can be found at: https://speakerdeck.com/pycon2016 and https://github.com/PyCon/2016-slides",
"duration": 1829,
"id": 5107,
"language": "eng",
"recorded": "2016-05-30",
"related_urls": [
"https://github.com/PyCon/2016-slides",
"https://speakerdeck.com/pycon2016"
],
"slug": "guillermo-perez-python-as-a-configuration-language-pycon-2016",
"speakers": [
"<NAME>"
],
"tags": [],
"thumbnail_url": "https://i.ytimg.com/vi/cLWUD9VnLmA/maxresdefault.jpg",
"title": "Python as a configuration language",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=cLWUD9VnLmA"
}
]
}
| 438 |
371 | <reponame>wuchunfu/EngineX
package com.risk.riskmanage.system.model.request;
import lombok.Data;
/** Request payload carrying the credentials submitted with a login request. */
@Data
public class LoginInfoParam {
    private String account;// login account name
    private String password;// login password (plain text as received from the client)
}
| 80 |
357 | <reponame>ChangXiaoning/jalangi2
# Copyright 2013 Samsung Information Systems America, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: <NAME>
from subprocess import call
from os.path import exists
import shutil
import os
from time import sleep
from sys import platform
import zipfile
from urllib import urlretrieve
def npm_install():
print "---> installing node modules"
if os.system(" ".join(['npm', 'install'])) != 0:
print "npm install failed"
exit(1)
def call_fail(l):
if call(l) != 0:
print "{} failed".format(" ".join(l))
exit(1)
def del_dir(d):
if not exists(d):
return
if platform == "win32":
res = os.system('rmdir /q /s {}'.format(d))
else:
res = os.system('rm -rf {}'.format(d))
if res != 0:
print "failed to delete directory {}".format(res)
exit(1)
# Fresh install: drop any previously installed node modules first, then
# reinstall everything declared in package.json.
if exists("node_modules"):
    shutil.rmtree("node_modules")
npm_install()
#urlretrieve("https://raw.github.com/Constellation/escodegen/1.1.0/escodegen.browser.js","node_modules/escodegen/escodegen.browser.js");
print "---> Installation successful."
print "---> run \'npm test\' to make sure all tests pass"
| 594 |
318 | <filename>sys/winnt/win32api.h
/* SCCS Id: @(#)win32api.h 3.4 $Date: 2002/07/24 08:25:21 $ */
/* Copyright (c) NetHack PC Development Team 1996 */
/* NetHack may be freely redistributed. See license for details. */
/*
 * This header file is used to clear up some discrepancies with Visual C
 * header files & NetHack before including windows.h, so all NetHack
 * files should include "win32api.h" rather than <windows.h>.
 */
/* Undo NetHack macros that collide with the Windows SDK, and force the
 * SDK's expected 8-byte struct packing while its headers are parsed. */
# if defined(_MSC_VER)
# undef strcmpi
# undef min
# undef max
# pragma warning(disable:4142) /* Warning, Benign redefinition of type */
# pragma pack(8)
# endif
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <commctrl.h>
/* Restore the project's default packing after the SDK headers. */
# if defined(_MSC_VER)
# pragma pack()
# endif
/*win32api.h*/
| 279 |
453 | <filename>libc/newlib/libc/sys/sysvi386/dup2.c
#include <fcntl.h>
/* Duplicate fd1 onto fd2, POSIX dup2 semantics.
 *
 * Bug fix: when fd1 == fd2, POSIX requires returning fd2 without closing
 * it; the original closed fd2 first, so fcntl then failed with EBADF.
 * Probe fd1 with F_GETFL so an invalid descriptor still yields -1/EBADF. */
int
dup2 (int fd1, int fd2) {
	if (fd1 == fd2)
		return (fcntl (fd1, F_GETFL) < 0 ? -1 : fd2);
	close (fd2);	/* ignore errors, if any */
	return (fcntl (fd1, F_DUPFD, fd2));
}
| 91 |
403 | package io.craft.atom.rpc;
import io.craft.atom.io.Channel;
import io.craft.atom.io.IoHandler;
/**
* @author mindwind
* @version 1.0, Sep 22, 2014
*/
/**
 * No-op base implementation of {@link IoHandler} for RPC transports: every
 * callback is an empty method, so subclasses override only the events they
 * care about.
 */
public abstract class RpcIoHandler implements IoHandler {

	/** Attribute key under which the RPC channel object is stored. */
	static final String RPC_CHANNEL = "rpc.channel";

	@Override
	public void channelOpened(Channel<byte[]> channel) {}

	@Override
	public void channelClosed(Channel<byte[]> channel) {}

	@Override
	public void channelIdle(Channel<byte[]> channel) {}

	@Override
	public void channelRead(Channel<byte[]> channel, byte[] bytes) {}

	@Override
	public void channelFlush(Channel<byte[]> channel, byte[] bytes) {}

	@Override
	public void channelWritten(Channel<byte[]> channel, byte[] bytes) {}

	@Override
	public void channelThrown(Channel<byte[]> channel, Exception cause) {}
}
| 252 |
389 | <reponame>kschoos/vivado-library
/******************************************************************************/
/* */
/* PmodCLS.c -- PmodCLS Example Projects */
/* */
/******************************************************************************/
/* Author: <NAME> */
/* Copyright 2016, Digilent Inc. */
/******************************************************************************/
/* File Description: */
/* */
/* This file contains code for running a demonstration of the PmodCLS when */
/* used with the PmodCLS IP core. */
/* */
/******************************************************************************/
/* Revision History: */
/* */
/* 06/15/2016(MikelSkreen): Created */
/* 08/17/2017(jPeyron): Updated */
/* 11/03/2017(artvvb): 2016.4 Maintenance */
/* 02/12/2018(atangzwj): Validated for Vivado 2017.4 */
/* */
/******************************************************************************/
/***************************** Include Files ****************************/
#include "PmodCLS.h"
/************************** Function Definitions ************************/
/*
 * Shared XSpi configuration used for every PmodCLS instance.  The base
 * address is left 0 here and patched in CLS_begin() before the SPI core
 * is initialized (see CLS_SPIInit).
 * NOTE(review): the field meanings follow the member order of the linked
 * xspi driver's XSpi_Config struct -- confirm against xspi.h for the
 * toolchain in use before editing individual values.
 */
XSpi_Config CLSConfig =
{
   0,
   0,
   1,
   0,
   1,
   8,
   0,
   0,
   0,
   0,
   0
};
/* --------------------------------------------------------------------*/
/** CLS_DisplayMode
**
** Parameters:
** charNumber - parameter for selecting the wrapping type of the line: to 16
** or 40 characters
**
** Return Value:
** None
**
** Errors:
** None
**
** Description:
** This function wraps the line at 16 or 40 characters
-----------------------------------------------------------------------*/
/* Select line wrapping: charNumber == 1 wraps at 16 characters, any other
 * value wraps at 40.  Sends the 4-byte escape sequence ESC [ <n> <cmd>. */
void CLS_DisplayMode(PmodCLS *InstancePtr, uint8_t charNumber) {
   uint8_t cmd[] = {CLS_ESC, CLS_BRACKET, '0', CLS_DISP_MODE_CMD};
   if (charNumber != 1) {
      cmd[2] = '1'; // wrap at 40 characters instead of 16
   }
   CLS_WriteSpi(InstancePtr, cmd, 4);
}
/* --------------------------------------------------------------------*/
/** CLS_CursorModeSet
**
** Parameters:
** setCursor - boolean parameter through which the cursor is set on or off
** setBlink - boolean parameter through which the blink option is set on or
** off
**
** Return Value:
** None
**
** Errors:
** None
**
** Description:
** This function turns the cursor and the blinking option on or off,
** according to the user's selection.
-----------------------------------------------------------------------*/
/* Configure the cursor: mode '0' = cursor and blink off, '1' = cursor on
 * with blink off, '2' = cursor and blink on.  The same 4-byte escape
 * sequence is sent in every case, only the mode character differs. */
void CLS_CursorModeSet(PmodCLS *InstancePtr, uint8_t setCursor,
      uint8_t setBlink) {
   uint8_t mode;
   if (setCursor == 0) {
      mode = '0';                      // display cursor off, blink off
   } else if ((setCursor == 1) && (setBlink == 0)) {
      mode = '1';                      // cursor on, blink off
   } else {
      mode = '2';                      // cursor on, blink on
   }
   uint8_t cmd[] = {CLS_ESC, CLS_BRACKET, mode, CLS_CURSOR_MODE_CMD};
   CLS_WriteSpi(InstancePtr, cmd, 4);
}
/* ------------------------------------------------------------------- */
/** CLS_DisplayClear
**
** Parameters:
** None
**
** Return Value:
** None
**
** Errors:
** None
**
** Description:
** This function clears the display and returns the cursor home
-----------------------------------------------------------------------*/
/* Clear the whole display and return the cursor to the home position. */
void CLS_DisplayClear(PmodCLS *InstancePtr) {
   u8 dispClr[] = {CLS_ESC, CLS_BRACKET, '0', CLS_DISP_CLR_CMD};
   // Clear the display and returns the cursor home
   CLS_WriteSpi(InstancePtr, dispClr, 4);
}
/*---------------------------------------------------------------------*/
/** void CLS_begin(PmodCLS *InstancePtr, u32 SPI_Address)
**
** Parameters:
** InstancePtr: A PmodCLS object to start
** SPI_Address: The Base address of the PmodCLS SPI
**
** Return Value:
** None
**
** Errors:
** None
**
** Description:
** Initialize the PmodCLS.
-----------------------------------------------------------------------*/
void CLS_begin(PmodCLS *InstancePtr, u32 SPI_Address) {
   // Patch the shared (global) config with this device's base address
   // before the SPI core is configured from it.
   CLSConfig.BaseAddress = SPI_Address;
   // NOTE(review): the XST_* status returned by CLS_SPIInit is discarded,
   // so a failed init is silent here.
   CLS_SPIInit(&InstancePtr->CLSSpi);
}
/*---------------------------------------------------------------------*/
/** CLS_end(void)
**
** Parameters:
** InstancePtr - PmodCLS object to stop
**
** Return Value:
** None
**
** Errors:
** None
**
** Description:
** Stops the device
-----------------------------------------------------------------------*/
void CLS_end(PmodCLS *InstancePtr) {
   // Stop the SPI core; the display itself keeps its last content.
   XSpi_Stop(&InstancePtr->CLSSpi);
}
/*---------------------------------------------------------------------*/
/** CLS_SPIInit
**
** Parameters:
** InstancePtr - the PmodCLS object to communicate with
**
** Return Value:
** None
**
** Errors:
** None
**
** Description:
** Initializes the PmodCLS SPI.
-----------------------------------------------------------------------*/
int CLS_SPIInit(XSpi *SpiInstancePtr) {
int Status;
Status = XSpi_CfgInitialize(SpiInstancePtr, &CLSConfig,
CLSConfig.BaseAddress);
if (Status != XST_SUCCESS) {
return XST_FAILURE;
}
u32 options = XSP_MASTER_OPTION | XSP_MANUAL_SSELECT_OPTION;
Status = XSpi_SetOptions(SpiInstancePtr, options);
if (Status != XST_SUCCESS) {
return XST_FAILURE;
}
Status = XSpi_SetSlaveSelect(SpiInstancePtr, 1);
if (Status != XST_SUCCESS) {
return XST_FAILURE;
}
/*
* Start the SPI driver so that the device is enabled.
*/
XSpi_Start(SpiInstancePtr);
/*
* Disable Global interrupt to use polled mode operation
*/
XSpi_IntrGlobalDisable(SpiInstancePtr);
return XST_SUCCESS;
}
/* ------------------------------------------------------------ */
/** CLS_Readbyte
**
** Parameters:
** InstancePtr - the PmodCLS object to communicate with
**
** Return Value:
** byte - Byte read from XSpi
**
** Errors:
** None
**
** Description:
** Reads a single byte over SPI
-----------------------------------------------------------------------*/
u8 CLS_ReadByte(PmodCLS *InstancePtr) {
u8 byte;
XSpi_Transfer(&InstancePtr->CLSSpi, &byte, &byte, 1);
return byte;
}
/* ------------------------------------------------------------ */
/** CLS_WriteByte
**
** Parameters:
** InstancePtr - PmodCLS object to send to
** cmd - Command to send
**
** Return Value:
** None
**
** Errors:
** None
**
** Description:
** Writes a single byte over SPI
-----------------------------------------------------------------------*/
void CLS_WriteByte(PmodCLS *InstancePtr, u8 cmd) {
   // Transmit one byte; NULL receive buffer discards the incoming byte.
   XSpi_Transfer(&InstancePtr->CLSSpi, &cmd, NULL, 1);
}
/* ------------------------------------------------------------ */
/** CLS_WriteSpi
**
**   Parameters:
**      PmodCLS *InstancePtr - the PmodCLS object to communicate with
**      u8 *wData            - the data to write
**      int nData            - the number of data bytes to write
**
**   Return Value:
**      None
**
**   Errors:
**      None
**
**   Description:
**      Writes the byte array to the chip via SPI.  The receive buffer
**      is 0 (NULL), so any bytes clocked back are discarded.
-----------------------------------------------------------------------*/
void CLS_WriteSpi(PmodCLS *InstancePtr, u8 *wData, int nData) {
   XSpi_Transfer(&InstancePtr->CLSSpi, wData, 0, nData);
}
/* --------------------------------------------------------------------*/
/** uint8_t CLS_WriteStringAtPos
**
**   Parameters:
**      idxRow - the line where the string is written
**               (NOTE(review): the doc historically says 0 or 1 but the
**               code accepts 0..2 -- confirm the display's line count)
**      idxCol - the start column for the string to be written: 0 to 39
**      strLn  - the string to be written
**
**   Return Value:
**      u8 - CLS_LCDS_ERR_SUCCESS - The action completed successfully
**         - a combination of the following errors(OR-ed):
**            - CLS_LCDS_ERR_ARG_COL_RANGE - The argument is not within 0, 39 range
**            - CLS_LCDS_ERR_ARG_ROW_RANGE - The argument is not within 0, 2 range
**
**   Errors:
**      see returned values
**
**   Description:
**      This function writes a string at a specified position.  It first
**      sends a cursor-positioning escape sequence (row, two column
**      digits), then the string itself, truncated so it never runs past
**      column 39.
**
-----------------------------------------------------------------------*/
u8 CLS_WriteStringAtPos(PmodCLS *InstancePtr, uint8_t idxRow, uint8_t idxCol,
      char *strLn) {
   uint8_t bResult = CLS_LCDS_ERR_SUCCESS;
   if (idxRow > 2) {
      bResult |= CLS_LCDS_ERR_ARG_ROW_RANGE;
   }
   if (idxCol > 39) {
      bResult |= CLS_LCDS_ERR_ARG_COL_RANGE;
   }
   if (bResult == CLS_LCDS_ERR_SUCCESS) {
      // Separate the position digits in order to send them, useful when the
      // position is greater than 10
      uint8_t firstDigit = idxCol % 10;
      uint8_t secondDigit = idxCol / 10;
      uint8_t length = strlen(strLn);
      // NOTE(review): length and lengthToPrint are u8, so a string longer
      // than 255 bytes wraps before the 40-column truncation below.
      uint8_t lengthToPrint = length + idxCol;
      uint8_t stringToSend[] = { CLS_ESC, CLS_BRACKET, idxRow + '0', ';',
            secondDigit + '0', firstDigit + '0', CLS_CURSOR_POS_CMD };
      if (lengthToPrint > 40) {
         // Truncate the length of the string
         // If it's greater than the positions number of a line
         length = 40 - idxCol;
      }

      CLS_WriteSpi(InstancePtr, (u8*) stringToSend, 7);
      CLS_WriteSpi(InstancePtr, (u8*) strLn, length);
   }
   return bResult;
}
/*---------------------------------------------------------------------*/
/** CLS_ReadSpi
**
** Synopsis:
** CLS_ReadSpi(&CLSobj, 0x3A, &bytearray, 5);
**
** Parameters:
** PmodCLS *InstancePtr - the PmodCLS object to communicate with
** u8 reg - the starting register to read from
** u8 *rData - the byte array to read into
** int nData - the number of data bytes to read
**
** Return Value:
** None
**
** Errors:
** None
**
** Description:
** Reads data in through SPI. Data is stored into rData.
-----------------------------------------------------------------------*/
void CLS_ReadSpi(PmodCLS *InstancePtr, u8 *rData, int nData) {
u8 bytearray[nData];
XSpi_Transfer(&InstancePtr->CLSSpi, bytearray, bytearray, nData);
memcpy(rData, &bytearray[0], nData);
}
| 4,508 |
369 | {
"externalDevices": {
"/AfterglowAx1Device": {
"deviceName": "microsoft0",
"server": "localhost:3884",
"descriptor": "external-devices/device-descriptors/Afterglow_AX1.json"
}
}
}
| 123 |
348 | {"nom":"Vero","circ":"1ère circonscription","dpt":"Corse-du-Sud","inscrits":377,"abs":222,"votants":155,"blancs":2,"nuls":1,"exp":152,"res":[{"nuance":"REG","nom":"<NAME>","voix":50},{"nuance":"LR","nom":"<NAME>","voix":38},{"nuance":"REM","nom":"Mme <NAME>","voix":27},{"nuance":"EXD","nom":"M. <NAME>","voix":14},{"nuance":"FI","nom":"M. <NAME>","voix":8},{"nuance":"EXD","nom":"Mme <NAME>","voix":7},{"nuance":"COM","nom":"Mme <NAME>","voix":7},{"nuance":"EXG","nom":"Mme <NAME>","voix":1},{"nuance":"DIV","nom":"M. <NAME>","voix":0},{"nuance":"DIV","nom":"M. <NAME>","voix":0},{"nuance":"DIV","nom":"M. <NAME>","voix":0}]} | 256 |
12,252 | <filename>testsuite/performance/tests/src/main/java/org/keycloak/performance/iteration/RandomIterator.java<gh_stars>1000+
package org.keycloak.performance.iteration;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
/**
*
* @author tkyjovsk
*/
/**
 * Endless iterator that samples the backing list with replacement: every
 * call to {@link #next()} returns a uniformly random element.
 *
 * @author tkyjovsk
 */
public class RandomIterator<T> implements Iterator<T> {

    List<T> list;

    public RandomIterator(List<T> iteratedList) {
        this.list = iteratedList;
    }

    @Override
    public boolean hasNext() {
        // Sampling with replacement never exhausts the list.
        return true;
    }

    @Override
    public T next() {
        int index = ThreadLocalRandom.current().nextInt(list.size());
        return list.get(index);
    }
}
| 244 |
379 | import numpy as np
from skrf_qtwidgets.analyzers import base_analyzer
import os
import skrf
from skrf_qtwidgets.cfg import example_data_dir
class DummyResource(object):
    """Stand-in for a VISA resource: accepts any constructor arguments and
    exposes a no-op close(), so the dummy analyzer needs no hardware."""

    def __init__(self, *args, **kwargs):
        pass

    def close(self):
        # Nothing to release.
        pass
class Analyzer(base_analyzer.Analyzer):
    """Dummy VNA driver that serves canned data from an example .s2p file,
    letting the Qt widgets be exercised without a physical analyzer."""

    DEFAULT_VISA_ADDRESS = "GPIB0::16::INSTR"
    NAME = "Analyzer"
    NPORTS = 2

    def __init__(self, resource):
        # The VISA resource string is ignored; use a no-op stand-in instead.
        self.resource = DummyResource()

    def measure_twoport_ntwk(self, ports=(1, 2), sweep=True, **kwargs):
        """Return the canned 2-port network (sweep flag is ignored)."""
        # return self.get_twoport(ports, sweep=sweep)
        return self.get_snp_network(ports)

    def measure_oneport_ntwk(self, port=1, sweep=True, **kwargs):
        """Return the canned 1-port network for `port` (sweep flag is ignored)."""
        # return self.get_oneport(port, sweep=sweep)
        return self.get_snp_network(port)

    def get_snp_network(self, ports, **kwargs):
        """Return S-parameters from the bundled example file.

        :param ports: a single int (1 or 2) or a pair (1, 2) / (2, 1)
        :raises Exception: for any other port selection
        """
        ntwk = skrf.Network(os.path.join(example_data_dir, 'ring slot array simulation.s2p'))
        if type(ports) == int:
            ports = [ports]

        if len(ports) == 1:
            port = ports[0]
            if port == 1:
                return ntwk.s11
            elif port == 2:
                return ntwk.s22
            else:
                return None
        elif len(ports) == 2:
            port1, port2 = ports
            if port1 == 1 and port2 == 2:
                return ntwk
            elif port1 == 2 and port2 == 1:
                # Bug fix: the original re-tested "port2 == 2 and port1 == 1"
                # (already handled above) and never returned the swapped
                # network.  Flip the port ordering, including the
                # off-diagonal terms.
                s = np.zeros_like(ntwk.s)
                s[:, 0, 0] = ntwk.s[:, 1, 1]
                s[:, 1, 0] = ntwk.s[:, 0, 1]
                s[:, 0, 1] = ntwk.s[:, 1, 0]
                s[:, 1, 1] = ntwk.s[:, 0, 0]
                flipped = ntwk.copy()
                flipped.s = s
                return flipped
            else:
                raise Exception("ports must be 1 or 2 or 1,2 or 2,1")
        else:
            raise Exception("ports must be 1 or 2 or 1,2 or 2,1")

    def get_switch_terms(self, ports=(1, 2), sweep=True):
        """Return canned data standing in for switch terms.

        Bug fix: the original called itself and recursed forever.  Serve the
        canned network instead so calibration flows can proceed.
        TODO(review): confirm the expected return format (forward, reverse)
        against base_analyzer.Analyzer.
        """
        return self.get_snp_network(ports)

    def get_list_of_traces(self, channel=None):
        """Return the fixed set of trace descriptors this dummy offers."""
        traces = [
            {"label": "S11"},
            {"label": "S21"},
            {"label": "S12"},
            {"label": "S22"},
        ]
        return traces

    def get_traces(self, traces=None, channel=None, sweep=False, **kwargs):
        """Return skrf one-port networks for the requested trace descriptors.

        Unknown labels are skipped; returns None when no traces are requested.
        """
        # Avoid the original's mutable default argument.
        if not traces:
            return

        name_prefix = kwargs.get('name_prefix', "")
        if name_prefix:
            name_prefix += " - "
        ntwk = skrf.Network(os.path.join(example_data_dir, 'ring slot array simulation.s2p'))

        result = []
        for trace in traces:
            if trace["label"] in ["S11", "S21", "S12", "S22"]:
                t = getattr(ntwk, trace["label"].lower())
                t.name = name_prefix + trace["label"]
                result.append(t)
        return result
| 1,400 |
852 | import FWCore.ParameterSet.Config as cms
# EDProducer that unpacks pat::TriggerObjectStandAlone objects from the
# 'selectedPatTrigger' collection, using the HLT TriggerResults product.
unpackedPatTrigger = cms.EDProducer(
    "PATTriggerObjectStandAloneUnpacker"
    , patTriggerObjectsStandAlone = cms.InputTag( 'selectedPatTrigger' )
    , triggerResults = cms.InputTag( 'TriggerResults::HLT' )
)
| 96 |
495 | <gh_stars>100-1000
// Copyright (c) 2017-2021 Dr. <NAME> and <NAME>
// Please see LICENSE for license or visit https://github.com/taocpp/json/
#ifndef TAO_JSON_INTERNAL_HEXDUMP_HPP
#define TAO_JSON_INTERNAL_HEXDUMP_HPP
#include <sstream>
#include <string>
#include <utility>
namespace tao::json::internal
{
   // Append the uppercase hex encoding of every byte of v to os.
   template< typename T >
   void hexdump( std::ostream& os, const T& v )
   {
      static const char digits[] = "0123456789ABCDEF";
      for( const auto b : v ) {
         const auto byte = static_cast< unsigned char >( b );
         os.put( digits[ byte >> 4 ] );
         os.put( digits[ byte & 0x0F ] );
      }
   }

   // Convenience overload: return the uppercase hex encoding of v as a string.
   template< typename T >
   [[nodiscard]] std::string hexdump( const T& v )
   {
      std::ostringstream oss;
      internal::hexdump( oss, v );
      return std::move( oss ).str();
   }

}  // namespace tao::json::internal
#endif
| 363 |
1,281 | <filename>pytorch_toolbelt/zoo/__init__.py
from .classification import *
from .segmentation import *
| 34 |
5,169 | {
"name": "OCHuds",
"version": "1.3.0",
"summary": "A bunch of huds.",
"description": "A bunch of huds: Spinner, Progress, Message and Animated.",
"homepage": "https://github.com/OpenCraft/OCHuds",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/OpenCraft/OCHuds.git",
"tag": "1.3.0"
},
"social_media_url": "https://twitter.com/Morbin_",
"platforms": {
"ios": "8.0"
},
"source_files": "OCHuds/Classes/**/*",
"resource_bundles": {
"OCHuds": [
"OCHuds/**/*.{storyboard,xib,xcassets,json,imageset,png}"
]
},
"frameworks": "UIKit",
"dependencies": {
"OCExtensions": [
]
},
"pushed_with_swift_version": "3.0"
}
| 347 |
10,876 | <filename>ports/presentmon/vcpkg.json
{
"name": "presentmon",
"version-semver": "1.7.0",
"description": "PresentMon is a tool to capture and analyze ETW events related to swap chain presentation on Windows.",
"supports": "windows & !uwp",
"features": {
"tools": {
"description": "Build tool PresentMon"
}
}
}
| 118 |
389 | <gh_stars>100-1000
{
"id":"contentfragmentlist-45d72c7ea2",
":type": "core/wcm/components/contentfragmentlist/v1/contentfragmentlist",
"items": []
}
| 74 |
3,001 | package com.github.florent37.expectanim.listener;
import com.github.florent37.expectanim.ExpectAnim;
/**
* Created by florentchampigny on 21/02/2017.
*/
public interface AnimationStartListener {
    /**
     * Callback fired when the given animation starts.
     *
     * @param expectAnim the animation that has started
     */
    void onAnimationStart(ExpectAnim expectAnim);
}
| 86 |
6,270 | <gh_stars>1000+
[
{
"type": "feature",
"category": "CloudWatchEvents",
"description": "Add support for SageMaker Model Builder Pipelines Targets to EventBridge"
},
{
"type": "feature",
"category": "CustomerProfiles",
"description": "This release adds an optional parameter named FlowDefinition in PutIntegrationRequest."
},
{
"type": "feature",
"category": "EventBridge",
"description": "Add support for SageMaker Model Builder Pipelines Targets to EventBridge"
},
{
"type": "feature",
"category": "IoTWireless",
"description": "Support tag-on-create for WirelessDevice."
},
{
"type": "feature",
"category": "TranscribeService",
"description": "Amazon Transcribe now supports tagging words that match your vocabulary filter for batch transcription."
}
] | 345 |
335 | <gh_stars>100-1000
{
"word": "Interrogatory",
"definitions": [
"A written question which is formally put to one party in a case by another party and which must be answered."
],
"parts-of-speech": "Noun"
} | 84 |
372 | ##
# The MIT License (MIT)
#
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##
# enable logging of modules under test
import logging
import pytest
from mp.mpfexp import MpFileExplorer
from mp.mpfexp import MpFileExplorerCaching
_mpfexp_inst = None
def pytest_addoption(parser):
    """
    Add some custom parameters.

    :param parser: Parser object
    """
    # Serial/telnet connection string used by the board fixtures.
    parser.addoption("--testcon", action="store",
                     default="ser:/dev/ttyUSB0",
                     help="Connection string to use for tests")
    # Switch the mpfexp fixture to the caching file explorer.
    parser.addoption("--caching", action="store_true", default=False,
                     help="Enable caching of MpFileExplorer")
    # Skip wiping the board filesystem before the tests run.
    parser.addoption("--nosetup", action="store_true", default=False,
                     help="Skip initial board setup")
@pytest.fixture(scope="module")
def mpsetup(request):
    """
    Initial setup. Mainly clear out everything from FS except "boot.py" and "port_config.py"
    :param request: Request object
    """
    if not request.config.getoption("--nosetup"):
        # Connect to the board and upload a small helper module that
        # recursively deletes every file/dir except the two protected ones.
        fe = MpFileExplorer(request.config.getoption("--testcon"))
        fe.puts(
            "pytest.py",
            """
def rm(path):
    import uos as os
    files = os.listdir(path)
    for f in files:
        if f not in ['boot.py', 'port_config.py']:
            try:
                os.remove(path + '/' + f)
            except:
                try:
                    os.rmdir(path + '/' + f)
                except:
                    rm(path + '/' + f)
""",
        )
        # Execute the helper on the board, wiping from the filesystem root.
        fe.exec_("import pytest")
        fe.exec_("pytest.rm('.')")
@pytest.fixture(scope="function")
def mpfexp(request):
    """
    Fixture providing connected MpFileExplorer instance
    :param request: Request object
    :return: MpFileExplorer instance
    """
    global _mpfexp_inst

    def teardown():
        # Close the serial/telnet connection after each test.
        _mpfexp_inst.close()

    # --caching swaps in the caching variant of the file explorer.
    if request.config.getoption("--caching"):
        _mpfexp_inst = MpFileExplorerCaching(request.config.getoption("--testcon"))
    else:
        _mpfexp_inst = MpFileExplorer(request.config.getoption("--testcon"))

    request.addfinalizer(teardown)

    return _mpfexp_inst
# Log everything (modules under test included) to test.log.
logging.basicConfig(
    format="%(asctime)s\t%(levelname)s\t%(message)s",
    filename="test.log",
    level=logging.DEBUG,
)
| 1,313 |
575 | <filename>chrome/browser/media/router/chrome_media_router_factory.cc
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/media/router/chrome_media_router_factory.h"
#include "build/build_config.h"
#include "chrome/browser/media/router/media_router_feature.h"
#include "chrome/browser/profiles/incognito_helpers.h"
#include "components/keyed_service/content/browser_context_dependency_manager.h"
#include "components/media_router/browser/media_router_dialog_controller.h"
#include "content/public/browser/browser_context.h"
#if defined(OS_ANDROID)
#include "chrome/browser/media/android/router/chrome_media_router_client.h"
#include "components/media_router/browser/android/media_router_android.h"
#include "components/media_router/browser/android/media_router_dialog_controller_android.h"
#else
#include "chrome/browser/media/router/event_page_request_manager_factory.h"
#include "chrome/browser/media/router/mojo/media_router_desktop.h"
#endif
using content::BrowserContext;
namespace media_router {

namespace {

// Process-wide singleton storage for the factory.
base::LazyInstance<ChromeMediaRouterFactory>::DestructorAtExit service_factory =
    LAZY_INSTANCE_INITIALIZER;

}  // namespace

// static
ChromeMediaRouterFactory* ChromeMediaRouterFactory::GetInstance() {
  return &service_factory.Get();
}

// static
void ChromeMediaRouterFactory::DoPlatformInit() {
#if defined(OS_ANDROID)
  InitChromeMediaRouterJavaClient();
  // The desktop (Views) version of this is in ChromeBrowserMainExtraPartsViews
  // because we can't reach into Views from this directory.
  media_router::MediaRouterDialogController::SetGetOrCreate(
      base::BindRepeating([](content::WebContents* web_contents) {
        DCHECK(web_contents);
        MediaRouterDialogController* controller = nullptr;
        // This call does nothing if the controller already exists.
        MediaRouterDialogControllerAndroid::CreateForWebContents(web_contents);
        controller =
            MediaRouterDialogControllerAndroid::FromWebContents(web_contents);
        return controller;
      }));
#endif
}

ChromeMediaRouterFactory::ChromeMediaRouterFactory() {
#if !defined(OS_ANDROID)
  // On desktop the Media Router talks to its extension via the event page
  // request manager, so that service must be created first.
  DependsOn(EventPageRequestManagerFactory::GetInstance());
#endif
}

ChromeMediaRouterFactory::~ChromeMediaRouterFactory() = default;

content::BrowserContext* ChromeMediaRouterFactory::GetBrowserContextToUse(
    content::BrowserContext* context) const {
  // Incognito shares the MediaRouter of its original (non-incognito) profile.
  return chrome::GetBrowserContextRedirectedInIncognito(context);
}

KeyedService* ChromeMediaRouterFactory::BuildServiceInstanceFor(
    BrowserContext* context) const {
  // Callers must not reach this factory when the feature is disabled.
  if (!MediaRouterEnabled(context)) {
    NOTREACHED();
    return nullptr;
  }
  MediaRouterBase* media_router = nullptr;
#if defined(OS_ANDROID)
  media_router = new MediaRouterAndroid();
#else
  media_router = new MediaRouterDesktop(context);
#endif
  media_router->Initialize();
  return media_router;
}

void ChromeMediaRouterFactory::BrowserContextShutdown(
    content::BrowserContext* context) {
  // Notify the MediaRouter (which uses the normal/base profile) of incognito
  // profile shutdown.
  if (context->IsOffTheRecord()) {
    MediaRouter* router =
        static_cast<MediaRouter*>(GetServiceForBrowserContext(context, false));
    if (router)
      router->OnIncognitoProfileShutdown();
  }
  BrowserContextKeyedServiceFactory::BrowserContextShutdown(context);
}

}  // namespace media_router
| 1,146 |
742 | <gh_stars>100-1000
package org.support.project.knowledge.vo.api;
/** Group info extended with membership statistics for API responses. */
public class GroupDetail extends Group {

    /** Number of users belonging to this group. */
    private int userCount = 0;

    /**
     * @return the userCount
     */
    public int getUserCount() {
        return userCount;
    }

    /**
     * @param userCount the userCount to set
     */
    public void setUserCount(int userCount) {
        this.userCount = userCount;
    }

}
| 193 |
563 | """
This script
- creates a folder named "cifar10_c" under the same directory as 'CIFAR-10-C'
- extracts images from .npy files and save them as .jpg.
"""
import os
import sys
import numpy as np
import os.path as osp
from PIL import Image
from dassl.utils import mkdir_if_missing
def extract_and_save(images, labels, level, dst):
    """Save the 10,000 test images of one corruption-intensity level as JPEGs.

    Images land in dst/<label zero-padded to 3>/<index zero-padded to 5>.jpg.
    level is the 0-based intensity (0-4); each level occupies a contiguous
    block of 10,000 rows in `images`/`labels`.
    """
    # level denotes the corruption intensity level (0-based)
    assert 0 <= level <= 4
    offset = level * 10000
    for i in range(10000):
        idx = offset + i
        im = Image.fromarray(images[idx])
        label = int(labels[idx])
        category_dir = osp.join(dst, str(label).zfill(3))
        mkdir_if_missing(category_dir)
        save_path = osp.join(category_dir, str(i + 1).zfill(5) + ".jpg")
        im.save(save_path)
def main(npy_folder):
    """Convert the CIFAR-10-C / CIFAR-100-C .npy archives in `npy_folder`
    into a per-corruption, per-level JPEG folder tree next to the source.

    :param npy_folder: path to the downloaded 'CIFAR-10-C' or 'CIFAR-100-C'
        folder containing labels.npy plus one .npy file per corruption.
    """
    npy_folder = osp.abspath(osp.expanduser(npy_folder))
    dataset_cap = osp.basename(npy_folder)
    assert dataset_cap in ["CIFAR-10-C", "CIFAR-100-C"]

    if dataset_cap == "CIFAR-10-C":
        dataset = "cifar10_c"
    else:
        dataset = "cifar100_c"

    if not osp.exists(npy_folder):
        print('The given folder "{}" does not exist'.format(npy_folder))
        # Bug fix: the original only printed and then crashed in
        # os.listdir() below; bail out cleanly instead.
        return

    root = osp.dirname(npy_folder)
    im_folder = osp.join(root, dataset)
    mkdir_if_missing(im_folder)

    dirnames = os.listdir(npy_folder)
    dirnames.remove("labels.npy")
    if "README.txt" in dirnames:
        dirnames.remove("README.txt")
    # The CIFAR-C datasets ship exactly 19 corruption .npy files.
    assert len(dirnames) == 19

    labels = np.load(osp.join(npy_folder, "labels.npy"))

    for dirname in dirnames:
        corruption = dirname.split(".")[0]
        corruption_folder = osp.join(im_folder, corruption)
        mkdir_if_missing(corruption_folder)

        npy_filename = osp.join(npy_folder, dirname)
        images = np.load(npy_filename)
        # 5 intensity levels x 10,000 test images per corruption file.
        assert images.shape[0] == 50000

        for level in range(5):
            dst = osp.join(corruption_folder, str(level + 1))
            mkdir_if_missing(dst)
            print('Saving images to "{}"'.format(dst))
            extract_and_save(images, labels, level, dst)
if __name__ == "__main__":
    # sys.argv[1] contains the path to CIFAR-10-C or CIFAR-100-C
    main(sys.argv[1])
| 950 |
854 | __________________________________________________________________________________________________
sample 100 ms submission
class Solution:
    def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:
        """Return every path from node 0 to node n-1 in a DAG.

        Bottom-up dynamic programming: for each node (processed in reverse
        order) collect all paths from that node to the target, reusing the
        already-computed path lists of its successors.
        """
        n = len(graph)
        paths_from = {}
        for node in range(n - 2, -1, -1):
            collected = []
            for nxt in graph[node]:
                if nxt == n - 1:
                    collected.append([node, nxt])
                else:
                    # Prepend the current node to every known path of `nxt`.
                    collected.extend([node] + tail for tail in paths_from[nxt])
            paths_from[node] = collected
        return paths_from[0]
__________________________________________________________________________________________________
sample 14784 kb submission
class Solution:
    def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:
        """Enumerate all source-to-target paths in a DAG via recursive DFS."""
        target = len(graph) - 1

        def dfs(node):
            # A path ending at the target contributes just [target].
            if node == target:
                return [[target]]
            results = []
            for neighbor in graph[node]:
                for suffix in dfs(neighbor):
                    results.append([node] + suffix)
            return results

        return dfs(0)
__________________________________________________________________________________________________
| 579 |
2,054 | <reponame>w136111526/zqcnn<gh_stars>1000+
#include "ZQ_CNN_Net.h"
#include "ZQ_CNN_MTCNN_AspectRatio.h"
#if defined(_WIN32)
#include "ZQlib/ZQ_PutTextCN.h"
#endif
#include <vector>
#include <iostream>
#include "opencv2/opencv.hpp"
#include "ZQ_CNN_CompileConfig.h"
#if ZQ_CNN_USE_BLAS_GEMM
#if __ARM_NEON
#include <openblas/cblas.h>
#else
#include <openblas/cblas.h>
#pragma comment(lib,"libopenblas.lib")
#endif
#elif ZQ_CNN_USE_MKL_GEMM
#include "mkl/mkl.h"
#pragma comment(lib,"mklml.lib")
#else
#pragma comment(lib,"ZQ_GEMM.lib")
#endif
using namespace ZQ;
using namespace std;
using namespace cv;
// Draws every detection onto `image`: red rectangle for confident hits
// (score > 0.7), green otherwise, plus the five landmark points in yellow.
// Boxes whose `exist` flag is false are only reported on stdout.
static void Draw(cv::Mat &image, const std::vector<ZQ_CNN_BBox>& thirdBbox)
{
    for (const ZQ_CNN_BBox& box : thirdBbox)
    {
        if (!box.exist)
        {
            printf("not exist!\n");
            continue;
        }
        const cv::Scalar color = (box.score > 0.7)
            ? cv::Scalar(0, 0, 255)
            : cv::Scalar(0, 255, 0);
        cv::rectangle(image, cv::Point(box.col1, box.row1), cv::Point(box.col2, box.row2), color, 2, 8, 0);
        // ppoint holds 5 x-coordinates followed by 5 y-coordinates.
        for (int num = 0; num < 5; num++)
            circle(image, cv::Point(box.ppoint[num] + 0.5f, box.ppoint[num + 5] + 0.5f), 1, cv::Scalar(0, 255, 255), -1);
    }
}
// Single-image demo: loads a hand image, runs the 3-stage MTCNN hand
// detector, draws the detections, writes "resultdet.jpg" and shows a window.
// Returns EXIT_SUCCESS / EXIT_FAILURE.
int run_fig()
{
    int num_threads = 1;
#if ZQ_CNN_USE_BLAS_GEMM
    printf("set openblas thread_num = %d\n", num_threads);
    openblas_set_num_threads(num_threads);
#elif ZQ_CNN_USE_MKL_GEMM
    mkl_set_num_threads(num_threads);
#endif
#if defined(_WIN32)
    Mat image0 = cv::imread("data/hand6.jpg", 1);
#else
    Mat image0 = cv::imread("../../data/11.jpg", 1);
#endif
    if (image0.empty())
    {
        cout << "empty image\n";
        return EXIT_FAILURE;
    }
    //cv::resize(image0, image0, cv::Size(), 2, 2);
    if (image0.channels() == 1)
        cv::cvtColor(image0, image0, CV_GRAY2BGR);
    //cv::convertScaleAbs(image0, image0, 2.0);
    /* TIPS: when finding tiny faces for very big image, gaussian blur is very useful for Pnet*/
    // NOTE(review): every branch below sets run_blur = false, so the
    // pre-blur is effectively disabled; only kernel_size/sigma differ.
    // Confirm this is intentional before relying on the blur path.
    bool run_blur = true;
    int kernel_size = 3, sigma = 2;
    if (image0.cols * image0.rows >= 2500 * 1600)
    {
        run_blur = false;
        kernel_size = 5;
        sigma = 3;
    }
    else if (image0.cols * image0.rows >= 1920 * 1080)
    {
        run_blur = false;
        kernel_size = 3;
        sigma = 2;
    }
    else
    {
        run_blur = false;
    }
    if (run_blur)
    {
        // Benchmark the blur cost over many iterations before applying it once.
        cv::Mat blur_image0;
        int nBlurIters = 1000;
        double t00 = omp_get_wtime();
        for (int i = 0; i < nBlurIters; i++)
            cv::GaussianBlur(image0, blur_image0, cv::Size(kernel_size, kernel_size), sigma, sigma);
        double t01 = omp_get_wtime();
        printf("[%d] blur cost %.3f secs, 1 blur costs %.3f ms\n", nBlurIters, t01 - t00, 1000 * (t01 - t00) / nBlurIters);
        cv::GaussianBlur(image0, image0, cv::Size(kernel_size, kernel_size), sigma, sigma);
    }
    std::vector<ZQ_CNN_BBox> thirdBbox;
    ZQ_CNN_MTCNN_AspectRatio mtcnn;
    std::string result_name;
    mtcnn.TurnOnShowDebugInfo();
    //mtcnn.SetLimit(300, 50, 20);
    bool gesture = true;  // NOTE(review): unused in this function
    int thread_num = 1;
    bool special_handle_very_big_face = false;
    result_name = "resultdet.jpg";
    // Load the three cascade stages (Pnet/Rnet/Onet equivalents for hands);
    // model paths differ between Windows and the out-of-tree Linux build.
#if defined(_WIN32)
    if (!mtcnn.Init("model/handdet1-dw20-fast.zqparams", "model/handdet1-dw20-fast.nchwbin",
        "model/handdet2-dw24-fast.zqparams", "model/handdet2-dw24-fast.nchwbin",
        "model/handdet3-dw48-fast.zqparams", "model/handdet3-dw48-fast.nchwbin",
        thread_num
#else
    if (!mtcnn.Init("../../model/handdet1-dw20-fast.zqparams", "../../model/handdet1-dw20-fast.nchwbin",
        "../../model/handdet2-dw24-fast.zqparams", "../../model/handdet2-dw24-fast.nchwbin",
        "../../model/handdet3-dw48-fast.zqparams", "../../model/handdet3-dw48-fast.nchwbin",
        thread_num
#endif
    ))
    {
        cout << "failed to init!\n";
        return EXIT_FAILURE;
    }
    mtcnn.SetPara(image0.cols, image0.rows, 80, 0.3, 0.4, 0.5, 0.5, 0.5, 0.5, 0.709, 3, 20, 4, special_handle_very_big_face);
    mtcnn.TurnOffShowDebugInfo();
    //mtcnn.TurnOnShowDebugInfo();
    // Timing loop; debug info is printed only for the middle iteration.
    int iters = 1;
    double t1 = omp_get_wtime();
    for (int i = 0; i < iters; i++)
    {
        if (i == iters / 2)
            mtcnn.TurnOnShowDebugInfo();
        else
            mtcnn.TurnOffShowDebugInfo();
        if (!mtcnn.Find(image0.data, image0.cols, image0.rows, image0.step[0], thirdBbox))
        {
            cout << "failed to find face!\n";
            //return EXIT_FAILURE;
            continue;
        }
    }
    double t2 = omp_get_wtime();
    printf("total %.3f s / %d = %.3f ms\n", t2 - t1, iters, 1000 * (t2 - t1) / iters);
    namedWindow("result");
    Draw(image0, thirdBbox);
    imwrite(result_name, image0);
    imshow("result", image0);
    waitKey(0);
    return EXIT_SUCCESS;
}
// Webcam demo: grabs frames from camera 0, runs the MTCNN hand detector per
// frame, draws results, records the annotated stream to "cam1.mp4" and shows
// a preview window. ESC exits. Returns EXIT_SUCCESS / EXIT_FAILURE.
int run_cam()
{
    int num_threads = 1;
#if ZQ_CNN_USE_BLAS_GEMM
    printf("set openblas thread_num = %d\n", num_threads);
    openblas_set_num_threads(num_threads);
#elif ZQ_CNN_USE_MKL_GEMM
    mkl_set_num_threads(num_threads);
#endif
    std::vector<ZQ_CNN_BBox> thirdBbox;
    std::vector<ZQ_CNN_BBox> thirdBbox_last;  // fallback when detection fails
    ZQ_CNN_MTCNN_AspectRatio mtcnn;
    std::string result_name;
    //mtcnn.TurnOnShowDebugInfo();
    //mtcnn.SetLimit(300, 50, 20);
    bool gesture = true;  // NOTE(review): unused in this function
    int thread_num = 3;
    bool special_handle_very_big_face = false;
    result_name = "resultdet.jpg";  // NOTE(review): unused in this function
#if defined(_WIN32)
    if (!mtcnn.Init("model/handdet1-dw20-fast.zqparams", "model/handdet1-dw20-fast.nchwbin",
        "model/handdet2-dw24-fast.zqparams", "model/handdet2-dw24-fast.nchwbin",
        "model/handdet3-dw48-fast.zqparams", "model/handdet3-dw48-fast.nchwbin",
        thread_num
#else
    if (!mtcnn.Init("../../model/handdet1-dw20-fast.zqparams", "../../model/handdet1-dw20-fast.nchwbin",
        "../../model/handdet2-dw24-fast.zqparams", "../../model/handdet2-dw24-fast.nchwbin",
        "../../model/handdet3-dw48-fast.zqparams", "../../model/handdet3-dw48-fast.nchwbin",
        thread_num
#endif
    ))
    {
        cout << "failed to init!\n";
        return EXIT_FAILURE;
    }
    //cv::VideoCapture cap("video_20190507_170946.mp4");
    cv::VideoCapture cap(0);
    cv::VideoWriter writer;
    cv::Mat image0;
    cv::namedWindow("show");
    while (true)
    {
        cap >> image0;
        if (image0.empty())
            break;
        // Halve the resolution for speed.
        cv::resize(image0, image0, cv::Size(), 0.5, 0.5);
        // NOTE(review): the frame is blurred twice in a row — presumably a
        // stronger smoothing for the first-stage net; confirm it is intended.
        cv::GaussianBlur(image0, image0, cv::Size(3, 3), 2, 2);
        cv::GaussianBlur(image0, image0, cv::Size(3, 3), 2, 2);
        // Lazily open the recorder once the (resized) frame size is known.
        if (!writer.isOpened())
            writer.open("cam1.mp4", CV_FOURCC('D', 'I', 'V', 'X'), 25, cv::Size(image0.cols, image0.rows));
        // Re-applied each frame; harmless for a fixed-size camera stream.
        mtcnn.SetPara(image0.cols, image0.rows, 80, 0.6, 0.7, 0.8, 0.5, 0.5, 0.5, 0.709, 3, 20, 4, special_handle_very_big_face);
        mtcnn.TurnOnShowDebugInfo();
        static int fr_id = 0;
        // On detection failure, reuse the previous frame's boxes to avoid
        // flicker in the rendered output.
        if (!mtcnn.Find(image0.data, image0.cols, image0.rows, image0.step[0], thirdBbox))
        {
            thirdBbox = thirdBbox_last;
        }
        if (thirdBbox.size() == 0)
        {
            printf("%d\n", fr_id);
        }
        fr_id++;
        Draw(image0, thirdBbox);
        thirdBbox_last = thirdBbox;
        imshow("show", image0);
        writer << image0;
        int key = cv::waitKey(10);
        if (key == 27)  // ESC
            break;
    }
    return EXIT_SUCCESS;
}
int main()
{
    // Entry point: run the webcam demo by default; switch to run_fig() for
    // the single-image demo.
    //return run_fig();
    return run_cam();
}
| 3,253 |
454 | <gh_stars>100-1000
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to train and eval representations from
unsupervised pretraining of InfoGraph model
"""
import os
import sys
import json
import glob
import pickle
import argparse
from collections import namedtuple
import numpy as np
import logging
import multiprocessing as mp
logging.basicConfig(
format='%(asctime)s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S',
level=logging.INFO)
import paddle
import paddle.nn as nn
import pgl
from pahelix.datasets.inmemory_dataset import InMemoryDataset
from src.model import InfoGraph, InfoGraphCriterion
from src.featurizer import MoleculeCollateFunc
from classifier import eval_on_classifiers
def train(args, model, criterion, optimizer, dataset,
          collate_fn, epoch_id):
    """Model training for one epoch and log the average loss.

    Args:
        args: CLI namespace (uses batch_size, num_workers, log_interval).
        model: InfoGraph model returning (global_repr, node_encoding).
        criterion: InfoGraphCriterion; when criterion.prior is set, a uniform
            prior sample is fed in as an extra argument.
        optimizer: paddle optimizer bound to model.parameters().
        dataset: InMemoryDataset exposing get_data_loader().
        collate_fn: collator yielding (graphs, pos_mask, neg_mask) batches.
        epoch_id: current epoch index, used for logging only.

    Returns:
        Mean loss over all batches of this epoch.
    """
    data_gen = dataset.get_data_loader(
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        shuffle=True,
        collate_fn=collate_fn)
    model.train()
    list_loss = []
    total_data, trained_data, batch_id = len(dataset), 0, 0
    for graphs, pos_mask, neg_mask in data_gen:
        graphs = graphs.tensor()
        pos_mask = paddle.to_tensor(pos_mask, 'float32')
        neg_mask = paddle.to_tensor(neg_mask, 'float32')
        global_repr, encoding = model(graphs)
        if criterion.prior:
            # Uniform prior sample for the adversarial regularization term.
            prior = paddle.uniform(
                [args.batch_size, model.embedding_dim],
                min=0.0, max=1.0)
            loss = criterion(graphs, global_repr, encoding,
                             pos_mask, neg_mask, prior)
        else:
            loss = criterion(graphs, global_repr, encoding,
                             pos_mask, neg_mask)
        loss.backward()
        optimizer.step()
        optimizer.clear_grad()
        list_loss.append(loss.numpy())
        trained_data += graphs.num_graph
        # Log the most recent batch loss every log_interval batches.
        if batch_id % args.log_interval == 0:
            logging.info('Epoch %d [%d/%d] train/loss:%f' % \
                (epoch_id, trained_data, total_data, list_loss[-1]))
        batch_id += 1
    return np.mean(list_loss)
def save_embedding(args, model, dataset, collate_fn, epoch_id):
    """Save the embeddings of all the testing data for multiple classifier evaluation.

    Runs the encoder in eval mode over the whole dataset and pickles
    {'emb': ndarray, 'y': ndarray} to <emb_dir>/epoch_<epoch_id>.pkl.
    An epoch_id of -1 is used by the caller for the untrained baseline.
    """
    data_gen = dataset.get_data_loader(
        batch_size=args.batch_size,
        num_workers=1,
        shuffle=False,
        collate_fn=collate_fn)
    model.eval()
    graph_emb_lst, graph_label_lst = [], []
    # Collator yields (graphs, labels, valids); 'valids' is unused here.
    for graphs, labels, valids in data_gen:
        graphs = graphs.tensor()
        graphs_emb, _ = model(graphs)
        graphs_emb = graphs_emb.numpy()
        graph_emb_lst.extend(list(graphs_emb))
        graph_label_lst.extend(list(labels))
    # Truncate to dataset size — presumably guards against a padded final
    # batch from the loader; verify against MoleculeCollateFunc.
    n = len(dataset)
    merge_data = {
        'emb': np.array(graph_emb_lst[:n]),
        'y': np.array(graph_label_lst[:n])
    }
    pkl = os.path.join(args.emb_dir, 'epoch_%s.pkl' % epoch_id)
    with open(pkl, 'wb') as f:
        pickle.dump(merge_data, f)
def parallel_eval(data_queue, pkl_lst):
    """The target function to run a multi-classifier evaluation for given embeddings.

    Args:
        data_queue: multiprocessing queue; one result dict is put per pickle.
        pkl_lst: paths of embedding pickles, each holding {'emb': ..., 'y': ...}.
    """
    for pkl in pkl_lst:
        with open(pkl, 'rb') as f:
            data = pickle.load(f)
        # Evaluate several classifiers on the frozen embeddings, with
        # hyper-parameter search enabled.
        metrics = eval_on_classifiers(data['emb'], data['y'], search=True)
        logging.info('{}: logreg ({:.4f}), svc ({:.4f}), '
                     'linearsvc ({:.4f}), randomforest ({:.4f})'.format(
                     os.path.basename(pkl), metrics['logreg_acc'],
                     metrics['svc_acc'], metrics['linearsvc_acc'],
                     metrics['randomforest_acc']))
        sys.stdout.flush()
        res = {
            'pkl': os.path.basename(pkl),
            'logreg': metrics['logreg_acc'],
            'svc': metrics['svc_acc'],
            'linearsvc': metrics['linearsvc_acc'],
            'randomforest': metrics['randomforest_acc'],
        }
        data_queue.put(res)
def save_eval_metric(res_collect, emb_dir):
    """Aggregate per-checkpoint classifier accuracies and dump them to JSON.

    Args:
        res_collect: dict of parallel lists keyed by metric name (plus the
            string-valued 'pkl' column). Mutated in place: 'best_<k>',
            'avg_acc' and 'best_avg_acc' entries are added.
        emb_dir: embedding directory; the report is written next to it as
            '<basename(emb_dir)>_eval.json'.
    """
    base = os.path.basename(emb_dir)
    json_file = os.path.join(os.path.dirname(emb_dir), '%s_eval.json' % base)
    # Snapshot the keys first: 'best_*' entries are added while iterating.
    keys = list(res_collect.keys())
    acc_keys = []
    for k in keys:
        # Skip string-valued columns such as the checkpoint filename ('pkl').
        if not isinstance(res_collect[k][0], str):
            acc_keys.append(k)
            best_acc = max(res_collect[k])
            res_collect['best_%s' % k] = best_acc
            print('Best {} acc: {:.4f}'.format(k, round(best_acc, 4)))
    # Guard against an input with no numeric columns (the original indexed
    # acc_keys[0] unconditionally and would raise IndexError).
    if acc_keys:
        res_collect['avg_acc'] = []
        for i in range(len(res_collect[acc_keys[0]])):
            sum_acc = sum(res_collect[k][i] for k in acc_keys)
            res_collect['avg_acc'].append(sum_acc / len(acc_keys))
        best_acc = max(res_collect['avg_acc'])
        res_collect['best_avg_acc'] = best_acc
        print('Best average acc: {:.4f}'.format(round(best_acc, 4)))
    with open(json_file, 'w') as f:
        json.dump(res_collect, f)
def main(args):
    """Unsupervised InfoGraph pretraining entry point.

    Loads the processed dataset and model config, then alternates one
    training epoch with a weight checkpoint and an embedding dump (the dumps
    are scored later by the 'eval' task).
    """
    with open(args.config, 'r') as f:
        config = json.load(f)
    logging.info('Load data ...')
    dataset = InMemoryDataset(npz_data_path=os.path.join(
        args.root, args.dataset, 'processed'))
    # Training collator: contrastive pos/neg masks, no labels needed.
    collate_fn = MoleculeCollateFunc(
        config['atom_names'],
        config['bond_names'],
        with_graph_label=False,
        with_pos_neg_mask=True)
    # Evaluation collator: labels needed, masks not.
    eval_collate_fn = MoleculeCollateFunc(
        config['atom_names'],
        config['bond_names'],
        with_graph_label=True,
        with_pos_neg_mask=False)
    logging.info("Data loaded.")
    logging.info("Train Examples: %s" % len(dataset))
    sys.stdout.flush()
    if args.emb_dir is not None:
        # pylint: disable=E1123
        os.makedirs(args.emb_dir, exist_ok=True)
    if args.model_dir is not None:
        # pylint: disable=E1123
        os.makedirs(args.model_dir, exist_ok=True)
    model = InfoGraph(config)
    criterion = InfoGraphCriterion(config)
    optimizer = paddle.optimizer.Adam(
        learning_rate=args.lr,
        parameters=model.parameters())
    # Epoch -1: embeddings from the randomly initialized encoder (baseline).
    save_embedding(args, model, dataset, eval_collate_fn, -1)
    for epoch_id in range(args.max_epoch):
        train_loss = train(args, model, criterion, optimizer,
                           dataset, collate_fn, epoch_id)
        logging.info('Epoch %d, train/loss: %f' % (epoch_id, train_loss))
        pdparams = os.path.join(args.model_dir, 'epoch_%d.pdparams' % epoch_id)
        paddle.save(model.state_dict(), pdparams)
        save_embedding(args, model, dataset, eval_collate_fn, epoch_id)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='InfoGraph pretrain')
    # 'train' runs pretraining; 'eval' scores previously dumped embeddings.
    parser.add_argument("--task_name", type=str, default='train')
    parser.add_argument("--use_cuda", action='store_true', default=False)
    # parser.add_argument("--data_loader", action='store_true', default=False)
    parser.add_argument("--config", type=str, default='model_configs/unsupervised_pretrain_config.json')
    parser.add_argument("--batch_size", type=int, default=256)
    parser.add_argument("--num_workers", type=int, default=4)
    parser.add_argument("--max_epoch", type=int, default=100)
    parser.add_argument("--log_interval", type=int, default=100)
    parser.add_argument("--lr", type=float, default=0.001)
    parser.add_argument("--root", type=str, default='data')
    parser.add_argument("--dataset", type=str, default='mutag')
    parser.add_argument("--model_dir", type=str, required=True)
    parser.add_argument("--emb_dir", type=str)
    parser.add_argument("--eval_workers", type=int, default=10)
    parser.add_argument("--dont_save_emb", action='store_true', default=False)
    args = parser.parse_args()
    if args.task_name == 'train':
        main(args)
    elif args.task_name == 'eval':
        # Shard the saved embedding pickles round-robin across worker
        # processes, collect their metric dicts through a queue, then write
        # the aggregated report.
        pkl_lst = glob.glob(os.path.join(args.emb_dir, '*.pkl'))
        proc_manager = mp.Manager()
        data_queue = proc_manager.Queue()
        proc_lst = []
        for i in range(args.eval_workers):
            ids = [j for j in range(len(pkl_lst)) if j % args.eval_workers == i]
            sub_pkl_lst = [pkl_lst[j] for j in ids]
            p = mp.Process(target=parallel_eval, args=(data_queue, sub_pkl_lst))
            p.start()
            proc_lst.append(p)
        # Drain exactly one result per pickle before joining the workers.
        res_collect = dict()
        to_recv = len(pkl_lst)
        while to_recv > 0:
            res = data_queue.get()
            to_recv -= 1
            for k, v in res.items():
                if k not in res_collect:
                    res_collect[k] = [v]
                else:
                    res_collect[k].append(v)
        save_eval_metric(res_collect, args.emb_dir)
        for p in proc_lst:
            p.join()
| 4,203 |
434 | <reponame>aqyoung/scan-Fox-scan<gh_stars>100-1000
#!/usr/bin/env python
#!-*- coding:utf-8 -*-
import json
import threading
import multiprocessing
import requests
from flask import Flask,render_template,request,session,jsonify,redirect
from libs.action import SqlMapAction,Spider_Handle,Save_Success_Target
from libs.func import Tools
from libs.models import MySQLHander
from libs.action import Action
from libs.proxy import run_proxy
app = Flask(__name__)
# Module-level DB handle; the route handlers appear to open their own
# connections instead — verify whether this instance is actually used.
mysql = MySQLHander()
app.config.update(dict(
    DEBUG=True,  # NOTE(review): debug mode must be disabled in production
    SECRET_KEY="546sdafwerxcvSERds549fwe8rdxfsaf98we1r2"
))
app.config.from_envvar('AUTOSQLI_SETTINGS', silent=True)
# NOTE(review): hard-coded secrets ship with the source; prefer loading them
# from the environment or an untracked config file.
app.secret_key = "34$#4564dsfaWEERds/*-()^=sadfWE89SA"
SqlMap = SqlMapAction()
@app.route('/')
def index():
    # Landing page.
    return render_template('index.html')
@app.route('/index')
def settings_views():
    # Alias of '/' — serves the same landing page.
    return render_template('index.html')
@app.route('/settings', methods=['GET', 'POST'])
def settings_settings_info():
    # Static settings/info page; no POST handling yet despite accepting POST.
    return render_template('info.html')
#TODO user=session['user']
@app.route('/action/startask', methods=['GET', 'POST'])
def action_startask():
    """Render the new-task form (GET) or launch a scan (POST).

    POST clears previous tasks, converts the submitted form into sqlmap
    options, starts the spider and result-collector threads in the
    background (no join — the request returns immediately), then redirects
    to the task overview page.

    Note: the original version had two unreachable `return` statements after
    the redirect; they have been removed.
    """
    if request.method == 'GET':
        return render_template('startask.html')
    else:
        # Delete tasks left over from a previous run.
        SqlMap.DeleteAllTask()
        # Convert the submitted form fields into sqlmap options.
        options = Tools.do_sqlmap_options(request.form)
        # Update the global scanner settings.
        SqlMap.update_settings(request)
        # Background threads: spider the target, then collect successes.
        t = threading.Thread(target=Spider_Handle,args=(request.form['target'],options,))
        t.start()
        t = threading.Thread(target=Save_Success_Target,args=())
        t.start()
        return redirect('/action/showtask')
@app.route('/action/showtask', methods=['GET'])
def action_showtask():
    """Task overview page plus its AJAX endpoints.

    ?action=refresh     -> JSON list of all tasks from the DB.
    ?type=log&taskid=   -> proxied sqlmap scan log as JSON.
    ?type=payload&taskid= -> proxied sqlmap scan data as JSON.
    Otherwise renders the HTML page.
    """
    data = {"number":0, "data":[]}
    # NOTE(review): request.args.has_key is Python-2-era Werkzeug API;
    # modern code would use `'action' in request.args` — confirm the
    # deployment's Python/Flask versions before changing.
    if request.args.has_key('action') and request.args['action'] == "refresh":
        mysql = MySQLHander()
        sql = "select taskid,target,success,status from task"
        mysql.query(sql)
        source = mysql.fetchAllRows()
        # Collect the URLs currently being scanned.
        num = 0
        for line in source:
            num += 1
            data['data'].append({"taskid":line[0], "target":line[1], "success":line[2], "status":line[3]})
        data['number'] = num
        mysql.close()
        return json.dumps(data)
    if request.args.has_key('type'):
        if request.args['type'] == "log":
            # Proxy the log from the sqlmap REST API server.
            sqlaction = SqlMapAction()
            server = sqlaction._get_server()
            url = "{0}/scan/{1}/log".format(server, request.args['taskid'])
            return json.dumps(Tools.getjsondata(url))
        if request.args['type'] == "payload":
            # Proxy the injection data from the sqlmap REST API server.
            sqlaction = SqlMapAction()
            server = sqlaction._get_server()
            url = "{0}/scan/{1}/data".format(server, request.args['taskid'])
            return json.dumps(Tools.getjsondata(url))
    return render_template('showtask.html')
@app.route('/action/showdetail', methods=['GET'])
def action_showjson():
    """Return the detail of a single task (target, data, success, status) as JSON.

    Fixes over the original: the SQL statement was missing its `from task`
    clause, and `status` was read from index 4 although only four columns
    are selected (valid indices 0-3).
    """
    data = {"target":"", "data":"", "success":0, "status":"running"}
    if request.args.has_key('taskid'):
        taskid = request.args['taskid']
        # NOTE(review): string-formatted SQL is injectable via 'taskid';
        # switch to a parameterized query if MySQLHander supports one.
        sql = "select target,data,success,status from task where taskid = '{0}'".format(taskid)
        mysql = MySQLHander()
        mysql.query(sql)
        resource = mysql.fetchOneRow()
        data = {"target":resource[0], "data":resource[1], "success":resource[2], "status":resource[3]}
        mysql.close()
    return json.dumps(data)
@app.route('/action/stoptask')
def action_status():
    """Stop one or more tasks; expects 'taskidlist' as a comma-separated string."""
    raw = request.args['taskidlist']
    if raw == "":
        return json.dumps({"error":"no taskid"})
    # A comma past position 0 means several ids were submitted.
    if raw.find(",") > 0:
        taskidlist = raw.split(',')
    else:
        taskidlist = [raw]
    return json.dumps({"status":SqlMap.StopTask(taskidlist)})
if __name__ == '__main__':
    # Run the Flask development server (debug flag comes from app.config).
    app.run()
790 | <gh_stars>100-1000
# This is just a kludge so that bdist_rpm doesn't guess wrong about the
# distribution name and version, if the egg_info command is going to alter
# them, another kludge to allow you to build old-style non-egg RPMs, and
# finally, a kludge to track .rpm files for uploading when run on Python <2.5.
from distutils.command.bdist_rpm import bdist_rpm as _bdist_rpm
import sys, os
class bdist_rpm(_bdist_rpm):
    """Setuptools override of distutils' bdist_rpm.

    Kludges (per the header comment): keep the distribution name/version in
    sync with egg_info, allow non-egg RPM builds, and track produced .rpm
    files for upload on Python < 2.5. This is legacy Python-2 code.
    """
    def initialize_options(self):
        _bdist_rpm.initialize_options(self)
        # Extra option: build an old-style (non-egg) RPM when set.
        self.no_egg = None
    # Method defined conditionally at class-creation time: only Python < 2.5
    # lacks the upload-tracking hook, so only there do we override move_file.
    if sys.version<"2.5":
        # Track for uploading any .rpm file(s) moved to self.dist_dir
        def move_file(self, src, dst, level=1):
            _bdist_rpm.move_file(self, src, dst, level)
            if dst==self.dist_dir and src.endswith('.rpm'):
                # Record ('command', pyversion, path); source RPMs apply to 'any'.
                getattr(self.distribution,'dist_files',[]).append(
                    ('bdist_rpm',
                    src.endswith('.src.rpm') and 'any' or sys.version[:3],
                    os.path.join(dst, os.path.basename(src)))
                )
    def run(self):
        self.run_command('egg_info') # ensure distro name is up-to-date
        _bdist_rpm.run(self)
    def _make_spec_file(self):
        """Patch the generated RPM spec for setuptools-managed installs."""
        version = self.distribution.get_version()
        # RPM forbids '-' in versions; distutils mangles it to '_'.
        rpmversion = version.replace('-','_')
        spec = _bdist_rpm._make_spec_file(self)
        line23 = '%define version '+version
        line24 = '%define version '+rpmversion
        # Rewrite the spec: use the unmangled version for the tarball name,
        # install single-version-externally-managed, and swap in the
        # mangled %version define.
        spec = [
            line.replace(
                "Source0: %{name}-%{version}.tar",
                "Source0: %{name}-%{unmangled_version}.tar"
            ).replace(
                "setup.py install ",
                "setup.py install --single-version-externally-managed "
            ).replace(
                "%setup",
                "%setup -n %{name}-%{unmangled_version}"
            ).replace(line23,line24)
            for line in spec
        ]
        # Keep the original (unmangled) version available to the spec.
        spec.insert(spec.index(line24)+1, "%define unmangled_version "+version)
        return spec
| 966 |
416 | <gh_stars>100-1000
/*
* Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tencentcloudapi.cme.v20191029.models;
import com.tencentcloudapi.common.AbstractModel;
import com.google.gson.annotations.SerializedName;
import com.google.gson.annotations.Expose;
import java.util.HashMap;
/**
 * Input parameters of a record-replay project (CME API model).
 */
public class RecordReplayProjectInput extends AbstractModel{

    /**
    * Pull stream URL used for recording.
    */
    @SerializedName("PullStreamUrl")
    @Expose
    private String PullStreamUrl;

    /**
    * Owner of the recorded material.
    */
    @SerializedName("MaterialOwner")
    @Expose
    private Entity MaterialOwner;

    /**
    * Storage classification path of the recorded material.
    */
    @SerializedName("MaterialClassPath")
    @Expose
    private String MaterialClassPath;

    /**
    * Push stream URL used for replay.
    */
    @SerializedName("PushStreamUrl")
    @Expose
    private String PushStreamUrl;

    /**
     * Get the pull stream URL used for recording.
     * @return PullStreamUrl Pull stream URL used for recording.
     */
    public String getPullStreamUrl() {
        return this.PullStreamUrl;
    }

    /**
     * Set the pull stream URL used for recording.
     * @param PullStreamUrl Pull stream URL used for recording.
     */
    public void setPullStreamUrl(String PullStreamUrl) {
        this.PullStreamUrl = PullStreamUrl;
    }

    /**
     * Get the owner of the recorded material.
     * @return MaterialOwner Owner of the recorded material.
     */
    public Entity getMaterialOwner() {
        return this.MaterialOwner;
    }

    /**
     * Set the owner of the recorded material.
     * @param MaterialOwner Owner of the recorded material.
     */
    public void setMaterialOwner(Entity MaterialOwner) {
        this.MaterialOwner = MaterialOwner;
    }

    /**
     * Get the storage classification path of the recorded material.
     * @return MaterialClassPath Storage classification path of the recorded material.
     */
    public String getMaterialClassPath() {
        return this.MaterialClassPath;
    }

    /**
     * Set the storage classification path of the recorded material.
     * @param MaterialClassPath Storage classification path of the recorded material.
     */
    public void setMaterialClassPath(String MaterialClassPath) {
        this.MaterialClassPath = MaterialClassPath;
    }

    /**
     * Get the push stream URL used for replay.
     * @return PushStreamUrl Push stream URL used for replay.
     */
    public String getPushStreamUrl() {
        return this.PushStreamUrl;
    }

    /**
     * Set the push stream URL used for replay.
     * @param PushStreamUrl Push stream URL used for replay.
     */
    public void setPushStreamUrl(String PushStreamUrl) {
        this.PushStreamUrl = PushStreamUrl;
    }

    public RecordReplayProjectInput() {
    }

    /**
     * NOTE: Any ambiguous key set via .set("AnyKey", "value") will be a shallow copy,
     * and any explicit key, i.e Foo, set via .setFoo("value") will be a deep copy.
     */
    public RecordReplayProjectInput(RecordReplayProjectInput source) {
        if (source.PullStreamUrl != null) {
            this.PullStreamUrl = new String(source.PullStreamUrl);
        }
        if (source.MaterialOwner != null) {
            this.MaterialOwner = new Entity(source.MaterialOwner);
        }
        if (source.MaterialClassPath != null) {
            this.MaterialClassPath = new String(source.MaterialClassPath);
        }
        if (source.PushStreamUrl != null) {
            this.PushStreamUrl = new String(source.PushStreamUrl);
        }
    }

    /**
     * Internal implementation, normal users should not use it.
     */
    public void toMap(HashMap<String, String> map, String prefix) {
        this.setParamSimple(map, prefix + "PullStreamUrl", this.PullStreamUrl);
        this.setParamObj(map, prefix + "MaterialOwner.", this.MaterialOwner);
        this.setParamSimple(map, prefix + "MaterialClassPath", this.MaterialClassPath);
        this.setParamSimple(map, prefix + "PushStreamUrl", this.PushStreamUrl);
    }
}
| 1,824 |
1,538 | <filename>src/kudu/util/throttler-test.cc
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "kudu/util/throttler.h"
#include <gtest/gtest.h>
#include "kudu/util/monotime.h"
#include "kudu/util/test_util.h"
namespace kudu {
// Test fixture for Throttler; inherits common KuduTest setup/teardown.
class ThrottlerTest : public KuduTest {
};
TEST_F(ThrottlerTest, TestOpThrottle) {
  // Verify op-rate throttling: 1000 ops/sec with a byte budget large enough
  // to never be the limiting factor, and no burst headroom.
  MonoTime now = MonoTime::Now();
  Throttler t0(now, 1000, 1000*1000, 1);
  // Advance time so the token bucket is fully refilled.
  now += MonoDelta::FromMilliseconds(2000);
  // Over 1 second: each 100ms window admits exactly 100 ops, then rejects.
  for (int p = 0; p < 10; p++) {
    for (int i = 0; i < 100; i++) {
      ASSERT_TRUE(t0.Take(now, 1, 1));
    }
    ASSERT_FALSE(t0.Take(now, 1, 1));
    now += MonoDelta::FromMilliseconds(100);
  }
}
TEST_F(ThrottlerTest, TestIOThrottle) {
  // Verify IO (byte) rate throttling: 1MB/sec byte budget with an op limit
  // (50000/sec) high enough to never bind. (The original comment said
  // "operation rate" — copy-paste from the test above.)
  MonoTime now = MonoTime::Now();
  Throttler t0(now, 50000, 1000*1000, 1);
  // Advance time so the token bucket is fully refilled.
  now += MonoDelta::FromMilliseconds(2000);
  // Over 1 second: each 100ms window admits 100 x 1000 bytes, then rejects.
  for (int p = 0; p < 10; p++) {
    for (int i = 0; i < 100; i++) {
      ASSERT_TRUE(t0.Take(now, 1, 1000));
    }
    ASSERT_FALSE(t0.Take(now, 1, 1000));
    now += MonoDelta::FromMilliseconds(100);
  }
}
TEST_F(ThrottlerTest, TestBurst) {
  // Verify burst capacity: base rate 1MB/sec with burst factor 5 allows a
  // sustained 5000 bytes/ms (5x the base rate) and then one large 100KB
  // take, after which the budget is exhausted.
  MonoTime now = MonoTime::Now();
  Throttler t0(now, 2000, 1000*1000, 5);
  // Advance time so the token bucket is fully refilled.
  now += MonoDelta::FromMilliseconds(2000);
  for (int i = 0; i < 100; i++) {
    now += MonoDelta::FromMilliseconds(1);
    ASSERT_TRUE(t0.Take(now, 1, 5000));
  }
  ASSERT_TRUE(t0.Take(now, 1, 100000));
  ASSERT_FALSE(t0.Take(now, 1, 1));
}
} // namespace kudu
| 861 |
3,427 | <reponame>oli0044/graphhopper<gh_stars>1000+
/*
* Licensed to GraphHopper GmbH under one or more contributor
* license agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*
* GraphHopper GmbH licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.graphhopper.routing;
/**
 * Configuration for the OSM import: geometry simplification, elevation
 * handling and parallelism. All setters return {@code this} for chaining.
 */
public class OSMReaderConfig {
    // Douglas-Peucker tolerance in meters; 0 disables simplification.
    private double maxWayPointDistance = 1;
    // Max allowed elevation error (meters) during simplification.
    private double elevationMaxWayPointDistance = Double.MAX_VALUE;
    // Whether to smooth elevation values along edges.
    private boolean smoothElevation = false;
    // Distance (meters) between elevation samples inserted on long edges.
    private double longEdgeSamplingDistance = Double.MAX_VALUE;
    // Number of worker threads used by the OSM reader.
    private int workerThreads = 2;

    /** @return the Douglas-Peucker simplification tolerance in meters */
    public double getMaxWayPointDistance() {
        return maxWayPointDistance;
    }

    /**
     * This parameter affects the routine used to simplify the edge geometries (Douglas-Peucker). Higher values mean
     * more details are preserved. The default is 1 (meter). Simplification can be disabled by setting it to 0.
     */
    public OSMReaderConfig setMaxWayPointDistance(double maxWayPointDistance) {
        this.maxWayPointDistance = maxWayPointDistance;
        return this;
    }

    /** @return the max elevation discrepancy allowed during simplification, in meters */
    public double getElevationMaxWayPointDistance() {
        return elevationMaxWayPointDistance;
    }

    /**
     * Sets the max elevation discrepancy between way points and the simplified polyline in meters
     */
    public OSMReaderConfig setElevationMaxWayPointDistance(double elevationMaxWayPointDistance) {
        this.elevationMaxWayPointDistance = elevationMaxWayPointDistance;
        return this;
    }

    /** @return true when elevation smoothing is enabled */
    public boolean isSmoothElevation() {
        return smoothElevation;
    }

    /**
     * Enables/disables elevation smoothing
     */
    public OSMReaderConfig setSmoothElevation(boolean smoothElevation) {
        this.smoothElevation = smoothElevation;
        return this;
    }

    /** @return the elevation sampling distance for long edges, in meters */
    public double getLongEdgeSamplingDistance() {
        return longEdgeSamplingDistance;
    }

    /**
     * Sets the distance between elevation samples on long edges
     */
    public OSMReaderConfig setLongEdgeSamplingDistance(double longEdgeSamplingDistance) {
        this.longEdgeSamplingDistance = longEdgeSamplingDistance;
        return this;
    }

    /** @return the number of worker threads used for the OSM import */
    public int getWorkerThreads() {
        return workerThreads;
    }

    /**
     * Sets the number of threads used for the OSM import
     */
    public OSMReaderConfig setWorkerThreads(int workerThreads) {
        this.workerThreads = workerThreads;
        return this;
    }
}
| 971 |
690 | <filename>artemis-core/artemis/src/main/java/com/artemis/link/EntityFieldMutator.java<gh_stars>100-1000
package com.artemis.link;
import com.artemis.Component;
import com.artemis.Entity;
import com.artemis.World;
import com.artemis.utils.reflect.Field;
import com.artemis.utils.reflect.ReflectionException;
/**
 * UniFieldMutator that (de)serializes an {@link Entity} field as its int id,
 * using -1 to represent a null entity.
 */
class EntityFieldMutator implements UniFieldMutator {
    private World world;

    @Override
    public int read(Component c, Field f) {
        try {
            final Entity entity = (Entity) f.get(c);
            if (entity == null)
                return -1;
            return entity.getId();
        } catch (ReflectionException exc) {
            throw new RuntimeException(exc);
        }
    }

    @Override
    public void write(int value, Component c, Field f) {
        try {
            Entity entity = null;
            if (value != -1)
                entity = world.getEntity(value);
            f.set(c, entity);
        } catch (ReflectionException exc) {
            throw new RuntimeException(exc);
        }
    }

    @Override
    public void setWorld(World world) {
        this.world = world;
    }
}
| 331 |
1,403 | /*
* Helper.h
*
* This file is part of the "LLGL" project (Copyright (c) 2015-2019 by <NAME>)
* See "LICENSE.txt" for license information.
*/
#ifndef LLGL_HELPER_H
#define LLGL_HELPER_H
#include "../Renderer/CheckedCast.h"
#include <LLGL/Export.h>
#include <algorithm>
#include <type_traits>
#include <memory>
#include <vector>
#include <list>
#include <algorithm>
#include <set>
#include <string>
#include <cstring>
#include <sstream>
#include <iomanip>
#include <functional>
#include <cstdint>
//TODO: separate this header into multiple "*Utils.h" files, such as "ContainerUtils.h"
namespace LLGL
{
/* ----- Templates ----- */
// C++11-compatible substitute for std::make_unique<T>: perfect-forwards the
// constructor arguments and wraps the allocation immediately.
template <typename T, typename... Args>
std::unique_ptr<T> MakeUnique(Args&&... ctorArgs)
{
    return std::unique_ptr<T>{ new T(std::forward<Args>(ctorArgs)...) };
}
// C++11-compatible substitute for std::make_unique<T[]>: allocates an array
// of 'count' value-uninitialized elements owned by a unique_ptr<T[]>.
template <typename T>
std::unique_ptr<T[]> MakeUniqueArray(std::size_t count)
{
    return std::unique_ptr<T[]>{ new T[count] };
}
// Zero-fills a basic-type or POD-structure object via ::memset; pointer and
// non-POD types are rejected at compile time.
template <class T>
void InitMemory(T& data)
{
    static_assert(!std::is_pointer<T>::value, "'InitMemory' does not allow pointer types");
    static_assert(std::is_pod<T>::value, "'InitMemory' does only allow plain-old-data (POD)");
    ::memset(&data, 0, sizeof(data));
}
// Assigns 'value' to every element of the container (std::fill wrapper).
template <class Container, class T>
void Fill(Container& container, const T& value)
{
    std::fill(std::begin(container), std::end(container), value);
}
// Returns whether 'value' occurs in the container (linear std::find search).
template <class Container, class T>
bool Contains(const Container& container, const T& value)
{
    const auto endIt = std::end(container);
    return std::find(std::begin(container), endIt, value) != endIt;
}
// Erases the first occurrence of 'entry' from the container, if any.
template <class Container, class T>
void RemoveFromList(Container& container, const T& entry)
{
    const auto endIt = std::end(container);
    auto pos = std::find(std::begin(container), endIt, entry);
    if (pos != endIt)
        container.erase(pos);
}
// Erases the first element satisfying 'pred' from the container, if any
// (see RemoveAllFromListIf to erase every match).
template <class Container, class UnaryPredicate>
void RemoveFromListIf(Container& container, UnaryPredicate pred)
{
    const auto endIt = std::end(container);
    auto pos = std::find_if(std::begin(container), endIt, pred);
    if (pos != endIt)
        container.erase(pos);
}
// Erases every occurrence of 'entry' from the container (erase-remove idiom).
template <class Container, class T>
void RemoveAllFromList(Container& container, const T& entry)
{
    auto trailingBegin = std::remove(std::begin(container), std::end(container), entry);
    container.erase(trailingBegin, container.end());
}
// Erases every element satisfying 'pred' from the container (erase-remove idiom).
template <class Container, class UnaryPredicate>
void RemoveAllFromListIf(Container& cont, UnaryPredicate pred)
{
    auto newEnd = std::remove_if(std::begin(cont), std::end(cont), pred);
    cont.erase(newEnd, cont.end());
}
// Appends 'entry' to the shared-pointer list unless it is null or already contained.
template <typename T>
void AddOnceToSharedList(std::vector<std::shared_ptr<T>>& cont, const std::shared_ptr<T>& entry)
{
    if (!entry)
        return;
    if (std::find(cont.begin(), cont.end(), entry) != cont.end())
        return;
    cont.push_back(entry);
}
// Erases the first shared pointer whose managed object is 'entry' (compared by
// raw address); null 'entry' is a no-op.
template <typename T, typename TBase>
void RemoveFromSharedList(std::vector<std::shared_ptr<T>>& cont, const TBase* entry)
{
    if (entry == nullptr)
        return;
    auto matchesEntry = [entry](const std::shared_ptr<T>& e)
    {
        return (e.get() == entry);
    };
    auto pos = std::find_if(cont.begin(), cont.end(), matchesEntry);
    if (pos != cont.end())
        cont.erase(pos);
}
// Erases the first unique pointer whose managed object is 'entry' (compared by
// raw address); null 'entry' is a no-op.
template <typename T, typename TBase>
void RemoveFromUniqueSet(std::set<std::unique_ptr<T>>& cont, const TBase* entry)
{
    if (entry == nullptr)
        return;
    auto matchesEntry = [entry](const std::unique_ptr<T>& e)
    {
        return (e.get() == entry);
    };
    auto pos = std::find_if(cont.begin(), cont.end(), matchesEntry);
    if (pos != cont.end())
        cont.erase(pos);
}
// Moves 'object' into the set and returns a non-owning pointer to it.
template <typename BaseType, typename SubType>
SubType* TakeOwnership(std::set<std::unique_ptr<BaseType>>& objectSet, std::unique_ptr<SubType>&& object)
{
    SubType* observer = object.get();
    objectSet.emplace(std::move(object));
    return observer;
}
// Moves 'object' into the vector and returns a non-owning pointer to it.
template <typename BaseType, typename SubType>
SubType* TakeOwnership(std::vector<std::unique_ptr<BaseType>>& objectSet, std::unique_ptr<SubType>&& object)
{
    SubType* observer = object.get();
    objectSet.emplace_back(std::move(object));
    return observer;
}
// Moves 'object' into the list and returns a non-owning pointer to it.
template <typename BaseType, typename SubType>
SubType* TakeOwnership(std::list<std::unique_ptr<BaseType>>& objectSet, std::unique_ptr<SubType>&& object)
{
    SubType* observer = object.get();
    objectSet.emplace_back(std::move(object));
    return observer;
}
// Similar to std::unique but with a predicate that allows to modify elements.
// 'pred' is invoked as pred(*result, *begin); when it returns true, *begin is
// treated as a duplicate of *result and may be merged into it by the predicate
// itself. Returns the new logical end of the range; like std::unique, elements
// past the returned iterator are left in a valid but unspecified state.
template <typename ForwardIt, typename BinaryPredicate>
static ForwardIt UniqueMerge(ForwardIt begin, ForwardIt end, BinaryPredicate pred)
{
    if (begin == end)
        return end;
    ForwardIt result = begin;
    while (++begin != end)
    {
        // Keep *begin only when it is not merged into *result; the extra
        // (++result != begin) check avoids moving an element onto itself.
        if (!pred(*result, *begin) && ++result != begin)
            *result = std::move(*begin);
    }
    return ++result;
}
// Returns the specified integral value as upper-case hexadecimal string,
// zero-padded to two characters per byte of T (e.g. ToHex<std::uint16_t>(0xAB) -> "00AB").
template <typename T>
std::string ToHex(T value)
{
    static_assert(std::is_integral<T>::value, "input parameter of 'LLGL::ToHex' must be an integral type");
    /* Convert through the unsigned counterpart of T first: casting a negative
       signed value directly to std::uint64_t sign-extends, so e.g.
       ToHex(std::int8_t(-1)) would print "FFFFFFFFFFFFFFFF" (std::setw pads
       but never truncates) instead of the intended "FF". */
    const auto unsignedValue = static_cast<typename std::make_unsigned<T>::type>(value);
    std::stringstream s;
    s << std::setfill('0') << std::setw(sizeof(T)*2) << std::hex << std::uppercase << static_cast<std::uint64_t>(unsignedValue);
    return s.str();
}
/*
\brief Returns the next resource from the specified resource array.
\param[in,out] numResources Specifies the remaining number of resources in the array.
\param[in,out] resourceArray Pointer to the remaining array of resource pointers.
\remarks If the last element in the array is reached,
'resourceArray' points to the location after the last entry, and 'numResources' is 0.
\return Pointer to the next resource, or null when the array is exhausted.
*/
template <typename TSub, typename TBase>
TSub* NextArrayResource(std::uint32_t& numResources, TBase* const * & resourceArray)
{
    if (numResources > 0)
    {
        --numResources;
        // NOTE(review): LLGL_CAST is presumably the project's checked downcast
        // macro from TBase* to TSub* -- confirm against its definition.
        return LLGL_CAST(TSub*, (*(resourceArray++)));
    }
    return nullptr;
}
// Returns 'size' rounded up to the next multiple of 'alignment'; the result is
// always >= 'size'. Alignments <= 1 leave the size unchanged. (T can be UINT
// or VkDeviceSize for instance; 'alignment' is expected to be a power of two.)
template <typename T>
T GetAlignedSize(T size, T alignment)
{
    if (alignment <= 1)
        return size;
    const T mask = alignment - 1;
    return (size + mask) & ~mask;
}
// Clamps the value x into the range [minimum, maximum].
template <typename T>
T Clamp(const T& x, const T& minimum, const T& maximum)
{
    const T upperBounded = std::min(x, maximum);
    return std::max(minimum, upperBounded);
}
// Returns the raw function pointer of the specified member function, e.g. GetMemberFuncPtr(&Foo::Bar).
// NOTE(review): reading 'addr' after writing 'func' is type punning through a
// union, which is not sanctioned by the C++ standard (though widely supported
// by mainstream compilers). Also, member function pointers can be larger than
// void* on some ABIs, in which case the returned address is truncated -- the
// result is only suitable for logging/identity, never for calling.
template <typename T>
const void* GetMemberFuncPtr(T pfn)
{
    union
    {
        T func;
        const void* addr;
    }
    ptr;
    ptr.func = pfn;
    return ptr.addr;
}
// Returns the length (in characters, excluding the terminator) of the
// specified null-terminated string of any character type.
template <typename T>
inline std::size_t StrLength(const T* s)
{
    const T* end = s;
    while (*end != 0)
        ++end;
    return static_cast<std::size_t>(end - s);
}
// Specialization of StrLength template for ANSI strings.
// Delegates to std::strlen, which is typically vectorized by the C runtime.
template <>
inline std::size_t StrLength<char>(const char* s)
{
    return std::strlen(s);
}
// Specialization of StrLength template for Unicode strings.
// Delegates to std::wcslen (declared in <cwchar>).
template <>
inline std::size_t StrLength<wchar_t>(const wchar_t* s)
{
    return std::wcslen(s);
}
// Advances the specified pointer by 'offset' bytes, preserving the
// const-qualification of the pointee type.
template <typename T>
inline T* AdvancePtr(T* ptr, std::size_t offset)
{
    // Pick a byte type that mirrors T's const-ness so the casts stay legal.
    using ByteType = typename std::conditional
        <
            std::is_const<T>::value,
            const std::int8_t,
            std::int8_t
        >::type;
    auto bytePtr = reinterpret_cast<ByteType*>(ptr);
    return reinterpret_cast<T*>(bytePtr + offset);
}
/* ----- Functions ----- */
// Reads the specified text file into a string.
LLGL_EXPORT std::string ReadFileString(const char* filename);
// Reads the specified binary file into a buffer.
LLGL_EXPORT std::vector<char> ReadFileBuffer(const char* filename);
// Converts the UTF16 input string to UTF8 string.
// The const wchar_t* overload expects a null-terminated string.
LLGL_EXPORT std::string ToUTF8String(const std::wstring& utf16);
LLGL_EXPORT std::string ToUTF8String(const wchar_t* utf16);
// Converts the UTF8 input string to UTF16 string.
// The const char* overload expects a null-terminated string.
LLGL_EXPORT std::wstring ToUTF16String(const std::string& utf8);
LLGL_EXPORT std::wstring ToUTF16String(const char* utf8);
} // /namespace LLGL
#endif
// ================================================================================
| 3,160 |
5,169 | {
"name": "KICaptureKit",
"version": "0.0.2",
"summary": "KICaptureKit",
"description": "KICaptureKit",
"homepage": "https://github.com/smartwalle/KICaptureKit",
"license": "MIT",
"authors": {
"SmartWalle": "<EMAIL>"
},
"platforms": {
"ios": "7.0"
},
"source": {
"git": "https://github.com/smartwalle/KICaptureKit.git",
"tag": "0.0.2"
},
"source_files": [
"KICaptureKit/KICaptureKit/*.{h,m}",
"KICaptureKit/KICaptureView/*.{h,m}"
],
"exclude_files": "Classes/Exclude",
"frameworks": [
"AVFoundation",
"UIKit"
],
"requires_arc": true
}
| 284 |
30,023 | """Config flow for Overkiz (by Somfy) integration."""
from __future__ import annotations
from collections.abc import Mapping
from typing import Any, cast
from aiohttp import ClientError
from pyoverkiz.client import OverkizClient
from pyoverkiz.const import SUPPORTED_SERVERS
from pyoverkiz.exceptions import (
BadCredentialsException,
MaintenanceException,
TooManyAttemptsBannedException,
TooManyRequestsException,
)
from pyoverkiz.models import obfuscate_id
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import dhcp, zeroconf
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from .const import CONF_HUB, DEFAULT_HUB, DOMAIN, LOGGER
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Overkiz (by Somfy)."""

    VERSION = 1

    # Entry being re-authenticated (set in async_step_reauth), else None.
    _config_entry: ConfigEntry | None
    # Username pre-filled in the form (set after a submit or on reauth).
    _default_user: None | str
    # Hub/server key pre-selected in the form.
    _default_hub: str

    def __init__(self) -> None:
        """Initialize Overkiz Config Flow."""
        super().__init__()
        self._config_entry = None
        self._default_user = None
        self._default_hub = DEFAULT_HUB
async def async_validate_input(self, user_input: dict[str, Any]) -> None:
"""Validate user credentials."""
username = user_input[CONF_USERNAME]
password = <PASSWORD>_input[CONF_PASSWORD]
server = SUPPORTED_SERVERS[user_input[CONF_HUB]]
session = async_create_clientsession(self.hass)
client = OverkizClient(
username=username, password=password, server=server, session=session
)
await client.login(register_event_listener=False)
# Set first gateway id as unique id
if gateways := await client.get_gateways():
gateway_id = gateways[0].id
await self.async_set_unique_id(gateway_id)
    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle the initial step via config flow.

        Also serves as the form step for reauth (``_config_entry`` set) and
        discovery flows, which funnel into this handler.
        """
        errors = {}
        if user_input:
            # Remember the submission so the form is pre-filled after an error.
            self._default_user = user_input[CONF_USERNAME]
            self._default_hub = user_input[CONF_HUB]
            try:
                await self.async_validate_input(user_input)
            except TooManyRequestsException:
                errors["base"] = "too_many_requests"
            except BadCredentialsException:
                errors["base"] = "invalid_auth"
            except (TimeoutError, ClientError):
                errors["base"] = "cannot_connect"
            except MaintenanceException:
                errors["base"] = "server_in_maintenance"
            except TooManyAttemptsBannedException:
                errors["base"] = "too_many_attempts"
            except Exception as exception:  # pylint: disable=broad-except
                errors["base"] = "unknown"
                LOGGER.exception(exception)
            else:
                # Reauth path: update the existing entry instead of creating one.
                if self._config_entry:
                    # Credentials must belong to the same gateway/account.
                    if self._config_entry.unique_id != self.unique_id:
                        return self.async_abort(reason="reauth_wrong_account")
                    # Update existing entry during reauth
                    self.hass.config_entries.async_update_entry(
                        self._config_entry,
                        data={
                            **self._config_entry.data,
                            **user_input,
                        },
                    )
                    self.hass.async_create_task(
                        self.hass.config_entries.async_reload(
                            self._config_entry.entry_id
                        )
                    )
                    return self.async_abort(reason="reauth_successful")
                # Create new entry
                self._abort_if_unique_id_configured()
                return self.async_create_entry(
                    title=user_input[CONF_USERNAME], data=user_input
                )
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {
                    vol.Required(CONF_USERNAME, default=self._default_user): str,
                    vol.Required(CONF_PASSWORD): str,
                    vol.Required(CONF_HUB, default=self._default_hub): vol.In(
                        {key: hub.name for key, hub in SUPPORTED_SERVERS.items()}
                    ),
                }
            ),
            errors=errors,
        )
async def async_step_dhcp(self, discovery_info: dhcp.DhcpServiceInfo) -> FlowResult:
"""Handle DHCP discovery."""
hostname = discovery_info.hostname
gateway_id = hostname[8:22]
LOGGER.debug("DHCP discovery detected gateway %s", obfuscate_id(gateway_id))
return await self._process_discovery(gateway_id)
async def async_step_zeroconf(
self, discovery_info: zeroconf.ZeroconfServiceInfo
) -> FlowResult:
"""Handle ZeroConf discovery."""
properties = discovery_info.properties
gateway_id = properties["gateway_pin"]
LOGGER.debug("ZeroConf discovery detected gateway %s", obfuscate_id(gateway_id))
return await self._process_discovery(gateway_id)
    async def _process_discovery(self, gateway_id: str) -> FlowResult:
        """Handle discovery of a gateway.

        Aborts if the gateway is already configured; otherwise shows the
        credentials form (async_step_user) with the gateway id in the title.
        """
        await self.async_set_unique_id(gateway_id)
        self._abort_if_unique_id_configured()
        self.context["title_placeholders"] = {"gateway_id": gateway_id}
        return await self.async_step_user()
    async def async_step_reauth(self, data: Mapping[str, Any]) -> FlowResult:
        """Handle reauth.

        Stores the entry being re-authenticated, pre-fills the form with its
        saved username/hub, and re-enters the user step with the old data.
        """
        self._config_entry = cast(
            ConfigEntry,
            self.hass.config_entries.async_get_entry(self.context["entry_id"]),
        )
        self.context["title_placeholders"] = {
            "gateway_id": self._config_entry.unique_id
        }
        self._default_user = self._config_entry.data[CONF_USERNAME]
        self._default_hub = self._config_entry.data[CONF_HUB]
        return await self.async_step_user(dict(data))
| 2,895 |
2,151 | <reponame>zipated/src<filename>device/vr/vr_device_base_unittest.cc<gh_stars>1000+
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "device/vr/vr_device.h"
#include <memory>
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "device/vr/public/mojom/vr_service.mojom.h"
#include "device/vr/test/fake_vr_device.h"
#include "device/vr/test/fake_vr_service_client.h"
#include "device/vr/test/mock_vr_display_impl.h"
#include "device/vr/vr_device_base.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace device {
namespace {
// Test double exposing protected VRDeviceBase hooks: lets tests inject display
// info, fire a display-activate event, and observe the listening flag.
class VRDeviceBaseForTesting : public VRDeviceBase {
 public:
  VRDeviceBaseForTesting() = default;
  ~VRDeviceBaseForTesting() override = default;

  // Forwards to the protected setter so tests can trigger OnChanged.
  void SetVRDisplayInfoForTest(mojom::VRDisplayInfoPtr display_info) {
    SetVRDisplayInfo(std::move(display_info));
  }

  // Simulates the device being mounted (headset put on).
  void FireDisplayActivate() {
    OnActivate(device::mojom::VRDisplayEventReason::MOUNTED, base::DoNothing());
  }

  bool ListeningForActivate() { return listening_for_activate; }

 private:
  // Records the most recent listening state pushed by the base class.
  void OnListeningForActivate(bool listening) override {
    listening_for_activate = listening;
  }

  bool listening_for_activate = false;

  DISALLOW_COPY_AND_ASSIGN(VRDeviceBaseForTesting);
};
// GMock listener used to verify which VRDeviceBase events fire. OnChanged is
// bridged to DoOnChanged so the mock can match on a raw pointer instead of the
// move-only mojom::VRDisplayInfoPtr.
class StubVRDeviceEventListener : public VRDeviceEventListener {
 public:
  ~StubVRDeviceEventListener() override {}

  MOCK_METHOD1(DoOnChanged, void(mojom::VRDisplayInfo* vr_device_info));
  void OnChanged(mojom::VRDisplayInfoPtr vr_device_info) override {
    DoOnChanged(vr_device_info.get());
  }

  MOCK_METHOD2(OnActivate,
               void(mojom::VRDisplayEventReason,
                    base::OnceCallback<void(bool)>));
  MOCK_METHOD0(OnExitPresent, void());
  MOCK_METHOD0(OnBlur, void());
  MOCK_METHOD0(OnFocus, void());
  MOCK_METHOD1(OnDeactivate, void(mojom::VRDisplayEventReason));
};
} // namespace
// Fixture providing a fake VR service client, a message loop, and helpers to
// build test devices and mock displays.
class VRDeviceTest : public testing::Test {
 public:
  VRDeviceTest() {}
  ~VRDeviceTest() override {}

 protected:
  void SetUp() override {
    mojom::VRServiceClientPtr proxy;
    client_ = std::make_unique<FakeVRServiceClient>(mojo::MakeRequest(&proxy));
  }

  std::unique_ptr<MockVRDisplayImpl> MakeMockDisplay(VRDeviceBase* device) {
    mojom::VRDisplayClientPtr display_client;
    return std::make_unique<testing::NiceMock<MockVRDisplayImpl>>(
        device, client(), nullptr, nullptr, mojo::MakeRequest(&display_client),
        false);
  }

  // Builds a test device with display info already injected.
  std::unique_ptr<VRDeviceBaseForTesting> MakeVRDevice() {
    std::unique_ptr<VRDeviceBaseForTesting> device =
        std::make_unique<VRDeviceBaseForTesting>();
    device->SetVRDisplayInfoForTest(MakeVRDisplayInfo(device->GetId()));
    return device;
  }

  // Minimal display info; only the index (device id) is populated.
  mojom::VRDisplayInfoPtr MakeVRDisplayInfo(unsigned int device_id) {
    mojom::VRDisplayInfoPtr display_info = mojom::VRDisplayInfo::New();
    display_info->index = device_id;
    return display_info;
  }

  FakeVRServiceClient* client() { return client_.get(); }

  std::unique_ptr<FakeVRServiceClient> client_;
  // Required by the mojo plumbing used in these tests.
  base::MessageLoop message_loop_;

  DISALLOW_COPY_AND_ASSIGN(VRDeviceTest);
};
// Tests VRDevice class default behaviour when it dispatches "vrdevicechanged"
// event. The expected behaviour is all of the services related with this device
// will receive the "vrdevicechanged" event.
TEST_F(VRDeviceTest, DeviceChangedDispatched) {
  auto device = MakeVRDevice();
  StubVRDeviceEventListener listener;
  device->SetVRDeviceEventListener(&listener);
  // Updating display info must notify the registered listener exactly once.
  EXPECT_CALL(listener, DoOnChanged(testing::_)).Times(1);
  device->SetVRDisplayInfoForTest(MakeVRDisplayInfo(device->GetId()));
}
// Verifies that SetListeningForActivate reaches the device subclass and that a
// fired activate event (reason MOUNTED) is delivered to the listener.
// NOTE(review): "Regsitered" is a typo for "Registered" in the test name; left
// as-is since test names may be referenced by filters/dashboards.
TEST_F(VRDeviceTest, DisplayActivateRegsitered) {
  device::mojom::VRDisplayEventReason mounted =
      device::mojom::VRDisplayEventReason::MOUNTED;
  auto device = MakeVRDevice();
  StubVRDeviceEventListener listener;
  device->SetVRDeviceEventListener(&listener);
  EXPECT_FALSE(device->ListeningForActivate());
  device->SetListeningForActivate(true);
  EXPECT_TRUE(device->ListeningForActivate());
  EXPECT_CALL(listener, OnActivate(mounted, testing::_)).Times(1);
  device->FireDisplayActivate();
}
// Magic-window poses must be returned while enabled and suppressed (null)
// once magic window is disabled, e.g. while the user is browsing.
TEST_F(VRDeviceTest, NoMagicWindowPosesWhileBrowsing) {
  auto device = std::make_unique<FakeVRDevice>();
  device->SetPose(mojom::VRPose::New());

  device->GetMagicWindowPose(
      base::BindOnce([](mojom::VRPosePtr pose) { EXPECT_TRUE(pose); }));
  device->SetMagicWindowEnabled(false);
  device->GetMagicWindowPose(
      base::BindOnce([](mojom::VRPosePtr pose) { EXPECT_FALSE(pose); }));
}
} // namespace device
| 1,610 |
1,283 | #!/usr/bin/python
# -*- coding: utf8 -*-
# cp936
#
# The MIT License (MIT)
#
# Copyright (c) 2010-2017 fasiondog
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#===============================================================================
# history:
# 1. 20171122, Added by fasiondog
#===============================================================================
from hikyuu.cpp.core import KData, Indicator, SignalBase, ConditionBase, System
from matplotlib.pylab import gca as mpl_gca
from matplotlib.pylab import gcf as mpl_gcf
from .matplotlib_draw import create_figure as mpl_create_figure
from .matplotlib_draw import kplot as mpl_kplot
from .matplotlib_draw import mkplot as mpl_mkplot
from .matplotlib_draw import iplot as mpl_iplot
from .matplotlib_draw import ibar as mpl_ibar
from .matplotlib_draw import sgplot as mpl_sgplot
from .matplotlib_draw import cnplot as mpl_cnplot
from .matplotlib_draw import sysplot as mpl_sysplot
from .matplotlib_draw import ax_draw_macd as mpl_ax_draw_macd
from .matplotlib_draw import ax_draw_macd2 as mpl_ax_draw_macd2
from .matplotlib_draw import ax_set_locator_formatter as mpl_ax_set_locator_formatter
from .matplotlib_draw import adjust_axes_show as mpl_adjust_axes_show
from .bokeh_draw import gcf as bk_gcf
from .bokeh_draw import gca as bk_gca
from .bokeh_draw import create_figure as bk_create_figure
from .bokeh_draw import show_gcf as bk_show_gcf
from .bokeh_draw import kplot as bk_kplot
from .bokeh_draw import iplot as bk_iplot
from .bokeh_draw import ibar as bk_ibar
from .bokeh_draw import ax_draw_macd as bk_ax_draw_macd
from .bokeh_draw import ax_draw_macd2 as bk_ax_draw_macd2
from .bokeh_draw import sgplot as bk_sgplot
g_draw_engine = 'matplotlib'
def get_current_draw_engine():
    """Return the name of the drawing engine currently in use ('matplotlib' or 'bokeh')."""
    global g_draw_engine
    return g_draw_engine
def set_current_draw_engine(engine):
    """Record the active drawing engine name; prefer use_draw_engine() which also patches plot methods."""
    global g_draw_engine
    g_draw_engine = engine
def use_draw_engine(engine='matplotlib'):
    """Activate the given drawing engine ('matplotlib' or 'bokeh')."""
    if engine == 'bokeh':
        use_draw_with_bokeh()
    elif engine == 'matplotlib':
        use_draw_with_matplotlib()
    else:
        print("未知的引擎: {}".format(engine))
def use_draw_with_bokeh():
    """Switch drawing to the bokeh engine (monkey-patches plot methods).

    Note: fewer classes are patched than for matplotlib -- ConditionBase and
    System plotting are matplotlib-only.
    """
    # Imported lazily so bokeh is only required when actually selected.
    from bokeh.io import output_notebook
    output_notebook()
    set_current_draw_engine('bokeh')
    KData.plot = bk_kplot
    KData.kplot = bk_kplot
    Indicator.plot = bk_iplot
    Indicator.bar = bk_ibar
    SignalBase.plot = bk_sgplot
def use_draw_with_matplotlib():
    """Switch drawing to the matplotlib engine (monkey-patches plot methods)."""
    set_current_draw_engine('matplotlib')
    KData.plot = mpl_kplot
    KData.kplot = mpl_kplot
    KData.mkplot = mpl_mkplot
    Indicator.plot = mpl_iplot
    Indicator.bar = mpl_ibar
    SignalBase.plot = mpl_sgplot
    ConditionBase.plot = mpl_cnplot
    System.plot = mpl_sysplot
def create_figure(n=1, figsize=None):
    """Create a figure window with the given number of axes (at most 4).

    :param int n: number of axes
    :param figsize: (width, height)
    :return: (ax1, ax2, ...) depending on the requested number of axes;
             returns None when n is outside [1, 4]
    """
    engine = get_current_draw_engine()
    if engine == 'matplotlib':
        # matplotlib default size is in inches; bokeh default below is in pixels.
        return mpl_create_figure(n, figsize if figsize else (10, 8))
    elif engine == 'bokeh':
        return bk_create_figure(n, figsize if figsize else (800, 800))
    else:
        print("未知的引擎: {}".format(engine))
def gcf():
    """Return the current figure of the active drawing engine."""
    engine = get_current_draw_engine()
    if engine == 'matplotlib':
        return mpl_gcf()
    if engine == 'bokeh':
        return bk_gcf()
    print("未知的引擎: {}".format(engine))
def gca():
    """Return the current axes of the active drawing engine."""
    engine = get_current_draw_engine()
    if engine == 'matplotlib':
        return mpl_gca()
    if engine == 'bokeh':
        return bk_gca()
    print("未知的引擎: {}".format(engine))
def show_gcf():
    """Display the current figure (no-op for matplotlib, which shows inline)."""
    engine = get_current_draw_engine()
    if engine == 'bokeh':
        bk_show_gcf()
    elif engine != 'matplotlib':
        print("未知的引擎: {}".format(engine))
def ax_draw_macd(axes, kdata, n1=12, n2=26, n3=9):
    """Draw the MACD indicator on the given axes using the active draw engine.

    Note: this module previously defined ``ax_draw_macd`` twice; the first
    definition (matplotlib-only, no return value) was dead code shadowed by
    the second, so the duplicate has been removed.

    :param axes: target axes
    :param KData kdata: KData
    :param int n1: MACD parameter 1
    :param int n2: MACD parameter 2
    :param int n3: MACD parameter 3
    """
    engine = get_current_draw_engine()
    if engine == 'matplotlib':
        return mpl_ax_draw_macd(axes, kdata, n1, n2, n3)
    elif engine == 'bokeh':
        return bk_ax_draw_macd(axes, kdata, n1, n2, n3)
    else:
        print("未知的引擎: {}".format(engine))
def ax_draw_macd2(axes, ref, kdata, n1=12, n2=26, n3=9):
    """Draw MACD, colored relative to a reference series.

    When the BAR value moves against the reference series ``ref`` the bar is
    drawn grey; when BAR and ``ref`` both rise it is red; when both fall it is
    green.

    :param axes: target axes
    :param ref: reference series, e.g. an EMA
    :param KData kdata: KData
    :param int n1: MACD parameter 1
    :param int n2: MACD parameter 2
    :param int n3: MACD parameter 3
    """
    engine = get_current_draw_engine()
    if engine == 'matplotlib':
        mpl_ax_draw_macd2(axes, ref, kdata, n1, n2, n3)
    elif engine == 'bokeh':
        bk_ax_draw_macd2(axes, ref, kdata, n1, n2, n3)
    else:
        print("未知的引擎: {}".format(engine))
def adjust_axes_show(axeslist):
    """Fix overlapping tick labels between vertically adjacent axes.

    When axes are stacked tightly, the minimum tick label of the upper axes
    overlaps the maximum tick label of the one below it; this hides the
    offending labels.

    :param axeslist: vertically adjacent axes, e.g. (ax1, ax2, ...)
    """
    engine = get_current_draw_engine()
    if engine == 'matplotlib':
        mpl_adjust_axes_show(axeslist)
    elif engine == 'bokeh':
        # Not needed/implemented for bokeh.
        pass
    else:
        print("未知的引擎: {}".format(engine))
def ax_set_locator_formatter(axes, dates, typ):
    """Configure the date tick locator/formatter of the axes, tuned to the K-line type.

    :param axes: target axes
    :param dates: iterable of Datetime values
    :param Query.KType typ: K-line type
    """
    engine = get_current_draw_engine()
    if engine == 'matplotlib':
        mpl_ax_set_locator_formatter(axes, dates, typ)
    elif engine == 'bokeh':
        # Not implemented for bokeh.
        pass
    else:
        print("未知的引擎: {}".format(engine))
# Set the default engine at import time.
use_draw_engine('matplotlib')

__all__ = [
    'use_draw_engine', 'get_current_draw_engine', 'create_figure', 'gcf', 'show_gcf', 'gca',
    'ax_draw_macd', 'ax_draw_macd2'
]
| 3,678 |
543 | /*
* Copyright 2017 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.mirkosertic.bytecoder.core;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import de.mirkosertic.bytecoder.unittest.BytecoderUnitTestRunner;
@RunWith(BytecoderUnitTestRunner.class)
public class StaticAccessTest {

    // Holder with static fields only; exercises static field read/write
    // (GETSTATIC/PUTSTATIC) translation by Bytecoder.
    public static class StaticClassWithStuffInside {
        public static int member;
        public static String member2;
    }

    @Test
    public void testStaticGetAndSet() {
        String theText ="JUHU";
        StaticClassWithStuffInside.member = 12;
        StaticClassWithStuffInside.member2 = theText;
        int theResult = StaticClassWithStuffInside.member;
        String theResult2 = StaticClassWithStuffInside.member2;
        // The identical reference must round-trip, not a copy.
        Assert.assertSame(theText, theResult2);
        Assert.assertEquals(12, theResult, 0);
    }

    @Test
    public void testGet() {
        // Read-only access; the value is intentionally unused -- the test only
        // verifies that a static read compiles/translates.
        int theValue = StaticClassWithStuffInside.member;
    }
}
} | 507 |
367 | """Histograms of individual gradient transformations."""
import torch
from backpack.extensions import BatchGrad
from cockpit.quantities.bin_adaptation import NoAdaptation
from cockpit.quantities.quantity import SingleStepQuantity
from cockpit.quantities.utils_hists import histogram2d
from cockpit.quantities.utils_transforms import BatchGradTransformsHook
class GradHist1d(SingleStepQuantity):
    """Quantity class for one-dimensional histograms of individual gradient elements.

    The histograms consider individual gradient elements, with outliers being
    clipped to lie in the visible range.
    """

    def __init__(
        self,
        track_schedule,
        verbose=False,
        bins=100,
        range=(-2, 2),
        adapt=None,
    ):
        """Initialization sets the tracking schedule & creates the output dict.

        Args:
            track_schedule (callable): Function that maps the ``global_step``
                to a boolean, which determines if the quantity should be computed.
            verbose (bool, optional): Turns on verbose mode. Defaults to ``False``.
            bins (int): Number of bins
            range ([float, float], optional): Lower and upper limit of the bin range.
                Default: ``(-2, 2)``.
            adapt (BinAdaptation): Policy for adapting the bin limits. Per default,
                no adaptation is performed.
        """
        super().__init__(track_schedule, verbose=verbose)
        # NOTE: ``range``/``bins`` mirror numpy/torch histogram terminology;
        # ``range`` intentionally shadows the builtin inside this scope.
        self._range = range
        self._bins = bins
        self._adapt = NoAdaptation(verbose=verbose) if adapt is None else adapt

    def extensions(self, global_step):
        """Return list of BackPACK extensions required for the computation.

        Args:
            global_step (int): The current iteration number.

        Returns:
            list: (Potentially empty) list with required BackPACK quantities.
        """
        ext = []
        if self.should_compute(global_step):
            # BatchGrad provides the per-sample gradients the histogram needs.
            ext.append(BatchGrad())
        ext += self._adapt.extensions(global_step)
        return ext

    def extension_hooks(self, global_step):
        """Return list of BackPACK extension hooks required for the computation.

        Args:
            global_step (int): The current iteration number.

        Returns:
            [callable]: List of required BackPACK extension hooks for the current
                iteration.
        """
        hooks = []
        if self.should_compute(global_step):
            hooks.append(
                BatchGradTransformsHook(transforms={"hist_1d": self._compute_histogram})
            )
        hooks += self._adapt.extension_hooks(global_step)
        return hooks

    def track(self, global_step, params, batch_loss):
        """Perform scheduled computations and store result.

        Args:
            global_step (int): The current iteration number.
            params ([torch.Tensor]): List of torch.Tensors holding the network's
                parameters.
            batch_loss (torch.Tensor): Mini-batch loss from current step.
        """
        super().track(global_step, params, batch_loss)
        # update limits
        if self._adapt.should_compute(global_step):
            self._range = self._adapt.compute(
                global_step, params, batch_loss, self._range
            )

    def _compute(self, global_step, params, batch_loss):
        """Evaluate the individual gradient histogram.

        Args:
            global_step (int): The current iteration number.
            params ([torch.Tensor]): List of torch.Tensors holding the network's
                parameters.
            batch_loss (torch.Tensor): Mini-batch loss from current step.

        Returns:
            dict: Entry ``'hist'`` holds the histogram, entry ``'edges'`` holds
                the bin limits.
        """
        # Per-parameter histograms share the same bins, so they can be summed.
        hist = sum(p.grad_batch_transforms["hist_1d"][0] for p in params).detach()
        edges = params[0].grad_batch_transforms["hist_1d"][1]
        return {"hist": hist, "edges": edges}

    def _compute_histogram(self, batch_grad):
        """Transform individual gradients into histogram data.

        This function is be applied onto every parameter's individual gradient
        during backpropagation. If `p` denotes the parameter, then the return
        value is stored in the `"hist"` field of `p.grad_batch_transforms`.

        Args:
            batch_grad (torch.Tensor): Individual gradient of a parameter `p`. If
                `p` is of shape `(*)`, the individual gradients have shape `(N, *)`,
                where `N` denotes the batch size.

        Returns:
            (torch.Tensor, torch.Tensor): First tensor represents histogram counts,
                second tensor are bin edges. Both are on the input's device.
        """
        # NOTE ``batch_grad`` is 1/B ∇ℓᵢ so we need to compensate the 1/B
        B = batch_grad.shape[0]
        individual_gradients = B * batch_grad
        start, end = self._range
        # Outliers are clipped into the visible range instead of being dropped.
        individual_gradients = torch.clamp(individual_gradients, start, end)
        hist = torch.histc(individual_gradients, bins=self._bins, min=start, max=end)
        edges = torch.linspace(start, end, self._bins + 1, device=batch_grad.device)
        return hist, edges
class GradHist2d(SingleStepQuantity):
"""Quantity class for two-dimensional histograms over gradient and parameters.
Tracks two-dimensional histogram of individual gradient elements over
parameters. Individual gradient values are binned on the x-axis, parameter
values are binned on the y-axis.
"""
def __init__(
self,
track_schedule,
verbose=False,
bins=(40, 50),
range=((-1, 1), (-2, 2)),
adapt=(None, None),
keep_individual=False,
):
"""Initialize the 2D Histogram of individual gradient elements over parameters.
Args:
track_schedule (callable): Function that maps the ``global_step``
to a boolean, which determines if the quantity should be computed.
verbose (bool, optional): Turns on verbose mode. Defaults to ``False``.
bins ([int, int]): Number of bins in x and y direction. Default:
``(40, 50)``
range ([[float, float], [float, float]], optional): Bin limits in x and
y direction. Default ``((-1, 1), (-2, 2))``.
adapt ([BinAdaptation or None, BinAdaptation or None], optional): Policy
for adapting the bin limits in x and y direction. ``None`` indicates
no adaptation. Default value: ``(None, None)``.
keep_individual (bool, optional): Whether to keep individual
parameter histograms. Defaults to False.
"""
super().__init__(track_schedule, verbose=verbose)
self._range = list(range)
self._bins = bins
self._adapt = [NoAdaptation(verbose=verbose) if a is None else a for a in adapt]
self._keep_individual = keep_individual
def extensions(self, global_step):
"""Return list of BackPACK extensions required for the computation.
Args:
global_step (int): The current iteration number.
Returns:
list: (Potentially empty) list with required BackPACK quantities.
"""
ext = []
if self.should_compute(global_step):
ext.append(BatchGrad())
for adapt in self._adapt:
ext += adapt.extensions(global_step)
return ext
def extension_hooks(self, global_step):
"""Return list of BackPACK extension hooks required for the computation.
Args:
global_step (int): The current iteration number.
Returns:
[callable]: List of required BackPACK extension hooks for the current
iteration.
"""
hooks = []
if self.should_compute(global_step):
hooks.append(BatchGradTransformsHook({"hist_2d": self._compute_histogram}))
for adapt in self._adapt:
hooks += adapt.extension_hooks(global_step)
return hooks
def track(self, global_step, params, batch_loss):
"""Perform scheduled computations and store result.
Args:
global_step (int): The current iteration number.
params ([torch.Tensor]): List of torch.Tensors holding the network's
parameters.
batch_loss (torch.Tensor): Mini-batch loss from current step.
"""
super().track(global_step, params, batch_loss)
# update limits
for dim, adapt in enumerate(self._adapt):
if adapt.should_compute(global_step):
self._range[dim] = adapt.compute(
global_step, params, batch_loss, self._range
)
def _compute(self, global_step, params, batch_loss):
"""Aggregate histogram data over parameters and save to output."""
hist = sum(p.grad_batch_transforms["hist_2d"][0] for p in params).detach()
edges = params[0].grad_batch_transforms["hist_2d"][1]
result = {"hist": hist, "edges": edges}
if self._keep_individual:
result["param_groups"] = len(params)
for idx, p in enumerate(params):
hist, edges = p.grad_batch_transforms["hist_2d"]
hist = hist.detach()
result[f"param_{idx}"] = {"hist": hist, "edges": edges}
return result
def _compute_histogram(self, batch_grad):
"""Transform individual gradients and parameters into a 2d histogram.
Args:
batch_grad (torch.Tensor): Individual gradient of a parameter `p`. If
`p` is of shape `(*)`, the individual gradients have shape `(N, *)`,
where `N` denotes the batch size.
Returns:
(torch.Tensor, (torch.Tensor, torch.Tensor)): First tensor represents
histogram counts, second tuple holds the bin edges in x and y
direction. All are on the input's device.
"""
# NOTE ``batch_grad`` is 1/B ∇ℓᵢ so we need to compensate the 1/B
B = batch_grad.shape[0]
data = [B * batch_grad, batch_grad._param_weakref().data]
for dim, data_dim in enumerate(data):
lower, upper = self._range[dim]
bins = self._bins[dim]
# Histogram implementation does not include the limits, clip to bin center
bin_size = (upper - lower) / bins
data[dim] = torch.clamp(
data_dim, min=lower + bin_size / 2, max=upper - bin_size / 2
)
return self.__hist_high_mem(*data)
def __hist_high_mem(self, individual_gradients, param):
    """Compute histogram with memory-intensive strategy.

    Args:
        individual_gradients (torch.Tensor): Individual gradients, clipped to
            the histogram range.
        param (torch.Tensor): Parameter, clipped to the histogram range.

    Returns:
        (torch.Tensor, (torch.Tensor, torch.Tensor)): First tensor represents
            histogram counts, second tuple holds the bin edges in x and y
            direction. All are on the input's device.
    """
    # Replicate the parameter along a new leading batch axis so every
    # individual-gradient entry is paired with its parameter value.
    batch_size = individual_gradients.shape[0]
    repeat_shape = [batch_size] + param.dim() * [-1]
    paired = torch.stack(
        (
            individual_gradients.flatten(),
            param.unsqueeze(0).expand(*repeat_shape).flatten(),
        )
    )
    return histogram2d(paired, bins=self._bins, range=self._range)
| 4,869 |
3,459 | <reponame>werminghoff/Provenance
#ifndef __MD_MAP_REALTEC_H
#define __MD_MAP_REALTEC_H
// NOTE(review): the guard name uses a reserved identifier (leading double
// underscore); presumably kept for consistency with sibling headers.

// Factory for the REALTEC cart mapper. Parameters mirror the other
// MD_Make_Cart_Type_* factories: game info, ROM image and size, plus the
// generic integer/string parameters (semantics defined by the caller).
MD_Cart_Type *MD_Make_Cart_Type_REALTEC(const md_game_info *ginfo, const uint8 *ROM, const uint32 ROM_size, const uint32 iparam, const char *sparam);
#endif
| 100 |
352 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import os
import sys
import click
from sagify.log import logger
from sagify.commands import ASCII_LOGO
from sagify.config.config import ConfigManager
click.disable_unicode_literals_warning = True
@click.command()
@click.option(u"--image-name", required=False, help="Docker image name")
@click.option(u"--aws-region", required=False, help="AWS Region to use in operations")
@click.option(u"--aws-profile", required=False, help="AWS Profile to use in operations")
@click.option(u"--python-version", required=False, help="Python version used when building")
@click.option(u"--requirements-dir", required=False, help="Path to requirements.txt")
def configure(image_name, aws_region, aws_profile, python_version, requirements_dir):
    """
    Command to configure SageMaker template

    All options are optional; only the values actually provided are written
    back to the .sagify.json file in the current working directory.
    """
    logger.info(ASCII_LOGO)
    # Delegate to the testable helper, anchored at the current directory.
    _configure('.', image_name, aws_region, aws_profile, python_version, requirements_dir)
def _configure(config_dir, image_name, aws_region, aws_profile, python_version, requirements_dir):
    """Update the .sagify.json config under `config_dir` with the given overrides.

    Only non-None values are applied. Exits with status -1 when the config
    cannot be read or written (ValueError from the ConfigManager).
    """
    overrides = {
        'image_name': image_name,
        'aws_region': aws_region,
        'aws_profile': aws_profile,
        'python_version': python_version,
        'requirements_dir': requirements_dir,
    }
    try:
        config_manager = ConfigManager(os.path.join(config_dir, '.sagify.json'))
        config = config_manager.get_config()
        for field, value in overrides.items():
            if value is not None:
                setattr(config, field, value)
        config_manager.set_config(config)
        logger.info("\nConfiguration updated successfully!\n")
    except ValueError as e:
        logger.info("{}".format(e))
        sys.exit(-1)
| 676 |
14,668 | # Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit checks for Share share_targets."""
def CheckVersionUpdatedInShareTargetList(input_api, output_api):
  """Warn when share_targets.asciipb changes without a |version_id| bump."""

  def _is_share_target_list(affected_file):
    return (input_api.os_path.basename(
        affected_file.LocalPath()) == 'share_targets.asciipb')

  share_targets = input_api.AffectedFiles(file_filter=_is_share_target_list)
  if not share_targets:
    return []
  version_bumped = any(
      line.strip().startswith('version_id: ')
      for _, line in share_targets[0].ChangedContents())
  if version_bumped:
    return []
  return [
      output_api.PresubmitError(
          'Increment |version_id| in share_targets.asciipb if you are '
          'updating the share_targets proto.')
  ]
def CheckChangeOnUpload(input_api, output_api):
  # Presubmit entry point: run the version-bump check when a CL is uploaded.
  return CheckVersionUpdatedInShareTargetList(input_api, output_api)
| 369 |
3,200 | <gh_stars>1000+
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from polycommon.conf.handlers.env_handler import EnvConfHandler
from polycommon.conf.handlers.settings_handler import SettingsConfHandler
from polycommon.conf.option_service import OptionService
from polycommon.options.option import OptionStores
class ConfService(OptionService):
    """Option service wired to settings-backed and environment-backed handlers."""

    def setup(self) -> None:
        super().setup()
        # Register one conf handler per option store.
        self.stores.update({
            OptionStores.SETTINGS: SettingsConfHandler(),
            OptionStores.ENV: EnvConfHandler(),
        })
| 318 |
511 | /****************************************************************************
*
* Copyright 2016 Samsung Electronics All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*
****************************************************************************/
/***********************************************************************
* example/kernel_sample/rmutex.c
*
* Copyright (C) 2008 <NAME>. All rights reserved.
* Author: <NAME> <<EMAIL>>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
***********************************************************************/
#include <sys/types.h>
#include <stdio.h>
#include <pthread.h>
#include "kernel_sample.h"
#ifndef NULL
# define NULL (void*)0
#endif
#define NTHREADS 3
#define NLOOPS 3
#define NRECURSIONS 3
static pthread_mutex_t mut;
/* Recursively re-acquire the (recursive) mutex until NRECURSIONS levels are
 * held, yielding between steps so the other threads can interleave.
 */
static void thread_inner(int id, int level)
{
	int ret;

	if (level >= NRECURSIONS) {
		return;
	}

	/* Take the mutex */
	printf("thread_inner[%d, %d]: Locking\n", id, level);
	ret = pthread_mutex_lock(&mut);
	if (ret != 0) {
		printf("thread_inner[%d, %d]: ERROR pthread_mutex_lock failed: %d\n",
		       id, level, ret);
	}
	printf("thread_inner[%d, %d]: Locked\n", id, level);

	/* Give the other threads a chance */
	pthread_yield();
	thread_inner(id, level + 1);
	pthread_yield();

	/* Unlock the mutex */
	printf("thread_inner[%d, %d]: Unlocking\n", id, level);
	ret = pthread_mutex_unlock(&mut);
	if (ret != 0) {
		printf("thread_inner[%d, %d]: ERROR pthread_mutex_unlock failed: %d\n",
		       id, level, ret);
	}
	printf("thread_inner[%d, %d]: Unlocked\n", id, level);
	pthread_yield();
}
/* Thread entry point: runs the recursive-lock exercise NLOOPS times.
 * NOTE(review): `parameter` carries a small integer thread id smuggled
 * through the pointer argument; the (int) casts assume the value fits in an
 * int -- confirm pthread_addr_t/pointer width on the target platform.
 */
static void *thread_outer(void *parameter)
{
	int i;
	printf("thread_outer[%d]: Started\n", (int)parameter);
	for (i = 0; i < NLOOPS; i++) {
		printf("thread_outer[%d]: Loop %d\n", (int)parameter, i);
		thread_inner((int)parameter, 0);
	}
	printf("thread_outer[%d]: Exitting\n", (int)parameter);
	pthread_exit(NULL);
	return NULL; /* Non-reachable -- needed for some compilers */
}
/* Exercise a PTHREAD_MUTEX_RECURSIVE mutex with NTHREADS concurrent threads,
 * each locking it NRECURSIONS deep, NLOOPS times. Prints progress/errors.
 */
void recursive_mutex_test(void)
{
	pthread_t thread[NTHREADS];
#ifdef SDCC
	pthread_addr_t result[NTHREADS];
	pthread_attr_t attr;
#endif
	pthread_mutexattr_t mattr;
	int type;
	int status;
	int i;

	/* Initialize the mutex attributes and request a recursive mutex */
	pthread_mutexattr_init(&mattr);
	status = pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
	if (status != 0) {
		printf("recursive_mutex_test: ERROR pthread_mutexattr_settype failed, status=%d\n", status);
	}
	status = pthread_mutexattr_gettype(&mattr, &type);
	if (status != 0) {
		printf("recursive_mutex_test: ERROR pthread_mutexattr_gettype failed, status=%d\n", status);
	}
	if (type != PTHREAD_MUTEX_RECURSIVE) {
		printf("recursive_mutex_test: ERROR pthread_mutexattr_gettype return type=%d\n", type);
	}

	/* Initialize the mutex */
	printf("recursive_mutex_test: Initializing mutex\n");
	pthread_mutex_init(&mut, &mattr);

	/* Start the threads -- all at the same, default priority.
	 * Fix: cast (i + 1) as a whole. The original `(pthread_addr_t)i + 1`
	 * cast `i` to a pointer first and then did (non-portable) arithmetic
	 * on a void pointer.
	 */
	for (i = 0; i < NTHREADS; i++) {
		printf("recursive_mutex_test: Starting thread %d\n", i + 1);
#ifdef SDCC
		(void)pthread_attr_init(&attr);
		status = pthread_create(&thread[i], &attr, thread_outer, (pthread_addr_t)(i + 1));
#else
		status = pthread_create(&thread[i], NULL, thread_outer, (pthread_addr_t)(i + 1));
#endif
		if (status != 0) {
			printf("recursive_mutex_test: ERROR thread#%d creation: %d\n", i + 1, status);
		}
	}

	/* Wait for all of the threads to complete */
	for (i = 0; i < NTHREADS; i++) {
		printf("recursive_mutex_test: Waiting for thread %d\n", i + 1);
#ifdef SDCC
		pthread_join(thread[i], &result[i]);
#else
		pthread_join(thread[i], NULL);
#endif
	}

	/* Fix: release the mutex and attribute resources (previously leaked). */
	pthread_mutexattr_destroy(&mattr);
	pthread_mutex_destroy(&mut);

	printf("recursive_mutex_test: Complete\n");
}
| 1,923 |
573 | <filename>tools/lint.py
#!/usr/bin/env python2.7
# Copyright 2015, VIXL authors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ARM Limited nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import fnmatch
import hashlib
import multiprocessing
import os
import pickle
import re
import signal
import subprocess
import sys
import config
import git
import printer
import util
# Catch SIGINT to gracefully exit when ctrl+C is pressed.
def sigint_handler(signal, frame):
  # Exit quietly, without a traceback. (The `signal` parameter deliberately
  # shadows the module inside the handler; it is unused here.)
  sys.exit(1)
signal.signal(signal.SIGINT, sigint_handler)
def BuildOptions():
  """Build and parse the linter's command-line options.

  Returns:
    The argparse namespace with `files`, `jobs` and `no_cache`.
  """
  parser = argparse.ArgumentParser(
    description =
    '''This tool lints C++ files and produces a summary of the errors found.
    If no files are provided on the command-line, all C++ source files are
    processed, except for the test traces.
    Results are cached to speed up the process.
    ''',
    # Print default values.
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument('files', nargs = '*')
  # Fix: the help text previously said "Runs the tests" -- a copy/paste from
  # the test runner; this script runs the linter.
  parser.add_argument('--jobs', '-j', metavar='N', type=int, nargs='?',
                      default=multiprocessing.cpu_count(),
                      const=multiprocessing.cpu_count(),
                      help='''Runs the linter using N jobs. If the option is
                      set but no value is provided, the script will use as
                      many jobs as it thinks useful.''')
  parser.add_argument('--no-cache',
                      action='store_true', default=False,
                      help='Do not use cached lint results.')
  return parser.parse_args()
# Returns a tuple (filename, number of lint errors).
def Lint(filename, progress_prefix = ''):
  """Run cpplint.py on one file and return (filename, error_count)."""
  process = subprocess.Popen(['cpplint.py', filename],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
  outerr, _ = process.communicate()

  if process.returncode == 0:
    printer.PrintOverwritableLine(
      progress_prefix + "Done processing %s" % filename,
      type = printer.LINE_TYPE_LINTER)
    return (filename, 0)

  if progress_prefix:
    outerr = re.sub('^', progress_prefix, outerr, flags=re.MULTILINE)
  printer.Print(outerr)

  # Extract the error count cpplint.py reports on its summary line.
  match = re.search('Total errors found: (\d+)', outerr)
  if match:
    n_errors = int(match.group(1))
  else:
    print("Couldn't parse cpplint.py output.")
    n_errors = -1

  return (filename, n_errors)
# The multiprocessing map_async function does not allow passing multiple
# arguments directly, so use a wrapper.
def LintWrapper(args):
  """Unpack `args` and call Lint; exit quietly on ctrl+C.

  Fix: catch only KeyboardInterrupt instead of a bare `except:`. The bare
  clause silently converted every linting failure into exit(1), hiding the
  traceback; the stated purpose (per the comment above) is only to avoid
  flooding the output when the script is interrupted from the keyboard.
  """
  try:
    return Lint(*args)
  except KeyboardInterrupt:
    sys.exit(1)
def ShouldLint(filename, cached_results):
  """Return True unless the file's current md5 matches the cached one."""
  filename = os.path.realpath(filename)
  cached_hash = cached_results.get(filename)
  if cached_hash is None:
    return True
  with open(filename, 'rb') as f:
    current_hash = hashlib.md5(f.read()).hexdigest()
  return current_hash != cached_hash
# Returns the total number of errors found in the files linted.
# `cached_results` must be a dictionary, with the format:
# { 'filename': file_hash, 'other_filename': other_hash, ... }
# If not `None`, `cached_results` is used to avoid re-linting files, and new
# results are stored in it.
def LintFiles(files,
              jobs = 1,
              progress_prefix = '',
              cached_results = None):
  """Lint `files` with `jobs` parallel workers; return the total error count.

  `cached_results` maps real paths to md5 hex digests. Files whose hash
  matches are skipped; hashes of files that lint cleanly are added back to
  the dictionary. Returns -1 when cpplint.py is not on the PATH.

  NOTE(review): this is Python 2 code (tuple-parameter lambda below,
  list-returning filter); do not run under Python 3 without porting.
  """
  if not IsCppLintAvailable():
    print(
      printer.COLOUR_RED + \
      ("cpplint.py not found. Please ensure the depot"
       " tools are installed and in your PATH. See"
       " http://dev.chromium.org/developers/how-tos/install-depot-tools for"
       " details.") + \
      printer.NO_COLOUR)
    return -1

  # Filter out directories.
  files = filter(os.path.isfile, files)

  # Filter out files for which we have a cached correct result.
  # (Relies on Python 2 `filter` returning a list so len() works.)
  if cached_results is not None and len(cached_results) != 0:
    n_input_files = len(files)
    files = filter(lambda f: ShouldLint(f, cached_results), files)
    n_skipped_files = n_input_files - len(files)
    if n_skipped_files != 0:
      printer.Print(
        progress_prefix +
        'Skipping %d correct files that were already processed.' %
        n_skipped_files)
  pool = multiprocessing.Pool(jobs)
  # The '.get(9999999)' is workaround to allow killing the test script with
  # ctrl+C from the shell. This bug is documented at
  # http://bugs.python.org/issue8296.
  tasks = [(f, progress_prefix) for f in files]
  # Run under a try-catch to avoid flooding the output when the script is
  # interrupted from the keyboard with ctrl+C.
  try:
    results = pool.map_async(LintWrapper, tasks).get(9999999)
    pool.close()
    pool.join()
  except KeyboardInterrupt:
    pool.terminate()
    sys.exit(1)
  # Python 2 only: tuple unpacking in a lambda's parameter list.
  n_errors = sum(map(lambda (filename, errors): errors, results))
  # Record the hashes of files that linted cleanly so the next run can skip
  # them.
  if cached_results is not None:
    for filename, errors in results:
      if errors == 0:
        with open(filename, 'rb') as f:
          filename = os.path.realpath(filename)
          file_hash = hashlib.md5(f.read()).hexdigest()
          cached_results[filename] = file_hash
  printer.PrintOverwritableLine(
    progress_prefix + 'Total errors found: %d' % n_errors)
  printer.EnsureNewLine()
  return n_errors
def IsCppLintAvailable():
  """True when cpplint.py can be found on the PATH."""
  status, _ = util.getstatusoutput('which cpplint.py')
  return status == 0
# Matches C++ sources and headers by extension. Fix: raw string -- the
# original '\.' relied on an unrecognized escape passing through, which is a
# DeprecationWarning (and later SyntaxWarning) in Python 3.
CPP_EXT_REGEXP = re.compile(r'\.(cc|h)$')
def IsLinterInput(filename):
  # lint all C++ files. (Fix: identity comparison with None per PEP 8.)
  return CPP_EXT_REGEXP.search(filename) is not None
# Pickled {filename: md5} cache shared by ReadCachedResults/CacheResults.
cached_results_pkl_filename = \
  os.path.join(config.dir_tools, '.cached_lint_results.pkl')

def ReadCachedResults():
  """Load the cached lint results, or an empty dict if no cache exists."""
  if not os.path.isfile(cached_results_pkl_filename):
    return {}
  with open(cached_results_pkl_filename, 'rb') as pkl_file:
    return pickle.load(pkl_file)
def CacheResults(results):
  # Persist the {filename: md5} dictionary so later runs can skip files that
  # already linted cleanly.
  with open(cached_results_pkl_filename, 'wb') as pkl_file:
    pickle.dump(results, pkl_file)
def FilterOutTestTraceHeaders(files):
  """Drop generated aarch32/aarch64 test-trace headers from `files`."""
  trace_header_patterns = [
      os.path.join(os.path.relpath(config.dir_aarch32_traces, '.'), '*.h'),
      os.path.join(os.path.relpath(config.dir_aarch64_traces, '.'), '*.h')
  ]
  def KeepFile(f):
    return not any(fnmatch.fnmatch(f, pattern)
                   for pattern in trace_header_patterns)
  return filter(KeepFile, files)
def RunLinter(files, jobs=1, progress_prefix='', cached=True):
  """Lint `files`, reusing the on-disk cache when `cached` is True."""
  cached_results = ReadCachedResults() if cached else {}
  n_errors = LintFiles(files,
                       jobs=jobs,
                       progress_prefix=progress_prefix,
                       cached_results=cached_results)
  # Write the (possibly updated) cache back regardless of the outcome.
  CacheResults(cached_results)
  return n_errors
if __name__ == '__main__':
  # Parse the arguments.
  args = BuildOptions()
  # Default to every tracked source file when none are given explicitly.
  files = args.files or util.get_source_files()
  cached = not args.no_cache
  retcode = RunLinter(files, jobs=args.jobs, cached=cached)
  # Exit status is the total error count (0 == clean, -1 == cpplint missing).
  sys.exit(retcode)
| 3,106 |
444 | <filename>orchestra/migrations/0028_snapshots_to_iterations.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-06 21:57
from __future__ import unicode_literals
import logging
from operator import itemgetter
from dateutil.parser import parse
from django.db import migrations
from django.db import models
# NOTE(jrbotros): change import or copy into migration if code moves/changes
from orchestra.tests.helpers.iterations import verify_iterations
logger = logging.getLogger(__name__)
class SnapshotType:
    # Legacy snapshot-type constants from the pre-Iteration data model.
    SUBMIT = 0
    ACCEPT = 1
    REJECT = 2
class IterationStatus:
    # Mirrors the Iteration.status choices declared in the Migration below.
    PROCESSING = 0
    REQUESTED_REVIEW = 1
    PROVIDED_REVIEW = 2
class AssignmentStatus:
    # TaskAssignment.status values used by this migration.
    PROCESSING = 0
    SUBMITTED = 1
    FAILED = 2
# Map snapshot types onto iteration statuses
# NOTE(review): both SUBMIT and ACCEPT map to REQUESTED_REVIEW -- presumably
# because either action hands the work to a reviewer; confirm against the
# original snapshot semantics.
snapshot_type_to_iteration_status = {
    SnapshotType.SUBMIT: IterationStatus.REQUESTED_REVIEW,
    SnapshotType.ACCEPT: IterationStatus.REQUESTED_REVIEW,
    SnapshotType.REJECT: IterationStatus.PROVIDED_REVIEW,
}
def get_ordered_snapshots(task):
    """Collect every snapshot across a task's assignments, sorted by datetime.

    Each snapshot dict is annotated in place with its owning `assignment`
    and its index within that assignment (`assignment_snap_index`).
    """
    collected = []
    for assignment in task.assignments.all():
        snapshots = assignment.snapshots.get('snapshots')
        if snapshots is None:
            continue
        for index, snapshot in enumerate(snapshots):
            snapshot['assignment'] = assignment
            # Keep track of the snapshot's index relative to the assignment
            snapshot['assignment_snap_index'] = index
            collected.append(snapshot)
    return sorted(collected, key=itemgetter('datetime'))
def snapshots_to_iterations(apps, schema_editor):
    """Convert each task's legacy assignment snapshots into Iteration rows.

    For every snapshot an Iteration is created whose end is the snapshot's
    datetime; its start is either the owning assignment's start (first
    snapshot of that assignment) or the previous snapshot's datetime. A
    final PROCESSING iteration is appended when the task still has exactly
    one processing assignment.
    """
    Iteration = apps.get_model('orchestra', 'Iteration')
    Task = apps.get_model('orchestra', 'Task')
    # NOTE(review): TaskAssignment is fetched but never used below.
    TaskAssignment = apps.get_model('orchestra', 'TaskAssignment')
    for task in Task.objects.all():
        task_snapshots = get_ordered_snapshots(task)
        for task_snap_index, snapshot in enumerate(task_snapshots):
            assignment = snapshot['assignment']
            iteration = Iteration.objects.create(
                assignment=assignment,
                end_datetime=parse(snapshot['datetime']),
                submitted_data=snapshot['data'],
                status=snapshot_type_to_iteration_status[snapshot['type']])
            if snapshot['assignment_snap_index'] == 0:
                # Snapshot is the first for its assignment, so its start
                # datetime will be the same as its assignment
                # NOTE: This should cover the case where task_snap_index == 0
                iteration.start_datetime = assignment.start_datetime
            else:
                # NOTE(review): assigns the raw datetime string here, while
                # end_datetime above goes through parse(); presumably Django
                # coerces ISO strings on save -- confirm.
                iteration.start_datetime = (
                    task_snapshots[task_snap_index - 1]['datetime'])
            iteration.save()
        processing_assignments = task.assignments.filter(
            status=AssignmentStatus.PROCESSING)
        if processing_assignments.exists():
            if processing_assignments.count() > 1:
                logger.error(
                    'Task {} has too many processing assignments'.format(
                        task.id))
            else:
                # If task has a processing assignment, create a final
                # processing iteration
                processing_assignment = processing_assignments.first()
                if (not task_snapshots
                        or not processing_assignment.iterations.exists()):
                    final_start_datetime = processing_assignment.start_datetime
                else:
                    final_start_datetime = task_snapshots[-1]['datetime']
                iteration = Iteration.objects.create(
                    assignment=processing_assignment,
                    start_datetime=final_start_datetime,
                    status=IterationStatus.PROCESSING)
        try:
            verify_iterations(task.id)
        except AssertionError:
            # Log-and-continue: a single bad task must not abort the whole
            # data migration.
            logger.error('Iterations invalid for task {}.'.format(task.id))
class Migration(migrations.Migration):
    # Data migration: widen Iteration.status choices, then backfill
    # Iteration rows from the legacy assignment snapshots.

    dependencies = [
        ('orchestra', '0027_create_time_entries_for_snapshots'),
    ]

    operations = [
        migrations.AlterField(
            model_name='iteration',
            name='status',
            field=models.IntegerField(choices=[(
                0, 'Processing'), (1, 'Requested Review'), (2, 'Provided Review')], default=0),
        ),
        migrations.RunPython(snapshots_to_iterations)  # manually-reviewed
    ]
| 1,936 |
2,989 | <filename>databus-core/databus-core-container/src/main/java/com/linkedin/databus2/core/container/monitoring/mbean/DbusHttpTotalStats.java
package com.linkedin.databus2.core.container.monitoring.mbean;
/*
*
* Copyright 2013 LinkedIn Corp. All rights reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
import java.io.IOException;
import java.io.OutputStream;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.concurrent.locks.Lock;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import org.apache.avro.io.JsonEncoder;
import org.apache.avro.specific.SpecificDatumWriter;
import org.apache.log4j.Logger;
import com.linkedin.databus.core.Checkpoint;
import com.linkedin.databus.core.monitoring.mbean.AbstractMonitoringMBean;
import com.linkedin.databus.core.monitoring.mbean.DatabusMonitoringMBean;
import com.linkedin.databus2.core.container.monitoring.events.DbusHttpTotalStatsEvent;
public class DbusHttpTotalStats extends AbstractMonitoringMBean<DbusHttpTotalStatsEvent>
implements DbusHttpTotalStatsMBean
{
public static final String MODULE = DbusHttpTotalStats.class.getName();
public static final Logger LOG = Logger.getLogger(MODULE);
private final HashSet<Object> _peers;
private final String _dimension;
public DbusHttpTotalStats(int ownerId, String dimesion, boolean enabled, boolean threadSafe,
DbusHttpTotalStatsEvent initData)
{
super(enabled, threadSafe, initData);
_event.ownerId = ownerId;
_event.dimension = dimesion;
_peers = new HashSet<Object>(1000);
_dimension = dimesion;
reset();
}
  /** Returns a copy of these stats with the requested thread-safety mode. */
  public DbusHttpTotalStats clone(boolean threadSafe)
  {
    return new DbusHttpTotalStats(_event.ownerId, _dimension, _enabled.get(), threadSafe,
                                  getStatistics(null));
  }
@Override
public int getNumPeers()
{
Lock readLock = acquireReadLock();
int result = 0;
try
{
result = _event.numPeers;
}
finally
{
releaseLock(readLock);
}
return result;
}
@Override
public int getNumRegisterCalls()
{
Lock readLock = acquireReadLock();
int result = 0;
try
{
result = _event.numRegisterCalls;
}
finally
{
releaseLock(readLock);
}
return result;
}
@Override
public int getNumSourcesCalls()
{
Lock readLock = acquireReadLock();
int result = 0;
try
{
result = _event.numSourcesCalls;
}
finally
{
releaseLock(readLock);
}
return result;
}
@Override
public long getNumStreamCalls()
{
Lock readLock = acquireReadLock();
long result = 0;
try
{
result = _event.numStreamCalls;
}
finally
{
releaseLock(readLock);
}
return result;
}
@Override
public long getTimeSinceLastResetMs()
{
Lock readLock = acquireReadLock();
long result = 0;
try
{
result = System.currentTimeMillis() - _event.timestampLastResetMs;
}
finally
{
releaseLock(readLock);
}
return result;
}
@Override
public long getTimestampLastResetMs()
{
Lock readLock = acquireReadLock();
long result = 0;
try
{
result = _event.timestampLastResetMs;
}
finally
{
releaseLock(readLock);
}
return result;
}
@Override
public long getMaxStreamWinScn()
{
Lock readLock = acquireReadLock();
try
{
return _event.maxStreamWinScn;
}
finally
{
releaseLock(readLock);
}
}
@Override
public long getMinStreamWinScn()
{
Lock readLock = acquireReadLock();
try
{
return _event.minStreamWinScn;
}
finally
{
releaseLock(readLock);
}
}
@Override
public double getLatencyStreamCalls()
{
Lock readLock = acquireReadLock();
try
{
long numCalls = _event.numStreamCalls;
return (0 == numCalls) ? 0.0 : 1.0 * _event.timeStreamCallsMs / numCalls;
}
finally
{
releaseLock(readLock);
}
}
  /** Not thread-safe! Make sure you are holding a write lock*/
  private void registerPeer(String peer)
  {
    // The HashSet de-duplicates peers; numPeers mirrors the set's size.
    _peers.add(peer);
    _event.numPeers = _peers.size();
  }
public void registerRegisterCall(String peer)
{
if (! _enabled.get()) return;
Lock writeLock = acquireWriteLock();
try
{
_event.numRegisterCalls++;
registerPeer(peer);
}
finally
{
releaseLock(writeLock);
}
}
public void registerSourcesCall(String peer)
{
if (! _enabled.get()) return;
Lock writeLock = acquireWriteLock();
try
{
_event.numSourcesCalls++;
registerPeer(peer);
}
finally
{
releaseLock(writeLock);
}
}
public void registerStreamResponse(long totalTime)
{
if (! _enabled.get()) return;
Lock writeLock = acquireWriteLock();
try
{
_event.numStreamCalls++;
_event.timeStreamCallsMs += totalTime;
}
finally
{
releaseLock(writeLock);
}
}
public void registerStreamRequest(String peer, Checkpoint cp)
{
long winScn = cp.getWindowScn();
Lock writeLock = acquireWriteLock();
try
{
_event.maxStreamWinScn = maxValue(_event.maxStreamWinScn, winScn);
_event.minStreamWinScn = minValue(_event.minStreamWinScn, winScn);
registerPeer(peer);
}
finally
{
releaseLock(writeLock);
}
}
@Override
protected void resetData()
{
_event.timestampLastResetMs = System.currentTimeMillis();
_event.timeSinceLastResetMs = 0;
_event.numPeers = 0;
_event.numRegisterCalls = 0;
_event.numSourcesCalls = 0;
_event.numStreamCalls = 0;
_event.timeStreamCallsMs = 0;
_event.maxStreamWinScn = DEFAULT_MAX_LONG_VALUE;
_event.minStreamWinScn = DEFAULT_MIN_LONG_VALUE;
_event.numErrInvalidParamsRegisterCalls = 0;
_event.numErrRegisterCalls = 0;
_event.numErrInvalidParamsStreamCalls = 0;
_event.numErrScnNotFoundStreamCalls = 0;
_event.numErrSourcesCalls = 0;
_event.numErrStreamCalls = 0;
_event.numErrInvalidParamsRegisterCalls = 0;
_event.numErrInvalidParamsSourcesCalls = 0;
_event.mastershipStatus = 0;
_peers.clear();
}
  @Override
  public JsonEncoder createJsonEncoder(OutputStream out) throws IOException
  {
    // Avro JSON encoder bound to this event type's schema.
    return new JsonEncoder(_event.getSchema(), out);
  }
@Override
protected void cloneData(DbusHttpTotalStatsEvent event)
{
event.ownerId = _event.ownerId;
event.dimension = _event.dimension;
event.timestampLastResetMs = _event.timestampLastResetMs;
event.timeSinceLastResetMs = System.currentTimeMillis() - _event.timestampLastResetMs;
event.numPeers = _event.numPeers;
event.numRegisterCalls = _event.numRegisterCalls;
event.numSourcesCalls = _event.numSourcesCalls;
event.numStreamCalls = _event.numStreamCalls;
event.timeStreamCallsMs = _event.timeStreamCallsMs;
event.maxStreamWinScn = _event.maxStreamWinScn;
event.minStreamWinScn = _event.minStreamWinScn;
event.numErrInvalidParamsRegisterCalls = _event.numErrInvalidParamsRegisterCalls;
event.numErrRegisterCalls = _event.numErrRegisterCalls;
event.numErrInvalidParamsStreamCalls = _event.numErrInvalidParamsStreamCalls;
event.numErrScnNotFoundStreamCalls = _event.numErrScnNotFoundStreamCalls;
event.numErrSourcesCalls = _event.numErrSourcesCalls;
event.numErrStreamCalls = _event.numErrStreamCalls;
event.numErrInvalidParamsRegisterCalls = _event.numErrInvalidParamsRegisterCalls;
event.numErrInvalidParamsSourcesCalls = _event.numErrInvalidParamsSourcesCalls;
event.mastershipStatus = _event.mastershipStatus;
}
  @Override
  protected DbusHttpTotalStatsEvent newDataEvent()
  {
    // Fresh, zeroed event instance for the monitoring base class.
    return new DbusHttpTotalStatsEvent();
  }
  @Override
  protected SpecificDatumWriter<DbusHttpTotalStatsEvent> getAvroWriter()
  {
    // Avro writer used when serializing the stats event.
    return new SpecificDatumWriter<DbusHttpTotalStatsEvent>(DbusHttpTotalStatsEvent.class);
  }
  @Override
  public void mergeStats(DatabusMonitoringMBean<DbusHttpTotalStatsEvent> other)
  {
    super.mergeStats(other);

    // The peer set lives outside the event payload, so merge it separately.
    if (other instanceof DbusHttpTotalStats)
    {
      mergeClients((DbusHttpTotalStats)other);
    }
  }
@Override
protected void doMergeStats(Object eventData)
{
if (! (eventData instanceof DbusHttpTotalStatsEvent))
{
LOG.warn("Attempt to merge unknown event class" + eventData.getClass().getName());
return;
}
DbusHttpTotalStatsEvent e = (DbusHttpTotalStatsEvent)eventData;
/** Allow use negative relay IDs for aggregation across multiple relays */
if (_event.ownerId > 0 && e.ownerId != _event.ownerId)
{
LOG.warn("Attempt to data for a different relay " + e.ownerId);
return;
}
_event.numPeers += e.numPeers;
_event.numRegisterCalls += e.numRegisterCalls;
_event.numSourcesCalls += e.numSourcesCalls;
_event.numStreamCalls += e.numStreamCalls;
_event.timeStreamCallsMs += e.timeStreamCallsMs;
_event.maxStreamWinScn = maxValue(_event.maxStreamWinScn, e.maxStreamWinScn);
_event.minStreamWinScn = minValue(_event.minStreamWinScn, e.minStreamWinScn);
_event.numErrInvalidParamsRegisterCalls += e.numErrInvalidParamsRegisterCalls;
_event.numErrRegisterCalls += e.numErrRegisterCalls;
_event.numErrInvalidParamsStreamCalls += e.numErrInvalidParamsStreamCalls;
_event.numErrScnNotFoundStreamCalls += e.numErrScnNotFoundStreamCalls;
_event.numErrSourcesCalls += e.numErrSourcesCalls;
_event.numErrStreamCalls += e.numErrStreamCalls;
_event.numErrInvalidParamsRegisterCalls += e.numErrInvalidParamsRegisterCalls;
_event.numErrInvalidParamsSourcesCalls += e.numErrInvalidParamsSourcesCalls;
_event.mastershipStatus = Math.max(e.mastershipStatus,_event.mastershipStatus);
// numClients cannot be merged
}
  /** A bit of a hack to merge state outside the event state */
  private void mergeClients(DbusHttpTotalStats other)
  {
    // NOTE(review): acquireWriteLock(otherReadLock) takes our write lock
    // while the other instance's read lock is held -- presumably the base
    // class orders the acquisition to avoid deadlock; confirm in
    // AbstractMonitoringMBean before changing.
    Lock otherReadLock = other.acquireReadLock();
    Lock writeLock = acquireWriteLock(otherReadLock);
    try
    {
      // Union the peer sets and refresh the derived counter.
      _peers.addAll(other._peers);
      _event.numPeers = _peers.size();
    }
    finally
    {
      releaseLock(writeLock);
      releaseLock(otherReadLock);
    }
  }
  @Override
  public ObjectName generateObjectName() throws MalformedObjectNameException
  {
    // JMX name: base properties plus ownerId/dimension discriminators so
    // each relay+dimension pair registers a distinct MBean.
    Hashtable<String, String> mbeanProps = generateBaseMBeanProps();
    mbeanProps.put("ownerId", Integer.toString(_event.ownerId));
    mbeanProps.put("dimension", _dimension);
    return new ObjectName(AbstractMonitoringMBean.JMX_DOMAIN, mbeanProps);
  }
  @Override
  public String getDimension()
  {
    // Immutable after construction, hence no lock is needed.
    return _dimension;
  }
public void registerInvalidStreamRequest(String peer)
{
Lock writeLock = acquireWriteLock();
try
{
registerPeer(peer);
_event.numErrStreamCalls++;
_event.numErrInvalidParamsStreamCalls++;
}
finally
{
releaseLock(writeLock);
}
}
public void registerScnNotFoundStreamResponse(String peer)
{
Lock writeLock = acquireWriteLock();
try
{
registerPeer(peer);
_event.numErrStreamCalls++;
_event.numErrScnNotFoundStreamCalls++;
}
finally
{
releaseLock(writeLock);
}
}
public void registerInvalidSourceRequest(String peer)
{
Lock writeLock = acquireWriteLock();
try
{
registerPeer(peer);
_event.numErrInvalidParamsSourcesCalls++;
_event.numErrSourcesCalls++;
}
finally
{
releaseLock(writeLock);
}
}
/**
 * Counts an invalid /register call from the given peer.
 */
public void registerInvalidRegisterCall(String peer)
{
    Lock writeLock = acquireWriteLock();
    try
    {
        registerPeer(peer);
        // BUG FIX: this previously incremented numErrStreamCalls, so an invalid
        // /register call inflated the /stream error counter. Every sibling
        // method pairs the "invalid params" counter with the total-error
        // counter for the SAME endpoint (see registerInvalidStreamRequest and
        // registerInvalidSourceRequest), and getNumErrRegister() reads
        // numErrRegisterCalls.
        _event.numErrRegisterCalls++;
        _event.numErrInvalidParamsRegisterCalls++;
    }
    finally
    {
        releaseLock(writeLock);
    }
}
@Override
public long getNumScnNotFoundStream()
{
    // Snapshot the counter under the read lock; the lock is always released.
    final Lock readLock = acquireReadLock();
    try
    {
        return _event.numErrScnNotFoundStreamCalls;
    }
    finally
    {
        releaseLock(readLock);
    }
}
@Override
public long getNumErrStream()
{
    // Read the total /stream error count under the read lock.
    final Lock readLock = acquireReadLock();
    try
    {
        return _event.numErrStreamCalls;
    }
    finally
    {
        releaseLock(readLock);
    }
}
@Override
public long getNumErrStreamReq()
{
    // Invalid-parameters /stream requests only (subset of getNumErrStream()).
    final Lock readLock = acquireReadLock();
    try
    {
        return _event.numErrInvalidParamsStreamCalls;
    }
    finally
    {
        releaseLock(readLock);
    }
}
@Override
public long getNumErrRegister()
{
    // Total /register error count, read under the read lock.
    final Lock readLock = acquireReadLock();
    try
    {
        return _event.numErrRegisterCalls;
    }
    finally
    {
        releaseLock(readLock);
    }
}
@Override
public long getNumErrRegisterReq()
{
    // Invalid-parameters /register requests only.
    final Lock readLock = acquireReadLock();
    try
    {
        return _event.numErrInvalidParamsRegisterCalls;
    }
    finally
    {
        releaseLock(readLock);
    }
}
@Override
public long getNumErrSources()
{
    // Total /sources error count, read under the read lock.
    final Lock readLock = acquireReadLock();
    try
    {
        return _event.numErrSourcesCalls;
    }
    finally
    {
        releaseLock(readLock);
    }
}
@Override
public long getNumErrSourcesReq()
{
    // Invalid-parameters /sources requests only.
    final Lock readLock = acquireReadLock();
    try
    {
        return _event.numErrInvalidParamsSourcesCalls;
    }
    finally
    {
        releaseLock(readLock);
    }
}
@Override
public int getMastershipStatus()
{
    // Read the current mastership status under the read lock.
    final Lock readLock = acquireReadLock();
    try
    {
        return _event.mastershipStatus;
    }
    finally
    {
        releaseLock(readLock);
    }
}
/** Overwrites (does not merge) the mastership status under the write lock. */
public void registerMastershipStatus(int i)
{
    final Lock lock = acquireWriteLock();
    try
    {
        _event.mastershipStatus = i;
    }
    finally
    {
        releaseLock(lock);
    }
}
@Override
public double getHttpErrorRate()
{
    // Ratio of failed to total HTTP calls across /stream, /register and
    // /sources; 0.0 when no calls have been made yet.
    final Lock readLock = acquireReadLock();
    try
    {
        final long totalCalls = _event.numStreamCalls + _event.numRegisterCalls + _event.numSourcesCalls;
        final long totalErrors = _event.numErrStreamCalls + _event.numErrSourcesCalls +
                                 _event.numErrRegisterCalls;
        if (totalCalls == 0)
        {
            return 0.0;
        }
        return (double) totalErrors / totalCalls;
    }
    finally
    {
        releaseLock(readLock);
    }
}
}
| 5,958 |
754 | /*-
* <<
* UAVStack
* ==
* Copyright (C) 2016 - 2017 UAVStack
* ==
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* >>
*/
package com.creditease.uav.util;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;
import com.creditease.monitor.proxy.spi.JDKProxyInvokeHandler;
public class JDKProxyInvokeUtil {

    /**
     * Checks whether {@code t} is a JDK dynamic proxy driven by a
     * {@link JDKProxyInvokeHandler} (or a subclass of it).
     *
     * @param t object to inspect; {@code null} is treated as "not a proxy"
     * @return true only for proxies whose handler is a JDKProxyInvokeHandler
     */
    public static boolean isJDKProxy(Object t) {

        // Robustness fix: the original threw NullPointerException on null input.
        if (t == null) {
            return false;
        }

        if (!Proxy.isProxyClass(t.getClass())) {
            return false;
        }

        InvocationHandler ih = Proxy.getInvocationHandler(t);
        if (ih == null) {
            return false;
        }

        return JDKProxyInvokeHandler.class.isAssignableFrom(ih.getClass());
    }

    /**
     * Creates a proxy for the handler's target, avoiding double-wrapping: if
     * the target is already one of our proxies it is returned unchanged.
     *
     * @param loader     class loader for the generated proxy class
     * @param interfaces interfaces the proxy must implement
     * @param ih         handler that owns the target instance
     * @return the existing proxy target, or a newly created proxy
     */
    @SuppressWarnings("unchecked")
    public static <T> T newProxyInstance(ClassLoader loader, Class<?>[] interfaces, JDKProxyInvokeHandler<T> ih) {

        if (isJDKProxy(ih.getTarget())) {
            return ih.getTarget();
        }

        return (T) Proxy.newProxyInstance(loader, interfaces, ih);
    }
}
| 544 |
850 | <reponame>AlexChrisF/udacity<filename>tensorflow/core/util/env_var.h
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_UTIL_ENV_VAR_H_
// Bug fix: the guard macro was tested but never defined, so the include guard
// was ineffective and double inclusion was not prevented.
#define TENSORFLOW_UTIL_ENV_VAR_H_

#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {

// Return a boolean into "value" from the environmental variable "env_var_name".
// If it is unset, the default value is used.
// A string "0" or a case insensitive "false" is interpreted as false.
// A string "1" or a case insensitive "true" is interpreted as true.
// Otherwise, an error status is returned.
Status ReadBoolFromEnvVar(StringPiece env_var_name, bool default_val,
                          bool* value);

// Return an int64 into "value" from the environmental variable "env_var_name".
// If it is unset, the default value is used.
// If the string cannot be parsed into int64, an error status is returned.
Status ReadInt64FromEnvVar(StringPiece env_var_name, int64 default_val,
                           int64* value);

}  // namespace tensorflow

#endif  // TENSORFLOW_UTIL_ENV_VAR_H_
2,372 | //
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_GEOMUTILS_PX_TRIANGLE
#define PX_PHYSICS_GEOMUTILS_PX_TRIANGLE
/** \addtogroup geomutils
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxVec3.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Triangle class.
*/
class PxTriangle
{
public:
	/**
	\brief Default constructor. Vertices are left uninitialized.
	*/
	PX_FORCE_INLINE PxTriangle() {}

	/**
	\brief Constructs a triangle from three vertices.
	\param[in] p0 Point 0
	\param[in] p1 Point 1
	\param[in] p2 Point 2
	*/
	PX_FORCE_INLINE PxTriangle(const PxVec3& p0, const PxVec3& p1, const PxVec3& p2)
	{
		verts[0] = p0;
		verts[1] = p1;
		verts[2] = p2;
	}

	/**
	\brief Copy constructor.
	\param[in] triangle Triangle to copy.
	*/
	PX_FORCE_INLINE PxTriangle(const PxTriangle& triangle)
	{
		for (unsigned int i = 0; i < 3; ++i)
			verts[i] = triangle.verts[i];
	}

	/**
	\brief Destructor.
	*/
	PX_FORCE_INLINE ~PxTriangle() {}

	/**
	\brief Assignment operator.
	*/
	PX_FORCE_INLINE void operator=(const PxTriangle& triangle)
	{
		for (unsigned int i = 0; i < 3; ++i)
			verts[i] = triangle.verts[i];
	}

	/**
	\brief Computes the normalized normal of the triangle.
	\param[out] _normal Receives the unit-length normal.
	*/
	PX_FORCE_INLINE void normal(PxVec3& _normal) const
	{
		const PxVec3 edge1 = verts[1] - verts[0];
		const PxVec3 edge2 = verts[2] - verts[0];
		_normal = edge1.cross(edge2);
		_normal.normalize();
	}

	/**
	\brief Computes the unnormalized normal of the triangle.
	\param[out] _normal Receives the (not normalized) normal.
	*/
	PX_FORCE_INLINE void denormalizedNormal(PxVec3& _normal) const
	{
		const PxVec3 edge1 = verts[1] - verts[0];
		const PxVec3 edge2 = verts[2] - verts[0];
		_normal = edge1.cross(edge2);
	}

	/**
	\brief Computes the area of the triangle.
	\return Half the magnitude of the cross product of two edges.
	*/
	PX_FORCE_INLINE PxReal area() const
	{
		const PxVec3 edge1 = verts[0] - verts[1];
		const PxVec3 edge2 = verts[0] - verts[2];
		return edge1.cross(edge2).magnitude() * 0.5f;
	}

	/**
	\return A point on the triangle at barycentric coordinates (u, v).
	*/
	PxVec3 pointFromUV(PxReal u, PxReal v) const
	{
		return (1.0f-u-v)*verts[0] + u*verts[1] + v*verts[2];
	}

	/**
	\brief Triangle vertices.
	*/
	PxVec3 verts[3];
};
#if !PX_DOXYGEN
}
#endif
/** @} */
#endif
| 1,449 |
3,285 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import hashlib
import json
import time
import numpy as np
import six
from oneflow.compatible import single_client as flow
from oneflow.core.summary import event_pb2 as event_pb2
from oneflow.core.summary import plugin_data_pb2 as plugin_data_pb2
from oneflow.core.summary import projector_pb2 as projector_pb2
from oneflow.core.summary import summary_pb2 as summary_pb2
from oneflow.core.summary import tensor_pb2 as tensor_pb2
def text(text, tag=None):
    """Add a text list to Summary

    Args:
        text: A non-empty list (or tuple) of str
        tag: The tag of the summary; defaults to "text"

    Returns:
        A protobuf message [Summary], or None when `text` is not a non-empty
        list/tuple (original behavior, preserved for compatibility).
    """
    if not (isinstance(text, (tuple, list)) and len(text) > 0):
        return None
    # The original guard `not isinstance(tag, str) or tag is None` was
    # redundant: None is never a str, so the isinstance test alone suffices.
    if not isinstance(tag, str):
        tag = "text"
    tensor_shape = tensor_pb2.TensorShapeProto()
    dim = tensor_shape.dim.add()
    dim.size = len(text)
    tensor = tensor_pb2.TensorProto(
        dtype=tensor_pb2.DT_STRING, tensor_shape=tensor_shape
    )
    for entry in text:
        tensor.string_val.append(entry.encode("utf-8"))
    summary = summary_pb2.Summary()
    # The previous code bound the result of add() to an unused local.
    summary.value.add(
        tag=tag,
        metadata=summary_pb2.SummaryMetadata(
            plugin_data=summary_pb2.SummaryMetadata.PluginData(plugin_name="text")
        ),
        tensor=tensor,
    )
    return summary
def _get_tensor(values, dtype=None, shape=None):
    """Build an (empty) float TensorProto of the given shape.

    Note: `values` and `dtype` are currently unused beyond the signature;
    they are kept for interface compatibility with existing callers.
    """
    # Fix: `np.float` and `np.asscalar` were removed from NumPy (np.float in
    # 1.24, np.asscalar in 1.23), which made this function raise
    # AttributeError on modern NumPy. Use the builtin `float` dtype and
    # ndarray.item() instead.
    array = np.empty(shape, dtype=float)
    tensor_shape = tensor_pb2.TensorShapeProto()
    dim = tensor_shape.dim.add()
    dim.size = 0
    tensor_proto = tensor_pb2.TensorProto(
        dtype=tensor_pb2.DT_FLOAT, tensor_shape=tensor_shape
    )
    proto_values = array.ravel()
    tensor_proto.float_val.extend([x.item() for x in proto_values])
    return tensor_proto
def hparams(hparams):
    """Add hparams to Summary

    Args:
        hparams: A dict of Hparams

    Raises:
        TypeError: If the type of hparam not in (str, int, float, bool)
        TypeError: If the type of metric not in (float, int)

    Returns:
        A protobuf message [Summary]
    """
    # Split the input into plain hyperparameters and metrics.
    (hparams, metrics) = _get_hparams_dict(hparams)
    # The session group name is a stable hash of the compact, key-sorted JSON
    # encoding of the hyperparameters, so identical configs share a group.
    jparams = json.dumps(hparams, sort_keys=True, separators=(",", ":"))
    group_name = hashlib.sha256(jparams.encode("utf-8")).hexdigest()
    session_start_info = plugin_data_pb2.SessionStartInfo(
        group_name=group_name, start_time_secs=time.time()
    )
    # Store each hparam in the proto field matching its Python type.
    for key in sorted(hparams):
        value = hparams[key]
        if isinstance(value, str):
            session_start_info.hparams[key].string_value = value
        # NOTE(review): bool is a subclass of int, so booleans are caught by
        # this branch and stored as numbers; the bool_value branch below is
        # unreachable -- confirm whether that is intended.
        elif isinstance(value, (float, int)):
            session_start_info.hparams[key].number_value = value
        elif isinstance(value, bool):
            session_start_info.hparams[key].bool_value = value
        else:
            raise TypeError("the type of value: %r is not supported!" % value)
    for key in metrics:
        value = metrics[key]
        if isinstance(value, (float, int)):
            session_start_info.metrics[key].number_value = value
        else:
            raise TypeError("the type of value: %r is not supported!" % value)
    summary = summary_pb2.Summary()
    summary_metadata = _get_metadata(
        plugin_data_pb2.HParamsPluginData(session_start_info=session_start_info)
    )
    # The hparams plugin expects this fixed tag and an empty float tensor.
    summary.value.add(
        tag="_hparams_/session_start_info",
        metadata=summary_metadata,
        tensor=_get_tensor([], tensor_pb2.DT_FLOAT, (0,)),
    )
    return summary
def _get_metadata(hparams_plugin_data):
    """Wrap HParams plugin data in a SummaryMetadata message (version 0)."""
    content = plugin_data_pb2.HParamsPluginData()
    content.CopyFrom(hparams_plugin_data)
    content.version = 0
    plugin = summary_pb2.SummaryMetadata.PluginData(
        plugin_name="hparams", content=content.SerializeToString()
    )
    return summary_pb2.SummaryMetadata(plugin_data=plugin)
def _get_hparams_dict(hparams):
    """Split a raw hparams mapping into (hparams_dict, metrics_dict).

    HParam keys are replaced by their names; Metric keys route the value into
    the metrics dict. Values are unwrapped from numpy scalars via _get_value.

    Raises:
        ValueError: if a key appears twice after name resolution.
    """
    hparams_dict = {}
    metrics_dict = {}
    # Idiom fix: iterate via the instance method instead of the unbound
    # `dict.items(hparams)` call.
    for key, value in hparams.items():
        if key in hparams_dict or key in metrics_dict:
            raise ValueError("the key is already exist %r" % (key,))
        if isinstance(key, HParam):
            key = key.name
        if isinstance(key, Metric):
            metrics_dict[key.name] = _get_value(value)
            continue
        hparams_dict[key] = _get_value(value)
    return (hparams_dict, metrics_dict)
def _get_value(value):
if isinstance(value, np.generic):
return value.item()
else:
return value
class HParam(object):
    """Describes a hyperparameter: its name and optional value domain.

    The domain (dtype) must be an IntegerRange, RealRange, ValueSet or None.
    """

    def __init__(self, name, dtype=None):
        """Create a Hparam object.

        Args:
            name: Hparam name
            dtype: Hparam domain (IntegerRange, RealRange, ValueSet or None)

        Raises:
            ValueError: If dtype is not one of the allowed domain types
        """
        self.name_ = name
        self.dtype_ = dtype
        allowed = (IntegerRange, RealRange, ValueSet, type(None))
        if not isinstance(self.dtype_, allowed):
            raise ValueError(
                "Hparam dtype must be: (IntegerRange, RealRange, ValueSet) : %r"
                % (self.dtype_,)
            )

    @property
    def name(self):
        return self.name_

    @property
    def dtype(self):
        return self.dtype_
class IntegerRange(object):
    """An inclusive integer range between min_value and max_value."""

    def __init__(self, min_value, max_value):
        """Create an 'IntegerRange' object.

        Args:
            min_value: The min value of the range
            max_value: The max value of the range

        Raises:
            TypeError: If 'min_value' or 'max_value' is not an int
            ValueError: If 'min_value' > 'max_value'
        """
        # Validate max first, then min, to keep the original error order.
        for label, bound in (("max", max_value), ("min", min_value)):
            if not isinstance(bound, int):
                raise TypeError(
                    "%s_value is not an integer value: %r" % (label, bound)
                )
        if min_value > max_value:
            raise ValueError(
                "max_value must bigger than min_value: %r > %r" % (min_value, max_value)
            )
        self.min_value_ = min_value
        self.max_value_ = max_value

    @property
    def min_value(self):
        return self.min_value_

    @property
    def max_value(self):
        return self.max_value_
class RealRange(object):
    """An inclusive real-number range between min_value and max_value."""

    def __init__(self, min_value, max_value):
        """Create a 'RealRange' object.

        Args:
            min_value: The min value of the range
            max_value: The max value of the range

        Raises:
            TypeError: If 'min_value' or 'max_value' is not a float
            ValueError: If 'min_value' > 'max_value'
        """
        # Validate max first, then min, to keep the original error order.
        for label, bound in (("max", max_value), ("min", min_value)):
            if not isinstance(bound, float):
                raise TypeError(
                    "%s_value is not an float value: %r" % (label, bound)
                )
        if min_value > max_value:
            raise ValueError(
                "max_value must bigger than min_value: %r > %r" % (min_value, max_value)
            )
        self.min_value_ = min_value
        self.max_value_ = max_value

    @property
    def min_value(self):
        return self.min_value_

    @property
    def max_value(self):
        return self.max_value_
class ValueSet(object):
    """A sorted set of allowed values of a single type (int/float/bool/str)."""

    def __init__(self, values, dtype=None):
        """Create a ValueSet object.

        Args:
            values: a list of values
            dtype: the value type; inferred from the first value when omitted

        Raises:
            ValueError: If the value type is not in (int, float, bool, str)
            TypeError: If any value does not match the chosen type
        """
        self.values_ = list(values)
        # Infer the element type from the first element when not given.
        if dtype is None and self.values_:
            dtype = type(self.values_[0])
        if dtype not in (int, float, bool, str):
            raise ValueError(
                "Value type must in (int, float, bool, str), %r is not supported!"
                % (dtype,)
            )
        self.dtype_ = dtype
        for entry in self.values_:
            if not isinstance(entry, self.dtype_):
                raise TypeError(
                    "The type of value is not supported! value: %r type: %s"
                    % (entry, self.dtype_.__name__)
                )
        self.values_.sort()

    @property
    def dtype(self):
        return self.dtype_

    @property
    def values(self):
        return list(self.values_)
class Metric(object):
    """A named metric whose values are 'int' or 'float' (default float)."""

    def __init__(self, name, dtype=None):
        """Create a Metric object.

        Args:
            name: Metric name
            dtype: Value type; defaults to float

        Raises:
            ValueError: If type is not 'int' or 'float'
        """
        self.name_ = name
        dtype = float if dtype is None else dtype
        if dtype not in (int, float):
            raise ValueError(
                "Value type must in (int, float), %r is not supported!" % (dtype,)
            )
        self.dtype_ = dtype

    @property
    def name(self):
        return self.name_

    @property
    def dtype(self):
        return self.dtype_
| 4,503 |
2,460 |
package io.fabric8.kubernetes.api.model.storage.v1beta1;
import java.util.HashMap;
import java.util.Map;
import com.fasterxml.jackson.annotation.JsonAnyGetter;
import com.fasterxml.jackson.annotation.JsonAnySetter;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import io.fabric8.kubernetes.api.model.Container;
import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.api.model.IntOrString;
import io.fabric8.kubernetes.api.model.LocalObjectReference;
import io.fabric8.kubernetes.api.model.Namespaced;
import io.fabric8.kubernetes.api.model.ObjectReference;
import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
import io.fabric8.kubernetes.api.model.PodTemplateSpec;
import io.fabric8.kubernetes.api.model.Quantity;
import io.fabric8.kubernetes.api.model.ResourceRequirements;
import io.fabric8.kubernetes.model.annotation.Group;
import io.fabric8.kubernetes.model.annotation.Version;
import io.sundr.builder.annotations.Buildable;
import io.sundr.builder.annotations.BuildableReference;
import io.sundr.transform.annotations.TemplateTransformation;
import io.sundr.transform.annotations.TemplateTransformations;
import lombok.EqualsAndHashCode;
import lombok.Setter;
import lombok.ToString;
import lombok.experimental.Accessors;
@JsonDeserialize(using = com.fasterxml.jackson.databind.JsonDeserializer.None.class)
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonPropertyOrder({
"apiVersion",
"kind",
"metadata",
"capacity",
"maximumVolumeSize",
"nodeTopology",
"storageClassName"
})
@ToString
@EqualsAndHashCode
@Setter
@Accessors(prefix = {
"_",
""
})
@Buildable(editableEnabled = false, validationEnabled = false, generateBuilderPackage = false, lazyCollectionInitEnabled = false, builderPackage = "io.fabric8.kubernetes.api.builder", refs = {
@BuildableReference(io.fabric8.kubernetes.api.model.ObjectMeta.class),
@BuildableReference(io.fabric8.kubernetes.api.model.LabelSelector.class),
@BuildableReference(Container.class),
@BuildableReference(PodTemplateSpec.class),
@BuildableReference(ResourceRequirements.class),
@BuildableReference(IntOrString.class),
@BuildableReference(ObjectReference.class),
@BuildableReference(LocalObjectReference.class),
@BuildableReference(PersistentVolumeClaim.class)
})
@Version("v1beta1")
@Group("storage.k8s.io")
@TemplateTransformations({
@TemplateTransformation(value = "/manifest.vm", outputPath = "storage.properties", gather = true)
})
// NOTE(review): this appears to be a builder-generated model class (see the
// @Buildable/@TemplateTransformation annotations above). Prefer regenerating
// over hand-editing -- confirm the generation pipeline before modifying.
public class CSIStorageCapacity implements HasMetadata, Namespaced
{

    /**
     *
     * (Required)
     *
     */
    @JsonProperty("apiVersion")
    private String apiVersion = "storage.k8s.io/v1beta1";
    @JsonProperty("capacity")
    private Quantity capacity;
    /**
     *
     * (Required)
     *
     */
    @JsonProperty("kind")
    private String kind = "CSIStorageCapacity";
    @JsonProperty("maximumVolumeSize")
    private Quantity maximumVolumeSize;
    @JsonProperty("metadata")
    private io.fabric8.kubernetes.api.model.ObjectMeta metadata;
    @JsonProperty("nodeTopology")
    private io.fabric8.kubernetes.api.model.LabelSelector nodeTopology;
    @JsonProperty("storageClassName")
    private String storageClassName;
    // Catch-all bucket for JSON properties without a declared field.
    @JsonIgnore
    private Map<String, Object> additionalProperties = new HashMap<String, Object>();

    /**
     * No args constructor for use in serialization
     *
     */
    public CSIStorageCapacity() {
    }

    /**
     * All-args constructor.
     *
     * @param maximumVolumeSize
     * @param metadata
     * @param apiVersion
     * @param nodeTopology
     * @param kind
     * @param storageClassName
     * @param capacity
     */
    public CSIStorageCapacity(String apiVersion, Quantity capacity, String kind, Quantity maximumVolumeSize, io.fabric8.kubernetes.api.model.ObjectMeta metadata, io.fabric8.kubernetes.api.model.LabelSelector nodeTopology, String storageClassName) {
        super();
        this.apiVersion = apiVersion;
        this.capacity = capacity;
        this.kind = kind;
        this.maximumVolumeSize = maximumVolumeSize;
        this.metadata = metadata;
        this.nodeTopology = nodeTopology;
        this.storageClassName = storageClassName;
    }

    /**
     *
     * (Required)
     *
     */
    @JsonProperty("apiVersion")
    public String getApiVersion() {
        return apiVersion;
    }

    /**
     *
     * (Required)
     *
     */
    @JsonProperty("apiVersion")
    public void setApiVersion(String apiVersion) {
        this.apiVersion = apiVersion;
    }

    @JsonProperty("capacity")
    public Quantity getCapacity() {
        return capacity;
    }

    @JsonProperty("capacity")
    public void setCapacity(Quantity capacity) {
        this.capacity = capacity;
    }

    /**
     *
     * (Required)
     *
     */
    @JsonProperty("kind")
    public String getKind() {
        return kind;
    }

    /**
     *
     * (Required)
     *
     */
    @JsonProperty("kind")
    public void setKind(String kind) {
        this.kind = kind;
    }

    @JsonProperty("maximumVolumeSize")
    public Quantity getMaximumVolumeSize() {
        return maximumVolumeSize;
    }

    @JsonProperty("maximumVolumeSize")
    public void setMaximumVolumeSize(Quantity maximumVolumeSize) {
        this.maximumVolumeSize = maximumVolumeSize;
    }

    @JsonProperty("metadata")
    public io.fabric8.kubernetes.api.model.ObjectMeta getMetadata() {
        return metadata;
    }

    @JsonProperty("metadata")
    public void setMetadata(io.fabric8.kubernetes.api.model.ObjectMeta metadata) {
        this.metadata = metadata;
    }

    @JsonProperty("nodeTopology")
    public io.fabric8.kubernetes.api.model.LabelSelector getNodeTopology() {
        return nodeTopology;
    }

    @JsonProperty("nodeTopology")
    public void setNodeTopology(io.fabric8.kubernetes.api.model.LabelSelector nodeTopology) {
        this.nodeTopology = nodeTopology;
    }

    @JsonProperty("storageClassName")
    public String getStorageClassName() {
        return storageClassName;
    }

    @JsonProperty("storageClassName")
    public void setStorageClassName(String storageClassName) {
        this.storageClassName = storageClassName;
    }

    @JsonAnyGetter
    public Map<String, Object> getAdditionalProperties() {
        return this.additionalProperties;
    }

    @JsonAnySetter
    public void setAdditionalProperty(String name, Object value) {
        this.additionalProperties.put(name, value);
    }

}
| 2,544 |
343 | from rest_framework import serializers as rest_framework_serializers
from rest_framework.fields import ( # NOQA
BooleanField, CharField, ChoiceField, DateField, DateTimeField,
DecimalField, DictField, DurationField, EmailField, Field, FileField,
FilePathField, FloatField, HiddenField, HStoreField, IPAddressField,
ImageField, IntegerField, JSONField, ListField, ModelField,
MultipleChoiceField, NullBooleanField, ReadOnlyField, RegexField,
SerializerMethodField, SlugField, TimeField, URLField, UUIDField
)
from rest_framework.serializers import (
HyperlinkedModelSerializer as RESTFrameworkHyperlinkedModelSerializer,
ModelSerializer as RESTFrameworkModelSerializer
)
from rest_framework.relations import ( # NOQA
HyperlinkedIdentityField, HyperlinkedRelatedField, ManyRelatedField,
PrimaryKeyRelatedField, RelatedField, SlugRelatedField,
StringRelatedField
)
from rest_framework.reverse import reverse
from .classes import BatchRequestCollection
from .serializer_mixins import CreateOnlyFieldSerializerMixin, DynamicFieldListSerializerMixin
class Serializer(
    DynamicFieldListSerializerMixin, rest_framework_serializers.Serializer
):
    """Serializer subclass to add Mayan specific mixins.

    Combines DRF's base Serializer with DynamicFieldListSerializerMixin.
    """
class BatchAPIRequestResponseSerializer(Serializer):
    """Serializer for batch API requests and their per-request responses.

    The read-only fields describe one response entry; the write-only
    `requests` JSON payload is validated by building a BatchRequestCollection.
    """
    content = CharField(read_only=True)
    data = JSONField(read_only=True)
    headers = DictField(read_only=True)
    name = CharField(read_only=True)
    status_code = IntegerField(read_only=True)
    requests = JSONField(
        style={'base_template': 'textarea.html'},
        write_only=True
    )

    def validate(self, data):
        # Validation is delegated to BatchRequestCollection; any exception it
        # raises is surfaced to the API client as a DRF ValidationError.
        try:
            BatchRequestCollection(request_list=data['requests'])
        except Exception as exception:
            raise rest_framework_serializers.ValidationError(
                'Error validating requests; {}'.format(exception)
            )
        return data
class BlankSerializer(Serializer):
    """Serializer for the object action API view. Intentionally empty."""
class EndpointSerializer(Serializer):
    """Serializes an API endpoint entry: its label and resolved URL."""
    label = CharField(read_only=True)
    url = SerializerMethodField()

    def get_url(self, instance):
        # Resolve the endpoint's view name into a full URL using the current
        # request/format from the serializer context. Implicitly returns None
        # when the instance has no viewname (original behavior).
        if instance.viewname:
            return reverse(
                kwargs=instance.kwargs, viewname=instance.viewname,
                request=self.context['request'],
                format=self.context['format']
            )
class HyperlinkedModelSerializer(
    CreateOnlyFieldSerializerMixin, DynamicFieldListSerializerMixin,
    RESTFrameworkHyperlinkedModelSerializer
):
    """HyperlinkedModelSerializer subclass to add Mayan specific mixins."""
class ModelSerializer(
    CreateOnlyFieldSerializerMixin, DynamicFieldListSerializerMixin,
    RESTFrameworkModelSerializer
):
    """ModelSerializer subclass to add Mayan specific mixins."""
class ProjectInformationSerializer(Serializer):
    """Read-only serializer exposing the project's dunder metadata fields."""
    # Field names mirror the module-level dunder attributes they serialize.
    __title__ = CharField(read_only=True)
    __version__ = CharField(read_only=True)
    __build__ = CharField(read_only=True)
    __build_string__ = CharField(read_only=True)
    __django_version__ = CharField(read_only=True)
    __author__ = CharField(read_only=True)
    __author_email__ = CharField(read_only=True)
    __description__ = CharField(read_only=True)
    __license__ = CharField(read_only=True)
    __copyright__ = CharField(read_only=True)
    __website__ = CharField(read_only=True)
| 1,154 |
1,473 | package com.navercorp.pinpoint.plugin.druid.interceptor;
import com.navercorp.pinpoint.bootstrap.context.MethodDescriptor;
import com.navercorp.pinpoint.bootstrap.context.SpanEventRecorder;
import com.navercorp.pinpoint.bootstrap.context.TraceContext;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;
@RunWith(MockitoJUnitRunner.class)
public class DataSourceGetConnectionInterceptorTest {

    // Collaborators are mocked; the interceptor only needs their types here.
    @Mock
    private TraceContext traceContext;
    @Mock
    private MethodDescriptor descriptor;
    @Mock
    private SpanEventRecorder recorder;

    /**
     * Smoke test: doInBeforeTrace must tolerate all-null arguments
     * without throwing (no assertions by design).
     */
    @Test
    public void doInBeforeTrace() {
        DataSourceGetConnectionInterceptor interceptor = new DataSourceGetConnectionInterceptor(traceContext, descriptor);
        interceptor.doInBeforeTrace(null, null, null);
    }

    /**
     * Smoke test: doInAfterTrace with a null argument array.
     */
    @Test
    public void doInAfterTrace1() {
        DataSourceGetConnectionInterceptor interceptor = new DataSourceGetConnectionInterceptor(traceContext, descriptor);
        interceptor.doInAfterTrace(recorder, null, null, null, null);
    }

    /**
     * Smoke test: doInAfterTrace with a non-empty argument array.
     */
    @Test
    public void doInAfterTrace2() {
        DataSourceGetConnectionInterceptor interceptor = new DataSourceGetConnectionInterceptor(traceContext, descriptor);
        interceptor.doInAfterTrace(recorder, null, new Object[]{"", ""}, null, null);
    }
}
1,738 | <filename>dev/Gems/ScriptCanvas/Code/Include/ScriptCanvas/Translation/GraphToCPlusPlus.cpp
/*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
#include "precompiled.h"
#include "GraphToCPlusPlus.h"
#include <ScriptCanvas/Grammar/AbstractCodeModel.h>
namespace ScriptCanvas
{
namespace Translation
{
Configuration CreateCPlusPluseConfig()
{
    // Lexical configuration for emitting C++ source text.
    Configuration config;
    config.m_singleLineComment = "//";
    config.m_blockCommentOpen = "/*";
    config.m_blockCommentClose = "*/";
    config.m_namespaceOpenPrefix = "namespace";
    config.m_namespaceOpen = "{";
    config.m_namespaceClose = "}";
    config.m_scopeOpen = "{";
    config.m_scopeClose = "}";
    return config;
}
GraphToCPlusPlus::GraphToCPlusPlus(const Grammar::AbstractCodeModel& model)
    : GraphToX(CreateCPlusPluseConfig(), model)
{
    // Emission order matters: file headers first, then dependency includes,
    // then the namespace/class scaffolding around the translated members.
    WriteHeaderDotH();
    WriteHeaderDotCPP();
    TranslateDependenciesDotH();
    TranslateDependenciesDotCPP();
    TranslateNamespaceOpen();
    {
        TranslateClassOpen();
        {
            TranslateVariables();
            TranslateHandlers();
            TranslateConstruction();
            TranslateDestruction();
            TranslateFunctions();
            TranslateStartNode();
        }
        TranslateClassClose();
    }
    TranslateNamespaceClose();
}
AZ::Outcome<void, AZStd::string> GraphToCPlusPlus::Translate(const Grammar::AbstractCodeModel& model, AZStd::string& dotH, AZStd::string& dotCPP)
{
    // The whole translation runs in the constructor; on success the generated
    // .h/.cpp text is moved out into the caller's strings.
    GraphToCPlusPlus translation(model);
    auto outcome = translation.m_outcome;
    if (outcome.IsSuccess())
    {
        dotH = AZStd::move(translation.m_dotH.MoveOutput());
        dotCPP = AZStd::move(translation.m_dotCPP.MoveOutput());
    }
    return outcome;
}
void GraphToCPlusPlus::TranslateClassClose()
{
    // Emits "};" followed by a trailing single-line comment naming the class,
    // e.g. "}; // class <GraphName>".
    m_dotH.Outdent();
    m_dotH.WriteIndent();
    m_dotH.Write("};");
    m_dotH.WriteSpace();
    SingleLineComment(m_dotH);
    m_dotH.WriteSpace();
    m_dotH.WriteLine("class %s", GetGraphName());
}
void GraphToCPlusPlus::TranslateClassOpen()
{
    // Emits "class <GraphName>" and its opening brace, indenting the body.
    m_dotH.WriteIndent();
    m_dotH.WriteLine("class %s", GetGraphName());
    m_dotH.WriteIndent();
    m_dotH.WriteLine("{");
    m_dotH.Indent();
}
void GraphToCPlusPlus::TranslateConstruction()
{
    // Intentionally empty: constructor generation is not implemented yet.
}
void GraphToCPlusPlus::TranslateDependencies()
{
    // Emits dependency includes for both generated files (both helpers are
    // currently empty stubs).
    TranslateDependenciesDotH();
    TranslateDependenciesDotCPP();
}
void GraphToCPlusPlus::TranslateDependenciesDotH()
{
    // Intentionally empty: .h dependency emission is not implemented yet.
}
void GraphToCPlusPlus::TranslateDependenciesDotCPP()
{
    // Intentionally empty: .cpp dependency emission is not implemented yet.
}
void GraphToCPlusPlus::TranslateDestruction()
{
    // Intentionally empty: destructor generation is not implemented yet.
}
void GraphToCPlusPlus::TranslateFunctions()
{
    // Placeholder: iterates the model's functions, but per-function
    // translation is still disabled (see the commented-out call below).
    const Grammar::Functions& functions = m_model.GetFunctions();
    for (const auto& functionsEntry : functions)
    {
        const Grammar::Function& function = functionsEntry.second;
        // TranslateFunction(function);
    }
}
void GraphToCPlusPlus::TranslateHandlers()
{
    // Intentionally empty: handler generation is not implemented yet.
}
void GraphToCPlusPlus::TranslateNamespaceOpen()
{
    // Opens the outer ScriptCanvas namespace, then the graph's auto-native
    // namespace, in both the .h and .cpp output streams.
    OpenNamespace(m_dotH, "ScriptCanvas");
    OpenNamespace(m_dotH, GetAutoNativeNamespace());
    OpenNamespace(m_dotCPP, "ScriptCanvas");
    OpenNamespace(m_dotCPP, GetAutoNativeNamespace());
}
void GraphToCPlusPlus::TranslateNamespaceClose()
{
    // Bug fix: namespaces must be closed innermost-first. TranslateNamespaceOpen
    // opens ScriptCanvas (outer) and then the auto-native namespace (inner),
    // but the original closed "ScriptCanvas" first, which mislabels the
    // generated closing braces. Close in reverse order of opening.
    CloseNamespace(m_dotH, GetAutoNativeNamespace());
    CloseNamespace(m_dotH, "ScriptCanvas");
    CloseNamespace(m_dotCPP, GetAutoNativeNamespace());
    CloseNamespace(m_dotCPP, "ScriptCanvas");
}
void GraphToCPlusPlus::TranslateStartNode()
{
    // Emits an OnGraphStart entry point only when the model has a start node:
    // a declaration in the .h and a placeholder body in the .cpp.
    if (const Node* startNode = m_model.GetStartNode())
    {
        { // .h
            m_dotH.WriteIndent();
            m_dotH.WriteLine("public: static void OnGraphStart(const RuntimeContext& context);");
        }
        { // .cpp
            m_dotCPP.WriteIndent();
            m_dotCPP.WriteLine("void %s::OnGraphStart(const RuntimeContext& context)", GetGraphName());
            OpenScope(m_dotCPP);
            {
                // The generated body is currently a hard-coded trace/log stub.
                m_dotCPP.WriteIndent();
                m_dotCPP.WriteLine("AZ_TracePrintf(\"ScriptCanvas\", \"This call wasn't generated from parsing a print node!\");");
                m_dotCPP.WriteLine("LogNotificationBus::Event(context.GetGraphId(), &LogNotifications::LogMessage, \"This call wasn't generated from parsing a print node!\");");
                // get the related function call and call it
                // with possible appropriate variables
            }
            CloseScope(m_dotCPP);
        }
    }
}
void GraphToCPlusPlus::TranslateVariables()
{
    // Intentionally empty: variable generation is not implemented yet.
}
void GraphToCPlusPlus::WriterHeader()
{
    // Convenience wrapper writing both file headers.
    WriteHeaderDotH();
    WriteHeaderDotCPP();
}
// Emits the .cpp preamble in fixed order: copyright banner, generated-file
// warning, then the precompiled header and the graph's own header include.
void GraphToCPlusPlus::WriteHeaderDotCPP()
{
    WriteCopyright(m_dotCPP);
    m_dotCPP.WriteNewLine();
    WriteDoNotModify(m_dotCPP);
    m_dotCPP.WriteNewLine();
    m_dotCPP.WriteLine("#include \"precompiled.h\"");
    m_dotCPP.WriteLine("#include \"%s.h\"", GetGraphName());
    m_dotCPP.WriteNewLine();
}
// Emits the .h preamble in fixed order: copyright banner, #pragma once,
// generated-file warning, and the declarations header every generated
// graph class depends on.
void GraphToCPlusPlus::WriteHeaderDotH()
{
    WriteCopyright(m_dotH);
    m_dotH.WriteNewLine();
    m_dotH.WriteLine("#pragma once");
    m_dotH.WriteNewLine();
    WriteDoNotModify(m_dotH);
    m_dotH.WriteNewLine();
    m_dotH.WriteLine("#include <Execution/NativeHostDeclarations.h>");
    m_dotH.WriteNewLine();
}
} // namespace Translation
} // namespace ScriptCanvas
package cn.ycbjie.ycaudioplayer.utils.encryption;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.Key;
import javax.crypto.Cipher;
import javax.crypto.CipherInputStream;
import javax.crypto.spec.SecretKeySpec;
/**
* Created by yc on 2018/2/7.
* 将音视频文件中的每个字节都经过加密算法进行了加密,显示安全性比较高,耗时也长。
*/
/**
 * DES-based file encryption/decryption. Every byte of the source stream is
 * run through the cipher, which is safer but slower than partial encryption.
 * <p>
 * NOTE(review): "DES" with the provider-default transformation (ECB mode,
 * PKCS5 padding) is cryptographically weak. It is kept as-is for
 * compatibility with files already encrypted by this class.
 * <p>
 * Created by yc on 2018/2/7.
 */
public class FileDESUtils {

    /**
     * Usage illustration only; this method is not meant to be invoked.
     */
    private void utils(){
        FileDESUtils fileDES = null;
        try {
            fileDES = new FileDESUtils("spring.sky");
            // call this to encrypt
            fileDES.doEncryptFile("d:/a.mp4", "d:/b");
            // call this to decrypt
            //fileDES.doDecryptFile("d:/b");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** Key shared by encryption and decryption. */
    private Key mKey;
    /** Cipher initialized for decryption. */
    private Cipher mDecryptCipher;
    /** Cipher initialized for encryption. */
    private Cipher mEncryptCipher;

    public FileDESUtils(String key) throws Exception {
        initKey(key);
        initCipher();
    }

    /**
     * Builds the 8-byte DES key from the given rule. Only the first 8 bytes
     * of the rule are used; shorter rules are zero-padded.
     *
     * @param keyRule key material supplied by the caller
     */
    public void initKey(String keyRule) {
        byte[] keyByte = keyRule.getBytes();
        // eight-byte buffer; Java zero-fills new arrays
        byte[] byteTemp = new byte[8];
        // copy the caller's rule into the fixed-size key buffer
        for (int i = 0; i < byteTemp.length && i < keyByte.length; i++) {
            byteTemp[i] = keyByte[i];
        }
        mKey = new SecretKeySpec(byteTemp, "DES");
    }

    /***
     * Initializes the encryption and decryption ciphers from the key.
     *
     * @throws Exception if the DES transformation is unavailable
     */
    private void initCipher() throws Exception {
        mEncryptCipher = Cipher.getInstance("DES");
        mEncryptCipher.init(Cipher.ENCRYPT_MODE, mKey);
        mDecryptCipher = Cipher.getInstance("DES");
        mDecryptCipher.init(Cipher.DECRYPT_MODE, mKey);
    }

    /**
     * Encrypts a stream to a file.
     *
     * @param in       plaintext input stream
     * @param savePath destination path of the encrypted file
     */
    public void doEncryptFile(InputStream in, String savePath) {
        if (in == null) {
            System.out.println("inputstream is null");
            return;
        }
        // try-with-resources guarantees the streams are closed even when the
        // copy throws part-way (the original leaked them on exceptions).
        // Closing the CipherInputStream also closes the wrapped `in`.
        try (CipherInputStream cin = new CipherInputStream(in, mEncryptCipher);
             OutputStream os = new FileOutputStream(savePath)) {
            byte[] bytes = new byte[1024];
            int len;
            while ((len = cin.read(bytes)) > 0) {
                os.write(bytes, 0, len);
            }
            os.flush();
            System.out.println("加密成功");
        } catch (Exception e) {
            System.out.println("加密失败");
            e.printStackTrace();
        }
    }

    /**
     * Encrypts a file.
     *
     * @param filePath path of the file to encrypt
     * @param savePath destination path of the encrypted file
     * @throws FileNotFoundException if {@code filePath} does not exist
     */
    public void doEncryptFile(String filePath, String savePath) throws FileNotFoundException {
        doEncryptFile(new FileInputStream(filePath), savePath);
    }

    /**
     * Decrypts a stream to a file.
     *
     * @param in   ciphertext input stream
     * @param path destination path of the decrypted file
     */
    public void doDecryptFile(InputStream in, String path) {
        if (in == null) {
            System.out.println("inputstream is null");
            return;
        }
        // Bug fix: the original never closed the FileOutputStream, leaking
        // the file handle. try-with-resources closes everything
        // deterministically, success or failure.
        try (CipherInputStream cin = new CipherInputStream(in, mDecryptCipher);
             OutputStream outputStream = new FileOutputStream(path)) {
            byte[] bytes = new byte[1024];
            int length;
            while ((length = cin.read(bytes)) > 0) {
                outputStream.write(bytes, 0, length);
            }
            outputStream.flush();
            System.out.println("解密成功");
        } catch (Exception e) {
            System.out.println("解密失败");
            e.printStackTrace();
        }
    }

    /**
     * Decrypts a file.
     *
     * @param filePath path of the encrypted file
     * @param outPath  destination path of the decrypted file
     * @throws Exception if the file cannot be opened
     */
    public void doDecryptFile(String filePath, String outPath) throws Exception {
        doDecryptFile(new FileInputStream(filePath), outPath);
    }
}
| 2,347 |
/**
* Query support for relational database repositories.
*/
@NonNullApi
package org.springframework.data.relational.repository.query;
import org.springframework.lang.NonNullApi;
| 52 |
2,041 | <filename>library/src/main/java/com/airbnb/android/airmapview/LeafletGoogleMapType.java
package com.airbnb.android.airmapview;
/**
 * Leaflet map type that identifies itself to the web map layer as "Google".
 * The string passed to the superclass is the map-type name — presumably
 * consumed by the Leaflet-side tile-provider selection; confirm in the
 * web assets.
 */
public class LeafletGoogleMapType extends LeafletMapType {
  public LeafletGoogleMapType() {
    super("Google");
  }
}
| 83 |
#include <Babylon/GraphicsPlatform.h>
#include <GraphicsImpl.h>
#include <Windows.h>
namespace Babylon
{
// Fills in bgfx platform data for rendering into a native window.
// Only the native window handle is supplied; display type, context and
// back-buffer handles are explicitly left null.
void GraphicsImpl::ConfigureBgfxPlatformData(const WindowConfiguration& config, bgfx::PlatformData& pd)
{
    pd.ndt = nullptr;
    pd.nwh = config.WindowPtr;
    pd.context = nullptr;
    pd.backBuffer = nullptr;
    pd.backBufferDS = nullptr;
}
// Fills in bgfx platform data for rendering into an existing graphics
// context. Only the context pointer is supplied; window and back-buffer
// handles are explicitly left null.
void GraphicsImpl::ConfigureBgfxPlatformData(const ContextConfiguration& config, bgfx::PlatformData& pd)
{
    pd.ndt = nullptr;
    pd.nwh = nullptr;
    pd.context = config.Context;
    pd.backBuffer = nullptr;
    pd.backBufferDS = nullptr;
}
float GraphicsImpl::UpdateDevicePixelRatio()
{
UINT dpi{GetDpiForWindow(GetNativeWindow<WindowType>())};
// In windows, 100% DPI scaling is 96dpi.
// See https://docs.microsoft.com/en-us/windows/win32/learnwin32/dpi-and-device-independent-pixels
std::scoped_lock lock{m_state.Mutex};
m_state.Resolution.DevicePixelRatio = static_cast<float>(dpi) / 96.0f;
return m_state.Resolution.DevicePixelRatio;
}
}
| 492 |
526 | /* SPDX-License-Identifier: Apache-2.0 */
/* Copyright Contributors to the ODPi Egeria project. */
package org.odpi.openmetadata.viewservices.serverauthor.api.rest;
import org.odpi.openmetadata.adminservices.configuration.properties.OMAGServerConfig;
import java.util.Arrays;
/**
* A server Author response containing an Open Metadata and governance (OMAG) Server configuration
*/
/**
 * A Server Author response containing an Open Metadata and Governance (OMAG)
 * server configuration.
 */
public class ServerAuthorConfigurationResponse extends ServerAuthorViewOMVSAPIResponse {

    /**
     * Associated OMAG server config
     */
    OMAGServerConfig omagServerConfig = null;

    /**
     * Default constructor
     */
    public ServerAuthorConfigurationResponse()
    {
        super();
    }

    /**
     * Copy/clone constructor
     *
     * @param template object to copy
     */
    public ServerAuthorConfigurationResponse(ServerAuthorConfigurationResponse template)
    {
        super(template);

        if (template != null)
        {
            this.omagServerConfig = template.getOmagServerConfig();
        }
    }

    /**
     * Get the Open Metadata and governance (OMAG) Server configuration
     * @return the omag server configuration
     */
    public OMAGServerConfig getOmagServerConfig() {
        return omagServerConfig;
    }

    /**
     * set the Open Metadata and governance (OMAG) Server configuration
     * @param omagServerConfig the omag server configuration
     */
    public void setOmagServerConfig(OMAGServerConfig omagServerConfig) {
        this.omagServerConfig = omagServerConfig;
    }

    /**
     * JSON-style toString
     *
     * @return return string containing the property names and values
     */
    @Override
    public String toString()
    {
        // Fixed: the prefix previously said "ServerAuthorResponse", which is
        // not this class's name and was misleading in logs.
        return "ServerAuthorConfigurationResponse{" +
                "omagServerConfig=" + omagServerConfig +
                ", exceptionClassName='" + getExceptionClassName() + '\'' +
                ", exceptionCausedBy='" + getExceptionCausedBy() + '\'' +
                ", actionDescription='" + getActionDescription() + '\'' +
                ", relatedHTTPCode=" + getRelatedHTTPCode() +
                ", exceptionErrorMessage='" + getExceptionErrorMessage() + '\'' +
                ", exceptionErrorMessageId='" + getExceptionErrorMessageId() + '\'' +
                ", exceptionErrorMessageParameters=" + Arrays.toString(getExceptionErrorMessageParameters()) +
                ", exceptionSystemAction='" + getExceptionSystemAction() + '\'' +
                ", exceptionUserAction='" + getExceptionUserAction() + '\'' +
                ", exceptionProperties=" + getExceptionProperties() +
                '}';
    }
}
| 991 |
1,780 | <reponame>Nishu123328/phenomic<gh_stars>1000+
{
"name": "@phenomic/bs-platform",
"version": "1.0.0",
"publishConfig": {
"access": "public"
},
"repository": "https://github.com/phenomic/phenomic.git",
"homepage": "https://phenomic.io",
"license": "MIT",
"authors": [
"<NAME> (MoOx)"
],
"description": "bs-platform/lib files for phenomic",
"files": [
"lib/**/*.js"
],
"devDependencies": {
"bs-platform": "^5.0.4",
"cpy-cli": "^2.0.0",
"replace": "^1.0.0"
},
"scripts": {
"prepare": "cpy \"../../node_modules/bs-platform/lib/js/*.js\" lib/js && cpy \"../../node_modules/bs-platform/lib/es6/*.js\" lib/es6"
}
}
| 302 |
313 | <filename>YBL365/Class/Category/Category/ViewModel/YBLCategoryViewModel.h
//
// YBLCategoryViewModel.h
// YBL365
//
// Created by 乔同新 on 2017/2/23.
// Copyright © 2017年 乔同新. All rights reserved.
//
#import <Foundation/Foundation.h>
#import "YBLCategoryTreeModel.h"
/**
 * View model for the goods-category page: holds the top-level category list
 * plus a cache of each category's sub-tree, and exposes a signal to fetch a
 * category tree from the backend.
 */
@interface YBLCategoryViewModel : NSObject

// Which kind of goods category this view model drives.
@property (nonatomic, assign) GoodCategoryType goodCategoryType;

// Top-level category models. NOTE(review): element type is not visible in
// this header — presumably YBLCategoryTreeModel; confirm at call sites.
@property (nonatomic, strong) NSMutableArray *topCategoryArray;

/// Cached sub-tree categories. key1:section key2:row
@property (nonatomic, strong) NSMutableDictionary *allSubTreeCategoryDict;

// Returns a ReactiveCocoa signal that loads the category tree for the given
// category id; `index` identifies the requesting row/section.
// NOTE(review): "singal" is a typo for "signal" but renaming would break
// callers of this public selector.
- (RACSignal *)singalForCategoryTreeWithId:(NSString *)ids Index:(NSInteger)index;

@end
| 227 |
import os
import torch
import shutil
import torch.nn.functional as F
import time
#============================================
__author__ = "<NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
#============================================
'''
This file is mostly adapted from the PyTorch ImageNet example
'''
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores/logits.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to report.

    Returns:
        List of 1-element tensors, precision@k in percent, one per k.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # Fixed: use reshape(-1) instead of view(-1). The [:k] slice of the
        # transposed `correct` tensor is non-contiguous, and Tensor.view
        # raises a RuntimeError on non-contiguous inputs in modern PyTorch.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
'''
Utility to save checkpoint or not
'''
def save_checkpoint(state, is_best, back_check, epoch, dir):
    """Persist training state; optionally snapshot best weights and a backup.

    Args:
        state: checkpoint dict; must contain a 'state_dict' entry.
        is_best: when True, also write the weights alone to model_best.pth.
        back_check: when True, keep an epoch-numbered copy of the checkpoint.
        epoch: epoch number used to name the backup copy.
        dir: output directory (parameter name kept for backward
            compatibility even though it shadows the builtin ``dir``).
    """
    # os.path.join instead of manual `dir + os.sep + name` concatenation:
    # equivalent output, but robust to a trailing separator in `dir`.
    check_pt_file = os.path.join(dir, 'checkpoint.pth.tar')
    torch.save(state, check_pt_file)
    if is_best:
        # We only need best model's weights and not checkpoint states, etc.
        torch.save(state['state_dict'], os.path.join(dir, 'model_best.pth'))
    if back_check:
        shutil.copyfile(check_pt_file, os.path.join(dir, 'checkpoint_back' + str(epoch) + '.pth.tar'))
'''
Cross entropy loss function
'''
def loss_fn(outputs, labels):
    """Cross-entropy between predicted logits and integer class labels."""
    return F.cross_entropy(outputs, labels)
'''
Training loop
'''
def train(train_loader, model, optimizer, epoch):
    """Run one training epoch over ``train_loader``.

    Requires a CUDA device: batches are moved to the GPU here, so the model
    is expected to already live on the GPU.

    Args:
        train_loader: iterable of (input, target) batches.
        model: network being trained.
        optimizer: optimizer stepping the model's parameters.
        epoch: current epoch number (used only for progress logging).

    Returns:
        Tuple of (average top-1 precision, average loss) over the epoch.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    # NOTE: `input` shadows the builtin; kept as-is to avoid touching logic.
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)

        # compute output
        output = model(input)
        # compute loss
        loss = loss_fn(output, target)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output, target, topk=(1, 5))
        # replace if using pytorch version < 0.4
        #losses.update(loss.data[0], input.size(0))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % 100 == 0: #print after every 100 batches
            print("Epoch: %d[%d/%d]\t\tBatch Time:%.4f\t\tLoss:%.4f\t\ttop1:%.4f (%.4f)\t\ttop5:%.4f (%.4f)" %
                  (epoch, i, len(train_loader), batch_time.avg, losses.avg, top1.val, top1.avg, top5.val, top5.avg))
    return top1.avg, losses.avg
'''
Validation loop
'''
def validate(val_loader, model):
    """Evaluate the model over ``val_loader`` without updating weights.

    Requires a CUDA device: batches are moved to the GPU here. Gradients are
    disabled for the whole loop via ``torch.no_grad()``.

    Args:
        val_loader: iterable of (input, target) batches.
        model: network to evaluate.

    Returns:
        Tuple of (average top-1 precision, average loss) over the dataset.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    # with torch.no_grad():
    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            input = input.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)

            # compute output
            output = model(input)
            loss = loss_fn(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            # replace if using pytorch version < 0.4
            #losses.update(loss.data[0], input.size(0))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1[0], input.size(0))
            top5.update(prec5[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % 100 == 0: # print after every 100 batches
                print("Batch:[%d/%d]\t\tBatchTime:%.3f\t\tLoss:%.3f\t\ttop1:%.3f (%.3f)\t\ttop5:%.3f(%.3f)" %
                      (i, len(val_loader), batch_time.avg, losses.avg, top1.val, top1.avg, top5.val, top5.avg))
    print(' * Prec@1:%.3f Prec@5:%.3f' % (top1.avg, top5.avg))
    return top1.avg, losses.avg
package com.ctg.test.api.util;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.springframework.remoting.httpinvoker.HttpInvokerProxyFactoryBean;
import org.springframework.remoting.httpinvoker.SimpleHttpInvokerRequestExecutor;
/**
 * Caches Spring HttpInvoker client proxies so each remote service interface
 * at a given URL is only wired up once.
 */
public class HttpInvokerProxyUtil {

    private static HttpInvokerProxyUtil invokerProxyHelper = null;

    // Fixed: ConcurrentHashMap + computeIfAbsent replaces the unsynchronized
    // HashMap check-then-put, which could corrupt the map or build duplicate
    // proxies under concurrent first calls.
    private static final Map<String, Object> proxyMap = new ConcurrentHashMap<>();

    private HttpInvokerProxyUtil() {
    }

    /**
     * Returns the singleton helper. Synchronized so concurrent first calls
     * cannot observe a half-published instance or create two instances
     * (the original lazy check was racy).
     */
    public static synchronized HttpInvokerProxyUtil getInstance() {
        if (invokerProxyHelper == null) {
            invokerProxyHelper = new HttpInvokerProxyUtil();
        }
        return invokerProxyHelper;
    }

    /**
     * Creates (or reuses) an HttpInvoker client proxy for the given service
     * interface exposed at {@code url}.
     *
     * @param serviceType the remote service interface
     * @param url         the HTTP endpoint the service is exported on
     * @param <T>         the service interface type
     * @return a cached or newly created proxy implementing {@code serviceType}
     */
    @SuppressWarnings("unchecked")
    public <T> T doRefer(final Class<T> serviceType, final String url) {
        final String cacheKey = url + "/" + serviceType.getSimpleName();
        return (T) proxyMap.computeIfAbsent(cacheKey, key -> {
            final HttpInvokerProxyFactoryBean httpProxyFactoryBean = new HttpInvokerProxyFactoryBean();
            httpProxyFactoryBean.setServiceUrl(url);
            httpProxyFactoryBean.setServiceInterface(serviceType);
            SimpleHttpInvokerRequestExecutor httpInvokerRequestExecutor = new SimpleHttpInvokerRequestExecutor() {
                @Override
                protected void prepareConnection(HttpURLConnection con,
                                                 int contentLength) throws IOException {
                    super.prepareConnection(con, contentLength);
                    // 30s read/connect timeouts so a dead endpoint cannot
                    // hang callers indefinitely.
                    con.setReadTimeout(30000);
                    con.setConnectTimeout(30000);
                }
            };
            httpProxyFactoryBean.setHttpInvokerRequestExecutor(httpInvokerRequestExecutor);
            httpProxyFactoryBean.afterPropertiesSet();
            return httpProxyFactoryBean.getObject();
        });
    }
}
| 854 |
/*
Plugin-SDK (Grand Theft Auto San Andreas) source file
Authors: GTA Community. See more here
https://github.com/DK22Pac/plugin-sdk
Do not delete this comment block. Respect others' work!
*/
#include "CDecisionSimple.h"
// Converted from thiscall void CDecisionSimple::Set(int *taskTypes,uchar *chances,int count) 0x600730
// Thunk only: forwards to the original game function at 0x600730.
// Presumably fills this decision with `count` task types and their pick
// chances — confirm against the game disassembly before relying on it.
void CDecisionSimple::Set(int* taskTypes, unsigned char* chances, int count) {
    plugin::CallMethod<0x600730, CDecisionSimple *, int*, unsigned char*, int>(this, taskTypes, chances, count);
}
// Converted from thiscall void CDecisionSimple::MakeDecision(int taskType,short &outTaskType,int &outDecisionIndex) 0x6007A0
// Thunk only: forwards to the original game function at 0x6007A0, which
// writes its results through the two out-parameters.
void CDecisionSimple::MakeDecision(int taskType, short& outTaskType, int& outDecisionIndex) {
    plugin::CallMethod<0x6007A0, CDecisionSimple *, int, short&, int&>(this, taskType, outTaskType, outDecisionIndex);
}
// Converted from thiscall void CDecisionSimple::SetDefault(void) 0x604220
// Thunk only: forwards to the original game function at 0x604220.
void CDecisionSimple::SetDefault() {
    plugin::CallMethod<0x604220, CDecisionSimple *>(this);
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.