max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
823 | <gh_stars>100-1000
package burp;
import java.util.List;
/**
 * Scan check for "fat GET" web cache poisoning: a server that honours
 * parameters sent in the body of a GET request while the cache keys only on
 * the URL lets an attacker poison cached responses for other users.
 * Reference: https://portswigger.net/research/web-cache-entanglement
 */
public class FatGet extends ParamScan {

    FatGet(String name) {
        super(name);
    }

    @Override
    List<IScanIssue> doScan(IHttpRequestResponse baseRequestResponse, IScannerInsertionPoint insertionPoint) {
        // don't scan POST - the technique only applies to GET requests
        if (baseRequestResponse.getRequest()[0] == 'P') {
            return null;
        }

        // set value to canary: inject a unique marker into the insertion point
        String canary = Utilities.generateCanary();
        String fullValue = insertionPoint.getBaseValue()+canary;
        byte[] poison = insertionPoint.buildRequest(fullValue.getBytes());

        // convert to POST so the parameter moves into the request body
        poison = Utilities.helpers.toggleRequestMethod(poison);
        // NOTE(review): this replaces the canary with itself, so apart from the
        // Content-Length fix it looks like a no-op - confirm intent.
        poison = Utilities.fixContentLength(Utilities.replaceFirst(poison, canary.getBytes(), (canary).getBytes()));

        // convert method back to GET (keeping the body) and add method-override
        // headers so common frameworks still treat the body as parameters
        poison = Utilities.setMethod(poison, "GET");
        poison = Utilities.addOrReplaceHeader(poison, "X-HTTP-Method-Override", "POST");
        poison = Utilities.addOrReplaceHeader(poison, "X-HTTP-Method", "POST");
        poison = Utilities.addOrReplaceHeader(poison, "X-Method-Override", "POST");

        // cache buster keeps the probe from poisoning entries real users hit
        poison = Utilities.addCacheBuster(poison, Utilities.generateCanary());

        IHttpService service = baseRequestResponse.getHttpService();
        Resp resp = request(service, poison);
        byte[] response = resp.getReq().getResponse();
        if (Utilities.containsBytes(response, canary.getBytes())) {
            recordCandidateFound();
            // report("Fat-GET body reflection", canary, resp);

            // replay a few times to coax the poisoned response into the cache
            for (int i=0; i<5; i++) {
                request(service, poison);
            }

            //String toReplace = insertionPoint.getInsertionPointName()+"="+fullValue;
            String toReplace = canary;
            // strip the canary and re-request: if the canary still shows up,
            // the cached (poisoned) response is being served
            byte[] getPoison = Utilities.fixContentLength(Utilities.replaceFirst(poison, toReplace.getBytes(), "".getBytes()));
            // byte[] getPoison = baseRequestResponse.getRequest();
            // getPoison = Utilities.appendToQuery(getPoison, "x="+cacheBuster);
            Resp poisonedResp = request(service, getPoison);
            if (Utilities.containsBytes(poisonedResp.getReq().getResponse(), canary.getBytes())) {
                report("Web Cache Poisoning via Fat GET", "The application lets users pass parameters in the body of GET requests, but does not include them in the cache key. This was confirmed by injecting the value "+canary+" using the "+insertionPoint.getInsertionPointName()+" parameter, then replaying the request without the injected value, and confirming it still appears in the response.<br><br>For further information on this technique, please refer to https://portswigger.net/research/web-cache-entanglement", resp, poisonedResp);
            }
        }
        return null;
    }

    // Request-level hook required by ParamScan; unused by this check.
    @Override
    List<IScanIssue> doScan(byte[] baseReq, IHttpService service) {
        return null;
    }
}
| 1,109 |
524 | import json
import logging
from aiohttp import web
from ledfx.api import RestEndpoint
from ledfx.config import save_config
# from ledfx.api.websocket import WebsocketConnection
from ledfx.utils import generate_id
_LOGGER = logging.getLogger(__name__)
class IntegrationsEndpoint(RestEndpoint):
    """REST end-point for querying and managing integrations."""

    ENDPOINT_PATH = "/api/integrations"

    async def get(self, request=None) -> web.Response:
        """Get info of all integrations.

        An optional JSON body with an ``info`` attribute narrows the
        response to that single attribute for every integration.
        """
        response = {"status": "success", "integrations": {}}
        for integration in self._ledfx.integrations.values():
            response["integrations"][integration.id] = {
                "id": integration.id,
                "type": integration.type,
                "active": integration.active,
                "status": integration.status,
                "data": integration.data,
                "config": integration.config,
            }

        # BUGFIX: request defaults to None; previously request.body_exists was
        # dereferenced unconditionally, raising AttributeError whenever the
        # endpoint was invoked without a request object.
        if request is not None and request.body_exists:
            data = await request.json()
            info = data.get("info")
            for integration in self._ledfx.integrations.values():
                if info not in response["integrations"][integration.id].keys():
                    response = {
                        "status": "failed",
                        "reason": f"info attribute {info} not found",
                    }
                    return web.json_response(data=response, status=404)
                # Filter the entry down to just the requested attribute
                response["integrations"][integration.id] = {
                    info: response["integrations"][integration.id][info]
                }
        return web.Response(text=json.dumps(response), status=200)

    async def put(self, request) -> web.Response:
        """Toggle an integration on or off."""
        data = await request.json()
        integration_id = data.get("id")
        if integration_id is None:
            # NOTE(review): 400 would describe a missing client attribute
            # better than 500; kept at 500 for frontend compatibility.
            response = {
                "status": "failed",
                "reason": 'Required attribute "id" was not provided',
            }
            return web.json_response(data=response, status=500)

        integration = self._ledfx.integrations.get(integration_id)
        if integration is None:
            response = {"not found": 404}
            return web.Response(text=json.dumps(response), status=404)

        # Toggle the integration
        active = integration.active
        if not active:
            await integration.activate()
        else:
            await integration.deactivate()

        # Update and save the configuration
        for _integration in self._ledfx.config["integrations"]:
            if _integration["id"] == integration.id:
                _integration["active"] = not active
                break
        save_config(
            config=self._ledfx.config,
            config_dir=self._ledfx.config_dir,
        )

        response = {"status": "success"}
        return web.json_response(data=response, status=200)

    async def delete(self, request) -> web.Response:
        """Delete an integration, erasing all its configuration.

        NOTE: THIS DOES NOT TURN OFF THE INTEGRATION, IT DELETES IT!
        USE PUT TO TOGGLE!
        """
        data = await request.json()
        integration_id = data.get("id")
        if integration_id is None:
            response = {
                "status": "failed",
                "reason": 'Required attribute "id" was not provided',
            }
            return web.json_response(data=response, status=500)

        integration = self._ledfx.integrations.get(integration_id)
        if integration is None:
            response = {"not found": 404}
            return web.Response(text=json.dumps(response), status=404)

        # Destroy the live integration object, then drop it from the config
        self._ledfx.integrations.destroy(integration_id)
        self._ledfx.config["integrations"] = [
            integration
            for integration in self._ledfx.config["integrations"]
            if integration["id"] != integration_id
        ]

        # Save the config
        save_config(
            config=self._ledfx.config, config_dir=self._ledfx.config_dir
        )

        response = {"status": "success"}
        return web.json_response(data=response, status=200)

    async def post(self, request) -> web.Response:
        """Create a new integration, or update an existing one."""
        data = await request.json()
        integration_config = data.get("config")
        if integration_config is None:
            response = {
                "status": "failed",
                "reason": 'Required attribute "config" was not provided',
            }
            return web.json_response(data=response, status=500)

        integration_type = data.get("type")
        if integration_type is None:
            response = {
                "status": "failed",
                "reason": 'Required attribute "type" was not provided',
            }
            return web.json_response(data=response, status=500)

        integration_id = data.get("id")
        new = not bool(integration_id)
        if integration_id is None:
            # Create new integration if no id is given
            integration_id = generate_id(integration_config.get("name"))
            _LOGGER.info(
                ("Creating {} integration with config {}").format(
                    integration_type, integration_config
                )
            )
        else:
            # Update existing integration if valid id is given
            existing_integration = self._ledfx.integrations.get(integration_id)
            if existing_integration is None:
                response = {
                    "status": "failed",
                    "reason": f"Integration with id {integration_id} not found",
                }
                return web.json_response(data=response, status=500)
            _LOGGER.info(
                ("Updating {} integration '{}' with config {}").format(
                    integration_type, integration_id, integration_config
                )
            )
            # Updates recreate the integration: destroy the old instance first
            self._ledfx.integrations.destroy(integration_id)

        integration = self._ledfx.integrations.create(
            id=integration_id,
            type=integration_type,
            active=True,
            config=integration_config,
            data=None,
            ledfx=self._ledfx,
        )

        # Update and save the configuration
        if new:
            self._ledfx.config["integrations"].append(
                {
                    "id": integration.id,
                    "type": integration.type,
                    "active": integration.active,
                    "data": integration.data,
                    "config": integration.config,
                }
            )
        else:
            for integration in self._ledfx.config["integrations"]:
                if integration["id"] == integration_id:
                    integration["config"] = integration_config
                    break
        save_config(
            config=self._ledfx.config,
            config_dir=self._ledfx.config_dir,
        )

        response = {"status": "success"}
        return web.json_response(data=response, status=200)
| 3,346 |
14,668 | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef UI_OZONE_PLATFORM_WAYLAND_HOST_WAYLAND_BUFFER_BACKING_SHM_H_
#define UI_OZONE_PLATFORM_WAYLAND_HOST_WAYLAND_BUFFER_BACKING_SHM_H_
#include "base/files/scoped_file.h"
#include "ui/ozone/platform/wayland/host/wayland_buffer_backing.h"
namespace ui {
// Manager of wl_buffers backed by a shm buffer.
class WaylandBufferBackingShm : public WaylandBufferBacking {
 public:
  // A backing is bound to one shm fd and connection: not default-constructible,
  // copyable, or assignable.
  WaylandBufferBackingShm() = delete;
  WaylandBufferBackingShm(const WaylandBufferBackingShm&) = delete;
  WaylandBufferBackingShm& operator=(const WaylandBufferBackingShm&) = delete;
  // |fd| is the shared-memory file descriptor (ownership is taken), |length|
  // the size of the shm data in bytes, |size| the buffer dimensions, and
  // |buffer_id| the identifier used by the buffer manager.
  WaylandBufferBackingShm(const WaylandConnection* connection,
                          base::ScopedFD fd,
                          uint64_t length,
                          const gfx::Size& size,
                          uint32_t buffer_id);
  ~WaylandBufferBackingShm() override;

 private:
  // WaylandBufferBacking override:
  void RequestBufferHandle(
      base::OnceCallback<void(wl::Object<wl_buffer>)> callback) override;

  // Owned shared-memory fd backing the wl_buffer.
  base::ScopedFD fd_;
  // Size of the shm data in bytes.
  const uint64_t length_;
};
} // namespace ui
#endif // UI_OZONE_PLATFORM_WAYLAND_HOST_WAYLAND_BUFFER_BACKING_SHM_H_
| 534 |
7,272 | <filename>lectures/17-oop/code/src/com/kunal/staticExample/Main.java
package com.kunal.staticExample;
/**
 * Teaching example: how static and non-static (instance) members may call
 * each other in Java.
 */
public class Main {
    public static void main(String[] args) {
        // Human kunal = new Human(22, "Kunal", 10000, false);
        // Human rahul = new Human(34, "Rahul", 15000, true);
        // Human arpit = new Human(34, "arpit", 15000, true);
        //
        // System.out.println(Human.population);
        // System.out.println(Human.population);
        // System.out.println(Human.population);

        // An instance is required before an instance method can run.
        Main funn = new Main();
        funn.fun2();
    }

    // this is not dependent on objects
    static void fun() {
        // greeting(); // you can't call this directly because it requires an
        // instance, but the function you are calling it from does not depend
        // on instances. You cannot access non-static members without
        // referencing their instances in a static context.
        // Hence, an instance is created and referenced here:
        Main obj = new Main();
        obj.greeting();
    }

    // An instance method may call another instance method directly: both
    // implicitly share the same "this".
    void fun2() {
        greeting();
    }

    // we know that something which is not static, belongs to an object
    void greeting() {
        // fun();
        System.out.println("Hello world");
    }
}
| 457 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.db.metadata.model.jdbc;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.netbeans.modules.db.metadata.model.MetadataUtilities;
import org.netbeans.modules.db.metadata.model.api.MetadataException;
import org.netbeans.modules.db.metadata.model.api.Schema;
import org.netbeans.modules.db.metadata.model.spi.CatalogImplementation;
/**
*
* @author <NAME>
*/
/**
 * JDBC-backed implementation of a database catalog. Schemas are read lazily
 * from {@code DatabaseMetaData} and cached until {@link #refresh()}.
 */
public class JDBCCatalog extends CatalogImplementation {

    private static final Logger LOGGER = Logger.getLogger(JDBCCatalog.class.getName());

    protected final JDBCMetadata jdbcMetadata;
    protected final String name;
    protected final boolean _default;
    protected final String defaultSchemaName;
    // Lazily populated by createSchemas(); null until first access.
    protected Schema defaultSchema;
    // Placeholder schema used when the database has no schema concept.
    protected Schema syntheticSchema;
    // null means "not yet initialized" - see initSchemas().
    protected Map<String, Schema> schemas;

    public JDBCCatalog(JDBCMetadata jdbcMetadata, String name, boolean _default, String defaultSchemaName) {
        // defaultSchemaName only makes sense for the default catalog.
        assert defaultSchemaName == null || _default;
        this.jdbcMetadata = jdbcMetadata;
        this.name = name;
        this._default = _default;
        this.defaultSchemaName = defaultSchemaName;
        LOGGER.log(Level.FINE, "Create JDBCCatalog(jdbcMetadata={0}, name={1}, _default={2}, defaultSchemaName={3})",
                new Object[] {jdbcMetadata, name, _default, defaultSchemaName});
    }

    @Override
    public final String getName() {
        return name;
    }

    @Override
    public final boolean isDefault() {
        return _default;
    }

    @Override
    public final Schema getSyntheticSchema() {
        initSchemas();
        return syntheticSchema;
    }

    @Override
    public final Collection<Schema> getSchemas() {
        return initSchemas().values();
    }

    @Override
    public final Schema getSchema(String name) {
        return MetadataUtilities.find(name, initSchemas());
    }

    @Override
    public final void refresh() {
        // Drop the cache; the next accessor re-reads from the database.
        schemas = null;
    }

    @Override
    public String toString() {
        return "JDBCCatalog[name='" + name + "',default=" + _default + "]"; // NOI18N
    }

    public final Schema getDefaultSchema() {
        initSchemas();
        return defaultSchema;
    }

    // Factory hook so driver-specific subclasses can provide their own schema
    // implementation.
    protected JDBCSchema createJDBCSchema(String name, boolean _default, boolean synthetic) {
        return new JDBCSchema(this, name, _default, synthetic);
    }

    // Reads all schemas belonging to this catalog from DatabaseMetaData and
    // publishes them as an unmodifiable map.
    protected void createSchemas() {
        Map<String, Schema> newSchemas = new LinkedHashMap<String, Schema>();
        try {
            if (jdbcMetadata.getDmd().supportsSchemasInTableDefinitions()) {
                ResultSet rs = jdbcMetadata.getDmd().getSchemas();
                if (rs != null) {
                    int columnCount = rs.getMetaData().getColumnCount();
                    if (columnCount < 2) {
                        LOGGER.fine("DatabaseMetaData.getSchemas() not JDBC 3.0-compliant");
                    }
                    boolean supportsCatalog = jdbcMetadata.getDmd().supportsCatalogsInTableDefinitions();
                    try {
                        while (rs.next()) {
                            String schemaName = MetadataUtilities.trimmed(rs.getString("TABLE_SCHEM")); // NOI18N
                            // Workaround for pre-JDBC 3.0 drivers, where DatabaseMetaData.getSchemas()
                            // only returns a TABLE_SCHEM column.
                            String catalogName = columnCount > 1 && supportsCatalog ? MetadataUtilities.trimmed(rs.getString("TABLE_CATALOG")) : name; // NOI18N
                            LOGGER.log(Level.FINE, "Read schema ''{0}'' in catalog ''{1}''", new Object[] { schemaName, catalogName });
                            LOGGER.log(Level.FINEST, "MetadataUtilities.equals(catalogName=''{0}'', name=''{1}'') returns {2}",
                                    new Object[] {catalogName, name, MetadataUtilities.equals(catalogName, name)});
                            // Only keep schemas that belong to this catalog.
                            if (MetadataUtilities.equals(catalogName, name)) {
                                if (defaultSchemaName != null && MetadataUtilities.equals(schemaName, defaultSchemaName)) {
                                    defaultSchema = createJDBCSchema(defaultSchemaName, true, false).getSchema();
                                    newSchemas.put(defaultSchema.getName(), defaultSchema);
                                    LOGGER.log(Level.FINE, "Created default schema {0}", defaultSchema);
                                } else {
                                    Schema schema = createJDBCSchema(schemaName, false, false).getSchema();
                                    newSchemas.put(schemaName, schema);
                                    LOGGER.log(Level.FINE, "Created schema {0}", schema);
                                }
                            }
                        }
                    } finally {
                        if (rs != null) {
                            rs.close();
                        }
                    }
                } else {
                    LOGGER.info(this + " returns null from jdbcMetadata.getDmd().getSchemas().");
                }
            }
            // Databases without schema support get a single synthetic schema.
            if (newSchemas.isEmpty() && !jdbcMetadata.getDmd().supportsSchemasInTableDefinitions()) {
                syntheticSchema = createJDBCSchema(null, _default, true).getSchema();
                if (_default) {
                    defaultSchema = syntheticSchema;
                }
                LOGGER.log(Level.FINE, "Created synthetic schema {0}", syntheticSchema);
            }
        } catch (SQLException e) {
            throw new MetadataException(e);
        }
        schemas = Collections.unmodifiableMap(newSchemas);
    }

    // Lazy initializer: loads the schema map on first use.
    private Map<String, Schema> initSchemas() {
        if (schemas != null) {
            return schemas;
        }
        LOGGER.log(Level.FINE, "Initializing schemas in {0}", this);
        createSchemas();
        return schemas;
    }

    public final JDBCMetadata getJDBCMetadata() {
        return jdbcMetadata;
    }
}
| 3,138 |
777 | <reponame>google-ar/chromium
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.net.impl;
import android.support.annotation.IntDef;
import static org.chromium.net.UrlRequest.Status.CONNECTING;
import static org.chromium.net.UrlRequest.Status.DOWNLOADING_PROXY_SCRIPT;
import static org.chromium.net.UrlRequest.Status.ESTABLISHING_PROXY_TUNNEL;
import static org.chromium.net.UrlRequest.Status.IDLE;
import static org.chromium.net.UrlRequest.Status.INVALID;
import static org.chromium.net.UrlRequest.Status.READING_RESPONSE;
import static org.chromium.net.UrlRequest.Status.RESOLVING_HOST;
import static org.chromium.net.UrlRequest.Status.RESOLVING_HOST_IN_PROXY_SCRIPT;
import static org.chromium.net.UrlRequest.Status.RESOLVING_PROXY_FOR_URL;
import static org.chromium.net.UrlRequest.Status.SENDING_REQUEST;
import static org.chromium.net.UrlRequest.Status.SSL_HANDSHAKE;
import static org.chromium.net.UrlRequest.Status.WAITING_FOR_AVAILABLE_SOCKET;
import static org.chromium.net.UrlRequest.Status.WAITING_FOR_CACHE;
import static org.chromium.net.UrlRequest.Status.WAITING_FOR_DELEGATE;
import static org.chromium.net.UrlRequest.Status.WAITING_FOR_RESPONSE;
import static org.chromium.net.UrlRequest.Status.WAITING_FOR_STALLED_SOCKET_POOL;
import org.chromium.net.ExperimentalUrlRequest;
import org.chromium.net.UploadDataProvider;
import org.chromium.net.UrlRequest;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.util.concurrent.Executor;
/**
* Base class for classes that implement {@link UrlRequest} including experimental
* features. {@link CronetUrlRequest} and {@link JavaUrlRequest} extends this class.
*/
/**
 * Base class for classes that implement {@link UrlRequest} including experimental
 * features. {@link CronetUrlRequest} and {@link JavaUrlRequest} extends this class.
 */
public abstract class UrlRequestBase extends ExperimentalUrlRequest {
    /**
     * Sets the HTTP method verb to use for this request. Must be done before
     * request has started.
     *
     * <p>The default when this method is not called is "GET" if the request has
     * no body or "POST" if it does.
     *
     * @param method "GET", "HEAD", "DELETE", "POST" or "PUT".
     */
    protected abstract void setHttpMethod(String method);

    /**
     * Adds a request header. Must be done before request has started.
     *
     * @param header header name.
     * @param value header value.
     */
    protected abstract void addHeader(String header, String value);

    /**
     * Sets upload data provider. Must be done before request has started. May only be
     * invoked once per request. Switches method to "POST" if not explicitly
     * set. Starting the request will throw an exception if a Content-Type
     * header is not set.
     *
     * @param uploadDataProvider responsible for providing the upload data.
     * @param executor All {@code uploadDataProvider} methods will be invoked
     *     using this {@code Executor}. May optionally be the same
     *     {@code Executor} the request itself is using.
     */
    protected abstract void setUploadDataProvider(
            UploadDataProvider uploadDataProvider, Executor executor);

    /**
     * Possible URL Request statuses.
     */
    @IntDef({
            INVALID, IDLE, WAITING_FOR_STALLED_SOCKET_POOL, WAITING_FOR_AVAILABLE_SOCKET,
            WAITING_FOR_DELEGATE, WAITING_FOR_CACHE, DOWNLOADING_PROXY_SCRIPT,
            RESOLVING_PROXY_FOR_URL, RESOLVING_HOST_IN_PROXY_SCRIPT, ESTABLISHING_PROXY_TUNNEL,
            RESOLVING_HOST, CONNECTING, SSL_HANDSHAKE, SENDING_REQUEST, WAITING_FOR_RESPONSE,
            READING_RESPONSE,
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface StatusValues {}

    /**
     * Convert a LoadState int to one of values listed above.
     * @param loadState a LoadState to convert.
     * @return static int Status.
     */
    @StatusValues
    public static int convertLoadState(int loadState) {
        // LoadState values outside this range have no UrlRequest.Status
        // counterpart; reaching here with one indicates a caller bug.
        assert loadState >= LoadState.IDLE && loadState <= LoadState.READING_RESPONSE;
        switch (loadState) {
            case (LoadState.IDLE):
                return IDLE;
            case (LoadState.WAITING_FOR_STALLED_SOCKET_POOL):
                return WAITING_FOR_STALLED_SOCKET_POOL;
            case (LoadState.WAITING_FOR_AVAILABLE_SOCKET):
                return WAITING_FOR_AVAILABLE_SOCKET;
            case (LoadState.WAITING_FOR_DELEGATE):
                return WAITING_FOR_DELEGATE;
            case (LoadState.WAITING_FOR_CACHE):
                return WAITING_FOR_CACHE;
            case (LoadState.DOWNLOADING_PROXY_SCRIPT):
                return DOWNLOADING_PROXY_SCRIPT;
            case (LoadState.RESOLVING_PROXY_FOR_URL):
                return RESOLVING_PROXY_FOR_URL;
            case (LoadState.RESOLVING_HOST_IN_PROXY_SCRIPT):
                return RESOLVING_HOST_IN_PROXY_SCRIPT;
            case (LoadState.ESTABLISHING_PROXY_TUNNEL):
                return ESTABLISHING_PROXY_TUNNEL;
            case (LoadState.RESOLVING_HOST):
                return RESOLVING_HOST;
            case (LoadState.CONNECTING):
                return CONNECTING;
            case (LoadState.SSL_HANDSHAKE):
                return SSL_HANDSHAKE;
            case (LoadState.SENDING_REQUEST):
                return SENDING_REQUEST;
            case (LoadState.WAITING_FOR_RESPONSE):
                return WAITING_FOR_RESPONSE;
            case (LoadState.READING_RESPONSE):
                return READING_RESPONSE;
            default:
                // A load state is retrieved but there is no corresponding
                // request status. This most likely means that the mapping is
                // incorrect.
                throw new IllegalArgumentException("No request status found.");
        }
    }
}
| 2,392 |
350 | /******************************************************************************
* Copyright (c) 2014, 2018 Potential Ventures Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Potential Ventures Ltd not the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
#include "VhpiImpl.h"
#include <stdlib.h>
#include <algorithm>
#include <cmath>
extern "C" {
// Simulator start/finish callback handles and the GPI implementation table.
// Registered/initialized elsewhere in this translation unit —
// TODO(review): confirm where these are assigned; not visible in this chunk.
static VhpiCbHdl *sim_init_cb;
static VhpiCbHdl *sim_finish_cb;
static VhpiImpl *vhpi_table;
}
// Expands to a case label returning the enumerator's name as a string literal.
#define CASE_STR(_X) \
    case _X:         \
        return #_X

// Map a vhpiFormatT value to its symbolic name, for diagnostics/logging.
const char *VhpiImpl::format_to_string(int format) {
    switch (format) {
        CASE_STR(vhpiBinStrVal);
        CASE_STR(vhpiOctStrVal);
        CASE_STR(vhpiDecStrVal);
        CASE_STR(vhpiHexStrVal);
        CASE_STR(vhpiEnumVal);
        CASE_STR(vhpiIntVal);
        CASE_STR(vhpiLogicVal);
        CASE_STR(vhpiRealVal);
        CASE_STR(vhpiStrVal);
        CASE_STR(vhpiCharVal);
        CASE_STR(vhpiTimeVal);
        CASE_STR(vhpiPhysVal);
        CASE_STR(vhpiObjTypeVal);
        CASE_STR(vhpiPtrVal);
        CASE_STR(vhpiEnumVecVal);
        CASE_STR(vhpiRawDataVal);

        default:
            return "unknown";
    }
}

// Map a VHPI callback reason code to its symbolic name, for logging.
const char *VhpiImpl::reason_to_string(int reason) {
    switch (reason) {
        CASE_STR(vhpiCbValueChange);
        CASE_STR(vhpiCbStartOfNextCycle);
        CASE_STR(vhpiCbStartOfPostponed);
        CASE_STR(vhpiCbEndOfTimeStep);
        CASE_STR(vhpiCbNextTimeStep);
        CASE_STR(vhpiCbAfterDelay);
        CASE_STR(vhpiCbStartOfSimulation);
        CASE_STR(vhpiCbEndOfSimulation);
        CASE_STR(vhpiCbEndOfProcesses);
        CASE_STR(vhpiCbLastKnownDeltaCycle);

        default:
            return "unknown";
    }
}

#undef CASE_STR
// Query the simulator for the current simulation time, returned as the high
// and low 32-bit halves of a 64-bit value (as delivered by vhpi_get_time).
void VhpiImpl::get_sim_time(uint32_t *high, uint32_t *low) {
    vhpiTimeT vhpi_time_s;
    vhpi_get_time(&vhpi_time_s, NULL);
    check_vhpi_error();
    *high = vhpi_time_s.high;
    *low = vhpi_time_s.low;
}
// Integer base-10 logarithm: returns floor(log10(v)) for v >= 1,
// and 0 for v == 0 (matching the historical behavior of this helper).
static int32_t log10int(uint64_t v) {
    int32_t exponent = 0;
    while (v >= 10) {
        v /= 10;
        ++exponent;
    }
    return exponent;
}
// Report the simulator's time precision as a base-10 exponent in seconds
// (e.g. -12 for picosecond resolution).
void VhpiImpl::get_sim_precision(int32_t *precision) {
    /* The value returned is in number of femtoseconds */
    vhpiPhysT prec = vhpi_get_phys(vhpiResolutionLimitP, NULL);
    uint64_t femtoseconds = ((uint64_t)prec.high << 32) | prec.low;
    // log10 of the femtosecond count, shifted by -15 to express the
    // precision relative to one second (1 fs == 1e-15 s).
    *precision = log10int(femtoseconds) - 15;
}
// Return the simulator product name, queried once via vhpiTool and cached
// in m_product; "UNKNOWN" if the tool handle is unavailable.
const char *VhpiImpl::get_simulator_product() {
    if (m_product.empty()) {
        vhpiHandleT tool = vhpi_handle(vhpiTool, NULL);
        if (tool) {
            m_product = vhpi_get_str(vhpiNameP, tool);
            vhpi_release_handle(tool);
        } else {
            m_product = "UNKNOWN";
        }
    }
    return m_product.c_str();
}
// Return the simulator version string, queried once via vhpiTool and cached
// in m_version; "UNKNOWN" if the tool handle is unavailable.
const char *VhpiImpl::get_simulator_version() {
    if (m_version.empty()) {
        vhpiHandleT tool = vhpi_handle(vhpiTool, NULL);
        if (tool) {
            m_version = vhpi_get_str(vhpiToolVersionP, tool);
            vhpi_release_handle(tool);
        } else {
            m_version = "UNKNOWN";
        }
    }
    return m_version.c_str();
}
// Determine whether a VHPI object type is a constant or not
bool is_const(vhpiHandleT hdl) {
    vhpiHandleT tmp = hdl;

    /* Need to walk the prefix's back to the original handle to get a type
     * that is not vhpiSelectedNameK or vhpiIndexedNameK
     */
    do {
        vhpiIntT vhpitype = vhpi_get(vhpiKindP, tmp);
        // Constant declarations and generics are both read-only.
        if (vhpiConstDeclK == vhpitype || vhpiGenericDeclK == vhpitype)
            return true;
    } while ((tmp = vhpi_handle(vhpiPrefix, tmp)) != NULL);

    return false;
}
// Determine whether an enum type handle denotes a logic-like type: BIT,
// STD_ULOGIC, STD_LOGIC by name, or an unnamed enum whose literals match the
// 2-value ('0','1') or 9-value ('U','X','0','1','Z','W','L','H','-') sets.
bool is_enum_logic(vhpiHandleT hdl) {
    const char *type = vhpi_get_str(vhpiNameP, hdl);

    // Fast path: recognize the standard types by name prefix.
    if (0 == strncmp(type, "BIT", sizeof("BIT") - 1) ||
        0 == strncmp(type, "STD_ULOGIC", sizeof("STD_ULOGIC") - 1) ||
        0 == strncmp(type, "STD_LOGIC", sizeof("STD_LOGIC") - 1)) {
        return true;
    } else {
        vhpiIntT num_enum = vhpi_get(vhpiNumLiteralsP, hdl);

        if (2 == num_enum) {
            vhpiHandleT it = vhpi_iterator(vhpiEnumLiterals, hdl);
            if (it != NULL) {
                const char *enums_1[2] = {
                    "0", "1"};  // Aldec does not return the single quotes
                const char *enums_2[2] = {"'0'", "'1'"};
                vhpiHandleT enum_hdl;
                int cnt = 0;

                // Each literal must match the expected one, in order, in
                // either quoting style; otherwise this is not logic.
                while ((enum_hdl = vhpi_scan(it)) != NULL) {
                    const char *etype = vhpi_get_str(vhpiStrValP, enum_hdl);
                    if (1 < cnt || (0 != strncmp(etype, enums_1[cnt],
                                                 strlen(enums_1[cnt])) &&
                                    0 != strncmp(etype, enums_2[cnt],
                                                 strlen(enums_2[cnt])))) {
                        vhpi_release_handle(it);
                        return false;
                    }
                    ++cnt;
                }
                return true;
            }
        } else if (9 == num_enum) {
            vhpiHandleT it = vhpi_iterator(vhpiEnumLiterals, hdl);
            if (it != NULL) {
                const char *enums_1[9] = {
                    "U", "X", "0", "1", "Z",
                    "W", "L", "H", "-"};  // Aldec does not return the single
                                          // quotes
                const char *enums_2[9] = {"'U'", "'X'", "'0'", "'1'", "'Z'",
                                          "'W'", "'L'", "'H'", "'-'"};
                vhpiHandleT enum_hdl;
                int cnt = 0;

                while ((enum_hdl = vhpi_scan(it)) != NULL) {
                    const char *etype = vhpi_get_str(vhpiStrValP, enum_hdl);
                    if (8 < cnt || (0 != strncmp(etype, enums_1[cnt],
                                                 strlen(enums_1[cnt])) &&
                                    0 != strncmp(etype, enums_2[cnt],
                                                 strlen(enums_2[cnt])))) {
                        vhpi_release_handle(it);
                        return false;
                    }
                    ++cnt;
                }
                return true;
            }
        }
    }

    return false;
}
// Determine whether an enum type handle denotes the CHARACTER type: either
// its name starts with "CHARACTER", or it has exactly 256 literals (the
// size of the VHDL CHARACTER enumeration).
bool is_enum_char(vhpiHandleT hdl) {
    const vhpiIntT NUM_ENUMS_IN_CHAR_TYPE = 256;

    const char *type = vhpi_get_str(vhpiNameP, hdl);

    // BUGFIX: the comparison length previously used sizeof("STD_ULOGIC") - 1
    // (10) — a copy-paste slip from is_enum_logic — which compared one byte
    // past "CHARACTER" and turned the intended prefix match into an exact
    // match against the terminator. Use the length of "CHARACTER" (9).
    if (0 == strncmp(type, "CHARACTER", sizeof("CHARACTER") - 1)) {
        return true;
    } else {
        // Fallback for anonymous/renamed types: match on literal count.
        return (vhpi_get(vhpiNumLiteralsP, hdl) == NUM_ENUMS_IN_CHAR_TYPE);
    }
}
// Determine whether an enum type handle denotes BOOLEAN: either its name
// starts with "BOOLEAN", or it is a two-literal enum whose literals are
// FALSE/TRUE (upper or lower case).
bool is_enum_boolean(vhpiHandleT hdl) {
    const char *type = vhpi_get_str(vhpiNameP, hdl);

    if (0 == strncmp(type, "BOOLEAN", sizeof("BOOLEAN") - 1)) {
        return true;
    } else {
        vhpiIntT num_enum = vhpi_get(vhpiNumLiteralsP, hdl);

        if (2 == num_enum) {
            vhpiHandleT it = vhpi_iterator(vhpiEnumLiterals, hdl);
            if (it != NULL) {
                vhpiHandleT enum_hdl;
                int cnt = 0;

                // Literal 0 must be FALSE/false, literal 1 TRUE/true, and
                // there must be no further literals.
                while ((enum_hdl = vhpi_scan(it)) != NULL) {
                    const char *etype = vhpi_get_str(vhpiStrValP, enum_hdl);
                    if (((0 == cnt &&
                          0 != strncmp(etype, "FALSE", strlen("FALSE"))) &&
                         (0 == cnt &&
                          0 != strncmp(etype, "false", strlen("false")))) ||
                        ((1 == cnt &&
                          0 != strncmp(etype, "TRUE", strlen("TRUE"))) &&
                         (1 == cnt &&
                          0 != strncmp(etype, "true", strlen("true")))) ||
                        2 <= cnt) {
                        vhpi_release_handle(it);
                        return false;
                    }
                    ++cnt;
                }
                return true;
            }
        }
    }

    return false;
}
// Classify a VHPI handle and wrap it in the matching GpiObjHdl subclass.
// Returns NULL if the handle's kind cannot be mapped or initialisation of
// the new object fails. The returned object owns new_hdl on success.
GpiObjHdl *VhpiImpl::create_gpi_obj_from_handle(vhpiHandleT new_hdl,
                                                std::string &name,
                                                std::string &fq_name) {
    vhpiIntT type;
    gpi_objtype_t gpi_type;
    GpiObjHdl *new_obj = NULL;

    // Mixed-language designs can hand back Verilog objects; not ours to wrap.
    if (vhpiVerilog == (type = vhpi_get(vhpiKindP, new_hdl))) {
        LOG_DEBUG("VHPI: vhpiVerilog returned from vhpi_get(vhpiType, ...)")
        return NULL;
    }

    /* We need to delve further here to determine how to later set
       the values of an object */
    vhpiHandleT base_hdl = vhpi_handle(vhpiBaseType, new_hdl);

    // Some objects only expose the base type through their subtype.
    if (base_hdl == NULL) {
        vhpiHandleT st_hdl = vhpi_handle(vhpiSubtype, new_hdl);

        if (st_hdl != NULL) {
            base_hdl = vhpi_handle(vhpiBaseType, st_hdl);
            vhpi_release_handle(st_hdl);
        }
    }

    vhpiHandleT query_hdl = (base_hdl != NULL) ? base_hdl : new_hdl;

    vhpiIntT base_type = vhpi_get(vhpiKindP, query_hdl);
    vhpiIntT is_static = vhpi_get(vhpiStaticnessP, query_hdl);

    /* Non locally static objects are not accessible for read/write
       so we create this as a GpiObjType
    */
    if (is_static == vhpiGloballyStatic) {
        gpi_type = GPI_MODULE;
        goto create;
    }

    switch (base_type) {
        case vhpiArrayTypeDeclK: {
            vhpiIntT num_dim = vhpi_get(vhpiNumDimensionsP, query_hdl);

            if (num_dim > 1) {
                LOG_DEBUG("VHPI: Detected a MULTI-DIMENSIONAL ARRAY type %s",
                          fq_name.c_str());
                gpi_type = GPI_ARRAY;
            } else {
                // One-dimensional array: classify by its element base type.
                vhpiHandleT elem_base_type_hdl = NULL;
                vhpiIntT elem_base_type = 0;

                /* vhpiElemSubtype is deprecated. Should be using vhpiElemType,
                 * but not supported in all simulators. */
                vhpiHandleT elem_sub_type_hdl =
                    vhpi_handle(vhpiElemSubtype, query_hdl);

                if (elem_sub_type_hdl != NULL) {
                    elem_base_type_hdl =
                        vhpi_handle(vhpiBaseType, elem_sub_type_hdl);
                    vhpi_release_handle(elem_sub_type_hdl);
                }

                if (elem_base_type_hdl != NULL) {
                    elem_base_type = vhpi_get(vhpiKindP, elem_base_type_hdl);
                    if (elem_base_type == vhpiEnumTypeDeclK) {
                        if (is_enum_logic(elem_base_type_hdl)) {
                            LOG_DEBUG("VHPI: Detected a LOGIC VECTOR type %s",
                                      fq_name.c_str());
                            gpi_type = GPI_REGISTER;
                        } else if (is_enum_char(elem_base_type_hdl)) {
                            LOG_DEBUG("VHPI: Detected a STRING type %s",
                                      fq_name.c_str());
                            gpi_type = GPI_STRING;
                        } else {
                            LOG_DEBUG(
                                "VHPI: Detected a NON-LOGIC ENUM VECTOR type "
                                "%s",
                                fq_name.c_str());
                            gpi_type = GPI_ARRAY;
                        }
                    } else {
                        LOG_DEBUG("VHPI: Detected a NON-ENUM VECTOR type %s",
                                  fq_name.c_str());
                        gpi_type = GPI_ARRAY;
                    }
                } else {
                    LOG_ERROR(
                        "VHPI: Unable to determine the Array Element Base Type "
                        "for %s. Defaulting to GPI_ARRAY.",
                        vhpi_get_str(vhpiFullCaseNameP, new_hdl));
                    gpi_type = GPI_ARRAY;
                }
            }
            break;
        }

        case vhpiEnumTypeDeclK: {
            if (is_enum_logic(query_hdl)) {
                LOG_DEBUG("VHPI: Detected a LOGIC type %s", fq_name.c_str());
                gpi_type = GPI_REGISTER;
            } else if (is_enum_char(query_hdl)) {
                LOG_DEBUG("VHPI: Detected a CHAR type %s", fq_name.c_str());
                gpi_type = GPI_INTEGER;
            } else if (is_enum_boolean(query_hdl)) {
                LOG_DEBUG("VHPI: Detected a BOOLEAN/INTEGER type %s",
                          fq_name.c_str());
                gpi_type = GPI_INTEGER;
            } else {
                LOG_DEBUG("VHPI: Detected an ENUM type %s", fq_name.c_str());
                gpi_type = GPI_ENUM;
            }
            break;
        }

        case vhpiIntTypeDeclK: {
            LOG_DEBUG("VHPI: Detected an INT type %s", fq_name.c_str());
            gpi_type = GPI_INTEGER;
            break;
        }

        case vhpiFloatTypeDeclK: {
            LOG_DEBUG("VHPI: Detected a REAL type %s", fq_name.c_str());
            gpi_type = GPI_REAL;
            break;
        }

        case vhpiRecordTypeDeclK: {
            LOG_DEBUG("VHPI: Detected a STRUCTURE type %s", fq_name.c_str());
            gpi_type = GPI_STRUCTURE;
            break;
        }

        case vhpiProcessStmtK:
        case vhpiSimpleSigAssignStmtK:
        case vhpiCondSigAssignStmtK:
        case vhpiSelectSigAssignStmtK: {
            gpi_type = GPI_MODULE;
            break;
        }

        case vhpiRootInstK:
        case vhpiIfGenerateK:
        case vhpiForGenerateK:
        case vhpiBlockStmtK:
        case vhpiCompInstStmtK: {
            std::string hdl_name = vhpi_get_str(vhpiCaseNameP, new_hdl);

            // For the root instance, prefer the primary unit (entity) name
            // over the architecture's when they differ.
            if (base_type == vhpiRootInstK && hdl_name != name) {
                vhpiHandleT arch = vhpi_handle(vhpiDesignUnit, new_hdl);

                if (NULL != arch) {
                    vhpiHandleT prim = vhpi_handle(vhpiPrimaryUnit, arch);

                    if (NULL != prim) {
                        hdl_name = vhpi_get_str(vhpiCaseNameP, prim);
                    }
                }
            }

            // A name mismatch means this handle stands in for a generate
            // array rather than a concrete region.
            if (name != hdl_name) {
                LOG_DEBUG("VHPI: Found pseudo-region %s", fq_name.c_str());
                gpi_type = GPI_GENARRAY;
            } else {
                gpi_type = GPI_MODULE;
            }
            break;
        }

        default: {
            LOG_ERROR("VHPI: Not able to map type (%s) %u to object",
                      vhpi_get_str(vhpiKindStrP, query_hdl), type);
            new_obj = NULL;
            goto out;
        }
    }

create:
    LOG_DEBUG("VHPI: Creating %s of type %d (%s)",
              vhpi_get_str(vhpiFullCaseNameP, new_hdl), gpi_type,
              vhpi_get_str(vhpiKindStrP, query_hdl));

    // Value-carrying kinds get signal handles; containers get object/array
    // handles that only support traversal.
    if (gpi_type != GPI_ARRAY && gpi_type != GPI_GENARRAY &&
        gpi_type != GPI_MODULE && gpi_type != GPI_STRUCTURE) {
        if (gpi_type == GPI_REGISTER)
            new_obj = new VhpiLogicSignalObjHdl(this, new_hdl, gpi_type,
                                                is_const(new_hdl));
        else
            new_obj = new VhpiSignalObjHdl(this, new_hdl, gpi_type,
                                           is_const(new_hdl));
    } else if (gpi_type == GPI_ARRAY) {
        new_obj = new VhpiArrayObjHdl(this, new_hdl, gpi_type);
    } else {
        new_obj = new VhpiObjHdl(this, new_hdl, gpi_type);
    }

    // A non-zero return from initialise indicates failure.
    if (new_obj->initialise(name, fq_name)) {
        delete new_obj;
        new_obj = NULL;
    }

out:
    if (base_hdl != NULL) vhpi_release_handle(base_hdl);

    return new_obj;
}
// Wrap a raw simulator handle (already known to be VHPI) in a GpiObjHdl,
// deriving the fully-qualified name from the parent. Returns NULL and
// releases the handle if the name cannot be queried or creation fails.
GpiObjHdl *VhpiImpl::native_check_create(void *raw_hdl, GpiObjHdl *parent) {
    LOG_DEBUG("VHPI: Trying to convert raw to VHPI handle");

    vhpiHandleT new_hdl = (vhpiHandleT)raw_hdl;

    std::string fq_name = parent->get_fullname();
    const char *c_name = vhpi_get_str(vhpiCaseNameP, new_hdl);
    if (!c_name) {
        LOG_DEBUG("VHPI: Unable to query name of passed in handle");
        return NULL;
    }

    std::string name = c_name;

    // ":" is the root separator; deeper levels are joined with ".".
    if (fq_name == ":") {
        fq_name += name;
    } else {
        fq_name += "." + name;
    }

    GpiObjHdl *new_obj = create_gpi_obj_from_handle(new_hdl, name, fq_name);
    if (new_obj == NULL) {
        vhpi_release_handle(new_hdl);
        LOG_DEBUG("VHPI: Unable to fetch object %s", fq_name.c_str());
        return NULL;
    }

    return new_obj;
}
// Look up the named child *name* of *parent* and wrap it in a GpiObjHdl.
// Falls back to scanning vhpiSelectedNames for record members (where
// vhpi_handle_by_name can fail) and to vhpiInternalRegions for generate
// loops, which are represented by a pseudo-region sharing the parent handle.
GpiObjHdl *VhpiImpl::native_check_create(std::string &name, GpiObjHdl *parent) {
    vhpiHandleT vhpi_hdl = parent->get_handle<vhpiHandleT>();
    vhpiHandleT new_hdl;
    std::string fq_name = parent->get_fullname();
    if (fq_name == ":") {
        // Root scope: no "." separator after the leading colon.
        fq_name += name;
    } else {
        fq_name += "." + name;
    }
    // vhpi_handle_by_name takes a mutable char*, so copy into a writable buffer.
    std::vector<char> writable(fq_name.begin(), fq_name.end());
    writable.push_back('\0');
    new_hdl = vhpi_handle_by_name(&writable[0], NULL);
    if (new_hdl == NULL && parent->get_type() == GPI_STRUCTURE) {
        /* vhpi_handle_by_name() doesn't always work for records, specifically
         * records in generics */
        vhpiHandleT iter = vhpi_iterator(vhpiSelectedNames, vhpi_hdl);
        if (iter != NULL) {
            while ((new_hdl = vhpi_scan(iter)) != NULL) {
                // Selected names come back fully qualified; keep only the
                // last path component for comparison against *name*.
                std::string selected_name =
                    vhpi_get_str(vhpiCaseNameP, new_hdl);
                std::size_t found = selected_name.find_last_of(".");
                if (found != std::string::npos) {
                    selected_name = selected_name.substr(found + 1);
                }
                if (selected_name == name) {
                    vhpi_release_handle(iter);
                    break;
                }
            }
        }
        // NOTE(review): if the scan exhausts without a match, new_hdl is NULL
        // here and is passed to vhpi_get(vhpiKindP, ...) below — confirm that
        // callers only request existing record members, or guard this path.
    } else if (new_hdl == NULL) {
        /* If not found, check to see if the name of a generate loop */
        vhpiHandleT iter = vhpi_iterator(vhpiInternalRegions, vhpi_hdl);
        if (iter != NULL) {
            vhpiHandleT rgn;
            for (rgn = vhpi_scan(iter); rgn != NULL; rgn = vhpi_scan(iter)) {
                if (vhpi_get(vhpiKindP, rgn) == vhpiForGenerateK) {
                    std::string rgn_name = vhpi_get_str(vhpiCaseNameP, rgn);
                    // Prefix match: "loop" matches "loop(0)" etc.
                    if (rgn_name.compare(0, name.length(), name) == 0) {
                        // Use the parent handle as the pseudo-region handle.
                        new_hdl = vhpi_hdl;
                        vhpi_release_handle(iter);
                        break;
                    }
                }
            }
        }
        if (new_hdl == NULL) {
            LOG_DEBUG("VHPI: Unable to query vhpi_handle_by_name %s",
                      fq_name.c_str());
            return NULL;
        }
    }
    /* Generate Loops have inconsistent behavior across VHPI. A "name"
     * without an index, i.e. dut.loop vs dut.loop(0), may or may not map to
     * to the start index. If it doesn't then it won't find anything.
     *
     * If this unique case is hit, we need to create the Pseudo-region, with the
     * handle being equivalent to the parent handle.
     */
    if (vhpi_get(vhpiKindP, new_hdl) == vhpiForGenerateK) {
        vhpi_release_handle(new_hdl);
        new_hdl = vhpi_hdl;
    }
    GpiObjHdl *new_obj = create_gpi_obj_from_handle(new_hdl, name, fq_name);
    if (new_obj == NULL) {
        vhpi_release_handle(new_hdl);
        LOG_DEBUG("VHPI: Unable to fetch object %s", fq_name.c_str());
        return NULL;
    }
    return new_obj;
}
// Look up the *index*-th element of *parent* and wrap it in a GpiObjHdl.
// The parent must be a generate array (pseudo-region), register, array, or
// string. For multi-dimensional arrays the received indices are accumulated
// via pseudo-handles until all dimensions are known, at which point the
// indices are flattened into a single zero-based index.
GpiObjHdl *VhpiImpl::native_check_create(int32_t index, GpiObjHdl *parent) {
    vhpiHandleT vhpi_hdl = parent->get_handle<vhpiHandleT>();
    std::string name = parent->get_name();
    std::string fq_name = parent->get_fullname();
    vhpiHandleT new_hdl = NULL;
    char buff[14];  // needs to be large enough to hold -2^31 to 2^31-1 in
                    // string form ('(''-'10+'')'\0')
    gpi_objtype_t obj_type = parent->get_type();
    if (obj_type == GPI_GENARRAY) {
        // Generate array: name the element with the generate-index separators
        // and resolve it by fully-qualified name.
        LOG_DEBUG(
            "VHPI: Native check create for index %d of parent %s "
            "(pseudo-region)",
            index, parent->get_name_str());
        snprintf(buff, sizeof(buff), "%d", index);
        std::string idx_str = buff;
        name += (GEN_IDX_SEP_LHS + idx_str + GEN_IDX_SEP_RHS);
        fq_name += (GEN_IDX_SEP_LHS + idx_str + GEN_IDX_SEP_RHS);
        std::vector<char> writable(fq_name.begin(), fq_name.end());
        writable.push_back('\0');
        new_hdl = vhpi_handle_by_name(&writable[0], NULL);
    } else if (obj_type == GPI_REGISTER || obj_type == GPI_ARRAY ||
               obj_type == GPI_STRING) {
        LOG_DEBUG("VHPI: Native check create for index %d of parent %s (%s)",
                  index, parent->get_fullname_str(),
                  vhpi_get_str(vhpiKindStrP, vhpi_hdl));
        snprintf(buff, sizeof(buff), "(%d)", index);
        std::string idx_str = buff;
        name += idx_str;
        fq_name += idx_str;
        // Resolve the base type, trying the subtype as an intermediate step
        // when the base type is not directly reachable.
        vhpiHandleT base_hdl = vhpi_handle(vhpiBaseType, vhpi_hdl);
        if (base_hdl == NULL) {
            vhpiHandleT st_hdl = vhpi_handle(vhpiSubtype, vhpi_hdl);
            if (st_hdl != NULL) {
                base_hdl = vhpi_handle(vhpiBaseType, st_hdl);
                vhpi_release_handle(st_hdl);
            }
        }
        if (base_hdl == NULL) {
            LOG_ERROR("VHPI: Unable to get the vhpiBaseType of %s",
                      parent->get_fullname_str());
            return NULL;
        }
        // NOTE(review): base_hdl does not appear to be released on any path
        // through this function — confirm whether this leaks a VHPI handle.
        vhpiIntT num_dim = vhpi_get(vhpiNumDimensionsP, base_hdl);
        int idx = 0;
        /* Need to translate the index into a zero-based flattened array index
         */
        if (num_dim > 1) {
            std::string hdl_name = vhpi_get_str(vhpiCaseNameP, vhpi_hdl);
            std::vector<int> indices;
            /* Need to determine how many indices have been received. A valid
             * handle will only be found when all indices are received,
             * otherwise need a pseudo-handle.
             *
             * When working with pseudo-handles:
             *     hdl_name: sig_name
             *     parent->get_name(): sig_name(x)(y)... where x,y,... are the
             * indices to a multi-dimensional array. pseudo_idx: (x)(y)...
             */
            if (hdl_name.length() < parent->get_name().length()) {
                std::string pseudo_idx =
                    parent->get_name().substr(hdl_name.length());
                // Parse each "(n)" group off the front of the pseudo index.
                while (pseudo_idx.length() > 0) {
                    std::size_t found = pseudo_idx.find_first_of(")");
                    if (found != std::string::npos) {
                        indices.push_back(
                            atoi(pseudo_idx.substr(1, found - 1).c_str()));
                        pseudo_idx = pseudo_idx.substr(found + 1);
                    } else {
                        break;
                    }
                }
            }
            indices.push_back(index);
            if (indices.size() == num_dim) {
#ifdef IUS
                /* IUS/Xcelium does not appear to set the vhpiIsUnconstrainedP
                 * property. IUS Docs say it will return -1 if unconstrained,
                 * but with vhpiIntT being unsigned, the value returned is
                 * below.
                 */
                const vhpiIntT UNCONSTRAINED = 2147483647;
#endif
                std::vector<vhpiHandleT> constraints;
                /* All necessary indices are available, need to iterate over
                 * dimension constraints to determine the index into the
                 * zero-based flattened array.
                 *
                 * Check the constraints on the base type first. (always works
                 * for Aldec, but not unconstrained types in IUS/Xcelium) If the
                 * base type fails, then try the sub-type. (sub-type is listed
                 * as deprecated for Aldec)
                 */
                vhpiHandleT it, constraint;
                it = vhpi_iterator(vhpiConstraints, base_hdl);
                if (it != NULL) {
                    while ((constraint = vhpi_scan(it)) != NULL) {
#ifdef IUS
                        vhpiIntT l_rng = vhpi_get(vhpiLeftBoundP, constraint);
                        vhpiIntT r_rng = vhpi_get(vhpiRightBoundP, constraint);
                        if (l_rng == UNCONSTRAINED || r_rng == UNCONSTRAINED) {
#else
                        if (vhpi_get(vhpiIsUnconstrainedP, constraint)) {
#endif
                            /* Bail and try the sub-type handle */
                            vhpi_release_handle(it);
                            break;
                        }
                        constraints.push_back(constraint);
                    }
                }
                /* If all the dimensions were not obtained, try again with the
                 * sub-type handle */
                if (constraints.size() != num_dim) {
                    vhpiHandleT sub_hdl = vhpi_handle(vhpiSubtype, vhpi_hdl);
                    ;
                    constraints.clear();
                    if (sub_hdl != NULL) {
                        it = vhpi_iterator(vhpiConstraints, sub_hdl);
                        if (it != NULL) {
                            while ((constraint = vhpi_scan(it)) != NULL) {
                                /* IUS/Xcelium only sets the
                                 * vhpiIsUnconstrainedP incorrectly on the base
                                 * type */
                                if (vhpi_get(vhpiIsUnconstrainedP,
                                             constraint)) {
                                    vhpi_release_handle(it);
                                    break;
                                }
                                constraints.push_back(constraint);
                            }
                        }
                    }
                }
                if (constraints.size() == num_dim) {
                    // Row-major flattening: walk dimensions from the last to
                    // the first, scaling by each dimension's length.
                    int scale = 1;
                    while (constraints.size() > 0) {
                        int raw_idx = indices.back();
                        constraint = constraints.back();
                        int left = static_cast<int>(
                            vhpi_get(vhpiLeftBoundP, constraint));
                        int right = static_cast<int>(
                            vhpi_get(vhpiRightBoundP, constraint));
                        int len = 0;
                        if (left > right) {
                            // Descending range (e.g. "downto").
                            idx += (scale * (left - raw_idx));
                            len = left - right + 1;
                        } else {
                            // Ascending range (e.g. "to").
                            idx += (scale * (raw_idx - left));
                            len = right - left + 1;
                        }
                        scale = scale * len;
                        indices.pop_back();
                        constraints.pop_back();
                    }
                } else {
                    LOG_ERROR("VHPI: Unable to access all constraints for %s",
                              parent->get_fullname_str());
                    return NULL;
                }
            } else {
                new_hdl = vhpi_hdl;  // Set to the parent handle to create the
                                     // pseudo-handle
            }
        } else {
            // Single dimension: the GPI handle already knows its range.
            int left = parent->get_range_left();
            int right = parent->get_range_right();
            if (left > right) {
                idx = left - index;
            } else {
                idx = index - left;
            }
        }
        if (new_hdl == NULL) {
            new_hdl = vhpi_handle_by_index(vhpiIndexedNames, vhpi_hdl, idx);
            if (!new_hdl) {
                /* Support for the above seems poor, so if it did not work
                   try an iteration instead, spotty support for
                   multi-dimensional arrays */
                vhpiHandleT iter = vhpi_iterator(vhpiIndexedNames, vhpi_hdl);
                if (iter != NULL) {
                    int curr_index = 0;
                    while ((new_hdl = vhpi_scan(iter)) != NULL) {
                        if (idx == curr_index) {
                            vhpi_release_handle(iter);
                            break;
                        }
                        curr_index++;
                    }
                }
            }
            if (new_hdl != NULL) {
                LOG_DEBUG("VHPI: Index (%d->%d) found %s (%s)", index, idx,
                          vhpi_get_str(vhpiCaseNameP, new_hdl),
                          vhpi_get_str(vhpiKindStrP, new_hdl));
            }
        }
    } else {
        LOG_ERROR(
            "VHPI: Parent of type %s must be of type GPI_GENARRAY, "
            "GPI_REGISTER, GPI_ARRAY, or GPI_STRING to have an index.",
            parent->get_type_str());
        return NULL;
    }
    if (new_hdl == NULL) {
        LOG_DEBUG("VHPI: Unable to query vhpi_handle_by_index %d", index);
        return NULL;
    }
    GpiObjHdl *new_obj = create_gpi_obj_from_handle(new_hdl, name, fq_name);
    if (new_obj == NULL) {
        vhpi_release_handle(new_hdl);
        LOG_DEBUG(
            "VHPI: Could not fetch object below entity (%s) at index (%d)",
            parent->get_name_str(), index);
        return NULL;
    }
    return new_obj;
}
// Locate the simulation's top-level instance and wrap it in a GpiObjHdl.
// If *name* is given, it is first tried directly via vhpi_handle_by_name;
// otherwise (or on failure) the vhpiRootInst is used, with its primary unit
// queried only to obtain the name (the root handle itself is returned so the
// DUT is not an object of kind vhpiEntityDeclK).
GpiObjHdl *VhpiImpl::get_root_handle(const char *name) {
    vhpiHandleT root = NULL;
    vhpiHandleT arch = NULL;
    vhpiHandleT dut = NULL;
    std::string root_name;
    const char *found;
    root = vhpi_handle(vhpiRootInst, NULL);
    check_vhpi_error();
    if (!root) {
        LOG_ERROR("VHPI: Attempting to get the vhpiRootInst failed");
        return NULL;
    } else {
        LOG_DEBUG("VHPI: We have found root='%s'",
                  vhpi_get_str(vhpiCaseNameP, root));
    }
    if (name) {
        if (NULL == (dut = vhpi_handle_by_name(name, NULL))) {
            LOG_DEBUG("VHPI: Unable to query by name");
            check_vhpi_error();
        }
    }
    if (!dut) {
        if (NULL == (arch = vhpi_handle(vhpiDesignUnit, root))) {
            LOG_DEBUG("VHPI: Unable to get vhpiDesignUnit via root");
            check_vhpi_error();
            return NULL;
        }
        if (NULL == (dut = vhpi_handle(vhpiPrimaryUnit, arch))) {
            LOG_DEBUG("VHPI: Unable to get vhpiPrimaryUnit via arch");
            check_vhpi_error();
            return NULL;
        }
        /* If this matches the name then it is what we want, but we
           use the handle two levels up as the DUT as we do not want an
           object of type vhpiEntityDeclK as the DUT */
        found = vhpi_get_str(vhpiCaseNameP, dut);
        // Replace the primary-unit handle with the root instance handle.
        dut = root;
    } else {
        found = vhpi_get_str(vhpiCaseNameP, dut);
    }
    // NOTE(review): the intermediate arch / primary-unit handles (and, on the
    // name-lookup path, the unused root handle) are not released here —
    // confirm whether these VHPI handles need vhpi_release_handle().
    if (!dut) {
        LOG_ERROR("VHPI: Attempting to get the DUT handle failed");
        return NULL;
    }
    if (!found) {
        LOG_ERROR("VHPI: Unable to query name for DUT handle");
        return NULL;
    }
    if (name != NULL && strcmp(name, found)) {
        LOG_WARN("VHPI: DUT '%s' doesn't match requested toplevel %s", found,
                 name);
        return NULL;
    }
    root_name = found;
    return create_gpi_obj_from_handle(dut, root_name, root_name);
}
// Build an iterator over the requested relationship of *obj_hdl*.
// Only plain object iteration (GPI_OBJECTS) is implemented; every other
// selector logs a warning and yields NULL.
GpiIterator *VhpiImpl::iterate_handle(GpiObjHdl *obj_hdl,
                                      gpi_iterator_sel_t type) {
    if (type == GPI_OBJECTS) {
        return new VhpiIterator(this, obj_hdl);
    }
    if (type == GPI_DRIVERS) {
        LOG_WARN("VHPI: Drivers iterator not implemented yet");
    } else if (type == GPI_LOADS) {
        LOG_WARN("VHPI: Loads iterator not implemented yet");
    } else {
        LOG_WARN("VHPI: Other iterator types not implemented yet");
    }
    return NULL;
}
// Create and arm a timed callback *time* simulator steps in the future.
// Returns NULL (after freeing the handle) when arming fails.
GpiCbHdl *VhpiImpl::register_timed_callback(uint64_t time) {
    VhpiTimedCbHdl *cb = new VhpiTimedCbHdl(this, time);
    if (cb->arm_callback() != 0) {
        delete cb;
        return NULL;
    }
    return cb;
}
// Arm the single pre-allocated read-write-synch callback object; NULL on failure.
GpiCbHdl *VhpiImpl::register_readwrite_callback() {
    return m_read_write.arm_callback() ? NULL : &m_read_write;
}
// Arm the single pre-allocated read-only-synch callback object; NULL on failure.
GpiCbHdl *VhpiImpl::register_readonly_callback() {
    return m_read_only.arm_callback() ? NULL : &m_read_only;
}
// Arm the single pre-allocated next-time-step callback object; NULL on failure.
GpiCbHdl *VhpiImpl::register_nexttime_callback() {
    return m_next_phase.arm_callback() ? NULL : &m_next_phase;
}
// Tear down the simulator-side registration for *gpi_hdl*.
// Always reports success (0).
int VhpiImpl::deregister_callback(GpiCbHdl *gpi_hdl) {
    gpi_hdl->cleanup_callback();
    return 0;
}
// Ask the simulator to finish the simulation.
void VhpiImpl::sim_end() {
    // Mark the shutdown callback for deletion so it is not treated as a
    // normal primed callback when the simulator winds down.
    sim_finish_cb->set_call_state(GPI_DELETE);
    vhpi_control(vhpiFinish, vhpiDiagTimeLoc);
    check_vhpi_error();
}
extern "C" {
// Main entry point for callbacks from simulator
void handle_vhpi_callback(const vhpiCbDataT *cb_data) {
    // user_data carries the VhpiCbHdl that was registered for this event.
    VhpiCbHdl *cb_hdl = (VhpiCbHdl *)cb_data->user_data;
    if (!cb_hdl) {
        LOG_CRITICAL("VHPI: Callback data corrupted: ABORTING");
        gpi_embed_end();
        return;
    }
    gpi_cb_state_e old_state = cb_hdl->get_call_state();
    // Only dispatch callbacks that are still primed; anything else has
    // already been handled or is being torn down.
    if (old_state == GPI_PRIMED) {
        cb_hdl->set_call_state(GPI_CALL);
        cb_hdl->run_callback();
        gpi_cb_state_e new_state = cb_hdl->get_call_state();
        /* We have re-primed in the handler */
        if (new_state != GPI_PRIMED)
            // Not re-primed: clean up, and free the handle if cleanup says so.
            if (cb_hdl->cleanup_callback()) {
                delete cb_hdl;
            }
    }
    return;
};
// Create and arm the startup callback (presumably fired by the simulator at
// the start of simulation — see VhpiStartupCbHdl).
static void register_initial_callback() {
    sim_init_cb = new VhpiStartupCbHdl(vhpi_table);
    sim_init_cb->arm_callback();
}
// Create and arm the shutdown callback (presumably fired by the simulator at
// the end of simulation — see VhpiShutdownCbHdl).
static void register_final_callback() {
    sim_finish_cb = new VhpiShutdownCbHdl(vhpi_table);
    sim_finish_cb->arm_callback();
}
// Construct the VHPI implementation object and register it with GPI.
static void register_embed() {
    vhpi_table = new VhpiImpl("VHPI");
    gpi_register_impl(vhpi_table);
}
// pre-defined VHPI registration table: VHPI-compliant simulators look up this
// symbol and invoke each routine in order until the NULL terminator.
COCOTBVHPI_EXPORT void (*vhpi_startup_routines[])() = {
    register_embed, gpi_load_extra_libs, register_initial_callback,
    register_final_callback, nullptr};
// For non-VHPI compliant applications that cannot find vhpi_startup_routines
COCOTBVHPI_EXPORT void vhpi_startup_routines_bootstrap() {
    // Walk the NULL-terminated registration table and invoke each routine in
    // order, exactly as a VHPI-compliant simulator would. (The previous
    // version pre-assigned vhpi_startup_routines[0] to a local only to
    // immediately overwrite it in the for-init.)
    for (int i = 0; vhpi_startup_routines[i] != nullptr; ++i) {
        vhpi_startup_routines[i]();
    }
}
}
GPI_ENTRY_POINT(cocotbvhpi, register_embed)
| 19,625 |
541 | <reponame>Jerryxiaoyu/maml_rl_v2
import numpy as np
import tensorflow as tf
import sandbox.rocky.tf.core.layers as L
### Start Helper functions ###
def make_input(shape, input_var=None, name="input", **kwargs):
    """Return *input_var* if given, else create a float32 placeholder.

    When *name* is not None the placeholder is created inside a variable
    scope of that name; the placeholder op itself is always named "input".
    """
    if input_var is not None:
        return input_var
    if name is None:
        return tf.placeholder(tf.float32, shape=shape, name="input")
    with tf.variable_scope(name):
        return tf.placeholder(tf.float32, shape=shape, name="input")
def _create_param(spec, shape, name, trainable=True, regularizable=True):
    """Create a tf variable for a layer parameter, or pass one through.

    *spec* may be an existing Tensor/Variable (returned as-is) or a callable
    initializer used to build a new float32 variable of *shape*.
    """
    if not callable(spec):
        # A ready-made tensor/variable is used directly.
        assert isinstance(spec, (tf.Tensor, tf.Variable))
        return spec
    # Non-regularizable parameters get a constant-zero regularizer so they
    # contribute nothing; regularizable ones are left to the default (None).
    regularizer = None if regularizable else (lambda _: tf.constant(0.))
    return tf.get_variable(
        name=name, shape=shape, initializer=spec, trainable=trainable,
        regularizer=regularizer, dtype=tf.float32
    )
def add_param(spec, shape, layer_name, name, weight_norm=None, variable_reuse=None, **tags):
    """Create a parameter named *name* inside the variable scope *layer_name*.

    Extra *tags* are forwarded to :func:`_create_param`; ``trainable`` and
    ``regularizable`` default to True when not supplied. Weight normalization
    is not implemented.
    """
    with tf.variable_scope(layer_name, reuse=variable_reuse):
        tags.setdefault('trainable', True)
        tags.setdefault('regularizable', True)
        param = _create_param(spec, shape, name, **tags)
    if weight_norm:
        raise NotImplementedError('Not supported.')
    return param
def make_dense_layer(input_shape, num_units, name='fc', W=L.XavierUniformInitializer(), b=tf.zeros_initializer, weight_norm=False, **kwargs):
    """Create the W (and optional b) parameters of a fully-connected layer.

    Returns ``(W, b, output_shape)`` where ``output_shape`` is
    ``(batch, num_units)``. Pass ``b=None`` to omit the bias.
    """
    # All non-batch dimensions are flattened into a single feature axis.
    flat_inputs = int(np.prod(input_shape[1:]))
    W = add_param(W, (flat_inputs, num_units), layer_name=name, name='W',
                  weight_norm=weight_norm)
    if b is not None:
        b = add_param(b, (num_units,), layer_name=name, name='b',
                      regularizable=False, weight_norm=weight_norm)
    return W, b, (input_shape[0], num_units)
def forward_dense_layer(input, W, b, nonlinearity=tf.identity, batch_norm=False, scope='', reuse=True, is_training=False):
    """Apply a dense layer: ``nonlinearity(input @ W + b)``.

    Inputs with more than two dimensions are flattened into a batch of
    feature vectors first. Batch normalization is not implemented.
    """
    x = input
    if x.get_shape().ndims > 2:
        # Collapse everything beyond the batch dimension.
        x = tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))
    pre_activation = tf.matmul(x, W)
    if b is not None:
        pre_activation = pre_activation + tf.expand_dims(b, 0)
    if batch_norm:
        raise NotImplementedError('not supported')
    return nonlinearity(pre_activation)
def make_param_layer(num_units, name='', param=tf.zeros_initializer(), trainable=True):
    """Create a free parameter vector of length *num_units*."""
    return add_param(param, (num_units,), layer_name=name, name='param',
                     trainable=trainable)
def forward_param_layer(input, param):
    """Tile *param* so it matches the leading (batch) dims of *input*.

    Returns a tensor of shape ``input.shape[:-1] + (len(param),)``.
    """
    param_t = tf.convert_to_tensor(param)
    n_units = int(param_t.get_shape()[0])
    rank = input.get_shape().ndims
    # Shape (1, ..., 1, n_units): ready to be tiled over the leading dims.
    broadcastable = tf.reshape(param_t, (1,) * (rank - 1) + (n_units,))
    reps = tf.concat([tf.shape(input)[:rank - 1], [1]], 0)
    return tf.tile(broadcastable, reps)
### End Helper functions ###
| 1,256 |
36,552 | <filename>src/core/lib/gprpp/memory.h
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_LIB_GPRPP_MEMORY_H
#define GRPC_CORE_LIB_GPRPP_MEMORY_H
#include <grpc/support/port_platform.h>
#include <limits>
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
namespace grpc_core {
// Deleter that releases char buffers with gpr_free (for use with unique_ptr).
class DefaultDeleteChar {
 public:
  void operator()(char* p) {
    if (p != nullptr) {
      gpr_free(p);
    }
  }
};
// UniquePtr<T> is only allowed for char, and UniquePtr<char> is deprecated
// in favor of std::string. UniquePtr<char> is equivalent to std::unique_ptr
// except that it uses gpr_free as the deleter.
template <typename T>
using UniquePtr = std::unique_ptr<T, DefaultDeleteChar>;
// Allocate a zero-initialized T with gpr_zalloc. Restricted to trivial types
// because no constructor is ever run; the caller owns the returned memory.
template <class T>
T* Zalloc() {
  static_assert(std::is_trivial<T>::value, "Type should be trivial");
  return static_cast<T*>(gpr_zalloc(sizeof(T)));
}
} // namespace grpc_core
#endif /* GRPC_CORE_LIB_GPRPP_MEMORY_H */
| 533 |
9,734 | <reponame>dbbyn/arrow<gh_stars>1000+
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include "arrow/flight/sql/types.h"
namespace arrow {
namespace flight {
namespace sql {
namespace internal {
/// \brief Auxiliary class used to populate GetSqlInfo's DenseUnionArray with different
/// data types.
class SqlInfoResultAppender {
 public:
  /// \brief Append a string to the DenseUnionBuilder.
  /// \param[in] value Value to be appended.
  Status operator()(const std::string& value);
  /// \brief Append a bool to the DenseUnionBuilder.
  /// \param[in] value Value to be appended.
  Status operator()(bool value);
  /// \brief Append a int64_t to the DenseUnionBuilder.
  /// \param[in] value Value to be appended.
  Status operator()(int64_t value);
  /// \brief Append a int32_t to the DenseUnionBuilder.
  /// \param[in] value Value to be appended.
  Status operator()(int32_t value);
  /// \brief Append a string list to the DenseUnionBuilder.
  /// \param[in] value Value to be appended.
  Status operator()(const std::vector<std::string>& value);
  /// \brief Append a int32 to int32 list map to the DenseUnionBuilder.
  /// \param[in] value Value to be appended.
  Status operator()(const std::unordered_map<int32_t, std::vector<int32_t>>& value);
  /// \brief Create a Variant visitor that appends data to the given
  /// DenseUnionBuilder.
  /// \param[in] value_builder DenseUnionBuilder to append data to.
  explicit SqlInfoResultAppender(DenseUnionBuilder* value_builder);
  SqlInfoResultAppender(const SqlInfoResultAppender&) = delete;
  SqlInfoResultAppender(SqlInfoResultAppender&&) = delete;
  SqlInfoResultAppender& operator=(const SqlInfoResultAppender&) = delete;
 private:
  DenseUnionBuilder* value_builder_;
  // Builders for each child on dense union
  StringBuilder* string_value_builder_;
  BooleanBuilder* bool_value_builder_;
  Int64Builder* bigint_value_builder_;
  Int32Builder* int32_bitmask_builder_;
  ListBuilder* string_list_builder_;
  MapBuilder* int32_to_int32_list_builder_;
  // Type codes of the dense-union children, in child declaration order.
  enum : int8_t {
    kStringValueIndex = 0,
    kBoolValueIndex = 1,
    kBigIntValueIndex = 2,
    kInt32BitMaskIndex = 3,
    kStringListIndex = 4,
    kInt32ToInt32ListIndex = 5
  };
};
} // namespace internal
} // namespace sql
} // namespace flight
} // namespace arrow
| 938 |
3,428 | <reponame>ghalimi/stdlib
{"id":"01976","group":"easy-ham-1","checksum":{"type":"MD5","value":"36f1e6364384d6b12b89200071b5c926"},"text":"From <EMAIL> Thu Sep 26 16:42:45 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: [email protected]\nReceived: from localhost (jalapeno [127.0.0.1])\n\tby jmason.org (Postfix) with ESMTP id C0A1B16F1E\n\tfor <jm@localhost>; Thu, 26 Sep 2002 16:41:43 +0100 (IST)\nReceived: from jalapeno [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Thu, 26 Sep 2002 16:41:43 +0100 (IST)\nReceived: from dogma.slashnull.org (localhost [127.0.0.1]) by\n dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g8QFWJg25145 for\n <<EMAIL>>; Thu, 26 Sep 2002 16:32:25 +0100\nMessage-Id: <<EMAIL>>\nTo: [email protected]\nFrom: newscientist <<EMAIL>>\nSubject: Earth's magnetic field 'boosts gravity'\nDate: Thu, 26 Sep 2002 15:32:19 -0000\nContent-Type: text/plain; encoding=utf-8\n\nURL: http://www.newsisfree.com/click/-0,8132228,1440/\nDate: Not supplied\n\nThe controversial claim could be evidence of hidden extra dimensions and help a \n'theory of everything' fall into place\n\n\n"} | 483 |
652 | <reponame>brayest/testcode<filename>server/api/push_notifications.py
import json
from pathlib import Path
import firebase_admin
from firebase_admin import credentials, messaging
from firebase_admin.messaging import ApiCallError
from loguru import logger
from server.error_handling import NotificationError
def init_app(app):
    """Initialise the Firebase Admin SDK once per process.

    Reads the service-account JSON from ``app.config["FIREBASE_JSON"]``.
    A no-op when a firebase app has already been initialised.
    """
    if len(firebase_admin._apps):
        return
    logger.debug("initializing firebase app")
    cred = credentials.Certificate(json.loads(app.config["FIREBASE_JSON"]))
    firebase_admin.initialize_app(cred)
class FCM(object):
    """Thin wrapper around Firebase Cloud Messaging."""

    @staticmethod
    def notify(token, title, body, payload=None):
        """Send a push notification to the device identified by *token*.

        Raises NotificationError when the FCM call is rejected.
        """
        note = messaging.Notification(title=title, body=body)
        message = messaging.Message(notification=note, token=token,
                                    data=payload)
        try:
            messaging.send(message)
        except (ValueError, ApiCallError) as e:
            raise NotificationError(str(e))
| 362 |
2,151 | <gh_stars>1000+
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package android.view.textservice;
import android.os.Parcel;
import android.os.Parcelable;
import android.text.ParcelableSpan;
import android.text.SpannableStringBuilder;
import android.text.TextUtils;
import android.text.style.SpellCheckSpan;
/**
* This class contains a metadata of the input of TextService
*/
public final class TextInfo implements Parcelable {
    // Snapshot of the text (including parcelable spans) this TextInfo wraps.
    private final CharSequence mCharSequence;
    // Caller-supplied opaque identifier, round-tripped through the service.
    private final int mCookie;
    // Caller-supplied sequence number, round-tripped through the service.
    private final int mSequenceNumber;
    private static final int DEFAULT_COOKIE = 0;
    private static final int DEFAULT_SEQUENCE_NUMBER = 0;
    /**
     * Constructor.
     * @param text the text which will be input to TextService
     */
    public TextInfo(String text) {
        this(text, 0, getStringLengthOrZero(text), DEFAULT_COOKIE, DEFAULT_SEQUENCE_NUMBER);
    }
    /**
     * Constructor.
     * @param text the text which will be input to TextService
     * @param cookie the cookie for this TextInfo
     * @param sequenceNumber the sequence number for this TextInfo
     */
    public TextInfo(String text, int cookie, int sequenceNumber) {
        this(text, 0, getStringLengthOrZero(text), cookie, sequenceNumber);
    }
    /** Returns {@code text.length()}, or 0 when {@code text} is null or empty. */
    private static int getStringLengthOrZero(final String text) {
        return TextUtils.isEmpty(text) ? 0 : text.length();
    }
    /**
     * Constructor.
     * @param charSequence the text which will be input to TextService. Attached spans that
     * implement {@link ParcelableSpan} will also be marshaled alongside with the text.
     * @param start the beginning of the range of text (inclusive).
     * @param end the end of the range of text (exclusive).
     * @param cookie the cookie for this TextInfo
     * @param sequenceNumber the sequence number for this TextInfo
     */
    public TextInfo(CharSequence charSequence, int start, int end, int cookie, int sequenceNumber) {
        if (TextUtils.isEmpty(charSequence)) {
            throw new IllegalArgumentException("charSequence is empty");
        }
        // Create a snapshot of the text including spans in case they are updated outside later.
        final SpannableStringBuilder spannableString =
                new SpannableStringBuilder(charSequence, start, end);
        // SpellCheckSpan is for internal use. We do not want to marshal this for TextService.
        final SpellCheckSpan[] spans = spannableString.getSpans(0, spannableString.length(),
                SpellCheckSpan.class);
        for (int i = 0; i < spans.length; ++i) {
            spannableString.removeSpan(spans[i]);
        }
        mCharSequence = spannableString;
        mCookie = cookie;
        mSequenceNumber = sequenceNumber;
    }
    // Unmarshalling constructor; field order must match writeToParcel().
    public TextInfo(Parcel source) {
        mCharSequence = TextUtils.CHAR_SEQUENCE_CREATOR.createFromParcel(source);
        mCookie = source.readInt();
        mSequenceNumber = source.readInt();
    }
    /**
     * Used to package this object into a {@link Parcel}.
     *
     * @param dest The {@link Parcel} to be written.
     * @param flags The flags used for parceling.
     */
    @Override
    public void writeToParcel(Parcel dest, int flags) {
        TextUtils.writeToParcel(mCharSequence, dest, flags);
        dest.writeInt(mCookie);
        dest.writeInt(mSequenceNumber);
    }
    /**
     * @return the text which is an input of a text service
     */
    public String getText() {
        if (mCharSequence == null) {
            return null;
        }
        return mCharSequence.toString();
    }
    /**
     * @return the charSequence which is an input of a text service. This may have some parcelable
     * spans.
     */
    public CharSequence getCharSequence() {
        return mCharSequence;
    }
    /**
     * @return the cookie of TextInfo
     */
    public int getCookie() {
        return mCookie;
    }
    /**
     * @return the sequence of TextInfo
     */
    public int getSequence() {
        return mSequenceNumber;
    }
    /**
     * Used to make this class parcelable.
     */
    public static final Parcelable.Creator<TextInfo> CREATOR
            = new Parcelable.Creator<TextInfo>() {
        @Override
        public TextInfo createFromParcel(Parcel source) {
            return new TextInfo(source);
        }
        @Override
        public TextInfo[] newArray(int size) {
            return new TextInfo[size];
        }
    };
    /**
     * Used to make this class parcelable.
     */
    @Override
    public int describeContents() {
        return 0;
    }
}
| 1,896 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef _WINCLIPBOARD_HXX_
#define _WINCLIPBOARD_HXX_
//------------------------------------------------------------------------
// includes
//------------------------------------------------------------------------
#include <rtl/ustring.hxx>
#include <sal/types.h>
#include <cppuhelper/compbase4.hxx>
#include <com/sun/star/datatransfer/XTransferable.hpp>
#include <com/sun/star/datatransfer/clipboard/XClipboardEx.hpp>
#include <com/sun/star/datatransfer/clipboard/XClipboardOwner.hpp>
#include <com/sun/star/datatransfer/clipboard/XClipboardListener.hpp>
#include <com/sun/star/datatransfer/clipboard/XClipboardNotifier.hpp>
#include <com/sun/star/datatransfer/clipboard/XFlushableClipboard.hpp>
#include <com/sun/star/lang/XServiceInfo.hpp>
#include <osl/conditn.hxx>
#include <memory>
// forward
class CWinClipbImpl;
//------------------------------------------------------------------------
// implements the XClipboard[Ex] ... interfaces
// for the clipboard viewer mechanism we need a static callback function
// and a static member to reasocciate from this static function to the
// class instance
// watch out: we are using only one static member variable and not a list
// because we assume to be instanciated only once
// this will be asured by an OneInstanceFactory of the service and not
// by this class!
//------------------------------------------------------------------------
// helper class, so that the mutexes are constructed
// before the constructor of WeakComponentImplHelper
// will be called and initialized with this mutex
class CWinClipboardDummy
{
protected:
    osl::Mutex m_aMutex;
    osl::Mutex m_aCbListenerMutex;  // NOTE(review): presumably serializes
                                    // clipboard-listener access — confirm
};
class CWinClipboard :
    public CWinClipboardDummy,
    public cppu::WeakComponentImplHelper4<
        ::com::sun::star::datatransfer::clipboard::XClipboardEx, \
        ::com::sun::star::datatransfer::clipboard::XFlushableClipboard,
        ::com::sun::star::datatransfer::clipboard::XClipboardNotifier,
        ::com::sun::star::lang::XServiceInfo >
{
public:
    CWinClipboard( const ::com::sun::star::uno::Reference< ::com::sun::star::lang::XMultiServiceFactory >& rServiceManager,
                   const ::rtl::OUString& aClipboardName );
    //------------------------------------------------
    // XClipboard
    //------------------------------------------------
    virtual ::com::sun::star::uno::Reference< ::com::sun::star::datatransfer::XTransferable > SAL_CALL getContents(  )
        throw( ::com::sun::star::uno::RuntimeException );
    virtual void SAL_CALL setContents(
        const ::com::sun::star::uno::Reference< ::com::sun::star::datatransfer::XTransferable >& xTransferable,
        const ::com::sun::star::uno::Reference< ::com::sun::star::datatransfer::clipboard::XClipboardOwner >& xClipboardOwner )
        throw( ::com::sun::star::uno::RuntimeException );
    virtual ::rtl::OUString SAL_CALL getName(  )
        throw( ::com::sun::star::uno::RuntimeException );
    //------------------------------------------------
    // XFlushableClipboard
    //------------------------------------------------
    virtual void SAL_CALL flushClipboard( ) throw( com::sun::star::uno::RuntimeException );
    //------------------------------------------------
    // XClipboardEx
    //------------------------------------------------
    virtual sal_Int8 SAL_CALL getRenderingCapabilities(  ) throw( ::com::sun::star::uno::RuntimeException );
    //------------------------------------------------
    // XClipboardNotifier
    //------------------------------------------------
    virtual void SAL_CALL addClipboardListener(
        const ::com::sun::star::uno::Reference< ::com::sun::star::datatransfer::clipboard::XClipboardListener >& listener )
        throw( ::com::sun::star::uno::RuntimeException );
    virtual void SAL_CALL removeClipboardListener(
        const ::com::sun::star::uno::Reference< ::com::sun::star::datatransfer::clipboard::XClipboardListener >& listener )
        throw( ::com::sun::star::uno::RuntimeException );
    //------------------------------------------------
    // overwrite base class method, which is called
    // by base class dispose function
    //------------------------------------------------
    virtual void SAL_CALL disposing();
    //------------------------------------------------
    // XServiceInfo
    //------------------------------------------------
    virtual ::rtl::OUString SAL_CALL getImplementationName(  )
        throw(::com::sun::star::uno::RuntimeException);
    virtual sal_Bool SAL_CALL supportsService( const ::rtl::OUString& ServiceName )
        throw(::com::sun::star::uno::RuntimeException);
    virtual ::com::sun::star::uno::Sequence< ::rtl::OUString > SAL_CALL getSupportedServiceNames(  )
        throw(::com::sun::star::uno::RuntimeException);
private:
    void SAL_CALL notifyAllClipboardListener( );
private:
    // NOTE(review): ::std::auto_ptr is deprecated (removed in C++17);
    // std::unique_ptr is the replacement once a C++11 toolchain is required.
    ::std::auto_ptr< CWinClipbImpl > m_pImpl;
    ::com::sun::star::uno::Reference< ::com::sun::star::lang::XMultiServiceFactory > m_SrvMgr;
    friend class CWinClipbImpl;
};
#endif
| 1,693 |
713 | <reponame>franz1981/infinispan<filename>core/src/test/java/org/infinispan/persistence/ClusteredTxConditionalCommandTest.java
package org.infinispan.persistence;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.distribution.Ownership;
import org.infinispan.test.fwk.InCacheMode;
import org.testng.annotations.Test;
/**
 * Verifies that conditional commands fetch values from the cache loader correctly, even when the
 * skip cache load/store flags are set.
 * <p/>
 * Runs against a transactional distributed cache without passivation.
 *
 * @author <NAME>
 * @since 7.0
 */
@Test(groups = "functional", testName = "persistence.ClusteredTxConditionalCommandTest")
@InCacheMode({CacheMode.DIST_SYNC})
public class ClusteredTxConditionalCommandTest extends ClusteredConditionalCommandTest {

   // TX optimistic but without WSC!
   public ClusteredTxConditionalCommandTest() {
      super(true, false);
   }

   @Override
   protected <K, V> void assertLoadAfterOperation(CacheHelper<K, V> cacheHelper, ConditionalOperation operation, Ownership ownership, boolean skipLoad) {
      if (ownership == Ownership.PRIMARY) {
         // Only the primary owner loads, and only when loading is not skipped.
         assertLoad(cacheHelper, skipLoad ? 0 : 1, 0, 0);
      } else if (ownership == Ownership.BACKUP) {
         // If the command succeeds, WSC is executed - load on primary owner
         assertLoad(cacheHelper, 0, skipLoad ? 0 : 1, 0);
      } else if (ownership == Ownership.NON_OWNER) {
         if (skipLoad) {
            // Replace_if does not load anything because it is unsuccessful on originator, as the retrieved
            // remote entry is null due to the skip load flag.
            assertLoad(cacheHelper, 0, 0, 0);
         } else {
            // The entry is loaded into DC upon the initial value retrieval (ClusteredGetCommand).
            // It gets loaded all the time on primary, but if the response to the retrieval does not
            // come soon enough, staggered logic sends second retrieval to backup owner, so it's possible
            // that both owners load once.
            assertEquals("primary owner load", 1, cacheHelper.loads(Ownership.PRIMARY));
            long backupLoads = cacheHelper.loads(Ownership.BACKUP);
            assertTrue("backup owner load: " + backupLoads, backupLoads <= 1);
            assertEquals("non owner load", 0, cacheHelper.loads(Ownership.NON_OWNER));
         }
      }
   }
}
| 953 |
1,508 | package org.knowm.xchange.ftx.dto.trade;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.math.BigDecimal;
/**
 * Immutable request body for FTX's "modify order" endpoint: the new price and/or
 * size, plus an optional client-assigned order id.
 */
public class FtxModifyOrderRequestPayload {

  /** New order price. */
  private final BigDecimal price;
  /** New order size. */
  private final BigDecimal size;
  /** Client-assigned order id. */
  private final String clientId;

  public FtxModifyOrderRequestPayload(
      @JsonProperty("price") BigDecimal price,
      @JsonProperty("size") BigDecimal size,
      @JsonProperty("clientId") String clientId) {
    this.price = price;
    this.size = size;
    this.clientId = clientId;
  }

  public BigDecimal getPrice() {
    return price;
  }

  public BigDecimal getSize() {
    return size;
  }

  public String getClientId() {
    return clientId;
  }

  @Override
  public String toString() {
    // Same output as concatenation, built incrementally.
    StringBuilder sb = new StringBuilder("FtxModifyOrderRequestPayload{");
    sb.append("price=").append(price);
    sb.append(", size=").append(size);
    sb.append(", clientId='").append(clientId).append('\'').append('}');
    return sb.toString();
  }
}
| 418 |
8,844 | (c for b in a)
(d for e in f)  # generator expression as a bare statement; looks like a parser test fixture — the genexp object is created (evaluating `f` eagerly) and immediately discarded
| 16 |
348 | <filename>docs/data/t2/047/47266.json<gh_stars>100-1000
{"nom":"Saint-Pé-Saint-Simon","dpt":"Lot-et-Garonne","inscrits":174,"abs":32,"votants":142,"blancs":6,"nuls":4,"exp":132,"res":[{"panneau":"1","voix":74},{"panneau":"2","voix":58}]} | 102 |
322 | <gh_stars>100-1000
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.eagle.storage.jdbc.criteria.impl;
import org.apache.eagle.storage.jdbc.JdbcConstants;
import org.apache.eagle.storage.jdbc.criteria.CriteriaBuilder;
import org.apache.torque.ColumnImpl;
import org.apache.torque.criteria.Criteria;
import org.apache.torque.criteria.Criterion;
import org.apache.torque.criteria.SqlEnum;
import java.util.List;
/**
* @since 3/30/15
*/
public class PrimaryKeyCriteriaBuilder implements CriteriaBuilder {
private final List<String> keys;
private final String tableName;
public PrimaryKeyCriteriaBuilder(List<String> keys,String tableName){
this.keys = keys;
this.tableName = tableName;
}
@Override
public Criteria build() {
Criteria root = new Criteria();
for(String key:keys){
root = root.or(new Criterion(new ColumnImpl(this.tableName, JdbcConstants.ROW_KEY_COLUMN_NAME),key, SqlEnum.EQUAL));
}
return root;
}
} | 569 |
782 | <reponame>YunLemon/BoofCV<filename>main/boofcv-feature/src/main/java/boofcv/alg/tracker/dda/DetectDescribeAssociateTracker.java
/*
* Copyright (c) 2021, <NAME>. All Rights Reserved.
*
* This file is part of BoofCV (http://boofcv.org).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package boofcv.alg.tracker.dda;
import boofcv.abst.feature.associate.AssociateDescription2D;
import boofcv.abst.feature.associate.AssociateDescriptionSets2D;
import boofcv.abst.feature.detdesc.DetectDescribePoint;
import boofcv.abst.tracker.ConfigTrackerDda;
import boofcv.abst.tracker.PointTrack;
import boofcv.abst.tracker.PointTracker;
import boofcv.alg.descriptor.UtilFeature;
import boofcv.struct.feature.AssociatedIndex;
import boofcv.struct.feature.TupleDesc;
import boofcv.struct.image.ImageGray;
import boofcv.struct.image.ImageType;
import georegression.struct.point.Point2D_F64;
import lombok.Getter;
import lombok.Setter;
import org.ddogleg.struct.DogArray;
import org.ddogleg.struct.DogArray_I32;
import org.ddogleg.struct.FastAccess;
import org.ddogleg.struct.FastArray;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
/**
 * <p>
 * Base class for detect-describe-associate type trackers. Tracker works by detecting features in each image,
 * computing a descriptor for each feature, then associating the features together.
 * </p>
 *
 * @author <NAME>
 */
public class DetectDescribeAssociateTracker<I extends ImageGray<I>, TD extends TupleDesc<TD>> {
	// associates features between two images together
	protected AssociateDescriptionSets2D<TD> associate;
	// Detects and describes image features
	protected DetectDescribePoint<I, TD> detector;

	// all tracks. active and inactive.
	protected @Getter DogArray<PointTrack> tracksAll;
	// recently associated tracks
	protected @Getter List<PointTrack> tracksActive = new ArrayList<>();
	// tracks not matched to any recent features
	protected @Getter List<PointTrack> tracksInactive = new ArrayList<>();
	// tracks dropped by the tracker
	protected @Getter List<PointTrack> tracksDropped = new ArrayList<>();
	// tracks recently spawned
	protected @Getter List<PointTrack> tracksNew = new ArrayList<>();

	// ID of the most recently processed frame
	protected @Getter long frameID = -1;

	// number of features created. Used to assign unique IDs
	protected long featureID = 0;

	// should it update the feature description after each association?
	@Getter @Setter boolean updateDescription;

	// maximum number of tracks it will keep track of that were not associated before it starts discarding
	protected int maxInactiveTracks;

	// Random number generator. Used to select which excess inactive tracks to discard.
	protected Random rand;

	// destination features are ones which were detected in this frame
	protected FastArray<TD> dstDesc;
	protected DogArray_I32 dstSet = new DogArray_I32();
	protected FastArray<Point2D_F64> dstPixels = new FastArray<>(Point2D_F64.class);

	// source features are ones from active tracks
	protected FastArray<TD> srcDesc;
	protected DogArray_I32 srcSet = new DogArray_I32();
	protected FastArray<Point2D_F64> srcPixels = new FastArray<>(Point2D_F64.class);

	/**
	 * Configures tracker
	 *
	 * @param detector Feature detector and descriptor
	 * @param associate Association
	 * @param config Configures behavior.
	 */
	public DetectDescribeAssociateTracker( DetectDescribePoint<I, TD> detector,
										   final AssociateDescription2D<TD> associate,
										   ConfigTrackerDda config ) {
		this.detector = detector;
		this.associate = new AssociateDescriptionSets2D<>(associate);
		this.updateDescription = config.updateDescription;
		this.maxInactiveTracks = config.maxInactiveTracks;
		this.rand = new Random(config.seed);

		this.dstDesc = new FastArray<>(detector.getDescriptionType());
		this.srcDesc = new FastArray<>(detector.getDescriptionType());

		this.tracksAll = new DogArray<>(this::createNewTrack, this::resetTrack);

		this.associate.initializeSets(detector.getNumberOfSets());
	}

	protected DetectDescribeAssociateTracker() {}

	/**
	 * Creates a new track and sets the descriptor
	 */
	protected PointTrack createNewTrack() {
		var t = new PointTrack();
		t.setDescription(detector.createDescription());
		return t;
	}

	/**
	 * Resets the track but saves the descriptor reference (and the user cookie)
	 * so they can be recycled instead of reallocated.
	 */
	protected void resetTrack( PointTrack t ) {
		TD desc = t.getDescription();
		Object cookie = t.getCookie();
		t.reset();
		t.setDescription(desc);
		t.setCookie(cookie);
	}

	/**
	 * Discards all tracking history
	 */
	public void reset() {
		dropAllTracks();
		featureID = 0;
		frameID = -1;
	}

	/**
	 * Detect features and associate with existing tracks
	 *
	 * @param input Image to process
	 */
	public void process( I input ) {
		if (frameID == -1)
			associate.initializeAssociator(input.width, input.height);
		frameID++;

		tracksActive.clear();
		tracksInactive.clear();
		tracksDropped.clear();
		tracksNew.clear();

		detector.detect(input);

		final int N = detector.getNumberOfFeatures();

		// initialize data structures
		dstDesc.resize(N);
		dstSet.resize(N);
		dstPixels.resize(N);

		// create a list of detected feature descriptions
		for (int i = 0; i < N; i++) {
			dstDesc.data[i] = detector.getDescription(i);
			dstSet.data[i] = detector.getSet(i);
			dstPixels.data[i] = detector.getLocation(i);
		}

		// nothing to associate against yet
		if (tracksAll.size == 0) {
			return;
		}

		performTracking();

		// add unassociated to the list
		DogArray_I32 unassociatedIdx = associate.getUnassociatedSource();
		for (int j = 0; j < unassociatedIdx.size(); j++) {
			tracksInactive.add(tracksAll.get(unassociatedIdx.get(j)));
		}

		dropExcessiveInactiveTracks(unassociatedIdx);
	}

	/**
	 * If there are too many unassociated tracks, randomly select some of those tracks and drop them
	 */
	void dropExcessiveInactiveTracks( DogArray_I32 unassociated ) {
		if (unassociated.size > maxInactiveTracks) {
			// make the first N elements the ones which will be dropped (partial Fisher-Yates shuffle)
			int numDrop = unassociated.size - maxInactiveTracks;
			for (int i = 0; i < numDrop; i++) {
				int selected = rand.nextInt(unassociated.size - i) + i;
				int a = unassociated.get(i);
				unassociated.data[i] = unassociated.data[selected];
				unassociated.data[selected] = a;
			}

			// Drop tracks, but do so in reverse order so that the faster removeSwap() can be used since it changes
			// the order of later tracks
			unassociated.size = numDrop;
			unassociated.sort();
			for (int i = unassociated.size - 1; i >= 0; i--) {
				tracksDropped.add(dropTrackIndexInAll(unassociated.get(i)));
			}
		}
	}

	/**
	 * Associate detections to tracks
	 */
	protected void performTracking() {
		// Create a list for association from all tracks
		final int numTracks = tracksAll.size();
		srcDesc.resize(numTracks);
		srcPixels.resize(numTracks);
		srcSet.resize(numTracks);
		for (int i = 0; i < numTracks; i++) {
			PointTrack t = tracksAll.get(i);
			srcDesc.data[i] = t.getDescription();
			srcPixels.data[i] = t.pixel;
			srcSet.data[i] = t.detectorSetId;
		}

		// Associate existing tracks with detections
		UtilFeature.setSource(srcDesc, srcSet, srcPixels, associate);
		UtilFeature.setDestination(dstDesc, dstSet, dstPixels, associate);
		associate.associate();

		FastAccess<AssociatedIndex> matches = associate.getMatches();

		for (int i = 0; i < matches.size; i++) {
			AssociatedIndex indexes = matches.data[i];
			PointTrack track = tracksAll.get(indexes.src);
			Point2D_F64 loc = dstPixels.data[indexes.dst];
			track.pixel.setTo(loc.x, loc.y);
			track.lastSeenFrameID = frameID;
			tracksActive.add(track);

			// update the description
			if (updateDescription) {
				((TD)track.getDescription()).setTo(dstDesc.get(indexes.dst));
			}
		}
	}

	/**
	 * Takes the current crop of detected features and makes them the keyframe
	 */
	public void spawnTracks() {
		// If there are no tracks then associate is not called. Reset() could have been called at associate is
		// in an undefined state
		if (tracksAll.size == 0) {
			for (int i = 0; i < dstDesc.size; i++) {
				Point2D_F64 loc = dstPixels.get(i);
				addNewTrack(dstSet.get(i), loc.x, loc.y, dstDesc.get(i));
			}
			return;
		}

		// create new tracks from latest unassociated detected features
		DogArray_I32 unassociated = associate.getUnassociatedDestination();

		for (int i = 0; i < unassociated.size; i++) {
			int indexDst = unassociated.get(i);
			Point2D_F64 loc = dstPixels.get(indexDst);
			// BUG FIX: the detector set must be looked up with the detection index
			// (indexDst) like the pixel and descriptor, not with the loop index 'i'
			// into the unassociated list. Using 'i' assigned new tracks an arbitrary
			// detector set, corrupting subsequent set-based association.
			addNewTrack(dstSet.get(indexDst), loc.x, loc.y, dstDesc.get(indexDst));
		}
	}

	/**
	 * Adds a new track given its location and description
	 */
	protected void addNewTrack( int set, double x, double y, TD desc ) {
		PointTrack p = tracksAll.grow();
		p.pixel.setTo(x, y);
		((TD)p.getDescription()).setTo(desc);

		p.spawnFrameID = frameID;
		p.lastSeenFrameID = frameID;
		p.detectorSetId = set;
		p.featureId = featureID++;

//		if( tracksActive.contains(p))
//			throw new RuntimeException("Contained twice active! p.id="+p.featureId);
		tracksNew.add(p);
		tracksActive.add(p);
	}

	/**
	 * Drops all tracks
	 */
	public void dropAllTracks() {
		tracksActive.clear();
		tracksInactive.clear();
		tracksAll.reset();
		tracksNew.clear();
	}

	/**
	 * Remove from active list and mark so that it is dropped in the next cycle
	 *
	 * @param track The track which is to be dropped
	 * @return true if the track was found and dropped
	 */
	public boolean dropTrack( PointTrack track ) {
		int indexInAll = tracksAll.indexOf(track);
		if (indexInAll < 0)
			return false; // this is probably a bug...

		dropTrackIndexInAll(indexInAll);

//		if( tracksActive.contains(track))
//			throw new RuntimeException("Contained twice active! pt.id="+track.featureId);
		return true;
	}

	/**
	 * Given the index of the track in the `all` list, drop it from the tracker
	 */
	private PointTrack dropTrackIndexInAll( int indexInAll ) {
		PointTrack track = tracksAll.removeSwap(indexInAll);

		// the track may or may not be in the active list
		boolean found = tracksActive.remove(track);
		found |= tracksInactive.remove(track);

		// if a track has been drop it must be in `all` and `active` OR `inactive` lists
		assert found;

		return track;
	}

	public void dropTracks( PointTracker.Dropper dropper ) {
		// iterate in reverse since removeSwap() only reorders elements after the removed index
		for (int i = tracksAll.size() - 1; i >= 0; i--) {
			PointTrack track = tracksAll.get(i);
			if (!dropper.shouldDropTrack(track))
				continue;
			dropTrackIndexInAll(i);
		}
	}

	public ImageType<I> getImageType() {
		return this.detector.getInputType();
	}
}
| 3,662 |
1,074 | <gh_stars>1000+
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import crypten
try:
from ..benchmarks import benchmark
except ValueError:
# ValueError is raised for relative import
# when calling $python -m unittest test/test_benchmark.py
from benchmarks import benchmark
class TestBenchmark(unittest.TestCase):
    def setUp(self):
        # Every test needs an initialized crypten runtime.
        crypten.init()

    @unittest.skip("Skipping to resolve timeout issues in unittest framework")
    def test_func_benchmarks_run(self):
        """Function benchmarks should complete without raising."""
        benchmark.FuncBenchmarks().run()

    @unittest.skip("Skipping to resolve timeout issues in unittest framework")
    def test_model_benchmarks_run(self):
        """Model benchmarks should complete without raising."""
        bench = benchmark.ModelBenchmarks()
        for model in bench.models:
            model.epochs = 2
        bench.run()

    @unittest.skip("Skipping to resolve timeout issues in unittest framework")
    def test_func_benchmarks_data(self):
        """Sanity-check row count and value columns of function benchmarks."""
        bench = benchmark.FuncBenchmarks()
        bench.run()

        # One row per benchmarked unary / binary function and layer.
        expected_n_rows = sum(
            len(ops)
            for ops in (
                benchmark.FuncBenchmarks.UNARY,
                benchmark.FuncBenchmarks.BINARY,
                benchmark.FuncBenchmarks.LAYERS,
            )
        )
        n_rows = bench.df.shape[0]
        self.assertEqual(
            n_rows,
            expected_n_rows,
            msg=f"function benchmarks {n_rows} rows. Expected {expected_n_rows}",
        )

        self.assertGreater(
            bench.df["total abs error"].sum(),
            0,
            msg="total abs error should be greater than 0",
        )
        self.assertTrue(
            all(bench.df["runtime"] > 0),
            msg="runtime is less than or equal to zero",
        )
        self.assertTrue(
            all(bench.df["runtime crypten"] > 0),
            msg="crypten runtime is less than or equal to zero",
        )

    @unittest.skip("Skipping to resolve timeout issues in unittest framework")
    def test_model_benchmarks_data(self):
        """Sanity-check row count and value columns of model benchmarks."""
        bench = benchmark.ModelBenchmarks()
        for model in bench.models:
            model.epochs = 2
        bench.run()

        # Two rows per model (plaintext and encrypted runs).
        expected_n_rows = 2 * len(bench.models)
        n_rows = bench.df.shape[0]
        self.assertEqual(
            n_rows,
            expected_n_rows,
            msg=f"model benchmarks have {n_rows} rows. Expected {expected_n_rows}",
        )

        self.assertTrue(
            all(bench.df["seconds per epoch"] > 0),
            msg="seconds per epoch should be greater than 0",
        )
        self.assertTrue(
            all(bench.df["inference time"] > 0),
            msg="inference time should be greater than 0",
        )
        self.assertTrue(
            all(bench.df["accuracy"] > 0) and all(bench.df["accuracy"] < 1.0),
            msg="accuracy should be between 0 and 1.0",
        )

    @unittest.skip("Skipping to resolve timeout issues in unittest framework")
    def test_advanced_model_benchmarks(self):
        """The advanced_models flag should add extra models."""
        basic = benchmark.ModelBenchmarks(advanced_models=False)
        self.assertTrue(all(not model.advanced for model in basic.models))

        extended = benchmark.ModelBenchmarks(advanced_models=True)
        self.assertGreater(len(extended.models), len(basic.models))
# Allows running this file directly (outside the test harness):
# discovers and runs the tests above.
if __name__ == "__main__":
    unittest.main()
| 1,673 |
2,504 | //*********************************************************
//
// Copyright (c) Microsoft. All rights reserved.
// This code is licensed under the MIT License (MIT).
// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF
// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY
// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR
// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT.
//
//*********************************************************
#include "pch.h"
#include "Scenario6_TimeZone.xaml.h"
using namespace SDKTemplate;
using namespace Platform;
using namespace Platform::Collections;
using namespace Windows::Foundation;
using namespace Windows::Foundation::Collections;
using namespace Windows::Globalization;
using namespace Windows::Globalization::DateTimeFormatting;
using namespace Windows::UI::Xaml;
using namespace Windows::UI::Xaml::Controls;
using namespace Windows::UI::Xaml::Navigation;
// Constructs the scenario page and loads its XAML-defined UI.
Scenario6_TimeZone::Scenario6_TimeZone()
{
    InitializeComponent();
}
void Scenario6_TimeZone::Display()
{
// This scenario illustrates time zone support in DateTimeFormatter class
// Displayed time zones (other than local time zone)
auto timeZones = ref new Vector<String^>({ "UTC", "America/New_York", "Asia/Kolkata" });
// Keep the results here
String^ results = "";
// Create formatter object using longdate and longtime template
DateTimeFormatter^ dateFormatter = ref new DateTimeFormatter("longdate longtime");
// Create date/time to format and display.
Windows::Globalization::Calendar^ calendar = ref new Windows::Globalization::Calendar();
Windows::Foundation::DateTime dateToFormat = calendar->GetDateTime();
// Show current time.
results += "Current date and time\n" +
"In Local time zone: " + dateFormatter->Format(dateToFormat) + "\n";
for (auto timeZone : timeZones)
{
results += "In " + timeZone + " time zone: " + dateFormatter->Format(dateToFormat, timeZone) + "\n";
}
results += "\n";
// Show a time on 14th day of second month of next year.
// Note the effect of daylight saving on the results.
calendar->AddYears(1); calendar->Month = 2; calendar->Day = 14;
dateToFormat = calendar->GetDateTime();
results += "Same time on 14th day of second month of next year\n" +
"In Local time zone: " + dateFormatter->Format(dateToFormat) + "\n";
for (auto timeZone : timeZones)
{
results += "In " + timeZone + " time zone: " + dateFormatter->Format(dateToFormat, timeZone) + "\n";
}
results += "\n";
// Show a time on 14th day of tenth month of next year.
// Note the effect of daylight saving on the results.
calendar->AddMonths(8);
dateToFormat = calendar->GetDateTime();
results += "Same time on 14th day of tenth month of next year\n" +
"In Local time zone: " + dateFormatter->Format(dateToFormat) + "\n";
for (auto timeZone : timeZones)
{
results += "In " + timeZone + " time zone: " + dateFormatter->Format(dateToFormat, timeZone) + "\n";
}
results += "\n";
// Display the results.
OutputTextBlock->Text = results;
}
| 1,118 |
2,177 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# <NAME> - port to Python
# <NAME> - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import Win1251CyrillicModel, Koi8rModel, Latin5CyrillicModel, MacCyrillicModel, Ibm866Model, Ibm855Model
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
    """Group prober bundling every single-byte charset prober.

    One ``SingleByteCharSetProber`` is created per language model, plus a
    dedicated Hebrew prober pair (logical and visual order) that shares a
    common ``HebrewProber`` to decide between the two orderings.
    """

    def __init__(self):
        CharSetGroupProber.__init__(self)
        # One prober per single-byte language model.
        self._mProbers = [
            SingleByteCharSetProber(model)
            for model in (
                Win1251CyrillicModel,
                Koi8rModel,
                Latin5CyrillicModel,
                MacCyrillicModel,
                Ibm866Model,
                Ibm855Model,
                Latin7GreekModel,
                Win1253GreekModel,
                Latin5BulgarianModel,
                Win1251BulgarianModel,
                Latin2HungarianModel,
                Win1250HungarianModel,
                TIS620ThaiModel,
            )
        ]
        # Hebrew needs two probers (logical + visual order) coordinated by a
        # shared HebrewProber that picks the winning ordering.
        hebrew_prober = HebrewProber()
        logical_hebrew = SingleByteCharSetProber(Win1255HebrewModel, False, hebrew_prober)
        visual_hebrew = SingleByteCharSetProber(Win1255HebrewModel, True, hebrew_prober)
        hebrew_prober.set_model_probers(logical_hebrew, visual_hebrew)
        self._mProbers.extend([hebrew_prober, logical_hebrew, visual_hebrew])

        self.reset()
| 1,135 |
435 | <reponame>amaajemyfren/data<filename>pycon-italia-2018/videos/m-scarpa-pelican-e-perche-generare-siti-statici.json<gh_stars>100-1000
{
"copyright_text": null,
"description": "Oggi nel web tutti hanno un sito o un blog che aggiornano e mantengono\ncon articoli e informazioni di vario genere ed esistono svariate\npiattaforme per siti web dinamici.\n\nQui invece si parla del contrario, di come mantenere un sito statico,\npi\u00f9 performante e che risulta anche pi\u00f9 economico e rilassante da\nmantenere, integrandolo con tecnologie come i Jupyter Notebook e Git per\nottenere una completa interazione con le tecnologie pi\u00f9 moderne e usate.\nVerranno anche presentati un paio di casi d\u2019uso di Pelican in situazioni\nreali.\n\nCome prerequisiti si assume che l\u2019ascoltatore sappia cosa sia un CMS e\n(a grandi linee) come funziona e cosa sia un Jupyter Notebook (non \u00e8\nnecessario che sappia usarne uno)-\n\nin \\_\\_on **venerd\u00ec 20 aprile** at 11:00 `**See\nschedule** </p3/schedule/pycon9/>`__\n",
"duration": 1920,
"language": "ita",
"recorded": "2018-04-20",
"related_urls": [
{
"label": "Conference schedule",
"url": "https://www.pycon.it/p3/schedule/pycon9/"
}
],
"speakers": [
"<NAME>"
],
"tags": [
"web",
"jinja2",
"blogging",
"Pelican"
],
"thumbnail_url": "https://i.ytimg.com/vi/Z8t0lNEmnHQ/maxresdefault.jpg",
"title": "Pelican e perch\u00e8 generare siti statici",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=Z8t0lNEmnHQ"
}
]
}
| 686 |
360 | <filename>src/include/access/extreme_rto/redo_item.h
/*
* Copyright (c) 2020 Huawei Technologies Co.,Ltd.
*
* openGauss is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
* ---------------------------------------------------------------------------------------
*
* redo_item.h
*
*
*
* IDENTIFICATION
* src/include/access/extreme_rto/redo_item.h
*
* ---------------------------------------------------------------------------------------
*/
#ifndef EXTREME_RTO_REDO_ITEM_H
#define EXTREME_RTO_REDO_ITEM_H
#include "access/xlogreader.h"
#include "datatype/timestamp.h"
#include "nodes/pg_list.h"
#include "utils/atomic.h"
#include "storage/buf/block.h"
#include "storage/smgr/relfilenode.h"
#include "access/extreme_rto/posix_semaphore.h"
#include "replication/replicainternal.h"
namespace extreme_rto {
/* A unit of WAL redo work handed to parallel-replay workers. */
typedef struct RedoItem_s {
    /* Old version. */
    bool oldVersion;
    /* Request an immediate checkpoint after this record is replayed. */
    bool needImmediateCheckpoint;
    /* Request a full-sync checkpoint after this record is replayed. */
    bool needFullSyncCheckpoint;
    /* Number of workers sharing this item. */
    uint32 shareCount;
    /* Id of the worker designated to apply this item. */
    uint32 designatedWorker;
    /* The expected timelines for this record. */
    List *expectedTLIs;
    /* The timestamp of the log record if it is a transaction record. */
    TimestampTz recordXTime;
    /* Next item on the free list. */
    struct RedoItem_s *freeNext;
    /* Number of workers holding a reference to this item. */
    pg_atomic_uint32 refCount;
    /* If this item has been replayed. */
    pg_atomic_uint32 replayed;
    /* A "deep" copy of the log record. */
    XLogReaderState record;
    /* Used for really free */
    struct RedoItem_s *allocatedNext;
    /* Snapshot of xlog-receipt bookkeeping captured when the item was built
     * (presumably mirrors the walreceiver state; confirm against CreateRedoItem). */
    TimestampTz syncXLogReceiptTime;
    int syncXLogReceiptSource;
    /* RecentXmin captured for replay of this record. */
    TransactionId RecentXmin;
    /* Server mode (primary/standby) at the time the item was created. */
    ServerMode syncServerMode;
    /* temp variable indicate number of redo items with same block number */
    pg_atomic_uint32 blkShareCount;
    /* NOTE(review): looks like a "must be applied by all workers" flag — confirm. */
    bool isForceAll;
    /* Number of times this item has been distributed to workers (atomic). */
    pg_atomic_uint32 distributeCount;
} RedoItem;
/* Sentinel meaning "matches any block id". */
static const int32 ANY_BLOCK_ID = -1;
/* Sentinel values for RedoItem::designatedWorker: any single worker, the
 * transaction worker, all workers, or the ustore worker — presumably
 * dispatch targets; confirm against the dispatcher code. */
static const uint32 ANY_WORKER = (uint32)-1;
static const uint32 TRXN_WORKER = (uint32)-2;
static const uint32 ALL_WORKER = (uint32)-3;
static const uint32 USTORE_WORKER = (uint32)-4;
/*
 * Recovers the owning RedoItem from a pointer to its embedded
 * XLogReaderState member by subtracting the member's byte offset.
 */
static inline RedoItem *GetRedoItemPtr(XLogReaderState *record)
{
    char *base = ((char *)record) - offsetof(RedoItem, record);
    return (RedoItem *)base;
}
RedoItem *CreateRedoItem(XLogReaderState *record, uint32 shareCount, uint32 designatedWorker, List *expectedTLIs,
TimestampTz recordXTime, bool buseoriginal, bool isForceAll = false);
void ApplyRedoRecord(XLogReaderState *record, bool bOld);
} // namespace extreme_rto
#endif
| 1,068 |
697 | <filename>src/VAC/LayersWidget.cpp
// Copyright (C) 2012-2019 The VPaint Developers.
// See the COPYRIGHT file at the top-level directory of this distribution
// and at https://github.com/dalboris/vpaint/blob/master/COPYRIGHT
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "LayersWidget.h"
#include <cmath>
#include <QCheckBox>
#include <QHBoxLayout>
#include <QLabel>
#include <QLineEdit>
#include <QPushButton>
#include <QScrollArea>
#include <QVBoxLayout>
#include "Layer.h"
#include "Scene.h"
namespace impl_
{
// Builds the row widget for one layer: a visibility checkbox followed by the
// layer name (a label normally; a hidden line edit shown while renaming).
LayerWidget::LayerWidget(int index) :
    index_(index),
    isActive_(false)
{
    // Visibility toggle, checked by default.
    visibilityCheckBox_ = new QCheckBox();
    visibilityCheckBox_->setCheckState(Qt::Checked);
    visibilityCheckBox_->setSizePolicy(QSizePolicy::Maximum, QSizePolicy::Maximum);

    // Read-only name display.
    nameLabel_ = new QLabel();
    nameLabel_->setMinimumHeight(30);

    // In-place name editor, hidden until a rename starts.
    nameLineEdit_ = new QLineEdit();
    nameLineEdit_->setMinimumHeight(30);
    nameLineEdit_->hide();

    connect(visibilityCheckBox_, SIGNAL(stateChanged(int)), this, SLOT(onVisibilityCheckBoxStateChanged_(int)));
    connect(visibilityCheckBox_, SIGNAL(clicked(bool)), this, SLOT(onVisibilityCheckBoxClicked_(bool)));
    connect(nameLineEdit_, &QLineEdit::editingFinished, this, &LayerWidget::onNameLineEditEditingFinished_);

    QHBoxLayout * row = new QHBoxLayout();
    row->addWidget(visibilityCheckBox_);
    row->addWidget(nameLabel_);
    row->addWidget(nameLineEdit_);
    setLayout(row);

    setAutoFillBackground(true);
    updateBackground_();
}
// Nothing to do: child widgets were reparented to this widget by the layout
// and are deleted by Qt's parent-child ownership.
LayerWidget::~LayerWidget()
{
}
// Returns the position of this layer row, as assigned at construction.
int LayerWidget::index() const
{
    return index_;
}
// Returns whether this row is the currently active (selected) layer.
bool LayerWidget::isActive() const
{
    return isActive_;
}
// Activates or deactivates this row. On any change the background is
// refreshed; activation (but not deactivation) also emits activated().
void LayerWidget::setActive(bool b)
{
    if (b == isActive_)
    {
        return; // no change
    }

    isActive_ = b;
    updateBackground_();

    if (isActive_)
    {
        emit activated(index());
    }
}
// Returns whether the layer is marked visible (checkbox checked).
bool LayerWidget::visibility() const
{
    return visibilityCheckBox_->isChecked();
}
// Programmatically sets the visibility checkbox.
void LayerWidget::setVisibility(bool b)
{
    if (b == visibility())
    {
        return; // already in the requested state
    }

    // Toggling the checkbox triggers onVisibilityCheckBoxStateChanged_(),
    // which emits visibilityChanged(); no signal is emitted here directly.
    visibilityCheckBox_->setChecked(b);
}
QString LayerWidget::name() const
{
return nameLabel_->text();
}
bool LayerWidget::setName(const QString& newName)
{
// Abord name editing if any
abortNameEditing_();
// Set new name if different form current name
if (newName != name())
{
nameLabel_->setText(newName);
emit nameChanged(index());
return true;
}
else
{
return false;
}
}
void LayerWidget::startNameEditing()
{
startNameEditing_(NameEditingReason_::ExternalRequest);
}
void LayerWidget::mousePressEvent(QMouseEvent*)
{
if (!isActive_)
{
setActive(true);
emit checkpoint();
}
}
void LayerWidget::mouseDoubleClickEvent(QMouseEvent*)
{
startNameEditing_(NameEditingReason_::DoubleClick);
}
void LayerWidget::onVisibilityCheckBoxClicked_(bool)
{
emit checkpoint();
}
void LayerWidget::onVisibilityCheckBoxStateChanged_(int)
{
emit visibilityChanged(index());
}
void LayerWidget::onNameLineEditEditingFinished_()
{
finishNameEditing_();
}
void LayerWidget::startNameEditing_(NameEditingReason_ reason)
{
if (!nameLineEdit_->isVisible())
{
nameEditingReason_ = reason;
nameLineEdit_->setText(name());
nameLabel_->hide();
nameLineEdit_->show();
nameLineEdit_->selectAll();
nameLineEdit_->setFocus();
}
}
void LayerWidget::abortNameEditing_()
{
if (nameLineEdit_->isVisible())
{
nameLineEdit_->hide();
nameLabel_->show();
}
}
void LayerWidget::finishNameEditing_()
{
if (nameLineEdit_->isVisible())
{
QString newName = nameLineEdit_->text();
nameLineEdit_->hide();
nameLabel_->show();
bool changed = setName(newName);
if (changed && nameEditingReason_ == NameEditingReason_::DoubleClick)
{
// We only emit checkpoint if the user action causing the scene to
// change is initiated from this LayerWidget. In other words, the
// widget responsible for starting a user action is the widget
// responsible for calling checkpoint.
emit checkpoint();
}
if (nameEditingReason_ == NameEditingReason_::ExternalRequest)
{
emit nameEditingFinished(index());
}
}
}
void LayerWidget::updateBackground_()
{
QPalette::ColorRole backgroundRole = isActive() ? QPalette::Highlight : QPalette::Base;
setBackgroundRole(backgroundRole);
}
} // namespace impl_
// Builds the layers panel: a scrollable, top-to-bottom list of LayerWidget
// rows above a row of New / Move Up / Move Down / Delete buttons.
//
// Row 0 is the *top* row and corresponds to the *last* scene layer; the
// slots below convert between the two with j = numVisibleLayerWidgets_ - 1 - i.
LayersWidget::LayersWidget(Scene * scene) :
    scene_(scene),
    numVisibleLayerWidgets_(0),
    activeLayerWidget_(nullptr)
{
    // VBoxLayout with all the individual LayerWidget instances
    layerListLayout_ = new QVBoxLayout();
    layerListLayout_->setContentsMargins(0,0,0,0);
    layerListLayout_->setSpacing(0);
    // Create one LayerWidget right now. It will be hidden shortly after if the
    // scene has in fact no layers.
    //
    // This is required because for some reason, LayerWidgets won't show up if
    // none exist before layerListLayout_ is added to the scrollArea. I suspect
    // this to be a bug of Qt.
    //
    createNewLayerWidget_();
    // Wrap the layerListLayout_ into yet another VBoxLayout.
    // We need this because:
    // 1. We need scrollArea->setWidgetResizable(true) to enable horizontal stretching
    //    of the LayerWidget items, so that the background color takes all the
    //    horizontal space when selected.
    // 2. Unfortunately, as a side effect, this enables vertical stretching too, which results
    //    in ugly vertical stretching of all the LayerWidget items.
    // 3. So we add a QSpacerItem to "eat" all the remaining space below layerListLayout_.
    QVBoxLayout * layerListLayout2 = new QVBoxLayout();
    layerListLayout2->setContentsMargins(0,0,0,0);
    layerListLayout2->setSpacing(0);
    layerListLayout2->addLayout(layerListLayout_);
    layerListLayout2->addStretch();
    // Put the vbox layout in a scrollarea
    QScrollArea * scrollArea = new QScrollArea();
    scrollArea->setWidgetResizable(true);
    QWidget * layerList = new QWidget();
    layerList->setLayout(layerListLayout2);
    scrollArea->setWidget(layerList);
    // Set background color for scrollarea
    scrollArea->setBackgroundRole(QPalette::Base);
    scrollArea->setAutoFillBackground(true);
    // Create buttons
    QPushButton * newLayerButton = new QPushButton(tr("New"));
    QPushButton * moveLayerUpButton = new QPushButton(tr("Move Up"));
    QPushButton * moveLayerDownButton = new QPushButton(tr("Move Down"));
    QPushButton * deleteLayerButton = new QPushButton(tr("Delete"));
    connect(newLayerButton, &QPushButton::clicked, this, &LayersWidget::onNewLayerClicked_);
    connect(moveLayerUpButton, &QPushButton::clicked, this, &LayersWidget::onMoveLayerUpClicked_);
    connect(moveLayerDownButton, &QPushButton::clicked, this, &LayersWidget::onMoveLayerDownClicked_);
    connect(deleteLayerButton, &QPushButton::clicked, this, &LayersWidget::onDeleteLayerClicked_);
    QHBoxLayout * buttonsLayout = new QHBoxLayout();
    buttonsLayout->addWidget(newLayerButton);
    buttonsLayout->addWidget(moveLayerUpButton);
    buttonsLayout->addWidget(moveLayerDownButton);
    buttonsLayout->addStretch();
    buttonsLayout->addWidget(deleteLayerButton);
    // Add scrollarea to this widget
    QVBoxLayout * layout = new QVBoxLayout();
    layout->addWidget(scrollArea);
    layout->addLayout(buttonsLayout);
    setLayout(layout);
    // Connect to scene
    updateUiFromScene_();
    connect(scene_, SIGNAL(layerAttributesChanged()), this, SLOT(onSceneLayerAttributesChanged_()));
}
LayersWidget::~LayersWidget()
{
}
Scene * LayersWidget::scene() const
{
    return scene_;
}
// A row was clicked: make the corresponding scene layer active.
void LayersWidget::onLayerWidgetActivated_(int index)
{
    // Flip the index: row 0 (top) is the last scene layer.
    scene()->setActiveLayer(numVisibleLayerWidgets_ - 1 - index);
}
// A row's visibility checkbox changed: forward the state to the scene layer.
void LayersWidget::onLayerWidgetVisibilityChanged_(int index)
{
    if (0 <= index && index < numVisibleLayerWidgets_) {
        int j = numVisibleLayerWidgets_ - 1 - index;
        bool visibility = layerWidgets_[index]->visibility();
        scene()->layer(j)->setVisible(visibility);
    }
}
// A row's name changed: forward the new name to the scene layer.
void LayersWidget::onLayerWidgetNameChanged_(int index)
{
    if (0 <= index && index < numVisibleLayerWidgets_) {
        int j = numVisibleLayerWidgets_ - 1 - index;
        QString name = layerWidgets_[index]->name();
        scene()->layer(j)->setName(name);
    }
}
void LayersWidget::onLayerWidgetNameEditingFinished_(int)
{
    scene()->emitCheckpoint();
}
void LayersWidget::onLayerWidgetCheckpoint_()
{
    scene()->emitCheckpoint();
}
void LayersWidget::onNewLayerClicked_()
{
    // Create layer. This should indirectly create the corresponding
    // LayerWidget, unless using asynchronous signals/slots.
    Layer * layer = scene()->createLayer(tr("New Layer"));
    // Enter name editing mode. We need to check in case of asynchronous
    // signals/slots.
    //
    if (activeLayerWidget_)
    {
        int j = numVisibleLayerWidgets_ - 1 - activeLayerWidget_->index();
        if (scene()->layer(j) == layer)
        {
            activeLayerWidget_->startNameEditing();
            // Checkpoint will be emitted in onLayerWidgetNameEditingFinished_
        }
    }
    else
    {
        // This is not supposed to happen
        scene()->emitCheckpoint();
    }
}
void LayersWidget::onDeleteLayerClicked_()
{
    scene()->destroyActiveLayer();
    scene()->emitCheckpoint();
}
void LayersWidget::onMoveLayerUpClicked_()
{
    scene()->moveActiveLayerUp();
    scene()->emitCheckpoint();
}
void LayersWidget::onMoveLayerDownClicked_()
{
    scene()->moveActiveLayerDown();
    scene()->emitCheckpoint();
}
void LayersWidget::onSceneLayerAttributesChanged_()
{
    updateUiFromScene_();
}
// Re-syncs the widget list with the scene: shows/creates/hides rows so the
// visible count matches the scene's layer count, then updates names,
// visibility checkboxes, and the active row highlight.
void LayersWidget::updateUiFromScene_()
{
    // Show as many existing LayerWidgets as necessary
    int numLayers = scene()->numLayers();
    int numLayerWidgets = layerWidgets_.size();
    int newNumVisibleLayerWidgets = std::min(numLayers, numLayerWidgets);
    for (int i = numVisibleLayerWidgets_; i < newNumVisibleLayerWidgets; ++i) {
        layerWidgets_[i]->show();
        ++numVisibleLayerWidgets_;
    }
    // Create as many new LayerWidgets as necessary
    // (createNewLayerWidget_ also increments numVisibleLayerWidgets_)
    for (int i = numVisibleLayerWidgets_; i < numLayers; ++i) {
        createNewLayerWidget_();
    }
    // Hide superfluous LayerWidgets
    for (int i = numLayers; i < numVisibleLayerWidgets_; ++i) {
        layerWidgets_[i]->hide();
    }
    numVisibleLayerWidgets_ = numLayers;
    // Set LayerWidgets names and visibility
    for (int i = 0; i < numVisibleLayerWidgets_; ++i) {
        int j = numVisibleLayerWidgets_ - 1 - i;
        bool visibility= scene()->layer(j)->isVisible();
        QString name = scene()->layer(j)->name();
        layerWidgets_[i]->setVisibility(visibility);
        layerWidgets_[i]->setName(name);
    }
    // Set active LayerWidget
    int jActive = scene()->activeLayerIndex();
    int iActive = numVisibleLayerWidgets_ - 1 - jActive;
    if (activeLayerWidget_ && activeLayerWidget_->index() != iActive) {
        activeLayerWidget_->setActive(false);
        activeLayerWidget_ = nullptr;
    }
    if (0 <= iActive && iActive < numVisibleLayerWidgets_) {
        activeLayerWidget_ = layerWidgets_[iActive];
        activeLayerWidget_->setActive(true);
    }
}
// Precondition: all LayerWidgets are visible
void LayersWidget::createNewLayerWidget_()
{
    impl_::LayerWidget * layerWidget = new impl_::LayerWidget(layerWidgets_.size());
    ++numVisibleLayerWidgets_;
    layerWidgets_.push_back(layerWidget);
    layerListLayout_->addWidget(layerWidget);
    connect(layerWidget, &impl_::LayerWidget::activated, this, &LayersWidget::onLayerWidgetActivated_);
    connect(layerWidget, &impl_::LayerWidget::visibilityChanged, this, &LayersWidget::onLayerWidgetVisibilityChanged_);
    connect(layerWidget, &impl_::LayerWidget::nameChanged, this, &LayersWidget::onLayerWidgetNameChanged_);
    connect(layerWidget, &impl_::LayerWidget::nameEditingFinished, this, &LayersWidget::onLayerWidgetNameEditingFinished_);
    connect(layerWidget, &impl_::LayerWidget::checkpoint, this, &LayersWidget::onLayerWidgetCheckpoint_);
}
| 4,722 |
302 | """
@Author : <NAME>
@Email : <EMAIL>
@Time : 2020/7/25 14:16
"""
| 37 |
1,005 | <reponame>wxy347/gan-compression<filename>interactive_demo/models/sub_mobile_resnet_generator.py<gh_stars>1000+
import functools
from torch import nn
from models.mobile_modules import SeparableConv2d
class MobileResnetBlock(nn.Module):
    """Residual block built from depthwise-separable convs: out = x + conv_block(x)."""
    def __init__(self, ic, oc, padding_type, norm_layer, dropout_rate, use_bias):
        """ic/oc are the input and intermediate channel counts; the block maps ic -> oc -> ic."""
        super(MobileResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(ic, oc, padding_type, norm_layer, dropout_rate, use_bias)
    def build_conv_block(self, ic, oc, padding_type, norm_layer, dropout_rate, use_bias):
        """Builds pad -> sepconv(ic->oc) -> norm -> ReLU -> dropout -> pad -> sepconv(oc->ic) -> norm."""
        conv_block = []
        p = 0
        # 'reflect'/'replicate' use an explicit padding layer; 'zero' pads inside the conv.
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv_block += [
            SeparableConv2d(in_channels=ic, out_channels=oc,
                            kernel_size=3, padding=p, stride=1),
            norm_layer(oc),
            nn.ReLU(True)
        ]
        conv_block += [nn.Dropout(dropout_rate)]
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv_block += [
            SeparableConv2d(in_channels=oc, out_channels=ic,
                            kernel_size=3, padding=p, stride=1),
            # NOTE(review): this conv outputs ic channels but the norm is built
            # with oc; harmless for channel-agnostic norms (e.g. InstanceNorm2d
            # without affine) or when ic == oc -- confirm before using BatchNorm2d
            # with ic != oc.
            norm_layer(oc)
        ]
        return nn.Sequential(*conv_block)
    def forward(self, x):
        """Residual forward pass; requires conv_block to preserve the channel count of x."""
        out = x + self.conv_block(x)
        return out
class SubMobileResnetGenerator(nn.Module):
    """ResNet-style image-to-image generator built from a pruned channel
    configuration.

    ``config['channels']`` appears to list retained channel counts in order:
    stem, two downsampling convs, the resnet-block widths, then the two
    upsampling convs (offset 4 when the list has 6 entries, 6 otherwise) --
    TODO confirm against the search-space definition.
    """
    def __init__(self, input_nc, output_nc, config, norm_layer=nn.BatchNorm2d,
                 dropout_rate=0, n_blocks=9, padding_type='reflect'):
        assert n_blocks >= 0
        super(SubMobileResnetGenerator, self).__init__()
        # InstanceNorm2d has no learnable shift by default, so the convs need
        # their own bias; BatchNorm2d provides one, so bias is disabled.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        # Stem: 7x7 conv at full resolution.
        model = [nn.ReflectionPad2d(3),
                 nn.Conv2d(input_nc, config['channels'][0], kernel_size=7, padding=0, bias=use_bias),
                 norm_layer(config['channels'][0]),
                 nn.ReLU(True)]
        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i
            ic = config['channels'][i]
            oc = config['channels'][i + 1]
            # NOTE(review): the conv outputs oc*mult*2 channels while the norm
            # is built with ic*mult*2; fine for channel-agnostic norms or when
            # ic == oc -- confirm for affine/BatchNorm configurations.
            model += [nn.Conv2d(ic * mult, oc * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                      norm_layer(ic * mult * 2),
                      nn.ReLU(True)]
        mult = 2 ** n_downsampling
        ic = config['channels'][2]
        # oc = config['channels'][3]
        # for i in range(n_blocks):
        #     oc = config['channels'][i + 3]
        #     model += [MobileResnetBlock(ic * mult, oc * mult, padding_type=padding_type, norm_layer=norm_layer,
        #                                 dropout_rate=dropout_rate, use_bias=use_bias)]
        # Resnet blocks: with a 6-entry channel list all blocks share one width;
        # otherwise a new width is read every 3 blocks.
        for i in range(n_blocks):
            if len(config['channels']) == 6:
                offset = 0
            else:
                offset = i // 3
            oc = config['channels'][offset + 3]
            model += [MobileResnetBlock(ic * mult, oc * mult, padding_type=padding_type, norm_layer=norm_layer,
                                        dropout_rate=dropout_rate, use_bias=use_bias)]
        if len(config['channels']) == 6:
            offset = 4
        else:
            offset = 6
        for i in range(n_downsampling):  # add upsampling layers
            oc = config['channels'][offset + i]
            # print(oc)
            mult = 2 ** (n_downsampling - i)
            model += [nn.ConvTranspose2d(ic * mult, int(oc * mult / 2),
                                         kernel_size=3, stride=2,
                                         padding=1, output_padding=1,
                                         bias=use_bias),
                      norm_layer(int(oc * mult / 2)),
                      nn.ReLU(True)]
            ic = oc
        # Head: 7x7 conv back to output_nc channels, tanh to [-1, 1].
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ic, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]
        self.model = nn.Sequential(*model)
    def forward(self, input):
        # Inputs are clamped to the generator's expected [-1, 1] range.
        input = input.clamp(-1, 1)
        return self.model(input)
| 2,463 |
4,036 | <reponame>vadi2/codeql
#define MY_MACRO()
MY_MACRO()
#ifdef UNRELATED_MACRO
#undef UNRELATED_MACRO
#endif
| 49 |
589 | <filename>KKJSBridgeDemo/KKJSBridgeDemo/HtmlURLProtocol.h
//
// HtmlURLProtocol.h
// KKJSBridgeDemo
//
// Created by <NAME> on 2020/7/22.
// Copyright © 2020 karosli. All rights reserved.
//
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
/// NSURLProtocol subclass used by KKJSBridgeDemo. Callers register a URL
/// scheme to have requests with that scheme routed through this protocol
/// (presumably to intercept HTML page loads -- see the .m for the handling).
@interface HtmlURLProtocol : NSURLProtocol
/// Start handling requests whose URL scheme matches `scheme`.
+ (void)HtmlURLProtocolRegisterScheme:(NSString *)scheme;
/// Stop handling requests whose URL scheme matches `scheme`.
+ (void)HtmlURLProtocolUnregisterScheme:(NSString *)scheme;
@end
| 174 |
5,169 | {
"name": "SSBannerViewController",
"version": "1.0.0",
"summary": "Simple infinite scrolling bannerView with custom page control",
  "description": "A simple infinite scrolling bannerView with custom page control.\nThe shining part is to use 2 UIImageViews to accomplish any number of banner image scrolling, so as to minimize the memory use.\nAlso a custom page control may be useful as well.",
"homepage": "https://github.com/suruihai/SSBannerViewController",
"license": "MIT",
"authors": {
"stevenSu": "<EMAIL>"
},
"platforms": {
"ios": "8.0"
},
"source": {
"git": "https://github.com/suruihai/SSBannerViewController.git",
"tag": "1.0.0"
},
"source_files": "Source/*.{h,m}",
"exclude_files": "Classes/Exclude",
"requires_arc": true
}
| 269 |
1,253 | <reponame>CarbonDDR/al-go-rithms
import cv2
import numpy as np

# Recover the hidden image by XOR-ing the two images pixel-wise.
lemur = cv2.imread("./lemur.png")
flag = cv2.imread("./flag.png")

# Vectorized XOR over the whole (height, width, channel) array replaces the
# original triple Python loop, which did one interpreted XOR per channel value.
# Raises if the two images do not share the same shape, which the loop version
# also required (it indexed `flag` with `lemur`'s dimensions).
answer = np.bitwise_xor(lemur, flag)

cv2.imwrite("answer.png", answer)
| 176 |
364 | // The MIT License (MIT)
// Copyright (c) 2016, Microsoft
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#pragma once
#include <stddef.h> // size_t parameter.
#include <vector> // std::vector embedded.
#include "BitFunnel/BitFunnelTypes.h" // Rank parameter, DocId template parameter.
#include "ICodeVerifier.h" // Base class.
#include "BitFunnel/Index/RowId.h" // RowId parameter.
namespace BitFunnel
{
    // Forward declarations; full definitions are not needed in this header.
    class ByteCodeGenerator;
    class IShard;
    class ISimpleIndex;
    class ResultsBuffer;
    //*************************************************************************
    //
    // CodeVerifierBase
    //
    // This class verifies the correctness of a matching algorithm.
    //
    //*************************************************************************
    class CodeVerifierBase : public ICodeVerifier
    {
    public:
        CodeVerifierBase(ISimpleIndex const & index, Rank initialRank);
        //
        // ICodeVerifier methods.
        //
        // Enables diagnostic output while verifying.
        virtual void VerboseMode(bool mode) override;
        // Declares that the matcher is expected to produce no results at all.
        virtual void ExpectNoResults() override;
        virtual std::vector<size_t> const & GetIterations() const override;
        virtual size_t GetSliceNumber(size_t iteration) const override;
        virtual size_t GetOffset(size_t iteration) const override;
        virtual size_t GetIterationsPerSlice() const override;
        virtual void DeclareRow(char const * text) override;
        virtual uint64_t GetRowData(size_t row,
                                    size_t offset,
                                    size_t slice) override;
        // The class is configured by adding a sequence of records
        // describing the expected interactions between the matcher and its
        // IResultsProcessor. Each call to Add() corresponds to expectation
        // that the matcher will invoke its interal AddResult() method. The
        // accumulator and offset parameters to Add() are the expected
        // parameters to AddResult().
        //
        // The usage pattern for IResultsProcessor is a sequence of calls
        // to AddResult() interspersed with calls to FinishIteration().
        // The call to FinishIteration() provides the void* slice buffer
        // pointer that is applicable to the sequence of calls to AddResult()
        // since the previous call to FinishIteration() (or the start of
        // the matching algorithm if there was no previous call to
        // FinishIteration().
        //
        // The third parameter of the Add() method indicates the slice
        // that expected to be passed on the next call to FinishIteration.
        // The slice parameter is a size_t index into an array of slice
        // buffer pointers associated with the index.
        //
        virtual void ExpectResult(uint64_t accumulator,
                                  size_t offset,
                                  size_t slice) override;
    protected:
        // Compares the DocIds reported by the matcher against the expected set.
        void CheckResults(ResultsBuffer const & results);
    private:
        static RowId GetFirstRow(ITermTable const & termTable,
                                 Term term);
        static ptrdiff_t GetRowOffset(char const * text,
                                      Term::StreamId stream,
                                      IConfiguration const & config,
                                      ITermTable const & termTable,
                                      IShard const & shard);
        //
        // Constructor parameters.
        //
    protected:
        ISimpleIndex const & m_index;
        Rank m_initialRank;
        //
        // Other parameters.
        //
    protected:
        // Slice buffer pointers associated with the index (see ExpectResult).
        std::vector<void *> const & m_slices;
    private:
        std::vector<size_t> m_iterationValues;
    protected:
        std::vector<ptrdiff_t> m_rowOffsets;
    protected:
        size_t m_resultsCount;
        bool m_verboseMode;
        bool m_expectNoResults;
        // DocIds actually reported by the matcher ...
        std::set<DocId> m_observed;
    private:
        // ... versus the DocIds the test expects.
        // NOTE(review): <set> is presumably pulled in transitively -- confirm.
        std::set<DocId> m_expected;
    };
}
| 1,950 |
335 | {
"word": "Wastebasket",
"definitions": [
"A waste-paper basket."
],
"parts-of-speech": "Noun"
} | 60 |
2,325 | <filename>tests/modules/embedders/embedder_utils_test.py
"""
Unit tests for embedder utils.
"""
import tensorflow as tf
from texar.tf.modules.embedders import embedder_utils
class GetEmbeddingTest(tf.test.TestCase):
    """Tests embedding creator.
    """
    def test_get_embedding(self):
        """Tests :func:`~texar.tf.modules.embedder.embedder_utils.get_embedding`.
        """
        vocab_size = 100
        # Default hparams: shape must be [vocab_size, default dim].
        emb = embedder_utils.get_embedding(num_embeds=vocab_size)
        # TF1-style shape access: entries are tf.Dimension, unwrapped via .value.
        self.assertEqual(emb.shape[0].value, vocab_size)
        self.assertEqual(emb.shape[1].value,
                         embedder_utils.default_embedding_hparams()["dim"])
        # Custom initializer/regularizer hparams must not affect the shape.
        hparams = {
            "initializer": {
                "type": tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
            },
            "regularizer": {
                "type": tf.keras.regularizers.L1L2(0.1, 0.1)
            }
        }
        # Distinct variable_scope avoids colliding with the variable created above.
        emb = embedder_utils.get_embedding(
            hparams=hparams, num_embeds=vocab_size,
            variable_scope='embedding_2')
        self.assertEqual(emb.shape[0].value, vocab_size)
        self.assertEqual(emb.shape[1].value,
                         embedder_utils.default_embedding_hparams()["dim"])
| 631 |
432 | package org.openmhealth.shim.fitbit.mapper;
import com.fasterxml.jackson.databind.JsonNode;
import org.openmhealth.schema.domain.omh.*;
import org.openmhealth.shim.common.mapper.DataPointMapperUnitTests;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
import java.io.IOException;
import java.time.LocalDate;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.util.List;
import static java.time.ZoneOffset.UTC;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.openmhealth.schema.domain.omh.DurationUnit.*;
import static org.openmhealth.schema.domain.omh.DurationUnit.DAY;
import static org.openmhealth.schema.domain.omh.KcalUnit.*;
import static org.openmhealth.schema.domain.omh.LengthUnit.KILOMETER;
/**
* @author <NAME>
* @author <NAME>
*/
public class FitbitPhysicalActivityDataPointMapperUnitTests extends DataPointMapperUnitTests {
    private final FitbitPhysicalActivityDataPointMapper mapper = new FitbitPhysicalActivityDataPointMapper();
    // JSON fixtures loaded once before the tests run (TestNG @BeforeTest).
    private JsonNode singleActivityResponseNode;
    private JsonNode multipleActivityResponseNode;
    @BeforeTest
    public void initializeResponseNodes() throws IOException {
        singleActivityResponseNode =
                asJsonNode("org/openmhealth/shim/fitbit/mapper/fitbit-activities-date-single-in-activities-list.json");
        multipleActivityResponseNode =
                asJsonNode(
                        "org/openmhealth/shim/fitbit/mapper/fitbit-activities-date-multiple-in-activities-list.json");
    }
    @Test
    public void asDataPointsShouldReturnEmptyListWhenResponseIsEmpty() throws IOException {
        JsonNode emptyNode =
                asJsonNode("org/openmhealth/shim/fitbit/mapper/fitbit-activities-date-empty-activities-list.json");
        assertThat(mapper.asDataPoints(emptyNode), is(empty()));
    }
    @Test
    public void asDataPointsShouldReturnCorrectNumberOfDataPoints() {
        assertThat(mapper.asDataPoints(singleActivityResponseNode).size(), equalTo(1));
        assertThat(mapper.asDataPoints(multipleActivityResponseNode).size(), equalTo(3));
    }
    @Test
    public void asDataPointsShouldReturnCorrectDataPointForSingleActivity() {
        List<DataPoint<PhysicalActivity>> dataPoints = mapper.asDataPoints(singleActivityResponseNode);
        assertThatDataPointMatches(dataPoints.get(0), "Walk", "2014-06-19", "09:00", 3.36, 3600000L, 79441095L, 128.0);
    }
    @Test
    public void asDataPointsShouldReturnCorrectDataPointsForMultipleActivities() {
        List<DataPoint<PhysicalActivity>> dataPoints = mapper.asDataPoints(multipleActivityResponseNode);
        assertThatDataPointMatches(dataPoints.get(0), "Run", "2015-06-23", "11:55", 6.43738, 1440000L, 253202765L,
                150.0);
        assertThatDataPointMatches(dataPoints.get(1), "Swimming", "2015-06-23", "10:00", null, null, 253246706L, null);
        // Third activity has no start time/duration, so the date-only (all-day) overload is used.
        assertThatDataPointMatches(dataPoints.get(2), "Walk", "2015-06-23", 6.43738, 253202766L, 200.0);
    }
    // Overload for all-day activities: the effective time frame is the whole calendar day (UTC).
    public void assertThatDataPointMatches(DataPoint<PhysicalActivity> dataPoint, String expectedActivityName,
            String expectedEffectiveDate, Double expectedDistance, Long expectedExternalId,
            Double expectedCaloriesBurned) {
        OffsetDateTime expectedEffectiveStartDateTime =
                OffsetDateTime.of(LocalDate.parse(expectedEffectiveDate).atStartOfDay(), UTC);
        TimeFrame expectedTimeFrame = new TimeFrame(
                TimeInterval.ofStartDateTimeAndDuration(expectedEffectiveStartDateTime, new DurationUnitValue(DAY, 1)));
        assertThatDataPointMatches(dataPoint, expectedActivityName, expectedTimeFrame, expectedDistance,
                expectedExternalId, expectedCaloriesBurned);
    }
    // Core assertions shared by all overloads: measure body, schema id, and acquisition provenance.
    public void assertThatDataPointMatches(DataPoint<PhysicalActivity> dataPoint, String expectedActivityName,
            TimeFrame expectedTimeFrame, Double expectedDistance, Long expectedExternalId,
            Double expectedCaloriesBurned) {
        PhysicalActivity.Builder expectedMeasureBuilder = new PhysicalActivity.Builder(expectedActivityName);
        expectedMeasureBuilder.setEffectiveTimeFrame(expectedTimeFrame);
        // Distance and calories are optional in the Fitbit payload.
        if (expectedDistance != null) {
            expectedMeasureBuilder.setDistance(new LengthUnitValue(KILOMETER, expectedDistance));
        }
        if (expectedCaloriesBurned != null) {
            expectedMeasureBuilder.setCaloriesBurned(new KcalUnitValue(KILOCALORIE, expectedCaloriesBurned));
        }
        PhysicalActivity expectedPhysicalActivity = expectedMeasureBuilder.build();
        assertThat(dataPoint.getBody(), equalTo(expectedPhysicalActivity));
        assertThat(dataPoint.getHeader().getBodySchemaId(), equalTo(PhysicalActivity.SCHEMA_ID));
        assertThat(dataPoint.getHeader().getAcquisitionProvenance().getAdditionalProperties().get("external_id"),
                equalTo(expectedExternalId));
        assertThat(dataPoint.getHeader().getAcquisitionProvenance().getSourceName(),
                equalTo(FitbitDataPointMapper.RESOURCE_API_SOURCE_NAME));
    }
    // Overload for timed activities: a null duration yields a point-in-time frame.
    public void assertThatDataPointMatches(DataPoint<PhysicalActivity> dataPoint, String expectedActivityName,
            String expectedEffectiveDate, String expectedEffectiveStartTime, Double expectedDistance,
            Long expectedDurationInMs, Long expectedExternalId, Double expectedCaloriesBurned) {
        OffsetDateTime expectedEffectiveStartDateTime = OffsetDateTime
                .of(LocalDate.parse(expectedEffectiveDate), LocalTime.parse(expectedEffectiveStartTime), UTC);
        TimeFrame expectedTimeFrame;
        if (expectedDurationInMs != null) {
            DurationUnitValue expectedDuration = new DurationUnitValue(MILLISECOND, expectedDurationInMs);
            expectedTimeFrame = new TimeFrame(
                    TimeInterval.ofStartDateTimeAndDuration(expectedEffectiveStartDateTime, expectedDuration));
        }
        else {
            expectedTimeFrame = new TimeFrame(expectedEffectiveStartDateTime);
        }
        assertThatDataPointMatches(dataPoint, expectedActivityName, expectedTimeFrame, expectedDistance,
                expectedExternalId, expectedCaloriesBurned);
    }
}
| 2,253 |
716 | <filename>unittest/python/bindings_inverse_dynamics_derivatives.py
import unittest
from test_case import PinocchioTestCase as TestCase
import pinocchio as pin
import numpy as np
class TestDeriavtives(TestCase):
    # NOTE(review): class name looks like a typo for "TestDerivatives"; kept
    # unchanged so the public test identifier stays stable.
    def setUp(self):
        """Builds a random humanoid model plus random q, v, a and per-joint external forces."""
        self.model = pin.buildSampleModelHumanoidRandom()
        self.data = self.model.createData()
        qmax = np.full((self.model.nq,1),np.pi)
        self.q = pin.randomConfiguration(self.model,-qmax,qmax)
        self.v = np.random.rand((self.model.nv))
        self.a = np.random.rand((self.model.nv))
        # One random spatial force per joint of the model.
        self.fext = []
        for _ in range(self.model.njoints):
            self.fext.append(pin.Force.Random())
    def test_rnea_derivatives(self):
        """computeRNEADerivatives returns a 3-tuple and fills data.ddq consistently with plain rnea."""
        res = pin.computeRNEADerivatives(self.model,self.data,self.q,self.v,self.a)
        self.assertTrue(len(res) == 3)
        data2 = self.model.createData()
        pin.rnea(self.model,data2,self.q,self.v,self.a)
        self.assertApprox(self.data.ddq,data2.ddq)
        # With external forces
        res = pin.computeRNEADerivatives(self.model,self.data,self.q,self.v,self.a,self.fext)
        self.assertTrue(len(res) == 3)
        pin.rnea(self.model,data2,self.q,self.v,self.a,self.fext)
        self.assertApprox(self.data.ddq,data2.ddq)
    def test_generalized_gravity_derivatives(self):
        """Gravity-torque derivatives must equal the first RNEA derivative at zero v and a."""
        res = pin.computeGeneralizedGravityDerivatives(self.model,self.data,self.q)
        data2 = self.model.createData()
        ref,_,_ = pin.computeRNEADerivatives(self.model,data2,self.q,self.v*0,self.a*0)
        self.assertApprox(res,ref)
    def test_static_torque_derivatives(self):
        """Static-torque derivatives must equal the first RNEA derivative at zero v/a with fext."""
        res = pin.computeStaticTorqueDerivatives(self.model,self.data,self.q,self.fext)
        data2 = self.model.createData()
        ref,_,_ = pin.computeRNEADerivatives(self.model,data2,self.q,self.v*0,self.a*0,self.fext)
        self.assertApprox(res,ref)
if __name__ == '__main__':
unittest.main()
| 881 |
9,861 | <reponame>offa/Catch2
// 311-Gen-CustomCapture.cpp
// Shows how to provide custom capture list to the generator expression
// Note that using variables inside generators is dangerous and should
// be done only if you know what you are doing, because the generators
// _WILL_ outlive the variables. Also, even if you know what you are
// doing, you should probably use GENERATE_COPY or GENERATE_REF macros
// instead. However, if your use case requires having a
// per-variable custom capture list, this example shows how to achieve
// that.
#include <catch2/catch_test_macros.hpp>
#include <catch2/generators/catch_generators_adapters.hpp>
#include <catch2/generators/catch_generators_random.hpp>
TEST_CASE("Generate random doubles across different ranges",
          "[generator][example][advanced]") {
    // Workaround for old libstdc++
    using record = std::tuple<double, double>;
    // Set up 3 ranges to generate numbers from
    auto r1 = GENERATE(table<double, double>({
        record{3, 4},
        record{-4, -3},
        record{10, 1000}
    }));
    auto r2(r1);
    // This will take r1 by reference and r2 by value.
    // Note that there are no advantages for doing so in this example,
    // it is done only for expository purposes.
    auto number = Catch::Generators::generate( "custom capture generator", CATCH_INTERNAL_LINEINFO,
        [&r1, r2]{
            using namespace Catch::Generators;
            // 50 random doubles per range, bounded by r1's low and r2's high end.
            return makeGenerators(take(50, random(std::get<0>(r1), std::get<1>(r2))));
        }
    );
    // Every configured range excludes zero, so the magnitude is always positive.
    REQUIRE(std::abs(number) > 0);
}
// Compiling and running this file will result in 150 successful assertions
| 561 |
2,111 | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 <NAME> <<EMAIL>>
#include <stdio.h>
#include "main.h"
#include <unsupported/Eigen/NumericalDiff>
// Generic functor
// Generic functor
// Base shape expected by Eigen's NumericalDiff: exposes the scalar type, the
// input/value dimensions (compile-time NX/NY or runtime), and matrix typedefs.
template<typename _Scalar, int NX=Dynamic, int NY=Dynamic>
struct Functor
{
  typedef _Scalar Scalar;
  enum {
    InputsAtCompileTime = NX,
    ValuesAtCompileTime = NY
  };
  typedef Matrix<Scalar,InputsAtCompileTime,1> InputType;
  typedef Matrix<Scalar,ValuesAtCompileTime,1> ValueType;
  typedef Matrix<Scalar,ValuesAtCompileTime,InputsAtCompileTime> JacobianType;
  int m_inputs, m_values;
  Functor() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {}
  Functor(int inputs_, int values_) : m_inputs(inputs_), m_values(values_) {}
  int inputs() const { return m_inputs; }
  int values() const { return m_values; }
};
// Test problem: 15 residuals f_i(x) = y_i - (x0 + t1/(x1*t2 + x2*t3)) over a
// 3-parameter model, with an analytic Jacobian (actual_df) used as reference.
struct my_functor : Functor<double>
{
    my_functor(void): Functor<double>(3,15) {}
    // Fills fvec with the 15 residuals at x.
    int operator()(const VectorXd &x, VectorXd &fvec) const
    {
        double tmp1, tmp2, tmp3;
        double y[15] = {1.4e-1, 1.8e-1, 2.2e-1, 2.5e-1, 2.9e-1, 3.2e-1, 3.5e-1,
            3.9e-1, 3.7e-1, 5.8e-1, 7.3e-1, 9.6e-1, 1.34, 2.1, 4.39};
        for (int i = 0; i < values(); i++)
        {
            tmp1 = i+1;
            tmp2 = 16 - i - 1;
            // tmp3 mirrors tmp2 for the second half of the points.
            tmp3 = (i>=8)? tmp2 : tmp1;
            fvec[i] = y[i] - (x[0] + tmp1/(x[1]*tmp2 + x[2]*tmp3));
        }
        return 0;
    }
    // Analytic 15x3 Jacobian of the residuals at x (ground truth for the tests).
    int actual_df(const VectorXd &x, MatrixXd &fjac) const
    {
        double tmp1, tmp2, tmp3, tmp4;
        for (int i = 0; i < values(); i++)
        {
            tmp1 = i+1;
            tmp2 = 16 - i - 1;
            tmp3 = (i>=8)? tmp2 : tmp1;
            // tmp4 = (x1*t2 + x2*t3)^2, the squared denominator of the model term.
            tmp4 = (x[1]*tmp2 + x[2]*tmp3); tmp4 = tmp4*tmp4;
            fjac(i,0) = -1;
            fjac(i,1) = tmp1*tmp2/tmp4;
            fjac(i,2) = tmp1*tmp3/tmp4;
        }
        return 0;
    }
};
void test_forward()
{
VectorXd x(3);
MatrixXd jac(15,3);
MatrixXd actual_jac(15,3);
my_functor functor;
x << 0.082, 1.13, 2.35;
// real one
functor.actual_df(x, actual_jac);
// std::cout << actual_jac << std::endl << std::endl;
// using NumericalDiff
NumericalDiff<my_functor> numDiff(functor);
numDiff.df(x, jac);
// std::cout << jac << std::endl;
VERIFY_IS_APPROX(jac, actual_jac);
}
void test_central()
{
VectorXd x(3);
MatrixXd jac(15,3);
MatrixXd actual_jac(15,3);
my_functor functor;
x << 0.082, 1.13, 2.35;
// real one
functor.actual_df(x, actual_jac);
// using NumericalDiff
NumericalDiff<my_functor,Central> numDiff(functor);
numDiff.df(x, jac);
VERIFY_IS_APPROX(jac, actual_jac);
}
// Test entry point: run both difference schemes.
EIGEN_DECLARE_TEST(NumericalDiff)
{
    CALL_SUBTEST(test_forward());
    CALL_SUBTEST(test_central());
}
| 1,439 |
382 | <filename>src/org/swiftjava/javax_swing/CaretListenerProxy.java
/// generated by: genswift.java 'java/lang|java/util|java/sql|java/awt|javax/swing' ///
/// interface javax.swing.event.CaretListener ///
package org.swiftjava.javax_swing;
@SuppressWarnings("JniMissingFunction")
public class CaretListenerProxy implements javax.swing.event.CaretListener {
// address of proxy object
long __swiftObject;
CaretListenerProxy( long __swiftObject ) {
this.__swiftObject = __swiftObject;
}
/// public abstract void javax.swing.event.CaretListener.caretUpdate(javax.swing.event.CaretEvent)
public native void __caretUpdate( long __swiftObject, javax.swing.event.CaretEvent e );
public void caretUpdate( javax.swing.event.CaretEvent e ) {
__caretUpdate( __swiftObject, e );
}
public native void __finalize( long __swiftObject );
public void finalize() {
__finalize( __swiftObject );
}
}
| 354 |
5,168 | <filename>imperative/python/megengine/random/__init__.py
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from .rng import RNG, beta, gamma, normal, permutation, poisson, seed, shuffle, uniform
# Public API of megengine.random: names re-exported from the .rng submodule.
__all__ = [
    "RNG",
    "beta",
    "gamma",
    "normal",
    "permutation",
    "poisson",
    "seed",
    "uniform",
    "shuffle",
]
# pylint: disable=undefined-variable
# Hide the implementation submodule so `megengine.random.rng` is not exposed.
del rng  # type: ignore[name-defined]
| 258 |
580 | <gh_stars>100-1000
from icevision.models.mmdet.models.retinanet import backbones
from icevision.models.mmdet.common.bbox.single_stage import *
from icevision.models.interpretation import Interpretation, _move_to_device
from icevision.models.mmdet.common.interpretation_utils import (
sum_losses_mmdet,
loop_mmdet,
)
# Per-batch loss accumulators populated by the mmdet interpretation loop.
_LOSSES_DICT = {
    "loss_cls": [],
    "loss_bbox": [],
    "loss_total": [],
}

# `valid_dl`, `infer_dl` and `predict_from_dl` come from the star imports
# above -- presumably re-exported by common.bbox.single_stage; TODO confirm.
interp = Interpretation(
    losses_dict=_LOSSES_DICT,
    valid_dl=valid_dl,
    infer_dl=infer_dl,
    predict_from_dl=predict_from_dl,
)
# Override the generic loop with the mmdet-specific one.
interp._loop = loop_mmdet
| 235 |
1,038 | <reponame>vickyi/data-algorithms-book<gh_stars>1000+
/**
* This package (org.dataalgorithms.chap09) contains source code for
* chapter 09 of the Data Algorithms book published by O'Reilly.
*
* @author <NAME>
*
*/
package org.dataalgorithms.chap09;
//rest of the file is empty
| 99 |
310 | <filename>gear/hardware/a/alpha-7r.json
{
"name": "Alpha 7R",
"description": "A 36.4 megapixel full-frame digital camera.",
"url": "http://store.sony.com/a7r-alpha-7r-interchangeable-lens-camera-zid27-ILCE7R/B/cat-27-catid-All-Alpha-NEX-Cameras"
} | 110 |
672 | /*
* Copyright (c) 2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <libkern/libkern.h>
#include <kern/assert.h>
#include <kern/kpc.h>
#include <sys/ktrace.h>
#include <pexpert/pexpert.h>
#include <kperf/kperf.h>
/* Various sysctl requests */
#define REQ_CLASSES (1)
#define REQ_COUNTING (2)
#define REQ_THREAD_COUNTING (3)
#define REQ_CONFIG_COUNT (4)
#define REQ_COUNTER_COUNT (5)
#define REQ_THREAD_COUNTERS (6)
#define REQ_COUNTERS (7)
#define REQ_SHADOW_COUNTERS (8)
#define REQ_CONFIG (9)
#define REQ_PERIOD (10)
#define REQ_ACTIONID (11)
#define REQ_SW_INC (14)
#define REQ_PMU_VERSION (15)
/* Type-munging casts */
typedef int (*getint_t)(void);
typedef int (*setint_t)(int);
static int kpc_initted = 0;
static lck_grp_attr_t *sysctl_lckgrp_attr = NULL;
static lck_grp_t *sysctl_lckgrp = NULL;
static lck_mtx_t sysctl_lock;
/*
* Another element is needed to hold the CPU number when getting counter values.
*/
#define KPC_MAX_BUF_LEN (KPC_MAX_COUNTERS_COPIED + 1)
typedef int (*setget_func_t)(int);
/*
 * One-time initialization: create the sysctl serialization lock, then bring up
 * the architecture, common, and per-thread kpc layers before marking the
 * subsystem usable.
 */
void
kpc_init(void)
{
	sysctl_lckgrp_attr = lck_grp_attr_alloc_init();
	sysctl_lckgrp = lck_grp_alloc_init("kpc", sysctl_lckgrp_attr);
	lck_mtx_init(&sysctl_lock, sysctl_lckgrp, LCK_ATTR_NULL);

	kpc_arch_init();
	kpc_common_init();
	kpc_thread_init();

	kpc_initted = 1;
}
/*
 * Return the lazily-allocated scratch buffer shared by all array-style
 * sysctl handlers; must be called with sysctl_lock held.
 *
 * NOTE(review): the buffer is sized on first use only; if
 * kpc_get_counterbuf_size() can grow later (e.g. after a config change), the
 * cached allocation would be stale -- confirm whether that can happen.
 */
static uint64_t *
kpc_get_bigarray(uint32_t *size_out)
{
	static uint64_t *bigarray = NULL;

	LCK_MTX_ASSERT(&sysctl_lock, LCK_MTX_ASSERT_OWNED);

	uint32_t size = kpc_get_counterbuf_size() + sizeof(uint64_t);
	*size_out = size;

	if (bigarray) {
		return bigarray;
	}

	/*
	 * Another element is needed to hold the CPU number when getting counter
	 * values.
	 */
	bigarray = kalloc_tag(size, VM_KERN_MEMORY_DIAG);
	assert(bigarray != NULL);
	return bigarray;
}
/* abstract sysctl handlers */
/* Copy a read-only integer value out to userspace. */
static int
sysctl_get_int( struct sysctl_oid *oidp, struct sysctl_req *req,
    uint32_t value )
{
	int error = 0;

	/* copy out the old value */
	error = sysctl_handle_int(oidp, &value, 0, req);

	return error;
}
/* Read an int from userspace and hand it to set_func; write-only path. */
static int
sysctl_set_int( struct sysctl_req *req, int (*set_func)(int))
{
	int error = 0;
	int value = 0;

	error = SYSCTL_IN( req, &value, sizeof(value) );
	if( error )
		return error;

	error = set_func( value );

	return error;
}
/*
 * Combined read/write handler for a scalar int sysctl: report the current
 * value via get_func, and if userspace supplied a new one, apply it via
 * set_func.
 */
static int
sysctl_getset_int( struct sysctl_oid *oidp, struct sysctl_req *req,
    int (*get_func)(void), int (*set_func)(int) )
{
	int error = 0;
	uint32_t value = 0;

	/* get the old value and process it */
	value = get_func();

	/* copy out the old value, get the new value */
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		return (error);

	/* if that worked, and we're writing... */
	error = set_func( value );

	return error;
}
/*
 * "Query" style handler: read an int argument from userspace, transform it
 * with setget_func, and copy the result back out (used for the
 * config/counter count sysctls).
 */
static int
sysctl_setget_int( struct sysctl_req *req,
    int (*setget_func)(int) )
{
	int error = 0;
	int value = 0;

	error = SYSCTL_IN( req, &value, sizeof(value) );
	if( error )
		return error;

	value = setget_func(value);

	error = SYSCTL_OUT( req, &value, sizeof(value) );

	return error;
}
/*
 * Sample the hardware counters for the requested classes.  On success,
 * ctr_buf[0] holds the CPU the values were read on and the counter values
 * follow; *size is updated to the number of bytes filled.
 */
static int
sysctl_kpc_get_counters(uint32_t counters,
    uint32_t *size, void *buf)
{
	uint64_t *ctr_buf = (uint64_t*)buf;
	int curcpu;
	uint32_t count;

	count = kpc_get_cpu_counters(counters & KPC_ALL_CPUS,
	    counters,
	    &curcpu, &ctr_buf[1]);
	if (!count)
		return EINVAL;

	ctr_buf[0] = curcpu;

	*size = (count+1) * sizeof(uint64_t);

	return 0;
}
/*
 * Same layout as sysctl_kpc_get_counters, but reads the software shadow
 * counters instead of the live hardware registers.
 */
static int
sysctl_kpc_get_shadow_counters(uint32_t counters,
    uint32_t *size, void *buf)
{
	uint64_t *ctr_buf = (uint64_t*)buf;
	int curcpu;
	uint32_t count;

	count = kpc_get_shadow_counters(counters & KPC_ALL_CPUS,
	    counters,
	    &curcpu, &ctr_buf[1]);

	if (!count)
		return EINVAL;

	ctr_buf[0] = curcpu;

	*size = (count+1) * sizeof(uint64_t);

	return 0;
}
/*
 * Read the per-thread accumulated counters.  Only tid 0 -- meaning the
 * calling thread -- is supported.
 */
static int
sysctl_kpc_get_thread_counters(uint32_t tid,
    uint32_t *size, void *buf)
{
	uint32_t count = *size / sizeof(uint64_t);
	int r;

	if( tid != 0 )
		return EINVAL;

	r = kpc_get_curthread_counters(&count, buf);
	if( !r )
		*size = count * sizeof(uint64_t);

	return r;
}
/* Thin adapter matching the get_fn signature used by the bigarray helpers. */
static int
sysctl_kpc_get_config(uint32_t classes, void* buf)
{
	return kpc_get_config( classes, buf );
}
/* Apply a new counter configuration; the power class is kernel-owned. */
static int
sysctl_kpc_set_config(uint32_t classes, void* buf)
{
	/* userspace cannot reconfigure the power class */
	if (classes & KPC_CLASS_POWER_MASK)
		return (EPERM);

	return kpc_set_config( classes, buf);
}
/* Thin adapter: read the sampling periods for the given classes. */
static int
sysctl_kpc_get_period(uint32_t classes, void* buf)
{
	return kpc_get_period( classes, buf );
}
/* Set sampling periods; the power class is kernel-owned. */
static int
sysctl_kpc_set_period(uint32_t classes, void* buf)
{
	/* userspace cannot reconfigure the power class */
	if (classes & KPC_CLASS_POWER_MASK)
		return (EPERM);

	return kpc_set_period( classes, buf);
}
/* Thin adapter: read the kperf action ids for the given classes. */
static int
sysctl_kpc_get_actionid(uint32_t classes, void* buf)
{
	return kpc_get_actionid( classes, buf );
}
/* Thin adapter: set the kperf action ids for the given classes. */
static int
sysctl_kpc_set_actionid(uint32_t classes, void* buf)
{
	return kpc_set_actionid( classes, buf);
}
/*
 * Generic read path for the array-style sysctls: read a 32-bit selector from
 * userspace, fill the shared scratch buffer via get_fn (which also reports
 * how many bytes it produced), and copy the result out.
 */
static int
sysctl_get_bigarray(struct sysctl_req *req,
    int (*get_fn)(uint32_t, uint32_t*, void*))
{
	uint32_t bufsize = 0;
	uint64_t *buf = kpc_get_bigarray(&bufsize);
	uint32_t arg = 0;

	/* get the argument */
	int error = SYSCTL_IN(req, &arg, sizeof(arg));
	if (error) {
		return error;
	}

	error = get_fn(arg, &bufsize, buf);
	if (!error) {
		error = SYSCTL_OUT(req, buf, bufsize);
	}

	return error;
}
/* given a config word, how many bytes does it take? */
static int
sysctl_config_size( uint32_t config )
{
	return kpc_get_config_count(config) * sizeof(kpc_config_t);
}
/* Byte size of the counter array for the given class mask. */
static int
sysctl_counter_size( uint32_t classes )
{
	return kpc_get_counter_count(classes) * sizeof(uint64_t);
}
/* Byte size of the actionid array (one int32 per counter) for the classes. */
static int
sysctl_actionid_size( uint32_t classes )
{
	return kpc_get_counter_count(classes) * sizeof(int32_t);
}
/*
 * Combined read/write path for the array-style sysctls (config, period,
 * actionid).  The request starts with a 64-bit class-selector word; size_fn
 * maps it to the number of register bytes involved, then the new values are
 * applied (if writing) and/or the current values copied out (if reading).
 */
static int
sysctl_getset_bigarray(struct sysctl_req *req, int (*size_fn)(uint32_t arg),
    int (*get_fn)(uint32_t, void*), int (*set_fn)(uint32_t, void*))
{
	int error = 0;
	uint64_t arg;

	uint32_t bufsize = 0;
	uint64_t *buf = kpc_get_bigarray(&bufsize);

	/* get the config word */
	error = SYSCTL_IN(req, &arg, sizeof(arg));
	if (error) {
		return error;
	}

	/* Determine the size of registers to modify. */
	uint32_t regsize = size_fn((uint32_t)arg);
	if (regsize == 0 || regsize > bufsize) {
		return EINVAL;
	}

	/* if writing */
	if (req->newptr) {
		/* copy the rest -- SYSCTL_IN knows the copyin should be shifted */
		error = SYSCTL_IN(req, buf, regsize);

		/* SYSCTL_IN failure means only need to read */
		if (!error) {
			error = set_fn((uint32_t)arg, buf);
			if (error) {
				return error;
			}
		}
	}

	/* if reading */
	if (req->oldptr) {
		error = get_fn((uint32_t)arg, buf);
		if (error) {
			return error;
		}

		error = SYSCTL_OUT(req, buf, regsize);
	}

	return error;
}
/*
 * Single handler behind every kpc.* sysctl; arg1 carries the REQ_* selector.
 * Access is gated by ktrace (root or the blessed pid) except for a few
 * public read-only values, and all request processing is serialized on
 * sysctl_lock.
 */
static int
kpc_sysctl SYSCTL_HANDLER_ARGS
{
	int ret;

	// __unused struct sysctl_oid *unused_oidp = oidp;
	(void)arg2;

	if (!kpc_initted) {
		panic("kpc_init not called");
	}

	if (!kpc_supported) {
		return ENOTSUP;
	}

	ktrace_lock();

	// Most sysctls require an access check, but a few are public.
	switch( (uintptr_t) arg1 ) {
	case REQ_CLASSES:
	case REQ_CONFIG_COUNT:
	case REQ_COUNTER_COUNT:
		// These read-only sysctls are public.
		break;

	default:
		// Require kperf access to read or write anything else.
		// This is either root or the blessed pid.
		if ((ret = ktrace_read_check())) {
			ktrace_unlock();
			return ret;
		}
		break;
	}

	ktrace_unlock();

	lck_mtx_lock(&sysctl_lock);

	/* which request */
	switch( (uintptr_t) arg1 )
	{
	case REQ_CLASSES:
		ret = sysctl_get_int( oidp, req,
		    kpc_get_classes() );
		break;
	case REQ_COUNTING:
		ret = sysctl_getset_int( oidp, req,
		    (getint_t)kpc_get_running,
		    (setint_t)kpc_set_running );
		break;
	case REQ_THREAD_COUNTING:
		ret = sysctl_getset_int( oidp, req,
		    (getint_t)kpc_get_thread_counting,
		    (setint_t)kpc_set_thread_counting );
		break;

	case REQ_CONFIG_COUNT:
		ret = sysctl_setget_int( req,
		    (setget_func_t)kpc_get_config_count );
		break;

	case REQ_COUNTER_COUNT:
		ret = sysctl_setget_int( req,
		    (setget_func_t)kpc_get_counter_count );
		break;


	case REQ_THREAD_COUNTERS:
		ret = sysctl_get_bigarray( req, sysctl_kpc_get_thread_counters );
		break;

	case REQ_COUNTERS:
		ret = sysctl_get_bigarray( req, sysctl_kpc_get_counters );
		break;

	case REQ_SHADOW_COUNTERS:
		ret = sysctl_get_bigarray( req, sysctl_kpc_get_shadow_counters );
		break;

	case REQ_CONFIG:
		ret = sysctl_getset_bigarray( req,
		    sysctl_config_size,
		    sysctl_kpc_get_config,
		    sysctl_kpc_set_config );
		break;

	case REQ_PERIOD:
		ret = sysctl_getset_bigarray( req,
		    sysctl_counter_size,
		    sysctl_kpc_get_period,
		    sysctl_kpc_set_period );
		break;

	case REQ_ACTIONID:
		ret = sysctl_getset_bigarray( req,
		    sysctl_actionid_size,
		    sysctl_kpc_get_actionid,
		    sysctl_kpc_set_actionid );
		break;

	case REQ_SW_INC:
		ret = sysctl_set_int( req, (setget_func_t)kpc_set_sw_inc );
		break;

	case REQ_PMU_VERSION:
		ret = sysctl_get_int(oidp, req, kpc_get_pmu_version());
		break;

	default:
		ret = ENOENT;
		break;
	}

	lck_mtx_unlock(&sysctl_lock);

	return ret;
}
/*** sysctl definitions ***/

/* root kperf node */
SYSCTL_NODE(, OID_AUTO, kpc, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
    "kpc");

/* values */
SYSCTL_PROC(_kpc, OID_AUTO, classes,
    CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED,
    (void*)REQ_CLASSES,
    sizeof(int), kpc_sysctl, "I", "Available classes");

SYSCTL_PROC(_kpc, OID_AUTO, counting,
    CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED,
    (void*)REQ_COUNTING,
    sizeof(int), kpc_sysctl, "I", "PMCs counting");

SYSCTL_PROC(_kpc, OID_AUTO, thread_counting,
    CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED,
    (void*)REQ_THREAD_COUNTING,
    sizeof(int), kpc_sysctl, "I", "Thread accumulation");

SYSCTL_PROC(_kpc, OID_AUTO, pmu_version,
    CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED,
    (void *)REQ_PMU_VERSION,
    sizeof(int), kpc_sysctl, "I", "PMU version for hardware");

/* faux values */
/* NOTE(review): these int-typed nodes use format "S" where the ones above use
 * "I" -- confirm this is intentional before relying on the format string. */
SYSCTL_PROC(_kpc, OID_AUTO, config_count,
    CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED,
    (void*)REQ_CONFIG_COUNT,
    sizeof(int), kpc_sysctl, "S", "Config count");

SYSCTL_PROC(_kpc, OID_AUTO, counter_count,
    CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED,
    (void*)REQ_COUNTER_COUNT,
    sizeof(int), kpc_sysctl, "S", "Counter count");

SYSCTL_PROC(_kpc, OID_AUTO, sw_inc,
    CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED,
    (void*)REQ_SW_INC,
    sizeof(int), kpc_sysctl, "S", "Software increment");

/* arrays */
SYSCTL_PROC(_kpc, OID_AUTO, thread_counters,
    CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED,
    (void*)REQ_THREAD_COUNTERS,
    sizeof(uint64_t), kpc_sysctl,
    "QU", "Current thread counters");

SYSCTL_PROC(_kpc, OID_AUTO, counters,
    CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED,
    (void*)REQ_COUNTERS,
    sizeof(uint64_t), kpc_sysctl,
    "QU", "Current counters");

SYSCTL_PROC(_kpc, OID_AUTO, shadow_counters,
    CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED,
    (void*)REQ_SHADOW_COUNTERS,
    sizeof(uint64_t), kpc_sysctl,
    "QU", "Current shadow counters");

SYSCTL_PROC(_kpc, OID_AUTO, config,
    CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED,
    (void*)REQ_CONFIG,
    sizeof(uint64_t), kpc_sysctl,
    "QU", "Set counter configs");

SYSCTL_PROC(_kpc, OID_AUTO, period,
    CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED,
    (void*)REQ_PERIOD,
    sizeof(uint64_t), kpc_sysctl,
    "QU", "Set counter periods");

SYSCTL_PROC(_kpc, OID_AUTO, actionid,
    CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED,
    (void*)REQ_ACTIONID,
    sizeof(uint32_t), kpc_sysctl,
    "QU", "Set counter actionids");
| 6,902 |
326 | <filename>src/main/java/org/araymond/joal/core/ttorrent/client/announcer/response/AnnounceResponseHandlerChain.java
package org.araymond.joal.core.ttorrent.client.announcer.response;
import com.turn.ttorrent.common.protocol.TrackerMessage.AnnounceRequestMessage.RequestEvent;
import org.araymond.joal.core.ttorrent.client.announcer.Announcer;
import org.araymond.joal.core.ttorrent.client.announcer.exceptions.TooMuchAnnouncesFailedInARawException;
import org.araymond.joal.core.ttorrent.client.announcer.request.SuccessAnnounceResponse;
import org.slf4j.Logger;
import java.util.ArrayList;
import java.util.List;
import static org.slf4j.LoggerFactory.getLogger;
/**
 * Fans announce lifecycle events out to an ordered list of chain elements.
 * Each callback walks every registered element in insertion order and
 * dispatches to the element method matching the tracker {@link RequestEvent}.
 */
public class AnnounceResponseHandlerChain implements AnnounceResponseCallback {
    private static final Logger logger = getLogger(AnnounceResponseHandlerChain.class);

    // Elements are notified in the order they were appended.
    private final List<AnnounceResponseHandlerChainElement> chainElements;

    public AnnounceResponseHandlerChain() {
        chainElements = new ArrayList<>(4);
    }

    /** Adds an element to the end of the notification chain. */
    public void appendHandler(final AnnounceResponseHandlerChainElement element) {
        this.chainElements.add(element);
    }

    @Override
    public void onAnnounceWillAnnounce(final RequestEvent event, final Announcer announcer) {
        for (final AnnounceResponseHandlerChainElement element : chainElements) {
            element.onAnnouncerWillAnnounce(announcer, event);
        }
    }

    @Override
    public void onAnnounceSuccess(final RequestEvent event, final Announcer announcer, final SuccessAnnounceResponse result) {
        for (final AnnounceResponseHandlerChainElement element : chainElements) {
            // Dispatch on the tracker event; COMPLETED (and any future values)
            // intentionally fall through to the warning below.
            switch (event) {
                case STARTED: {
                    element.onAnnounceStartSuccess(announcer, result);
                    break;
                }
                case NONE: {
                    element.onAnnounceRegularSuccess(announcer, result);
                    break;
                }
                case STOPPED: {
                    element.onAnnounceStopSuccess(announcer, result);
                    break;
                }
                default: {
                    logger.warn("Event {} cannot be handled by {}", event.getEventName(), getClass().getSimpleName());
                    break;
                }
            }
        }
    }

    @Override
    public void onAnnounceFailure(final RequestEvent event, final Announcer announcer, final Throwable throwable) {
        for (final AnnounceResponseHandlerChainElement element : chainElements) {
            switch (event) {
                case STARTED: {
                    element.onAnnounceStartFails(announcer, throwable);
                    break;
                }
                case NONE: {
                    element.onAnnounceRegularFails(announcer, throwable);
                    break;
                }
                case STOPPED: {
                    element.onAnnounceStopFails(announcer, throwable);
                    break;
                }
                default: {
                    logger.warn("Event {} cannot be handled by {}", event.getEventName(), getClass().getSimpleName());
                    break;
                }
            }
        }
    }

    @Override
    public void onTooManyAnnounceFailedInARaw(final RequestEvent event, final Announcer announcer, final TooMuchAnnouncesFailedInARawException e) {
        // This notification is event-independent: every element is told.
        for (final AnnounceResponseHandlerChainElement chainElement : chainElements) {
            chainElement.onTooManyAnnounceFailedInARaw(announcer, e);
        }
    }
}
| 1,658 |
652 | <reponame>pyparallel/pyparallel<filename>Lib/px/command.py<gh_stars>100-1000
#===============================================================================
# Imports
#===============================================================================
from ctk.commandinvariant import (
InvariantAwareCommand,
)
#===============================================================================
# Helpers
#===============================================================================
#===============================================================================
# Base Ronda Command
#===============================================================================
class PxCommand(InvariantAwareCommand):
    """Base class for all px commands; marker subclass of InvariantAwareCommand."""
    pass
class TCPServerCommand(PxCommand):
    """Base class for px commands that run a TCP server."""
    # Set by subclasses -- the server instance and its protocol class.
    server = None
    protocol = None
class TCPClientCommand(PxCommand):
    """Base class for px commands that act as a TCP client."""
    pass
# vim:set ts=8 sw=4 sts=4 tw=78 et:
| 181 |
1,563 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""linebot.models.error module."""
from .base import Base
class Error(Base):
    """Error response of LINE messaging API.

    https://developers.line.biz/en/reference/messaging-api/#error-response
    """

    def __init__(self, message=None, details=None, **kwargs):
        """__init__ method.

        :param str message: Summary of the error
        :param details: ErrorDetail instance list
        :type details: list[T <= :py:class:`linebot.models.error.ErrorDetail`]
        :param kwargs:
        """
        super(Error, self).__init__(**kwargs)

        self.message = message
        # Coerce each raw detail (dict or instance) into an ErrorDetail.
        self.details = [
            self.get_or_new_from_json_dict(detail, ErrorDetail)
            for detail in (details or [])
        ]
class ErrorDetail(Base):
    """ErrorDetail response of LINE messaging API."""

    def __init__(self, message=None, property=None, **kwargs):
        """__init__ method.

        :param str message: Details of the error message
        :param str property: Related property
        :param kwargs:
        """
        # NOTE: `property` shadows the builtin but is kept -- it mirrors the
        # field name in the LINE API payload and is part of the public API.
        super(ErrorDetail, self).__init__(**kwargs)

        self.message = message
        self.property = property
| 698 |
14,668 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_CRONET_IOS_TEST_CRONET_TEST_BASE_H_
#define COMPONENTS_CRONET_IOS_TEST_CRONET_TEST_BASE_H_
#include <Cronet/Cronet.h>
#include "base/bind.h"
#include "net/cert/cert_verifier.h"
#include "net/cert/x509_certificate.h"
#include "testing/gtest/include/gtest/gtest.h"
#pragma mark
namespace base {
class Location;
class SingleThreadTaskRunner;
class Thread;
}
namespace {
typedef void (^BlockType)(void);
} // namespace
// Exposes private test-only methods of the Cronet class.
@interface Cronet (ExposedForTesting)
// Tears global Cronet state down so each test can start from scratch.
+ (void)shutdownForTesting;
// Installs a net::CertVerifier stub so tests control verification results.
+ (void)setMockCertVerifierForTesting:
    (std::unique_ptr<net::CertVerifier>)certVerifier;
+ (void)setEnablePublicKeyPinningBypassForLocalTrustAnchors:(BOOL)enable;
// Task runners for posting work onto Cronet's internal threads.
+ (base::SingleThreadTaskRunner*)getFileThreadRunnerForTesting;
+ (base::SingleThreadTaskRunner*)getNetworkThreadRunnerForTesting;
+ (size_t)getMetricsMapSize;
@end
// NSURLSessionDataDelegate delegate implementation used by the tests to
// wait for a response and check its status.
@interface TestDelegate : NSObject<NSURLSessionDataDelegate>

// Error the request this delegate is attached to failed with, if any.
@property(retain, atomic)
    NSMutableDictionary<NSURLSessionTask*, NSError*>* errorPerTask;

// Contains total amount of received data.
@property(readonly) NSMutableDictionary<NSURLSessionDataTask*, NSNumber*>*
    totalBytesReceivedPerTask;

// Contains the expected amount of received data.
@property(readonly) NSMutableDictionary<NSURLSessionDataTask*, NSNumber*>*
    expectedContentLengthPerTask;

// Contains metrics data.
@property(readonly) NSURLSessionTaskMetrics* taskMetrics NS_AVAILABLE_IOS(10.0);

// Contains NSHTTPURLResponses for the tasks.
@property(readonly)
    NSMutableDictionary<NSURLSessionDataTask*, NSHTTPURLResponse*>*
        responsePerTask;

// Resets the delegate, so it can be used again for another request.
- (void)reset;

// Contains the response body.
- (NSString*)responseBody:(NSURLSessionDataTask*)task;

/// Waits for a single request to complete.
/// @return |NO| if the request didn't complete and the method timed-out.
- (BOOL)waitForDone:(NSURLSessionDataTask*)task
        withTimeout:(int64_t)deadline_ns;

// Convenience functions for single-task delegates -- they read the
// corresponding per-task dictionaries for the delegate's only task.
- (NSError*)error;
- (long)totalBytesReceived;
- (long)expectedContentLength;
- (NSString*)responseBody;
@end
// Forward declaration.
namespace net {
class MockCertVerifier;
}
namespace cronet {
// A base class that should be extended by all other Cronet tests.
// The class automatically starts and stops the test QUIC server.
// Base fixture for Cronet tests; wires up the shared TestDelegate and
// helpers for mock certificate verification and thread posting.
class CronetTestBase : public ::testing::Test {
 protected:
  // Computes the SHA-256 hash of |cert|'s public key into |out_hash_value|.
  static bool CalculatePublicKeySha256(const net::X509Certificate& cert,
                                       net::HashValue* out_hash_value);
  void SetUp() override;
  void TearDown() override;

  // Starts |task| and blocks until it completes or |deadline_ns| elapses.
  bool StartDataTaskAndWaitForCompletion(NSURLSessionDataTask* task,
                                         int64_t deadline_ns = 15 *
                                                               NSEC_PER_SEC);
  std::unique_ptr<net::MockCertVerifier> CreateMockCertVerifier(
      const std::vector<std::string>& certs,
      bool known_root);
  void PostBlockToFileThread(const base::Location& from_here, BlockType block);
  void PostBlockToNetworkThread(const base::Location& from_here,
                                BlockType block);
  // gtest-style predicates over the delegate's recorded outcome for |task|.
  ::testing::AssertionResult IsResponseSuccessful(NSURLSessionDataTask* task);
  ::testing::AssertionResult IsResponseCanceled(NSURLSessionDataTask* task);

  TestDelegate* delegate_;

 private:
  void ExecuteBlock(BlockType block);
};  // class CronetTestBase
#endif // COMPONENTS_CRONET_IOS_TEST_CRONET_TEST_BASE_H_
| 1,395 |
342 | package com.alibaba.chaosblade.exec.plugin.lettuce;
/**
* @author yefei
* @create 2020-11-23 14:58
*/
public class LettuceConstants {
    // Flag names used by the lettuce chaos experiment.
    public final static String KEY = "key";
    public final static String VALUE = "value";
    public final static String CMD = "cmd";
    // Instrumentation target: lettuce's command pipeline write method.
    public final static String CLASS = "io.lettuce.core.protocol.CommandHandler";
    public final static String METHOD = "write";
}
| 133 |
435 | <filename>web-services/map-reduce-embedded/src/main/java/datawave/security/authorization/EmbeddedDatawaveUserService.java
package datawave.security.authorization;
import javax.annotation.Priority;
import javax.enterprise.inject.Alternative;
import javax.interceptor.Interceptor;
import java.util.Collection;
/**
* Lookup bean implementation supplied just for Embedded mode (e.g., inside of MapReduce jars). This archive should not be included for normal web applications.
*/
@Alternative
@Priority(Interceptor.Priority.APPLICATION)
public class EmbeddedDatawaveUserService implements DatawaveUserService {
    // User lookup requires the web-tier security services, which do not
    // exist inside MapReduce jobs, so this always rejects.
    @Override
    public Collection<DatawaveUser> lookup(Collection<SubjectIssuerDNPair> dns) throws AuthorizationException {
        throw new UnsupportedOperationException("Not supported in embedded mode.");
    }
}
| 238 |
310 | <filename>gear/hardware/c/classic-grain-mill.json
{
"name": "Classic Grain Mill",
"description": "A flour grinder.",
"url": "https://pleasanthillgrain.com/komo-classic-grain-mill-flour-grinder-wood-stone"
}
| 78 |
6,342 | <filename>djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/keycloak/urls.py
# -*- coding: utf-8 -*-
from allauth.socialaccount.providers.keycloak.provider import KeycloakProvider
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
urlpatterns = default_urlpatterns(KeycloakProvider)
| 112 |
5,169 | {
"name": "CTidy",
"version": "0.1.0",
"license": "Simplified BSD License",
"summary": "libtidy Objective-C wrapper.",
"homepage": "https://github.com/siuying/CTidy",
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/siuying/CTidy.git",
"tag": "0.1.0"
},
"source_files": "CTidy/CTidy.{h,m}",
"libraries": "tidy",
"xcconfig": {
"HEADER_SEARCH_PATHS": "/usr/include/tidy"
},
"requires_arc": true
}
| 212 |
403 | package org.camunda.hackdays2016.BanMadInkProcesses;
import java.util.HashMap;
import java.util.Map;
import org.camunda.bpm.engine.delegate.BpmnError;
import org.camunda.bpm.engine.delegate.DelegateExecution;
import org.camunda.bpm.engine.delegate.JavaDelegate;
import org.camunda.bpm.engine.runtime.Execution;
import org.camunda.bpm.engine.task.Task;
/**
 * Decides (pseudo-randomly, weighted by the fraud score) whether an
 * application should be audited, and completes the waiting user task of the
 * process instance identified by the {@code busKey} variable.
 */
public class CompleteDecideOnFurtherFraudCheckDelegate implements JavaDelegate {

    @Override
    public void execute(DelegateExecution execution) throws Exception {
        Map<String, Object> vars = new HashMap<String, Object>();
        String busKey = (String) execution.getVariable("busKey");
        RandomGeneratorUtil randomUtil = new RandomGeneratorUtil();

        // Locate the target process instance by business key and read its
        // fraud score.
        Execution processExectuion = execution.getProcessEngineServices().getRuntimeService().createExecutionQuery().processInstanceBusinessKey(busKey).singleResult();
        int fraudNumber = (Integer) execution.getProcessEngineServices().getRuntimeService().getVariable(processExectuion.getId(), "fraudScore");

        // Negative scores are audited almost always, others only rarely.
        // (A previous unconditional 60%-roll put of "audit" was dead code:
        // both branches below overwrote it, so it has been removed.)
        if (fraudNumber < 0) {
            vars.put("audit", randomUtil.randomBoolean(95));
        } else {
            vars.put("audit", randomUtil.randomBoolean(10));
        }

        // Complete the instance's waiting user task with the audit decision.
        Task currentTask = execution.getProcessEngineServices().getTaskService().createTaskQuery().processInstanceBusinessKey(busKey).singleResult();
        execution.getProcessEngineServices().getTaskService().complete(currentTask.getId(), vars);
    }
}
| 486 |
1,007 | <reponame>AkshayJainG/veles
/*! @file engine.cc
* @brief Class which is responsible for scheduling unit runs.
* @author <NAME> <<EMAIL>>
* @version 1.0
*
* @section Notes
* This code partially conforms to <a href="http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml">Google C++ Style Guide</a>.
*
* @section Copyright
* Copyright © 2015 Samsung R&D Institute Russia
*
* @section License
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include "src/engine.h"
#include "src/thread_pool.h"
#include "inc/veles/engine.h"
namespace veles {
/// Runs every registered on-finish callback (map iteration order).
void Engine::Finish() {
  assert(this && "Engine is nullptr");
  for (auto& cb : callbacks_) {
    cb.second();
  }
}
/// Registers a callback to run on Finish(); returns a key usable with
/// UnregisterOnFinish().
int Engine::RegisterOnFinish(const Callable& callback) noexcept {
  assert(this && "Engine is nullptr");
  callbacks_[counter_] = callback;
  return counter_++;
}
/// Removes the callback registered under key; returns false if unknown.
bool Engine::UnregisterOnFinish(int key) noexcept {
  auto it = callbacks_.find(key);
  if (it == callbacks_.end()) {
    return false;
  }
  callbacks_.erase(it);
  return true;
}
namespace internal {
/// Creates the backing thread pool with the requested number of workers.
ThreadPoolEngine::ThreadPoolEngine(size_t threads_number)
    : pool_(new ThreadPool(threads_number)) {
  DBG("Launched a thread pool with %d threads",
      static_cast<int>(threads_number));
}
/// Queues the callable for execution on the pool.
void ThreadPoolEngine::Schedule(const Callable& callable) {
  pool_->enqueue(callable);
}
} // namespace internal
/// Factory: returns a fresh thread-pool engine with two worker threads.
std::shared_ptr<Engine> GetEngine() {
  return std::make_shared<internal::ThreadPoolEngine>(2);
}
} // namespace veles
| 707 |
1,112 | package com.survivingwithandroid.weathermap;
import android.support.v4.app.FragmentActivity;
import android.os.Bundle;
import android.view.View;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.Spinner;
import com.google.android.gms.maps.GoogleMap;
import com.google.android.gms.maps.SupportMapFragment;
import com.google.android.gms.maps.model.LatLng;
import com.google.android.gms.maps.model.MarkerOptions;
import com.google.android.gms.maps.model.TileOverlay;
import com.google.android.gms.maps.model.TileOverlayOptions;
import com.google.android.gms.maps.model.TileProvider;
import com.google.android.gms.maps.model.UrlTileProvider;
import java.net.MalformedURLException;
import java.net.URL;
public class MapsActivity extends FragmentActivity {
private GoogleMap mMap; // Might be null if Google Play services APK is not available.
private static String OWM_TILE_URL = "http://tile.openweathermap.org/map/%s/%d/%d/%d.png";
private Spinner spinner;
private String tileType = "clouds";
private TileOverlay tileOver;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_maps);
spinner = (Spinner) findViewById(R.id.tileType);
String[] tileName = new String[]{"Clouds", "Temperature", "Precipitations", "Snow", "Rain", "Wind", "Sea level press."};
ArrayAdapter<String> adpt = new ArrayAdapter<String>(this, android.R.layout.simple_spinner_item, tileName);
spinner.setAdapter(adpt);
spinner.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() {
@Override
public void onNothingSelected(AdapterView<?> parent) {
}
@Override
public void onItemSelected(AdapterView<?> parent, View view, int position, long id) {
// Check click
switch (position) {
case 0:
tileType = "clouds";
break;
case 1:
tileType = "temp";
break;
case 2:
tileType = "precipitation";
break;
case 3:
tileType = "snow";
break;
case 4:
tileType = "rain";
break;
case 5:
tileType = "wind";
break;
case 6:
tileType = "pressure";
break;
}
if (mMap != null) {
tileOver.remove();
setUpMap();
}
}
});
setUpMapIfNeeded();
}
@Override
protected void onResume() {
super.onResume();
setUpMapIfNeeded();
}
/**
 * Sets up the map if it is possible to do so (i.e., the Google Play services APK is correctly
 * installed) and the map has not already been instantiated. This will ensure that we only ever
 * call {@link #setUpMap()} once when {@link #mMap} is not null.
 * <p/>
 * If it isn't installed {@link SupportMapFragment} (and
 * {@link com.google.android.gms.maps.MapView MapView}) will show a prompt for the user to
 * install/update the Google Play services APK on their device.
 * <p/>
 * A user can return to this FragmentActivity after following the prompt and correctly
 * installing/updating/enabling the Google Play services. Since the FragmentActivity may not
 * have been completely destroyed during this process (it is likely that it would only be
 * stopped or paused), {@link #onCreate(Bundle)} may not be called again so we should call this
 * method in {@link #onResume()} to guarantee that it will be called.
 */
private void setUpMapIfNeeded() {
    // Do a null check to confirm that we have not already instantiated the map.
    if (mMap == null) {
        // Try to obtain the map from the SupportMapFragment.
        mMap = ((SupportMapFragment) getSupportFragmentManager().findFragmentById(R.id.map))
                .getMap();
        // Check if we were successful in obtaining the map.
        if (mMap != null) {
            setUpMap();
        }
    }
}
/**
 * Installs the weather tile overlay for the currently selected
 * {@code tileType}. Should only be called when {@link #mMap} is not null;
 * the previous overlay (if any) must be removed by the caller first.
 */
private void setUpMap() {
    // Add weather tile
    //tileOver = mMap.addTileOverlay(new TileOverlayOptions().tileProvider(createTilePovider()));
    tileOver = mMap.addTileOverlay(new TileOverlayOptions().tileProvider(createTransparentTileProvider()));
}
/** Tile provider backed by {@link TransparentTileOWM} for the current {@code tileType}. */
private TileProvider createTransparentTileProvider() {
    return new TransparentTileOWM(tileType);
}
/**
 * Builds a plain {@link UrlTileProvider} that fetches tiles directly from
 * the URL template in {@code OWM_TILE_URL}.
 * NOTE(review): name looks like a typo for "createTileProvider"; it is only
 * referenced from the commented-out call in {@link #setUpMap()}.
 */
private TileProvider createTilePovider() {
    TileProvider tileProvider = new UrlTileProvider(256, 256) {
        @Override
        public URL getTileUrl(int x, int y, int zoom) {
            // Fall back to "clouds" when no layer has been chosen yet.
            String fUrl = String.format(OWM_TILE_URL, tileType == null ? "clouds" : tileType, zoom, x, y);
            URL url = null;
            try {
                url = new URL(fUrl);
            }
            catch(MalformedURLException mfe) {
                mfe.printStackTrace();
            }
            return url;
        }
    };
    return tileProvider;
}
}
| 2,588 |
854 | __________________________________________________________________________________________________
sample 4 ms submission
class Solution {
private:
    // Memoized count of ordered sequences of `candidates` summing to
    // `remaining`; memo[t] == -1 marks "not yet computed".
    int countWays(vector<int>& memo, int remaining, vector<int>& candidates) {
        if (remaining == 0)
            return 1;  // exactly one way: the empty sequence
        int& cached = memo[remaining];
        if (cached != -1)
            return cached;
        int total = 0;
        for (int candidate : candidates) {
            if (candidate <= remaining)
                total += countWays(memo, remaining - candidate, candidates);
        }
        return cached = total;
    }
public:
    // Number of ordered combinations (permutations) of nums summing to target.
    int combinationSum4(vector<int>& nums, int target) {
        vector<int> memo(target + 1, -1);
        return countWays(memo, target, nums);
    }
};
__________________________________________________________________________________________________
sample 8372 kb submission
class Solution {
public:
    // Count ordered combinations (permutations) of nums summing to target.
    //
    // Bottom-up DP: dp[i] = number of sequences summing to i, built by
    // extending every sequence summing to i - nums[j] with one more element.
    // Counts are accumulated in unsigned 64-bit arithmetic because
    // intermediate values can exceed INT_MAX even when the final answer fits.
    int combinationSum4(vector<int>& nums, int target) {
        // std::vector instead of `unsigned long long dp[target + 1]`:
        // variable-length arrays are a C99/compiler extension, not standard
        // C++, and large targets could overflow the stack.
        vector<unsigned long long> dp(target + 1, 0);
        dp[0] = 1;  // one way (the empty sequence) to reach sum 0
        for (int i = 1; i <= target; i++) {
            for (size_t j = 0; j < nums.size(); j++) {
                if (nums[j] <= i)
                    dp[i] += dp[i - nums[j]];
            }
        }
        return dp[target];
    }
};
__________________________________________________________________________________________________
| 556 |
1,471 | # TODO: Create VyperWarning class similarly to what is being done with exceptions?
class ContractSizeLimitWarning(Warning):
    """Warning category for contract size limit violations."""
| 35 |
777 | <reponame>google-ar/chromium<filename>components/offline_pages/core/background/scheduler_stub.cc
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/offline_pages/core/background/scheduler_stub.h"
namespace offline_pages {
// Test double: records scheduling calls and their arguments instead of
// talking to a real platform scheduler.
SchedulerStub::SchedulerStub()
    : schedule_called_(false),
      backup_schedule_called_(false),
      unschedule_called_(false),
      schedule_delay_(0L),
      conditions_(false, 0, false) {}
SchedulerStub::~SchedulerStub() {}
// Records that Schedule() was invoked and captures the trigger conditions
// for later inspection by tests.
void SchedulerStub::Schedule(const TriggerConditions& trigger_conditions) {
  schedule_called_ = true;
  conditions_ = trigger_conditions;
}
// Records a backup-schedule request, capturing both the requested delay and
// the trigger conditions.
void SchedulerStub::BackupSchedule(const TriggerConditions& trigger_conditions,
                                   long delay_in_seconds) {
  backup_schedule_called_ = true;
  schedule_delay_ = delay_in_seconds;
  conditions_ = trigger_conditions;
}
// Records that Unschedule() was invoked.
void SchedulerStub::Unschedule() {
  unschedule_called_ = true;
}
} // namespace offline_pages
| 389 |
1,099 | package com.example.commonlibrary.mvp.presenter;
import com.example.commonlibrary.mvp.view.IView;
import com.example.commonlibrary.mvp.model.BaseModel;
import io.reactivex.disposables.CompositeDisposable;
import io.reactivex.disposables.Disposable;
/**
 * Base MVP presenter that ties a view to a model and manages RxJava
 * subscriptions so they are all disposed together with the presenter.
 *
 * @param <V> the attached view type
 * @param <M> the backing model type
 */
public class BasePresenter<V extends IView, M extends BaseModel> implements IPresenter {
    // Collects every subscription started through addDispose().
    private CompositeDisposable mCompositeDisposable;
    protected V iView;
    protected M baseModel;

    public M getBaseModel() {
        return baseModel;
    }

    public BasePresenter(V iView, M baseModel) {
        this.iView = iView;
        this.baseModel = baseModel;
    }

    /**
     * Tears the presenter down: cancels all subscriptions first, then
     * releases the view and model references so they cannot leak.
     */
    @Override
    public void onDestroy() {
        unDispose();
        if (iView != null) {
            iView = null;
        }
        if (baseModel != null) {
            baseModel.onDestroy();
        }
        this.mCompositeDisposable = null;
    }

    /** Registers a subscription for centralized disposal (lazily creating the pool). */
    protected void addDispose(Disposable disposable) {
        if (mCompositeDisposable == null) {
            mCompositeDisposable = new CompositeDisposable();
        }
        mCompositeDisposable.add(disposable); // pool all disposables so they can be handled centrally
    }

    public CompositeDisposable getCompositeDisposable() {
        if (mCompositeDisposable == null) {
            mCompositeDisposable = new CompositeDisposable();
        }
        return mCompositeDisposable;
    }

    /** Disposes and clears every pooled subscription, if any. */
    public void unDispose() {
        if (mCompositeDisposable != null) {
            if (!mCompositeDisposable.isDisposed()) {
                mCompositeDisposable.dispose();
            }
            mCompositeDisposable.clear(); // guarantee all in-flight subscriptions are cancelled when the activity ends
        }
    }
}
| 758 |
758 | <gh_stars>100-1000
from __future__ import absolute_import, division, print_function

import warnings
from collections import OrderedDict
from collections.abc import Iterator
from io import BytesIO

import dask.array as da
import numba as nb
import numpy as np
import toolz as tz
import xarray as xr
from PIL.Image import fromarray

from datashader.colors import rgb, Sets1to3
from datashader.composite import composite_op_lookup, over, validate_operator
from datashader.utils import nansum_missing, ngjit, orient_array
try:
import cupy
except Exception:
cupy = None
__all__ = ['Image', 'stack', 'shade', 'set_background', 'spread', 'dynspread']
class Image(xr.DataArray):
    """A 2D array of packed 32-bit RGBA pixel values, as an
    ``xarray.DataArray`` subclass with rich display support for Jupyter
    notebooks.
    """
    # Empty __slots__: this subclass adds no per-instance state beyond
    # what DataArray provides.
    __slots__ = ()
    # High priority so numpy binary operations involving an Image defer to
    # this class instead of returning a plain ndarray.
    __array_priority__ = 70
    # Border width (px) used by the HTML repr below.
    border=1

    def to_pil(self, origin='lower'):
        """Return the image as a PIL RGBA image; with ``origin='lower'``
        (default) rows are flipped so the first row renders at the bottom."""
        data = self.data
        if cupy:
            data = cupy.asnumpy(data)
        arr = np.flipud(data) if origin == 'lower' else data
        return fromarray(arr, 'RGBA')

    def to_bytesio(self, format='png', origin='lower'):
        """Return a rewound BytesIO holding the image encoded as *format*."""
        fp = BytesIO()
        self.to_pil(origin).save(fp, format)
        fp.seek(0)
        return fp

    def _repr_png_(self):
        """Supports rich PNG display in a Jupyter notebook"""
        return self.to_pil()._repr_png_()

    def _repr_html_(self):
        """Supports rich HTML display in a Jupyter notebook"""
        # imported here to avoid depending on these packages unless actually used
        from io import BytesIO
        from base64 import b64encode
        b = BytesIO()
        self.to_pil().save(b, format='png')
        h = """<img style="margin: auto; border:""" + str(self.border) + """px solid" """ + \
            """src='data:image/png;base64,{0}'/>""".\
            format(b64encode(b.getvalue()).decode('utf-8'))
        return h
class Images(object):
    """
    A list of HTML-representable objects to display in a table.
    Primarily intended for Image objects, but could be anything
    that has _repr_html_.
    """
    def __init__(self, *images):
        """Makes an HTML table from a list of HTML-representable arguments."""
        for img in images:
            assert hasattr(img, "_repr_html_")
        self.images = images
        self.num_cols = None

    def cols(self, n):
        """
        Set the number of columns to use in the HTML table.
        Returns self for convenience.
        """
        self.num_cols = n
        return self

    def _repr_html_(self):
        """Supports rich display in a Jupyter notebook, using an HTML table"""
        row_open = """<tr style="background-color:white">"""
        cells = []
        column = 0
        for img in self.images:
            caption = img.name if getattr(img, "name", None) is not None else ""
            cells.append("""<td style="text-align: center"><b>""" + caption +
                         """</b><br><br>{0}</td>""".format(img._repr_html_()))
            column += 1
            # Wrap to a new table row once the configured column count is hit.
            if self.num_cols is not None and column >= self.num_cols:
                column = 0
                cells.append("</tr>" + row_open)
        return ("""<table style="width:100%; text-align: center"><tbody>""" +
                row_open + "".join(cells) + """</tr></tbody></table>""")
def stack(*imgs, **kwargs):
    """Combine images together, overlaying later images onto earlier ones.

    Parameters
    ----------
    imgs : iterable of Image
        The images to combine.
    how : str, optional
        The compositing operator to combine pixels. Default is `'over'`.
    name : str, optional
        Name to give the resulting Image.
    """
    if not imgs:
        raise ValueError("No images passed in")
    shapes = []
    for i in imgs:
        if not isinstance(i, Image):
            raise TypeError("Expected `Image`, got: `{0}`".format(type(i)))
        elif not shapes:
            shapes.append(i.shape)
        elif shapes and i.shape not in shapes:
            raise ValueError("The stacked images must have the same shape.")
    name = kwargs.get('name', None)
    op = composite_op_lookup[kwargs.get('how', 'over')]
    if len(imgs) == 1:
        return imgs[0]
    # Align coordinates so images with different extents can be composited.
    imgs = xr.align(*imgs, copy=False, join='outer')
    with np.errstate(divide='ignore', invalid='ignore'):
        # tz.flip swaps the operand order so that later images end up
        # composited on top of earlier ones.
        out = tz.reduce(tz.flip(op), [i.data for i in imgs])
    return Image(out, coords=imgs[0].coords, dims=imgs[0].dims, name=name)
def eq_hist(data, mask=None, nbins=256*256):
    """Return a numpy array after histogram equalization.

    For use in `shade`.

    Parameters
    ----------
    data : ndarray
    mask : ndarray, optional
        Boolean array of missing points. Where True, the output will be `NaN`.
    nbins : int, optional
        Maximum number of bins to use. If data is of type boolean or integer
        this will determine when to switch from exact unique value counts to
        a binned histogram.

    Returns
    -------
    out : ndarray
        Equalized data, with NaN where ``mask`` was True.
    discrete_levels : int or None
        Number of distinct values when an exact count was used; None when a
        binned histogram was used.

    Notes
    -----
    This function is adapted from the implementation in scikit-image [1]_.

    References
    ----------
    .. [1] http://scikit-image.org/docs/stable/api/skimage.exposure.html#equalize-hist
    """
    if cupy and isinstance(data, cupy.ndarray):
        from ._cuda_utils import interp
    elif not isinstance(data, np.ndarray):
        raise TypeError("data must be an ndarray")
    else:
        interp = np.interp
    data2 = data if mask is None else data[~mask]
    # Run more accurate value counting if data is of boolean or integer type
    # and unique value array is smaller than nbins.
    # np.ptp(data2) rather than data2.ptp(): the ndarray.ptp *method* was
    # removed in NumPy 2.0; the free function remains available.
    if data2.dtype == bool or (np.issubdtype(data2.dtype, np.integer) and np.ptp(data2) < nbins):
        values, counts = np.unique(data2, return_counts=True)
        vmin, vmax = values[0], values[-1]
        interval = vmax-vmin
        bin_centers = np.arange(vmin, vmax+1)
        hist = np.zeros(interval+1, dtype='uint64')
        hist[values-vmin] = counts
        discrete_levels = len(values)
    else:
        hist, bin_edges = np.histogram(data2, bins=nbins)
        bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
        discrete_levels = None
    # Normalized cumulative distribution, used as the equalizing transform.
    cdf = hist.cumsum()
    cdf = cdf / float(cdf[-1])
    out = interp(data, bin_centers, cdf).reshape(data.shape)
    return out if mask is None else np.where(mask, np.nan, out), discrete_levels
# Maps the interpolation-method names accepted by shade(how=...) to callables
# taking (data, mask); eq_hist additionally returns a discrete-levels count.
_interpolate_lookup = {'log': lambda d, m: np.log1p(np.where(m, np.nan, d)),
                       'cbrt': lambda d, m: np.where(m, np.nan, d)**(1/3.),
                       'linear': lambda d, m: np.where(m, np.nan, d),
                       'eq_hist': eq_hist}
def _normalize_interpolate_how(how):
if callable(how):
return how
elif how in _interpolate_lookup:
return _interpolate_lookup[how]
raise ValueError("Unknown interpolation method: {0}".format(how))
def _rescale_discrete_levels(discrete_levels, span):
if discrete_levels is None:
raise ValueError("interpolator did not return a valid discrete_levels")
# Straight line y = mx + c through (2, 1.5) and (100, 1) where
# x is number of discrete_levels and y is lower span limit.
m = -0.5/98.0 # (y[1] - y[0]) / (x[1] - x[0])
c = 1.5 - 2*m # y[0] - m*x[0]
multiple = m*discrete_levels + c
if multiple > 1:
lower_span = max(span[1] - multiple*(span[1] - span[0]), 0)
span = (lower_span, 1)
return span
def _interpolate(agg, cmap, how, alpha, span, min_alpha, name, rescale_discrete_levels):
    """Colormap a 2D scalar aggregate into an RGBA ``Image``.

    Values are transformed by ``how`` and then mapped into ``cmap`` (a color
    list, a single color, or a matplotlib colormap); masked pixels (NaN, or 0
    for unsigned dtypes) become fully transparent. Helper for the 2D branch
    of ``shade``.
    """
    if cupy and isinstance(agg.data, cupy.ndarray):
        from ._cuda_utils import masked_clip_2d, interp
    else:
        from ._cpu_utils import masked_clip_2d
        interp = np.interp
    if agg.ndim != 2:
        raise ValueError("agg must be 2D")
    interpolater = _normalize_interpolate_how(how)
    data = orient_array(agg)
    if isinstance(data, da.Array):
        data = data.compute()
    else:
        # Copy so the transforms below don't mutate the caller's aggregate.
        data = data.copy()
    # Compute mask
    if np.issubdtype(data.dtype, np.bool_):
        mask = ~data
        data = data.astype(np.int8)
    else:
        if data.dtype.kind == 'u':
            # Unsigned dtypes use 0 as the "no data" marker.
            mask = data == 0
        else:
            mask = np.isnan(data)
    # Handle case where everything is masked out
    if mask.all():
        return Image(np.zeros(shape=agg.data.shape,
                              dtype=np.uint32), coords=agg.coords,
                     dims=agg.dims, attrs=agg.attrs, name=name)
    # Handle offset / clip
    if span is None:
        offset = np.nanmin(data[~mask])
    else:
        offset = np.array(span, dtype=data.dtype)[0]
        masked_clip_2d(data, mask, *span)
    # If log/cbrt, could case to float64 right away
    # If linear, can keep current type
    data -= offset
    with np.errstate(invalid="ignore", divide="ignore"):
        # Transform data (log, eq_hist, etc.)
        data = interpolater(data, mask)
        discrete_levels = None
        if isinstance(data, (list, tuple)):
            data, discrete_levels = data
        # Transform span
        if span is None:
            masked_data = np.where(~mask, data, np.nan)
            span = np.nanmin(masked_data), np.nanmax(masked_data)
            if rescale_discrete_levels and discrete_levels is not None:  # Only valid for how='eq_hist'
                span = _rescale_discrete_levels(discrete_levels, span)
        else:
            if how == 'eq_hist':
                # For eq_hist to work with span, we'd need to compute the histogram
                # only on the specified span's range.
                raise ValueError("span is not (yet) valid to use with eq_hist")
            span = interpolater([0, span[1] - span[0]], 0)
    if isinstance(cmap, Iterator):
        cmap = list(cmap)
    if isinstance(cmap, list):
        # List of colors: interpolate the R/G/B channels independently.
        rspan, gspan, bspan = np.array(list(zip(*map(rgb, cmap))))
        span = np.linspace(span[0], span[1], len(cmap))
        r = interp(data, span, rspan, left=255).astype(np.uint8)
        g = interp(data, span, gspan, left=255).astype(np.uint8)
        b = interp(data, span, bspan, left=255).astype(np.uint8)
        a = np.where(np.isnan(data), 0, alpha).astype(np.uint8)
        rgba = np.dstack([r, g, b, a])
    elif isinstance(cmap, str) or isinstance(cmap, tuple):
        # Single color: keep RGB fixed and encode the value in the alpha channel.
        color = rgb(cmap)
        aspan = np.arange(min_alpha, alpha+1)
        span = np.linspace(span[0], span[1], len(aspan))
        r = np.full(data.shape, color[0], dtype=np.uint8)
        g = np.full(data.shape, color[1], dtype=np.uint8)
        b = np.full(data.shape, color[2], dtype=np.uint8)
        a = interp(data, span, aspan, left=0, right=255).astype(np.uint8)
        rgba = np.dstack([r, g, b, a])
    elif callable(cmap):
        # Assume callable is matplotlib colormap
        scaled_data = (data - span[0])/(span[1] - span[0])
        if cupy and isinstance(scaled_data, cupy.ndarray):
            # Convert cupy array to numpy before passing to matplotlib colormap
            scaled_data = cupy.asnumpy(scaled_data)
        rgba = cmap(scaled_data, bytes=True)
        rgba[:, :, 3] = np.where(np.isnan(scaled_data), 0, alpha).astype(np.uint8)
    else:
        raise TypeError("Expected `cmap` of `matplotlib.colors.Colormap`, "
                        "`list`, `str`, or `tuple`; got: '{0}'".format(type(cmap)))
    # Pack the RGBA byte planes into a single uint32 per pixel.
    img = rgba.view(np.uint32).reshape(data.shape)
    if cupy and isinstance(img, cupy.ndarray):
        # Convert cupy array to numpy for final image
        img = cupy.asnumpy(img)
    return Image(img, coords=agg.coords, dims=agg.dims, name=name)
def _colorize(agg, color_key, how, alpha, span, min_alpha, name, color_baseline, rescale_discrete_levels):
    """Blend a 3D categorical aggregate into an RGBA ``Image``.

    Each pixel's RGB is the average of the per-category colors weighted by
    that pixel's category values (relative to ``color_baseline``); alpha
    encodes the total value via ``how``. Helper for the 3D branch of
    ``shade``.
    """
    if cupy and isinstance(agg.data, cupy.ndarray):
        array = cupy.array
    else:
        array = np.array
    if not agg.ndim == 3:
        raise ValueError("agg must be 3D")
    cats = agg.indexes[agg.dims[-1]]
    if not len(cats): # No categories and therefore no data; return an empty image
        return Image(np.zeros(agg.shape[0:2], dtype=np.uint32), dims=agg.dims[:-1],
                     coords=OrderedDict([
                         (agg.dims[1], agg.coords[agg.dims[1]]),
                         (agg.dims[0], agg.coords[agg.dims[0]]) ]), name=name)
    if color_key is None:
        raise ValueError("Color key must be provided, with at least as many " +
                         "colors as there are categorical fields")
    if not isinstance(color_key, dict):
        color_key = dict(zip(cats, color_key))
    if len(color_key) < len(cats):
        raise ValueError("Insufficient colors provided ({}) for the categorical fields available ({})"
                         .format(len(color_key), len(cats)))
    colors = [rgb(color_key[c]) for c in cats]
    rs, gs, bs = map(array, zip(*colors))
    # Reorient array (transposing the category dimension first)
    agg_t = agg.transpose(*((agg.dims[-1],)+agg.dims[:2]))
    data = orient_array(agg_t).transpose([1, 2, 0])
    if isinstance(data, da.Array):
        data = data.compute()
    color_data = data.copy()
    # subtract color_baseline if needed
    baseline = np.nanmin(color_data) if color_baseline is None else color_baseline
    with np.errstate(invalid='ignore'):
        if baseline > 0:
            color_data -= baseline
        elif baseline < 0:
            color_data += -baseline
        if color_data.dtype.kind != 'u' and color_baseline is not None:
            color_data[color_data<0]=0
    color_total = nansum_missing(color_data, axis=2)
    # dot does not handle nans, so replace with zeros
    color_data[np.isnan(data)] = 0
    # zero-count pixels will be 0/0, but it's safe to ignore that when dividing
    with np.errstate(divide='ignore', invalid='ignore'):
        r = (color_data.dot(rs)/color_total).astype(np.uint8)
        g = (color_data.dot(gs)/color_total).astype(np.uint8)
        b = (color_data.dot(bs)/color_total).astype(np.uint8)
    # special case -- to give an appropriate color when min_alpha != 0 and data=0,
    # take avg color of all non-nan categories
    color_mask = ~np.isnan(data)
    cmask_sum = np.sum(color_mask, axis=2)
    with np.errstate(divide='ignore', invalid='ignore'):
        r2 = (color_mask.dot(rs)/cmask_sum).astype(np.uint8)
        g2 = (color_mask.dot(gs)/cmask_sum).astype(np.uint8)
        b2 = (color_mask.dot(bs)/cmask_sum).astype(np.uint8)
    missing_colors = np.sum(color_data, axis=2) == 0
    r = np.where(missing_colors, r2, r)
    g = np.where(missing_colors, g2, g)
    b = np.where(missing_colors, b2, b)
    # Alpha is driven by the total across categories, mapped via `how`.
    total = nansum_missing(data, axis=2)
    mask = np.isnan(total)
    a = _interpolate_alpha(data, total, mask, how, alpha, span, min_alpha, rescale_discrete_levels)
    values = np.dstack([r, g, b, a]).view(np.uint32).reshape(a.shape)
    if cupy and isinstance(values, cupy.ndarray):
        # Convert cupy array to numpy for final image
        values = cupy.asnumpy(values)
    return Image(values,
                 dims=agg.dims[:-1],
                 coords=OrderedDict([
                     (agg.dims[1], agg.coords[agg.dims[1]]),
                     (agg.dims[0], agg.coords[agg.dims[0]]),
                 ]),
                 name=name)
def _interpolate_alpha(data, total, mask, how, alpha, span, min_alpha, rescale_discrete_levels):
    """Compute the uint8 alpha channel for a categorical (3D) aggregate.

    ``total`` (the per-pixel sum across categories) is transformed by ``how``
    and linearly mapped onto [min_alpha, alpha]; masked pixels (and zeros,
    for unsigned aggregates) become fully transparent.

    Fix: uses the stdlib ``warnings`` module instead of ``np.warnings`` —
    that accidental alias was deprecated in NumPy 1.25 and removed in 2.0.
    """
    if cupy and isinstance(data, cupy.ndarray):
        from ._cuda_utils import interp, masked_clip_2d
        array = cupy.array
    else:
        from ._cpu_utils import masked_clip_2d
        interp = np.interp
        array = np.array
    # if span is provided, use it, otherwise produce a span based off the
    # min/max of the data
    if span is None:
        offset = np.nanmin(total)
        if total.dtype.kind == 'u' and offset == 0:
            mask = mask | (total == 0)
            # If at least one element is not masked, use the minimum as the offset
            # otherwise the offset remains at zero
            if not np.all(mask):
                offset = total[total > 0].min()
            total = np.where(~mask, total, np.nan)
        a_scaled = _normalize_interpolate_how(how)(total - offset, mask)
        discrete_levels = None
        if isinstance(a_scaled, (list, tuple)):
            a_scaled, discrete_levels = a_scaled
        # All-NaN objects (e.g. chunks of arrays with no data) are valid in Datashader
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
            norm_span = [np.nanmin(a_scaled).item(), np.nanmax(a_scaled).item()]
        if rescale_discrete_levels and discrete_levels is not None:  # Only valid for how='eq_hist'
            norm_span = _rescale_discrete_levels(discrete_levels, norm_span)
    else:
        if how == 'eq_hist':
            # For eq_hist to work with span, we'll need to compute the histogram
            # only on the specified span's range.
            raise ValueError("span is not (yet) valid to use with eq_hist")
        # even in fixed-span mode cells with 0 should remain fully transparent
        # i.e. a 0 will be fully transparent, but any non-zero number will
        # be clipped to the span range and have min-alpha applied
        offset = np.array(span, dtype=data.dtype)[0]
        if total.dtype.kind == 'u' and np.nanmin(total) == 0:
            mask = mask | (total <= 0)
            total = np.where(~mask, total, np.nan)
        masked_clip_2d(total, mask, *span)
        a_scaled = _normalize_interpolate_how(how)(total - offset, mask)
        if isinstance(a_scaled, (list, tuple)):
            a_scaled = a_scaled[0]  # Ignore discrete_levels
        norm_span = _normalize_interpolate_how(how)([0, span[1] - span[0]], 0)
        if isinstance(norm_span, (list, tuple)):
            norm_span = norm_span[0]  # Ignore discrete_levels
    # Interpolate the alpha values
    a = interp(a_scaled, array(norm_span), array([min_alpha, alpha]),
               left=0, right=255).astype(np.uint8)
    return a
def _apply_discrete_colorkey(agg, color_key, alpha, name, color_baseline):
    """Color a 2D aggregate by exact value lookup in ``color_key``.

    Pixels whose value equals one of the color_key categories receive that
    category's color with the given ``alpha``; all other pixels are fully
    transparent. Helper for the 2D-categorical branch of ``shade``.
    """
    # use the same approach as 3D case
    if cupy and isinstance(agg.data, cupy.ndarray):
        module = cupy
        array = cupy.array
    else:
        module = np
        array = np.array
    if not agg.ndim == 2:
        raise ValueError("agg must be 2D")
    # validate color_key
    if (color_key is None) or (not isinstance(color_key, dict)):
        raise ValueError("Color key must be provided as a dictionary")
    agg_data = agg.data
    if isinstance(agg_data, da.Array):
        agg_data = agg_data.compute()
    cats = color_key.keys()
    colors = [rgb(color_key[c]) for c in cats]
    rs, gs, bs = map(array, zip(*colors))
    # data is 1 where the pixel matched some category, NaN elsewhere;
    # r2/g2/b2 hold the matched category's color per pixel.
    data = module.empty_like(agg_data) * module.nan
    r = module.zeros_like(data, dtype=module.uint8)
    g = module.zeros_like(data, dtype=module.uint8)
    b = module.zeros_like(data, dtype=module.uint8)
    r2 = module.zeros_like(data, dtype=module.uint8)
    g2 = module.zeros_like(data, dtype=module.uint8)
    b2 = module.zeros_like(data, dtype=module.uint8)
    for i, c in enumerate(cats):
        value_mask = agg_data == c
        data[value_mask] = 1
        r2[value_mask] = rs[i]
        g2[value_mask] = gs[i]
        b2[value_mask] = bs[i]
    color_data = data.copy()
    # subtract color_baseline if needed
    baseline = module.nanmin(color_data) if color_baseline is None else color_baseline
    with np.errstate(invalid='ignore'):
        if baseline > 0:
            color_data -= baseline
        elif baseline < 0:
            color_data += -baseline
        if color_data.dtype.kind != 'u' and color_baseline is not None:
            color_data[color_data < 0] = 0
    color_data[module.isnan(data)] = 0
    if not color_data.any():
        r[:] = r2
        g[:] = g2
        b[:] = b2
    missing_colors = color_data == 0
    r = module.where(missing_colors, r2, r)
    g = module.where(missing_colors, g2, g)
    b = module.where(missing_colors, b2, b)
    # alpha channel
    a = np.where(np.isnan(data), 0, alpha).astype(np.uint8)
    values = module.dstack([r, g, b, a]).view(module.uint32).reshape(a.shape)
    if cupy and isinstance(agg.data, cupy.ndarray):
        # Convert cupy array to numpy for final image
        values = cupy.asnumpy(values)
    return Image(values,
                 dims=agg.dims,
                 coords=agg.coords,
                 name=name
                 )
def shade(agg, cmap=["lightblue", "darkblue"], color_key=Sets1to3,
          how='eq_hist', alpha=255, min_alpha=40, span=None, name=None,
          color_baseline=None, rescale_discrete_levels=False):
    """Convert a DataArray to an image by choosing an RGBA pixel color for each value.

    Requires a DataArray with a single data dimension, here called the
    "value", indexed using either 2D or 3D coordinates.

    For a DataArray with 2D coordinates, the RGB channels are computed
    from the values by interpolated lookup into the given colormap
    ``cmap``. The A channel is then set to the given fixed ``alpha``
    value for all non-zero values, and to zero for all zero values.
    A dictionary ``color_key`` that specifies categories (values in ``agg``)
    and corresponding colors can be provided to support discrete coloring
    2D aggregates, i.e aggregates with a single category per pixel,
    with no mixing. The A channel is set the given ``alpha`` value for all
    pixels in the categories specified in ``color_key``, and to zero otherwise.

    DataArrays with 3D coordinates are expected to contain values
    distributed over different categories that are indexed by the
    additional coordinate. Such an array would reduce to the
    2D-coordinate case if collapsed across the categories (e.g. if one
    did ``aggc.sum(dim='cat')`` for a categorical dimension ``cat``).

    The RGB channels for the uncollapsed, 3D case are mixed from separate
    values over all categories. They are computed by averaging the colors
    in the provided ``color_key`` (with one color per category),
    weighted by the array's value for that category.
    The A channel is then computed from the array's total value
    collapsed across all categories at that location, ranging from the
    specified ``min_alpha`` to the maximum alpha value (255).

    Parameters
    ----------
    agg : DataArray
    cmap : list of colors or matplotlib.colors.Colormap, optional
        The colormap to use for 2D agg arrays. Can be either a list of
        colors (specified either by name, RGBA hexcode, or as a tuple
        of ``(red, green, blue)`` values.), or a matplotlib colormap
        object. Default is ``["lightblue", "darkblue"]``.
    color_key : dict or iterable
        The colors to use for a categorical agg array. In 3D case, it can be
        either a ``dict`` mapping from field name to colors, or an
        iterable of colors in the same order as the record fields,
        and including at least that many distinct colors. In 2D case,
        ``color_key`` must be a ``dict`` where all keys are categories,
        and values are corresponding colors. Number of categories does not
        necessarily equal to the number of unique values in the agg DataArray.
    how : str or callable, optional
        The interpolation method to use, for the ``cmap`` of a 2D
        DataArray or the alpha channel of a 3D DataArray. Valid
        strings are 'eq_hist' [default], 'cbrt' (cube root), 'log'
        (logarithmic), and 'linear'. Callables take 2 arguments - a
        2-dimensional array of magnitudes at each pixel, and a boolean
        mask array indicating missingness. They should return a numeric
        array of the same shape, with ``NaN`` values where the mask was
        True.
    alpha : int, optional
        Value between 0 - 255 representing the alpha value to use for
        colormapped pixels that contain data (i.e. non-NaN values).
        Also used as the maximum alpha value when alpha is indicating
        data value, such as for single colors or categorical plots.
        Regardless of this value, ``NaN`` values are set to be fully
        transparent when doing colormapping.
    min_alpha : float, optional
        The minimum alpha value to use for non-empty pixels when
        alpha is indicating data value, in [0, 255]. Use a higher value
        to avoid undersaturation, i.e. poorly visible low-value datapoints,
        at the expense of the overall dynamic range. Note that ``min_alpha``
        will not take any effect when doing discrete categorical coloring
        for 2D case as the aggregate can have only a single value to denote
        the category.
    span : list of min-max range, optional
        Min and max data values to use for 2D colormapping,
        and 3D alpha interpolation, when wishing to override autoranging.
    name : string name, optional
        Optional string name to give to the Image object to return,
        to label results for display.
    color_baseline : float or None
        Baseline for calculating how categorical data mixes to
        determine the color of a pixel. The color for each category is
        weighted by how far that category's value is above this
        baseline value, out of the total sum across all categories'
        values. A value of zero is appropriate for counts and for
        other physical quantities for which zero is a meaningful
        reference; each category then contributes to the final color
        in proportion to how much each category contributes to the
        final sum. However, if values can be negative or if they are
        on an interval scale where values e.g. twice as far from zero
        are not twice as high (such as temperature in Fahrenheit), then
        you will need to provide a suitable baseline value for use in
        calculating color mixing. A value of None (the default) means
        to take the minimum across the entire aggregate array, which
        is safe but may not weight the colors as you expect; any
        categories with values near this baseline will contribute
        almost nothing to the final color. As a special case, if the
        only data present in a pixel is at the baseline level, the
        color will be an evenly weighted average of all such
        categories with data (to avoid the color being undefined in
        this case).
    rescale_discrete_levels : boolean, optional
        If ``how='eq_hist'`` and there are only a few discrete values,
        then ``rescale_discrete_levels=True`` decreases the lower
        limit of the autoranged span so that the values are rendering
        towards the (more visible) top of the ``cmap`` range, thus
        avoiding washout of the lower values. Has no effect if
        ``how != 'eq_hist'``. Default is False.
    """
    if not isinstance(agg, xr.DataArray):
        raise TypeError("agg must be instance of DataArray")
    name = agg.name if name is None else name
    if not ((0 <= min_alpha <= 255) and (0 <= alpha <= 255)):
        raise ValueError("min_alpha ({}) and alpha ({}) must be between 0 and 255".format(min_alpha,alpha))
    # rescale_discrete_levels is only meaningful for eq_hist (see docstring).
    if rescale_discrete_levels and how != 'eq_hist':
        rescale_discrete_levels = False
    # Dispatch to the appropriate helper based on dimensionality and
    # whether discrete categorical coloring was requested.
    if agg.ndim == 2:
        if color_key is not None and isinstance(color_key, dict):
            return _apply_discrete_colorkey(
                agg, color_key, alpha, name, color_baseline
            )
        else:
            return _interpolate(agg, cmap, how, alpha, span, min_alpha, name, rescale_discrete_levels)
    elif agg.ndim == 3:
        return _colorize(agg, color_key, how, alpha, span, min_alpha, name, color_baseline, rescale_discrete_levels)
    else:
        raise ValueError("agg must use 2D or 3D coordinates")
def set_background(img, color=None, name=None):
    """Return a new image, with the background set to `color`.

    Parameters
    ----------
    img : Image
    color : color name or tuple, optional
        The background color. Can be specified either by name, hexcode, or as a
        tuple of ``(red, green, blue)`` values.
    name : string, optional
        Name for the returned Image; defaults to ``img.name``.
    """
    if not isinstance(img, Image):
        raise TypeError("Expected `Image`, got: `{0}`".format(type(img)))
    if name is None:
        name = img.name
    if color is None:
        return img
    # Pack the RGB color plus full opacity into a single uint32 pixel value,
    # then composite the image over that solid background.
    background = np.uint8(rgb(color) + (255,)).view('uint32')[0]
    composited = over(img.data, background)
    return Image(composited, coords=img.coords, dims=img.dims, name=name)
def spread(img, px=1, shape='circle', how=None, mask=None, name=None):
    """Spread pixels in an image.

    Spreading expands each pixel a certain number of pixels on all sides
    according to a given shape, merging pixels using a specified compositing
    operator. This can be useful to make sparse plots more visible.

    Parameters
    ----------
    img : Image or other DataArray
    px : int, optional
        Number of pixels to spread on all sides
    shape : str, optional
        The shape to spread by. Options are 'circle' [default] or 'square'.
    how : str, optional
        The name of the compositing operator to use when combining
        pixels. Default of None uses 'over' operator for Image objects
        and 'add' operator otherwise.
    mask : ndarray, shape (M, M), optional
        The mask to spread over. If provided, this mask is used instead of
        generating one based on `px` and `shape`. Must be a square array
        with odd dimensions. Pixels are spread from the center of the mask to
        locations where the mask is True.
    name : string name, optional
        Optional string name to give to the Image object to return,
        to label results for display.
    """
    if not isinstance(img, xr.DataArray):
        raise TypeError("Expected `xr.DataArray`, got: `{0}`".format(type(img)))
    is_image = isinstance(img, Image)
    name = img.name if name is None else name
    if mask is None:
        if not isinstance(px, int) or px < 0:
            raise ValueError("``px`` must be an integer >= 0")
        if px == 0:
            # Nothing to spread; return the input unchanged.
            return img
        mask = _mask_lookup[shape](px)
    elif not (isinstance(mask, np.ndarray) and mask.ndim == 2 and
              mask.shape[0] == mask.shape[1] and mask.shape[0] % 2 == 1):
        raise ValueError("mask must be a square 2 dimensional ndarray with "
                         "odd dimensions.")
    mask = mask if mask.dtype == 'bool' else mask.astype('bool')
    if how is None:
        how = 'over' if is_image else 'add'
    # Pad the output buffer by the mask radius on every side so the kernel
    # can write out-of-bounds-free; the padding is trimmed afterwards.
    w = mask.shape[0]
    extra = w // 2
    M, N = img.shape[:2]
    padded_shape = (M + 2*extra, N + 2*extra)
    float_type = img.dtype in [np.float32, np.float64]
    fill_value = np.nan if float_type else 0
    if cupy and isinstance(img.data, cupy.ndarray):
        # Convert img.data to numpy array before passing to nb.jit kernels
        # NOTE(review): this mutates the caller's DataArray in place —
        # confirm that is intended.
        img.data = cupy.asnumpy(img.data)
    # Pick the kernel variant matching the data representation.
    if is_image:
        kernel = _build_spread_kernel(how, is_image)
    elif float_type:
        kernel = _build_float_kernel(how, w)
    else:
        kernel = _build_int_kernel(how, w, img.dtype == np.uint32)
    def apply_kernel(layer):
        # Spread one 2D layer into a padded buffer, then trim the padding.
        buf = np.full(padded_shape, fill_value, dtype=layer.dtype)
        kernel(layer.data, mask, buf)
        return buf[extra:-extra, extra:-extra].copy()
    if len(img.shape)==2:
        out = apply_kernel(img)
    else:
        # 3D (categorical) input: spread each category layer independently.
        out = np.dstack([apply_kernel(img[:,:,category])
                         for category in range(img.shape[2])])
    return img.__class__(out, dims=img.dims, coords=img.coords, name=name)
@tz.memoize
def _build_int_kernel(how, mask_size, ignore_zeros):
    """Build a spreading kernel for a given composite operator.

    Integer variant: for every source pixel, applies the ``how`` array
    operator at each True position of ``mask`` centred on that pixel,
    writing into the (padded) ``out`` buffer. With ``ignore_zeros`` (used
    for uint32 aggregates), 0 means "no data" and short-circuits to the
    other operand instead of applying the operator.
    """
    validate_operator(how, is_image=False)
    op = composite_op_lookup[how + "_arr"]
    @ngjit
    def stencilled(arr, mask, out):
        M, N = arr.shape
        for y in range(M):
            for x in range(N):
                el = arr[y, x]
                for i in range(mask_size):
                    for j in range(mask_size):
                        if mask[i, j]:
                            if ignore_zeros and el==0:
                                result = out[i + y, j + x]
                            elif ignore_zeros and out[i + y, j + x]==0:
                                result = el
                            else:
                                result = op(el, out[i + y, j + x])
                            out[i + y, j + x] = result
    return stencilled
@tz.memoize
def _build_float_kernel(how, mask_size):
    """Build a float-array spreading kernel for a given composite operator.

    Same contract as ``_build_int_kernel``, except that "empty" cells are
    represented by NaN (the caller fills the padded output buffer with NaN
    for float dtypes), so NaN — not zero — is the skip sentinel here.

    Returns a numba-jitted ``stencilled(arr, mask, out)`` closure.
    """
    validate_operator(how, is_image=False)
    # Aggregate (non-image) arrays use the "_arr" operator variants.
    op = composite_op_lookup[how + "_arr"]
    @ngjit
    def stencilled(arr, mask, out):
        M, N = arr.shape
        for y in range(M):
            for x in range(N):
                el = arr[y, x]
                for i in range(mask_size):
                    for j in range(mask_size):
                        if mask[i, j]:
                            # NaN marks an empty cell: keep the non-NaN side
                            # instead of propagating NaN through ``op``.
                            if np.isnan(el):
                                result = out[i + y, j + x]
                            elif np.isnan(out[i + y, j + x]):
                                result = el
                            else:
                                result = op(el, out[i + y, j + x])
                            out[i + y, j + x] = result
    return stencilled
@tz.memoize
def _build_spread_kernel(how, is_image):
    """Build a spreading kernel for a given composite operator.

    Parameters
    ----------
    how : str
        Name of the compositing operator. For images the plain operator is
        used; for arrays the ``"_arr"`` variant is looked up.
    is_image : bool
        True when spreading packed-uint32 RGBA image data (alpha in the top
        byte); False for plain float aggregate arrays.

    Returns
    -------
    callable
        Numba-jitted ``kernel(arr, mask, out)`` compositing each non-empty
        element of ``arr`` into the padded buffer ``out`` under ``mask``.
    """
    validate_operator(how, is_image=True)
    op = composite_op_lookup[how + ("" if is_image else "_arr")]
    @ngjit
    def kernel(arr, mask, out):
        M, N = arr.shape
        w = mask.shape[0]
        for y in range(M):
            for x in range(N):
                el = arr[y, x]
                # Skip if data is transparent
                process_image = is_image and ((int(el) >> 24) & 255)  # Transparent pixel
                process_array = (not is_image) and (not np.isnan(el))
                if process_image or process_array:
                    for i in range(w):
                        for j in range(w):
                            # Skip if mask is False at this value
                            if mask[i, j]:
                                # BUGFIX: the second branch was a bare `if`,
                                # which made the `el==0` branch dead and fed
                                # empty (zero) values into `op`. Use if/elif
                                # like the int/float kernels do.
                                if el == 0:
                                    result = out[i + y, j + x]
                                elif out[i + y, j + x] == 0:
                                    result = el
                                else:
                                    result = op(el, out[i + y, j + x])
                                out[i + y, j + x] = result
    return kernel
def _square_mask(px):
    """Produce a square mask with sides of length ``2 * px + 1``."""
    width = 2 * int(px) + 1
    return np.full((width, width), True, dtype='bool')
def _circle_mask(r):
    """Produce a circular mask with a diameter of ``2 * r + 1``.

    A cell is inside the circle when its Euclidean distance from the center
    is at most ``r + 0.5`` (the half-cell slack keeps the disc visually round
    for small radii).
    """
    x = np.arange(-r, r + 1, dtype='i4')
    # The comparison already yields the boolean array; the original wrapped
    # it in a redundant ``np.where(cond, True, False)``.
    return np.sqrt(x**2 + x[:, None]**2) <= r + 0.5
# Maps the user-facing ``shape`` argument of spread()/dynspread() to the
# function that builds the corresponding boolean footprint mask.
_mask_lookup = {'square': _square_mask,
                'circle': _circle_mask}
def dynspread(img, threshold=0.5, max_px=3, shape='circle', how=None, name=None):
    """Spread pixels in an image dynamically based on the image density.

    Spreading expands each pixel a certain number of pixels on all sides
    according to a given shape, merging pixels using a specified compositing
    operator. This can be useful to make sparse plots more visible. Dynamic
    spreading determines how many pixels to spread based on a density
    heuristic. Spreading starts at 1 pixel, and stops when the fraction
    of adjacent non-empty pixels reaches the specified threshold, or
    the max_px is reached, whichever comes first.

    Parameters
    ----------
    img : Image
    threshold : float, optional
        A tuning parameter in [0, 1], with higher values giving more
        spreading.
    max_px : int, optional
        Maximum number of pixels to spread on all sides.
    shape : str, optional
        The shape to spread by. Options are 'circle' [default] or 'square'.
    how : str, optional
        The name of the compositing operator to use when combining
        pixels. Default of None uses 'over' operator for Image objects
        and 'add' operator otherwise.
    name : str, optional
        Name passed through to spread() for the output; defaults to the
        input's name.
    """
    is_image = isinstance(img, Image)
    if not 0 <= threshold <= 1:
        raise ValueError("threshold must be in [0, 1]")
    if not isinstance(max_px, int) or max_px < 0:
        raise ValueError("max_px must be >= 0")
    # Simple linear search. Not super efficient, but max_px is usually small.
    float_type = img.dtype in [np.float32, np.float64]
    if cupy and isinstance(img.data, cupy.ndarray):
        # Convert img.data to numpy array before passing to nb.jit kernels
        # NOTE(review): this mutates the input image in place — callers keep
        # a CPU copy afterwards.
        img.data = cupy.asnumpy(img.data)
    px_=0
    for px in range(1, max_px + 1):
        px_=px
        # Density is measured over a (2*px)-radius neighborhood; once enough
        # non-empty pixels already have neighbors, more spreading would blur.
        if is_image:
            density = _rgb_density(img.data, px*2)
        elif len(img.shape) == 2:
            density = _array_density(img.data, float_type, px*2)
        else:
            # Multi-category aggregate: collapse categories to a per-pixel
            # count of non-empty entries, then measure density on that.
            masked = np.logical_not(np.isnan(img)) if float_type else (img != 0)
            flat_mask = np.sum(masked, axis=2, dtype='uint32')
            density = _array_density(flat_mask.data, False, px*2)
        if density > threshold:
            # This px overshot the threshold: step back to the last good one.
            px_=px_-1
            break
    if px_>=1:
        return spread(img, px_, shape=shape, how=how, name=name)
    else:
        # Even 1px of spreading was too dense — return the input unchanged.
        return img
@nb.jit(nopython=True, nogil=True, cache=True)
def _array_density(arr, float_type, px=1):
    """Compute a density heuristic of an array.

    The density is a number in [0, 1], and indicates the normalized mean number
    of non-empty pixels that have neighbors in the given px radius.
    Returns ``np.inf`` when the array has no non-empty pixels at all, which
    makes dynspread's ``density > threshold`` test stop immediately.
    """
    M, N = arr.shape
    cnt = has_neighbors = 0
    for y in range(0, M):
        for x in range(0, N):
            el = arr[y, x]
            # "Non-empty" means non-NaN for float arrays, non-zero otherwise.
            if (float_type and not np.isnan(el)) or (not float_type and el!=0):
                cnt += 1
                neighbors = 0
                # Clamp the (2*px+1)-square window to the array bounds.
                for i in range(max(0, y - px), min(y + px + 1, M)):
                    for j in range(max(0, x - px), min(x + px + 1, N)):
                        if ((float_type and not np.isnan(arr[i, j])) or
                            (not float_type and arr[i, j] != 0)):
                            neighbors += 1
                if neighbors>1: # (excludes self)
                    has_neighbors += 1
    return has_neighbors/cnt if cnt else np.inf
@nb.jit(nopython=True, nogil=True, cache=True)
def _rgb_density(arr, px=1):
    """Compute a density heuristic of an image.

    The density is a number in [0, 1], and indicates the normalized mean number
    of non-empty pixels that have neighbors in the given px radius.
    Returns ``np.inf`` when the image has no opaque pixels at all.
    """
    M, N = arr.shape
    cnt = has_neighbors = 0
    for y in range(0, M):
        for x in range(0, N):
            # A pixel is "non-empty" when the alpha channel (top byte of the
            # packed uint32 RGBA value) is non-zero.
            if (arr[y, x] >> 24) & 255:
                cnt += 1
                neighbors = 0
                # Clamp the (2*px+1)-square window to the array bounds.
                for i in range(max(0, y - px), min(y + px + 1, M)):
                    for j in range(max(0, x - px), min(x + px + 1, N)):
                        if (arr[i, j] >> 24) & 255:
                            neighbors += 1
                if neighbors>1: # (excludes self)
                    has_neighbors += 1
    return has_neighbors/cnt if cnt else np.inf
| 16,857 |
1,821 | <reponame>hangqiu/pixie
/*
* Copyright 2018- The Pixie Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <memory>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "src/carnot/planner/distributed/distributed_stitcher_rules.h"
#include "src/carnot/planner/ir/grpc_sink_ir.h"
#include "src/carnot/planner/ir/grpc_source_group_ir.h"
namespace px {
namespace carnot {
namespace planner {
namespace distributed {
StatusOr<bool> SetSourceGroupGRPCAddressRule::Apply(IRNode* ir_node) {
  // Only GRPCSourceGroup nodes carry a GRPC endpoint; leave all other node
  // types untouched and report "no change".
  if (!Match(ir_node, GRPCSourceGroup())) {
    return false;
  }
  auto* source_group = static_cast<GRPCSourceGroupIR*>(ir_node);
  source_group->SetGRPCAddress(grpc_address_);
  source_group->SetSSLTargetName(ssl_targetname_);
  return true;
}
// Connects the GRPC bridges from this Carnot instance's plan to the plans of
// every instance that depends on it in the distributed-plan DAG, and finally
// to itself. Returns true if any bridge was connected.
StatusOr<bool> AssociateDistributedPlanEdgesRule::Apply(CarnotInstance* from_carnot_instance) {
  bool did_connect_any_graph = false;
  for (int64_t to_carnot_instance_id :
       from_carnot_instance->distributed_plan()->dag().DependenciesOf(from_carnot_instance->id())) {
    CarnotInstance* to_carnot_instance =
        from_carnot_instance->distributed_plan()->Get(to_carnot_instance_id);
    PL_ASSIGN_OR_RETURN(bool did_connect_this_graph,
                        ConnectGraphs(from_carnot_instance->plan(), {from_carnot_instance->id()},
                                      to_carnot_instance->plan()));
    did_connect_any_graph |= did_connect_this_graph;
  }
  // Make sure we can connect to self.
  PL_ASSIGN_OR_RETURN(bool did_connect_graph_to_self,
                      ConnectGraphs(from_carnot_instance->plan(), {from_carnot_instance->id()},
                                    from_carnot_instance->plan()));
  did_connect_any_graph |= did_connect_graph_to_self;
  return did_connect_any_graph;
}
StatusOr<bool> AssociateDistributedPlanEdgesRule::ConnectGraphs(
    IR* from_graph, const absl::flat_hash_set<int64_t>& from_agents, IR* to_graph) {
  // In this, we find the bridge ids that overlap between the from_graph and to_graph.
  // 1. We get a mapping of bridge id to grpc sink in the from_graph
  // 2. We iterate through the GRPCSourceGroups, if any are connected to existing sinks, we add them
  // to the bridge list with the sink.
  // 3. We connect the bridges we find between the two of them.
  absl::flat_hash_map<int64_t, GRPCSinkIR*> bridge_id_to_grpc_sink;
  std::vector<std::pair<GRPCSourceGroupIR*, GRPCSinkIR*>> grpc_bridges;
  // Step 1: index every GRPC sink in from_graph by its destination id.
  for (IRNode* ir_node : from_graph->FindNodesOfType(IRNodeType::kGRPCSink)) {
    DCHECK(Match(ir_node, GRPCSink()));
    auto sink = static_cast<GRPCSinkIR*>(ir_node);
    bridge_id_to_grpc_sink[sink->destination_id()] = sink;
  }
  // Step 2: pair each source group in to_graph with its matching sink.
  for (IRNode* ir_node : to_graph->FindNodesOfType(IRNodeType::kGRPCSourceGroup)) {
    DCHECK(Match(ir_node, GRPCSourceGroup()));
    auto source = static_cast<GRPCSourceGroupIR*>(ir_node);
    // Only add sources that have a matching grpc sink, otherwise the bridge is for another plan.
    if (!bridge_id_to_grpc_sink.contains(source->source_id())) {
      continue;
    }
    grpc_bridges.push_back({source, bridge_id_to_grpc_sink[source->source_id()]});
  }
  // Step 3: connect every discovered bridge; any connection counts as change.
  bool did_connect_graph = false;
  for (const auto& [source_group, sink] : grpc_bridges) {
    PL_RETURN_IF_ERROR(source_group->AddGRPCSink(sink, from_agents));
    did_connect_graph = true;
  }
  return did_connect_graph;
}
} // namespace distributed
} // namespace planner
} // namespace carnot
} // namespace px
| 1,539 |
10,125 | <filename>OpenCorePkg/Library/OcHiiDatabaseLib/HiiDatabaseEntry.c
/** @file
This file contains the entry code to the HII database, which is defined by
UEFI 2.1 specification.
Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
#include "HiiDatabase.h"
#include <Library/OcBootServicesTableLib.h>
//
// Global variables
//
// Event group signaled when the active keyboard layout changes; a null
// handler (KeyboardLayoutChangeNullEvent) is registered for it.
EFI_EVENT gHiiKeyboardLayoutChanged;
// FALSE until the ReadyToBoot handler runs; afterwards database changes
// trigger re-export of configuration data (see OnReadyToBoot).
BOOLEAN gExportAfterReadyToBoot = FALSE;
// The single HII database instance. The list heads below are initialized
// in the library constructor; each brace group is the function table of one
// protocol instance installed by the constructor.
HII_DATABASE_PRIVATE_DATA mPrivate = {
  HII_DATABASE_PRIVATE_DATA_SIGNATURE,
  {
    // List head — initialized via InitializeListHead in the constructor.
    (LIST_ENTRY *) NULL,
    (LIST_ENTRY *) NULL
  },
  {
    // List head — initialized via InitializeListHead in the constructor.
    (LIST_ENTRY *) NULL,
    (LIST_ENTRY *) NULL
  },
  {
    // EFI_HII_FONT_PROTOCOL member functions.
    HiiStringToImage,
    HiiStringIdToImage,
    HiiGetGlyph,
    HiiGetFontInfo
  },
  {
    // EFI_HII_IMAGE_PROTOCOL member functions.
    HiiNewImage,
    HiiGetImage,
    HiiSetImage,
    HiiDrawImage,
    HiiDrawImageId
  },
  {
    // EFI_HII_IMAGE_EX_PROTOCOL member functions.
    HiiNewImageEx,
    HiiGetImageEx,
    HiiSetImageEx,
    HiiDrawImageEx,
    HiiDrawImageIdEx,
    HiiGetImageInfo
  },
  {
    // EFI_HII_STRING_PROTOCOL member functions.
    HiiNewString,
    InternalHiiGetString,
    InternalHiiSetString,
    HiiGetLanguages,
    HiiGetSecondaryLanguages
  },
  {
    // EFI_HII_DATABASE_PROTOCOL member functions.
    HiiNewPackageList,
    HiiRemovePackageList,
    HiiUpdatePackageList,
    HiiListPackageLists,
    HiiExportPackageLists,
    HiiRegisterPackageNotify,
    HiiUnregisterPackageNotify,
    HiiFindKeyboardLayouts,
    HiiGetKeyboardLayout,
    HiiSetKeyboardLayout,
    HiiGetPackageListHandle
  },
  {
    // EFI_HII_CONFIG_ROUTING_PROTOCOL member functions.
    HiiConfigRoutingExtractConfig,
    HiiConfigRoutingExportConfig,
    HiiConfigRoutingRouteConfig,
    HiiBlockToConfig,
    HiiConfigToBlock,
    HiiGetAltCfg
  },
  {
    // EFI_CONFIG_KEYWORD_HANDLER_PROTOCOL member functions.
    EfiConfigKeywordHandlerSetData,
    EfiConfigKeywordHandlerGetData
  },
  {
    // List head — initialized via InitializeListHead in the constructor.
    (LIST_ENTRY *) NULL,
    (LIST_ENTRY *) NULL
  },
  0,
  {
    // List head — initialized via InitializeListHead in the constructor.
    (LIST_ENTRY *) NULL,
    (LIST_ENTRY *) NULL
  },
  // Default text attribute: light gray on black.
  EFI_TEXT_ATTR (EFI_LIGHTGRAY, EFI_BLACK),
  // Zero GUID — presumably the currently selected keyboard layout; confirm
  // against the HII_DATABASE_PRIVATE_DATA definition.
  {
    0x00000000,
    0x0000,
    0x0000,
    {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
  },
  NULL
};
/**
The default event handler for gHiiKeyboardLayoutChanged
event group.
This is internal function.
@param Event The event that triggered this notification function.
@param Context Pointer to the notification functions context.
**/
VOID
EFIAPI
KeyboardLayoutChangeNullEvent (
  IN EFI_EVENT Event,
  IN VOID *Context
  )
{
  //
  // Deliberately empty: the event group only needs a registered handler so
  // that gHiiKeyboardLayoutChanged can be signaled; no work is required here.
  //
}
/**
On Ready To Boot Services Event notification handler.
To trigger the function that to export the Hii Configuration setting.
@param[in] Event Event whose notification function is being invoked
@param[in] Context Pointer to the notification function's context
**/
VOID
EFIAPI
OnReadyToBoot (
  IN EFI_EVENT Event,
  IN VOID *Context
  )
{
  //
  // When ready to boot, we begin to export the HiiDatabase data.
  // And hook all the possible HiiDatabase change actions to export data.
  //
  HiiGetDatabaseInfo (&mPrivate.HiiDatabase);
  HiiGetConfigRespInfo (&mPrivate.HiiDatabase);
  gExportAfterReadyToBoot = TRUE;
  //
  // One-shot handler: close the event so it never fires again.
  //
  gBS->CloseEvent (Event);
}
/**
Initialize HII Database.
@param ImageHandle The image handle.
@param SystemTable The system table.
@retval EFI_SUCCESS The Hii database is setup correctly.
@return Other value if failed to create the default event for
gHiiKeyboardLayoutChanged. Check gBS->CreateEventEx for
details. Or failed to install the protocols.
Check gBS->InstallMultipleProtocolInterfaces for details.
Or failed to create Ready To Boot Event.
Check EfiCreateEventReadyToBootEx for details.
**/
EFI_STATUS
EFIAPI
OcHiiDatabaseLocalLibConstructor (
  IN EFI_HANDLE ImageHandle,
  IN EFI_SYSTEM_TABLE *SystemTable
  )
{
  EFI_STATUS Status;
  VOID *HiiDatabase;
  //
  // There will be only one HII Database in the system
  // If there is another out there, someone is trying to install us
  // again. Fail that scenario.
  //
  Status = gBS->LocateProtocol (
                  &gEfiHiiDatabaseProtocolGuid,
                  NULL,
                  &HiiDatabase
                  );
  if (!EFI_ERROR (Status)) {
    return Status;
  }
  //
  // Prepare the private data's list heads before any protocol can use them.
  //
  InitializeListHead (&mPrivate.DatabaseList);
  InitializeListHead (&mPrivate.DatabaseNotifyList);
  InitializeListHead (&mPrivate.HiiHandleList);
  InitializeListHead (&mPrivate.FontInfoList);
#if 0
  //
  // Create a event with EFI_HII_SET_KEYBOARD_LAYOUT_EVENT_GUID group type.
  // FIXME: Check UEFI version and install on supported configurations.
  //
  Status = gBS->CreateEventEx (
                  EVT_NOTIFY_SIGNAL,
                  TPL_NOTIFY,
                  KeyboardLayoutChangeNullEvent,
                  NULL,
                  &gEfiHiiKeyBoardLayoutGuid,
                  &gHiiKeyboardLayoutChanged
                  );
  if (EFI_ERROR (Status)) {
    return Status;
  }
#endif
  //
  // Register the HII protocol family on the boot-services table; FALSE means
  // do not override an existing registration.
  //
  Status = OcRegisterBootServicesProtocol (&gEfiHiiFontProtocolGuid, &mPrivate.HiiFont, FALSE);
  ASSERT_EFI_ERROR (Status);
  Status = OcRegisterBootServicesProtocol (&gEfiHiiStringProtocolGuid, &mPrivate.HiiString, FALSE);
  ASSERT_EFI_ERROR (Status);
  Status = OcRegisterBootServicesProtocol (&gEfiHiiDatabaseProtocolGuid, &mPrivate.HiiDatabase, FALSE);
  ASSERT_EFI_ERROR (Status);
  Status = OcRegisterBootServicesProtocol (&gEfiHiiConfigRoutingProtocolGuid, &mPrivate.ConfigRouting, FALSE);
  ASSERT_EFI_ERROR (Status);
  Status = OcRegisterBootServicesProtocol (&gEfiConfigKeywordHandlerProtocolGuid, &mPrivate.ConfigKeywordHandler, FALSE);
  ASSERT_EFI_ERROR (Status);
  //
  // Image protocols are optional and gated behind a feature PCD.
  //
  if (FeaturePcdGet (PcdSupportHiiImageProtocol)) {
    Status = OcRegisterBootServicesProtocol (&gEfiHiiImageProtocolGuid, &mPrivate.HiiImage, FALSE);
    ASSERT_EFI_ERROR (Status);
    Status = OcRegisterBootServicesProtocol (&gEfiHiiImageExProtocolGuid, &mPrivate.HiiImageEx, FALSE);
    ASSERT_EFI_ERROR (Status);
  }
  return EFI_SUCCESS;
}
| 2,419 |
486 | <reponame>yncxcw/MHiBench<filename>stormbench/streaming/src/main/java/com/intel/hibench/stormbench/topologies/SingleSpoutTops.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.hibench.stormbench.topologies;
import com.intel.hibench.stormbench.spout.KafkaSpoutFactory;
import com.intel.hibench.stormbench.util.StormBenchConfig;
import org.apache.storm.Config;
import org.apache.storm.StormSubmitter;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseRichSpout;
/**
 * Base class for benchmark topologies built from exactly one Kafka spout.
 * Subclasses supply the bolt pipeline via {@link #setBolts(TopologyBuilder)}.
 */
public abstract class SingleSpoutTops {

  protected StormBenchConfig config;

  public SingleSpoutTops(StormBenchConfig config) {
    this.config = config;
  }

  /** Build the topology and submit it to the cluster under the benchmark name. */
  public void run() throws Exception {
    StormSubmitter.submitTopology(config.benchName, getConf(), getBuilder().createTopology());
  }

  private Config getConf() {
    Config topologyConf = new Config();
    topologyConf.setNumWorkers(config.workerCount);
    // Back-pressure is disabled so the benchmark drives the cluster at full rate.
    topologyConf.put(Config.TOPOLOGY_BACKPRESSURE_ENABLE, false);
    if (!config.ackon) {
      // Zero ackers disables at-least-once tuple tracking entirely.
      topologyConf.setNumAckers(0);
    }
    return topologyConf;
  }

  public TopologyBuilder getBuilder() {
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    BaseRichSpout kafkaSpout = KafkaSpoutFactory.getSpout(config);
    topologyBuilder.setSpout("spout", kafkaSpout, config.spoutThreads);
    setBolts(topologyBuilder);
    return topologyBuilder;
  }

  /** Subclasses attach their bolt pipeline to the builder. */
  public abstract void setBolts(TopologyBuilder builder);
}
| 645 |
32,544 | package com.baeldung.reactorbus;
import com.baeldung.reactorbus.consumer.NotificationConsumer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.CommandLineRunner;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import reactor.bus.EventBus;
import static reactor.bus.selector.Selectors.$;
/**
 * Spring Boot entry point that wires the reactor {@link EventBus}: on startup
 * it subscribes {@code notificationConsumer} to events published under the
 * "notificationConsumer" selector.
 */
@SpringBootApplication
public class NotificationApplication implements CommandLineRunner {
    @Autowired
    private EventBus eventBus;
    @Autowired
    private NotificationConsumer notificationConsumer;
    @Override
    public void run(String... args) throws Exception {
        // Register the consumer for events whose key matches "notificationConsumer".
        eventBus.on($("notificationConsumer"), notificationConsumer);
    }
    public static void main(String[] args) {
        SpringApplication.run(NotificationApplication.class, args);
    }
}
| 259 |
342 | <reponame>gspu/bitkeeper
/*
* Copyright 2000-2004,2006,2009-2011,2015-2016 BitMover, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "bkd.h"
#include "nested.h"
/*
 * Emit a bkd-protocol ERROR line telling the client that "key" does not
 * name a usable component root key inside the repository at "path".
 */
private void
send_keyerror(char *key, char *path)
{
	out("ERROR-cannot use key ");
	out(key);
	out(" inside repository ");
	out(path);
	out(" (nonexistent, or not a ensemble rootkey)\n");
}
/*
 * Emit a bkd-protocol ERROR line for a failed cd; deliberately vague about
 * the reason (see cmd_cd — detail would be an information leak).
 */
private void
send_cderror(char *path)
{
	out("ERROR-cannot cd to ");
	out(path);
	out(" (illegal, nonexistent, or not package root)\n");
}
/*
 * like fullname() but it also expands if the final component of the
 * pathname is a symlink; loops so chains of symlinks are followed until
 * the last component is not a link. Result is left in "buf".
 */
private void
fullname_expand(char *dir, char *buf)
{
	int i;
	char sym[MAXPATH];
	while (1) {
		/* convert dir to a full pathname and expand symlinks */
		dir = fullname(dir, buf);
		unless (isSymlnk(dir)) return;
		/*
		 * fullname() doesn't expand symlinks in the last
		 * component so fix that.
		 */
		i = readlink(dir, sym, sizeof(sym));
		assert(i < sizeof(sym));
		/* readlink() does not NUL-terminate; do it ourselves. */
		sym[i] = 0;
		if (IsFullPath(sym)) {
			strcpy(buf, sym);
		} else {
			/* relative link: resolve against the link's directory */
			concat_path(buf, dirname(dir), sym);
		}
	}
}
/*
 * Return 1 if "file" is at or below the directory where the bkd started,
 * or if safe_cd restrictions are not in effect at all.
 */
int
bkd_isSafe(char *file)
{
	char *p;
	char a[MAXPATH];
	char b[MAXPATH];
	/* No restrictions unless -xcd safety or a daemon environment is set. */
	unless (Opts.safe_cd || ((p = getenv("BKD_DAEMON")) && *p)) return (1);
	fullname_expand(start_cwd, a);
	fullname_expand(file, b);
	/* Prefix test: "b" must live underneath the start directory "a". */
	if ((strlen(b) >= strlen(a)) && pathneq(a, b, strlen(a))) return (1);
	if (Opts.symlink_ok) {
		/* Retry with the symlink-resolved form of the path. */
		fullLink(file, b, 0);
		if ((strlen(b) >= strlen(a)) && pathneq(a, b, strlen(a))) {
			return (1);
		}
	}
	return (0);
}
/*
 * Change to the directory, making sure it is at or below where we are.
 * On failure, do not tell them why, that's an information leak.
 * Returns 0 on success, 1 on failure (after sending the vague error).
 */
int
unsafe_cd(char *path)
{
	char *p;
	char a[MAXPATH];
	char b[MAXPATH];
	strcpy(a, proj_cwd());
	if (chdir(path)) return (1);
	/* Without safe_cd / daemon mode any reachable directory is allowed. */
	unless (Opts.safe_cd || ((p = getenv("BKD_DAEMON")) && *p)) return (0);
	strcpy(b, proj_cwd());
	/* New cwd must be prefixed by the old one (i.e. at or below it). */
	if ((strlen(b) >= strlen(a)) && pathneq(a, b, strlen(a))) return (0);
	unless (Opts.symlink_ok) goto err;
	/* Symlinks allowed: retry the check with the fully-resolved path. */
	if (chdir(start_cwd)) return (1);
	fullLink(path, b, 0);
	if (chdir(path)) return (1);
	if ((strlen(b) >= strlen(a)) && pathneq(a, b, strlen(a))) return (0);
	unless (IsFullPath(path)) goto err;
err:	send_cderror(path);
	return (1);
}
/*
 * bkd "cd" command: av[1] is <path>[|<rootkey>]. The path part gets you to
 * the product; the optional rootkey selects a component inside it. After a
 * successful cd we must be sitting at a package root (BitKeeper/etc exists).
 * Returns 0 on success, 1 after sending an ERROR line.
 */
int
cmd_cd(int ac, char **av)
{
	char *p = av[1];
	char *t, *u;
	MDBM *idDB;
	char *rootkey, buf[MAXPATH];
	unless (p) {
		out("ERROR-cd command must have path argument\n");
		return (1);
	}
	/*
	 * av[1] = <path>[|<rootkey>]
	 * The path part gets you to the product, the rootkey which is
	 * postfixed, tells you to go to that component.
	 */
	if (rootkey = strchr(p, '|')) *rootkey++ = 0;
	if (*p && unsafe_cd(p)) {
		send_cderror(p);
		return (1);
	}
	if (rootkey && proj_isProduct(0) && !streq(rootkey, proj_rootkey(0))) {
		/* Map the component rootkey to its path via the id cache. */
		unless ((idDB = loadDB(IDCACHE, 0, DB_IDCACHE)) &&
		    (t = mdbm_fetch_str(idDB, rootkey))) {
			send_keyerror(rootkey, p);
			if (idDB) mdbm_close(idDB);
			return (1);
		}
		/* csetChomp(t); -- but need access to innards */
		unless ((u = strrchr(t, '/')) && streq(u+1, GCHANGESET)) {
			send_keyerror(rootkey, p);
			return (1);
		}
		*u = 0;
		strcpy(buf, t);
		/* Restore the '|' separator we NULed out of av[1] above. */
		rootkey[-1] = '|';
		mdbm_close(idDB);
		if (chdir(buf)) {
			send_cderror(buf);
			return (1);
		}
	}
	/*
	 * XXX TODO need to check for permission error here
	 */
	unless (exists("BitKeeper/etc")) {
		if (errno == ENOENT) {
			send_cderror(p);
		} else if (errno == EACCES) {
			out("ERROR-");
			out(p);
			out(" access denied\n");
		} else {
			out("ERROR-unknown error");
			sprintf(buf, "errno = %d\n", errno);
			out(buf);
		}
		return (1);
	}
	return (0);
}
| 1,782 |
1,861 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
package com.facebook.spectrum.plugins.png;
import static com.facebook.spectrum.image.EncodedImageFormat.PNG;
import com.facebook.spectrum.DefaultPlugins;
import com.facebook.spectrum.Spectrum;
import com.facebook.spectrum.image.ImageSize;
import com.facebook.spectrum.logging.BaseSpectrumLogger;
import com.facebook.spectrum.options.TranscodeOptions;
import com.facebook.spectrum.requirements.EncodeRequirement;
import com.facebook.spectrum.requirements.ResizeRequirement;
import com.facebook.spectrum.testutils.SpectrumAssertUtils;
import com.facebook.spectrum.testutils.TestData;
import com.facebook.spectrum.testutils.TestSoLoader;
import org.junit.Before;
import org.junit.Test;
/**
 * Integration tests for PNG transcoding through Spectrum: a lossless
 * passthrough case (SSIM must be exactly 1) and a downscale case compared
 * against a pre-rendered 128x85 reference.
 */
public class PngTranscodeTest {
  private Spectrum mSpectrum;
  @Before
  public void setup() {
    TestSoLoader.init();
    mSpectrum = Spectrum.make(new BaseSpectrumLogger(), DefaultPlugins.get());
  }
  @Test
  public void testTranscode_whenCanPassthrough_thenPerfectOutput() throws Exception {
    // No resize and same format: the plugin can pass bytes through, so the
    // output must be pixel-identical (SSIM 1.0, zero tolerance).
    final TranscodeOptions transcodeOptions =
        TranscodeOptions.Builder(new EncodeRequirement(PNG)).build();
    SpectrumAssertUtils.executeAndAssert(
        mSpectrum,
        SpectrumAssertUtils.Builder.withTestImage(TestData.PNG.PATH_128x85)
            .transcoding(transcodeOptions)
            .assertingOutputFormat(PNG)
            .comparingAgainstTestFile(TestData.PNG.PATH_128x85)
            .usingSsimTarget(1f)
            .usingSsimTolerance(0f));
  }
  @Test
  public void testTranscode_whenScaledDown_thenMatchesExpected() throws Exception {
    // EXACT resize to fit 128x128 keeps aspect ratio -> 128x85 reference.
    final TranscodeOptions transcodeOptions =
        TranscodeOptions.Builder(new EncodeRequirement(PNG))
            .resize(ResizeRequirement.Mode.EXACT, new ImageSize(128, 128))
            .build();
    SpectrumAssertUtils.executeAndAssert(
        mSpectrum,
        SpectrumAssertUtils.Builder.withTestImage(TestData.PNG.PATH_800x530)
            .transcoding(transcodeOptions)
            .assertingOutputFormat(PNG)
            .comparingAgainstTestFile(TestData.PNG.PATH_128x85)
            .usingSsimTarget(.88f));
  }
}
| 835 |
402 | // Copyright (c) 2017 Samsung Electronics Co., LTD
// Distributed under the MIT License.
// See the LICENSE file in the project root for more information.
/// \file platform.h This file contains platform-specific (Windows/Unix) definitions.
#pragma once
// TODO replace in code, remove this macros
#ifdef _MSC_VER
#define W(s) L##s
#else
#define W(s) u##s
#endif
/// @{ PLATFORM_TAG is the macro that identifies the platform for which the code is currently compiling;
/// its value should expand to a C++ type that likewise identifies the platform (see below).
#ifdef WIN32
#define PLATFORM_TAG Win32PlatformTag
#else
#define PLATFORM_TAG UnixPlatformTag
#endif
/// @}
namespace netcoredbg
{
struct Win32PlatformTag {}; /// PlatformTag for Windows (see below)
struct UnixPlatformTag {}; /// PlatformTag for Unix and MacOS.
/// PlatformTag is the type which identifies the platform for which the code
/// is currently compiling. This tag might be used to select proper template
/// specialization.
using PlatformTag = PLATFORM_TAG;
/// Function returns memory mapping page size (like sysconf(_SC_PAGESIZE) on Unix).
unsigned long OSPageSize();
/// Function suspends process execution for specified amount of time (in microseconds)
void USleep(unsigned long usec);
/// Function returns list of environment variables (like char **environ).
char** GetSystemEnvironment();
} // ::netcoredbg
| 415 |
511 | <reponame>SenthilKumarGS/TizenRT
/******************************************************************
*
* Copyright 2018 Samsung Electronics All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************/
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
/*
Pathname: s_TNSfilt.h
------------------------------------------------------------------------------
REVISION HISTORY
Description: Added lpc, start, & size, so the data from
tns_inv_subblock can be shared with tns_decode_subblock.
Description: Removed lpc to save 2KB of memory (on 32-bit machines.)
This change requires tns_decode_coef.c to perform calculations in place.
Description: Removed start & size. start_band and stop_band can simply
take on a new meaning after this function. (coef index, rather than
scalefactor band index.)
Description: Had to add "start_coef" and "stop_coef" in order to preserve
values "start_band" and "stop_band." This required a change to
tns_setup_filter.c also.
Description: Had to add element "q_lpc" to store the q-format of the lpc
coefficients passed via "coef."
Description: Moved lpc_coef array up to the s_TNS_frame_info.h structure.
Description:
(1) Modified to include the lines...
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
(2) Updated the copyright header.
Who: Date:
Description:
------------------------------------------------------------------------------
INCLUDE DESCRIPTION
This include file defines the structure, s_TNSfilt
------------------------------------------------------------------------------
*/
/*----------------------------------------------------------------------------
; CONTINUE ONLY IF NOT ALREADY DEFINED
----------------------------------------------------------------------------*/
#ifndef S_TNSFILT_H
#define S_TNSFILT_H
#ifdef __cplusplus
extern "C"
{
#endif
/*----------------------------------------------------------------------------
; INCLUDES
----------------------------------------------------------------------------*/
#include "pv_audio_type_defs.h"
#include "e_tns_const.h"
/*----------------------------------------------------------------------------
; MACROS
; Define module specific macros here
----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
; DEFINES
; Include all pre-processor statements here.
----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
; EXTERNAL VARIABLES REFERENCES
; Declare variables used in this module but defined elsewhere
----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
; SIMPLE TYPEDEF'S
----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
; ENUMERATED TYPEDEF'S
----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
; STRUCTURES TYPEDEF'S
----------------------------------------------------------------------------*/
typedef struct
{
    Int start_band;   /* first scalefactor band covered by this TNS filter */
    Int stop_band;    /* one past the last scalefactor band of the filter */
    Int start_coef;   /* first spectral coefficient index (derived from
                         start_band by tns_setup_filter; see revision notes) */
    Int stop_coef;    /* one past the last spectral coefficient index */
    UInt order;       /* filter order; presumably bounded by TNS_MAX_ORDER —
                         confirm against e_tns_const.h */
    Int direction;    /* filtering direction over the coefficients — exact
                         encoding defined by the TNS decode routines */
    Int q_lpc;        /* Q-format of the LPC coefficients passed via "coef" */
} TNSfilt;
/*----------------------------------------------------------------------------
; GLOBAL FUNCTION DEFINITIONS
; Function Prototype declaration
----------------------------------------------------------------------------*/
#ifdef __cplusplus
}
#endif
#endif /* S_TNSFILT_H */
| 1,227 |
1,405 | package com.lenovo.safecenter.antivirus.utils;
import java.io.File;
import java.io.IOException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
/**
 * SHA-1 hashing helpers (recovered from a JADX decompilation; method names
 * with "MD5" in them are kept for interface compatibility, but every method
 * computes SHA-1).
 *
 * NOTE(review): all methods share one static {@link MessageDigest}, so this
 * class is NOT thread-safe — concurrent callers will corrupt each other's
 * digests. This matches the original behavior and is preserved.
 */
public class SHA1Util {
    /** Lookup table for lower-case hex encoding. */
    protected static char[] hexDigits = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
    /** Shared digest instance; null if SHA-1 is unavailable (never in practice). */
    protected static MessageDigest messagedigest;

    static {
        messagedigest = null;
        try {
            messagedigest = MessageDigest.getInstance("SHA-1");
        } catch (NoSuchAlgorithmException nsaex) {
            System.err.println(SHA1Util.class.getName() + "初始化失败,MessageDigest不支持MD5Util。");
            nsaex.printStackTrace();
        }
    }

    /**
     * Legacy alias for {@link #getFileSha1(File)} (the misleading name is
     * kept because external callers may depend on it).
     */
    public static String getFileMD5String(File file) throws IOException {
        return getFileSha1(file);
    }

    /** SHA-1 of the string's bytes in the platform default charset, as lower-case hex. */
    public static String getMD5String(String s) {
        return getMD5String(s.getBytes());
    }

    /** SHA-1 of the given bytes as lower-case hex. */
    public static String getMD5String(byte[] bytes) {
        messagedigest.update(bytes);
        // digest() resets the shared MessageDigest for the next caller.
        return toHexString(messagedigest.digest());
    }

    /** Encode a byte array as lower-case hex (two chars per byte). */
    private static String toHexString(byte[] bytes) {
        StringBuilder hex = new StringBuilder(bytes.length * 2);
        for (byte b : bytes) {
            hex.append(hexDigits[(b & 0xF0) >> 4]);
            hex.append(hexDigits[b & 0x0F]);
        }
        return hex.toString();
    }

    /** True when the SHA-1 hex of {@code password} equals {@code md5PwdStr}. */
    public static boolean checkPassword(String password, String md5PwdStr) {
        return getMD5String(password).equals(md5PwdStr);
    }

    public static void main(String[] args) throws IOException {
        long begin = System.currentTimeMillis();
        System.out.println("sha1:" + getMD5String("我爱你") + " time:" + ((System.currentTimeMillis() - begin) / 1000) + "s");
    }

    /**
     * SHA-1 of a file's contents as lower-case hex, or {@code null} on any
     * I/O failure (matching the decompiled original's error handling).
     *
     * BUGFIX: the decompiled version was an undecompiled stub that threw
     * {@link UnsupportedOperationException} at runtime; this reimplements it
     * from the recovered bytecode. The original's 5 MB scratch buffer is
     * replaced with an 8 KB streaming buffer — same result, far less memory.
     */
    public static String getFileSha1(File file) {
        try (FileInputStream in = new FileInputStream(file)) {
            byte[] buffer = new byte[8192];
            int read;
            while ((read = in.read(buffer)) > 0) {
                messagedigest.update(buffer, 0, read);
            }
            return toHexString(messagedigest.digest());
        } catch (Exception | OutOfMemoryError e) {
            // Original printed the stack trace and returned null; keep that.
            e.printStackTrace();
            return null;
        }
    }
}
| 2,758 |
3,084 | <gh_stars>1000+
#include <pch.h>
/*
** Driver TODO: Add code to EvtDeviceOwnershipChange to reset the device state to a default.
**
** PosCx calls this callback to signal that the ownership of the device has transitioned from one file handle
** to another. When this happens, app developers expect that the settings for the device are restored to a
** "default" state, so the driver should do what is necessary to satisfy that expectation here.
**
*/
VOID EvtDeviceOwnershipChange(_In_ WDFDEVICE Device, _In_opt_ WDFFILEOBJECT OldOwnerFileObj, _In_opt_ WDFFILEOBJECT NewOwnerFileObj)
{
    // All parameters are intentionally unused until the reset logic is
    // implemented; the macros keep warning-clean builds.
    UNREFERENCED_PARAMETER(Device);
    UNREFERENCED_PARAMETER(OldOwnerFileObj);
    UNREFERENCED_PARAMETER(NewOwnerFileObj);

    // This function signals that ownership has transitioned from one file handle to another (typically from one app to another).
    // As a result, the driver is expected to reset all settings to a default state.
}
/*
** Driver TODO:
**
** This part of the sample demonstrates how the driver will return data to the runtime. How this method would be called depends
** on the driver implementation.
*/
VOID EvtOnBarcodeScanDataRetrieved(_In_ WDFDEVICE Device)
{
    // Events that the runtime can handle for a barcode scanner device are:
    // PosEventType::BarcodeScannerDataReceived -- the standard event with barcode scan data
    // PosEventType::BarcodeScannerErrorOccurred -- an event that should be sent if an error occurred while scanning data
    // PosEventType::BarcodeScannerImagePreviewReceived -- an event that sends a complete .bmp image to the runtime for imagers
    // PosEventType::BarcodeScannerTriggerPressed -- an event indicating that the trigger on the device has been pushed and the device is looking for data
    // PosEventType::BarcodeScannerTriggerReleased -- an event indicating that the trigger on the device has been released and the device is no longer looking for data
    // PosEventType::StatusUpdated -- an event to indicate changes to power state
    //
    // Additionally, the following event is sent to the runtime, but is handled entirely by PosCx
    // PosEventType::ReleaseDeviceRequested

    // The following shows an example of sending barcode scan data.
    // Payload layout: [PosBarcodeScannerDataReceivedEventData header][scan data bytes][label bytes].
    CHAR exampleData[] = "]0A12345";
    CHAR exampleDataLabel[] = "12345";

    // Total size is the struct plus the size of the two strings (minus the null terminators, which aren't transmitted).
    size_t totalSize = sizeof(PosBarcodeScannerDataReceivedEventData) + sizeof(exampleData) - sizeof(CHAR) + sizeof(exampleDataLabel) - sizeof(CHAR);

    // PosCx supports two methods of pending the event data -- one where it takes the WDFMEMORY for the event, and the other where it creates it.
    // The subtle difference between the two is that the one that takes the WDFMEMORY must have the event header information already added.
    //
    // Since that's the case with the PosBarcodeScannerDataReceivedEventData data structure, barcode scanner drivers should create the WDFMEMORY objects
    // and pass them to PosCx.
    WDF_OBJECT_ATTRIBUTES Attributes;
    WDF_OBJECT_ATTRIBUTES_INIT(&Attributes);
    // Parent the memory to the device so it is cleaned up with the device object.
    Attributes.ParentObject = Device;

    BYTE* eventData = nullptr;
    WDFMEMORY eventMemory = NULL;
    NTSTATUS status = WdfMemoryCreate(
        &Attributes,
        NonPagedPoolNx,
        (ULONG)'eSOP', // pool allocation tag
        totalSize,
        &eventMemory,
        (PVOID*)&eventData
        );
    if (!NT_SUCCESS(status))
    {
        // handle out of memory error
        return;
    }

    // Calculate data and label sizes (excluding the null terminators).
    size_t exampleDataByteCount = sizeof(exampleData) - sizeof(CHAR);
    size_t exampleLabelByteCount = sizeof(exampleDataLabel) - sizeof(CHAR);

    PosBarcodeScannerDataReceivedEventData* eventHeader = (PosBarcodeScannerDataReceivedEventData*)eventData;
    eventHeader->Header.EventType = PosEventType::BarcodeScannerDataReceived;
    eventHeader->Header.DataLength = (UINT32)totalSize;
    eventHeader->DataType = BarcodeSymbology::Ean13;
    eventHeader->ScanDataLength = (UINT32)exampleDataByteCount;
    eventHeader->ScanDataLabelLength = (UINT32)exampleLabelByteCount;

    // These use memcpy because a string copy would append the null terminator and the event data doesn't use it.
    BYTE* eventScanData = eventData + sizeof(PosBarcodeScannerDataReceivedEventData);
    memcpy(eventScanData, exampleData, exampleDataByteCount);

    BYTE* eventLabelData = eventScanData + exampleDataByteCount;
    memcpy(eventLabelData, exampleDataLabel, exampleLabelByteCount);

    // This call actually pends the data to be sent to the WinRT APIs.
    status = PosCxPutPendingEventMemory(Device, SCANNER_INTERFACE_TAG, eventMemory, POS_CX_EVENT_ATTR_DATA);
    if (!NT_SUCCESS(status))
    {
        // This should only happen in rare cases such as out of memory (or that the device or interface tag isn't found). The driver should most likely drop the event.
        WdfObjectDelete(eventMemory);
    }
}
500 | /*
* Copyright 2017 The Native Object Protocols Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBNOP_INCLUDE_NOP_RPC_SIMPLE_METHOD_RECEIVER_H_
#define LIBNOP_INCLUDE_NOP_RPC_SIMPLE_METHOD_RECEIVER_H_
#include <tuple>
namespace nop {
// SimpleMethodReceiver is a minimal implementation of the Receiver type
// required by the remote interface support in nop/rpc/interface.h. This class
// simply deserializes the method selector and argument tuple from the given
// deserializer and serializes the return value using the given serializer,
// leaving all transport-level concerns to the serializer and deserializer type.
template <typename Serializer, typename Deserializer>
class SimpleMethodReceiver {
 public:
  // Stores raw pointers; both objects must outlive this receiver.
  constexpr SimpleMethodReceiver(Serializer* serializer,
                                 Deserializer* deserializer)
      : serializer_{serializer}, deserializer_{deserializer} {}

  // Reads the selector identifying which interface method was invoked.
  template <typename MethodSelector>
  constexpr Status<void> GetMethodSelector(MethodSelector* method_selector) {
    return deserializer_->Read(method_selector);
  }

  // Reads the argument tuple for the selected method.
  template <typename... Args>
  constexpr Status<void> GetArgs(std::tuple<Args...>* args) {
    return deserializer_->Read(args);
  }

  // Writes the method's return value back through the serializer.
  template <typename Return>
  constexpr Status<void> SendReturn(const Return& return_value) {
    return serializer_->Write(return_value);
  }

  // Accessors for the underlying serializer and deserializer.
  constexpr const Serializer& serializer() const { return *serializer_; }
  constexpr Serializer& serializer() { return *serializer_; }
  constexpr const Deserializer& deserializer() const { return *deserializer_; }
  constexpr Deserializer& deserializer() { return *deserializer_; }

 private:
  Serializer* serializer_;
  Deserializer* deserializer_;
};
// Deduction helper: builds a SimpleMethodReceiver bound to the given
// serializer/deserializer pair without the caller spelling out the template
// arguments.
template <typename Serializer, typename Deserializer>
SimpleMethodReceiver<Serializer, Deserializer> MakeSimpleMethodReceiver(
    Serializer* serializer, Deserializer* deserializer) {
  return SimpleMethodReceiver<Serializer, Deserializer>(serializer,
                                                        deserializer);
}
} // namespace nop
#endif // LIBNOP_INCLUDE_NOP_RPC_SIMPLE_METHOD_RECEIVER_H_
| 788 |
1,909 | <reponame>grmkris/XChange
package org.knowm.xchange.examples.gateio.marketdata;
import java.io.IOException;
import org.knowm.xchange.Exchange;
import org.knowm.xchange.ExchangeFactory;
import org.knowm.xchange.ExchangeSpecification;
import org.knowm.xchange.gateio.GateioExchange;
/**
 * Demonstrates loading remote exchange metadata for Gate.io and dumping the
 * resulting specification flag, metadata, and symbols to stdout.
 */
public class GateioMetaDataDemo {

  public static void main(String[] args) throws IOException {
    // Ask the exchange to fetch its metadata remotely when it is created.
    ExchangeSpecification specification = new GateioExchange().getDefaultExchangeSpecification();
    specification.setShouldLoadRemoteMetaData(true);

    Exchange gateio = ExchangeFactory.INSTANCE.createExchange(specification);

    // Same three dumps, in the same order, as before.
    System.out.println(gateio.getExchangeSpecification().isShouldLoadRemoteMetaData());
    System.out.println(gateio.getExchangeMetaData().toString());
    System.out.println(gateio.getExchangeSymbols());
  }
}
| 259 |
20,295 | /**
* @file
* @brief [Travelling Salesman Problem]
* (https://en.wikipedia.org/wiki/Travelling_salesman_problem) implementation
*
* @author [<NAME>](http://github.com/Mayank17M)
*
* @details
* Travelling salesman problem asks:
* Given a list of cities and the distances between each pair of cities, what is
* the shortest possible route that visits each city exactly once and returns to
* the origin city?
* TSP can be modeled as an undirected weighted graph, such that cities are the
* graph's vertices, paths are the graph's edges, and a path's distance is the
* edge's weight. It is a minimization problem starting and finishing at a
* specified vertex after having visited each other vertex exactly once.
* This is the naive implementation of the problem.
*/
#include <algorithm> /// for std::min
#include <cassert> /// for assert
#include <iostream> /// for IO operations
#include <limits> /// for limits of integral types
#include <vector> /// for std::vector
/**
 * @namespace graph
 * @brief Graph Algorithms
 */
namespace graph {
/**
 * @brief Computes the weight of the minimum-cost tour that starts at `src`,
 * visits every other city exactly once, and returns to `src` (brute force
 * over all permutations, O(V!)).
 *
 * @param cities matrix representation of pairwise city distances
 * @param src point from where the salesman is starting
 * @param V number of vertices in the graph
 * @return weight of the minimum Hamiltonian cycle
 */
int TravellingSalesmanProblem(std::vector<std::vector<uint32_t>> *cities,
                              int32_t src, uint32_t V) {
    //// vtx stores the vertices of the graph other than the source
    std::vector<uint32_t> vtx;
    for (uint32_t i = 0; i < V; i++) {
        // Cast avoids a signed/unsigned comparison between i and src.
        if (i != static_cast<uint32_t>(src)) {
            vtx.push_back(i);
        }
    }

    //// store minimum weight Hamiltonian Cycle; use the numeric limit instead
    //// of the magic constant 2147483647 (<limits> is included for this).
    int32_t min_path = std::numeric_limits<int32_t>::max();

    do {
        //// store current Path weight(cost)
        int32_t curr_weight = 0;

        //// compute current path weight
        int k = src;
        for (int i : vtx) {
            curr_weight += (*cities)[k][i];
            k = i;
        }
        curr_weight += (*cities)[k][src];  // close the cycle back to src

        //// update minimum
        min_path = std::min(min_path, curr_weight);

        // Qualify the call rather than relying on ADL to find it in std.
    } while (std::next_permutation(vtx.begin(), vtx.end()));

    return min_path;
}
}  // namespace graph
/**
 * @brief Self-test implementations.
 *
 * Each test builds a 4x4 symmetric distance matrix and asserts the known
 * minimum tour cost starting from city 0.
 * @returns void
 */
static void tests() {
    std::cout << "Initiatinig Predefined Tests..." << std::endl;
    std::cout << "Initiating Test 1..." << std::endl;
    std::vector<std::vector<uint32_t>> cities = {
        {0, 20, 42, 35}, {20, 0, 30, 34}, {42, 30, 0, 12}, {35, 34, 12, 0}};
    uint32_t V = cities.size();
    assert(graph::TravellingSalesmanProblem(&cities, 0, V) == 97);
    std::cout << "1st test passed..." << std::endl;

    std::cout << "Initiating Test 2..." << std::endl;
    cities = {{0, 5, 10, 15}, {5, 0, 20, 30}, {10, 20, 0, 35}, {15, 30, 35, 0}};
    V = cities.size();
    assert(graph::TravellingSalesmanProblem(&cities, 0, V) == 75);
    std::cout << "2nd test passed..." << std::endl;

    std::cout << "Initiating Test 3..." << std::endl;
    cities = {
        {0, 10, 15, 20}, {10, 0, 35, 25}, {15, 35, 0, 30}, {20, 25, 30, 0}};
    V = cities.size();
    assert(graph::TravellingSalesmanProblem(&cities, 0, V) == 80);
    std::cout << "3rd test passed..." << std::endl;
}
/**
 * @brief Main function.
 *
 * Runs the self-tests, then prints the minimum tour cost for one sample
 * distance matrix.
 * @returns 0 on exit
 */
int main() {
    tests();  // run self-test implementations
    std::vector<std::vector<uint32_t>> cities = {
        {0, 5, 10, 15}, {5, 0, 20, 30}, {10, 20, 0, 35}, {15, 30, 35, 0}};
    uint32_t V = cities.size();
    std::cout << graph::TravellingSalesmanProblem(&cities, 0, V) << std::endl;
    return 0;
}
| 1,405 |
14,668 | <filename>chrome/android/java/src/org/chromium/chrome/browser/compositor/layouts/components/LayoutTab.java
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.chrome.browser.compositor.layouts.components;
import android.content.Context;
import android.content.res.Resources;
import android.graphics.Color;
import android.graphics.RectF;
import org.chromium.base.MathUtils;
import org.chromium.chrome.browser.tab.Tab;
import org.chromium.ui.modelutil.PropertyKey;
import org.chromium.ui.modelutil.PropertyModel;
/**
* {@link LayoutTab} is used to keep track of a thumbnail's bitmap and position and to
* draw itself onto the GL canvas at the desired Y Offset.
*/
public class LayoutTab extends PropertyModel {
public static final float ALPHA_THRESHOLD = 1.0f / 255.0f;
// TODO(crbug.com/1070284): Make the following properties be part of the PropertyModel.
// Begin section --------------
// Public Layout constants.
public static final float CLOSE_BUTTON_WIDTH_DP = 36.f;
public static final float SHADOW_ALPHA_ON_LIGHT_BG = 0.8f;
public static final float SHADOW_ALPHA_ON_DARK_BG = 1.0f;
public static float sDpToPx;
private static float sPxToDp;
// End section --------------
// TODO(crbug.com/1070284): Maybe make this a ReadableIntPropertyKey
public static final WritableIntPropertyKey TAB_ID = new WritableIntPropertyKey();
// TODO(crbug.com/1070284): Maybe make this a ReadableIntPropertyKey
public static final WritableBooleanPropertyKey IS_INCOGNITO = new WritableBooleanPropertyKey();
// Fields initialized in init()
public static final WritableFloatPropertyKey SCALE = new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey TILT_X_IN_DEGREES = new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey TILT_Y_IN_DEGREES = new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey TILT_X_PIVOT_OFFSET =
new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey TILT_Y_PIVOT_OFFSET =
new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey X = new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey Y = new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey RENDER_X = new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey RENDER_Y = new WritableFloatPropertyKey();
// The top left X offset of the clipped rectangle.
public static final WritableFloatPropertyKey CLIPPED_X = new WritableFloatPropertyKey();
// The top left Y offset of the clipped rectangle.
public static final WritableFloatPropertyKey CLIPPED_Y = new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey CLIPPED_WIDTH = new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey CLIPPED_HEIGHT = new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey ALPHA = new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey SATURATION = new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey BORDER_ALPHA = new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey BORDER_CLOSE_BUTTON_ALPHA =
new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey BORDER_SCALE = new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey ORIGINAL_CONTENT_WIDTH_IN_DP =
new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey ORIGINAL_CONTENT_HEIGHT_IN_DP =
new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey MAX_CONTENT_WIDTH = new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey MAX_CONTENT_HEIGHT =
new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey STATIC_TO_VIEW_BLEND =
new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey BRIGHTNESS = new WritableFloatPropertyKey();
public static final WritableBooleanPropertyKey IS_VISIBLE = new WritableBooleanPropertyKey();
public static final WritableBooleanPropertyKey SHOULD_STALL = new WritableBooleanPropertyKey();
public static final WritableBooleanPropertyKey CAN_USE_LIVE_TEXTURE =
new WritableBooleanPropertyKey();
public static final WritableBooleanPropertyKey SHOW_TOOLBAR = new WritableBooleanPropertyKey();
public static final WritableBooleanPropertyKey ANONYMIZE_TOOLBAR =
new WritableBooleanPropertyKey();
public static final WritableFloatPropertyKey TOOLBAR_ALPHA = new WritableFloatPropertyKey();
public static final WritableBooleanPropertyKey INSET_BORDER_VERTICAL =
new WritableBooleanPropertyKey();
public static final WritableFloatPropertyKey TOOLBAR_Y_OFFSET = new WritableFloatPropertyKey();
public static final WritableFloatPropertyKey SIDE_BORDER_SCALE = new WritableFloatPropertyKey();
public static final WritableBooleanPropertyKey CLOSE_BUTTON_IS_ON_RIGHT =
new WritableBooleanPropertyKey();
public static final WritableObjectPropertyKey<RectF> BOUNDS = new WritableObjectPropertyKey<>();
public static final WritableObjectPropertyKey<RectF> CLOSE_PLACEMENT =
new WritableObjectPropertyKey<>();
/** Whether we need to draw the decoration (border, shadow, ..) at all. */
public static final WritableFloatPropertyKey DECORATION_ALPHA = new WritableFloatPropertyKey();
/**
* Whether this tab need to have its title texture generated. As this is not a free operation
* knowing that we won't show it might save a few cycles and memory.
*/
public static final WritableBooleanPropertyKey IS_TITLE_NEEDED =
new WritableBooleanPropertyKey();
/**
* Whether initFromHost() has been called since the last call to init().
*/
public static final WritableBooleanPropertyKey INIT_FROM_HOST_CALLED =
new WritableBooleanPropertyKey();
// All the members below are initialized from the delayed initialization.
//
// Begin section --------------
/** The color of the background of the tab. Used as the best approximation to fill in. */
public static final WritableIntPropertyKey BACKGROUND_COLOR = new WritableIntPropertyKey();
public static final WritableIntPropertyKey TOOLBAR_BACKGROUND_COLOR =
new WritableIntPropertyKey();
public static final WritableIntPropertyKey TEXT_BOX_BACKGROUND_COLOR =
new WritableIntPropertyKey();
public static final WritableFloatPropertyKey TEXT_BOX_ALPHA = new WritableFloatPropertyKey();
// End section --------------
public static final PropertyModel.WritableFloatPropertyKey CONTENT_OFFSET =
new PropertyModel.WritableFloatPropertyKey();
public static final PropertyKey[] ALL_KEYS = new PropertyKey[] {TAB_ID, IS_INCOGNITO, SCALE,
TILT_X_IN_DEGREES, TILT_Y_IN_DEGREES, TILT_X_PIVOT_OFFSET, TILT_Y_PIVOT_OFFSET, X, Y,
RENDER_X, RENDER_Y, CLIPPED_X, CLIPPED_Y, CLIPPED_WIDTH, CLIPPED_HEIGHT, ALPHA,
SATURATION, BORDER_ALPHA, BORDER_CLOSE_BUTTON_ALPHA, BORDER_SCALE,
ORIGINAL_CONTENT_WIDTH_IN_DP, ORIGINAL_CONTENT_HEIGHT_IN_DP, MAX_CONTENT_WIDTH,
MAX_CONTENT_HEIGHT, STATIC_TO_VIEW_BLEND, BRIGHTNESS, IS_VISIBLE, SHOULD_STALL,
CAN_USE_LIVE_TEXTURE, SHOW_TOOLBAR, ANONYMIZE_TOOLBAR, TOOLBAR_ALPHA,
INSET_BORDER_VERTICAL, TOOLBAR_Y_OFFSET, SIDE_BORDER_SCALE, CLOSE_BUTTON_IS_ON_RIGHT,
BOUNDS, CLOSE_PLACEMENT, DECORATION_ALPHA, IS_TITLE_NEEDED, INIT_FROM_HOST_CALLED,
BACKGROUND_COLOR, TOOLBAR_BACKGROUND_COLOR, TEXT_BOX_BACKGROUND_COLOR, TEXT_BOX_ALPHA,
CONTENT_OFFSET};
/**
 * Default constructor for a {@link LayoutTab}.
 *
 * @param tabId The id of the source {@link Tab}.
 * @param isIncognito Whether the tab is in the incognito stack.
 * @param maxContentTextureWidth The maximum width for drawing the content in px.
 * @param maxContentTextureHeight The maximum height for drawing the content in px.
 * @param showCloseButton Whether a close button should be displayed in the corner.
 * @param isTitleNeeded Whether that tab needs a title texture. This is an
 *                      optimization to save cycles and memory. This is
 *                      ignored if the title texture is already set.
 */
public LayoutTab(int tabId, boolean isIncognito, int maxContentTextureWidth,
        int maxContentTextureHeight, boolean showCloseButton, boolean isTitleNeeded) {
    super(ALL_KEYS);
    set(TAB_ID, tabId);
    set(IS_INCOGNITO, isIncognito);
    set(BOUNDS, new RectF());
    set(CLOSE_PLACEMENT, new RectF());
    // Placeholder colors until initFromHost() supplies the real ones.
    set(BACKGROUND_COLOR, Color.WHITE);
    set(TOOLBAR_BACKGROUND_COLOR, 0xfff2f2f2);
    set(TEXT_BOX_BACKGROUND_COLOR, Color.WHITE);
    set(TEXT_BOX_ALPHA, 1.0f);

    init(maxContentTextureWidth, maxContentTextureHeight, showCloseButton, isTitleNeeded);
}

/**
 * Initializes a {@link LayoutTab} to its default value so it can be reused.
 *
 * @param maxContentTextureWidth The maximum width of the page content in px.
 * @param maxContentTextureHeight The maximum height of the page content in px.
 * @param showCloseButton Whether to show the close button on the tab border.
 * @param isTitleNeeded Whether that tab needs a title texture. This is an
 *                      optimization to save cycles and memory. This is
 *                      ignored if the title texture is already set.
 */
public void init(int maxContentTextureWidth, int maxContentTextureHeight,
        boolean showCloseButton, boolean isTitleNeeded) {
    set(ALPHA, 1.0f);
    set(SATURATION, 1.0f);
    set(BRIGHTNESS, 1.0f);
    set(BORDER_ALPHA, 1.0f);
    set(BORDER_CLOSE_BUTTON_ALPHA, showCloseButton ? 1.f : 0.f);
    set(BORDER_SCALE, 1.0f);
    // Float.MAX_VALUE clip sizes mean "no clipping" (see setClipSize()).
    set(CLIPPED_X, 0.0f);
    set(CLIPPED_Y, 0.0f);
    set(CLIPPED_WIDTH, Float.MAX_VALUE);
    set(CLIPPED_HEIGHT, Float.MAX_VALUE);
    set(SCALE, 1.0f);
    set(TILT_X_IN_DEGREES, 0.0f);
    set(TILT_Y_IN_DEGREES, 0.0f);
    set(IS_VISIBLE, true);
    set(X, 0.0f);
    set(Y, 0.0f);
    set(RENDER_X, 0.0f);
    set(RENDER_Y, 0.0f);
    set(STATIC_TO_VIEW_BLEND, 0.0f);
    set(DECORATION_ALPHA, 1.0f);
    set(IS_TITLE_NEEDED, isTitleNeeded);
    set(CAN_USE_LIVE_TEXTURE, true);
    set(SHOW_TOOLBAR, false);
    set(ANONYMIZE_TOOLBAR, false);
    set(TOOLBAR_ALPHA, 1.0f);
    set(INSET_BORDER_VERTICAL, false);
    set(TOOLBAR_Y_OFFSET, 0.f);
    set(SIDE_BORDER_SCALE, 1.f);
    // Content dimensions are stored in dp; the px arguments are converted here.
    set(ORIGINAL_CONTENT_WIDTH_IN_DP, maxContentTextureWidth * sPxToDp);
    set(ORIGINAL_CONTENT_HEIGHT_IN_DP, maxContentTextureHeight * sPxToDp);
    set(MAX_CONTENT_WIDTH, maxContentTextureWidth * sPxToDp);
    set(MAX_CONTENT_HEIGHT, maxContentTextureHeight * sPxToDp);
    set(INIT_FROM_HOST_CALLED, false);
}
/**
 * Initializes the {@link LayoutTab} from data extracted from a {@link Tab}.
 * As this function may be expensive, it can be delayed and run separately
 * from init().
 *
 * (Doc fix: the previous javadoc documented a nonexistent
 * {@code fallbackThumbnailId} parameter and omitted the last three real ones.)
 *
 * @param backgroundColor The color of the page background.
 * @param shouldStall Whether the tab should display a desaturated thumbnail and
 *                    wait for the content layer to load.
 * @param canUseLiveTexture Whether the tab can use a live texture when being displayed.
 * @param toolbarBackgroundColor The background color of the toolbar.
 * @param textBoxBackgroundColor The background color of the text box.
 * @param textBoxAlpha The alpha of the text box.
 */
public void initFromHost(int backgroundColor, boolean shouldStall, boolean canUseLiveTexture,
        int toolbarBackgroundColor, int textBoxBackgroundColor, float textBoxAlpha) {
    set(BACKGROUND_COLOR, backgroundColor);
    set(TOOLBAR_BACKGROUND_COLOR, toolbarBackgroundColor);
    set(TEXT_BOX_BACKGROUND_COLOR, textBoxBackgroundColor);
    set(TEXT_BOX_ALPHA, textBoxAlpha);
    set(SHOULD_STALL, shouldStall);
    set(CAN_USE_LIVE_TEXTURE, canUseLiveTexture);
    set(INIT_FROM_HOST_CALLED, true);
}

/**
 * @return Whether {@link #initFromHost} needs to be called on this {@link LayoutTab}.
 */
public boolean isInitFromHostNeeded() {
    return !get(INIT_FROM_HOST_CALLED);
}

/**
 * Helper function that gathers the static constants from values/dimens.xml.
 *
 * @param context The Android Context.
 */
public static void resetDimensionConstants(Context context) {
    Resources res = context.getResources();
    sDpToPx = res.getDisplayMetrics().density;
    sPxToDp = 1.0f / sDpToPx;
}
/**
 * @return The scale applied to the content of the tab. Default is 1.0.
 */
public float getScale() {
    return get(SCALE);
}

/**
 * @param scale The scale to apply on the content of the tab (everything except the borders).
 */
public void setScale(float scale) {
    set(SCALE, scale);
}

/**
 * @param tilt The tilt angle around the X axis of the tab, in degrees.
 * @param pivotOffset The offset of the X axis of the tilt pivot.
 */
public void setTiltX(float tilt, float pivotOffset) {
    set(TILT_X_IN_DEGREES, tilt);
    set(TILT_X_PIVOT_OFFSET, pivotOffset);
}

/**
 * @return The tilt angle around the X axis of the tab, in degrees.
 */
public float getTiltX() {
    return get(TILT_X_IN_DEGREES);
}

/**
 * @return The offset of the X axis of the tilt pivot.
 */
public float getTiltXPivotOffset() {
    return get(TILT_X_PIVOT_OFFSET);
}

/**
 * @param tilt The tilt angle around the Y axis of the tab, in degrees.
 * @param pivotOffset The offset of the Y axis of the tilt pivot.
 */
public void setTiltY(float tilt, float pivotOffset) {
    set(TILT_Y_IN_DEGREES, tilt);
    set(TILT_Y_PIVOT_OFFSET, pivotOffset);
}

/**
 * @return The tilt angle around the Y axis of the tab, in degrees.
 */
public float getTiltY() {
    return get(TILT_Y_IN_DEGREES);
}
/**
 * @return The offset of the Y axis of the tilt pivot.
 */
public float getTiltYPivotOffset() {
    // Bug fix: this previously returned TILT_Y_IN_DEGREES (the tilt angle)
    // due to a copy-paste error; the pivot offset written by setTiltY() lives
    // under TILT_Y_PIVOT_OFFSET (mirroring getTiltXPivotOffset()).
    return get(TILT_Y_PIVOT_OFFSET);
}
/**
 * Set the clipping offset. This applies to the scaled content.
 *
 * @param clippedX The top left X offset of the clipped rectangle.
 * @param clippedY The top left Y offset of the clipped rectangle.
 */
public void setClipOffset(float clippedX, float clippedY) {
    set(CLIPPED_X, clippedX);
    set(CLIPPED_Y, clippedY);
}

/**
 * Set the clipping sizes. This applies to the scaled content.
 *
 * @param clippedWidth The width of the clipped rectangle. Float.MAX_VALUE for no clipping.
 * @param clippedHeight The height of the clipped rectangle. Float.MAX_VALUE for no clipping.
 */
public void setClipSize(float clippedWidth, float clippedHeight) {
    set(CLIPPED_WIDTH, clippedWidth);
    set(CLIPPED_HEIGHT, clippedHeight);
}

/**
 * @return The top left X offset of the clipped rectangle.
 */
public float getClippedX() {
    return get(CLIPPED_X);
}

/**
 * @return The top left Y offset of the clipped rectangle.
 */
public float getClippedY() {
    return get(CLIPPED_Y);
}

/**
 * @return The width of the clipped rectangle. Float.MAX_VALUE for no clipping.
 */
public float getClippedWidth() {
    return get(CLIPPED_WIDTH);
}

/**
 * @return The height of the clipped rectangle. Float.MAX_VALUE for no clipping.
 */
public float getClippedHeight() {
    return get(CLIPPED_HEIGHT);
}

/**
 * @return The maximum drawable width (scaled) of the tab contents in dp.
 */
public float getScaledContentWidth() {
    return getOriginalContentWidth() * get(SCALE);
}

/**
 * @return The maximum drawable height (scaled) of the tab contents in dp.
 */
public float getScaledContentHeight() {
    return getOriginalContentHeight() * get(SCALE);
}

/**
 * @return The maximum drawable width (not scaled) of the tab contents texture,
 *         clamped to the maximum content width.
 */
public float getOriginalContentWidth() {
    return Math.min(get(ORIGINAL_CONTENT_WIDTH_IN_DP), get(MAX_CONTENT_WIDTH));
}

/**
 * @return The maximum drawable height (not scaled) of the tab contents texture,
 *         clamped to the maximum content height.
 */
public float getOriginalContentHeight() {
    return Math.min(get(ORIGINAL_CONTENT_HEIGHT_IN_DP), get(MAX_CONTENT_HEIGHT));
}
/**
 * @return The original unclamped height (not scaled) of the tab contents
 *         texture. (Doc fix: this returns the height, not the "width" the old
 *         javadoc claimed, and unlike getOriginalContentHeight() it ignores
 *         MAX_CONTENT_HEIGHT.)
 */
public float getUnclampedOriginalContentHeight() {
    return get(ORIGINAL_CONTENT_HEIGHT_IN_DP);
}
/**
 * @return The width of the drawn content (clipped and scaled), in dp.
 */
public float getFinalContentWidth() {
    return Math.min(get(CLIPPED_WIDTH), getScaledContentWidth());
}

/**
 * @return The height of the drawn content (clipped and scaled), in dp.
 */
public float getFinalContentHeight() {
    return Math.min(get(CLIPPED_HEIGHT), getScaledContentHeight());
}

/**
 * @return The maximum width the content can be, in dp.
 */
public float getMaxContentWidth() {
    return get(MAX_CONTENT_WIDTH);
}

/**
 * @return The maximum height the content can be, in dp.
 */
public float getMaxContentHeight() {
    return get(MAX_CONTENT_HEIGHT);
}

/**
 * @param width The maximum width the content can be, in dp.
 */
public void setMaxContentWidth(float width) {
    set(MAX_CONTENT_WIDTH, width);
}

/**
 * @param height The maximum height the content can be, in dp.
 */
public void setMaxContentHeight(float height) {
    set(MAX_CONTENT_HEIGHT, height);
}

/**
 * @return The id of the tab, same as the id from the Tab in TabModel.
 */
public int getId() {
    return get(TAB_ID);
}

/**
 * @return Whether the underlying tab is incognito or not.
 */
public boolean isIncognito() {
    return get(IS_INCOGNITO);
}

/**
 * @param y The vertical draw position.
 */
public void setY(float y) {
    set(Y, y);
}

/**
 * @return The vertical draw position for the update logic.
 */
public float getY() {
    return get(Y);
}

/**
 * @return The vertical draw position for the renderer.
 */
public float getRenderY() {
    return get(RENDER_Y);
}

/**
 * @param x The horizontal draw position.
 */
public void setX(float x) {
    set(X, x);
}

/**
 * @return The horizontal draw position for the update logic.
 */
public float getX() {
    return get(X);
}

/**
 * @return The horizontal draw position for the renderer.
 */
public float getRenderX() {
    return get(RENDER_X);
}

/**
 * Set the transparency value for all of the tab (the contents,
 * border, etc...). For components that allow specifying
 * their own alpha values, it will use the min of these two fields.
 *
 * @param f The transparency value for the tab.
 */
public void setAlpha(float f) {
    set(ALPHA, f);
}

/**
 * @return The transparency value for all of the tab components.
 */
public float getAlpha() {
    return get(ALPHA);
}

/**
 * Set the saturation value for the tab contents.
 *
 * @param f The saturation value for the contents.
 */
public void setSaturation(float f) {
    set(SATURATION, f);
}
/**
 * @return The saturation value for the tab contents.
 */
public float getSaturation() {
    // Bug fix: this previously returned get(SCALE) due to a copy-paste error;
    // the value written by setSaturation() lives under SATURATION.
    return get(SATURATION);
}
/**
* @param alpha The maximum alpha value of the tab border.
*/
public void setBorderAlpha(float alpha) {
set(BORDER_ALPHA, alpha);
}
/**
* @return The current alpha value at which the tab border is drawn.
*/
public float getBorderAlpha() {
return Math.min(get(BORDER_ALPHA), get(ALPHA));
}
/**
* @return The current alpha value at which the tab border inner shadow is drawn.
*/
public float getBorderInnerShadowAlpha() {
return Math.min(get(BORDER_ALPHA) * (1.0f - get(TOOLBAR_ALPHA)), get(ALPHA));
}
/**
* @param alpha The maximum alpha value of the close button on the border.
*/
public void setBorderCloseButtonAlpha(float alpha) {
set(BORDER_CLOSE_BUTTON_ALPHA, alpha);
}
/**
* @return The current alpha value at which the close button on the border is drawn.
*/
public float getBorderCloseButtonAlpha() {
return get(BORDER_CLOSE_BUTTON_ALPHA);
}
/**
* @param scale The scale factor of the border.
* 1.0f yields 1:1 pixel with the source image.
*/
public void setBorderScale(float scale) {
set(BORDER_SCALE, scale);
}
/**
* @return The current scale applied on the tab border.
*/
public float getBorderScale() {
return get(BORDER_SCALE);
}
/**
* @param decorationAlpha Whether or not to draw the decoration for this card.
*/
public void setDecorationAlpha(float decorationAlpha) {
set(DECORATION_ALPHA, decorationAlpha);
}
/**
 * @return The opacity of the decoration.
 */
public float getDecorationAlpha() {
    return get(DECORATION_ALPHA);
}
/**
 * @param toolbarYOffset The y offset of the toolbar.
 */
public void setToolbarYOffset(float toolbarYOffset) {
    set(TOOLBAR_Y_OFFSET, toolbarYOffset);
}
/**
 * @return The y offset of the toolbar.
 */
public float getToolbarYOffset() {
    return get(TOOLBAR_Y_OFFSET);
}
/**
 * @param scale The scale of the side border; values outside [0, 1] are clamped.
 */
public void setSideBorderScale(float scale) {
    set(SIDE_BORDER_SCALE, MathUtils.clamp(scale, 0.f, 1.f));
}
/**
 * @return The scale of the side border (from 0 to 1).
 */
public float getSideBorderScale() {
    return get(SIDE_BORDER_SCALE);
}
/**
 * @param brightness The brightness value to apply to the tab.
 */
public void setBrightness(float brightness) {
    set(BRIGHTNESS, brightness);
}
/**
 * @return The brightness of the tab.
 */
public float getBrightness() {
    return get(BRIGHTNESS);
}
/**
 * Convenience boolean form of {@link #setDecorationAlpha(float)}: maps
 * {@code true} to fully opaque (1.0f) and {@code false} to fully transparent (0.0f).
 *
 * @param drawDecoration Whether or not to draw decoration.
 */
public void setDrawDecoration(boolean drawDecoration) {
    set(DECORATION_ALPHA, drawDecoration ? 1.0f : 0.0f);
}
/**
 * @param percentageView The blend between the old static tab and the new live one.
 */
public void setStaticToViewBlend(float percentageView) {
    set(STATIC_TO_VIEW_BLEND, percentageView);
}
/**
 * @return The current blend between the old static tab and the new live one.
 */
public float getStaticToViewBlend() {
    return get(STATIC_TO_VIEW_BLEND);
}
@Override
public String toString() {
    // Debug representation: just the tab id.
    return Integer.toString(getId());
}
/**
 * Records the maximum content dimensions, converting from px to dp via sPxToDp.
 *
 * @param originalContentWidth  The maximum content width for the given orientation in px.
 * @param originalContentHeight The maximum content height for the given orientation in px.
 */
public void setContentSize(int originalContentWidth, int originalContentHeight) {
    set(ORIGINAL_CONTENT_WIDTH_IN_DP, originalContentWidth * sPxToDp);
    set(ORIGINAL_CONTENT_HEIGHT_IN_DP, originalContentHeight * sPxToDp);
}
/**
 * @return Whether the tab title should be displayed.
 */
public boolean isTitleNeeded() {
    return get(IS_TITLE_NEEDED);
}
/**
 * @param visible True if the {@link LayoutTab} is visible and needs to be drawn.
 */
public void setVisible(boolean visible) {
    set(IS_VISIBLE, visible);
}
/**
 * @return True if the {@link LayoutTab} is visible and will be drawn.
 */
public boolean isVisible() {
    return get(IS_VISIBLE);
}
/**
 * @param shouldStall Whether or not the tab should wait for the live layer to load.
 */
public void setShouldStall(boolean shouldStall) {
    set(SHOULD_STALL, shouldStall);
}
/**
 * @return Whether or not the tab should wait for the live layer to load.
 */
public boolean shouldStall() {
    return get(SHOULD_STALL);
}
/**
 * @return Whether the tab can use a live texture to render.
 */
public boolean canUseLiveTexture() {
    return get(CAN_USE_LIVE_TEXTURE);
}
/**
 * @param showToolbar Whether or not to show a toolbar at the top of the content.
 */
public void setShowToolbar(boolean showToolbar) {
    set(SHOW_TOOLBAR, showToolbar);
}
/**
 * @return Whether or not to show a toolbar at the top of the content.
 */
public boolean showToolbar() {
    return get(SHOW_TOOLBAR);
}
/**
 * This value is only used if {@link #showToolbar()} is {@code true}.
 *
 * @param anonymize Whether or not to anonymize the toolbar (hiding URL, etc.).
 */
public void setAnonymizeToolbar(boolean anonymize) {
    set(ANONYMIZE_TOOLBAR, anonymize);
}
/**
 * This value is only used if {@link #showToolbar()} is {@code true}.
 *
 * @return Whether or not to anonymize the toolbar (hiding URL, etc.).
 */
public boolean anonymizeToolbar() {
    return get(ANONYMIZE_TOOLBAR);
}
/**
 * @param alpha The alpha of the toolbar.
 */
public void setToolbarAlpha(float alpha) {
    set(TOOLBAR_ALPHA, alpha);
}
/**
 * @return The alpha of the toolbar.
 */
public float getToolbarAlpha() {
    return get(TOOLBAR_ALPHA);
}
/**
 * @param inset Whether or not to inset the top vertical component of the tab border.
 */
public void setInsetBorderVertical(boolean inset) {
    set(INSET_BORDER_VERTICAL, inset);
}
/**
 * @return Whether or not to inset the top vertical component of the tab border.
 */
public boolean insetBorderVertical() {
    return get(INSET_BORDER_VERTICAL);
}
/**
 * @param closeButtonIsOnRight Whether the close button is placed on the right side of the tab.
 */
public void setCloseButtonIsOnRight(boolean closeButtonIsOnRight) {
    set(CLOSE_BUTTON_IS_ON_RIGHT, closeButtonIsOnRight);
}
/**
 * @return Whether the close button is placed on the right side of the tab.
 */
public boolean isCloseButtonOnRight() {
    return get(CLOSE_BUTTON_IS_ON_RIGHT);
}
/**
 * @return The color of the background of the tab. Used as the best approximation to fill in.
 */
public int getBackgroundColor() {
    return get(BACKGROUND_COLOR);
}
/**
 * @return The color of the background of the toolbar.
 */
public int getToolbarBackgroundColor() {
    return get(TOOLBAR_BACKGROUND_COLOR);
}
/**
 * @return The color of the textbox in the toolbar. Used as the color for the anonymize rect.
 */
public int getTextBoxBackgroundColor() {
    return get(TEXT_BOX_BACKGROUND_COLOR);
}
/**
 * @return The alpha value of the textbox in the toolbar.
 */
public float getTextBoxAlpha() {
    return get(TEXT_BOX_ALPHA);
}
}
| 10,995 |
1,350 | <filename>sdk/communication/azure-communication-chat/src/test/java/com/azure/communication/chat/implementation/ChatOptionsProvider.java
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.communication.chat.implementation;
import com.azure.communication.chat.models.*;
import com.azure.communication.common.CommunicationUserIdentifier;
import java.util.*;
/**
 * Test-fixture factory for chat option/request objects used by the chat client tests.
 */
public class ChatOptionsProvider {

    /** Builds thread-creation options ("Test" topic) with two named test participants. */
    public static CreateChatThreadOptions createThreadOptions(String userId1, String userId2) {
        CreateChatThreadOptions threadOptions = new CreateChatThreadOptions("Test");
        threadOptions.addParticipant(generateParticipant(userId1, "Tester 1"));
        threadOptions.addParticipant(generateParticipant(userId2, "Tester 2"));
        return threadOptions;
    }

    /** Builds thread-update options that change the topic to "Update Test". */
    public static UpdateChatThreadOptions updateThreadOptions() {
        UpdateChatThreadOptions updateOptions = new UpdateChatThreadOptions();
        updateOptions.setTopic("Update Test");
        return updateOptions;
    }

    /** Builds the participant list used by the add-participants tests. */
    public static Iterable<ChatParticipant> addParticipantsOptions(String userId1, String userId2) {
        List<ChatParticipant> addedParticipants = new ArrayList<ChatParticipant>();
        addedParticipants.add(generateParticipant(userId1, "Added Tester 1"));
        addedParticipants.add(generateParticipant(userId2, "Added Tester 2"));
        return addedParticipants;
    }

    /** Builds default text message options ("Content"). */
    public static SendChatMessageOptions sendMessageOptions() {
        return sendMessageOptions(ChatMessageType.TEXT, "Content");
    }

    /** Builds message options of the given type/content with test metadata attached. */
    public static SendChatMessageOptions sendMessageOptions(ChatMessageType type, String content) {
        SendChatMessageOptions sendOptions = new SendChatMessageOptions();
        sendOptions.setContent(content);
        sendOptions.setSenderDisplayName("Tester");
        sendOptions.setType(type);
        sendOptions.setMetadata(generateMessageMetadata());
        return sendOptions;
    }

    /** Builds message-update options with updated content and metadata. */
    public static UpdateChatMessageOptions updateMessageOptions() {
        UpdateChatMessageOptions updateOptions = new UpdateChatMessageOptions();
        updateOptions.setContent("Update Test");
        updateOptions.setMetadata(generateUpdatedMessageMetadata());
        return updateOptions;
    }

    /** Builds a participant for the given communication user id and display name. */
    private static ChatParticipant generateParticipant(String id, String displayName) {
        ChatParticipant participant = new ChatParticipant();
        participant.setCommunicationIdentifier(new CommunicationUserIdentifier(id));
        participant.setDisplayName(displayName);
        return participant;
    }

    /** Metadata attached to newly sent messages. Plain HashMap (avoids double-brace init). */
    private static Map<String, String> generateMessageMetadata() {
        Map<String, String> metadata = new HashMap<String, String>();
        metadata.put("tags", "tags value");
        metadata.put("deliveryMode", "deliveryMode value");
        metadata.put("onedriveReferences", "onedriveReferences");
        metadata.put("amsreferences", "[\\\"test url file 3\\\"]");
        metadata.put("key", "value key");
        return metadata;
    }

    /** Metadata attached when updating a message. Plain HashMap (avoids double-brace init). */
    private static Map<String, String> generateUpdatedMessageMetadata() {
        Map<String, String> metadata = new HashMap<String, String>();
        metadata.put("tags", "");
        metadata.put("deliveryMode", "deliveryMode value - updated");
        metadata.put("onedriveReferences", "onedriveReferences - updated");
        metadata.put("amsreferences", "[\\\"test url file 3\\\"]");
        return metadata;
    }
}
| 1,301 |
3,705 | <reponame>zaltoprofen/chainer
#pragma once
namespace chainerx {
// Polymorphic base class for ChainerX kernels (virtual destructor).
// Instances are intentionally neither copyable nor movable.
class Kernel {
public:
    Kernel() = default;
    virtual ~Kernel() = default;

    Kernel(const Kernel&) = delete;
    Kernel(Kernel&&) = delete;
    Kernel& operator=(const Kernel&) = delete;
    Kernel& operator=(Kernel&&) = delete;
};
} // namespace chainerx
| 124 |
7,471 | #include "mvDrag.h"
#include <utility>
#include "mvContext.h"
#include "dearpygui.h"
#include <string>
#include "mvItemRegistry.h"
#include "mvPythonExceptions.h"
#include "AppItems/fonts/mvFont.h"
#include "AppItems/themes/mvTheme.h"
#include "AppItems/containers/mvDragPayload.h"
#include "mvPyObject.h"
#include "AppItems/widget_handlers/mvItemHandlerRegistry.h"
namespace Marvel {
// Constructs a multi-component float drag widget; all state keeps its defaults.
mvDragFloatMulti::mvDragFloatMulti(mvUUID uuid)
    : mvAppItem(uuid)
{
}
// Copies widget-specific configuration from a template item of the same type.
// The shared value pointer is only adopted when this item has a data source.
void mvDragFloatMulti::applySpecificTemplate(mvAppItem* item)
{
    auto titem = static_cast<mvDragFloatMulti*>(item);
    if (config.source != 0) _value = titem->_value;
    _disabled_value[0] = titem->_disabled_value[0];
    _disabled_value[1] = titem->_disabled_value[1];
    _disabled_value[2] = titem->_disabled_value[2];
    _disabled_value[3] = titem->_disabled_value[3];
    _speed = titem->_speed;
    _min = titem->_min;
    _max = titem->_max;
    _format = titem->_format;
    _flags = titem->_flags;
    _stor_flags = titem->_stor_flags;
    _size = titem->_size;
}
// Returns the value as a Python list; always exposes all 4 backing components
// regardless of the displayed _size.
PyObject* mvDragFloatMulti::getPyValue()
{
    return ToPyFloatList(_value->data(), 4);
}
// Sets the value from a Python sequence. The input is zero-padded (and, beyond
// four entries, truncated) to exactly 4 components before being stored.
void mvDragFloatMulti::setPyValue(PyObject* value)
{
    std::vector<float> incoming = ToFloatVect(value);
    incoming.resize(4, 0.0f);

    std::array<float, 4> components{};
    for (size_t idx = 0; idx < components.size(); idx++)
        components[idx] = incoming[idx];

    if (_value)
        *_value = components;
    else
        _value = std::make_shared<std::array<float, 4>>(components);
}
// Binds this widget's value to another item's value so both share storage.
// No-ops when the source is unchanged; reports an error (without rebinding)
// when the source item is missing or holds an incompatible value type.
void mvDragFloatMulti::setDataSource(mvUUID dataSource)
{
    if (dataSource == config.source) return;
    config.source = dataSource;

    mvAppItem* item = GetItem((*GContext->itemRegistry), dataSource);
    if (!item)
    {
        mvThrowPythonError(mvErrorCode::mvSourceNotFound, "set_value",
            "Source item not found: " + std::to_string(dataSource), this);
        return;
    }
    if (GetEntityValueType(item->type) != GetEntityValueType(type))
    {
        mvThrowPythonError(mvErrorCode::mvSourceNotCompatible, "set_value",
            "Values types do not match: " + std::to_string(dataSource), this);
        return;
    }
    _value = *static_cast<std::shared_ptr<std::array<float, 4>>*>(item->getValue());
}
// Renders the multi-component float drag widget and dispatches the change
// callback when the user edits it. Follows the standard mvAppItem draw
// sequence: visibility/focus handling, cursor bookkeeping, font/theme push,
// the ImGui call, state update, then teardown in reverse order.
void mvDragFloatMulti::draw(ImDrawList* drawlist, float x, float y)
{
    //-----------------------------------------------------------------------------
    // pre draw
    //-----------------------------------------------------------------------------

    // show/hide
    if (!config.show)
        return;

    // focusing
    if (info.focusNextFrame)
    {
        ImGui::SetKeyboardFocusHere();
        info.focusNextFrame = false;
    }

    // cache old cursor position
    ImVec2 previousCursorPos = ImGui::GetCursorPos();

    // set cursor position if user set
    if (info.dirtyPos)
        ImGui::SetCursorPos(state.pos);

    // update widget's position state
    state.pos = { ImGui::GetCursorPosX(), ImGui::GetCursorPosY() };

    // set item width
    if (config.width != 0)
        ImGui::SetNextItemWidth((float)config.width);

    // set indent
    if (config.indent > 0.0f)
        ImGui::Indent(config.indent);

    // push font if a font object is attached
    if (font)
    {
        ImFont* fontptr = static_cast<mvFont*>(font.get())->getFontPtr();
        ImGui::PushFont(fontptr);
    }

    // themes
    apply_local_theming(this);

    //-----------------------------------------------------------------------------
    // draw
    //-----------------------------------------------------------------------------
    {
        ScopedID id(uuid);

        bool activated = false;

        // Bug fix: mirror all 4 components into the read-only disabled copy.
        // Previously only the first 2 were copied, so disabled widgets of
        // size 3 or 4 displayed stale values for the remaining components.
        if (!config.enabled) std::copy(_value->data(), _value->data() + 4, _disabled_value);

        switch (_size)
        {
        case 2:
            activated = ImGui::DragFloat2(info.internalLabel.c_str(), config.enabled ? _value->data() : &_disabled_value[0], _speed, _min, _max, _format.c_str(), _flags);
            break;
        case 3:
            activated = ImGui::DragFloat3(info.internalLabel.c_str(), config.enabled ? _value->data() : &_disabled_value[0], _speed, _min, _max, _format.c_str(), _flags);
            break;
        case 4:
            activated = ImGui::DragFloat4(info.internalLabel.c_str(), config.enabled ? _value->data() : &_disabled_value[0], _speed, _min, _max, _format.c_str(), _flags);
            break;
        default:
            break;
        }

        if (activated)
        {
            auto value = *_value;
            if (config.alias.empty())
                mvSubmitCallback([=]() {
                    mvAddCallback(getCallback(false), uuid, ToPyFloatList(value.data(), (int)value.size()), config.user_data);
                });
            else
                mvSubmitCallback([=]() {
                    mvAddCallback(getCallback(false), config.alias, ToPyFloatList(value.data(), (int)value.size()), config.user_data);
                });
        }
    }

    //-----------------------------------------------------------------------------
    // update state
    //-----------------------------------------------------------------------------
    UpdateAppItemState(state);

    //-----------------------------------------------------------------------------
    // post draw
    //-----------------------------------------------------------------------------

    // set cursor position to cached position
    if (info.dirtyPos)
        ImGui::SetCursorPos(previousCursorPos);

    if (config.indent > 0.0f)
        ImGui::Unindent(config.indent);

    // pop font off stack
    if (font)
        ImGui::PopFont();

    // handle popping themes
    cleanup_local_theming(this);

    if (handlerRegistry)
        handlerRegistry->checkEvents(&state);

    // handle drag & drop if used
    apply_drag_drop(this);
}
// Applies widget-specific keyword arguments from a Python dict. Flag kwargs are
// written to both _flags (active) and _stor_flags (stored copy) so that the
// enable/disable transitions below can restore or force ImGuiSliderFlags_NoInput.
void mvDragFloatMulti::handleSpecificKeywordArgs(PyObject* dict)
{
    if (dict == nullptr)
        return;

    if (PyObject* item = PyDict_GetItemString(dict, "format")) _format = ToString(item);
    if (PyObject* item = PyDict_GetItemString(dict, "speed")) _speed = ToFloat(item);
    if (PyObject* item = PyDict_GetItemString(dict, "min_value")) _min = ToFloat(item);
    if (PyObject* item = PyDict_GetItemString(dict, "max_value")) _max = ToFloat(item);
    if (PyObject* item = PyDict_GetItemString(dict, "size")) _size = ToInt(item);

    // helper for bit flipping
    auto flagop = [dict](const char* keyword, int flag, int& flags)
    {
        if (PyObject* item = PyDict_GetItemString(dict, keyword)) ToBool(item) ? flags |= flag : flags &= ~flag;
    };

    // flags
    flagop("clamped", ImGuiSliderFlags_ClampOnInput, _flags);
    flagop("clamped", ImGuiSliderFlags_ClampOnInput, _stor_flags);
    flagop("no_input", ImGuiSliderFlags_NoInput, _flags);
    flagop("no_input", ImGuiSliderFlags_NoInput, _stor_flags);

    // re-enabled: restore the user-configured flags
    if (info.enabledLastFrame)
    {
        info.enabledLastFrame = false;
        _flags = _stor_flags;
    }

    // disabled: stash current flags and force NoInput
    if (info.disabledLastFrame)
    {
        info.disabledLastFrame = false;
        _stor_flags = _flags;
        _flags |= ImGuiSliderFlags_NoInput;
    }
}
// Writes this widget's specific configuration back into a Python dict
// (inverse of handleSpecificKeywordArgs).
void mvDragFloatMulti::getSpecificConfiguration(PyObject* dict)
{
    if (dict == nullptr)
        return;

    mvPyObject py_format = ToPyString(_format);
    mvPyObject py_speed = ToPyFloat(_speed);
    mvPyObject py_min_value = ToPyFloat(_min);
    mvPyObject py_max_value = ToPyFloat(_max);
    mvPyObject py_size = ToPyInt(_size);

    PyDict_SetItemString(dict, "format", py_format);
    PyDict_SetItemString(dict, "speed", py_speed);
    PyDict_SetItemString(dict, "min_value", py_min_value);
    PyDict_SetItemString(dict, "max_value", py_max_value);
    PyDict_SetItemString(dict, "size", py_size);

    // helper to check and set bit
    auto checkbitset = [dict](const char* keyword, int flag, const int& flags)
    {
        mvPyObject py_result = ToPyBool(flags & flag);
        PyDict_SetItemString(dict, keyword, py_result);
    };

    // window flags
    checkbitset("clamped", ImGuiSliderFlags_ClampOnInput, _flags);
    checkbitset("no_input", ImGuiSliderFlags_NoInput, _flags);
}
// Constructs a multi-component int drag widget; all state keeps its defaults.
mvDragIntMulti::mvDragIntMulti(mvUUID uuid)
    : mvAppItem(uuid)
{
}
// Copies widget-specific configuration from a template item of the same type.
// The shared value pointer is only adopted when this item has a data source.
void mvDragIntMulti::applySpecificTemplate(mvAppItem* item)
{
    auto titem = static_cast<mvDragIntMulti*>(item);
    if (config.source != 0) _value = titem->_value;
    _disabled_value[0] = titem->_disabled_value[0];
    _disabled_value[1] = titem->_disabled_value[1];
    _disabled_value[2] = titem->_disabled_value[2];
    _disabled_value[3] = titem->_disabled_value[3];
    _speed = titem->_speed;
    _min = titem->_min;
    _max = titem->_max;
    _format = titem->_format;
    _flags = titem->_flags;
    _stor_flags = titem->_stor_flags;
    _size = titem->_size;
}
// Returns the value as a Python list; always exposes all 4 backing components
// regardless of the displayed _size.
PyObject* mvDragIntMulti::getPyValue()
{
    return ToPyIntList(_value->data(), 4);
}
// Sets the value from a Python sequence. The input is zero-padded (and, beyond
// four entries, truncated) to exactly 4 components before being stored.
void mvDragIntMulti::setPyValue(PyObject* value)
{
    std::vector<int> incoming = ToIntVect(value);
    incoming.resize(4, 0);

    std::array<int, 4> components{};
    for (size_t idx = 0; idx < components.size(); idx++)
        components[idx] = incoming[idx];

    if (_value)
        *_value = components;
    else
        _value = std::make_shared<std::array<int, 4>>(components);
}
// Binds this widget's value to another item's value so both share storage.
// No-ops when the source is unchanged; reports an error (without rebinding)
// when the source item is missing or holds an incompatible value type.
void mvDragIntMulti::setDataSource(mvUUID dataSource)
{
    if (dataSource == config.source) return;
    config.source = dataSource;

    mvAppItem* item = GetItem((*GContext->itemRegistry), dataSource);
    if (!item)
    {
        mvThrowPythonError(mvErrorCode::mvSourceNotFound, "set_value",
            "Source item not found: " + std::to_string(dataSource), this);
        return;
    }
    if (GetEntityValueType(item->type) != GetEntityValueType(type))
    {
        mvThrowPythonError(mvErrorCode::mvSourceNotCompatible, "set_value",
            "Values types do not match: " + std::to_string(dataSource), this);
        return;
    }
    _value = *static_cast<std::shared_ptr<std::array<int, 4>>*>(item->getValue());
}
// Renders the multi-component int drag widget and dispatches the change
// callback when the user edits it. Follows the standard mvAppItem draw
// sequence: visibility/focus handling, cursor bookkeeping, font/theme push,
// the ImGui call, state update, then teardown in reverse order.
void mvDragIntMulti::draw(ImDrawList* drawlist, float x, float y)
{
    //-----------------------------------------------------------------------------
    // pre draw
    //-----------------------------------------------------------------------------

    // show/hide
    if (!config.show)
        return;

    // focusing
    if (info.focusNextFrame)
    {
        ImGui::SetKeyboardFocusHere();
        info.focusNextFrame = false;
    }

    // cache old cursor position
    ImVec2 previousCursorPos = ImGui::GetCursorPos();

    // set cursor position if user set
    if (info.dirtyPos)
        ImGui::SetCursorPos(state.pos);

    // update widget's position state
    state.pos = { ImGui::GetCursorPosX(), ImGui::GetCursorPosY() };

    // set item width
    if (config.width != 0)
        ImGui::SetNextItemWidth((float)config.width);

    // set indent
    if (config.indent > 0.0f)
        ImGui::Indent(config.indent);

    // push font if a font object is attached
    if (font)
    {
        ImFont* fontptr = static_cast<mvFont*>(font.get())->getFontPtr();
        ImGui::PushFont(fontptr);
    }

    // themes
    apply_local_theming(this);

    //-----------------------------------------------------------------------------
    // draw
    //-----------------------------------------------------------------------------
    {
        ScopedID id(uuid);

        bool activated = false;

        // Bug fix: mirror all 4 components into the read-only disabled copy.
        // Previously only the first 2 were copied, so disabled widgets of
        // size 3 or 4 displayed stale values for the remaining components.
        if (!config.enabled) std::copy(_value->data(), _value->data() + 4, _disabled_value);

        switch (_size)
        {
        case 2:
            activated = ImGui::DragInt2(info.internalLabel.c_str(), config.enabled ? _value->data() : &_disabled_value[0], _speed, _min, _max, _format.c_str(), _flags);
            break;
        case 3:
            activated = ImGui::DragInt3(info.internalLabel.c_str(), config.enabled ? _value->data() : &_disabled_value[0], _speed, _min, _max, _format.c_str(), _flags);
            break;
        case 4:
            activated = ImGui::DragInt4(info.internalLabel.c_str(), config.enabled ? _value->data() : &_disabled_value[0], _speed, _min, _max, _format.c_str(), _flags);
            break;
        default:
            break;
        }

        if (activated)
        {
            auto value = *_value;
            if (config.alias.empty())
                mvSubmitCallback([=]() {
                    mvAddCallback(getCallback(false), uuid, ToPyIntList(value.data(), (int)value.size()), config.user_data);
                });
            else
                mvSubmitCallback([=]() {
                    mvAddCallback(getCallback(false), config.alias, ToPyIntList(value.data(), (int)value.size()), config.user_data);
                });
        }
    }

    //-----------------------------------------------------------------------------
    // update state
    //-----------------------------------------------------------------------------
    UpdateAppItemState(state);

    //-----------------------------------------------------------------------------
    // post draw
    //-----------------------------------------------------------------------------

    // set cursor position to cached position
    if (info.dirtyPos)
        ImGui::SetCursorPos(previousCursorPos);

    if (config.indent > 0.0f)
        ImGui::Unindent(config.indent);

    // pop font off stack
    if (font)
        ImGui::PopFont();

    // handle popping themes
    cleanup_local_theming(this);

    if (handlerRegistry)
        handlerRegistry->checkEvents(&state);

    // handle drag & drop if used
    apply_drag_drop(this);
}
// Applies widget-specific keyword arguments from a Python dict. Flag kwargs are
// written to both _flags (active) and _stor_flags (stored copy) so that the
// enable/disable transitions below can restore or force ImGuiSliderFlags_NoInput.
void mvDragIntMulti::handleSpecificKeywordArgs(PyObject* dict)
{
    if (dict == nullptr)
        return;

    if (PyObject* item = PyDict_GetItemString(dict, "format")) _format = ToString(item);
    if (PyObject* item = PyDict_GetItemString(dict, "speed")) _speed = ToFloat(item);  // speed is a float even for int drags
    if (PyObject* item = PyDict_GetItemString(dict, "min_value")) _min = ToInt(item);
    if (PyObject* item = PyDict_GetItemString(dict, "max_value")) _max = ToInt(item);
    if (PyObject* item = PyDict_GetItemString(dict, "size")) _size = ToInt(item);

    // helper for bit flipping
    auto flagop = [dict](const char* keyword, int flag, int& flags)
    {
        if (PyObject* item = PyDict_GetItemString(dict, keyword)) ToBool(item) ? flags |= flag : flags &= ~flag;
    };

    // flags
    flagop("clamped", ImGuiSliderFlags_ClampOnInput, _flags);
    flagop("clamped", ImGuiSliderFlags_ClampOnInput, _stor_flags);
    flagop("no_input", ImGuiSliderFlags_NoInput, _flags);
    flagop("no_input", ImGuiSliderFlags_NoInput, _stor_flags);

    // re-enabled: restore the user-configured flags
    if (info.enabledLastFrame)
    {
        info.enabledLastFrame = false;
        _flags = _stor_flags;
    }

    // disabled: stash current flags and force NoInput
    if (info.disabledLastFrame)
    {
        info.disabledLastFrame = false;
        _stor_flags = _flags;
        _flags |= ImGuiSliderFlags_NoInput;
    }
}
// Writes this widget's specific configuration back into a Python dict
// (inverse of handleSpecificKeywordArgs).
void mvDragIntMulti::getSpecificConfiguration(PyObject* dict)
{
    if (dict == nullptr)
        return;

    mvPyObject py_format = ToPyString(_format);
    mvPyObject py_speed = ToPyFloat(_speed);
    mvPyObject py_min_value = ToPyInt(_min);
    mvPyObject py_max_value = ToPyInt(_max);
    mvPyObject py_size = ToPyInt(_size);

    PyDict_SetItemString(dict, "format", py_format);
    PyDict_SetItemString(dict, "speed", py_speed);
    PyDict_SetItemString(dict, "min_value", py_min_value);
    PyDict_SetItemString(dict, "max_value", py_max_value);
    PyDict_SetItemString(dict, "size", py_size);

    // helper to check and set bit
    auto checkbitset = [dict](const char* keyword, int flag, const int& flags)
    {
        mvPyObject py_result = ToPyBool(flags & flag);
        PyDict_SetItemString(dict, keyword, py_result);
    };

    // window flags
    checkbitset("clamped", ImGuiSliderFlags_ClampOnInput, _flags);
    checkbitset("no_input", ImGuiSliderFlags_NoInput, _flags);
}
// Constructs a single-component float drag widget; all state keeps its defaults.
mvDragFloat::mvDragFloat(mvUUID uuid)
    : mvAppItem(uuid)
{
}
// Copies widget-specific configuration from a template item of the same type.
// The shared value pointer is only adopted when this item has a data source.
void mvDragFloat::applySpecificTemplate(mvAppItem* item)
{
    auto titem = static_cast<mvDragFloat*>(item);
    if (config.source != 0) _value = titem->_value;
    _disabled_value = titem->_disabled_value;
    _speed = titem->_speed;
    _min = titem->_min;
    _max = titem->_max;
    _format = titem->_format;
    _flags = titem->_flags;
    _stor_flags = titem->_stor_flags;
}
// Returns the current value as a Python float.
PyObject* mvDragFloat::getPyValue()
{
    return ToPyFloat(*_value);
}
// Overwrites the (possibly shared) value with the given Python number.
void mvDragFloat::setPyValue(PyObject* value)
{
    *_value = ToFloat(value);
}
// Binds this widget's value to another item's value so both share storage.
// No-ops when the source is unchanged; reports an error (without rebinding)
// when the source item is missing or holds an incompatible value type.
void mvDragFloat::setDataSource(mvUUID dataSource)
{
    if (dataSource == config.source) return;
    config.source = dataSource;

    mvAppItem* item = GetItem((*GContext->itemRegistry), dataSource);
    if (!item)
    {
        mvThrowPythonError(mvErrorCode::mvSourceNotFound, "set_value",
            "Source item not found: " + std::to_string(dataSource), this);
        return;
    }
    if (GetEntityValueType(item->type) != GetEntityValueType(type))
    {
        mvThrowPythonError(mvErrorCode::mvSourceNotCompatible, "set_value",
            "Values types do not match: " + std::to_string(dataSource), this);
        return;
    }
    _value = *static_cast<std::shared_ptr<float>*>(item->getValue());
}
// Renders the single float drag widget and dispatches the change callback when
// the user edits it. Standard mvAppItem draw sequence.
void mvDragFloat::draw(ImDrawList* drawlist, float x, float y)
{
    // NOTE(review): unlike the other drag widgets in this file, the ScopedID here
    // spans the whole function rather than just the draw section; behavior is the
    // same (it pops on scope exit) but the placement is inconsistent.
    ScopedID id(uuid);

    //-----------------------------------------------------------------------------
    // pre draw
    //-----------------------------------------------------------------------------

    // show/hide
    if (!config.show)
        return;

    // focusing
    if (info.focusNextFrame)
    {
        ImGui::SetKeyboardFocusHere();
        info.focusNextFrame = false;
    }

    // cache old cursor position
    ImVec2 previousCursorPos = ImGui::GetCursorPos();

    // set cursor position if user set
    if (info.dirtyPos)
        ImGui::SetCursorPos(state.pos);

    // update widget's position state
    state.pos = { ImGui::GetCursorPosX(), ImGui::GetCursorPosY() };

    // set item width
    if (config.width != 0)
        ImGui::SetNextItemWidth((float)config.width);

    // set indent
    if (config.indent > 0.0f)
        ImGui::Indent(config.indent);

    // push font if a font object is attached
    if (font)
    {
        ImFont* fontptr = static_cast<mvFont*>(font.get())->getFontPtr();
        ImGui::PushFont(fontptr);
    }

    // themes
    apply_local_theming(this);

    //-----------------------------------------------------------------------------
    // draw
    //-----------------------------------------------------------------------------
    {
        // disabled widgets render a read-only snapshot of the value
        if(!config.enabled) _disabled_value = *_value;

        if (ImGui::DragFloat(info.internalLabel.c_str(), config.enabled ? _value.get() : &_disabled_value, _speed, _min, _max, _format.c_str(), _flags))
        {
            auto value = *_value;

            if(config.alias.empty())
                mvSubmitCallback([=]() {
                    mvAddCallback(getCallback(false), uuid, ToPyFloat(value), config.user_data);
                    });
            else
                mvSubmitCallback([=]() {
                    mvAddCallback(getCallback(false), config.alias, ToPyFloat(value), config.user_data);
                    });
        }
    }

    //-----------------------------------------------------------------------------
    // update state
    //-----------------------------------------------------------------------------
    UpdateAppItemState(state);

    //-----------------------------------------------------------------------------
    // post draw
    //-----------------------------------------------------------------------------

    // set cursor position to cached position
    if (info.dirtyPos)
        ImGui::SetCursorPos(previousCursorPos);

    if (config.indent > 0.0f)
        ImGui::Unindent(config.indent);

    // pop font off stack
    if (font)
        ImGui::PopFont();

    // handle popping themes
    cleanup_local_theming(this);

    if (handlerRegistry)
        handlerRegistry->checkEvents(&state);

    // handle drag & drop if used
    apply_drag_drop(this);
}
// Constructs a single-component int drag widget; all state keeps its defaults.
mvDragInt::mvDragInt(mvUUID uuid)
    : mvAppItem(uuid)
{
}
// Copies widget-specific configuration from a template item of the same type.
// The shared value pointer is only adopted when this item has a data source.
void mvDragInt::applySpecificTemplate(mvAppItem* item)
{
    auto titem = static_cast<mvDragInt*>(item);
    if (config.source != 0) _value = titem->_value;
    _disabled_value = titem->_disabled_value;
    _speed = titem->_speed;
    _min = titem->_min;
    _max = titem->_max;
    _format = titem->_format;
    _flags = titem->_flags;
    _stor_flags = titem->_stor_flags;
}
// Binds this widget's value to another item's value so both share storage.
// No-ops when the source is unchanged; reports an error (without rebinding)
// when the source item is missing or holds an incompatible value type.
void mvDragInt::setDataSource(mvUUID dataSource)
{
    if (dataSource == config.source) return;
    config.source = dataSource;

    mvAppItem* item = GetItem((*GContext->itemRegistry), dataSource);
    if (!item)
    {
        mvThrowPythonError(mvErrorCode::mvSourceNotFound, "set_value",
            "Source item not found: " + std::to_string(dataSource), this);
        return;
    }
    if (GetEntityValueType(item->type) != GetEntityValueType(type))
    {
        mvThrowPythonError(mvErrorCode::mvSourceNotCompatible, "set_value",
            "Values types do not match: " + std::to_string(dataSource), this);
        return;
    }
    _value = *static_cast<std::shared_ptr<int>*>(item->getValue());
}
// Returns the current value as a Python int.
PyObject* mvDragInt::getPyValue()
{
    return ToPyInt(*_value);
}
// Overwrites the (possibly shared) value with the given Python number.
void mvDragInt::setPyValue(PyObject* value)
{
    *_value = ToInt(value);
}
// Renders the single int drag widget and dispatches the change callback when
// the user edits it. Standard mvAppItem draw sequence.
void mvDragInt::draw(ImDrawList* drawlist, float x, float y)
{
    //-----------------------------------------------------------------------------
    // pre draw
    //-----------------------------------------------------------------------------

    // show/hide
    if (!config.show)
        return;

    // focusing
    if (info.focusNextFrame)
    {
        ImGui::SetKeyboardFocusHere();
        info.focusNextFrame = false;
    }

    // cache old cursor position
    ImVec2 previousCursorPos = ImGui::GetCursorPos();

    // set cursor position if user set
    if (info.dirtyPos)
        ImGui::SetCursorPos(state.pos);

    // update widget's position state
    state.pos = { ImGui::GetCursorPosX(), ImGui::GetCursorPosY() };

    // set item width
    if (config.width != 0)
        ImGui::SetNextItemWidth((float)config.width);

    // set indent
    if (config.indent > 0.0f)
        ImGui::Indent(config.indent);

    // push font if a font object is attached
    if (font)
    {
        ImFont* fontptr = static_cast<mvFont*>(font.get())->getFontPtr();
        ImGui::PushFont(fontptr);
    }

    // themes
    apply_local_theming(this);

    //-----------------------------------------------------------------------------
    // draw
    //-----------------------------------------------------------------------------
    {
        ScopedID id(uuid);

        // disabled widgets render a read-only snapshot of the value
        if(!config.enabled) _disabled_value = *_value;

        if (ImGui::DragInt(info.internalLabel.c_str(), config.enabled ? _value.get() : &_disabled_value, _speed, _min, _max, _format.c_str(), _flags))
        {
            auto value = *_value;

            if(config.alias.empty())
                mvSubmitCallback([=]() {
                    mvAddCallback(getCallback(false), uuid, ToPyInt(value), config.user_data);
                    });
            else
                mvSubmitCallback([=]() {
                    mvAddCallback(getCallback(false), config.alias, ToPyInt(value), config.user_data);
                    });
        }
    }

    //-----------------------------------------------------------------------------
    // update state
    //-----------------------------------------------------------------------------
    UpdateAppItemState(state);

    //-----------------------------------------------------------------------------
    // post draw
    //-----------------------------------------------------------------------------

    // set cursor position to cached position
    if (info.dirtyPos)
        ImGui::SetCursorPos(previousCursorPos);

    if (config.indent > 0.0f)
        ImGui::Unindent(config.indent);

    // pop font off stack
    if (font)
        ImGui::PopFont();

    // handle popping themes
    cleanup_local_theming(this);

    if (handlerRegistry)
        handlerRegistry->checkEvents(&state);

    // handle drag & drop if used
    apply_drag_drop(this);
}
// Applies widget-specific keyword arguments from a Python dict. Flag kwargs are
// written to both _flags (active) and _stor_flags (stored copy) so that the
// enable/disable transitions below can restore or force ImGuiSliderFlags_NoInput.
void mvDragFloat::handleSpecificKeywordArgs(PyObject* dict)
{
    if (dict == nullptr)
        return;

    if (PyObject* item = PyDict_GetItemString(dict, "format")) _format = ToString(item);
    if (PyObject* item = PyDict_GetItemString(dict, "speed")) _speed = ToFloat(item);
    if (PyObject* item = PyDict_GetItemString(dict, "min_value")) _min = ToFloat(item);
    if (PyObject* item = PyDict_GetItemString(dict, "max_value")) _max = ToFloat(item);

    // helper for bit flipping
    auto flagop = [dict](const char* keyword, int flag, int& flags)
    {
        if (PyObject* item = PyDict_GetItemString(dict, keyword)) ToBool(item) ? flags |= flag : flags &= ~flag;
    };

    // flags
    flagop("clamped", ImGuiSliderFlags_ClampOnInput, _flags);
    flagop("clamped", ImGuiSliderFlags_ClampOnInput, _stor_flags);
    flagop("no_input", ImGuiSliderFlags_NoInput, _flags);
    flagop("no_input", ImGuiSliderFlags_NoInput, _stor_flags);

    // re-enabled: restore the user-configured flags
    if (info.enabledLastFrame)
    {
        info.enabledLastFrame = false;
        _flags = _stor_flags;
    }

    // disabled: stash current flags and force NoInput
    if (info.disabledLastFrame)
    {
        info.disabledLastFrame = false;
        _stor_flags = _flags;
        _flags |= ImGuiSliderFlags_NoInput;
    }
}
// Writes this widget's specific configuration back into a Python dict
// (inverse of handleSpecificKeywordArgs).
void mvDragFloat::getSpecificConfiguration(PyObject* dict)
{
    if (dict == nullptr)
        return;

    mvPyObject py_format = ToPyString(_format);
    mvPyObject py_speed = ToPyFloat(_speed);
    mvPyObject py_min_value = ToPyFloat(_min);
    mvPyObject py_max_value = ToPyFloat(_max);

    PyDict_SetItemString(dict, "format", py_format);
    PyDict_SetItemString(dict, "speed", py_speed);
    PyDict_SetItemString(dict, "min_value", py_min_value);
    PyDict_SetItemString(dict, "max_value", py_max_value);

    // helper to check and set bit
    auto checkbitset = [dict](const char* keyword, int flag, const int& flags)
    {
        mvPyObject py_result = ToPyBool(flags & flag);
        PyDict_SetItemString(dict, keyword, py_result);
    };

    // window flags
    checkbitset("clamped", ImGuiSliderFlags_ClampOnInput, _flags);
    checkbitset("no_input", ImGuiSliderFlags_NoInput, _flags);
}
// Applies widget-specific keyword arguments from a Python dict. Flag kwargs are
// written to both _flags (active) and _stor_flags (stored copy) so that the
// enable/disable transitions below can restore or force ImGuiSliderFlags_NoInput.
void mvDragInt::handleSpecificKeywordArgs(PyObject* dict)
{
    if (dict == nullptr)
        return;

    if (PyObject* item = PyDict_GetItemString(dict, "format")) _format = ToString(item);
    if (PyObject* item = PyDict_GetItemString(dict, "speed")) _speed = ToFloat(item);  // speed is a float even for int drags
    if (PyObject* item = PyDict_GetItemString(dict, "min_value")) _min = ToInt(item);
    if (PyObject* item = PyDict_GetItemString(dict, "max_value")) _max = ToInt(item);

    // helper for bit flipping
    auto flagop = [dict](const char* keyword, int flag, int& flags)
    {
        if (PyObject* item = PyDict_GetItemString(dict, keyword)) ToBool(item) ? flags |= flag : flags &= ~flag;
    };

    // flags
    flagop("clamped", ImGuiSliderFlags_ClampOnInput, _flags);
    flagop("clamped", ImGuiSliderFlags_ClampOnInput, _stor_flags);
    flagop("no_input", ImGuiSliderFlags_NoInput, _flags);
    flagop("no_input", ImGuiSliderFlags_NoInput, _stor_flags);

    // re-enabled: restore the user-configured flags
    if (info.enabledLastFrame)
    {
        info.enabledLastFrame = false;
        _flags = _stor_flags;
    }

    // disabled: stash current flags and force NoInput
    if (info.disabledLastFrame)
    {
        info.disabledLastFrame = false;
        _stor_flags = _flags;
        _flags |= ImGuiSliderFlags_NoInput;
    }
}
// Export this drag-int widget's specific configuration into a Python dict.
void mvDragInt::getSpecificConfiguration(PyObject* dict)
{
    // Nothing to fill in without a target dictionary.
    if (dict == nullptr)
        return;

    // Write each scalar setting; mvPyObject keeps the temporary Python
    // object alive across the insertion and releases it afterwards.
    mvPyObject py_format = ToPyString(_format);
    PyDict_SetItemString(dict, "format", py_format);

    mvPyObject py_speed = ToPyFloat(_speed);
    PyDict_SetItemString(dict, "speed", py_speed);

    mvPyObject py_min_value = ToPyInt(_min);
    PyDict_SetItemString(dict, "min_value", py_min_value);

    mvPyObject py_max_value = ToPyInt(_max);
    PyDict_SetItemString(dict, "max_value", py_max_value);

    // Report whether a given ImGui slider flag is currently set.
    auto exportFlag = [dict](const char* keyword, int flag, const int& flags)
    {
        mvPyObject py_state = ToPyBool(flags & flag);
        PyDict_SetItemString(dict, keyword, py_state);
    };

    exportFlag("clamped", ImGuiSliderFlags_ClampOnInput, _flags);
    exportFlag("no_input", ImGuiSliderFlags_NoInput, _flags);
}
} | 14,244 |
1,585 | /*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2020 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2015 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/attribute/attribute.h"
#include "ompi/memchecker.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_Type_dup = PMPI_Type_dup
#endif
#define MPI_Type_dup PMPI_Type_dup
#endif
static const char FUNC_NAME[] = "MPI_Type_dup";

/*
 * MPI_Type_dup: duplicate `type` into `*newtype`, record the combiner
 * metadata, and copy the type's cached attributes (MPI_TYPE_DUP is the
 * only MPI call that copies attributes).
 *
 * Returns MPI_SUCCESS, or invokes the error handler on MPI_ERR_TYPE /
 * MPI_ERR_INTERN failures.
 */
int MPI_Type_dup (MPI_Datatype type,
                  MPI_Datatype *newtype)
{
    MEMCHECKER(
        memchecker_datatype(type);
        );

    if( MPI_PARAM_CHECK ) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == type || MPI_DATATYPE_NULL == type ||
            NULL == newtype) {
            return OMPI_ERRHANDLER_NOHANDLE_INVOKE(MPI_ERR_TYPE,
                                                   FUNC_NAME );
        }
    }

    if (OMPI_SUCCESS != ompi_datatype_duplicate( type, newtype)) {
        ompi_datatype_destroy( newtype );
        /* Consistency: no communicator is involved here, so use the same
           NOHANDLE error-return variant as every other error path in this
           function (previously this path invoked the handler on
           MPI_COMM_WORLD via OMPI_ERRHANDLER_RETURN). */
        OMPI_ERRHANDLER_NOHANDLE_RETURN (MPI_ERR_INTERN,
                                         MPI_ERR_INTERN, FUNC_NAME );
    }

    /* Record how this type was built so MPI_Type_get_envelope/contents
       can report MPI_COMBINER_DUP with the original type. */
    ompi_datatype_set_args( *newtype, 0, NULL, 0, NULL, 1, &type, MPI_COMBINER_DUP );

    /* Copy all the old attributes, if there were any. This is done
       here (vs. ompi_datatype_duplicate()) because MPI_TYPE_DUP is the
       only MPI function that copies attributes. All other MPI
       functions that take an old type and generate a newtype do not
       copy attributes. Really. */
    if (NULL != type->d_keyhash) {
        ompi_attr_hash_init(&(*newtype)->d_keyhash);
        if (OMPI_SUCCESS != ompi_attr_copy_all(TYPE_ATTR,
                                               type, *newtype,
                                               type->d_keyhash,
                                               (*newtype)->d_keyhash)) {
            ompi_datatype_destroy(newtype);
            OMPI_ERRHANDLER_NOHANDLE_RETURN( MPI_ERR_INTERN,
                                             MPI_ERR_INTERN, FUNC_NAME );
        }
    }

    return MPI_SUCCESS;
}
| 1,456 |
788 | <gh_stars>100-1000
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#import <Foundation/Foundation.h>
// HTTP operation types accepted by UGHTTPManager transactions.
enum
{
    kUGHTTPGet = 0,
    kUGHTTPPost = 1,
    kUGHTTPPostAuth = 2,
    kUGHTTPPut = 3,
    kUGHTTPDelete = 4
};
// Thin wrapper around HTTP requests: one instance handles one transaction
// at a time, either synchronously or asynchronously via a delegate.
@interface UGHTTPManager : NSObject

// Blocks until a response is received, or until there's an error.
// In the event of a response, it's returned. If there's an error,
// the function returns nil and you can call getLastError to see what
// went wrong.
-(NSString *)syncTransaction:(NSString *)url operation:(int)op operationData:(NSString *)opData;

// Sets up the transaction asynchronously. The delegate that's sent in
// must have the following functions:
//
// -(void)httpManagerError:(UGHTTPManager *)manager error:(NSString *)error
// -(void)httpManagerResponse:(UGHTTPManager *)manager response:(NSString *)response
//
// In all cases, it returns a transaction ID. A return value
// of -1 means there was an error.
// You can call getLastError to find out what went wrong.
-(int)asyncTransaction:(NSString *)url operation:(int)op operationData:(NSString *)opData delegate:(id)delegate;

// Get the current transaction ID.
-(int)getTransactionID;

// Sets the auth key.
-(void)setAuth: (NSString *)auth;

// Cancel a pending transaction. The delegate will not be called and the results
// will be ignored. Though the server side will still have happened.
-(void)cancel;

// Returns YES if this instance is available. NO if this instance is currently
// in use as part of an asynchronous transaction.
-(BOOL)isAvailable;

// Sets the availability flag of this instance. This is done by UGClient.
-(void)setAvailable:(BOOL)available;

// A helpful utility function to make a string conform to URL
// rules. It will escape all the special characters.
+(NSString *)escapeSpecials:(NSString *)raw;

// At all times, this will return the plain-text explanation of the last
// thing that went wrong. It is cleared to "No Error" at the beginning of
// each new transaction.
-(NSString *)getLastError;
@end
| 760 |
815 | <reponame>xj361685640/ParaView<filename>Plugins/ZSpace/zSpace/vtkPVZSpaceView.h<gh_stars>100-1000
/*=========================================================================
Program: ParaView
Module: vtkPVZSpaceView.h
Copyright (c) Kitware, Inc.
All rights reserved.
See Copyright.txt or http://www.paraview.org/HTML/Copyright.html for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
/**
* @class vtkPVZSpaceView
*
* vtkPVZSpaceView extends vtkPVRenderView to render the scene using Crystal Eyes
* stereo and interact with it through a zSpace device.
* The zSpace device is composed of a high definition 3D display coupled with
* an advanced mixed reality tracking system and angle-awareness sensor.
*
* This view is designed to work on full screen (or in a cave display).
*
* This only works on Windows as the zSpace SDK is only available on this OS.
*
* This class uses the singleton class vtkZSpaceSDKManager object to communicate with the zSpace SDK
* :
* - get the camera view and projection matrix from the zSpace sdk and give it to the
* actual camera
* - handle interactions by getting stylus state from zSpace sdk and invoking event for the zSpace
* interactor style.
* Notice that the interactor style responds to a Button3DEvent or Move3DEvent, so
* the EventDataDevice3D device, input and action need to be set for each action.
* Here is the list of associations ( in the format Device + Input + Action ) :
* - MiddleButton of the stylus : GenericTracker + Trigger + Press/Release
* - RightButton of the stylus : RightController + Trigger + Press/Release
* - LeftButton of the stylus : LeftController + Trigger + Press/Release
*
* Please refer to vtkZSpaceInteractorStyle to know what are the possible interactions.
*
* Notice that this PVView stores the current/last world event position and orientation
* to simulate the RenderWindowInteractor3D behavior, as ParaView doesn't support
* a RenderWindowInteractor3D for instance. This should be removed when this feature is supported.
*/
#ifndef vtkPVZSpaceView_h
#define vtkPVZSpaceView_h
#include "vtkDataObject.h" // for vtkDataObject enums
#include "vtkNew.h" // for vtkNew
#include "vtkPVRenderView.h"
#include "vtkZSpaceViewModule.h" // for export macro
class vtkZSpaceInteractorStyle;
class vtkZSpaceSDKManager;
class vtkZSpaceRayActor;
class vtkProp3D;
class vtkEventDataDevice3D;
class vtkZSpaceCamera;
class VTKZSPACEVIEW_EXPORT vtkPVZSpaceView : public vtkPVRenderView
{
public:
  static vtkPVZSpaceView* New();
  vtkTypeMacro(vtkPVZSpaceView, vtkPVRenderView);
  void PrintSelf(ostream& os, vtkIndent indent) override;

  //@{
  /**
   * Called when a ResetCameraEvent is fired. Call zSpaceSDKManager::CalculateFit
   * to update the viewer scale, near and far plane given by the zSpace SDK.
   */
  void ResetCamera();
  void ResetCamera(double bounds[6]);
  //@}

  /**
   * Overridden to give the Interactor the zSpaceInteractorStyle.
   */
  void SetInteractionMode(int mode) override;

  /**
   * Set the physical distance between eyes on the glasses.
   * Delegates to the zSpace SDK manager.
   * Default is 0.056f.
   */
  void SetInterPupillaryDistance(float);

  /**
   * If true, perform picking every frame.
   * Delegates to zSpaceInteractorStyle.
   * Default is true.
   */
  void SetInteractivePicking(bool);

  /**
   * Draw or not the ray stylus.
   * Delegates to the zSpace renderer.
   * Default is true.
   */
  void SetDrawStylus(bool);

  /**
   * Perform a hardware picking with a ray defined by the ZSpaceSDKManager
   * ray transform. Configure the camera to be at the origin of the
   * ray, looking in the direction of the ray, and pick the center of
   * the viewport.
   * Restore previous camera settings at the end.
   */
  void SelectWithRay(const double pos[3]);

  //@{
  /**
   * Select the field association used when picking.
   * Default is vtkDataObject::FIELD_ASSOCIATION_POINTS (see the member
   * initializer below; the previous comment claiming CELLS was wrong).
   */
  vtkSetClampMacro(PickingFieldAssociation, int, vtkDataObject::FIELD_ASSOCIATION_POINTS,
    vtkDataObject::FIELD_ASSOCIATION_CELLS);
  vtkGetMacro(PickingFieldAssociation, int);
  //@}

  /**
   * All transformations applied to actors with the stylus can be
   * reset with this method.
   */
  void ResetAllUserTransforms();

protected:
  vtkPVZSpaceView();
  ~vtkPVZSpaceView() override;

  /**
   * Update zSpace SDK manager, the camera and render as usual.
   */
  void Render(bool interactive, bool skip_rendering) override;

  /**
   * Give the zSpace SDK the scene bounds to let it choose the best
   * viewer scale, near and far clip. The camera position is set with the
   * zSpace advised position.
   */
  void CalculateFit(double* bounds);

  /**
   * Override ResetCameraClippingRange to disable automatic clipping range
   * calculations of the camera, as this is handled by the zSpace SDK.
   */
  void ResetCameraClippingRange() override{};

  void SetupInteractor(vtkRenderWindowInteractor*) override;

private:
  vtkPVZSpaceView(const vtkPVZSpaceView&) = delete;
  void operator=(const vtkPVZSpaceView&) = delete;

  friend class vtkPVZSpaceViewInteractorStyle;

  vtkNew<vtkZSpaceInteractorStyle> ZSpaceInteractorStyle;
  vtkNew<vtkZSpaceRayActor> StylusRayActor;
  vtkNew<vtkZSpaceCamera> ZSpaceCamera;

  // When true, the stylus ray is rendered.
  bool DrawStylus = true;

  // The field association used when picking with the ray
  int PickingFieldAssociation = vtkDataObject::FIELD_ASSOCIATION_POINTS;
};
#endif
| 1,759 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
package mod._forms;
import com.sun.star.beans.NamedValue;
import com.sun.star.beans.XPropertySet;
import com.sun.star.form.XBoundComponent;
import com.sun.star.form.XLoadable;
import com.sun.star.sdbc.SQLException;
import com.sun.star.sdbc.XResultSetUpdate;
import com.sun.star.uno.Exception;
import com.sun.star.uno.UnoRuntime;
import com.sun.star.uno.XInterface;
import ifc.form._XUpdateBroadcaster.UpdateChecker;
import java.io.PrintWriter;
import lib.TestEnvironment;
import lib.TestParameters;
import util.DBTools;
/**
* Test for object which is represented by service
* <code>com.sun.star.form.component.DatabaseListBox</code>. <p>
* Object implements the following interfaces :
* <ul>
* <li> <code>com::sun::star::io::XPersistObject</code></li>
* <li> <code>com::sun::star::awt::UnoControlListBoxModel</code></li>
* <li> <code>com::sun::star::form::XReset</code></li>
* <li> <code>com::sun::star::form::XBoundComponent</code></li>
* <li> <code>com::sun::star::form::FormComponent</code></li>
* <li> <code>com::sun::star::form::component::ListBox</code></li>
* <li> <code>com::sun::star::beans::XFastPropertySet</code></li>
* <li> <code>com::sun::star::beans::XMultiPropertySet</code></li>
* <li> <code>com::sun::star::form::XUpdateBroadcaster</code></li>
* <li> <code>com::sun::star::form::DataAwareControlModel</code></li>
* <li> <code>com::sun::star::beans::XPropertyState</code></li>
* <li> <code>com::sun::star::form::FormControlModel</code></li>
* <li> <code>com::sun::star::container::XNamed</code></li>
* <li> <code>com::sun::star::lang::XComponent</code></li>
* <li> <code>com::sun::star::lang::XEventListener</code></li>
* <li> <code>com::sun::star::beans::XPropertyAccess</code></li>
* <li> <code>com::sun::star::beans::XPropertyContainer</code></li>
* <li> <code>com::sun::star::beans::XPropertySet</code></li>
* <li> <code>com::sun::star::form::XLoadListener</code></li>
* <li> <code>com::sun::star::form::component::DatabaseListBox</code></li>
* <li> <code>com::sun::star::container::XChild</code></li>
* </ul> <p>
 * This object test <b> is NOT </b> designed to be run in several
 * threads concurrently.
* @see com.sun.star.io.XPersistObject
* @see com.sun.star.awt.UnoControlListBoxModel
* @see com.sun.star.form.XReset
* @see com.sun.star.form.XBoundComponent
* @see com.sun.star.form.FormComponent
* @see com.sun.star.form.component.ListBox
* @see com.sun.star.beans.XFastPropertySet
* @see com.sun.star.beans.XMultiPropertySet
* @see com.sun.star.form.XUpdateBroadcaster
* @see com.sun.star.form.DataAwareControlModel
* @see com.sun.star.beans.XPropertyState
* @see com.sun.star.form
* @see com.sun.star.container.XNamed
* @see com.sun.star.lang.XComponent
* @see com.sun.star.lang.XEventListener
* @see com.sun.star.beans.XPropertyAccess
* @see com.sun.star.beans.XPropertyContainer
* @see com.sun.star.beans.XPropertySet
* @see com.sun.star.form.XLoadListener
* @see com.sun.star.form.component.DatabaseListBox
* @see com.sun.star.container.XChild
* @see ifc.io._XPersistObject
* @see ifc.awt._UnoControlListBoxModel
* @see ifc.form._XReset
* @see ifc.form._XBoundComponent
* @see ifc.form._FormComponent
* @see ifc.form.component._ListBox
* @see ifc.beans._XFastPropertySet
* @see ifc.beans._XMultiPropertySet
* @see ifc.form._XUpdateBroadcaster
* @see ifc.form._DataAwareControlModel
* @see ifc.beans._XPropertyState
* @see ifc.form._FormControlModel
* @see ifc.container._XNamed
* @see ifc.lang._XComponent
* @see ifc.lang._XEventListener
* @see ifc.beans._XPropertySet
* @see ifc.form._XLoadListener
* @see ifc.form.component._DatabaseListBox
* @see ifc.container._XChild
*/
public class OListBoxModel extends GenericModelTest {

    /**
     * Set some member variables of the super class <CODE>GenericModelTest</CODE>:
     * <pre>
     *    super.m_ChangePropertyName = "SelectedItems";
     *    super.m_kindOfControl="ListBox";
     *    super.m_ObjectName = "stardiv.one.form.component.ListBox";
     *    NamedValue DataField = new NamedValue();
     *    DataField.Name = "DataField";
     *    DataField.Value = DBTools.TST_STRING_F;
     *    super.m_propertiesToSet.add(DataField);
     *
     *    NamedValue ListSource = new NamedValue();
     *    ListSource.Name = "ListSource";
     *    ListSource.Value = new String[] {
     *            "OListBoxModel1", "OListBoxModel2", "OListBoxModel3"};
     *    super.m_propertiesToSet.add(ListSource);
     *
     *    super.m_LCShape_Type = "FixedText";
     * </pre>
     * Then <CODE>super.initialize()</CODE> is called.
     * @param tParam the test parameter
     * @param log the log writer
     */
    protected void initialize(TestParameters tParam, PrintWriter log) {
        super.initialize(tParam, log);

        super.m_ChangePropertyName = "SelectedItems";

        super.m_kindOfControl="ListBox";

        super.m_ObjectName = "stardiv.one.form.component.ListBox";

        NamedValue DataField = new NamedValue();
        DataField.Name = "DataField";
        DataField.Value = DBTools.TST_STRING_F;
        super.m_propertiesToSet.add(DataField);

        NamedValue ListSource = new NamedValue();
        ListSource.Name = "ListSource";
        ListSource.Value = new String[] {
            "OListBoxModel1", "OListBoxModel2", "OListBoxModel3"};
        super.m_propertiesToSet.add(ListSource);

        super.m_LCShape_Type = "FixedText";
    }

    /**
     * Calls <CODE>cleanup()</CODE> from its super class.
     * @param tParam the test parameter
     * @param log the log writer
     */
    protected void cleanup(TestParameters tParam, PrintWriter log) {
        super.cleanup(tParam, log);
    }

    /**
     * Calls <CODE>createTestEnvironment()</CODE> from its super class.
     * This test does <b>not</b> use the generic implementation of
     * <CODE>checker()</CODE> from its super class; it installs its own
     * <CODE>Checker</CODE> implementation to test
     * <CODE>com::sun::star::form::XUpdateBroadcaster</CODE>.
     * @param Param the test parameter
     * @param log the log writer
     * @return lib.TestEnvironment
     */
    protected synchronized TestEnvironment createTestEnvironment(TestParameters Param,
        PrintWriter log) {
        TestEnvironment tEnv = super.createTestEnvironment(Param, log);

        tEnv.addObjRelation("XUpdateBroadcaster.Checker",
            new Checker(m_XFormLoader, m_XPS, m_XCtrl, m_ChangePropertyName, m_ChangePropertyValue));
        return tEnv;
    }

    /**
     * <CODE>XUpdateBroadcaster</CODE> checker: toggles the selected
     * list-box item, commits it through the bound control, and verifies
     * that the selection survives a reload of the form.
     */
    static class Checker implements UpdateChecker {
        // Index of the list item selected by the last update() call;
        // toggled between 0 and 1 on each update.
        private short lastItem = (short) 0;
        XLoadable formLoaderF = null;
        XPropertySet ps = null;
        XInterface ctrl = null;
        String ChangePropertyName = null;
        Object ChangePropertyValue = null;

        public Checker(XLoadable xl, XPropertySet ps, XInterface ctrl, String ChangePropertyName, Object ChangePropertyValue) {
            formLoaderF = xl;
            this.ps = ps;
            this.ctrl = ctrl;
            this.ChangePropertyName=ChangePropertyName;
            this.ChangePropertyValue=ChangePropertyValue;
        }

        // Select the "other" list item via the model property.
        public void update() throws Exception {
            if (!formLoaderF.isLoaded()) {
                formLoaderF.load();
            }

            lastItem = (short) (1 - lastItem);
            ps.setPropertyValue(ChangePropertyName, new short[] { lastItem });
        }

        // Commit the bound control and write the current row back to the DB.
        public void commit() throws SQLException {
            XBoundComponent bound = (XBoundComponent) UnoRuntime.queryInterface(
                XBoundComponent.class, ctrl);
            XResultSetUpdate update = (XResultSetUpdate) UnoRuntime.queryInterface(
                XResultSetUpdate.class,
                formLoaderF);

            bound.commit();
            update.updateRow();
        }

        // Reload the form and check that the committed selection came back.
        public boolean wasCommited() throws Exception {
            formLoaderF.reload();

            short[] getS = (short[]) ps.getPropertyValue(ChangePropertyName);

            return (getS.length > 0) && (lastItem == getS[0]);
        }
    }
} // finish class OListBoxModel
| 3,802 |
388 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Number of output channels for each ffmpeg channel-layout name.
channel_list = {
    "mono": 1,
    "stereo": 2,
    "2.1": 3,
    "3.0": 3,
    "3.0(back)": 3,
    "3.1": 4,
    "4.0": 4,
    "quad": 4,
    "quad(side)": 4,
    "5.0": 5,
    "5.1": 6,
    "6.0": 6,
    "6.0(front)": 6,
    "hexagonal": 6,
    "6.1": 7,
    "6.1(front)": 7,
    "7.0": 7,
    "7.0(front)": 7,
    "7.1": 8,
    "7.1(wide)": 8,
}

# Codecs that encode losslessly and therefore take no bitrate option.
lossless = ["flac", "truehd", "alac", "tta", "wavpack", "mlp"]


def build_audio(audio_tracks, audio_file_index=0):
    """Build the ffmpeg command-line fragment for the given audio tracks.

    Each track is expected to expose ``index``, ``outdex``, ``title``,
    ``language``, ``conversion_codec``, ``conversion_bitrate`` and
    ``downmix`` attributes (assumed from usage -- confirm against caller).
    Returns a single space-joined argument string.
    """
    args = []
    for trk in audio_tracks:
        # Stream mapping plus title/handler metadata.
        args.append(
            f"-map {audio_file_index}:{trk.index} "
            f'-metadata:s:{trk.outdex} title="{trk.title}" '
            f'-metadata:s:{trk.outdex} handler="{trk.title}"'
        )
        if trk.language:
            args.append(f"-metadata:s:{trk.outdex} language={trk.language}")

        codec = trk.conversion_codec
        if not codec or codec == "none":
            # No conversion requested: pass the stream through untouched.
            args.append(f"-c:{trk.outdex} copy")
            continue

        if trk.downmix:
            downmix = (
                f"-ac:{trk.outdex} {channel_list[trk.downmix]} "
                f"-filter:{trk.outdex} aformat=channel_layouts={trk.downmix}"
            )
        else:
            downmix = ""

        # Lossless codecs take no bitrate flag.
        bitrate = "" if codec in lossless else f"-b:{trk.outdex} {trk.conversion_bitrate} "
        args.append(f"-c:{trk.outdex} {codec} {bitrate} {downmix}")

    return " ".join(args)
| 837 |
509 | <reponame>time-series-tools/seglearn<filename>seglearn/tests/test_util.py<gh_stars>100-1000
# Author: <NAME>
# License: BSD
import numpy as np
from seglearn.datasets import load_watch
from seglearn.base import TS_Data
from seglearn import util
def test_util():
    """Smoke-test the seglearn util helpers on the bundled watch dataset."""
    df = load_watch()
    ts = TS_Data(df['X'], df['side'])

    Xt, Xc = util.get_ts_data_parts(ts)
    # Context comes back unchanged, and every series matches the input.
    assert np.array_equal(Xc, df['side'])
    for i, expected in enumerate(df['X']):
        assert np.array_equal(Xt[i], expected)

    # These just need to run without raising.
    util.check_ts_data(ts, df['y'])
    util.check_ts_data(df['X'], df['y'])
    util.ts_stats(df['X'], df['y'], fs=1., class_labels=df['y_labels'])
def test_to_categorical_series():
    """Categorical predictions expand to a series of the expected length
    containing only values drawn from the predictions."""

    def check(p, step, width):
        s = util.segmented_prediction_to_series(p, step, width, categorical_target=True)
        assert len(s) == (len(p) - 1) * step + width
        assert np.all(np.isin(s, p))
        return s

    # Overlapping segments (step < width) and gapped segments (step > width).
    check(np.arange(10), 2, 3)
    check(np.arange(10), 3, 2)

    # Multi-output predictions keep their second dimension.
    p = np.arange(10)
    s = check(np.column_stack([p, p]), 2, 3)
    assert s.shape[1] == 2
def test_to_real_series():
    """Real-valued predictions expand to a series of the expected length
    bounded by the min/max of the predictions."""

    def check(p, step, width):
        s = util.segmented_prediction_to_series(p, step, width, categorical_target=False)
        assert len(s) == (len(p) - 1) * step + width
        assert np.all(s <= np.max(p))
        assert np.all(s >= np.min(p))
        return s

    # Highly overlapping case, then gapped segments (step > width).
    check(np.arange(20), 2, 5)
    check(np.arange(10), 3, 2)

    # Multi-output predictions keep their second dimension.
    p = np.arange(5)
    s = check(np.column_stack([p, p]), 2, 5)
    assert s.shape[1] == 2
| 1,003 |
2,535 | #pragma once
#include <foundation/python_types.h>
#include <third_party/akaze/lib/AKAZE.h>
namespace features {

// Run the AKAZE detector on an 8-bit image with the given options and
// return the result as a Python object (presumably keypoints/descriptors
// -- confirm against the implementation).
py::object akaze(foundation::pyarray_uint8 image, AKAZEOptions options);

}  // namespace features
| 70 |
411 | <filename>giraph-block-app/src/main/java/org/apache/giraph/block_app/reducers/collect/CollectPrimitiveReduceOperation.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.giraph.block_app.reducers.collect;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.giraph.reducers.impl.KryoWrappedReduceOperation;
import org.apache.giraph.types.ops.PrimitiveTypeOps;
import org.apache.giraph.types.ops.TypeOpsUtils;
import org.apache.giraph.types.ops.collections.ResettableIterator;
import org.apache.giraph.types.ops.collections.array.WArrayList;
import org.apache.giraph.utils.WritableUtils;
/**
* Collect primitive values reduce operation
*
* @param <S> Primitive Writable type, which has its type ops
*/
/**
 * Collect primitive values reduce operation.
 *
 * Accumulates every reduced value into a primitive-typed array list,
 * using Giraph's {@link PrimitiveTypeOps} to avoid boxing.
 *
 * @param <S> Primitive Writable type, which has its type ops
 */
public class CollectPrimitiveReduceOperation<S>
    extends KryoWrappedReduceOperation<S, WArrayList<S>> {
  /**
   * Type ops if available, or null
   */
  private PrimitiveTypeOps<S> typeOps;

  /** For reflection only */
  public CollectPrimitiveReduceOperation() {
  }

  public CollectPrimitiveReduceOperation(PrimitiveTypeOps<S> typeOps) {
    this.typeOps = typeOps;
  }

  /** Create the empty primitive list to reduce into. */
  @Override
  public WArrayList<S> createValue() {
    return createList();
  }

  /** Append a single value to the collected list. */
  @Override
  public void reduce(WArrayList<S> reduceInto, S value) {
    reduceInto.addW(value);
  }

  /** Append every value of another partial result. */
  @Override
  public void reduceMerge(final WArrayList<S> reduceInto,
      WArrayList<S> toReduce) {
    ResettableIterator<S> iterator = toReduce.fastIteratorW();
    while (iterator.hasNext()) {
      reduceInto.addW(iterator.next());
    }
  }

  /** Create a new primitive array list for this element type. */
  public WArrayList<S> createList() {
    return typeOps.createArrayList();
  }

  // Serialization: only the element class is written; typeOps is
  // reconstructed from that class on read.
  @Override
  public void write(DataOutput out) throws IOException {
    WritableUtils.writeClass(typeOps.getTypeClass(), out);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    typeOps = TypeOpsUtils.getPrimitiveTypeOps(
        WritableUtils.<S>readClass(in));
  }
}
| 866 |
1,617 | /* Generated by the protocol buffer compiler. DO NOT EDIT! */
/* Generated from: peer/chaincode.proto */
#ifndef PROTOBUF_C_peer_2fchaincode_2eproto__INCLUDED
#define PROTOBUF_C_peer_2fchaincode_2eproto__INCLUDED
#include <protobuf-c/protobuf-c.h>
PROTOBUF_C__BEGIN_DECLS
#if PROTOBUF_C_VERSION_NUMBER < 1000000
# error This file was generated by a newer version of protoc-c which is incompatible with your libprotobuf-c headers. Please update your headers.
#elif 1002001 < PROTOBUF_C_MIN_COMPILER_VERSION
# error This file was generated by an older version of protoc-c which is incompatible with your libprotobuf-c headers. Please regenerate this file with a newer version of protoc-c.
#endif
typedef struct _Protos__ChaincodeID Protos__ChaincodeID;
typedef struct _Protos__ChaincodeInput Protos__ChaincodeInput;
typedef struct _Protos__ChaincodeInput__DecorationsEntry Protos__ChaincodeInput__DecorationsEntry;
typedef struct _Protos__ChaincodeSpec Protos__ChaincodeSpec;
typedef struct _Protos__ChaincodeDeploymentSpec Protos__ChaincodeDeploymentSpec;
typedef struct _Protos__ChaincodeInvocationSpec Protos__ChaincodeInvocationSpec;
typedef struct _Protos__LifecycleEvent Protos__LifecycleEvent;
/* --- enums --- */
typedef enum _Protos__ChaincodeSpec__Type {
PROTOS__CHAINCODE_SPEC__TYPE__UNDEFINED = 0,
PROTOS__CHAINCODE_SPEC__TYPE__GOLANG = 1,
PROTOS__CHAINCODE_SPEC__TYPE__NODE = 2,
PROTOS__CHAINCODE_SPEC__TYPE__CAR = 3,
PROTOS__CHAINCODE_SPEC__TYPE__JAVA = 4
PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(PROTOS__CHAINCODE_SPEC__TYPE)
} Protos__ChaincodeSpec__Type;
typedef enum _Protos__ChaincodeDeploymentSpec__ExecutionEnvironment {
PROTOS__CHAINCODE_DEPLOYMENT_SPEC__EXECUTION_ENVIRONMENT__DOCKER = 0,
PROTOS__CHAINCODE_DEPLOYMENT_SPEC__EXECUTION_ENVIRONMENT__SYSTEM = 1
PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(PROTOS__CHAINCODE_DEPLOYMENT_SPEC__EXECUTION_ENVIRONMENT)
} Protos__ChaincodeDeploymentSpec__ExecutionEnvironment;
/*
* Confidentiality Levels
*/
typedef enum _Protos__ConfidentialityLevel {
PROTOS__CONFIDENTIALITY_LEVEL__PUBLIC = 0,
PROTOS__CONFIDENTIALITY_LEVEL__CONFIDENTIAL = 1
PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(PROTOS__CONFIDENTIALITY_LEVEL)
} Protos__ConfidentialityLevel;
/* --- messages --- */
/*
*ChaincodeID contains the path as specified by the deploy transaction
*that created it as well as the hashCode that is generated by the
*system for the path. From the user level (ie, CLI, REST API and so on)
*deploy transaction is expected to provide the path and other requests
*are expected to provide the hashCode. The other value will be ignored.
*Internally, the structure could contain both values. For instance, the
*hashCode will be set when first generated using the path
*/
struct _Protos__ChaincodeID
{
ProtobufCMessage base;
/*
*deploy transaction will use the path
*/
char *path;
/*
*all other requests will use the name (really a hashcode) generated by
*the deploy transaction
*/
char *name;
/*
*user friendly version name for the chaincode
*/
char *version;
};
#define PROTOS__CHAINCODE_ID__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&protos__chaincode_id__descriptor) \
, NULL, NULL, NULL }
struct _Protos__ChaincodeInput__DecorationsEntry
{
ProtobufCMessage base;
char *key;
protobuf_c_boolean has_value;
ProtobufCBinaryData value;
};
#define PROTOS__CHAINCODE_INPUT__DECORATIONS_ENTRY__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&protos__chaincode_input__decorations_entry__descriptor) \
, NULL, 0,{0,NULL} }
/*
* Carries the chaincode function and its arguments.
* UnmarshalJSON in transaction.go converts the string-based REST/JSON input to
* the []byte-based current ChaincodeInput structure.
*/
struct _Protos__ChaincodeInput
{
ProtobufCMessage base;
size_t n_args;
ProtobufCBinaryData *args;
size_t n_decorations;
Protos__ChaincodeInput__DecorationsEntry **decorations;
};
#define PROTOS__CHAINCODE_INPUT__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&protos__chaincode_input__descriptor) \
, 0,NULL, 0,NULL }
/*
* Carries the chaincode specification. This is the actual metadata required for
* defining a chaincode.
*/
struct _Protos__ChaincodeSpec
{
ProtobufCMessage base;
protobuf_c_boolean has_type;
Protos__ChaincodeSpec__Type type;
Protos__ChaincodeID *chaincode_id;
Protos__ChaincodeInput *input;
protobuf_c_boolean has_timeout;
int32_t timeout;
};
#define PROTOS__CHAINCODE_SPEC__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&protos__chaincode_spec__descriptor) \
, 0,0, NULL, NULL, 0,0 }
/*
* Specify the deployment of a chaincode.
* TODO: Define `codePackage`.
*/
struct _Protos__ChaincodeDeploymentSpec
{
ProtobufCMessage base;
Protos__ChaincodeSpec *chaincode_spec;
protobuf_c_boolean has_code_package;
ProtobufCBinaryData code_package;
protobuf_c_boolean has_exec_env;
Protos__ChaincodeDeploymentSpec__ExecutionEnvironment exec_env;
};
#define PROTOS__CHAINCODE_DEPLOYMENT_SPEC__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&protos__chaincode_deployment_spec__descriptor) \
, NULL, 0,{0,NULL}, 0,0 }
/*
* Carries the chaincode function and its arguments.
*/
struct _Protos__ChaincodeInvocationSpec
{
ProtobufCMessage base;
Protos__ChaincodeSpec *chaincode_spec;
};
#define PROTOS__CHAINCODE_INVOCATION_SPEC__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&protos__chaincode_invocation_spec__descriptor) \
, NULL }
/*
* LifecycleEvent is used as the payload of the chaincode event emitted by LSCC
*/
struct _Protos__LifecycleEvent
{
ProtobufCMessage base;
char *chaincode_name;
};
#define PROTOS__LIFECYCLE_EVENT__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&protos__lifecycle_event__descriptor) \
, NULL }
/* Protos__ChaincodeID methods */
void protos__chaincode_id__init
(Protos__ChaincodeID *message);
size_t protos__chaincode_id__get_packed_size
(const Protos__ChaincodeID *message);
size_t protos__chaincode_id__pack
(const Protos__ChaincodeID *message,
uint8_t *out);
size_t protos__chaincode_id__pack_to_buffer
(const Protos__ChaincodeID *message,
ProtobufCBuffer *buffer);
Protos__ChaincodeID *
protos__chaincode_id__unpack
(ProtobufCAllocator *allocator,
size_t len,
const uint8_t *data);
void protos__chaincode_id__free_unpacked
(Protos__ChaincodeID *message,
ProtobufCAllocator *allocator);
/* Protos__ChaincodeInput__DecorationsEntry methods */
void protos__chaincode_input__decorations_entry__init
(Protos__ChaincodeInput__DecorationsEntry *message);
/* Protos__ChaincodeInput methods */
void protos__chaincode_input__init
(Protos__ChaincodeInput *message);
size_t protos__chaincode_input__get_packed_size
(const Protos__ChaincodeInput *message);
size_t protos__chaincode_input__pack
(const Protos__ChaincodeInput *message,
uint8_t *out);
size_t protos__chaincode_input__pack_to_buffer
(const Protos__ChaincodeInput *message,
ProtobufCBuffer *buffer);
Protos__ChaincodeInput *
protos__chaincode_input__unpack
(ProtobufCAllocator *allocator,
size_t len,
const uint8_t *data);
void protos__chaincode_input__free_unpacked
(Protos__ChaincodeInput *message,
ProtobufCAllocator *allocator);
/* Protos__ChaincodeSpec methods */
void protos__chaincode_spec__init
(Protos__ChaincodeSpec *message);
size_t protos__chaincode_spec__get_packed_size
(const Protos__ChaincodeSpec *message);
size_t protos__chaincode_spec__pack
(const Protos__ChaincodeSpec *message,
uint8_t *out);
size_t protos__chaincode_spec__pack_to_buffer
(const Protos__ChaincodeSpec *message,
ProtobufCBuffer *buffer);
Protos__ChaincodeSpec *
protos__chaincode_spec__unpack
(ProtobufCAllocator *allocator,
size_t len,
const uint8_t *data);
void protos__chaincode_spec__free_unpacked
(Protos__ChaincodeSpec *message,
ProtobufCAllocator *allocator);
/* Protos__ChaincodeDeploymentSpec methods */
void protos__chaincode_deployment_spec__init
(Protos__ChaincodeDeploymentSpec *message);
size_t protos__chaincode_deployment_spec__get_packed_size
(const Protos__ChaincodeDeploymentSpec *message);
size_t protos__chaincode_deployment_spec__pack
(const Protos__ChaincodeDeploymentSpec *message,
uint8_t *out);
size_t protos__chaincode_deployment_spec__pack_to_buffer
(const Protos__ChaincodeDeploymentSpec *message,
ProtobufCBuffer *buffer);
Protos__ChaincodeDeploymentSpec *
protos__chaincode_deployment_spec__unpack
(ProtobufCAllocator *allocator,
size_t len,
const uint8_t *data);
void protos__chaincode_deployment_spec__free_unpacked
(Protos__ChaincodeDeploymentSpec *message,
ProtobufCAllocator *allocator);
/* Protos__ChaincodeInvocationSpec methods */
void protos__chaincode_invocation_spec__init
(Protos__ChaincodeInvocationSpec *message);
size_t protos__chaincode_invocation_spec__get_packed_size
(const Protos__ChaincodeInvocationSpec *message);
size_t protos__chaincode_invocation_spec__pack
(const Protos__ChaincodeInvocationSpec *message,
uint8_t *out);
size_t protos__chaincode_invocation_spec__pack_to_buffer
(const Protos__ChaincodeInvocationSpec *message,
ProtobufCBuffer *buffer);
Protos__ChaincodeInvocationSpec *
protos__chaincode_invocation_spec__unpack
(ProtobufCAllocator *allocator,
size_t len,
const uint8_t *data);
void protos__chaincode_invocation_spec__free_unpacked
(Protos__ChaincodeInvocationSpec *message,
ProtobufCAllocator *allocator);
/* Protos__LifecycleEvent methods */
void protos__lifecycle_event__init
(Protos__LifecycleEvent *message);
size_t protos__lifecycle_event__get_packed_size
(const Protos__LifecycleEvent *message);
size_t protos__lifecycle_event__pack
(const Protos__LifecycleEvent *message,
uint8_t *out);
size_t protos__lifecycle_event__pack_to_buffer
(const Protos__LifecycleEvent *message,
ProtobufCBuffer *buffer);
Protos__LifecycleEvent *
protos__lifecycle_event__unpack
(ProtobufCAllocator *allocator,
size_t len,
const uint8_t *data);
void protos__lifecycle_event__free_unpacked
(Protos__LifecycleEvent *message,
ProtobufCAllocator *allocator);
/* --- per-message closures --- */
typedef void (*Protos__ChaincodeID_Closure)
(const Protos__ChaincodeID *message,
void *closure_data);
typedef void (*Protos__ChaincodeInput__DecorationsEntry_Closure)
(const Protos__ChaincodeInput__DecorationsEntry *message,
void *closure_data);
typedef void (*Protos__ChaincodeInput_Closure)
(const Protos__ChaincodeInput *message,
void *closure_data);
typedef void (*Protos__ChaincodeSpec_Closure)
(const Protos__ChaincodeSpec *message,
void *closure_data);
typedef void (*Protos__ChaincodeDeploymentSpec_Closure)
(const Protos__ChaincodeDeploymentSpec *message,
void *closure_data);
typedef void (*Protos__ChaincodeInvocationSpec_Closure)
(const Protos__ChaincodeInvocationSpec *message,
void *closure_data);
typedef void (*Protos__LifecycleEvent_Closure)
(const Protos__LifecycleEvent *message,
void *closure_data);
/* --- services --- */
/* --- descriptors --- */
extern const ProtobufCEnumDescriptor protos__confidentiality_level__descriptor;
extern const ProtobufCMessageDescriptor protos__chaincode_id__descriptor;
extern const ProtobufCMessageDescriptor protos__chaincode_input__descriptor;
extern const ProtobufCMessageDescriptor protos__chaincode_input__decorations_entry__descriptor;
extern const ProtobufCMessageDescriptor protos__chaincode_spec__descriptor;
extern const ProtobufCEnumDescriptor protos__chaincode_spec__type__descriptor;
extern const ProtobufCMessageDescriptor protos__chaincode_deployment_spec__descriptor;
extern const ProtobufCEnumDescriptor protos__chaincode_deployment_spec__execution_environment__descriptor;
extern const ProtobufCMessageDescriptor protos__chaincode_invocation_spec__descriptor;
extern const ProtobufCMessageDescriptor protos__lifecycle_event__descriptor;
PROTOBUF_C__END_DECLS
#endif /* PROTOBUF_C_peer_2fchaincode_2eproto__INCLUDED */
| 6,030 |
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/sessions/tab_loader_tester.h"
#include "base/run_loop.h"
#include "base/test/bind.h"
// Default-constructed tester wraps no loader until SetTabLoader() is called.
TabLoaderTester::TabLoaderTester() = default;
// Wraps the given |tab_loader|; the tester does not take ownership.
TabLoaderTester::TabLoaderTester(TabLoader* tab_loader)
    : tab_loader_(tab_loader) {}
TabLoaderTester::~TabLoaderTester() = default;
// Points the tester at |tab_loader|; any previously wrapped loader is dropped.
void TabLoaderTester::SetTabLoader(TabLoader* tab_loader) {
  tab_loader_ = tab_loader;
}
// static
// Forwards to TabLoader's test hook capping how many tabs a session restore
// may load in total.
void TabLoaderTester::SetMaxLoadedTabCountForTesting(size_t value) {
  TabLoader::SetMaxLoadedTabCountForTesting(value);
}
// static
// Registers a callback fired on every TabLoader construction so tests can
// capture the instance created during session restore.
void TabLoaderTester::SetConstructionCallbackForTesting(
    base::RepeatingCallback<void(TabLoader*)>* callback) {
  TabLoader::SetConstructionCallbackForTesting(callback);
}
// The methods below are thin pass-throughs that expose TabLoader's private
// test-only surface to tests (TabLoaderTester is presumably a friend of
// TabLoader -- confirm in the header).
void TabLoaderTester::SetMaxSimultaneousLoadsForTesting(size_t loading_slots) {
  tab_loader_->SetMaxSimultaneousLoadsForTesting(loading_slots);
}
void TabLoaderTester::SetTickClockForTesting(base::TickClock* tick_clock) {
  tab_loader_->SetTickClockForTesting(tick_clock);
}
void TabLoaderTester::MaybeLoadSomeTabsForTesting() {
  tab_loader_->MaybeLoadSomeTabsForTesting();
}
// Simulates expiry of the force-load timer.
void TabLoaderTester::ForceLoadTimerFired() {
  tab_loader_->ForceLoadTimerFired();
}
// Injects a memory pressure signal as if it came from the OS listener.
void TabLoaderTester::OnMemoryPressure(
    base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
  tab_loader_->OnMemoryPressure(memory_pressure_level);
}
void TabLoaderTester::SetTabLoadingEnabled(bool enabled) {
  tab_loader_->SetTabLoadingEnabled(enabled);
}
bool TabLoaderTester::IsLoadingEnabled() const {
  return tab_loader_->IsLoadingEnabled();
}
// Read-only accessors over TabLoader's private state, for test assertions.
size_t TabLoaderTester::force_load_delay_multiplier() const {
  return tab_loader_->force_load_delay_multiplier_;
}
base::TimeTicks TabLoaderTester::force_load_time() const {
  return tab_loader_->force_load_time_;
}
base::OneShotTimer& TabLoaderTester::force_load_timer() {
  return tab_loader_->force_load_timer_;
}
const TabLoader::TabVector& TabLoaderTester::tabs_to_load() const {
  return tab_loader_->tabs_to_load_;
}
const TabLoader::TabSet& TabLoaderTester::tabs_load_initiated() const {
  return tab_loader_->tabs_load_initiated_;
}
size_t TabLoaderTester::scheduled_to_load_count() const {
  return tab_loader_->scheduled_to_load_count_;
}
// static
// Returns the process-wide shared TabLoader, if one exists.
TabLoader* TabLoaderTester::shared_tab_loader() {
  return TabLoader::shared_tab_loader_;
}
resource_coordinator::SessionRestorePolicy* TabLoaderTester::GetPolicy() {
  return tab_loader_->delegate_->GetPolicyForTesting();
}
// True if the wrapped loader is the process-wide shared instance.
bool TabLoaderTester::IsSharedTabLoader() const {
  return tab_loader_ == TabLoader::shared_tab_loader_;
}
bool TabLoaderTester::HasTimedOutLoads() const {
if (tab_loader_->tabs_loading_.empty())
return false;
base::TimeTicks expiry_time =
tab_loader_->tabs_loading_.begin()->loading_start_time +
tab_loader_->GetLoadTimeoutPeriod();
return expiry_time <= tab_loader_->clock_->NowTicks();
}
void TabLoaderTester::WaitForTabLoadingEnabled() {
base::RunLoop run_loop;
TabLoader* tab_loader = tab_loader_;
auto callback =
base::BindLambdaForTesting([&run_loop, tab_loader](bool loading_enabled) {
if (loading_enabled && tab_loader->IsLoadingEnabled())
run_loop.Quit();
});
tab_loader_->SetTabLoadingEnabledCallbackForTesting(&callback);
run_loop.Run();
tab_loader_->SetTabLoadingEnabledCallbackForTesting(nullptr);
}
| 1,193 |
// third_party/protobuf/3.4.0/src/google/protobuf/compiler/js/js_generator.cc
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <google/protobuf/compiler/js/js_generator.h>
#include <assert.h>
#include <algorithm>
#include <limits>
#include <map>
#include <memory>
#ifndef _SHARED_PTR_H
#include <google/protobuf/stubs/shared_ptr.h>
#endif
#include <string>
#include <utility>
#include <vector>
#include <google/protobuf/stubs/logging.h>
#include <google/protobuf/stubs/common.h>
#include <google/protobuf/stubs/stringprintf.h>
#include <google/protobuf/compiler/js/well_known_types_embed.h>
#include <google/protobuf/io/printer.h>
#include <google/protobuf/io/zero_copy_stream.h>
#include <google/protobuf/descriptor.pb.h>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/stubs/strutil.h>
namespace google {
namespace protobuf {
namespace compiler {
namespace js {
// Sorted list of JavaScript keywords. These cannot be used as names. If they
// appear, we prefix them with "pb_". The list MUST remain lexicographically
// sorted (IsReserved relies on scanning/searching it).
const char* kKeyword[] = {
  "abstract",
  "boolean",
  "break",
  "byte",
  "case",
  "catch",
  "char",
  "class",
  "const",
  "continue",
  "debugger",
  "default",
  "delete",
  "do",
  "double",
  "else",
  "enum",
  "export",
  "extends",
  "false",
  "final",
  "finally",
  "float",
  "for",
  "function",
  "goto",
  "if",
  "implements",
  "import",
  "in",
  "instanceof",
  "int",
  "interface",
  "long",
  "native",
  "new",
  "null",
  "package",
  "private",
  "protected",
  "public",
  "return",
  "short",
  "static",
  "super",
  "switch",
  "synchronized",
  "this",
  "throw",
  "throws",
  "transient",
  "try",
  "typeof",
  "var",
  "void",
  "volatile",
  "while",
  "with",
};
// Number of entries in kKeyword.
static const int kNumKeyword = sizeof(kKeyword) / sizeof(char*);
namespace {
// The mode of operation for bytes fields. Historically JSPB always carried
// bytes as JS {string}, containing base64 content by convention. With binary
// and proto3 serialization the new convention is to represent it as binary
// data in Uint8Array. See b/26173701 for background on the migration.
enum BytesMode {
  BYTES_DEFAULT,  // Default type for getBytesField to return.
  BYTES_B64,      // Explicitly coerce to base64 string where needed.
  BYTES_U8,       // Explicitly coerce to Uint8Array where needed.
};
// Returns true if |ident| is a JavaScript reserved word and therefore cannot
// be used as an identifier in generated code (callers prefix it with "pb_").
bool IsReserved(const string& ident) {
  // kKeyword is kept lexicographically sorted (see its definition), so a
  // binary search replaces the previous O(kNumKeyword) linear scan.
  return std::binary_search(kKeyword, kKeyword + kNumKeyword, ident);
}
// Returns true if |sp| ends with the suffix |x| (an empty |x| always matches).
bool StrEndsWith(StringPiece sp, StringPiece x) {
  if (sp.size() < x.size()) {
    return false;
  }
  return sp.substr(sp.size() - x.size()) == x;
}
// Returns a copy of |filename| with any trailing ".protodevel" or ".proto"
// suffix stripped; unrelated names are returned unchanged.
// TODO(haberman): Unify with copy in compiler/cpp/internal/helpers.cc.
string StripProto(const string& filename) {
  if (StrEndsWith(filename, ".protodevel")) {
    return StripSuffixString(filename, ".protodevel");
  }
  return StripSuffixString(filename, ".proto");
}
// Given a filename like foo/bar/baz.proto, returns the corresponding
// JavaScript file foo/bar/baz.js (the extension comes from the options).
string GetJSFilename(const GeneratorOptions& options, const string& filename) {
  return StripProto(filename) + options.GetFileNameExtension();
}
// Given a filename like foo/bar/baz.proto, returns the relative prefix
// ("../../", "./", ...) leading from |from_filename|'s directory back to the
// output root. Well-known types are special-cased to resolve against the
// 'google-protobuf' npm package instead.
string GetRootPath(const string& from_filename, const string& to_filename) {
  if (to_filename.find("google/protobuf") == 0) {
    // Well-known types (.proto files in the google/protobuf directory) are
    // assumed to come from the 'google-protobuf' npm package. We may want to
    // generalize this exception later by letting others put generated code in
    // their own npm packages.
    return "google-protobuf/";
  }
  const size_t depth =
      std::count(from_filename.begin(), from_filename.end(), '/');
  if (depth == 0) {
    return "./";
  }
  // One "../" per directory level of |from_filename|.
  string prefix;
  prefix.reserve(depth * 3);
  for (size_t level = 0; level < depth; level++) {
    prefix += "../";
  }
  return prefix;
}
// Returns the alias we assign to the module of the given .proto filename
// when importing.
string ModuleAlias(const string& filename) {
  // This scheme could technically cause problems if a file includes any 2 of:
  //   foo/bar_baz.proto
  //   foo_bar_baz.proto
  //   foo_bar/baz.proto
  //
  // We'll worry about this problem if/when we actually see it. This name isn't
  // exposed to users so we can change it later if we need to.
  string alias = StripProto(filename);
  StripString(&alias, "-", '$');
  StripString(&alias, "/", '_');
  StripString(&alias, ".", '_');
  alias += "_pb";
  return alias;
}
// Returns the fully normalized JavaScript namespace path for the given file
// descriptor's package: an explicit namespace prefix wins, then the proto
// package, then the bare "proto" namespace.
string GetFilePath(const GeneratorOptions& options,
                   const FileDescriptor* file) {
  if (!options.namespace_prefix.empty()) {
    return options.namespace_prefix;
  }
  if (file->package().empty()) {
    return "proto";
  }
  return "proto." + file->package();
}
// Returns the name of the message with a leading dot and taking into account
// nesting, for example ".OuterMessage.InnerMessage", or returns empty if
// descriptor is null. This function does not handle namespacing, only message
// nesting.
string GetNestedMessageName(const Descriptor* descriptor) {
  if (descriptor == NULL) {
    return "";
  }
  const string stripped =
      StripPrefixString(descriptor->full_name(), descriptor->file()->package());
  // Add a leading dot only when one is not already present.
  if (stripped.empty() || stripped[0] == '.') {
    return stripped;
  }
  return "." + stripped;
}
// Returns the path prefix for a message or enumeration that lives under the
// given file and containing type; a non-empty prefix ends with ".".
string GetPrefix(const GeneratorOptions& options,
                 const FileDescriptor* file_descriptor,
                 const Descriptor* containing_type) {
  const string base = GetFilePath(options, file_descriptor) +
                      GetNestedMessageName(containing_type);
  return base.empty() ? base : base + ".";
}
// Returns the fully normalized JavaScript path for the given message
// descriptor (namespace prefix + nesting + message name).
string GetMessagePath(const GeneratorOptions& options,
                      const Descriptor* descriptor) {
  const string prefix =
      GetPrefix(options, descriptor->file(), descriptor->containing_type());
  return prefix + descriptor->name();
}
// Returns the fully normalized JavaScript path for the given enumeration
// descriptor (namespace prefix + nesting + enum name).
string GetEnumPath(const GeneratorOptions& options,
                   const EnumDescriptor* enum_descriptor) {
  const string prefix = GetPrefix(options, enum_descriptor->file(),
                                  enum_descriptor->containing_type());
  return prefix + enum_descriptor->name();
}
// Returns the JS expression used to reference |to_message| from code generated
// for |from_file|: the full global name, except for a cross-file reference
// under CommonJS imports, which must go through the module alias.
string MaybeCrossFileRef(const GeneratorOptions& options,
                         const FileDescriptor* from_file,
                         const Descriptor* to_message) {
  const bool cross_file_commonjs =
      options.import_style == GeneratorOptions::kImportCommonJs &&
      from_file != to_message->file();
  if (!cross_file_commonjs) {
    // Within a single file we use a full name.
    return GetMessagePath(options, to_message);
  }
  // Cross-file ref in CommonJS needs to use the module alias instead of
  // the global name.
  return ModuleAlias(to_message->file()->name()) +
         GetNestedMessageName(to_message->containing_type()) + "." +
         to_message->name();
}
// Returns the JS reference for a submessage field's message type; only valid
// for message-typed fields (enforced by the CHECK).
string SubmessageTypeRef(const GeneratorOptions& options,
                         const FieldDescriptor* field) {
  GOOGLE_CHECK(field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE);
  return MaybeCrossFileRef(options, field->file(), field->message_type());
}
// - Object field name: LOWER_UNDERSCORE -> LOWER_CAMEL, except for group fields
// (UPPER_CAMEL -> LOWER_CAMEL), with "List" (or "Map") appended if appropriate,
// and with reserved words triggering a "pb_" prefix.
// - Getters/setters: LOWER_UNDERSCORE -> UPPER_CAMEL, except for group fields
// (use the name directly), then append "List" if appropriate, then append "$"
// if resulting name is equal to a reserved word.
// - Enums: just uppercase.
// Locale-independent version of ToLower that deals only with ASCII A-Z.
char ToLowerASCII(char c) {
if (c >= 'A' && c <= 'Z') {
return (c - 'A') + 'a';
} else {
return c;
}
}
std::vector<string> ParseLowerUnderscore(const string& input) {
std::vector<string> words;
string running = "";
for (int i = 0; i < input.size(); i++) {
if (input[i] == '_') {
if (!running.empty()) {
words.push_back(running);
running.clear();
}
} else {
running += ToLowerASCII(input[i]);
}
}
if (!running.empty()) {
words.push_back(running);
}
return words;
}
std::vector<string> ParseUpperCamel(const string& input) {
std::vector<string> words;
string running = "";
for (int i = 0; i < input.size(); i++) {
if (input[i] >= 'A' && input[i] <= 'Z' && !running.empty()) {
words.push_back(running);
running.clear();
}
running += ToLowerASCII(input[i]);
}
if (!running.empty()) {
words.push_back(running);
}
return words;
}
// Joins |words| into lowerCamelCase: the first word keeps/forces a lowercase
// initial, every subsequent word gets an uppercase initial.
string ToLowerCamel(const std::vector<string>& words) {
  string result;
  for (size_t i = 0; i < words.size(); i++) {
    string word = words[i];
    if (i == 0) {
      if (word[0] >= 'A' && word[0] <= 'Z') {
        word[0] = static_cast<char>(word[0] - 'A' + 'a');
      }
    } else if (word[0] >= 'a' && word[0] <= 'z') {
      word[0] = static_cast<char>(word[0] - 'a' + 'A');
    }
    result += word;
  }
  return result;
}
// Joins |words| into UpperCamelCase: every word gets an uppercase initial.
string ToUpperCamel(const std::vector<string>& words) {
  string result;
  for (size_t i = 0; i < words.size(); i++) {
    string word = words[i];
    const char first = word[0];
    if (first >= 'a' && first <= 'z') {
      word[0] = static_cast<char>(first - 'a' + 'A');
    }
    result += word;
  }
  return result;
}
// Based on code from descriptor.cc (Thanks Kenton!)
// Uppercases the entire string (ASCII only), turning ValueName into VALUENAME.
string ToEnumCase(const string& input) {
  string result;
  result.reserve(input.size());
  for (size_t i = 0; i < input.size(); i++) {
    const char c = input[i];
    result.push_back(('a' <= c && c <= 'z')
                         ? static_cast<char>(c - 'a' + 'A')
                         : c);
  }
  return result;
}
// Lowercases the entire string (ASCII only) for use as a file name.
string ToFileName(const string& input) {
  string result;
  result.reserve(input.size());
  for (size_t i = 0; i < input.size(); i++) {
    const char c = input[i];
    result.push_back(('A' <= c && c <= 'Z')
                         ? static_cast<char>(c - 'A' + 'a')
                         : c);
  }
  return result;
}
// When we're generating one output file per type name, this is the filename
// that top-level extensions should go in.
string GetExtensionFileName(const GeneratorOptions& options,
                            const FileDescriptor* file) {
  return options.output_dir + "/" + ToFileName(GetFilePath(options, file)) +
         options.GetFileNameExtension();
}
// When we're generating one output file per type name, this is the filename
// that a top-level message should go in.
string GetMessageFileName(const GeneratorOptions& options,
                          const Descriptor* desc) {
  return options.output_dir + "/" + ToFileName(desc->name()) +
         options.GetFileNameExtension();
}
// When we're generating one output file per type name, this is the filename
// that a top-level enum should go in.
string GetEnumFileName(const GeneratorOptions& options,
                       const EnumDescriptor* desc) {
  return options.output_dir + "/" + ToFileName(desc->name()) +
         options.GetFileNameExtension();
}
// Returns the message/response ID, if set. Always empty in the open-source
// build (a Google-internal extension fills this in).
string GetMessageId(const Descriptor* desc) {
  return string();
}
// True for extensions of descriptor.proto itself, which are excluded from
// output "to avoid clutter" (from original codegen).
bool IgnoreExtensionField(const FieldDescriptor* field) {
  // Exclude descriptor extensions from output "to avoid clutter" (from original
  // codegen).
  return field->is_extension() &&
         field->containing_type()->file()->name() ==
             "google/protobuf/descriptor.proto";
}
// Used inside Google only -- do not remove.
bool IsResponse(const Descriptor* desc) { return false; }
// A field is ignored iff it is an ignored extension (see above).
bool IgnoreField(const FieldDescriptor* field) {
  return IgnoreExtensionField(field);
}
// Used inside Google only -- do not remove.
bool ShouldTreatMapsAsRepeatedFields(const FileDescriptor& descriptor) {
  return false;
}
// Do we ignore this message type? Synthetic map-entry messages are ignored
// unless maps are being treated as repeated fields.
bool IgnoreMessage(const GeneratorOptions& options, const Descriptor* d) {
  return d->options().map_entry() &&
         !ShouldTreatMapsAsRepeatedFields(*d->file());
}
// True if |field| should be emitted with the JSPB map API.
bool IsMap(const GeneratorOptions& options, const FieldDescriptor* field) {
  return field->is_map() && !ShouldTreatMapsAsRepeatedFields(*field->file());
}
// Does JSPB ignore this entire oneof? True only if all fields are ignored.
bool IgnoreOneof(const OneofDescriptor* oneof) {
  for (int i = 0; i < oneof->field_count(); i++) {
    if (!IgnoreField(oneof->field(i))) {
      return false;
    }
  }
  return true;
}
// Builds the base JS identifier for |field|: camel-cased from the field name
// (or from the group's message-type name for group fields), with "Map"
// appended for map fields and "List" for repeated fields (unless |drop_list|).
string JSIdent(const GeneratorOptions& options, const FieldDescriptor* field,
               bool is_upper_camel, bool is_map, bool drop_list) {
  string result;
  if (field->type() == FieldDescriptor::TYPE_GROUP) {
    // Group fields are named after their message type rather than the field.
    const std::vector<string> words =
        ParseUpperCamel(field->message_type()->name());
    result = is_upper_camel ? ToUpperCamel(words) : ToLowerCamel(words);
  } else {
    const std::vector<string> words = ParseLowerUnderscore(field->name());
    result = is_upper_camel ? ToUpperCamel(words) : ToLowerCamel(words);
  }
  if (is_map || IsMap(options, field)) {
    // JSPB-style or proto3-style map.
    result += "Map";
  } else if (!drop_list && field->is_repeated()) {
    // Repeated field.
    result += "List";
  }
  return result;
}
// Returns the lowerCamel object-literal key for |field|; reserved JS words
// get a "pb_" prefix so the generated literal stays valid JavaScript.
string JSObjectFieldName(const GeneratorOptions& options,
                         const FieldDescriptor* field) {
  const string ident = JSIdent(options, field,
                               /* is_upper_camel = */ false,
                               /* is_map = */ false,
                               /* drop_list = */ false);
  return IsReserved(ident) ? "pb_" + ident : ident;
}
// Maps a BytesMode onto the getter-name suffix it contributes ("", "B64",
// or "U8"); any other value is a programming error.
string JSByteGetterSuffix(BytesMode bytes_mode) {
  if (bytes_mode == BYTES_DEFAULT) {
    return "";
  }
  if (bytes_mode == BYTES_B64) {
    return "B64";
  }
  if (bytes_mode == BYTES_U8) {
    return "U8";
  }
  assert(false);
  return "";
}
// Returns the field name as a capitalized portion of a getter/setter method
// name, e.g. MyField for .getMyField(). For bytes fields, |bytes_mode| may
// append an "_asB64"/"_asU8" coercion suffix; |drop_list| omits the "List"
// suffix for repeated fields.
string JSGetterName(const GeneratorOptions& options,
                    const FieldDescriptor* field,
                    BytesMode bytes_mode = BYTES_DEFAULT,
                    bool drop_list = false) {
  string name = JSIdent(options, field,
                        /* is_upper_camel = */ true,
                        /* is_map = */ false, drop_list);
  if (field->type() == FieldDescriptor::TYPE_BYTES) {
    string suffix = JSByteGetterSuffix(bytes_mode);
    if (!suffix.empty()) {
      name += "_as" + suffix;
    }
  }
  if (name == "Extension" || name == "JsPbMessageId") {
    // Avoid conflicts with base-class names.
    name += "$";
  }
  return name;
}
// Returns the UpperCamel name used for a oneof's generated accessors.
string JSOneofName(const OneofDescriptor* oneof) {
  return ToUpperCamel(ParseLowerUnderscore(oneof->name()));
}
// Returns the index corresponding to this field in the JSPB array (underlying
// data storage array).
string JSFieldIndex(const FieldDescriptor* field) {
  // Determine whether this field is a member of a group. Group fields are a bit
  // wonky: their "containing type" is a message type created just for the
  // group, and that type's parent type has a field with the group-message type
  // as its message type and TYPE_GROUP as its field type. For such fields, the
  // index we use is relative to the field number of the group submessage field.
  // For all other fields, we just use the field number.
  const Descriptor* containing_type = field->containing_type();
  const Descriptor* parent_type = containing_type->containing_type();
  if (parent_type != NULL) {
    for (int i = 0; i < parent_type->field_count(); i++) {
      if (parent_type->field(i)->type() == FieldDescriptor::TYPE_GROUP &&
          parent_type->field(i)->message_type() == containing_type) {
        return SimpleItoa(field->number() - parent_type->field(i)->number());
      }
    }
  }
  return SimpleItoa(field->number());
}
// Returns |oneof|'s index among its message's oneofs, counting only oneofs
// with at least one non-ignored field.
string JSOneofIndex(const OneofDescriptor* oneof) {
  int index = -1;
  for (int i = 0; i < oneof->containing_type()->oneof_decl_count(); i++) {
    const OneofDescriptor* o = oneof->containing_type()->oneof_decl(i);
    // If at least one field in this oneof is not JSPB-ignored, count the oneof.
    for (int j = 0; j < o->field_count(); j++) {
      const FieldDescriptor* f = o->field(j);
      if (!IgnoreField(f)) {
        index++;
        break;  // inner loop
      }
    }
    if (o == oneof) {
      break;
    }
  }
  return SimpleItoa(index);
}
// Decodes a codepoint in \x0000 -- \xFFFF (i.e. at most a 3-byte UTF-8
// sequence). On entry *length is the number of bytes available; on exit it is
// the number of bytes consumed, or 0 to signal rejection (overlong/4-byte
// lead byte or truncated input). Continuation bytes are NOT validated here.
uint16 DecodeUTF8Codepoint(uint8* bytes, size_t* length) {
  if (*length == 0) {
    return 0;
  }
  size_t expected = 0;
  if ((*bytes & 0x80) == 0) {
    expected = 1;
  } else if ((*bytes & 0xe0) == 0xc0) {
    expected = 2;
  } else if ((*bytes & 0xf0) == 0xe0) {
    expected = 3;
  } else {
    // Too long -- don't accept.
    *length = 0;
    return 0;
  }
  if (*length < expected) {
    // Not enough bytes -- don't accept.
    *length = 0;
    return 0;
  }
  *length = expected;
  // Assemble the codepoint from the payload bits of each byte.
  switch (expected) {
    case 1: return bytes[0];
    case 2: return ((bytes[0] & 0x1F) << 6) |
                   ((bytes[1] & 0x3F) << 0);
    case 3: return ((bytes[0] & 0x0F) << 12) |
                   ((bytes[1] & 0x3F) << 6) |
                   ((bytes[2] & 0x3F) << 0);
    default: return 0;
  }
}
// Escapes the contents of a string to be included within double-quotes ("") in
// JavaScript. The input data should be a UTF-8 encoded C++ string of chars.
// Returns false if |out| was truncated because |in| contained invalid UTF-8 or
// codepoints outside the BMP.
// TODO(lukestebbing): Support codepoints outside the BMP.
bool EscapeJSString(const string& in, string* out) {
  size_t decoded = 0;
  for (size_t i = 0; i < in.size(); i += decoded) {
    uint16 codepoint = 0;
    // Decode the next UTF-8 codepoint (up to 3 bytes; zero-padded at EOF).
    size_t have_bytes = in.size() - i;
    uint8 bytes[3] = {
        static_cast<uint8>(in[i]),
        static_cast<uint8>(((i + 1) < in.size()) ? in[i + 1] : 0),
        static_cast<uint8>(((i + 2) < in.size()) ? in[i + 2] : 0),
    };
    codepoint = DecodeUTF8Codepoint(bytes, &have_bytes);
    if (have_bytes == 0) {
      return false;
    }
    decoded = have_bytes;
    switch (codepoint) {
      // HTML-sensitive characters are hex-escaped so the output is safe to
      // embed in <script> blocks.
      case '\'': *out += "\\x27"; break;
      case '"': *out += "\\x22"; break;
      case '<': *out += "\\x3c"; break;
      case '=': *out += "\\x3d"; break;
      case '>': *out += "\\x3e"; break;
      case '&': *out += "\\x26"; break;
      case '\b': *out += "\\b"; break;
      case '\t': *out += "\\t"; break;
      case '\n': *out += "\\n"; break;
      case '\f': *out += "\\f"; break;
      case '\r': *out += "\\r"; break;
      case '\\': *out += "\\\\"; break;
      default:
        // TODO(lukestebbing): Once we're supporting codepoints outside the BMP,
        // use a single Unicode codepoint escape if the output language is
        // ECMAScript 2015 or above. Otherwise, use a surrogate pair.
        // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Lexical_grammar#String_literals
        if (codepoint >= 0x20 && codepoint <= 0x7e) {
          *out += static_cast<char>(codepoint);
        } else if (codepoint >= 0x100) {
          *out += StringPrintf("\\u%04x", codepoint);
        } else {
          *out += StringPrintf("\\x%02x", codepoint);
        }
        break;
    }
  }
  return true;
}
// Encodes |in| as standard base64 ('+'/'/' alphabet, '=' padding).
string EscapeBase64(const string& in) {
  static const char* kAlphabet =
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
  string result;
  for (size_t i = 0; i < in.size(); i += 3) {
    // Pack up to three octets into a 24-bit group. The casts through
    // unsigned char are essential: on platforms where `char` is signed,
    // bytes >= 0x80 would sign-extend and corrupt the other 6-bit groups,
    // producing invalid base64 output.
    int value = (static_cast<unsigned char>(in[i]) << 16) |
        (((i + 1) < in.size())
             ? (static_cast<unsigned char>(in[i + 1]) << 8) : 0) |
        (((i + 2) < in.size())
             ? (static_cast<unsigned char>(in[i + 2]) << 0) : 0);
    result += kAlphabet[(value >> 18) & 0x3f];
    result += kAlphabet[(value >> 12) & 0x3f];
    if ((i + 1) < in.size()) {
      result += kAlphabet[(value >> 6) & 0x3f];
    } else {
      result += '=';
    }
    if ((i + 2) < in.size()) {
      result += kAlphabet[(value >> 0) & 0x3f];
    } else {
      result += '=';
    }
  }
  return result;
}
// Post-process the result of SimpleFtoa/SimpleDtoa to *exactly* match the
// original codegen's formatting (which is just .toString() on java.lang.Double
// or java.lang.Float).
string PostProcessFloat(string result) {
  // If inf, -inf or nan, replace with +Infinity, -Infinity or NaN.
  if (result == "inf") {
    return "Infinity";
  } else if (result == "-inf") {
    return "-Infinity";
  } else if (result == "nan") {
    return "NaN";
  }
  // If scientific notation (e.g., "1e10"), (i) capitalize the "e", (ii)
  // ensure that the mantissa (portion prior to the "e") has at least one
  // fractional digit (after the decimal point), and (iii) strip any unnecessary
  // leading zeroes and/or '+' signs from the exponent.
  string::size_type exp_pos = result.find('e');
  if (exp_pos != string::npos) {
    string mantissa = result.substr(0, exp_pos);
    string exponent = result.substr(exp_pos + 1);
    // Add ".0" to mantissa if no fractional part exists.
    if (mantissa.find('.') == string::npos) {
      mantissa += ".0";
    }
    // Strip the sign off the exponent and store as |exp_neg|.
    bool exp_neg = false;
    if (!exponent.empty() && exponent[0] == '+') {
      exponent = exponent.substr(1);
    } else if (!exponent.empty() && exponent[0] == '-') {
      exp_neg = true;
      exponent = exponent.substr(1);
    }
    // Strip any leading zeroes off the exponent (keeping at least one digit).
    while (exponent.size() > 1 && exponent[0] == '0') {
      exponent = exponent.substr(1);
    }
    return mantissa + "E" + string(exp_neg ? "-" : "") + exponent;
  }
  // Otherwise, this is an ordinary decimal number. Append ".0" if result has no
  // decimal/fractional part in order to match output of original codegen.
  if (result.find('.') == string::npos) {
    result += ".0";
  }
  return result;
}
// Formats a float exactly like java.lang.Float.toString() (see
// PostProcessFloat for the normalization rules).
string FloatToString(float value) {
  return PostProcessFloat(SimpleFtoa(value));
}
// Formats a double exactly like java.lang.Double.toString().
string DoubleToString(double value) {
  return PostProcessFloat(SimpleDtoa(value));
}
// Return true if this is an integral field that should be represented as string
// in JS.
bool IsIntegralFieldWithStringJSType(const FieldDescriptor* field) {
switch (field->cpp_type()) {
case FieldDescriptor::CPPTYPE_INT64:
case FieldDescriptor::CPPTYPE_UINT64:
// The default value of JSType is JS_NORMAL, which behaves the same as
// JS_NUMBER.
return field->options().jstype() == google::protobuf::FieldOptions::JS_STRING;
default:
return false;
}
}
// Wraps |orig| in double quotes when the field carries 64-bit integers as JS
// strings; otherwise returns it unchanged.
string MaybeNumberString(const FieldDescriptor* field, const string& orig) {
  if (IsIntegralFieldWithStringJSType(field)) {
    return "\"" + orig + "\"";
  }
  return orig;
}
// Returns the JS literal for |field|'s default value ("[]" for repeated
// fields). Unsigned integers are reinterpreted as signed to byte-match the
// original Java codegen's output.
string JSFieldDefault(const FieldDescriptor* field) {
  if (field->is_repeated()) {
    return "[]";
  }
  switch (field->cpp_type()) {
    case FieldDescriptor::CPPTYPE_INT32:
      return MaybeNumberString(
          field, SimpleItoa(field->default_value_int32()));
    case FieldDescriptor::CPPTYPE_UINT32:
      // The original codegen is in Java, and Java protobufs store unsigned
      // integer values as signed integer values. In order to exactly match the
      // output, we need to reinterpret as base-2 signed. Ugh.
      return MaybeNumberString(
          field, SimpleItoa(static_cast<int32>(field->default_value_uint32())));
    case FieldDescriptor::CPPTYPE_INT64:
      return MaybeNumberString(
          field, SimpleItoa(field->default_value_int64()));
    case FieldDescriptor::CPPTYPE_UINT64:
      // See above note for uint32 -- reinterpreting as signed.
      return MaybeNumberString(
          field, SimpleItoa(static_cast<int64>(field->default_value_uint64())));
    case FieldDescriptor::CPPTYPE_ENUM:
      return SimpleItoa(field->default_value_enum()->number());
    case FieldDescriptor::CPPTYPE_BOOL:
      return field->default_value_bool() ? "true" : "false";
    case FieldDescriptor::CPPTYPE_FLOAT:
      return FloatToString(field->default_value_float());
    case FieldDescriptor::CPPTYPE_DOUBLE:
      return DoubleToString(field->default_value_double());
    case FieldDescriptor::CPPTYPE_STRING:
      if (field->type() == FieldDescriptor::TYPE_STRING) {
        string out;
        bool is_valid = EscapeJSString(field->default_value_string(), &out);
        if (!is_valid) {
          // TODO(lukestebbing): Decide whether this should be a hard error.
          GOOGLE_LOG(WARNING) << "The default value for field " << field->full_name()
                       << " was truncated since it contained invalid UTF-8 or"
                          " codepoints outside the basic multilingual plane.";
        }
        return "\"" + out + "\"";
      } else {  // Bytes
        // Bytes defaults are emitted as base64 strings.
        return "\"" + EscapeBase64(field->default_value_string()) + "\"";
      }
    case FieldDescriptor::CPPTYPE_MESSAGE:
      return "null";
  }
  GOOGLE_LOG(FATAL) << "Shouldn't reach here.";
  return "";
}
// Returns the .proto-language name for |field|'s declared wire type, or the
// referenced message/enum path for composite types. Unknown types map to "".
string ProtoTypeName(const GeneratorOptions& options,
                     const FieldDescriptor* field) {
  switch (field->type()) {
    case FieldDescriptor::TYPE_BOOL:
      return "bool";
    case FieldDescriptor::TYPE_INT32:
      return "int32";
    case FieldDescriptor::TYPE_UINT32:
      return "uint32";
    case FieldDescriptor::TYPE_SINT32:
      return "sint32";
    case FieldDescriptor::TYPE_FIXED32:
      return "fixed32";
    case FieldDescriptor::TYPE_SFIXED32:
      return "sfixed32";
    case FieldDescriptor::TYPE_INT64:
      return "int64";
    case FieldDescriptor::TYPE_UINT64:
      return "uint64";
    case FieldDescriptor::TYPE_SINT64:
      return "sint64";
    case FieldDescriptor::TYPE_FIXED64:
      return "fixed64";
    case FieldDescriptor::TYPE_SFIXED64:
      return "sfixed64";
    case FieldDescriptor::TYPE_FLOAT:
      return "float";
    case FieldDescriptor::TYPE_DOUBLE:
      return "double";
    case FieldDescriptor::TYPE_STRING:
      return "string";
    case FieldDescriptor::TYPE_BYTES:
      return "bytes";
    // Groups and messages both resolve to the referenced message path.
    case FieldDescriptor::TYPE_GROUP:
    case FieldDescriptor::TYPE_MESSAGE:
      return GetMessagePath(options, field->message_type());
    case FieldDescriptor::TYPE_ENUM:
      return GetEnumPath(options, field->enum_type());
    default:
      return "";
  }
}
// JSDoc type for an integral field: "string" for JS_STRING-annotated 64-bit
// ints, "number" otherwise.
string JSIntegerTypeName(const FieldDescriptor* field) {
  if (IsIntegralFieldWithStringJSType(field)) {
    return "string";
  }
  return "number";
}
// JSDoc type for a string/bytes field. Bytes fields depend on |bytes_mode|:
// the default accepts either representation, B64 is a string, U8 is a
// Uint8Array. Plain string fields are always "string".
string JSStringTypeName(const GeneratorOptions& options,
                        const FieldDescriptor* field,
                        BytesMode bytes_mode) {
  if (field->type() != FieldDescriptor::TYPE_BYTES) {
    return "string";
  }
  switch (bytes_mode) {
    case BYTES_DEFAULT:
      return "(string|Uint8Array)";
    case BYTES_B64:
      return "string";
    case BYTES_U8:
      return "Uint8Array";
    default:
      assert(false);
  }
  return "string";
}
// Returns the JSDoc type name for |field|'s value: JS primitives for scalar
// types, and the generated enum/message path for composite types.
string JSTypeName(const GeneratorOptions& options,
                  const FieldDescriptor* field,
                  BytesMode bytes_mode) {
  switch (field->cpp_type()) {
    case FieldDescriptor::CPPTYPE_BOOL:
      return "boolean";
    // All four integral types share the number-vs-string decision.
    case FieldDescriptor::CPPTYPE_INT32:
    case FieldDescriptor::CPPTYPE_INT64:
    case FieldDescriptor::CPPTYPE_UINT32:
    case FieldDescriptor::CPPTYPE_UINT64:
      return JSIntegerTypeName(field);
    case FieldDescriptor::CPPTYPE_FLOAT:
    case FieldDescriptor::CPPTYPE_DOUBLE:
      return "number";
    case FieldDescriptor::CPPTYPE_STRING:
      return JSStringTypeName(options, field, bytes_mode);
    case FieldDescriptor::CPPTYPE_ENUM:
      return GetEnumPath(options, field->enum_type());
    case FieldDescriptor::CPPTYPE_MESSAGE:
      return GetMessagePath(options, field->message_type());
    default:
      return "";
  }
}
// Used inside Google only -- do not remove.
// Always false in this open-source build; internal builds may differ.
bool UseBrokenPresenceSemantics(const GeneratorOptions& options,
                                const FieldDescriptor* field) {
  return false;
}
// Returns true for fields whose accessors yield "null" when the field is
// unset. Normally this is only non-repeated submessages, but legacy users
// relied on the broken-presence behavior as well.
bool ReturnsNullWhenUnset(const GeneratorOptions& options,
                          const FieldDescriptor* field) {
  const bool optional_submessage =
      field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE &&
      field->is_optional();
  if (optional_submessage) {
    return true;
  }

  // TODO(haberman): remove this case and unconditionally return false.
  return UseBrokenPresenceSemantics(options, field) && !field->is_repeated() &&
         !field->has_default_value();
}
// In a sane world this would equal ReturnsNullWhenUnset(). In the status quo,
// some fields declare they never return null/undefined even though they do:
// required fields, optional enum fields, and proto3 primitive fields.
bool DeclaredReturnTypeIsNullable(const GeneratorOptions& options,
                                  const FieldDescriptor* field) {
  if (field->is_required()) {
    return false;
  }
  if (field->type() == FieldDescriptor::TYPE_ENUM) {
    return false;
  }
  const bool proto3_primitive =
      field->file()->syntax() == FileDescriptor::SYNTAX_PROTO3 &&
      field->cpp_type() != FieldDescriptor::CPPTYPE_MESSAGE;
  if (proto3_primitive) {
    return false;
  }
  return ReturnsNullWhenUnset(options, field);
}
// Whether the generated setter for |field| accepts "undefined": true for
// nullable-when-unset fields and for broken-presence fields.
bool SetterAcceptsUndefined(const GeneratorOptions& options,
                            const FieldDescriptor* field) {
  return ReturnsNullWhenUnset(options, field) ||
         UseBrokenPresenceSemantics(options, field);
}
// Whether the generated setter for |field| accepts "null".
bool SetterAcceptsNull(const GeneratorOptions& options,
                       const FieldDescriptor* field) {
  if (ReturnsNullWhenUnset(options, field)) {
    return true;
  }
  // With broken presence semantics, only fields with defaults accept "null"
  // in setters; other fields do not. A quirk of the old codegen.
  return field->has_default_value() &&
         UseBrokenPresenceSemantics(options, field);
}
// Returns true for JSDoc types that are non-nullable by default; the style
// guide requires omitting the "!" prefix on these.
bool IsPrimitive(const string& type) {
  if (type == "undefined" || type == "boolean") {
    return true;
  }
  return type == "string" || type == "number";
}
// Builds the Closure JSDoc type annotation for |field|.
//   is_setter_argument: annotate as a setter parameter (may add "?" and
//       "|undefined"); mutually exclusive with force_present.
//   force_present: annotate as always-present (no null/undefined markers).
//   singular_if_not_packed: treat a non-packed repeated field as singular.
//   bytes_mode: how bytes fields are surfaced (string, Uint8Array, or both).
string JSFieldTypeAnnotation(const GeneratorOptions& options,
                             const FieldDescriptor* field,
                             bool is_setter_argument,
                             bool force_present,
                             bool singular_if_not_packed,
                             BytesMode bytes_mode = BYTES_DEFAULT) {
  GOOGLE_CHECK(!(is_setter_argument && force_present));
  string jstype = JSTypeName(options, field, bytes_mode);

  // Wrap repeated fields in Array.<...>, except default-mode bytes which get
  // a special dual-representation array type.
  if (field->is_repeated() &&
      (field->is_packed() || !singular_if_not_packed)) {
    if (field->type() == FieldDescriptor::TYPE_BYTES &&
        bytes_mode == BYTES_DEFAULT) {
      jstype = "(Array<!Uint8Array>|Array<string>)";
    } else {
      if (!IsPrimitive(jstype)) {
        jstype = "!" + jstype;
      }
      jstype = "Array.<" + jstype + ">";
    }
  }

  // Add "?"/"|undefined" markers as dictated by setter/getter semantics.
  bool is_null_or_undefined = false;

  if (is_setter_argument) {
    if (SetterAcceptsNull(options, field)) {
      jstype = "?" + jstype;
      is_null_or_undefined = true;
    }

    if (SetterAcceptsUndefined(options, field)) {
      jstype += "|undefined";
      is_null_or_undefined = true;
    }
  } else if (force_present) {
    // Don't add null or undefined.
  } else {
    if (DeclaredReturnTypeIsNullable(options, field)) {
      jstype = "?" + jstype;
      is_null_or_undefined = true;
    }
  }

  // Non-nullable, non-primitive types get the explicit "!" prefix.
  if (!is_null_or_undefined && !IsPrimitive(jstype)) {
    jstype = "!" + jstype;
  }

  return jstype;
}
// Capitalized wire-type name used to build BinaryReader/BinaryWriter method
// names, with a "String" suffix for string-represented 64-bit integers.
string JSBinaryReaderMethodType(const FieldDescriptor* field) {
  string name = field->type_name();
  if (name[0] >= 'a' && name[0] <= 'z') {
    name[0] = name[0] - 'a' + 'A';
  }
  if (IsIntegralFieldWithStringJSType(field)) {
    name += "String";
  }
  return name;
}
// Method-name suffix for binary (de)serialization: "Packed" for packed
// fields; "Repeated" for repeated fields, but only on the writer side.
string JSBinaryReadWriteMethodName(const FieldDescriptor* field,
                                   bool is_writer) {
  const string base = JSBinaryReaderMethodType(field);
  if (field->is_packed()) {
    return "Packed" + base;
  }
  if (is_writer && field->is_repeated()) {
    return "Repeated" + base;
  }
  return base;
}
// Fully-qualified jspb.BinaryReader method used to deserialize |field|.
string JSBinaryReaderMethodName(const GeneratorOptions& options,
                                const FieldDescriptor* field) {
  const string suffix =
      JSBinaryReadWriteMethodName(field, /* is_writer = */ false);
  return "jspb.BinaryReader.prototype.read" + suffix;
}
// Fully-qualified jspb.BinaryWriter method used to serialize |field|.
string JSBinaryWriterMethodName(const GeneratorOptions& options,
                                const FieldDescriptor* field) {
  const string suffix =
      JSBinaryReadWriteMethodName(field, /* is_writer = */ true);
  return "jspb.BinaryWriter.prototype.write" + suffix;
}
// Return clause emitted after a setter; currently always empty.
string JSReturnClause(const FieldDescriptor* desc) {
  return string();
}
// JSDoc @return documentation for a setter; currently always empty.
string JSReturnDoc(const GeneratorOptions& options,
                   const FieldDescriptor* desc) {
  return string();
}
bool HasRepeatedFields(const GeneratorOptions& options,
const Descriptor* desc) {
for (int i = 0; i < desc->field_count(); i++) {
if (desc->field(i)->is_repeated() && !IsMap(options, desc->field(i))) {
return true;
}
}
return false;
}
static const char* kRepeatedFieldArrayName = ".repeatedFields_";

// Name of the generated repeated-field-numbers array for |desc|, or the
// literal "null" when the message has no repeated fields.
string RepeatedFieldsArrayName(const GeneratorOptions& options,
                               const Descriptor* desc) {
  if (!HasRepeatedFields(options, desc)) {
    return "null";
  }
  return GetMessagePath(options, desc) + kRepeatedFieldArrayName;
}
bool HasOneofFields(const Descriptor* desc) {
for (int i = 0; i < desc->field_count(); i++) {
if (desc->field(i)->containing_oneof()) {
return true;
}
}
return false;
}
static const char* kOneofGroupArrayName = ".oneofGroups_";

// Name of the generated oneof-groups array for |desc|, or the literal "null"
// when the message has no oneof fields.
string OneofFieldsArrayName(const GeneratorOptions& options,
                            const Descriptor* desc) {
  if (!HasOneofFields(desc)) {
    return "null";
  }
  return GetMessagePath(options, desc) + kOneofGroupArrayName;
}
// JS array literal listing the field indices of all repeated, non-map fields.
string RepeatedFieldNumberList(const GeneratorOptions& options,
                               const Descriptor* desc) {
  std::vector<string> indices;
  for (int i = 0; i < desc->field_count(); i++) {
    const FieldDescriptor* field = desc->field(i);
    if (field->is_repeated() && !IsMap(options, field)) {
      indices.push_back(JSFieldIndex(field));
    }
  }
  return "[" + Join(indices, ",") + "]";
}
// JS literal: one inner array per (non-ignored) oneof, each containing the
// field indices that belong to that group.
string OneofGroupList(const Descriptor* desc) {
  std::vector<string> groups;
  for (int i = 0; i < desc->oneof_decl_count(); i++) {
    const OneofDescriptor* oneof = desc->oneof_decl(i);
    if (IgnoreOneof(oneof)) {
      continue;
    }

    std::vector<string> member_indices;
    for (int j = 0; j < oneof->field_count(); j++) {
      if (IgnoreField(oneof->field(j))) {
        continue;
      }
      member_indices.push_back(JSFieldIndex(oneof->field(j)));
    }
    groups.push_back("[" + Join(member_indices, ",") + "]");
  }
  return "[" + Join(groups, ",") + "]";
}
// JS expression indexing the containing message's oneof-groups array at this
// field's oneof index.
string JSOneofArray(const GeneratorOptions& options,
                    const FieldDescriptor* field) {
  const string groups_array =
      OneofFieldsArrayName(options, field->containing_type());
  return groups_array + "[" + JSOneofIndex(field->containing_oneof()) + "]";
}
// Returns the full name of |field|'s enum/message type with the longest
// common dotted prefix (beyond the file's package) shared with the containing
// type's name stripped off.
string RelativeTypeName(const FieldDescriptor* field) {
  assert(field->cpp_type() == FieldDescriptor::CPPTYPE_ENUM ||
         field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE);
  // For a field with an enum or message type, compute a name relative to the
  // path name of the message type containing this field.
  string package = field->file()->package();
  string containing_type = field->containing_type()->full_name() + ".";
  string type = (field->cpp_type() == FieldDescriptor::CPPTYPE_ENUM) ?
      field->enum_type()->full_name() : field->message_type()->full_name();

  // |prefix| is advanced as we find separators '.' past the common package
  // prefix that yield common prefixes in the containing type's name and this
  // type's name.
  int prefix = 0;
  for (int i = 0; i < type.size() && i < containing_type.size(); i++) {
    if (type[i] != containing_type[i]) {
      // First divergence: everything matched so far up to |prefix| is shared.
      break;
    }
    // Only advance past whole dotted components, and never strip components
    // inside the package name itself.
    if (type[i] == '.' && i >= package.size()) {
      prefix = i + 1;
    }
  }

  return type.substr(prefix);
}
// Name of the JS object holding |desc|'s extensions; MessageSet has a
// special, shared jspb container.
string JSExtensionsObjectName(const GeneratorOptions& options,
                              const FileDescriptor* from_file,
                              const Descriptor* desc) {
  // TODO(haberman): fix this for the kImportCommonJs case.
  if (desc->full_name() == "google.protobuf.bridge.MessageSet") {
    return "jspb.Message.messageSetExtensions";
  }
  return MaybeCrossFileRef(options, from_file, desc) + ".extensions";
}
// A map field's synthesized entry message stores the key in field number 1
// and the value in field number 2.
static const int kMapKeyField = 1;
static const int kMapValueField = 2;

// Returns the key field of |field|'s map-entry message.
const FieldDescriptor* MapFieldKey(const FieldDescriptor* field) {
  assert(field->is_map());
  return field->message_type()->FindFieldByNumber(kMapKeyField);
}

// Returns the value field of |field|'s map-entry message.
const FieldDescriptor* MapFieldValue(const FieldDescriptor* field) {
  assert(field->is_map());
  return field->message_type()->FindFieldByNumber(kMapValueField);
}
// Renders |field| as one line of .proto-style schema text, e.g.
// "optional int32 foo = 1;" or "map<string, int32> bar = 2;".
string FieldDefinition(const GeneratorOptions& options,
                       const FieldDescriptor* field) {
  if (IsMap(options, field)) {
    // Maps are rendered from their synthesized key/value entry fields.
    const FieldDescriptor* key_field = MapFieldKey(field);
    const FieldDescriptor* value_field = MapFieldValue(field);
    string key_type = ProtoTypeName(options, key_field);
    string value_type;
    if (value_field->type() == FieldDescriptor::TYPE_ENUM ||
        value_field->type() == FieldDescriptor::TYPE_MESSAGE) {
      value_type = RelativeTypeName(value_field);
    } else {
      value_type = ProtoTypeName(options, value_field);
    }
    return StringPrintf("map<%s, %s> %s = %d;",
                        key_type.c_str(),
                        value_type.c_str(),
                        field->name().c_str(),
                        field->number());
  } else {
    string qualifier = field->is_repeated() ? "repeated" :
        (field->is_optional() ? "optional" : "required");
    string type, name;
    if (field->type() == FieldDescriptor::TYPE_ENUM ||
        field->type() == FieldDescriptor::TYPE_MESSAGE) {
      type = RelativeTypeName(field);
      name = field->name();
    } else if (field->type() == FieldDescriptor::TYPE_GROUP) {
      // Groups are declared with the keyword "group" and named after their
      // message type rather than the field.
      type = "group";
      name = field->message_type()->name();
    } else {
      type = ProtoTypeName(options, field);
      name = field->name();
    }
    return StringPrintf("%s %s %s = %d;",
                        qualifier.c_str(),
                        type.c_str(),
                        name.c_str(),
                        field->number());
  }
}
// Returns extra JSDoc warning lines appropriate for |field|'s type (empty
// string when none apply).
string FieldComments(const FieldDescriptor* field, BytesMode bytes_mode) {
  string comments;
  if (field->cpp_type() == FieldDescriptor::CPPTYPE_BOOL) {
    comments +=
        " * Note that Boolean fields may be set to 0/1 when serialized from "
        "a Java server.\n"
        " * You should avoid comparisons like {@code val === true/false} in "
        "those cases.\n";
  }
  if (field->type() == FieldDescriptor::TYPE_BYTES && bytes_mode == BYTES_U8) {
    comments +=
        " * Note that Uint8Array is not supported on all browsers.\n"
        " * @see http://caniuse.com/Uint8Array\n";
  }
  return comments;
}
// True when |field| is a non-ignored extension that we should emit code for.
bool ShouldGenerateExtension(const FieldDescriptor* field) {
  return field->is_extension() && !IgnoreField(field);
}
// True when |desc| or any nested message declares a generated extension.
bool HasExtensions(const Descriptor* desc) {
  for (int ext = 0; ext < desc->extension_count(); ext++) {
    if (ShouldGenerateExtension(desc->extension(ext))) {
      return true;
    }
  }
  for (int nested = 0; nested < desc->nested_type_count(); nested++) {
    if (HasExtensions(desc->nested_type(nested))) {
      return true;
    }
  }
  return false;
}
// True when |file| (directly or via any of its messages) declares a generated
// extension.
bool HasExtensions(const FileDescriptor* file) {
  for (int ext = 0; ext < file->extension_count(); ext++) {
    if (ShouldGenerateExtension(file->extension(ext))) {
      return true;
    }
  }
  for (int msg = 0; msg < file->message_type_count(); msg++) {
    if (HasExtensions(file->message_type(msg))) {
      return true;
    }
  }
  return false;
}
// True when |desc| or any nested message declares a map field.
bool HasMap(const GeneratorOptions& options, const Descriptor* desc) {
  for (int f = 0; f < desc->field_count(); f++) {
    if (IsMap(options, desc->field(f))) {
      return true;
    }
  }
  for (int nested = 0; nested < desc->nested_type_count(); nested++) {
    if (HasMap(options, desc->nested_type(nested))) {
      return true;
    }
  }
  return false;
}
// True when any top-level message of |desc| (or its nested types) has a map.
bool FileHasMap(const GeneratorOptions& options, const FileDescriptor* desc) {
  for (int msg = 0; msg < desc->message_type_count(); msg++) {
    if (HasMap(options, desc->message_type(msg))) {
      return true;
    }
  }
  return false;
}
// True when the message declares at least one extension range.
bool IsExtendable(const Descriptor* desc) {
  return desc->extension_range_count() != 0;
}
// Returns the max index in the underlying data storage array beyond which the
// extension object is used, rendered as a decimal string ("-1" when no pivot
// is needed).
string GetPivot(const Descriptor* desc) {
  static const int kDefaultPivot = 500;

  // Find the max field number declared on the message (ignored fields
  // excluded).
  int max_field_number = 0;
  for (int i = 0; i < desc->field_count(); i++) {
    const FieldDescriptor* field = desc->field(i);
    if (IgnoreField(field)) {
      continue;
    }
    if (field->number() > max_field_number) {
      max_field_number = field->number();
    }
  }

  // A pivot is only required for extendable messages or very large field
  // numbers; it sits just past the highest field, capped at the default.
  if (!IsExtendable(desc) && max_field_number < kDefaultPivot) {
    return SimpleItoa(-1);
  }
  int pivot = max_field_number + 1;
  if (pivot > kDefaultPivot) {
    pivot = kDefaultPivot;
  }
  return SimpleItoa(pivot);
}
// Whether this field represents presence. For fields with presence, we
// generate extra methods (clearFoo() and hasFoo()) for this field.
bool HasFieldPresence(const GeneratorOptions& options,
                      const FieldDescriptor* field) {
  if (field->is_repeated() || field->is_map()) {
    // We say repeated fields and maps don't have presence, but we still do
    // generate clearFoo() methods for them through a special case elsewhere.
    return false;
  }
  if (UseBrokenPresenceSemantics(options, field)) {
    // Proto3 files with broken presence semantics have field presence.
    return true;
  }
  // Otherwise: submessages, oneof members, and all proto2 fields carry
  // presence.
  return field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE ||
         field->containing_oneof() != NULL ||
         field->file()->syntax() == FileDescriptor::SYNTAX_PROTO2;
}
// We use this to implement the semantics that the same file can be generated
// multiple times, but the last one wins. We never actually write the files,
// but we keep a set of which descriptors were the final one for a given
// filename.
class FileDeduplicator {
 public:
  explicit FileDeduplicator(const GeneratorOptions& options)
      : error_on_conflict_(options.error_on_name_conflict) {}

  // Registers |desc| as the (latest) generator of |filename|. On a filename
  // collision: fails with |*error| set when error_on_name_conflict is
  // enabled, otherwise the previous descriptor is evicted ("last one wins").
  // Returns false only on a reported conflict.
  bool AddFile(const string& filename, const void* desc, string* error) {
    // Single lookup: find the existing entry (if any) and reuse the iterator
    // instead of a second operator[] access.
    std::map<string, const void*>::iterator it =
        descs_by_filename_.find(filename);
    if (it != descs_by_filename_.end()) {
      if (error_on_conflict_) {
        *error = "Name conflict: file name " + filename +
                 " would be generated by two descriptors";
        return false;
      }
      // Evict the earlier descriptor; it no longer generates this file.
      allowed_descs_.erase(it->second);
      it->second = desc;
    } else {
      descs_by_filename_[filename] = desc;
    }
    allowed_descs_.insert(desc);
    return true;
  }

  // Copies the set of descriptors that survived deduplication.
  void GetAllowedSet(std::set<const void*>* allowed_set) {
    *allowed_set = allowed_descs_;
  }

 private:
  // Whether a filename collision is a hard error rather than last-wins.
  bool error_on_conflict_;
  // Latest descriptor registered for each filename.
  std::map<string, const void*> descs_by_filename_;
  // Descriptors currently allowed to generate their file.
  std::set<const void*> allowed_descs_;
};
// Post-order depth-first traversal: appends |file| to |list| after all of its
// transitive dependencies, visiting each file at most once (tracked by
// |seen|).
void DepthFirstSearch(const FileDescriptor* file,
                      std::vector<const FileDescriptor*>* list,
                      std::set<const FileDescriptor*>* seen) {
  const bool first_visit = seen->insert(file).second;
  if (!first_visit) {
    return;
  }

  for (int dep = 0; dep < file->dependency_count(); dep++) {
    DepthFirstSearch(file->dependency(dep), list, seen);
  }

  list->push_back(file);
}
// A functor for the predicate to remove_if() below. Returns true if a given
// FileDescriptor is not in the given set.
class NotInSet {
public:
explicit NotInSet(const std::set<const FileDescriptor*>& file_set)
: file_set_(file_set) {}
bool operator()(const FileDescriptor* file) {
return file_set_.count(file) == 0;
}
private:
const std::set<const FileDescriptor*>& file_set_;
};
// Generates an ordering of the input FileDescriptors that matches the logic
// of the old code generator. The order is significant because two different
// input files can generate the same output file, and the last one must win.
void GenerateJspbFileOrder(const std::vector<const FileDescriptor*>& input,
                           std::vector<const FileDescriptor*>* ordered) {
  // Depth-first ordering of all reachable files (including dependencies),
  // mimicking the --include_imports behavior used by the old codegen.
  ordered->clear();
  std::set<const FileDescriptor*> visited;
  std::set<const FileDescriptor*> requested;
  for (size_t i = 0; i < input.size(); i++) {
    DepthFirstSearch(input[i], ordered, &visited);
    requested.insert(input[i]);
  }

  // Drop files that were reached only as dependencies of the inputs.
  ordered->erase(
      std::remove_if(ordered->begin(), ordered->end(), NotInSet(requested)),
      ordered->end());
}
// If we're generating code in file-per-type mode, avoid overwriting files
// by choosing the last descriptor that writes each filename and permitting
// only those to generate code.
bool GenerateJspbAllowedSet(const GeneratorOptions& options,
                            const std::vector<const FileDescriptor*>& files,
                            std::set<const void*>* allowed_set,
                            string* error) {
  // Order the files so "last one wins" matches the old codegen's semantics.
  std::vector<const FileDescriptor*> files_ordered;
  GenerateJspbFileOrder(files, &files_ordered);

  // Choose the last descriptor for each filename.
  FileDeduplicator dedup(options);
  for (int i = 0; i < files_ordered.size(); i++) {
    // Every message type gets its own output file...
    for (int j = 0; j < files_ordered[i]->message_type_count(); j++) {
      const Descriptor* desc = files_ordered[i]->message_type(j);
      if (!dedup.AddFile(GetMessageFileName(options, desc), desc, error)) {
        return false;
      }
    }
    // ...as does every enum type.
    for (int j = 0; j < files_ordered[i]->enum_type_count(); j++) {
      const EnumDescriptor* desc = files_ordered[i]->enum_type(j);
      if (!dedup.AddFile(GetEnumFileName(options, desc), desc, error)) {
        return false;
      }
    }

    // Pull out all free-floating extensions and generate files for those too.
    bool has_extension = false;

    for (int j = 0; j < files_ordered[i]->extension_count(); j++) {
      if (ShouldGenerateExtension(files_ordered[i]->extension(j))) {
        has_extension = true;
      }
    }

    if (has_extension) {
      // The extensions file is keyed on the FileDescriptor itself.
      if (!dedup.AddFile(GetExtensionFileName(options, files_ordered[i]),
                         files_ordered[i], error)) {
        return false;
      }
    }
  }

  dedup.GetAllowedSet(allowed_set);

  return true;
}
} // anonymous namespace
// Emits the fixed file-overview banner placed at the top of every generated
// .js file, including the Closure suppression annotations.
void Generator::GenerateHeader(const GeneratorOptions& options,
                               io::Printer* printer) const {
  printer->Print("/**\n"
                 " * @fileoverview\n"
                 " * @enhanceable\n"
                 " * @suppress {messageConventions} JS Compiler reports an "
                 "error if a variable or\n"
                 " *     field starts with 'MSG_' and isn't a translatable "
                 "message.\n"
                 " * @public\n"
                 " */\n"
                 "// GENERATED CODE -- DO NOT EDIT!\n"
                 "\n");
}
// Collects provided symbols for every top-level message and enum in |file|.
void Generator::FindProvidesForFile(const GeneratorOptions& options,
                                    io::Printer* printer,
                                    const FileDescriptor* file,
                                    std::set<string>* provided) const {
  for (int msg = 0; msg < file->message_type_count(); msg++) {
    FindProvidesForMessage(options, printer, file->message_type(msg), provided);
  }
  for (int en = 0; en < file->enum_type_count(); en++) {
    FindProvidesForEnum(options, printer, file->enum_type(en), provided);
  }
}
// Collects provided symbols across all |files|, then emits a blank line.
void Generator::FindProvides(const GeneratorOptions& options,
                             io::Printer* printer,
                             const std::vector<const FileDescriptor*>& files,
                             std::set<string>* provided) const {
  for (size_t i = 0; i < files.size(); i++) {
    FindProvidesForFile(options, printer, files[i], provided);
  }

  printer->Print("\n");
}
void Generator::FindProvidesForMessage(
const GeneratorOptions& options,
io::Printer* printer,
const Descriptor* desc,
std::set<string>* provided) const {
if (IgnoreMessage(options, desc)) {
return;
}
string name = GetMessagePath(options, desc);
provided->insert(name);
for (int i = 0; i < desc->enum_type_count(); i++) {
FindProvidesForEnum(options, printer, desc->enum_type(i),
provided);
}
for (int i = 0; i < desc->nested_type_count(); i++) {
FindProvidesForMessage(options, printer, desc->nested_type(i),
provided);
}
}
void Generator::FindProvidesForEnum(const GeneratorOptions& options,
io::Printer* printer,
const EnumDescriptor* enumdesc,
std::set<string>* provided) const {
string name = GetEnumPath(options, enumdesc);
provided->insert(name);
}
void Generator::FindProvidesForFields(
const GeneratorOptions& options,
io::Printer* printer,
const std::vector<const FieldDescriptor*>& fields,
std::set<string>* provided) const {
for (int i = 0; i < fields.size(); i++) {
const FieldDescriptor* field = fields[i];
if (IgnoreField(field)) {
continue;
}
string name = GetFilePath(options, field->file()) + "." +
JSObjectFieldName(options, field);
provided->insert(name);
}
}
// Emits one declaration line per provided symbol: goog.provide() when using
// Closure imports, otherwise goog.exportSymbol() to build the object tree.
void Generator::GenerateProvides(const GeneratorOptions& options,
                                 io::Printer* printer,
                                 std::set<string>* provided) const {
  for (std::set<string>::iterator it = provided->begin();
       it != provided->end(); ++it) {
    if (options.import_style == GeneratorOptions::kImportClosure) {
      printer->Print("goog.provide('$name$');\n", "name", *it);
    } else {
      // We aren't using Closure's import system, but we use goog.exportSymbol()
      // to construct the expected tree of objects, eg.
      //
      //   goog.exportSymbol('foo.bar.Baz', null, this);
      //
      //   // Later generated code expects foo.bar = {} to exist:
      //   foo.bar.Baz = function() { /* ... */ }
      printer->Print("goog.exportSymbol('$name$', null, global);\n", "name",
                     *it);
    }
  }
}
// Collects and emits the requires/forwardDeclares for one message type.
void Generator::GenerateRequiresForMessage(const GeneratorOptions& options,
                                           io::Printer* printer,
                                           const Descriptor* desc,
                                           std::set<string>* provided) const {
  std::set<string> required;
  std::set<string> forwards;
  bool have_message = false;
  FindRequiresForMessage(options, desc, &required, &forwards, &have_message);

  const bool require_extension = HasExtensions(desc);
  const bool require_map = HasMap(options, desc);
  GenerateRequiresImpl(options, printer, &required, &forwards, provided,
                       /* require_jspb = */ have_message,
                       /* require_extension = */ require_extension,
                       /* require_map = */ require_map);
}
// Collects and emits the full set of requires/forwardDeclares for a
// multi-file library build (Closure import style only).
void Generator::GenerateRequiresForLibrary(
    const GeneratorOptions& options, io::Printer* printer,
    const std::vector<const FileDescriptor*>& files,
    std::set<string>* provided) const {
  GOOGLE_CHECK_EQ(options.import_style, GeneratorOptions::kImportClosure);
  // For Closure imports we need to import every message type individually.
  std::set<string> required;
  std::set<string> forwards;
  bool have_extensions = false;
  bool have_map = false;
  bool have_message = false;

  for (int i = 0; i < files.size(); i++) {
    // Per-message requires (fields, nested types, extensions).
    for (int j = 0; j < files[i]->message_type_count(); j++) {
      const Descriptor* desc = files[i]->message_type(j);
      if (!IgnoreMessage(options, desc)) {
        FindRequiresForMessage(options, desc, &required, &forwards,
                               &have_message);
      }
    }

    if (!have_extensions && HasExtensions(files[i])) {
      have_extensions = true;
    }

    if (!have_map && FileHasMap(options, files[i])) {
      have_map = true;
    }

    // File-level extensions additionally require their extended message,
    // except for the special MessageSet container.
    for (int j = 0; j < files[i]->extension_count(); j++) {
      const FieldDescriptor* extension = files[i]->extension(j);
      if (IgnoreField(extension)) {
        continue;
      }
      if (extension->containing_type()->full_name() !=
          "google.protobuf.bridge.MessageSet") {
        required.insert(GetMessagePath(options, extension->containing_type()));
      }
      FindRequiresForField(options, extension, &required, &forwards);
      have_extensions = true;
    }
  }

  GenerateRequiresImpl(options, printer, &required, &forwards, provided,
                       /* require_jspb = */ have_message,
                       /* require_extension = */ have_extensions,
                       /* require_map = */ have_map);
}
// Collects and emits the requires/forwardDeclares needed by a set of
// free-floating extension fields.
void Generator::GenerateRequiresForExtensions(
    const GeneratorOptions& options, io::Printer* printer,
    const std::vector<const FieldDescriptor*>& fields,
    std::set<string>* provided) const {
  std::set<string> required;
  std::set<string> forwards;
  for (size_t i = 0; i < fields.size(); i++) {
    if (IgnoreField(fields[i])) {
      continue;
    }
    FindRequiresForExtension(options, fields[i], &required, &forwards);
  }

  const bool have_extensions = !fields.empty();
  GenerateRequiresImpl(options, printer, &required, &forwards, provided,
                       /* require_jspb = */ false,
                       /* require_extension = */ have_extensions,
                       /* require_map = */ false);
}
// Emits goog.require() lines for |required| and goog.forwardDeclare() lines
// for |forwards|, skipping anything already in |provided|. The require_*
// flags pull in the jspb runtime symbols the generated code depends on.
void Generator::GenerateRequiresImpl(const GeneratorOptions& options,
                                     io::Printer* printer,
                                     std::set<string>* required,
                                     std::set<string>* forwards,
                                     std::set<string>* provided,
                                     bool require_jspb, bool require_extension,
                                     bool require_map) const {
  if (require_jspb) {
    // Any generated message needs the core jspb runtime.
    required->insert("jspb.Message");
    required->insert("jspb.BinaryReader");
    required->insert("jspb.BinaryWriter");
  }
  if (require_extension) {
    required->insert("jspb.ExtensionFieldBinaryInfo");
    required->insert("jspb.ExtensionFieldInfo");
  }

  if (require_map) {
    required->insert("jspb.Map");
  }

  std::set<string>::iterator it;
  for (it = required->begin(); it != required->end(); ++it) {
    // Don't require a symbol this file itself provides.
    if (provided->find(*it) != provided->end()) {
      continue;
    }
    printer->Print("goog.require('$name$');\n",
                   "name", *it);
  }

  printer->Print("\n");

  for (it = forwards->begin(); it != forwards->end(); ++it) {
    if (provided->find(*it) != provided->end()) {
      continue;
    }
    printer->Print("goog.forwardDeclare('$name$');\n",
                   "name", *it);
  }
}
// Whether only a namespace (no class body) should be generated for |desc|.
// Always false here; internal builds may differ.
bool NamespaceOnly(const Descriptor* desc) {
  return false;
}
void Generator::FindRequiresForMessage(
const GeneratorOptions& options,
const Descriptor* desc,
std::set<string>* required,
std::set<string>* forwards,
bool* have_message) const {
if (!NamespaceOnly(desc)) {
*have_message = true;
for (int i = 0; i < desc->field_count(); i++) {
const FieldDescriptor* field = desc->field(i);
if (IgnoreField(field)) {
continue;
}
FindRequiresForField(options, field, required, forwards);
}
}
for (int i = 0; i < desc->extension_count(); i++) {
const FieldDescriptor* field = desc->extension(i);
if (IgnoreField(field)) {
continue;
}
FindRequiresForExtension(options, field, required, forwards);
}
for (int i = 0; i < desc->nested_type_count(); i++) {
FindRequiresForMessage(options, desc->nested_type(i), required, forwards,
have_message);
}
}
// Records the require or forwardDeclare implied by |field|'s type: enum types
// are forward-declared (or required when add_require_for_enums is set);
// non-ignored message types are always required.
void Generator::FindRequiresForField(const GeneratorOptions& options,
                                     const FieldDescriptor* field,
                                     std::set<string>* required,
                                     std::set<string>* forwards) const {
  if (field->cpp_type() == FieldDescriptor::CPPTYPE_ENUM &&
      // N.B.: file-level extensions with enum type do *not* create
      // dependencies, as per original codegen.
      !(field->is_extension() && field->extension_scope() == NULL)) {
    if (options.add_require_for_enums) {
      required->insert(GetEnumPath(options, field->enum_type()));
    } else {
      forwards->insert(GetEnumPath(options, field->enum_type()));
    }
  } else if (field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE) {
    if (!IgnoreMessage(options, field->message_type())) {
      required->insert(GetMessagePath(options, field->message_type()));
    }
  }
}
// Records the requires for an extension field: the extended message (except
// the special MessageSet container) plus whatever the field's type implies.
void Generator::FindRequiresForExtension(const GeneratorOptions& options,
                                         const FieldDescriptor* field,
                                         std::set<string>* required,
                                         std::set<string>* forwards) const {
  const bool extends_message_set =
      field->containing_type()->full_name() ==
      "google.protobuf.bridge.MessageSet";
  if (!extends_message_set) {
    required->insert(GetMessagePath(options, field->containing_type()));
  }
  FindRequiresForField(options, field, required, forwards);
}
// Emits goog.setTestOnly() when the generator was invoked in testonly mode,
// followed by a blank line either way.
void Generator::GenerateTestOnly(const GeneratorOptions& options,
                                 io::Printer* printer) const {
  if (options.testonly) {
    printer->Print("goog.setTestOnly();\n\n");
  }
  printer->Print("\n");
}
// Emits code for every top-level message class and enum in |file|.
void Generator::GenerateClassesAndEnums(const GeneratorOptions& options,
                                        io::Printer* printer,
                                        const FileDescriptor* file) const {
  for (int msg = 0; msg < file->message_type_count(); msg++) {
    GenerateClass(options, printer, file->message_type(msg));
  }
  for (int en = 0; en < file->enum_type_count(); en++) {
    GenerateEnum(options, printer, file->enum_type(en));
  }
}
// Emits everything for one message class -- constructor, field info,
// toObject, binary (de)serializers, nested types, registration, accessors,
// and extension info -- in an order chosen so each piece only references
// definitions emitted before it.
void Generator::GenerateClass(const GeneratorOptions& options,
                              io::Printer* printer,
                              const Descriptor* desc) const {
  if (IgnoreMessage(options, desc)) {
    return;
  }

  if (!NamespaceOnly(desc)) {
    printer->Print("\n");
    GenerateClassConstructor(options, printer, desc);
    GenerateClassFieldInfo(options, printer, desc);


    GenerateClassToObject(options, printer, desc);
    // These must come *before* the extension-field info generation in
    // GenerateClassRegistration so that references to the binary
    // serialization/deserialization functions may be placed in the extension
    // objects.
    GenerateClassDeserializeBinary(options, printer, desc);
    GenerateClassSerializeBinary(options, printer, desc);
  }

  // Recurse on nested types. These must come *before* the extension-field
  // info generation in GenerateClassRegistration so that extensions that
  // reference nested types proceed the definitions of the nested types.
  for (int i = 0; i < desc->enum_type_count(); i++) {
    GenerateEnum(options, printer, desc->enum_type(i));
  }
  for (int i = 0; i < desc->nested_type_count(); i++) {
    GenerateClass(options, printer, desc->nested_type(i));
  }

  if (!NamespaceOnly(desc)) {
    GenerateClassRegistration(options, printer, desc);
    GenerateClassFields(options, printer, desc);
    // MessageSet's extension info is special-cased elsewhere.
    if (IsExtendable(desc) && desc->full_name() != "google.protobuf.bridge.MessageSet") {
      GenerateClassExtensionFieldInfo(options, printer, desc);
    }

    if (options.import_style != GeneratorOptions::kImportClosure) {
      for (int i = 0; i < desc->extension_count(); i++) {
        GenerateExtension(options, printer, desc->extension(i));
      }
    }
  }
}
// Emits the message constructor: its JSDoc block, the function body calling
// jspb.Message.initialize(), the goog.inherits() call, and the debug-only
// displayName assignment.
void Generator::GenerateClassConstructor(const GeneratorOptions& options,
                                         io::Printer* printer,
                                         const Descriptor* desc) const {
  printer->Print(
      "/**\n"
      " * Generated by JsPbCodeGenerator.\n"
      " * @param {Array=} opt_data Optional initial data array, typically "
      "from a\n"
      " * server response, or constructed directly in Javascript. The array "
      "is used\n"
      " * in place and becomes part of the constructed object. It is not "
      "cloned.\n"
      " * If no data is provided, the constructed object will be empty, but "
      "still\n"
      " * valid.\n"
      " * @extends {jspb.Message}\n"
      " * @constructor\n"
      " */\n"
      "$classname$ = function(opt_data) {\n",
      "classname", GetMessagePath(options, desc));
  string message_id = GetMessageId(desc);
  printer->Print(
      "  jspb.Message.initialize(this, opt_data, $messageId$, $pivot$, "
      "$rptfields$, $oneoffields$);\n",
      // Messages without an explicit id pass '' (responses) or 0 (others).
      "messageId", !message_id.empty() ?
                   ("'" + message_id + "'") :
                   (IsResponse(desc) ? "''" : "0"),
      "pivot", GetPivot(desc),
      "rptfields", RepeatedFieldsArrayName(options, desc),
      "oneoffields", OneofFieldsArrayName(options, desc));
  printer->Print(
      "};\n"
      "goog.inherits($classname$, jspb.Message);\n"
      "if (goog.DEBUG && !COMPILED) {\n"
      "  $classname$.displayName = '$classname$';\n"
      "}\n",
      "classname", GetMessagePath(options, desc));
}
// Emits class-level metadata arrays consumed by the jspb runtime: the list
// of repeated-field numbers (if any) and the oneof group definitions (if
// any), plus the per-oneof case enums.
void Generator::GenerateClassFieldInfo(const GeneratorOptions& options,
                                       io::Printer* printer,
                                       const Descriptor* desc) const {
  if (HasRepeatedFields(options, desc)) {
    printer->Print(
        "/**\n"
        " * List of repeated fields within this message type.\n"
        " * @private {!Array<number>}\n"
        " * @const\n"
        " */\n"
        "$classname$$rptfieldarray$ = $rptfields$;\n"
        "\n",
        "classname", GetMessagePath(options, desc),
        "rptfieldarray", kRepeatedFieldArrayName,
        "rptfields", RepeatedFieldNumberList(options, desc));
  }

  if (HasOneofFields(desc)) {
    printer->Print(
        "/**\n"
        " * Oneof group definitions for this message. Each group defines the "
        "field\n"
        " * numbers belonging to that group. When of these fields' value is "
        "set, all\n"
        " * other fields in the group are cleared. During deserialization, if "
        "multiple\n"
        " * fields are encountered for a group, only the last value seen will "
        "be kept.\n"
        " * @private {!Array<!Array<number>>}\n"
        " * @const\n"
        " */\n"
        "$classname$$oneofgrouparray$ = $oneofgroups$;\n"
        "\n",
        "classname", GetMessagePath(options, desc),
        "oneofgrouparray", kOneofGroupArrayName,
        "oneofgroups", OneofGroupList(desc));

    // One case enum + getXCase() accessor per (non-ignored) oneof.
    for (int i = 0; i < desc->oneof_decl_count(); i++) {
      if (IgnoreOneof(desc->oneof_decl(i))) {
        continue;
      }
      GenerateOneofCaseDefinition(options, printer, desc->oneof_decl(i));
    }
  }
}
// Emits a messageXid property on the message prototype, computed via the
// xid() function from the class name. Only called when xid support is
// enabled (see caller).
void Generator::GenerateClassXid(const GeneratorOptions& options,
                                 io::Printer* printer,
                                 const Descriptor* desc) const {
  printer->Print(
      "\n"
      "\n"
      "$class$.prototype.messageXid = xid('$class$');\n",
      "class", GetMessagePath(options, desc));
}
// Emits the $oneof$Case enum (with a 0-valued *_NOT_SET sentinel followed by
// one entry per member field) and the corresponding get$oneof$Case()
// accessor, which delegates to jspb.Message.computeOneofCase.
void Generator::GenerateOneofCaseDefinition(
    const GeneratorOptions& options,
    io::Printer* printer,
    const OneofDescriptor* oneof) const {
  printer->Print(
      "/**\n"
      " * @enum {number}\n"
      " */\n"
      "$classname$.$oneof$Case = {\n"
      "  $upcase$_NOT_SET: 0",
      "classname", GetMessagePath(options, oneof->containing_type()),
      "oneof", JSOneofName(oneof),
      "upcase", ToEnumCase(oneof->name()));

  // Each member field becomes an enum entry keyed by its field index.
  for (int i = 0; i < oneof->field_count(); i++) {
    if (IgnoreField(oneof->field(i))) {
      continue;
    }

    printer->Print(
        ",\n"
        "  $upcase$: $number$",
        "upcase", ToEnumCase(oneof->field(i)->name()),
        "number", JSFieldIndex(oneof->field(i)));
  }

  printer->Print(
      "\n"
      "};\n"
      "\n"
      "/**\n"
      " * @return {$class$.$oneof$Case}\n"
      " */\n"
      "$class$.prototype.get$oneof$Case = function() {\n"
      "  return /** @type {$class$.$oneof$Case} */(jspb.Message."
      "computeOneofCase(this, $class$.oneofGroups_[$oneofindex$]));\n"
      "};\n"
      "\n",
      "class", GetMessagePath(options, oneof->containing_type()),
      "oneof", JSOneofName(oneof),
      "oneofindex", JSOneofIndex(oneof));
}
// Emits the toObject() instance method and its static counterpart, which
// convert a message into a plain JS object (e.g. for Soy templates). The
// whole definition is guarded by jspb.Message.GENERATE_TO_OBJECT so it can
// be compiled out.
void Generator::GenerateClassToObject(const GeneratorOptions& options,
                                      io::Printer* printer,
                                      const Descriptor* desc) const {
  printer->Print(
      "\n"
      "\n"
      "if (jspb.Message.GENERATE_TO_OBJECT) {\n"
      "/**\n"
      " * Creates an object representation of this proto suitable for use in "
      "Soy templates.\n"
      " * Field names that are reserved in JavaScript and will be renamed to "
      "pb_name.\n"
      " * To access a reserved field use, foo.pb_<name>, eg, foo.pb_default.\n"
      " * For the list of reserved names please see:\n"
      " *     com.google.apps.jspb.JsClassTemplate.JS_RESERVED_WORDS.\n"
      " * @param {boolean=} opt_includeInstance Whether to include the JSPB "
      "instance\n"
      " *     for transitional soy proto support: http://goto/soy-param-"
      "migration\n"
      " * @return {!Object}\n"
      " */\n"
      "$classname$.prototype.toObject = function(opt_includeInstance) {\n"
      "  return $classname$.toObject(opt_includeInstance, this);\n"
      "};\n"
      "\n"
      "\n"
      "/**\n"
      " * Static version of the {@see toObject} method.\n"
      " * @param {boolean|undefined} includeInstance Whether to include the "
      "JSPB\n"
      " *     instance for transitional soy proto support:\n"
      " *     http://goto/soy-param-migration\n"
      " * @param {!$classname$} msg The msg instance to transform.\n"
      " * @return {!Object}\n"
      " * @suppress {unusedLocalVariables} f is only used for nested messages\n"
      " */\n"
      "$classname$.toObject = function(includeInstance, msg) {\n"
      "  var f, obj = {",
      "classname", GetMessagePath(options, desc));

  // |first| tracks whether a comma-newline separator is needed before the
  // next object-literal entry.
  bool first = true;
  for (int i = 0; i < desc->field_count(); i++) {
    const FieldDescriptor* field = desc->field(i);
    if (IgnoreField(field)) {
      continue;
    }

    if (!first) {
      printer->Print(",\n    ");
    } else {
      printer->Print("\n    ");
      first = false;
    }

    GenerateClassFieldToObject(options, printer, field);
  }

  if (!first) {
    printer->Print("\n  };\n\n");
  } else {
    printer->Print("\n\n  };\n\n");
  }

  if (IsExtendable(desc)) {
    printer->Print(
        "  jspb.Message.toObjectExtension(/** @type {!jspb.Message} */ (msg), "
        "obj,\n"
        "      $extObject$, $class$.prototype.getExtension,\n"
        "      includeInstance);\n",
        "extObject", JSExtensionsObjectName(options, desc->file(), desc),
        "class", GetMessagePath(options, desc));
  }

  printer->Print(
      "  if (includeInstance) {\n"
      "    obj.$$jspbMessageInstance = msg;\n"
      "  }\n"
      "  return obj;\n"
      "};\n"
      "}\n"
      "\n"
      "\n",
      "classname", GetMessagePath(options, desc));
}
// Emits a JS expression that reads |field|'s raw value from |obj_reference|
// (e.g. "msg" or "this"). When |use_default| is true the expression falls
// back to the field default; float/double fields get extra coercion so that
// serialized "NaN"/"Infinity" strings become real numbers.
void Generator::GenerateFieldValueExpression(io::Printer* printer,
                                             const char *obj_reference,
                                             const FieldDescriptor* field,
                                             bool use_default) const {
  bool is_float_or_double =
      field->cpp_type() == FieldDescriptor::CPPTYPE_FLOAT ||
      field->cpp_type() == FieldDescriptor::CPPTYPE_DOUBLE;
  if (use_default) {
    if (is_float_or_double) {
      // Coerce "Nan" and "Infinity" to actual float values.
      //
      // This will change null to 0, but that doesn't matter since we're getting
      // with a default.
      printer->Print("+");
    }

    printer->Print(
        "jspb.Message.getFieldWithDefault($obj$, $index$, $default$)",
        "obj", obj_reference,
        "index", JSFieldIndex(field),
        "default", JSFieldDefault(field));
  } else {
    if (is_float_or_double) {
      if (field->is_required()) {
        // Use "+" to convert all fields to numeric (including null).
        printer->Print(
            "+jspb.Message.getField($obj$, $index$)",
            "index", JSFieldIndex(field),
            "obj", obj_reference);
      } else {
        // Converts "NaN" and "Infinity" while preserving null.
        printer->Print(
            "jspb.Message.get$cardinality$FloatingPointField($obj$, $index$)",
            "cardinality", field->is_repeated() ? "Repeated" : "Optional",
            "index", JSFieldIndex(field),
            "obj", obj_reference);
      }
    } else {
      printer->Print("jspb.Message.get$cardinality$Field($obj$, $index$)",
                     "cardinality", field->is_repeated() ? "Repeated" : "",
                     "index", JSFieldIndex(field),
                     "obj", obj_reference);
    }
  }
}
// Emits one "fieldname: <expr>" entry of the toObject() object literal for
// |field|. Maps, message fields (singular and repeated), bytes fields, and
// plain primitives each get a different value expression.
void Generator::GenerateClassFieldToObject(const GeneratorOptions& options,
                                           io::Printer* printer,
                                           const FieldDescriptor* field) const {
  printer->Print("$fieldname$: ",
                 "fieldname", JSObjectFieldName(options, field));

  if (IsMap(options, field)) {
    const FieldDescriptor* value_field = MapFieldValue(field);
    // If the map values are of a message type, we must provide their static
    // toObject() method; otherwise we pass undefined for that argument.
    string value_to_object;
    if (value_field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE) {
      value_to_object =
          GetMessagePath(options, value_field->message_type()) + ".toObject";
    } else {
      value_to_object = "undefined";
    }
    printer->Print(
        "(f = msg.get$name$()) ? f.toObject(includeInstance, $valuetoobject$) "
        ": []",
        "name", JSGetterName(options, field), "valuetoobject", value_to_object);
  } else if (field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE) {
    // Message field.
    if (field->is_repeated()) {
      {
        printer->Print("jspb.Message.toObjectList(msg.get$getter$(),\n"
                       "    $type$.toObject, includeInstance)",
                       "getter", JSGetterName(options, field),
                       "type", SubmessageTypeRef(options, field));
      }
    } else {
      printer->Print("(f = msg.get$getter$()) && "
                     "$type$.toObject(includeInstance, f)",
                     "getter", JSGetterName(options, field),
                     "type", SubmessageTypeRef(options, field));
    }
  } else if (field->type() == FieldDescriptor::TYPE_BYTES) {
    // For bytes fields we want to always return the B64 data.
    printer->Print("msg.get$getter$()",
                   "getter", JSGetterName(options, field, BYTES_B64));
  } else {
    bool use_default = field->has_default_value();

    if (field->file()->syntax() == FileDescriptor::SYNTAX_PROTO3 &&
        // Repeated fields get initialized to their default in the constructor
        // (why?), so we emit a plain getField() call for them.
        !field->is_repeated() && !UseBrokenPresenceSemantics(options, field)) {
      // Proto3 puts all defaults (including implicit defaults) in toObject().
      // But for proto2 we leave the existing semantics unchanged: unset fields
      // without default are unset.
      use_default = true;
    }

    // We don't implement this by calling the accessors, because the semantics
    // of the accessors are changing independently of the toObject() semantics.
    // We are migrating the accessors to return defaults instead of null, but
    // it may take longer to migrate toObject (or we might not want to do it at
    // all).  So we want to generate independent code.
    GenerateFieldValueExpression(printer, "msg", field, use_default);
  }
}
// Emits the static fromObject() factory, guarded by
// jspb.Message.GENERATE_FROM_OBJECT, which builds a message instance from a
// plain JS object representation.
void Generator::GenerateClassFromObject(const GeneratorOptions& options,
                                        io::Printer* printer,
                                        const Descriptor* desc) const {
  printer->Print(
      "if (jspb.Message.GENERATE_FROM_OBJECT) {\n"
      "/**\n"
      " * Loads data from an object into a new instance of this proto.\n"
      " * @param {!Object} obj The object representation of this proto to\n"
      " *     load the data from.\n"
      " * @return {!$classname$}\n"
      " */\n"
      "$classname$.fromObject = function(obj) {\n"
      "  var f, msg = new $classname$();\n",
      "classname", GetMessagePath(options, desc));

  // NOTE(review): unlike the other per-field loops in this file, this one
  // does not skip IgnoreField() fields — confirm this is intentional.
  for (int i = 0; i < desc->field_count(); i++) {
    const FieldDescriptor* field = desc->field(i);
    GenerateClassFieldFromObject(options, printer, field);
  }

  printer->Print(
      "  return msg;\n"
      "};\n"
      "}\n");
}
// Emits the fromObject() statement that loads one field from the plain
// object into the new message, if present on the object. Map, message, and
// primitive fields each use a different jspb.Message setter.
void Generator::GenerateClassFieldFromObject(
    const GeneratorOptions& options,
    io::Printer* printer,
    const FieldDescriptor* field) const {
  if (IsMap(options, field)) {
    const FieldDescriptor* value_field = MapFieldValue(field);
    if (value_field->type() == FieldDescriptor::TYPE_MESSAGE) {
      // Since the map values are of message type, we have to do some extra work
      // to recursively call fromObject() on them before setting the map field.
      printer->Print(
          "  goog.isDef(obj.$name$) && jspb.Message.setWrapperField(\n"
          "      msg, $index$, jspb.Map.fromObject(obj.$name$, $fieldclass$, "
          "$fieldclass$.fromObject));\n",
          "name", JSObjectFieldName(options, field),
          "index", JSFieldIndex(field),
          "fieldclass", GetMessagePath(options, value_field->message_type()));
    } else {
      // `msg` is a newly-constructed message object that has not yet built any
      // map containers wrapping underlying arrays, so we can simply directly
      // set the array here without fear of a stale wrapper.
      printer->Print(
          "  goog.isDef(obj.$name$) && "
          "jspb.Message.setField(msg, $index$, obj.$name$);\n",
          "name", JSObjectFieldName(options, field),
          "index", JSFieldIndex(field));
    }
  } else if (field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE) {
    // Message field (singular or repeated)
    if (field->is_repeated()) {
      {
        printer->Print(
            "  goog.isDef(obj.$name$) && "
            "jspb.Message.setRepeatedWrapperField(\n"
            "      msg, $index$, goog.array.map(obj.$name$, function(i) {\n"
            "        return $fieldclass$.fromObject(i);\n"
            "      }));\n",
            "name", JSObjectFieldName(options, field),
            "index", JSFieldIndex(field),
            "fieldclass", SubmessageTypeRef(options, field));
      }
    } else {
      printer->Print(
          "  goog.isDef(obj.$name$) && jspb.Message.setWrapperField(\n"
          "      msg, $index$, $fieldclass$.fromObject(obj.$name$));\n",
          "name", JSObjectFieldName(options, field),
          "index", JSFieldIndex(field),
          "fieldclass", SubmessageTypeRef(options, field));
    }
  } else {
    // Simple (primitive) field.
    printer->Print(
        "  goog.isDef(obj.$name$) && jspb.Message.setField(msg, $index$, "
        "obj.$name$);\n",
        "name", JSObjectFieldName(options, field),
        "index", JSFieldIndex(field));
  }
}
void Generator::GenerateClassRegistration(const GeneratorOptions& options,
io::Printer* printer,
const Descriptor* desc) const {
// Register any extensions defined inside this message type.
for (int i = 0; i < desc->extension_count(); i++) {
const FieldDescriptor* extension = desc->extension(i);
if (ShouldGenerateExtension(extension)) {
GenerateExtension(options, printer, extension);
}
}
}
void Generator::GenerateClassFields(const GeneratorOptions& options,
io::Printer* printer,
const Descriptor* desc) const {
for (int i = 0; i < desc->field_count(); i++) {
if (!IgnoreField(desc->field(i))) {
GenerateClassField(options, printer, desc->field(i));
}
}
}
// Free helper (not a Generator member): emits a type-conversion getter that
// wraps the canonical bytes getter, returning the data as base64 string or
// Uint8Array depending on |bytes_mode|.
void GenerateBytesWrapper(const GeneratorOptions& options,
                          io::Printer* printer,
                          const FieldDescriptor* field,
                          BytesMode bytes_mode) {
  string type = JSFieldTypeAnnotation(
      options, field,
      /* is_setter_argument = */ false,
      /* force_present = */ false,
      /* singular_if_not_packed = */ false, bytes_mode);
  printer->Print(
      "/**\n"
      " * $fielddef$\n"
      "$comment$"
      " * This is a type-conversion wrapper around `get$defname$()`\n"
      " * @return {$type$}\n"
      " */\n"
      "$class$.prototype.get$name$ = function() {\n"
      "  return /** @type {$type$} */ (jspb.Message.bytes$list$As$suffix$(\n"
      "      this.get$defname$()));\n"
      "};\n"
      "\n"
      "\n",
      "fielddef", FieldDefinition(options, field),
      "comment", FieldComments(field, bytes_mode),
      "type", type,
      "class", GetMessagePath(options, field->containing_type()),
      "name", JSGetterName(options, field, bytes_mode),
      "list", field->is_repeated() ? "List" : "",
      "suffix", JSByteGetterSuffix(bytes_mode),
      "defname", JSGetterName(options, field, BYTES_DEFAULT));
}
// Emits the full accessor surface for a single field: getter and setter,
// plus add*/clear*/has* methods where applicable. Three main code paths:
// map fields (lazy jspb.Map wrapper), message fields (wrapper-field
// accessors), and primitive fields (plain getField/setField).
//
// Fix: the has$name$() JSDoc previously emitted "@return {!boolean}"; in
// Closure type annotations primitives are never nullable, so the "!" prefix
// is redundant and rejected/flagged by the Closure compiler. It now emits
// "@return {boolean}".
void Generator::GenerateClassField(const GeneratorOptions& options,
                                   io::Printer* printer,
                                   const FieldDescriptor* field) const {
  if (IsMap(options, field)) {
    const FieldDescriptor* key_field = MapFieldKey(field);
    const FieldDescriptor* value_field = MapFieldValue(field);
    // Map field: special handling to instantiate the map object on demand.
    string key_type =
        JSFieldTypeAnnotation(
            options, key_field,
            /* is_setter_argument = */ false,
            /* force_present = */ true,
            /* singular_if_not_packed = */ false);
    string value_type =
        JSFieldTypeAnnotation(
            options, value_field,
            /* is_setter_argument = */ false,
            /* force_present = */ true,
            /* singular_if_not_packed = */ false);

    printer->Print(
        "/**\n"
        " * $fielddef$\n"
        " * @param {boolean=} opt_noLazyCreate Do not create the map if\n"
        " * empty, instead returning `undefined`\n"
        " * @return {!jspb.Map<$keytype$,$valuetype$>}\n"
        " */\n",
        "fielddef", FieldDefinition(options, field),
        "keytype", key_type,
        "valuetype", value_type);
    printer->Print(
        "$class$.prototype.get$name$ = function(opt_noLazyCreate) {\n"
        "  return /** @type {!jspb.Map<$keytype$,$valuetype$>} */ (\n",
        "class", GetMessagePath(options, field->containing_type()),
        "name", JSGetterName(options, field),
        "keytype", key_type,
        "valuetype", value_type);
    printer->Print(
        "      jspb.Message.getMapField(this, $index$, opt_noLazyCreate",
        "index", JSFieldIndex(field));

    // Message-valued maps need the value message constructor so the map can
    // deserialize entries; other value types pass null.
    if (value_field->type() == FieldDescriptor::TYPE_MESSAGE) {
      printer->Print(
          ",\n"
          "      $messageType$",
          "messageType", GetMessagePath(options, value_field->message_type()));
    } else {
      printer->Print(",\n"
                     "      null");
    }

    printer->Print(
        "));\n");

    printer->Print(
        "};\n"
        "\n"
        "\n");
  } else if (field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE) {
    // Message field: special handling in order to wrap the underlying data
    // array with a message object.
    printer->Print(
        "/**\n"
        " * $fielddef$\n"
        "$comment$"
        " * @return {$type$}\n"
        " */\n",
        "fielddef", FieldDefinition(options, field),
        "comment", FieldComments(field, BYTES_DEFAULT),
        "type", JSFieldTypeAnnotation(options, field,
                                      /* is_setter_argument = */ false,
                                      /* force_present = */ false,
                                      /* singular_if_not_packed = */ false));
    printer->Print(
        "$class$.prototype.get$name$ = function() {\n"
        "  return /** @type{$type$} */ (\n"
        "    jspb.Message.get$rpt$WrapperField(this, $wrapperclass$, "
        "$index$$required$));\n"
        "};\n"
        "\n"
        "\n",
        "class", GetMessagePath(options, field->containing_type()),
        "name", JSGetterName(options, field),
        "type", JSFieldTypeAnnotation(options, field,
                                      /* is_setter_argument = */ false,
                                      /* force_present = */ false,
                                      /* singular_if_not_packed = */ false),
        "rpt", (field->is_repeated() ? "Repeated" : ""),
        "index", JSFieldIndex(field),
        "wrapperclass", SubmessageTypeRef(options, field),
        // Required fields pass the extra ", 1" flag to the wrapper getter.
        "required", (field->label() == FieldDescriptor::LABEL_REQUIRED ?
                     ", 1" : ""));
    printer->Print(
        "/** @param {$optionaltype$} value$returndoc$ */\n"
        "$class$.prototype.set$name$ = function(value) {\n"
        "  jspb.Message.set$oneoftag$$repeatedtag$WrapperField(",
        "optionaltype",
        JSFieldTypeAnnotation(options, field,
                              /* is_setter_argument = */ true,
                              /* force_present = */ false,
                              /* singular_if_not_packed = */ false),
        "returndoc", JSReturnDoc(options, field),
        "class", GetMessagePath(options, field->containing_type()),
        "name", JSGetterName(options, field),
        "oneoftag", (field->containing_oneof() ? "Oneof" : ""),
        "repeatedtag", (field->is_repeated() ? "Repeated" : ""));

    printer->Print(
        "this, $index$$oneofgroup$, value);$returnvalue$\n"
        "};\n"
        "\n"
        "\n",
        "index", JSFieldIndex(field),
        "oneofgroup", (field->containing_oneof() ?
                       (", " + JSOneofArray(options, field)) : ""),
        "returnvalue", JSReturnClause(field));

    if (field->is_repeated()) {
      GenerateRepeatedMessageHelperMethods(options, printer, field);
    }

  } else {
    // Historical switch for emitting raw (untyped) accessors; currently
    // always off, but the branches it guards are kept below.
    bool untyped = false;

    // Simple (primitive) field, either singular or repeated.

    // TODO(b/26173701): Always use BYTES_DEFAULT for the getter return type;
    // at this point we "lie" to non-binary users and tell the return
    // type is always base64 string, pending a LSC to migrate to typed getters.
    BytesMode bytes_mode =
        field->type() == FieldDescriptor::TYPE_BYTES && !options.binary ?
            BYTES_B64 : BYTES_DEFAULT;
    string typed_annotation = JSFieldTypeAnnotation(
        options, field,
        /* is_setter_argument = */ false,
        /* force_present = */ false,
        /* singular_if_not_packed = */ false,
        /* bytes_mode = */ bytes_mode);
    if (untyped) {
      printer->Print(
          "/**\n"
          " * @return {?} Raw field, untyped.\n"
          " */\n");
    } else {
      printer->Print(
          "/**\n"
          " * $fielddef$\n"
          "$comment$"
          " * @return {$type$}\n"
          " */\n",
          "fielddef", FieldDefinition(options, field),
          "comment", FieldComments(field, bytes_mode),
          "type", typed_annotation);
    }

    printer->Print(
        "$class$.prototype.get$name$ = function() {\n",
        "class", GetMessagePath(options, field->containing_type()),
        "name", JSGetterName(options, field));

    if (untyped) {
      printer->Print(
          "  return ");
    } else {
      printer->Print(
          "  return /** @type {$type$} */ (",
          "type", typed_annotation);
    }

    bool use_default = !ReturnsNullWhenUnset(options, field);

    // Raw fields with no default set should just return undefined.
    if (untyped && !field->has_default_value()) {
      use_default = false;
    }

    // Repeated fields get initialized to their default in the constructor
    // (why?), so we emit a plain getField() call for them.
    if (field->is_repeated()) {
      use_default = false;
    }

    GenerateFieldValueExpression(printer, "this", field, use_default);

    if (untyped) {
      printer->Print(
          ";\n"
          "};\n"
          "\n"
          "\n");
    } else {
      printer->Print(
          ");\n"
          "};\n"
          "\n"
          "\n");
    }

    // Typed bytes fields additionally get base64 and Uint8Array wrapper
    // getters around the canonical getter.
    if (field->type() == FieldDescriptor::TYPE_BYTES && !untyped) {
      GenerateBytesWrapper(options, printer, field, BYTES_B64);
      GenerateBytesWrapper(options, printer, field, BYTES_U8);
    }

    if (untyped) {
      printer->Print(
          "/**\n"
          " * @param {*} value$returndoc$\n"
          " */\n",
          "returndoc", JSReturnDoc(options, field));
    } else {
      printer->Print(
          "/** @param {$optionaltype$} value$returndoc$ */\n", "optionaltype",
          JSFieldTypeAnnotation(
              options, field,
              /* is_setter_argument = */ true,
              /* force_present = */ false,
              /* singular_if_not_packed = */ false),
          "returndoc", JSReturnDoc(options, field));
    }
    printer->Print(
        "$class$.prototype.set$name$ = function(value) {\n"
        "  jspb.Message.set$oneoftag$Field(this, $index$",
        "class", GetMessagePath(options, field->containing_type()),
        "name", JSGetterName(options, field),
        "oneoftag", (field->containing_oneof() ? "Oneof" : ""),
        "index", JSFieldIndex(field));
    printer->Print(
        "$oneofgroup$, $type$value$rptvalueinit$$typeclose$);$returnvalue$\n"
        "};\n"
        "\n"
        "\n",
        "type",
        untyped ? "/** @type{string|number|boolean|Array|undefined} */(" : "",
        "typeclose", untyped ? ")" : "",
        "oneofgroup",
        (field->containing_oneof() ? (", " + JSOneofArray(options, field))
                                   : ""),
        "returnvalue", JSReturnClause(field), "rptvalueinit",
        (field->is_repeated() ? " || []" : ""));

    if (untyped) {
      printer->Print(
          "/**\n"
          " * Clears the value.$returndoc$\n"
          " */\n",
          "returndoc", JSReturnDoc(options, field));
    }

    if (field->is_repeated()) {
      GenerateRepeatedPrimitiveHelperMethods(options, printer, field, untyped);
    }
  }

  // Generate clearFoo() method for map fields, repeated fields, and other
  // fields with presence.
  if (IsMap(options, field)) {
    printer->Print(
        "$class$.prototype.clear$name$ = function() {\n"
        "  this.get$name$().clear();$returnvalue$\n"
        "};\n"
        "\n"
        "\n",
        "class", GetMessagePath(options, field->containing_type()),
        "name", JSGetterName(options, field),
        "returnvalue", JSReturnClause(field));
  } else if (field->is_repeated() ||
             (field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE &&
              !field->is_required())) {
    // Fields where we can delegate to the regular setter.
    printer->Print(
        "$class$.prototype.clear$name$ = function() {\n"
        "  this.set$name$($clearedvalue$);$returnvalue$\n"
        "};\n"
        "\n"
        "\n",
        "class", GetMessagePath(options, field->containing_type()),
        "name", JSGetterName(options, field),
        "clearedvalue", (field->is_repeated() ? "[]" : "undefined"),
        "returnvalue", JSReturnClause(field));
  } else if (HasFieldPresence(options, field)) {
    // Fields where we can't delegate to the regular setter because it doesn't
    // accept "undefined" as an argument.
    printer->Print(
        "$class$.prototype.clear$name$ = function() {\n"
        "  jspb.Message.set$maybeoneof$Field(this, "
        "$index$$maybeoneofgroup$, ",
        "class", GetMessagePath(options, field->containing_type()),
        "name", JSGetterName(options, field),
        "maybeoneof", (field->containing_oneof() ? "Oneof" : ""),
        "maybeoneofgroup", (field->containing_oneof() ?
                           (", " + JSOneofArray(options, field)) : ""),
        "index", JSFieldIndex(field));
    printer->Print(
        "$clearedvalue$);$returnvalue$\n"
        "};\n"
        "\n"
        "\n",
        "clearedvalue", (field->is_repeated() ? "[]" : "undefined"),
        "returnvalue", JSReturnClause(field));
  }

  if (HasFieldPresence(options, field)) {
    // Closure primitives are never nullable, so emit {boolean}, not
    // {!boolean}.
    printer->Print(
        "/**\n"
        " * Returns whether this field is set.\n"
        " * @return {boolean}\n"
        " */\n"
        "$class$.prototype.has$name$ = function() {\n"
        "  return jspb.Message.getField(this, $index$) != null;\n"
        "};\n"
        "\n"
        "\n",
        "class", GetMessagePath(options, field->containing_type()),
        "name", JSGetterName(options, field),
        "index", JSFieldIndex(field));
  }
}
// Emits the add$name$(value, opt_index) helper for a repeated primitive
// field, delegating to jspb.Message.addToRepeatedField. |untyped| wraps the
// value in a loose @type cast.
void Generator::GenerateRepeatedPrimitiveHelperMethods(
    const GeneratorOptions& options, io::Printer* printer,
    const FieldDescriptor* field, bool untyped) const {
  printer->Print(
      "/**\n"
      " * @param {!$optionaltype$} value\n"
      " * @param {number=} opt_index\n"
      " */\n"
      "$class$.prototype.add$name$ = function(value, opt_index) {\n"
      "  jspb.Message.addToRepeatedField(this, $index$",
      "class", GetMessagePath(options, field->containing_type()),
      // drop_list strips the "List" suffix so the method is addFoo, not
      // addFooList.
      "name", JSGetterName(options, field, BYTES_DEFAULT,
                           /* drop_list = */ true),
      "optionaltype", JSTypeName(options, field, BYTES_DEFAULT), "index",
      JSFieldIndex(field));
  printer->Print(
      "$oneofgroup$, $type$value$rptvalueinit$$typeclose$, opt_index);\n"
      "};\n"
      "\n"
      "\n",
      "type", untyped ? "/** @type{string|number|boolean|!Uint8Array} */(" : "",
      "typeclose", untyped ? ")" : "", "oneofgroup",
      (field->containing_oneof() ? (", " + JSOneofArray(options, field)) : ""),
      "rptvalueinit", "");
}
// Emits the add$name$(opt_value, opt_index) helper for a repeated message
// field, delegating to jspb.Message.addToRepeatedWrapperField with the
// submessage constructor.
void Generator::GenerateRepeatedMessageHelperMethods(
    const GeneratorOptions& options, io::Printer* printer,
    const FieldDescriptor* field) const {
  printer->Print(
      "/**\n"
      " * @param {!$optionaltype$=} opt_value\n"
      " * @param {number=} opt_index\n"
      " * @return {!$optionaltype$}\n"
      " */\n"
      "$class$.prototype.add$name$ = function(opt_value, opt_index) {\n"
      "  return jspb.Message.addTo$repeatedtag$WrapperField(",
      "optionaltype", JSTypeName(options, field, BYTES_DEFAULT),
      "class", GetMessagePath(options, field->containing_type()),
      // drop_list strips the "List" suffix so the method is addFoo, not
      // addFooList.
      "name", JSGetterName(options, field, BYTES_DEFAULT,
                           /* drop_list = */ true),
      "repeatedtag", (field->is_repeated() ? "Repeated" : ""));

  printer->Print(
      "this, $index$$oneofgroup$, opt_value, $ctor$, opt_index);\n"
      "};\n"
      "\n"
      "\n",
      "index", JSFieldIndex(field), "oneofgroup",
      (field->containing_oneof() ? (", " + JSOneofArray(options, field)) : ""),
      "ctor", GetMessagePath(options, field->message_type()));
}
// For extendable messages, emits the two class-level extension registration
// maps: `extensions` (fieldInfo objects) and `extensionsBinary`
// (binary-format fieldInfo objects). Non-extendable messages get nothing.
void Generator::GenerateClassExtensionFieldInfo(const GeneratorOptions& options,
                                                io::Printer* printer,
                                                const Descriptor* desc) const {
  if (!IsExtendable(desc)) {
    return;
  }

  // The two maps share one doc-comment template; only the property name and
  // the jspb value type differ.
  static const char* const kMapNames[] = {"extensions", "extensionsBinary"};
  static const char* const kInfoTypes[] = {"ExtensionFieldInfo",
                                           "ExtensionFieldBinaryInfo"};
  for (int map_index = 0; map_index < 2; map_index++) {
    printer->Print(
        "\n"
        "/**\n"
        " * The extensions registered with this message class. This is a "
        "map of\n"
        " * extension field number to fieldInfo object.\n"
        " *\n"
        " * For example:\n"
        " *     { 123: {fieldIndex: 123, fieldName: {my_field_name: 0}, "
        "ctor: proto.example.MyMessage} }\n"
        " *\n"
        " * fieldName contains the JsCompiler renamed field name property "
        "so that it\n"
        " * works in OPTIMIZED mode.\n"
        " *\n"
        " * @type {!Object.<number, jspb.$infotype$>}\n"
        " */\n"
        "$class$.$mapname$ = {};\n"
        "\n",
        "class", GetMessagePath(options, desc),
        "mapname", kMapNames[map_index],
        "infotype", kInfoTypes[map_index]);
  }
}
// Emits deserializeBinary() and deserializeBinaryFromReader(): a wire-format
// parse loop that switches on field number, with a default branch that
// either reads extensions (extendable messages) or skips unknown fields.
void Generator::GenerateClassDeserializeBinary(const GeneratorOptions& options,
                                               io::Printer* printer,
                                               const Descriptor* desc) const {
  // TODO(cfallin): Handle lazy decoding when requested by field option and/or
  // by default for 'bytes' fields and packed repeated fields.

  printer->Print(
      "/**\n"
      " * Deserializes binary data (in protobuf wire format).\n"
      " * @param {jspb.ByteSource} bytes The bytes to deserialize.\n"
      " * @return {!$class$}\n"
      " */\n"
      "$class$.deserializeBinary = function(bytes) {\n"
      "  var reader = new jspb.BinaryReader(bytes);\n"
      "  var msg = new $class$;\n"
      "  return $class$.deserializeBinaryFromReader(msg, reader);\n"
      "};\n"
      "\n"
      "\n"
      "/**\n"
      " * Deserializes binary data (in protobuf wire format) from the\n"
      " * given reader into the given message object.\n"
      " * @param {!$class$} msg The message object to deserialize into.\n"
      " * @param {!jspb.BinaryReader} reader The BinaryReader to use.\n"
      " * @return {!$class$}\n"
      " */\n"
      "$class$.deserializeBinaryFromReader = function(msg, reader) {\n"
      "  while (reader.nextField()) {\n"
      "    if (reader.isEndGroup()) {\n"
      "      break;\n"
      "    }\n"
      "    var field = reader.getFieldNumber();\n"
      "    switch (field) {\n",
      "class", GetMessagePath(options, desc));

  // One "case <num>:" branch per non-ignored field.
  for (int i = 0; i < desc->field_count(); i++) {
    if (!IgnoreField(desc->field(i))) {
      GenerateClassDeserializeBinaryField(options, printer, desc->field(i));
    }
  }

  printer->Print(
      "    default:\n");
  if (IsExtendable(desc)) {
    printer->Print(
        "      jspb.Message.readBinaryExtension(msg, reader, $extobj$Binary,\n"
        "        $class$.prototype.getExtension,\n"
        "        $class$.prototype.setExtension);\n"
        "      break;\n",
        "extobj", JSExtensionsObjectName(options, desc->file(), desc),
        "class", GetMessagePath(options, desc));
  } else {
    printer->Print(
        "      reader.skipField();\n"
        "      break;\n");
  }

  printer->Print(
      "    }\n"
      "  }\n"
      "  return msg;\n"
      "};\n"
      "\n"
      "\n");
}
// Emits one "case <field number>:" branch of deserializeBinaryFromReader
// for |field|: map fields delegate to jspb.Map.deserializeBinary, message
// fields read a submessage/group, primitives read a typed scalar; the value
// is then stored via add* (unpacked repeated) or set* (everything else).
void Generator::GenerateClassDeserializeBinaryField(
    const GeneratorOptions& options,
    io::Printer* printer,
    const FieldDescriptor* field) const {

  printer->Print("    case $num$:\n",
                 "num", SimpleItoa(field->number()));

  if (IsMap(options, field)) {
    const FieldDescriptor* key_field = MapFieldKey(field);
    const FieldDescriptor* value_field = MapFieldValue(field);
    printer->Print(
        "      var value = msg.get$name$();\n"
        "      reader.readMessage(value, function(message, reader) {\n",
        "name", JSGetterName(options, field));

    printer->Print("        jspb.Map.deserializeBinary(message, reader, "
                   "$keyReaderFn$, $valueReaderFn$",
          "keyReaderFn", JSBinaryReaderMethodName(options, key_field),
          "valueReaderFn", JSBinaryReaderMethodName(options, value_field));

    // Message-valued map entries additionally need the value-type reader.
    if (value_field->type() == FieldDescriptor::TYPE_MESSAGE) {
      printer->Print(", $messageType$.deserializeBinaryFromReader",
          "messageType", GetMessagePath(options, value_field->message_type()));
    }

    printer->Print(");\n");
    printer->Print("         });\n");
  } else {
    if (field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE) {
      printer->Print(
          "      var value = new $fieldclass$;\n"
          "      reader.read$msgOrGroup$($grpfield$value,"
          "$fieldclass$.deserializeBinaryFromReader);\n",
          "fieldclass", SubmessageTypeRef(options, field),
          // Groups pass their field number as an extra first argument.
          "msgOrGroup", (field->type() == FieldDescriptor::TYPE_GROUP) ?
                        "Group" : "Message",
          "grpfield", (field->type() == FieldDescriptor::TYPE_GROUP) ?
                      (SimpleItoa(field->number()) + ", ") : "");
    } else {
      printer->Print(
          "      var value = /** @type {$fieldtype$} */ "
          "(reader.read$reader$());\n",
          "fieldtype", JSFieldTypeAnnotation(options, field, false, true,
                                             /* singular_if_not_packed */ true,
                                             BYTES_U8),
          "reader",
          JSBinaryReadWriteMethodName(field, /* is_writer = */ false));
    }

    if (field->is_repeated() && !field->is_packed()) {
      printer->Print(
          "      msg.add$name$(value);\n", "name",
          JSGetterName(options, field, BYTES_DEFAULT, /* drop_list = */ true));
    } else {
      // Singular fields, and packed repeated fields, receive a |value| either
      // as the field's value or as the array of all the field's values; set
      // this as the field's value directly.
      printer->Print(
          "      msg.set$name$(value);\n",
          "name", JSGetterName(options, field));
    }
  }

  printer->Print("      break;\n");
}
// Emits serializeBinary() and the static serializeBinaryToWriter(): per-field
// write statements followed, for extendable messages, by extension
// serialization.
void Generator::GenerateClassSerializeBinary(const GeneratorOptions& options,
                                             io::Printer* printer,
                                             const Descriptor* desc) const {
  printer->Print(
      "/**\n"
      " * Serializes the message to binary data (in protobuf wire format).\n"
      " * @return {!Uint8Array}\n"
      " */\n"
      "$class$.prototype.serializeBinary = function() {\n"
      "  var writer = new jspb.BinaryWriter();\n"
      "  $class$.serializeBinaryToWriter(this, writer);\n"
      "  return writer.getResultBuffer();\n"
      "};\n"
      "\n"
      "\n"
      "/**\n"
      " * Serializes the given message to binary data (in protobuf wire\n"
      " * format), writing to the given BinaryWriter.\n"
      " * @param {!$class$} message\n"
      " * @param {!jspb.BinaryWriter} writer\n"
      " * @suppress {unusedLocalVariables} f is only used for nested messages\n"
      " */\n"
      "$class$.serializeBinaryToWriter = function(message, "
      "writer) {\n"
      "  var f = undefined;\n",
      "class", GetMessagePath(options, desc));

  // One conditional write block per non-ignored field.
  for (int i = 0; i < desc->field_count(); i++) {
    if (!IgnoreField(desc->field(i))) {
      GenerateClassSerializeBinaryField(options, printer, desc->field(i));
    }
  }

  if (IsExtendable(desc)) {
    printer->Print(
        "  jspb.Message.serializeBinaryExtensions(message, writer,\n"
        "    $extobj$Binary, $class$.prototype.getExtension);\n",
        "extobj", JSExtensionsObjectName(options, desc->file(), desc),
        "class", GetMessagePath(options, desc));
  }

  printer->Print(
      "};\n"
      "\n"
      "\n");
}
// Emits the JS serialization code for one field of a message's
// serializeBinaryToWriter function: loads the field value into the local
// variable `f`, opens a presence/non-default guard, then writes the value.
// NOTE: the matching closing brace for the guard is printed at the end.
void Generator::GenerateClassSerializeBinaryField(
    const GeneratorOptions& options,
    io::Printer* printer,
    const FieldDescriptor* field) const {
  // Load the value into `f`. Non-message fields with explicit presence read
  // the raw slot via jspb.Message.getField (may be undefined); everything
  // else goes through the typed getter.
  if (HasFieldPresence(options, field) &&
      field->cpp_type() != FieldDescriptor::CPPTYPE_MESSAGE) {
    string typed_annotation = JSFieldTypeAnnotation(
        options, field,
        /* is_setter_argument = */ false,
        /* force_present = */ false,
        /* singular_if_not_packed = */ false,
        /* bytes_mode = */ BYTES_DEFAULT);
    printer->Print(
        " f = /** @type {$type$} */ "
        "(jspb.Message.getField(message, $index$));\n",
        "index", JSFieldIndex(field),
        "type", typed_annotation);
  } else {
    printer->Print(
        " f = message.get$name$($nolazy$);\n",
        "name", JSGetterName(options, field, BYTES_U8),
        // No lazy creation for maps containers -- fastpath the empty case.
        "nolazy", IsMap(options, field) ? "true" : "");
  }
  // Print an `if (condition)` statement that evaluates to true if the field
  // goes on the wire.
  if (IsMap(options, field)) {
    printer->Print(
        " if (f && f.getLength() > 0) {\n");
  } else if (field->is_repeated()) {
    printer->Print(
        " if (f.length > 0) {\n");
  } else {
    if (HasFieldPresence(options, field)) {
      printer->Print(
          " if (f != null) {\n");
    } else {
      // No field presence: serialize onto the wire only if value is
      // non-default. Defaults are documented here:
      // https://goto.google.com/lhdfm
      switch (field->cpp_type()) {
        case FieldDescriptor::CPPTYPE_INT32:
        case FieldDescriptor::CPPTYPE_INT64:
        case FieldDescriptor::CPPTYPE_UINT32:
        case FieldDescriptor::CPPTYPE_UINT64: {
          if (IsIntegralFieldWithStringJSType(field)) {
            // We can use `parseInt` here even though it will not be precise for
            // 64-bit quantities because we are only testing for zero/nonzero,
            // and JS numbers (64-bit floating point values, i.e., doubles) are
            // integer-precise in the range that includes zero.
            printer->Print(" if (parseInt(f, 10) !== 0) {\n");
          } else {
            printer->Print(" if (f !== 0) {\n");
          }
          break;
        }
        case FieldDescriptor::CPPTYPE_ENUM:
        case FieldDescriptor::CPPTYPE_FLOAT:
        case FieldDescriptor::CPPTYPE_DOUBLE:
          printer->Print(
              " if (f !== 0.0) {\n");
          break;
        case FieldDescriptor::CPPTYPE_BOOL:
          printer->Print(
              " if (f) {\n");
          break;
        case FieldDescriptor::CPPTYPE_STRING:
          printer->Print(
              " if (f.length > 0) {\n");
          break;
        default:
          // CPPTYPE_MESSAGE always has field presence, so it cannot reach
          // this no-presence branch.
          assert(false);
          break;
      }
    }
  }
  // Write the field on the wire.
  if (IsMap(options, field)) {
    const FieldDescriptor* key_field = MapFieldKey(field);
    const FieldDescriptor* value_field = MapFieldValue(field);
    printer->Print(
        " f.serializeBinary($index$, writer, "
        "$keyWriterFn$, $valueWriterFn$",
        "index", SimpleItoa(field->number()),
        "keyWriterFn", JSBinaryWriterMethodName(options, key_field),
        "valueWriterFn", JSBinaryWriterMethodName(options, value_field));
    // Message-typed map values additionally need the value's writer callback.
    if (value_field->type() == FieldDescriptor::TYPE_MESSAGE) {
      printer->Print(", $messageType$.serializeBinaryToWriter",
          "messageType", GetMessagePath(options, value_field->message_type()));
    }
    printer->Print(");\n");
  } else {
    printer->Print(
        " writer.write$method$(\n"
        " $index$,\n"
        " f",
        "method", JSBinaryReadWriteMethodName(field, /* is_writer = */ true),
        "index", SimpleItoa(field->number()));
    // NOTE(review): the !IsMap() term is redundant here -- this branch is the
    // else of an IsMap() test above.
    if (field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE &&
        !IsMap(options, field)) {
      printer->Print(
          ",\n"
          " $submsg$.serializeBinaryToWriter\n",
          "submsg", SubmessageTypeRef(options, field));
    } else {
      printer->Print("\n");
    }
    printer->Print(
        " );\n");
  }
  // Close the `if`.
  printer->Print(
      " }\n");
}
void Generator::GenerateEnum(const GeneratorOptions& options,
io::Printer* printer,
const EnumDescriptor* enumdesc) const {
printer->Print(
"/**\n"
" * @enum {number}\n"
" */\n"
"$name$ = {\n",
"name", GetEnumPath(options, enumdesc));
for (int i = 0; i < enumdesc->value_count(); i++) {
const EnumValueDescriptor* value = enumdesc->value(i);
printer->Print(
" $name$: $value$$comma$\n",
"name", ToEnumCase(value->name()),
"value", SimpleItoa(value->number()),
"comma", (i == enumdesc->value_count() - 1) ? "" : ",");
}
printer->Print(
"};\n"
"\n");
}
// Emits the registration code for a single extension field: the
// jspb.ExtensionFieldInfo descriptor object, its binary-serialization
// companion (jspb.ExtensionFieldBinaryInfo), and the entry in the extended
// message's extension map so toObject() picks the field up.
void Generator::GenerateExtension(const GeneratorOptions& options,
                                  io::Printer* printer,
                                  const FieldDescriptor* field) const {
  // Extensions declared inside a message are scoped to that message's path;
  // top-level extensions live on the file's namespace object.
  string extension_scope =
      (field->extension_scope()
           ? GetMessagePath(options, field->extension_scope())
           : GetFilePath(options, field->file()));
  printer->Print(
      "\n"
      "/**\n"
      " * A tuple of {field number, class constructor} for the extension\n"
      " * field named `$name$`.\n"
      " * @type {!jspb.ExtensionFieldInfo.<$extensionType$>}\n"
      " */\n"
      "$class$.$name$ = new jspb.ExtensionFieldInfo(\n",
      "name", JSObjectFieldName(options, field),
      "class", extension_scope,
      "extensionType", JSFieldTypeAnnotation(
          options, field,
          /* is_setter_argument = */ false,
          /* force_present = */ true,
          /* singular_if_not_packed = */ false));
  // Constructor/toObject are only meaningful for message-typed extensions;
  // scalar extensions pass null for both.
  printer->Print(
      " $index$,\n"
      " {$name$: 0},\n"
      " $ctor$,\n"
      " /** @type {?function((boolean|undefined),!jspb.Message=): "
      "!Object} */ (\n"
      " $toObject$),\n"
      " $repeated$);\n",
      "index", SimpleItoa(field->number()),
      "name", JSObjectFieldName(options, field),
      "ctor", (field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE ?
               SubmessageTypeRef(options, field) : string("null")),
      "toObject", (field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE ?
                   (SubmessageTypeRef(options, field) + ".toObject") :
                   string("null")),
      "repeated", (field->is_repeated() ? "1" : "0"));
  // Binary wire-format companion info, keyed by field number in the
  // extended type's "...Binary" extensions object.
  printer->Print(
      "\n"
      "$extendName$Binary[$index$] = new jspb.ExtensionFieldBinaryInfo(\n"
      " $class$.$name$,\n"
      " $binaryReaderFn$,\n"
      " $binaryWriterFn$,\n"
      " $binaryMessageSerializeFn$,\n"
      " $binaryMessageDeserializeFn$,\n",
      "extendName",
      JSExtensionsObjectName(options, field->file(), field->containing_type()),
      "index", SimpleItoa(field->number()), "class", extension_scope, "name",
      JSObjectFieldName(options, field), "binaryReaderFn",
      JSBinaryReaderMethodName(options, field), "binaryWriterFn",
      JSBinaryWriterMethodName(options, field), "binaryMessageSerializeFn",
      (field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE)
          ? (SubmessageTypeRef(options, field) + ".serializeBinaryToWriter")
          : "undefined",
      "binaryMessageDeserializeFn",
      (field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE)
          ? (SubmessageTypeRef(options, field) + ".deserializeBinaryFromReader")
          : "undefined");
  printer->Print(" $isPacked$);\n", "isPacked",
                 (field->is_packed() ? "true" : "false"));
  printer->Print(
      "// This registers the extension field with the extended class, so that\n"
      "// toObject() will function correctly.\n"
      "$extendName$[$index$] = $class$.$name$;\n"
      "\n",
      "extendName", JSExtensionsObjectName(options, field->file(),
                                           field->containing_type()),
      "index", SimpleItoa(field->number()),
      "class", extension_scope,
      "name", JSObjectFieldName(options, field));
}
// Parses the generator's command-line options into this GeneratorOptions.
// Returns false (with *error set) on any malformed or unknown option.
// BUG FIX: an unknown import_style previously set *error but fell through
// and returned true; it now fails the parse like every other error path.
bool GeneratorOptions::ParseFromOptions(
    const std::vector< std::pair< string, string > >& options,
    string* error) {
  for (int i = 0; i < options.size(); i++) {
    if (options[i].first == "add_require_for_enums") {
      // Boolean flags must be bare (no "=value").
      if (options[i].second != "") {
        *error = "Unexpected option value for add_require_for_enums";
        return false;
      }
      add_require_for_enums = true;
    } else if (options[i].first == "binary") {
      if (options[i].second != "") {
        *error = "Unexpected option value for binary";
        return false;
      }
      binary = true;
    } else if (options[i].first == "testonly") {
      if (options[i].second != "") {
        *error = "Unexpected option value for testonly";
        return false;
      }
      testonly = true;
    } else if (options[i].first == "error_on_name_conflict") {
      if (options[i].second != "") {
        *error = "Unexpected option value for error_on_name_conflict";
        return false;
      }
      error_on_name_conflict = true;
    } else if (options[i].first == "output_dir") {
      output_dir = options[i].second;
    } else if (options[i].first == "namespace_prefix") {
      namespace_prefix = options[i].second;
    } else if (options[i].first == "library") {
      library = options[i].second;
    } else if (options[i].first == "import_style") {
      if (options[i].second == "closure") {
        import_style = kImportClosure;
      } else if (options[i].second == "commonjs") {
        import_style = kImportCommonJs;
      } else if (options[i].second == "browser") {
        import_style = kImportBrowser;
      } else if (options[i].second == "es6") {
        import_style = kImportEs6;
      } else {
        *error = "Unknown import style " + options[i].second + ", expected " +
                 "one of: closure, commonjs, browser, es6.";
        // Previously missing: without this, the bad style was silently
        // ignored and parsing reported success.
        return false;
      }
    } else if (options[i].first == "extension") {
      extension = options[i].second;
    } else if (options[i].first == "one_output_file_per_input_file") {
      if (!options[i].second.empty()) {
        *error = "Unexpected option value for one_output_file_per_input_file";
        return false;
      }
      one_output_file_per_input_file = true;
    } else {
      // Assume any other option is an output directory, as long as it is a bare
      // `key` rather than a `key=value` option.
      if (options[i].second != "") {
        *error = "Unknown option: " + options[i].first;
        return false;
      }
      output_dir = options[i].first;
    }
  }
  // Cross-option validation: the Closure-only flags are rejected for any
  // other import style.
  if (import_style != kImportClosure &&
      (add_require_for_enums || testonly || !library.empty() ||
       error_on_name_conflict || extension != ".js" ||
       one_output_file_per_input_file)) {
    *error =
        "The add_require_for_enums, testonly, library, error_on_name_conflict, "
        "extension, and one_output_file_per_input_file options should only be "
        "used for import_style=closure";
    return false;
  }
  return true;
}
// Maps the parsed option set onto one of the three output layouts.
GeneratorOptions::OutputMode GeneratorOptions::output_mode() const {
  // Non-Closure imports, or an explicit request, force one output file per
  // input .proto file.
  const bool per_input_file =
      (import_style != kImportClosure) || one_output_file_per_input_file;
  if (per_input_file) {
    return kOneOutputFilePerInputFile;
  }
  // With a library name everything is bundled into that single file;
  // otherwise each generated type gets its own file.
  return library.empty() ? kOneOutputFilePerType : kEverythingInOneFile;
}
void Generator::GenerateFilesInDepOrder(
const GeneratorOptions& options,
io::Printer* printer,
const std::vector<const FileDescriptor*>& files) const {
// Build a std::set over all files so that the DFS can detect when it recurses
// into a dep not specified in the user's command line.
std::set<const FileDescriptor*> all_files(files.begin(), files.end());
// Track the in-progress set of files that have been generated already.
std::set<const FileDescriptor*> generated;
for (int i = 0; i < files.size(); i++) {
GenerateFileAndDeps(options, printer, files[i], &all_files, &generated);
}
}
// Depth-first helper for GenerateFilesInDepOrder: emits |root|'s deps first,
// then |root| itself (but only if |root| was explicitly requested).
void Generator::GenerateFileAndDeps(
    const GeneratorOptions& options,
    io::Printer* printer,
    const FileDescriptor* root,
    std::set<const FileDescriptor*>* all_files,
    std::set<const FileDescriptor*>* generated) const {
  // insert() reports atomically whether |root| was already present; bail out
  // on a repeat visit so shared dependencies are generated exactly once.
  if (!generated->insert(root).second) {
    return;
  }
  // Recurse into all dependencies before this file's own content.
  for (int i = 0; i < root->dependency_count(); i++) {
    GenerateFileAndDeps(options, printer, root->dependency(i), all_files,
                        generated);
  }
  // Only emit content for files in the originally-requested set; transitive
  // deps outside it are traversed (for ordering) but not generated.
  if (all_files->count(root) > 0) {
    GenerateClassesAndEnums(options, printer, root);
  }
}
// Generates the complete output for a single .proto file: header, import
// requires, provides, classes/enums, top-level extensions, and (for
// CommonJS) the exports object. Well-known-type helper methods are appended
// for the bundled google/protobuf files.
void Generator::GenerateFile(const GeneratorOptions& options,
                             io::Printer* printer,
                             const FileDescriptor* file) const {
  GenerateHeader(options, printer);
  // Generate "require" statements.
  if (options.import_style == GeneratorOptions::kImportCommonJs) {
    printer->Print("var jspb = require('google-protobuf');\n");
    printer->Print("var goog = jspb;\n");
    printer->Print("var global = Function('return this')();\n\n");
    for (int i = 0; i < file->dependency_count(); i++) {
      const string& name = file->dependency(i)->name();
      // Each dependency is required under a module alias resolved relative
      // to this file's output path.
      printer->Print(
          "var $alias$ = require('$file$');\n",
          "alias", ModuleAlias(name),
          "file",
          GetRootPath(file->name(), name) + GetJSFilename(options, name));
    }
  }
  std::set<string> provided;
  std::set<const FieldDescriptor*> extensions;
  for (int i = 0; i < file->extension_count(); i++) {
    // We honor the jspb::ignore option here only when working with
    // Closure-style imports. Use of this option is discouraged and so we want
    // to avoid adding new support for it.
    if (options.import_style == GeneratorOptions::kImportClosure &&
        IgnoreField(file->extension(i))) {
      continue;
    }
    provided.insert(GetFilePath(options, file) + "." +
                    JSObjectFieldName(options, file->extension(i)));
    extensions.insert(file->extension(i));
  }
  FindProvidesForFile(options, printer, file, &provided);
  GenerateProvides(options, printer, &provided);
  std::vector<const FileDescriptor*> files;
  files.push_back(file);
  if (options.import_style == GeneratorOptions::kImportClosure) {
    GenerateRequiresForLibrary(options, printer, files, &provided);
  }
  GenerateClassesAndEnums(options, printer, file);
  // Generate code for top-level extensions. Extensions nested inside messages
  // are emitted inside GenerateClassesAndEnums().
  for (std::set<const FieldDescriptor*>::const_iterator it = extensions.begin();
       it != extensions.end(); ++it) {
    GenerateExtension(options, printer, *it);
  }
  if (options.import_style == GeneratorOptions::kImportCommonJs) {
    printer->Print("goog.object.extend(exports, $package$);\n",
                   "package", GetFilePath(options, file));
  }
  // Emit well-known type methods.
  for (FileToc* toc = well_known_types_js; toc->name != NULL; toc++) {
    string name = string("google/protobuf/") + toc->name;
    if (name == StripProto(file->name()) + ".js") {
      printer->Print(toc->data);
    }
  }
}
// Top-level generator entry point. Parses the generator options, then emits
// output in one of three layouts: everything in one library file, one file
// per generated type, or one file per input .proto.
// Consistency cleanup: the per-type branch now uses the already-bound local
// `file` everywhere instead of mixing `file` and `files[i]`.
bool Generator::GenerateAll(const std::vector<const FileDescriptor*>& files,
                            const string& parameter,
                            GeneratorContext* context,
                            string* error) const {
  std::vector< std::pair< string, string > > option_pairs;
  ParseGeneratorParameter(parameter, &option_pairs);
  GeneratorOptions options;
  if (!options.ParseFromOptions(option_pairs, error)) {
    return false;
  }
  if (options.output_mode() == GeneratorOptions::kEverythingInOneFile) {
    // All output should go in a single file.
    string filename = options.output_dir + "/" + options.library +
                      options.GetFileNameExtension();
    google::protobuf::scoped_ptr<io::ZeroCopyOutputStream> output(context->Open(filename));
    GOOGLE_CHECK(output.get());
    io::Printer printer(output.get(), '$');
    // Pull out all extensions -- we need these to generate all
    // provides/requires.
    std::vector<const FieldDescriptor*> extensions;
    for (int i = 0; i < files.size(); i++) {
      for (int j = 0; j < files[i]->extension_count(); j++) {
        const FieldDescriptor* extension = files[i]->extension(j);
        extensions.push_back(extension);
      }
    }
    GenerateHeader(options, &printer);
    std::set<string> provided;
    FindProvides(options, &printer, files, &provided);
    FindProvidesForFields(options, &printer, extensions, &provided);
    GenerateProvides(options, &printer, &provided);
    GenerateTestOnly(options, &printer);
    GenerateRequiresForLibrary(options, &printer, files, &provided);
    GenerateFilesInDepOrder(options, &printer, files);
    for (int i = 0; i < extensions.size(); i++) {
      if (ShouldGenerateExtension(extensions[i])) {
        GenerateExtension(options, &printer, extensions[i]);
      }
    }
    if (printer.failed()) {
      return false;
    }
  } else if (options.output_mode() == GeneratorOptions::kOneOutputFilePerType) {
    // One file per message/enum type, restricted to the allowed set.
    std::set<const void*> allowed_set;
    if (!GenerateJspbAllowedSet(options, files, &allowed_set, error)) {
      return false;
    }
    for (int i = 0; i < files.size(); i++) {
      const FileDescriptor* file = files[i];
      for (int j = 0; j < file->message_type_count(); j++) {
        const Descriptor* desc = file->message_type(j);
        if (allowed_set.count(desc) == 0) {
          continue;
        }
        string filename = GetMessageFileName(options, desc);
        google::protobuf::scoped_ptr<io::ZeroCopyOutputStream> output(
            context->Open(filename));
        GOOGLE_CHECK(output.get());
        io::Printer printer(output.get(), '$');
        GenerateHeader(options, &printer);
        std::set<string> provided;
        FindProvidesForMessage(options, &printer, desc, &provided);
        GenerateProvides(options, &printer, &provided);
        GenerateTestOnly(options, &printer);
        GenerateRequiresForMessage(options, &printer, desc, &provided);
        GenerateClass(options, &printer, desc);
        if (printer.failed()) {
          return false;
        }
      }
      for (int j = 0; j < file->enum_type_count(); j++) {
        const EnumDescriptor* enumdesc = file->enum_type(j);
        if (allowed_set.count(enumdesc) == 0) {
          continue;
        }
        string filename = GetEnumFileName(options, enumdesc);
        google::protobuf::scoped_ptr<io::ZeroCopyOutputStream> output(
            context->Open(filename));
        GOOGLE_CHECK(output.get());
        io::Printer printer(output.get(), '$');
        GenerateHeader(options, &printer);
        std::set<string> provided;
        FindProvidesForEnum(options, &printer, enumdesc, &provided);
        GenerateProvides(options, &printer, &provided);
        GenerateTestOnly(options, &printer);
        GenerateEnum(options, &printer, enumdesc);
        if (printer.failed()) {
          return false;
        }
      }
      // File-level extensions (message-level extensions are generated under
      // the enclosing message).
      if (allowed_set.count(file) == 1) {
        string filename = GetExtensionFileName(options, file);
        google::protobuf::scoped_ptr<io::ZeroCopyOutputStream> output(
            context->Open(filename));
        GOOGLE_CHECK(output.get());
        io::Printer printer(output.get(), '$');
        GenerateHeader(options, &printer);
        std::set<string> provided;
        std::vector<const FieldDescriptor*> fields;
        for (int j = 0; j < file->extension_count(); j++) {
          if (ShouldGenerateExtension(file->extension(j))) {
            fields.push_back(file->extension(j));
          }
        }
        FindProvidesForFields(options, &printer, fields, &provided);
        GenerateProvides(options, &printer, &provided);
        GenerateTestOnly(options, &printer);
        GenerateRequiresForExtensions(options, &printer, fields, &provided);
        for (int j = 0; j < file->extension_count(); j++) {
          if (ShouldGenerateExtension(file->extension(j))) {
            GenerateExtension(options, &printer, file->extension(j));
          }
        }
      }
    }
  } else /* options.output_mode() == kOneOutputFilePerInputFile */ {
    // Generate one output file per input (.proto) file.
    for (int i = 0; i < files.size(); i++) {
      const google::protobuf::FileDescriptor* file = files[i];
      string filename =
          options.output_dir + "/" + GetJSFilename(options, file->name());
      google::protobuf::scoped_ptr<io::ZeroCopyOutputStream> output(context->Open(filename));
      GOOGLE_CHECK(output.get());
      io::Printer printer(output.get(), '$');
      GenerateFile(options, &printer, file);
      if (printer.failed()) {
        return false;
      }
    }
  }
  return true;
}
} // namespace js
} // namespace compiler
} // namespace protobuf
} // namespace google
| 52,350 |
870 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util.format;
import java.util.Iterator;
import java.util.Map.Entry;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
/**
* Formatter that will aggregate entries for various display purposes.
*/
/**
 * Formatter that will aggregate entries for various display purposes.
 *
 * <p>Subclasses accumulate per-entry statistics via {@link #aggregateStats} and
 * produce the final display string via {@link #getStats}.
 */
public abstract class AggregatingFormatter extends DefaultFormatter {
  @Override
  public String next() {
    Iterator<Entry<Key,Value>> source = super.getScannerIterator();
    checkState(true);
    // Drain every remaining entry into the aggregation before reporting.
    while (source.hasNext()) {
      aggregateStats(source.next());
    }
    return getStats();
  }

  /**
   * Generate statistics from each {@link Entry}, called for each entry to be iterated over.
   *
   * @param next
   *          the next entry to aggregate
   */
  protected abstract void aggregateStats(Entry<Key,Value> next);

  /**
   * Finalize the aggregation and return the result. Called once at the end.
   *
   * @return the aggregation results, suitable for printing to the console
   */
  protected abstract String getStats();
}
| 505 |
14,668 | <gh_stars>1000+
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROMEOS_SERVICES_LIBASSISTANT_GRPC_SERVICES_STATUS_PROVIDER_H_
#define CHROMEOS_SERVICES_LIBASSISTANT_GRPC_SERVICES_STATUS_PROVIDER_H_
#include "base/observer_list.h"
#include "base/observer_list_types.h"
#include "chromeos/assistant/internal/proto/shared/proto/v2/delegate/event_handler_interface.pb.h"
#include "chromeos/services/libassistant/grpc/external_services/grpc_services_observer.h"
#include "chromeos/services/libassistant/grpc/services_status_observer.h"
namespace chromeos {
namespace libassistant {
// Component monitoring Libassistant gRPC services status, exposing method to
// get current services status and notify observers on status change.
// Component monitoring Libassistant gRPC services status, exposing method to
// get current services status and notify observers on status change.
// Status updates arrive as heartbeat events via the GrpcServicesObserver
// interface.
class ServicesStatusProvider
    : public GrpcServicesObserver<::assistant::api::OnHeartbeatEventRequest> {
 public:
  ServicesStatusProvider();
  // Copying is disallowed: the provider owns an observer list.
  ServicesStatusProvider(const ServicesStatusProvider&) = delete;
  ServicesStatusProvider& operator=(const ServicesStatusProvider&) = delete;
  ~ServicesStatusProvider() override;
  // GrpcServiceObserver implementation:
  // Invoked on each heartbeat event from the Libassistant gRPC services.
  void OnGrpcMessage(
      const ::assistant::api::OnHeartbeatEventRequest& request) override;
  // Registers/unregisters observers notified on services status change.
  void AddObserver(ServicesStatusObserver* observer);
  void RemoveObserver(ServicesStatusObserver* observer);
 private:
  // Services start out offline until a heartbeat indicates otherwise.
  ServicesStatus status_ = ServicesStatus::OFFLINE;
  base::ObserverList<ServicesStatusObserver> observers_;
};
} // namespace libassistant
} // namespace chromeos
#endif // CHROMEOS_SERVICES_LIBASSISTANT_GRPC_SERVICES_STATUS_PROVIDER_H_
| 512 |
2,776 | <reponame>Brianspha/Colonisation<gh_stars>1000+
{
"Execute binaries from npm packages.\n%s": "Esegui il binario del pacchetto npm.\n%s",
"Package to be installed.": "Pacchetto da installare.",
"Location of the npm cache.": "Percorso della cache npm.",
"Skip installation if a package is missing.": "Salta l'installazione se un pacchetto non è disponibile.",
"Path to user npmrc.": "Percorso utente per npmrc.",
"Execute string as if inside `npm run-script`.": "Esegui l'argomento come se fosse all'interno di `npm run-script`.",
"Shell to execute the command with, if any.": "Shell con cui eseguire il comando, se presente.",
"Generate shell code to use npx as the \"command not found\" fallback.": "Generare un codice di shell per usare npx come ripiego quando il comando non esiste.",
"Ignores existing binaries in $PATH, or in the local project. This forces npx to do a temporary install and use the latest version.": "Ignora i binari esistenti in $PATH, oppure nel progetto locale. Questo forza temporaneamente npx a installare e usare l'ultima versione.",
"npm binary to use for internal operations.": "Binario npm da usare per le operazioni interne.",
"For the full documentation, see the manual page for npx(1).": "Per la documentazione completa, vedere la pagina del manuale npx(1).",
"Unable to guess a binary name from %s. Please use --package.": "Impossibile supporre il nome del binario da %s. Prego usare --package.",
"\nERROR: You must supply a command.\n": "\nERRORE: È necessario fornire un comando.\n",
"Command failed: %s %s": "Comando fallito: %s %s",
"Install for %s failed with code %s": "Installazione di %s fallita con codice %s",
"%s not found. Trying with npx...": "%s non trovato. Prova con npx...",
"command not found: %s": "comando non trovato: %s",
"options": "opzioni",
"command": "comando",
"version": "versione",
"command-arg": "argomento-del-comando",
"command-string": "stringa-di-comando",
"shell": "shell",
"package": "pacchetto",
"npx: installed %s in %ss": "npx: installato %s in %ss",
"Suppress output from npx itself. Subcommands will not be affected.": "Sopprimere l'output da npx stesso. I sottocomandi non saranno interessati."
}
| 754 |
#pragma once
#include <stdint.h>
// Initialize the stacktrace facility with a caller-provided buffer.
// Assumes `data`/`size` describe memory the facility may use — the
// implementation is not visible here; confirm ownership/lifetime there.
void init_stacktrace(uint8_t* data, uint32_t size);
// Print the captured stack trace (destination not visible from this header).
void stacktrace_print();
2,003 | <filename>src/common/metric/percentile_counter.h
#pragma once
// Copyright (c) 2017, Baidu.com, Inc. All Rights Reserved
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "common/metric/collector.h"
#include "common/metric/subscriber.h"
#include "common/metric/collector_report_publisher.h"
#include "leveldb/util/histogram.h"
namespace tera {
class PercentileCounter;
// Collector adapter that reads (and resets) a PercentileCounter on each
// collection cycle. The counter outlives the collector: the counter itself
// registers/unregisters this adapter (see PercentileCounter's ctor/dtor).
class PercentileCollector : public Collector {
 public:
  virtual ~PercentileCollector() {}
  // Non-owning: `pc` must remain valid for this collector's lifetime.
  PercentileCollector(PercentileCounter* pc) : pc_(pc){};
  // Returns an instant value of the metric (the configured percentile) and
  // clears the underlying histogram; see definition below.
  inline virtual int64_t Collect() override;
 private:
  PercentileCounter* pc_;
};
// A percentile metric backed by a leveldb Histogram. On construction it
// registers a PercentileCollector for itself with the global
// CollectorReportPublisher; on destruction it unregisters.
class PercentileCounter {
 public:
  // Create a metric with name and label.
  // label_str format: k1:v1,k2:v2,...
  // Can be built by LabelStringBuilder().Append("k1",
  // "v1").Append("k2","v2").ToString().
  // NOTE: ParseFromStringWithThrow throws on a malformed label string.
  PercentileCounter(const std::string& name, const std::string& label_str, double percentile,
                    SubscriberTypeList type_list = {SubscriberType::LATEST})
      : percentile_(percentile), registered_(false) {
    // Parse metric id (throws on illegal input).
    MetricId::ParseFromStringWithThrow(name, label_str, &metric_id_);
    // Label string is legal at this point, so register the collector.
    registered_ = CollectorReportPublisher::GetInstance().AddCollector(
        metric_id_, std::unique_ptr<Collector>(new PercentileCollector(this)), type_list);
  }
  // Convenience overload: metric with a name but no labels.
  PercentileCounter(const std::string& name, double percentile,
                    SubscriberTypeList type_list = {SubscriberType::LATEST})
      : percentile_(percentile), registered_(false) {
    // Parse metric id (throws on illegal input).
    MetricId::ParseFromStringWithThrow(name, "", &metric_id_);
    // Name is legal at this point, so register the collector.
    registered_ = CollectorReportPublisher::GetInstance().AddCollector(
        metric_id_, std::unique_ptr<Collector>(new PercentileCollector(this)), type_list);
  }
  virtual ~PercentileCounter() {
    if (registered_) {
      // Unregister so the publisher drops its collector for this metric.
      CollectorReportPublisher::GetInstance().DeleteCollector(metric_id_);
    }
  }
  bool IsRegistered() const { return registered_; }
  // Returns the configured percentile of the accumulated samples, or -1 when
  // the histogram is empty (Percentile() yields NaN).
  // NOTE(review): relies on unqualified isnan — presumably <math.h>/<cmath>
  // arrives via an included header; confirm.
  int64_t Get() {
    double percentile_value = hist_.Percentile(percentile_);
    if (isnan(percentile_value)) {
      return -1;
    }
    return (int64_t)percentile_value;
  }
  // Drops all accumulated samples.
  void Clear() { hist_.Clear(); }
  // Records one sample.
  void Append(int64_t v) { hist_.Add((double)v); }
  // Never copied: the publisher holds a pointer back to this object.
  PercentileCounter(const PercentileCounter&) = delete;
  PercentileCounter& operator=(const PercentileCounter&) = delete;
 private:
  double percentile_;   // e.g. 99.0 for p99
  bool registered_;     // true iff AddCollector succeeded
  MetricId metric_id_;
  leveldb::Histogram hist_;
};
// Reads the counter's current percentile value and resets its histogram, so
// each collection cycle reports over a fresh sample window.
int64_t PercentileCollector::Collect() {
  int64_t val = (int64_t)pc_->Get();
  pc_->Clear();
  return val;
}
} | 974 |
310 | {
"name": "Day One (iOS)",
"description": "A journal app.",
"url": "https://itunes.apple.com/us/app/day-one-journal/id421706526"
} | 55 |
30,023 | <filename>homeassistant/components/analytics/__init__.py
"""Send instance and usage analytics."""
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.const import EVENT_HOMEASSISTANT_STARTED
from homeassistant.core import HomeAssistant
from homeassistant.helpers.event import async_call_later, async_track_time_interval
from homeassistant.helpers.typing import ConfigType
from .analytics import Analytics
from .const import ATTR_ONBOARDED, ATTR_PREFERENCES, DOMAIN, INTERVAL, PREFERENCE_SCHEMA
async def async_setup(hass: HomeAssistant, _: ConfigType) -> bool:
    """Set up the analytics integration."""
    analytics = Analytics(hass)
    # Load stored data (previously saved preferences/onboarding state).
    await analytics.load()
    async def start_schedule(_event):
        """Start the send schedule after the started event."""
        # Wait 15 min after started before the first report.
        async_call_later(hass, 900, analytics.send_analytics)
        # Then send every day.
        async_track_time_interval(hass, analytics.send_analytics, INTERVAL)
    # Scheduling begins only once Home Assistant has fully started.
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, start_schedule)
    # Register the websocket commands defined below.
    websocket_api.async_register_command(hass, websocket_analytics)
    websocket_api.async_register_command(hass, websocket_analytics_preferences)
    # Expose the Analytics instance to the websocket handlers via hass.data.
    hass.data[DOMAIN] = analytics
    return True
@websocket_api.require_admin
@websocket_api.websocket_command({vol.Required("type"): "analytics"})
@websocket_api.async_response
async def websocket_analytics(
    hass: HomeAssistant,
    connection: websocket_api.connection.ActiveConnection,
    msg: dict,
) -> None:
    """Return analytics preferences."""
    # The Analytics instance is stored in hass.data by async_setup.
    analytics: Analytics = hass.data[DOMAIN]
    payload = {
        ATTR_PREFERENCES: analytics.preferences,
        ATTR_ONBOARDED: analytics.onboarded,
    }
    connection.send_result(msg["id"], payload)
@websocket_api.require_admin
@websocket_api.websocket_command(
    {
        vol.Required("type"): "analytics/preferences",
        vol.Required("preferences", default={}): PREFERENCE_SCHEMA,
    }
)
@websocket_api.async_response
async def websocket_analytics_preferences(
    hass: HomeAssistant,
    connection: websocket_api.connection.ActiveConnection,
    msg: dict,
) -> None:
    """Update analytics preferences."""
    # Persist the validated preferences, then send a report reflecting them.
    analytics: Analytics = hass.data[DOMAIN]
    await analytics.save_preferences(msg[ATTR_PREFERENCES])
    await analytics.send_analytics()
    connection.send_result(msg["id"], {ATTR_PREFERENCES: analytics.preferences})
| 907 |
5,279 | <gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.sdk.io.gcp.firestore.it;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.junit.Assume.assumeThat;
import com.google.api.core.ApiFunction;
import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutures;
import com.google.cloud.firestore.CollectionReference;
import com.google.cloud.firestore.DocumentReference;
import com.google.cloud.firestore.Firestore;
import com.google.cloud.firestore.FirestoreOptions;
import com.google.cloud.firestore.WriteBatch;
import com.google.cloud.firestore.WriteResult;
import com.google.cloud.firestore.spi.v1.FirestoreRpc;
import com.google.cloud.firestore.v1.FirestoreClient.ListCollectionIdsPagedResponse;
import com.google.cloud.firestore.v1.FirestoreClient.ListDocumentsPagedResponse;
import com.google.firestore.v1.BatchWriteRequest;
import com.google.firestore.v1.Document;
import com.google.firestore.v1.ListCollectionIdsRequest;
import com.google.firestore.v1.ListDocumentsRequest;
import com.google.firestore.v1.RunQueryRequest;
import com.google.firestore.v1.RunQueryResponse;
import com.google.firestore.v1.StructuredQuery;
import com.google.firestore.v1.StructuredQuery.CollectionSelector;
import com.google.firestore.v1.StructuredQuery.Direction;
import com.google.firestore.v1.StructuredQuery.FieldReference;
import com.google.firestore.v1.StructuredQuery.Order;
import com.google.firestore.v1.StructuredQuery.Projection;
import com.google.firestore.v1.Write;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.time.Clock;
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableMap;
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables;
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Streams;
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.MoreExecutors;
import org.junit.rules.TestRule;
import org.junit.runner.Description;
import org.junit.runners.model.Statement;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
final class FirestoreTestingHelper implements TestRule {
  private static final Logger LOG = LoggerFactory.getLogger(FirestoreTestingHelper.class);
  // Shared per-JVM collection id: the UTC timestamp of class load, so runs
  // from different test invocations land in distinct Firestore collections.
  static final String BASE_COLLECTION_ID;
  static {
    Instant now = Clock.systemUTC().instant();
    DateTimeFormatter formatter =
        DateTimeFormatter.ISO_LOCAL_DATE_TIME.withZone(ZoneId.from(ZoneOffset.UTC));
    BASE_COLLECTION_ID = formatter.format(now);
  }
  /** Controls when Firestore documents created by a test are deleted. */
  enum CleanupMode {
    ALWAYS,
    ON_SUCCESS_ONLY,
    NEVER
  }
  /** Hint describing the document layout a test creates (flat vs nested). */
  enum DataLayout {
    Shallow,
    Deep
  }
  /** Per-test-method annotation selecting a {@link DataLayout}; defaults to Shallow. */
  @Retention(RetentionPolicy.RUNTIME)
  @Target(ElementType.METHOD)
  @interface TestDataLayoutHint {
    DataLayout value() default DataLayout.Shallow;
  }
  private static final FirestoreOptions FIRESTORE_OPTIONS = FirestoreOptions.getDefaultInstance();
  private final Firestore fs = FIRESTORE_OPTIONS.getService();
  private final FirestoreRpc rpc = (FirestoreRpc) FIRESTORE_OPTIONS.getRpc();
  private final CleanupMode cleanupMode;
  // Set per-test by apply(); identify the running test for path construction.
  private Class<?> testClass;
  private String testName;
  private DataLayout dataLayout;
  // Monotonic counters backing docId()/colId() id generation.
  private int docIdCounter;
  private int colIdCounter;
  // Flipped to true by the rule only when the wrapped statement completes
  // without throwing; drives ON_SUCCESS_ONLY cleanup.
  private boolean testSuccess = false;
@SuppressWarnings(
"initialization.fields.uninitialized") // testClass and testName are managed via #apply
public FirestoreTestingHelper(CleanupMode cleanupMode) {
this.cleanupMode = cleanupMode;
}
@Override
public Statement apply(@NonNull Statement base, Description description) {
testClass = description.getTestClass();
testName = description.getMethodName();
TestDataLayoutHint hint = description.getAnnotation(TestDataLayoutHint.class);
if (hint != null) {
dataLayout = hint.value();
} else {
dataLayout = DataLayout.Shallow;
}
return new Statement() {
@Override
public void evaluate() throws Throwable {
try {
base.evaluate();
testSuccess = true;
} finally {
if (cleanupMode == CleanupMode.ALWAYS
|| (cleanupMode == CleanupMode.ON_SUCCESS_ONLY && testSuccess)) {
try {
cleanUp(getBaseDocumentPath());
} catch (Exception e) {
if (LOG.isDebugEnabled() || LOG.isTraceEnabled()) {
LOG.debug("Error while running cleanup", e);
} else {
LOG.info(
"Error while running cleanup: {} (set log level higher for stacktrace)",
e.getMessage() == null ? e.getClass().getName() : e.getMessage());
}
}
}
}
}
};
}
Firestore getFs() {
return fs;
}
String getDatabase() {
return String.format(
"projects/%s/databases/%s",
FIRESTORE_OPTIONS.getProjectId(), FIRESTORE_OPTIONS.getDatabaseId());
}
String getDocumentRoot() {
return getDatabase() + "/documents";
}
DocumentReference getBaseDocument() {
return getBaseDocument(fs, testClass, testName);
}
String getBaseDocumentPath() {
return String.format("%s/%s", getDocumentRoot(), getBaseDocument().getPath());
}
String docId() {
return id("doc", docIdCounter++);
}
String colId() {
return id("col", colIdCounter++);
}
DocumentGenerator documentGenerator(int to, String collectionId) {
return documentGenerator(to, collectionId, false);
}
DocumentGenerator documentGenerator(int to, String collectionId, boolean addBazDoc) {
return new DocumentGenerator(to, collectionId, addBazDoc);
}
Stream<String> listCollectionIds(String parent) {
ListCollectionIdsRequest lcir = ListCollectionIdsRequest.newBuilder().setParent(parent).build();
// LOGGER.debug("lcir = {}", lcir);
ListCollectionIdsPagedResponse response = rpc.listCollectionIdsPagedCallable().call(lcir);
return StreamSupport.stream(response.iteratePages().spliterator(), false)
.flatMap(page -> StreamSupport.stream(page.getValues().spliterator(), false))
.map(colId -> String.format("%s/%s", parent, colId));
}
Stream<String> listDocumentIds(String collectionPath) {
int index = collectionPath.lastIndexOf('/');
String parent = collectionPath.substring(0, index);
String collectionId = collectionPath.substring(index + 1);
ListDocumentsRequest ldr =
ListDocumentsRequest.newBuilder()
.setParent(parent)
.setCollectionId(collectionId)
.setShowMissing(true)
.build();
// LOGGER.debug("ldr = {}", ldr);
ListDocumentsPagedResponse response = rpc.listDocumentsPagedCallable().call(ldr);
return StreamSupport.stream(response.iteratePages().spliterator(), false)
.flatMap(page -> page.getResponse().getDocumentsList().stream())
.map(Document::getName)
.filter(s -> !s.isEmpty());
}
Stream<String> listDocumentsViaQuery(String collectionPath) {
int index = collectionPath.lastIndexOf('/');
String parent = collectionPath.substring(0, index);
String collectionId = collectionPath.substring(index + 1);
FieldReference nameField = FieldReference.newBuilder().setFieldPath("__name__").build();
RunQueryRequest rqr =
RunQueryRequest.newBuilder()
.setParent(parent)
.setStructuredQuery(
StructuredQuery.newBuilder()
.addFrom(CollectionSelector.newBuilder().setCollectionId(collectionId))
.addOrderBy(
Order.newBuilder()
.setField(nameField)
.setDirection(Direction.ASCENDING)
.build())
.setSelect(Projection.newBuilder().addFields(nameField).build()))
.build();
// LOGGER.debug("rqr = {}", rqr);
return StreamSupport.stream(rpc.runQueryCallable().call(rqr).spliterator(), false)
.filter(RunQueryResponse::hasDocument)
.map(RunQueryResponse::getDocument)
.map(Document::getName);
}
private void cleanUp(String baseDocument) {
LOG.debug("Running cleanup...");
Batcher batcher = new Batcher();
walkDoc(batcher, baseDocument);
batcher.flush();
LOG.debug("Running cleanup complete");
}
private void walkDoc(Batcher batcher, String baseDocument) {
batcher.checkState();
listCollectionIds(baseDocument).forEach(col -> walkCol(batcher, col));
batcher.add(baseDocument);
}
private void walkCol(Batcher batcher, String baseCollection) {
batcher.checkState();
if (dataLayout == DataLayout.Shallow) {
// queries are much faster for flat listing when we don't need to try and account for
// `show_missing`
listDocumentsViaQuery(baseCollection).forEach(batcher::add);
// flush any pending writes before we start recursively walking down the tree
batcher.flush();
}
listDocumentIds(baseCollection).forEach(doc -> walkDoc(batcher, doc));
}
static DocumentReference getBaseDocument(Firestore fs, Class<?> testClass, String testName) {
return fs.collection("beam")
.document("IT")
.collection(BASE_COLLECTION_ID)
.document(testClass.getSimpleName())
.collection("test")
.document(testName);
}
static <T> Stream<List<T>> chunkUpDocIds(List<T> things) {
return Streams.stream(Iterables.partition(things, 500));
}
static <T> ApiFunction<List<List<T>>, List<T>> flattenListList() {
return input -> {
List<T> retVal = new ArrayList<>();
for (List<T> writeResults : input) {
retVal.addAll(writeResults);
}
return retVal;
};
}
@SuppressWarnings("nullness")
static String assumeEnvVarSet(@NonNull String name) {
LOG.debug(">>> assumeEnvVarSet(name : {})", name);
try {
String value = System.getenv(name);
LOG.debug("value = {}", value);
assumeThat(name + " not set", value, is(notNullValue()));
return value;
} finally {
LOG.debug("<<< assumeEnvVarSet(name : {})", name);
}
}
private static String id(String docOrCol, int counter) {
return String.format("%s-%05d", docOrCol, counter);
}
final class DocumentGenerator {
private final List<String> documentIds;
private final String collectionId;
private final boolean addBazDoc;
private DocumentGenerator(int to, String collectionId, boolean addBazDoc) {
this.documentIds =
Collections.unmodifiableList(
IntStream.rangeClosed(1, to).mapToObj(i -> docId()).collect(Collectors.toList()));
this.collectionId = collectionId;
this.addBazDoc = addBazDoc;
}
ApiFuture<List<WriteResult>> generateDocuments() {
// create some documents for listing and asserting in the test
CollectionReference baseCollection = getBaseDocument().collection(collectionId);
List<ApiFuture<List<WriteResult>>> futures =
Streams.concat(
chunkUpDocIds(documentIds)
.map(
chunk -> {
WriteBatch batch = fs.batch();
chunk.stream()
.map(baseCollection::document)
.forEach(ref -> batch.set(ref, ImmutableMap.of("foo", "bar")));
return batch.commit();
}),
Stream.of(Optional.of(addBazDoc))
.filter(o -> o.filter(b -> b).isPresent())
.map(
x -> {
WriteBatch batch = fs.batch();
batch.set(baseCollection.document(), ImmutableMap.of("foo", "baz"));
return batch.commit();
}))
.collect(Collectors.toList());
return ApiFutures.transform(
ApiFutures.allAsList(futures),
FirestoreTestingHelper.flattenListList(),
MoreExecutors.directExecutor());
}
List<String> getDocumentIds() {
return documentIds;
}
List<String> expectedDocumentPaths() {
return documentIds.stream()
.map(
id ->
String.format(
"%s/%s/%s",
getDocumentRoot(), getBaseDocument().collection(collectionId).getPath(), id))
.collect(Collectors.toList());
}
}
@SuppressWarnings({
"nullness",
"initialization.fields.uninitialized"
}) // batch is managed as part of use
private final class Batcher {
private static final int MAX_BATCH_SIZE = 500;
private final BatchWriteRequest.Builder batch;
private boolean failed;
public Batcher() {
batch = BatchWriteRequest.newBuilder().setDatabase(getDatabase());
this.failed = false;
}
public void add(String docName) {
checkState();
batch.addWrites(Write.newBuilder().setDelete(docName));
}
private void checkState() {
requireNotFailed();
maybeFlush();
}
private void requireNotFailed() {
if (failed) {
throw new IllegalStateException(
"Previous batch commit failed, unable to enqueue new operation");
}
}
private void maybeFlush() {
if (batch.getWritesCount() == MAX_BATCH_SIZE) {
flush();
}
}
private void flush() {
if (batch.getWritesCount() == 0) {
return;
}
try {
LOG.trace("Flushing {} elements...", batch.getWritesCount());
rpc.batchWriteCallable().futureCall(batch.build()).get(30, TimeUnit.SECONDS);
LOG.trace("Flushing {} elements complete", batch.getWritesCount());
batch.clearWrites();
} catch (InterruptedException | ExecutionException | TimeoutException e) {
failed = true;
throw new RuntimeException(e);
}
}
}
}
| 5,942 |
1,063 | [
{
"category": "``ec2``",
"description": "Update ec2 client to latest version",
"type": "api-change"
},
{
"category": "``s3``",
"description": "Update s3 client to latest version",
"type": "api-change"
},
{
"category": "``resourcegroupstaggingapi``",
"description": "Update resourcegroupstaggingapi client to latest version",
"type": "api-change"
},
{
"category": "``cloudfront``",
"description": "Update cloudfront client to latest version",
"type": "api-change"
},
{
"category": "``s3``",
"description": "Add support for opting into using the us-east-1 regional endpoint.",
"type": "enhancement"
},
{
"category": "``opsworkscm``",
"description": "Update opsworkscm client to latest version",
"type": "api-change"
}
] | 318 |
'''This example demonstrates the use of Convolution1D for text classification.
Gets to 0.89 test accuracy after 2 epochs.
90s/epoch on Intel i5 2.4Ghz CPU.
10s/epoch on Tesla K40 GPU.
'''
from __future__ import print_function
import numpy as np
import keras.callbacks
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb
from example_correctness_test_utils import TrainingHistory, StopwatchManager

# set parameters:
max_features = 5000  # vocabulary size: keep only the top-5000 most frequent words
maxlen = 400  # pad/truncate every review to exactly 400 token indices
batch_size = 32
embedding_dims = 50  # dimensionality of the learned word embeddings
filters = 250  # number of convolution filters (output channels)
kernel_size = 3  # word-window size each filter spans
hidden_dims = 250  # width of the fully-connected layer after pooling
epochs = 2

print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)

# Truncate the datasets so this correctness-test example runs quickly.
train_input_truncation = 1500
test_input_truncation = 200
x_train = x_train[:train_input_truncation]
y_train = y_train[:train_input_truncation]
x_test = x_test[:test_input_truncation]
y_test = y_test[:test_input_truncation]
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

print('Pad sequences (samples x time)')
# Left-pad (or truncate) each review to a fixed length so it can be batched.
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

print('Build model...')
model = Sequential()

# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
model.add(Dropout(0.2))

# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())

# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))

# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))

# NOTE(review): `stop_watch`, `compile_stop_watch`, and `output` are not defined in this
# file -- presumably injected into the module namespace by the benchmark/test harness that
# execs this example (see example_correctness_test_utils); confirm before running standalone.
history = TrainingHistory()
sw_manager = StopwatchManager(stop_watch, compile_stop_watch)

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test),
          callbacks=[history, sw_manager])

# Publish the per-epoch metric curves for the harness to assert against.
output.contents = np.array([history.acc, history.loss, history.val_acc, history.val_loss])
| 915 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.