max_stars_count | text | token_count
---|---|---
int64 · 301 – 224k | stringlengths · 6 – 1.05M | int64 · 3 – 727k
2,268 | //========= Copyright Valve Corporation, All rights reserved. ============//
//
// cglmbuffer.h
// GLMgr buffers (index / vertex)
// ... maybe add PBO later as well
//===============================================================================
#ifndef CGLMBUFFER_H
#define CGLMBUFFER_H
#pragma once
//===============================================================================
extern bool g_bUsePseudoBufs;
// forward declarations
class GLMContext;
enum EGLMBufferType
{
kGLMVertexBuffer,
kGLMIndexBuffer,
kGLMUniformBuffer, // for bindable uniform
kGLMPixelBuffer, // for PBO
kGLMNumBufferTypes
};
// pass this in "options" to constructor to make a dynamic buffer
#define GLMBufferOptionDynamic 0x00000001
struct GLMBuffLockParams
{
uint m_nOffset;
uint m_nSize;
bool m_bNoOverwrite;
bool m_bDiscard;
};
#define GL_STATIC_BUFFER_SIZE ( 2048 * 1024 )
#define GL_MAX_STATIC_BUFFERS 2
extern void glBufferSubDataMaxSize( GLenum target, GLintptr offset, GLsizeiptr size, const GLvoid *data, uint nMaxSizePerCall = 128 * 1024 );
//===============================================================================
#if GL_ENABLE_INDEX_VERIFICATION
struct GLDynamicBuf_t
{
GLenum m_nGLType;
uint m_nHandle;
uint m_nActualBufSize;
uint m_nSize;
uint m_nLockOffset;
uint m_nLockSize;
};
class CGLMBufferSpanManager
{
CGLMBufferSpanManager( const CGLMBufferSpanManager& );
CGLMBufferSpanManager& operator= ( const CGLMBufferSpanManager& );
public:
CGLMBufferSpanManager();
~CGLMBufferSpanManager();
void Init( GLMContext *pContext, EGLMBufferType nBufType, uint nInitialCapacity, uint nBufSize, bool bDynamic );
void Deinit();
inline GLMContext *GetContext() const { return m_pCtx; }
inline GLenum GetGLBufType() const { return ( m_nBufType == kGLMVertexBuffer ) ? GL_ARRAY_BUFFER_ARB : GL_ELEMENT_ARRAY_BUFFER_ARB; }
struct ActiveSpan_t
{
uint m_nStart;
uint m_nEnd;
GLDynamicBuf_t m_buf;
bool m_bOriginalAlloc;
inline ActiveSpan_t() { }
inline ActiveSpan_t( uint nStart, uint nEnd, GLDynamicBuf_t &buf, bool bOriginalAlloc ) : m_nStart( nStart ), m_nEnd( nEnd ), m_buf( buf ), m_bOriginalAlloc( bOriginalAlloc ) { Assert( nStart <= nEnd ); }
};
ActiveSpan_t *AddSpan( uint nOffset, uint nMaxSize, uint nActualSize, bool bDiscard, bool bNoOverwrite );
void DiscardAllSpans();
bool IsValid( uint nOffset, uint nSize ) const;
private:
bool AllocDynamicBuf( uint nSize, GLDynamicBuf_t &buf );
void ReleaseDynamicBuf( GLDynamicBuf_t &buf );
GLMContext *m_pCtx;
EGLMBufferType m_nBufType;
uint m_nBufSize;
bool m_bDynamic;
CUtlVector<ActiveSpan_t> m_ActiveSpans;
CUtlVector<ActiveSpan_t> m_DeletedSpans;
int m_nSpanEndMax;
int m_nNumAllocatedBufs;
int m_nTotalBytesAllocated;
};
#endif // GL_ENABLE_INDEX_VERIFICATION
class CGLMBuffer
{
public:
void Lock( GLMBuffLockParams *pParams, char **pAddressOut );
void Unlock( int nActualSize = -1, const void *pActualData = NULL );
friend class GLMContext; // only GLMContext can make CGLMBuffer objects
friend class GLMTester;
friend struct IDirect3D9;
friend struct IDirect3DDevice9;
CGLMBuffer( GLMContext *pCtx, EGLMBufferType type, uint size, uint options );
~CGLMBuffer();
void SetModes( bool bAsyncMap, bool bExplicitFlush, bool bForce = false );
void FlushRange( uint offset, uint size );
#if GL_ENABLE_INDEX_VERIFICATION
bool IsSpanValid( uint nOffset, uint nSize ) const;
#endif
GLMContext *m_pCtx; // link back to parent context
EGLMBufferType m_type;
uint m_nSize;
uint m_nActualSize;
bool m_bDynamic;
GLenum m_buffGLTarget; // GL_ARRAY_BUFFER_ARB / GL_ELEMENT_ARRAY_BUFFER_ARB
GLuint m_nHandle; // name of this buffer object in the context
uint m_nRevision; // bump anytime the size changes or buffer is orphaned
bool m_bEnableAsyncMap; // mirror of the buffer state
bool m_bEnableExplicitFlush; // mirror of the buffer state
bool m_bMapped; // is it currently mapped
uint m_dirtyMinOffset; // when equal, range is empty
uint m_dirtyMaxOffset;
float *m_pLastMappedAddress;
int m_nPinnedMemoryOfs;
bool m_bPseudo; // true if m_nHandle is 0 and the backing is plain RAM
// in pseudo mode, there is just one RAM buffer that acts as the backing.
// expectation is that this mode would only be used for dynamic indices.
// since indices have to be consumed (copied to command stream) prior to return from a drawing call,
// there's no need to do any fencing or multibuffering. orphaning in particular becomes a no-op.
char *m_pActualPseudoBuf; // storage for pseudo buffer
char *m_pPseudoBuf; // storage for pseudo buffer
char *m_pStaticBuffer;
GLMBuffLockParams m_LockParams;
static char ALIGN16 m_StaticBuffers[ GL_MAX_STATIC_BUFFERS ][ GL_STATIC_BUFFER_SIZE ] ALIGN16_POST;
static bool m_bStaticBufferUsed[ GL_MAX_STATIC_BUFFERS ];
#if GL_ENABLE_INDEX_VERIFICATION
CGLMBufferSpanManager m_BufferSpanManager;
#endif
#if GL_ENABLE_UNLOCK_BUFFER_OVERWRITE_DETECTION
uint m_nDirtyRangeStart;
uint m_nDirtyRangeEnd;
#endif
};
#endif // CGLMBUFFER_H
| 2,000 |
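The GLMBuffLockParams flags above encode the usual dynamic-buffer contract: a discard lock orphans the storage (hence the m_nRevision bump noted in the header), a no-overwrite lock promises not to touch in-flight bytes, and the m_dirtyMinOffset/m_dirtyMaxOffset pair accumulates the span a later FlushRange must cover. A minimal sketch of that bookkeeping (names and behavior are illustrative, not Valve's implementation):

```python
# Illustrative sketch of the lock bookkeeping implied by GLMBuffLockParams:
# "discard" orphans the storage (bumps the revision), "no_overwrite" promises
# not to touch bytes still in flight, so no synchronization would be needed.
class BufferSketch:
    def __init__(self, size):
        self.size = size
        self.revision = 0
        self.dirty_min = 0          # when dirty_min == dirty_max the range is empty
        self.dirty_max = 0
        self.mapped = False

    def lock(self, offset, length, discard=False, no_overwrite=False):
        assert not self.mapped and offset + length <= self.size
        if discard:
            self.revision += 1      # orphan: pretend we got fresh storage
            self.dirty_min = self.dirty_max = 0
        self.mapped = True
        self.lock_offset, self.lock_size = offset, length

    def unlock(self, actual_size=-1):
        assert self.mapped
        written = self.lock_size if actual_size < 0 else actual_size
        # grow the dirty range so a later FlushRange covers everything touched
        self.dirty_min = min(self.dirty_min, self.lock_offset)
        self.dirty_max = max(self.dirty_max, self.lock_offset + written)
        self.mapped = False

buf = BufferSketch(2048 * 1024)
buf.lock(0, 4096, discard=True)
buf.unlock(1024)
assert (buf.dirty_min, buf.dirty_max) == (0, 1024)
```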
657 | /*
* Copyright (c) 2017 <NAME> <https://github.com/bschwind>
* Copyright (c) 2019 <NAME> <<EMAIL>>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file tsl4531.h
* @defgroup tsl4531 tsl4531
* @{
*
* ESP-IDF driver for digital ambient light sensor TSL4531
*
* Ported from esp-open-rtos
*
* Copyright (c) 2017 <NAME> <https://github.com/bschwind>\n
* Copyright (c) 2019 <NAME> <<EMAIL>>
*
* BSD Licensed as described in the file LICENSE
*/
#ifndef __TSL4531_H__
#define __TSL4531_H__
#include <stdint.h>
#include <stdbool.h>
#include <i2cdev.h>
#include <esp_err.h>
#ifdef __cplusplus
extern "C" {
#endif
#define TSL4531_I2C_ADDR 0x29
/**
* Integration time
*/
typedef enum
{
TSL4531_INTEGRATION_400MS = 0x00, //!< Default
TSL4531_INTEGRATION_200MS = 0x01,
TSL4531_INTEGRATION_100MS = 0x02,
} tsl4531_integration_time_t;
/**
* Part IDs
*/
typedef enum
{
TSL4531_PART_TSL45317 = 0x08,
TSL4531_PART_TSL45313 = 0x09,
TSL4531_PART_TSL45315 = 0x0A,
TSL4531_PART_TSL45311 = 0x0B
} tsl4531_part_id_t;
/**
* Device descriptor
*/
typedef struct {
i2c_dev_t i2c_dev;
tsl4531_integration_time_t integration_time;
bool skip_power_save;
tsl4531_part_id_t part_id;
} tsl4531_t;
/**
* @brief Initialize device descriptor
*
* @param dev Device descriptor
* @param port I2C port
* @param sda_gpio SDA GPIO pin
* @param scl_gpio SCL GPIO pin
* @return `ESP_OK` on success
*/
esp_err_t tsl4531_init_desc(tsl4531_t *dev, i2c_port_t port, gpio_num_t sda_gpio, gpio_num_t scl_gpio);
/**
* @brief Free device descriptor
*
* @param dev Device descriptor
* @return `ESP_OK` on success
*/
esp_err_t tsl4531_free_desc(tsl4531_t *dev);
/**
* @brief Initialize device
*
* @param dev Device descriptor
* @return `ESP_OK` on success
*/
esp_err_t tsl4531_init(tsl4531_t *dev);
/**
* @brief Configure device
*
* @param dev Device descriptor
* @param integration_time Integration time
* @param skip_power_save PowerSave Mode. When true, the power save states are
* skipped following a light integration cycle for shorter sampling rates
* @return `ESP_OK` on success
*/
esp_err_t tsl4531_config(tsl4531_t *dev, tsl4531_integration_time_t integration_time, bool skip_power_save);
/**
* @brief Read conversion results in lux
*
* @param dev Device descriptor
* @param[out] lux Conversion result in lux
* @return `ESP_OK` on success
*/
esp_err_t tsl4531_read_lux(tsl4531_t *dev, uint16_t *lux);
#ifdef __cplusplus
}
#endif
/**@}*/
#endif // __TSL4531_H__
| 1,456 |
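The integration-time enum above exists because the sensor's raw ALS data word has to be rescaled when the integration window is shortened; per the TSL4531 datasheet the multiplier is 1x at 400 ms, 2x at 200 ms, and 4x at 100 ms. A sketch of the conversion a tsl4531_read_lux implementation presumably performs:

```python
# Sketch of the lux conversion a TSL4531 driver typically performs: the
# 16-bit ALS data word is scaled by a multiplier compensating for the
# shortened integration time (datasheet: x1 @ 400 ms, x2 @ 200 ms, x4 @ 100 ms).
TSL4531_INTEGRATION_400MS = 0x00
TSL4531_INTEGRATION_200MS = 0x01
TSL4531_INTEGRATION_100MS = 0x02

_MULTIPLIER = {
    TSL4531_INTEGRATION_400MS: 1,
    TSL4531_INTEGRATION_200MS: 2,
    TSL4531_INTEGRATION_100MS: 4,
}

def raw_to_lux(raw_data: int, integration_time: int) -> int:
    """Convert the raw DATA register value to lux."""
    return _MULTIPLIER[integration_time] * raw_data

assert raw_to_lux(500, TSL4531_INTEGRATION_200MS) == 1000
```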
335 | {
"word": "Hyperparasite",
"definitions": [
"A parasite whose host is itself a parasite."
],
"parts-of-speech": "Noun"
} | 63 |
792 | {
"name": "<NAME>",
"location": "Cluj",
"country": "RO",
"region": "Europe",
"organizers": ["alexandra21p","danacalcaianu","sergiucc","Mankirk","sebikovacs","alexnm"],
"website": "http://nodeschool.io/cluj",
"twitter": "",
"repo": "http://github.com/nodeschool/cluj"
}
| 122 |
1,863 | <filename>PhysX_3.4/Source/LowLevelCloth/src/windows/CuDeviceVector.h
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2018 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
#include "foundation/PxMath.h" // for swap
#include "cudamanager/PxCudaMemoryManager.h"
#include "cudamanager/PxCudaContextManager.h"
#include "CuDevicePointer.h"
#include "PsArray.h"
#include "PsUtilities.h"
namespace physx
{
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4365) // 'action' : conversion from 'type_1' to 'type_2', signed/unsigned mismatch
#endif
namespace cloth
{
// STL-style vector that holds POD types in CUDA device memory. The interface
// is not complete, add whatever you need from the std::vector interface.
template <typename T>
class CuDeviceVector
{
public:
typedef CuDevicePointer<T> iterator;
typedef CuDevicePointer<const T> const_iterator;
CuDeviceVector(physx::PxCudaContextManager* ctx) : mManager(0)
{
PX_ASSERT(ctx);
if(ctx)
mManager = ctx->getMemoryManager();
}
CuDeviceVector(const CuDeviceVector& other) : mManager(other.getMemoryManager())
{
PX_ASSERT(mManager);
operator=(other);
}
CuDeviceVector(physx::PxCudaContextManager* ctx, const T* first, const T* last) : mManager(0)
{
PX_ASSERT(ctx);
if(ctx)
{
mManager = ctx->getMemoryManager();
assign(first, last);
}
}
template <typename Alloc>
CuDeviceVector(const shdfnd::Array<T, Alloc>& other)
{
operator=(other);
}
~CuDeviceVector()
{
PX_ASSERT(mManager);
mManager->free(physx::PxCudaBufferMemorySpace::T_GPU, mFirst.dev());
}
CuDeviceVector& operator=(const CuDeviceVector& other)
{
resize(other.size());
checkSuccess(cuMemcpyDtoD(mFirst.dev(), other.mFirst.dev(), other.size() * sizeof(T)));
return *this;
}
template <typename Alloc>
CuDeviceVector& operator=(const shdfnd::Array<T, Alloc>& other)
{
const T* first = other.empty() ? 0 : &other.front();
assign(first, first + other.size());
return *this;
}
bool empty() const
{
return mLast == mFirst;
}
size_t size() const
{
return size_t(mLast - mFirst);
}
size_t capacity() const
{
return mEnd - mFirst;
}
iterator begin()
{
return mFirst;
}
iterator end()
{
return mLast;
}
const_iterator begin() const
{
return mFirst;
}
const_iterator end() const
{
return mLast;
}
void push_back(const T& v)
{
if(mLast == mEnd)
reserve(PxMax<size_t>(1, capacity() * 2));
*mLast++ = v;
}
void push_back(const T* first, const T* last)
{
if(mEnd - mLast < last - first)
reserve(PxMax<size_t>(2 * capacity(), mLast - mFirst + last - first));
if(first != last)
checkSuccess(cuMemcpyHtoD(mLast.dev(), first, sizeof(T) * (last - first)));
mLast += last - first;
}
void erase(iterator it)
{
size_t byteSize = (mLast - it - 1) * sizeof(T);
if(byteSize)
{
CUdeviceptr tmp = 0, dst = it.dev();
PX_ASSERT(mManager);
tmp = mManager->alloc(physx::PxCudaBufferMemorySpace::T_GPU, byteSize,
PX_ALLOC_INFO("cloth::CuDeviceVector::T_GPU", CLOTH));
checkSuccess(cuMemcpyDtoD(tmp, dst + sizeof(T), byteSize));
checkSuccess(cuMemcpyDtoD(dst, tmp, byteSize));
mManager->free(physx::PxCudaBufferMemorySpace::T_GPU, tmp);
}
--mLast;
}
void reserve(size_t n)
{
if(n <= capacity())
return;
CUdeviceptr newFirst = 0, oldFirst = mFirst.dev();
PX_ASSERT(mManager);
newFirst = mManager->alloc(physx::PxCudaBufferMemorySpace::T_GPU, sizeof(T) * n,
PX_ALLOC_INFO("cloth::CuDeviceVector::T_GPU", CLOTH));
checkSuccess(cuMemcpyDtoD(newFirst, oldFirst, sizeof(T) * size()));
mManager->free(physx::PxCudaBufferMemorySpace::T_GPU, oldFirst);
iterator first(reinterpret_cast<T*>(newFirst));
mEnd = first + n;
mLast = first + size();
mFirst = first;
}
void resize(size_t n)
{
if(capacity() < n)
reserve(PxMax(n, capacity() * 2));
mLast = mFirst + n;
}
void assign(const T* first, const T* last)
{
size_t n = last - first;
resize(n);
checkSuccess(cuMemcpyHtoD(mFirst.dev(), first, n * sizeof(T)));
}
void swap(CuDeviceVector& other)
{
shdfnd::swap(mFirst, other.mFirst);
shdfnd::swap(mLast, other.mLast);
shdfnd::swap(mEnd, other.mEnd);
}
// match PxArray interface
void remove(size_t i)
{
erase(begin() + i);
}
void pushBack(const T& v)
{
push_back(v);
}
physx::PxCudaMemoryManager* getMemoryManager() const
{
return mManager;
}
private:
iterator mFirst, mLast, mEnd;
physx::PxCudaMemoryManager* mManager;
};
} // namespace cloth
} // namespace physx
#if PX_VC
#pragma warning(pop)
#endif
namespace physx
{
namespace shdfnd
{
template <typename T>
void swap(physx::cloth::CuDeviceVector<T>& first, physx::cloth::CuDeviceVector<T>& second)
{
first.swap(second);
}
}
}
| 2,430 |
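Two details of CuDeviceVector worth spelling out: push_back grows capacity geometrically (reserve(PxMax(1, capacity() * 2))), and erase stages the tail through a temporary device buffer because overlapping cuMemcpyDtoD ranges are not safe. A host-side Python sketch of the same policies:

```python
# Host-side sketch of CuDeviceVector's growth and erase policies: capacity
# doubles on overflow, and erase() shifts the tail left by one element
# (the CUDA version stages the tail through a temporary buffer first).
class VectorSketch:
    def __init__(self):
        self._data, self._size, self._cap = [], 0, 0

    def reserve(self, n):
        if n > self._cap:
            self._data.extend([None] * (n - self._cap))  # "allocate" new storage
            self._cap = n

    def push_back(self, v):
        if self._size == self._cap:
            self.reserve(max(1, self._cap * 2))
        self._data[self._size] = v
        self._size += 1

    def erase(self, i):
        tail = self._data[i + 1:self._size]   # staged copy of the tail ...
        self._data[i:i + len(tail)] = tail    # ... then copied back one slot left
        self._size -= 1

v = VectorSketch()
for x in range(5):
    v.push_back(x)
v.erase(1)
assert v._data[:v._size] == [0, 2, 3, 4]
```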
412 | #include <assert.h>
void main()
{
switch(0)
{
int a;
default:
a = 0;
if(a)
assert(0);
}
switch(1)
{
int b;
b = 42;
}
int *p = (void *)0;
switch(2)
{
int c;
case 3:
p = &c;
case 2:
break;
}
assert(p == 0);
switch(3)
{
int d;
case 3:
p = &d;
*p = 42;
break;
default:
d = 1;
break;
}
assert(*p == 42); // invalid dereference
switch(0)
{
int e = 42;
int f = 42;
case 0:
assert(e == 42); // does not hold as the initialisation is unreachable
default:
assert(f == 42); // does not hold as the initialisation is unreachable
}
}
| 315 |
631 | <filename>opengl_helper/data_set.py
import abc
from typing import Any, Callable, List, Tuple
from opengl_helper.shader import BaseShader
from opengl_helper.vertex_data_handler import VertexDataHandler, LayeredVertexDataHandler, OverflowingVertexDataHandler
from rendering.rendering_config import RenderingConfig
class BaseRenderSet(abc.ABC):
def __init__(self, shader: BaseShader, render_func: Callable, element_count_func: Callable):
self.shader: BaseShader = shader
self.uniform_settings: List[str] = []
self.render_func: Callable = render_func
self.element_count_func: Callable = element_count_func
def set_uniform_label(self, data: List[str]):
if self.shader is not None:
self.shader.set_uniform_label(data)
def set_uniform_data(self, data: List[Tuple[str, Any, Any]]):
if self.shader is not None:
self.shader.set_uniform_data(data)
def set_uniform_labeled_data(self, config: RenderingConfig):
if self.shader is not None:
self.shader.set_uniform_labeled_data(config)
@abc.abstractmethod
def render(self):
pass
class RenderSet(BaseRenderSet):
def __init__(self, shader: BaseShader, data_handler: VertexDataHandler, render_func: Callable,
element_count_func: Callable):
super().__init__(shader, render_func, element_count_func)
self.data_handler: VertexDataHandler = data_handler
def render(self):
if self.shader is not None:
self.shader.use()
self.data_handler.set(True)
self.render_func(self.element_count_func())
class LayeredRenderSet(BaseRenderSet):
def __init__(self, shader: BaseShader, data_handler: LayeredVertexDataHandler, render_func: Callable,
element_count_func: Callable):
super().__init__(shader, render_func, element_count_func)
self.data_handler: LayeredVertexDataHandler = data_handler
self.buffer_divisor: List[Tuple[int, int]] = []
def set_buffer_divisor(self, buffer_divisor: List[Tuple[int, int]]):
self.buffer_divisor: List[Tuple[int, int]] = buffer_divisor
def render(self):
if self.shader is not None:
self.shader.use()
for buffer in iter(self.data_handler):
buffer.set(True)
buffer.buffer_divisor = self.buffer_divisor
self.render_func(
self.element_count_func(self.data_handler.current_layer_id,
self.data_handler.current_sub_buffer_id))
class OverflowingRenderSet(BaseRenderSet):
def __init__(self, shader: BaseShader, data_handler: OverflowingVertexDataHandler, render_func: Callable,
element_count_func: Callable):
super().__init__(shader, render_func, element_count_func)
self.data_handler: OverflowingVertexDataHandler = data_handler
def render_sub(self, buffer_index: int = 0):
if self.shader is not None:
self.shader.use()
self.data_handler.set_buffer(buffer_index)
self.data_handler.set(True)
def render(self):
if self.shader is not None:
self.shader.use()
for i in range(len(self.data_handler.targeted_overflowing_buffer_objects[0][0].handle)):
self.data_handler.set_buffer(i)
self.data_handler.set(True)
self.render_func(self.element_count_func(i))
| 1,535 |
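A RenderSet above is just a shader, a vertex-data handler, and two callables: render() uses the shader, binds the handler, and invokes render_func with whatever element_count_func returns. A hypothetical usage sketch (assumes the RenderSet class above is importable; the mock objects stand in for real shader and handler instances):

```python
# Hypothetical usage sketch; shader/handler objects are mocked, and the
# draw function name is illustrative, not part of opengl_helper's API.
from unittest.mock import MagicMock

shader = MagicMock(name="BaseShader")
handler = MagicMock(name="VertexDataHandler")
drawn = []

def draw_triangles(count):          # render_func: would issue the GL draw call
    drawn.append(count)

render_set = RenderSet(shader, handler, draw_triangles, lambda: 36)
render_set.render()                 # use shader, bind handler, draw 36 elements

assert drawn == [36]
shader.use.assert_called_once()
handler.set.assert_called_once_with(True)
```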
388 | package com.amazonaws.services.kinesis.producer;
import com.amazonaws.services.schemaregistry.common.configs.GlueSchemaRegistryConfiguration;
import com.amazonaws.services.schemaregistry.serializers.GlueSchemaRegistrySerializer;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
public class GlueSchemaRegistrySerializerInstanceTest {
private static final String REGION = "us-west-1";
private GlueSchemaRegistrySerializerInstance glueSchemaRegistrySerializerInstance = new GlueSchemaRegistrySerializerInstance();
@Test
public void testGet_Returns_SingletonInstance() {
KinesisProducerConfiguration configuration = new KinesisProducerConfiguration();
configuration.setRegion(REGION);
GlueSchemaRegistrySerializer serializer1 =
glueSchemaRegistrySerializerInstance.get(configuration);
assertNotNull(serializer1);
GlueSchemaRegistrySerializer serializer2 =
glueSchemaRegistrySerializerInstance.get(configuration);
assertEquals(serializer1.hashCode(), serializer2.hashCode());
}
@Test
public void testGet_Returns_WhenGlueConfigurationIsExplicitlyConfigured() {
KinesisProducerConfiguration configuration = new KinesisProducerConfiguration();
configuration.setGlueSchemaRegistryConfiguration(new GlueSchemaRegistryConfiguration(REGION));
GlueSchemaRegistrySerializer serializer =
glueSchemaRegistrySerializerInstance.get(configuration);
assertNotNull(serializer);
}
} | 530 |
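The test asserts hashCode equality across two get() calls, i.e. that GlueSchemaRegistrySerializerInstance hands back one process-wide serializer. A sketch of the lazily-initialized, double-checked holder such a class presumably implements (the real class is Java; this Python equivalent is illustrative only):

```python
# Sketch of the lazily-initialized singleton holder the test above exercises.
import threading

class SerializerInstance:
    def __init__(self):
        self._instance = None
        self._lock = threading.Lock()

    def get(self, configuration):
        if self._instance is None:            # first check without the lock
            with self._lock:
                if self._instance is None:    # re-check under the lock
                    self._instance = self._build(configuration)
        return self._instance

    def _build(self, configuration):
        return object()                       # stands in for the real serializer

holder = SerializerInstance()
a = holder.get({"region": "us-west-1"})
b = holder.get({"region": "us-west-1"})
assert a is b                                 # same instance both times
```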
1,093 | <reponame>rahulsai9550/c
/*
============================================================================
Name : TCP_Sender.h
Author : GodisC00L
Description : Simple TCP sender. Expected input {IP address, port, number of parts, file name}
============================================================================
*/
#ifndef C_1_TCP_SENDER_H
#define C_1_TCP_SENDER_H
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#endif //C_1_TCP_SENDER_H
| 205 |
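The header documents the expected input {IP address, port, number of parts, file name} but shows no function signatures, so the following send loop is entirely assumed: connect once, split the file into the requested number of parts, and send them in order.

```python
# Assumed send loop implied by the header's description; the actual C
# implementation is not shown, so every detail here is illustrative.
import socket

def send_file_in_parts(ip: str, port: int, parts: int, path: str) -> None:
    data = open(path, "rb").read()
    chunk = max(1, (len(data) + parts - 1) // parts)   # ceil-divide into parts
    with socket.create_connection((ip, port)) as sock:
        for i in range(parts):
            sock.sendall(data[i * chunk:(i + 1) * chunk])

# send_file_in_parts("127.0.0.1", 9000, 4, "payload.bin")
```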
2,460 | /**
* Copyright (C) 2015 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.fabric8.kubernetes.client.internal.readiness;
import io.fabric8.kubernetes.api.model.ServiceBuilder;
import io.fabric8.kubernetes.api.model.apps.StatefulSet;
import io.fabric8.kubernetes.api.model.apps.StatefulSetSpec;
import io.fabric8.kubernetes.api.model.apps.StatefulSetStatus;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
class ReadinessTest {
private Readiness readiness;
@BeforeEach
void setUp() {
readiness = Readiness.getInstance();
}
@Test
void testStatefulSetReadinessNoSpecNoStatus() {
StatefulSet statefulSet = new StatefulSet();
assertFalse(readiness.isReady(statefulSet));
assertFalse(Readiness.isStatefulSetReady(statefulSet));
}
@Test
void testStatefulSetReadinessNoSpec() {
StatefulSetStatus status = new StatefulSetStatus();
StatefulSet statefulSet = new StatefulSet();
statefulSet.setStatus(status);
assertFalse(readiness.isReady(statefulSet));
assertFalse(Readiness.isStatefulSetReady(statefulSet));
status.setReadyReplicas(1);
assertFalse(readiness.isReady(statefulSet));
assertFalse(Readiness.isStatefulSetReady(statefulSet));
}
@Test
void testStatefulSetReadinessNoStatus() {
StatefulSetSpec spec = new StatefulSetSpec();
spec.setReplicas(1);
StatefulSet statefulSet = new StatefulSet();
statefulSet.setSpec(spec);
assertFalse(readiness.isReady(statefulSet));
assertFalse(Readiness.isStatefulSetReady(statefulSet));
}
@Test
void testStatefulSetReadinessNotEnoughReadyReplicas() {
StatefulSetStatus status = new StatefulSetStatus();
status.setReadyReplicas(1);
status.setReplicas(2);
StatefulSetSpec spec = new StatefulSetSpec();
spec.setReplicas(2);
StatefulSet statefulSet = new StatefulSet();
statefulSet.setStatus(status);
statefulSet.setSpec(spec);
assertFalse(readiness.isReady(statefulSet));
assertFalse(Readiness.isStatefulSetReady(statefulSet));
}
@Test
void testStatefulSetReadiness() {
StatefulSetStatus status = new StatefulSetStatus();
status.setReadyReplicas(2);
status.setReplicas(2);
StatefulSetSpec spec = new StatefulSetSpec();
spec.setReplicas(2);
StatefulSet statefulSet = new StatefulSet();
statefulSet.setStatus(status);
statefulSet.setSpec(spec);
assertTrue(readiness.isReady(statefulSet));
assertTrue(Readiness.isStatefulSetReady(statefulSet));
}
@Test
void testReadinessWithNonNullResource() {
assertTrue(readiness.isReady(new ServiceBuilder().withNewMetadata().withName("svc1").endMetadata().build()));
}
@Test
void testReadinessNullResource() {
assertFalse(readiness.isReady(null));
}
}
| 1,152 |
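Taken together, the tests pin down the readiness rule: a StatefulSet is not ready when either spec or status is missing, and otherwise ready exactly when status.readyReplicas matches spec.replicas. A sketch of that predicate:

```python
# The readiness predicate the tests above pin down, over plain dicts.
def is_stateful_set_ready(stateful_set: dict) -> bool:
    spec = stateful_set.get("spec")
    status = stateful_set.get("status")
    if not spec or not status:
        return False
    desired = spec.get("replicas")
    ready = status.get("readyReplicas")
    return desired is not None and ready == desired

assert not is_stateful_set_ready({})                     # no spec, no status
assert not is_stateful_set_ready({"spec": {"replicas": 2},
                                  "status": {"readyReplicas": 1, "replicas": 2}})
assert is_stateful_set_ready({"spec": {"replicas": 2},
                              "status": {"readyReplicas": 2, "replicas": 2}})
```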
356 | <filename>command.c
/*
+----------------------------------------------------------------------+
| Copyright (c) 1997-2017 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| <EMAIL> so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: <NAME> <<EMAIL>> |
+----------------------------------------------------------------------+
*/
#include <stdio.h>
#include <unistd.h>
#include <event2/buffer.h>
#include <event2/bufferevent.h>
#include <event2/bufferevent_struct.h>
const static char *NEW_LINE = "\n";
const static int MAX_BUF_SIZE = 128;
void nsq_subscribe(struct bufferevent *bev, const char *topic, const char *channel) {
char b[MAX_BUF_SIZE];
size_t n;
n = sprintf(b, "SUB %s %s%s", topic, channel, NEW_LINE);
bufferevent_write(bev, b, n);
}
void nsq_ready(struct bufferevent *bev, int count) {
char b[MAX_BUF_SIZE];
size_t n;
n = sprintf(b, "RDY %d%s", count, NEW_LINE);
bufferevent_write(bev, b, n);
}
void nsq_finish(struct bufferevent *bev, const char *id) {
char b[MAX_BUF_SIZE];
size_t n;
n = sprintf(b, "FIN %s%s", id, NEW_LINE);
bufferevent_write(bev, b, n);
}
void nsq_touch(struct bufferevent *bev, const char *id) {
char b[MAX_BUF_SIZE];
size_t n;
n = sprintf(b, "TOUCH %s%s", id, NEW_LINE);
//bufferevent_write(bev, b, n);
evutil_socket_t fd = bufferevent_getfd(bev);
//int res = buffer_write(bev->output, fd);
ssize_t res = write(fd, b, n);
(void) res; /* silence unused-variable warning */
}
void nsq_nop(struct bufferevent *bev) {
char b[MAX_BUF_SIZE];
size_t n;
n = sprintf(b, "NOP%s", NEW_LINE);
bufferevent_write(bev, b, n);
}
void nsq_requeue(struct bufferevent *bev, const char *id, int timeout_ms) {
char b[MAX_BUF_SIZE];
size_t n;
n = sprintf(b, "REQ %s %d%s", id, timeout_ms, NEW_LINE);
bufferevent_write(bev, b, n);
}
| 1,019 |
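Each function above frames one command of NSQ's text protocol: a space-separated verb plus arguments terminated by a single newline, written to the connection's bufferevent. A sketch of the same framing:

```python
# Sketch of the NSQ text-protocol framing used above: verb, space-separated
# arguments, terminated by a single "\n".
def sub(topic: str, channel: str) -> bytes:
    return f"SUB {topic} {channel}\n".encode()

def rdy(count: int) -> bytes:
    return f"RDY {count}\n".encode()

def fin(message_id: str) -> bytes:
    return f"FIN {message_id}\n".encode()

def req(message_id: str, timeout_ms: int) -> bytes:
    return f"REQ {message_id} {timeout_ms}\n".encode()

assert sub("events", "workers") == b"SUB events workers\n"
assert rdy(100) == b"RDY 100\n"
```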
558 | // ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2013, <NAME>, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: <NAME> <<EMAIL>>
// ==========================================================================
#ifndef CORE_INCLUDE_SEQAN_BAM_IO_BAM_WRITER_H_
#define CORE_INCLUDE_SEQAN_BAM_IO_BAM_WRITER_H_
namespace seqan {
// ============================================================================
// Forwards
// ============================================================================
// ============================================================================
// Tags, Classes, Enums
// ============================================================================
// TODO(holtgrew): Allow writing BAM to stdout? Extend Stream<Bgzf>?
class BamWriter_ :
public XamWriter_
{
public:
// The BGZF stream to write to.
Stream<Bgzf> _stream;
// Flag indicating whether there was an error or not.
// TODO(holtgrew): Could we also use streamError()?
bool _isGood;
BamWriter_() :
XamWriter_()
{}
BamWriter_(CharString const & filename);
// XamWriter_ interface.
virtual int open(CharString const & filename);
virtual bool isGood();
virtual int writeHeader(BamHeader const & header,
BamIOContext<StringSet<CharString> > const & context);
virtual int writeRecord(BamAlignmentRecord const & record,
BamIOContext<StringSet<CharString> > const & context);
virtual int flush();
virtual int close();
};
// ============================================================================
// Metafunctions
// ============================================================================
// ============================================================================
// Functions
// ============================================================================
// ----------------------------------------------------------------------------
// Member Function BamWriter_::BamWriter_()
// ----------------------------------------------------------------------------
inline BamWriter_::BamWriter_(CharString const & filename) :
XamWriter_(filename)
{
this->open(filename);
}
// ----------------------------------------------------------------------------
// Member Function BamWriter_::open()
// ----------------------------------------------------------------------------
inline int BamWriter_::open(CharString const & filename)
{
if (!seqan::open(this->_stream, toCString(filename), "w"))
{
_isGood = false;
return 1;
}
return 0;
}
// ----------------------------------------------------------------------------
// Member Function BamWriter_::isGood()
// ----------------------------------------------------------------------------
inline bool BamWriter_::isGood()
{
return this->_isGood;
}
// ----------------------------------------------------------------------------
// Member Function BamWriter_::writeHeader()
// ----------------------------------------------------------------------------
inline int BamWriter_::writeHeader(BamHeader const & header,
BamIOContext<StringSet<CharString> > const & context)
{
return write2(this->_stream, header, context, Bam());
}
// ----------------------------------------------------------------------------
// Member Function BamWriter_::writeRecord()
// ----------------------------------------------------------------------------
inline int BamWriter_::writeRecord(BamAlignmentRecord const & record,
BamIOContext<StringSet<CharString> > const & context)
{
return write2(this->_stream, record, context, Bam());
}
// ----------------------------------------------------------------------------
// Member Function BamWriter_::flush()
// ----------------------------------------------------------------------------
inline int BamWriter_::flush()
{
return streamFlush(this->_stream);
}
// ----------------------------------------------------------------------------
// Member Function BamWriter_::close()
// ----------------------------------------------------------------------------
inline int BamWriter_::close()
{
seqan::close(this->_stream);
return 0;
}
} // namespace seqan
#endif // #ifndef CORE_INCLUDE_SEQAN_BAM_IO_BAM_WRITER_H_
| 1,566 |
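The XamWriter_ interface that BamWriter_ implements implies a fixed lifecycle: open(), writeHeader(), any number of writeRecord() calls, then flush() and close(), with isGood() reporting stream health after a failed open. A sketch of that lifecycle (plain gzip stands in for the BGZF stream):

```python
# Lifecycle sketch of the writer interface above; gzip stands in for BGZF.
import gzip
import os
import tempfile

class WriterSketch:
    def __init__(self, filename):
        self._good = True
        try:
            self._stream = gzip.open(filename, "wb")   # open() step
        except OSError:
            self._good = False

    def is_good(self) -> bool:
        return self._good

    def write_header(self, header: bytes) -> int:
        self._stream.write(header)                     # writeHeader() step
        return 0

    def write_record(self, record: bytes) -> int:
        self._stream.write(record)                     # writeRecord() step
        return 0

    def close(self) -> int:
        self._stream.close()                           # flush + close
        return 0

path = os.path.join(tempfile.gettempdir(), "writer_sketch.gz")
w = WriterSketch(path)
if w.is_good():
    w.write_header(b"@HD\tVN:1.6\n")
    w.write_record(b"read1 ...\n")
    w.close()
```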
491 | #!/usr/bin/env python3
#
# Copyright 2020 Xiaomi Corporation (authors: <NAME>)
#
# See ../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run this single test, use
#
# ctest --verbose -R symbol_table_test_py
import unittest
import k2
class TestSymbolTable(unittest.TestCase):
def test(self):
s = '''
a 1
b 2
'''
symbol_table = k2.SymbolTable.from_str(s)
assert symbol_table.get('a') == 1
assert symbol_table.get(1) == 'a'
assert symbol_table.get('b') == 2
assert symbol_table.get(2) == 'b'
assert symbol_table.get(0) == '<eps>'
assert symbol_table.get('<eps>') == 0
assert symbol_table['a'] == 1
assert symbol_table[1] == 'a'
assert symbol_table['b'] == 2
assert symbol_table[2] == 'b'
assert symbol_table[0] == '<eps>'
assert symbol_table['<eps>'] == 0
assert 1 in symbol_table
assert 'a' in symbol_table
assert 2 in symbol_table
assert 'b' in symbol_table
assert symbol_table.ids == [0, 1, 2]
assert symbol_table.symbols == ['<eps>', 'a', 'b']
symbol_table.add('c')
assert symbol_table['c'] == 3
symbol_table.add('d', 10)
assert symbol_table['d'] == 10
symbol_table.add('e')
assert symbol_table['e'] == 11
assert symbol_table.ids == [0, 1, 2, 3, 10, 11]
assert symbol_table.symbols == ['<eps>', 'a', 'b', 'c', 'd', 'e']
s = '''
a 1
b 2
h 12
'''
sym = k2.SymbolTable.from_str(s)
merged = symbol_table.merge(sym)
assert merged.ids == [0, 1, 2, 3, 10, 11, 12]
assert merged.symbols == ['<eps>', 'a', 'b', 'c', 'd', 'e', 'h']
assert merged[12] == 'h'
assert merged['h'] == 12
assert merged['e'] == 11
assert merged[11] == 'e'
if __name__ == '__main__':
unittest.main()
| 1,078 |
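The test fixes the id-assignment semantics: add() without an explicit id takes max(existing ids) + 1 (so 'e' lands on 11 after 'd' was pinned to 10), and merge() unions two tables with disjoint mappings. A minimal sketch that satisfies the core asserts above:

```python
# Minimal sketch of the SymbolTable semantics the test above pins down.
class SymbolTableSketch:
    def __init__(self):
        self._sym2id = {"<eps>": 0}
        self._id2sym = {0: "<eps>"}

    def add(self, symbol: str, index: int = None) -> int:
        if index is None:
            index = max(self._id2sym) + 1   # next id after the current maximum
        self._sym2id[symbol] = index
        self._id2sym[index] = symbol
        return index

    def __getitem__(self, key):
        return self._id2sym[key] if isinstance(key, int) else self._sym2id[key]

t = SymbolTableSketch()
t.add("a"); t.add("b"); t.add("c")
t.add("d", 10)
assert t.add("e") == 11
assert t["d"] == 10 and t[11] == "e"
```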
332 | <filename>spring-xd-dirt/src/main/java/org/springframework/xd/dirt/server/admin/deployment/zk/ZKStreamDeploymentHandler.java
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.xd.dirt.server.admin.deployment.zk;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import org.apache.curator.framework.CuratorFramework;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.util.Assert;
import org.springframework.xd.dirt.cluster.Container;
import org.springframework.xd.dirt.cluster.NoContainerException;
import org.springframework.xd.dirt.container.store.ContainerRepository;
import org.springframework.xd.dirt.core.DeploymentUnitStatus;
import org.springframework.xd.dirt.core.Stream;
import org.springframework.xd.dirt.server.admin.deployment.ContainerMatcher;
import org.springframework.xd.dirt.server.admin.deployment.DeploymentUnitStateCalculator;
import org.springframework.xd.dirt.server.admin.deployment.ModuleDeploymentStatus;
import org.springframework.xd.dirt.server.admin.deployment.StreamRuntimePropertiesProvider;
import org.springframework.xd.dirt.stream.StreamFactory;
import org.springframework.xd.dirt.zookeeper.Paths;
import org.springframework.xd.dirt.zookeeper.ZooKeeperUtils;
import org.springframework.xd.module.ModuleDeploymentProperties;
import org.springframework.xd.module.ModuleDescriptor;
/**
* Deployment handler that is responsible for deploying Stream.
*
* @author <NAME>
* @author <NAME>
* @author <NAME>
*/
public class ZKStreamDeploymentHandler extends ZKDeploymentHandler {
/**
* Logger.
*/
private static final Logger logger = LoggerFactory.getLogger(ZKStreamDeploymentHandler.class);
/**
* Factory to construct {@link org.springframework.xd.dirt.core.Stream} instance
*/
@Autowired
private StreamFactory streamFactory;
/**
* Matcher that applies container matching criteria
*/
@Autowired
private ContainerMatcher containerMatcher;
/**
* Repository for the containers
*/
@Autowired
private ContainerRepository containerRepository;
/**
* Utility that writes module deployment requests to ZK path
*/
@Autowired
private ModuleDeploymentWriter moduleDeploymentWriter;
/**
* Deployment unit state calculator
*/
@Autowired
private DeploymentUnitStateCalculator stateCalculator;
/**
* Deploy the stream with the given name.
* @param streamName the stream name
* @throws Exception
*/
public void deploy(String streamName) throws Exception {
CuratorFramework client = zkConnection.getClient();
deployStream(client, DeploymentLoader.loadStream(client, streamName, streamFactory));
}
/**
* Issue deployment requests for the modules of the given stream.
*
* @param stream stream to be deployed
*
* @throws InterruptedException
*/
private void deployStream(CuratorFramework client, Stream stream) throws InterruptedException {
// Ensure that the path for modules used by the container to write
// ephemeral nodes exists. The presence of this path is assumed
// by the supervisor when it calculates stream state when it is
// assigned leadership. See XD-2170 for details.
try {
client.create().creatingParentsIfNeeded().forPath(
Paths.build(Paths.STREAM_DEPLOYMENTS, stream.getName(), Paths.MODULES));
}
catch (Exception e) {
ZooKeeperUtils.wrapAndThrowIgnoring(e, KeeperException.NodeExistsException.class);
}
String statusPath = Paths.build(Paths.STREAM_DEPLOYMENTS, stream.getName(), Paths.STATUS);
// assert that the deployment status has been correctly set to "deploying"
DeploymentUnitStatus deployingStatus = null;
try {
deployingStatus = new DeploymentUnitStatus(ZooKeeperUtils.bytesToMap(
client.getData().forPath(statusPath)));
}
catch (Exception e) {
// an exception indicates that the status has not been set
}
Assert.state(deployingStatus != null
&& deployingStatus.getState() == DeploymentUnitStatus.State.deploying,
String.format("Expected 'deploying' status for stream '%s'; current status: %s",
stream.getName(), deployingStatus));
try {
Collection<ModuleDeploymentStatus> deploymentStatuses = new ArrayList<ModuleDeploymentStatus>();
DefaultModuleDeploymentPropertiesProvider deploymentPropertiesProvider =
new DefaultModuleDeploymentPropertiesProvider(stream);
for (Iterator<ModuleDescriptor> descriptors = stream.getDeploymentOrderIterator(); descriptors.hasNext(); ) {
ModuleDescriptor descriptor = descriptors.next();
ModuleDeploymentProperties deploymentProperties = deploymentPropertiesProvider.propertiesForDescriptor(descriptor);
// write out all of the required modules for this stream (including runtime properties);
// this does not actually perform a deployment...this data is used in case there are not
// enough containers to deploy the stream
StreamRuntimePropertiesProvider partitionPropertiesProvider =
new StreamRuntimePropertiesProvider(stream, deploymentPropertiesProvider);
int moduleCount = deploymentProperties.getCount();
if (moduleCount == 0) {
createModuleDeploymentRequestsPath(client, descriptor,
partitionPropertiesProvider.propertiesForDescriptor(descriptor));
}
else {
for (int i = 0; i < moduleCount; i++) {
createModuleDeploymentRequestsPath(client, descriptor,
partitionPropertiesProvider.propertiesForDescriptor(descriptor));
}
}
try {
// find the containers that can deploy these modules
Collection<Container> containers = containerMatcher.match(descriptor, deploymentProperties,
containerRepository.findAll());
// write out the deployment requests targeted to the containers obtained above;
// a new instance of StreamPartitionPropertiesProvider is created since this
// object is responsible for generating unique sequence ids for modules
StreamRuntimePropertiesProvider deploymentRuntimeProvider =
new StreamRuntimePropertiesProvider(stream, deploymentPropertiesProvider);
deploymentStatuses.addAll(moduleDeploymentWriter.writeDeployment(
descriptor, deploymentRuntimeProvider, containers));
}
catch (NoContainerException e) {
logger.warn("No containers available for deployment of module '{}' for stream '{}'",
descriptor.getModuleLabel(), stream.getName());
}
}
DeploymentUnitStatus status = stateCalculator.calculate(stream, deploymentPropertiesProvider,
deploymentStatuses);
logger.info("Deployment status for stream '{}': {}", stream.getName(), status);
client.setData().forPath(statusPath, ZooKeeperUtils.mapToBytes(status.toMap()));
}
catch (InterruptedException e) {
throw e;
}
catch (Exception e) {
throw ZooKeeperUtils.wrapThrowable(e);
}
}
}
| 2,285 |
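deployStream() above follows a fixed sequence: ensure the .../modules path exists, assert the status node already reads "deploying", write one deployment request per module instance (a count of 0 still records one request), match containers per module (an empty match is logged, not fatal), then persist the calculated aggregate state. A control-flow sketch (all names illustrative; the real code talks to ZooKeeper through Curator and the Paths constants):

```python
# Control-flow sketch of deployStream() above; FakeZK stands in for Curator.
class FakeZK:
    def __init__(self):
        self.nodes = {}

    def ensure_path(self, path):
        self.nodes.setdefault(path, None)

    def set(self, path, data):
        self.nodes[path] = data

    def get(self, path):
        return self.nodes.get(path)

def deploy_stream(zk, name, modules):
    zk.ensure_path(f"/streams/{name}/modules")
    assert zk.get(f"/streams/{name}/status") == "deploying"
    requests = []
    for module, count in modules:
        # count == 0 still records one request; otherwise one per instance
        for _ in range(max(count, 1)):
            requests.append(module)
    # stands in for DeploymentUnitStateCalculator + setData on the status node
    zk.set(f"/streams/{name}/status", "deployed" if requests else "failed")
    return requests

zk = FakeZK()
zk.set("/streams/ticktock/status", "deploying")
assert deploy_stream(zk, "ticktock", [("time", 1), ("log", 2)]) == ["time", "log", "log"]
assert zk.get("/streams/ticktock/status") == "deployed"
```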
476 | {
"title": "SQL",
"description": "Our project utilizes a database via SQL",
"tags" : "WebApp, API, MessageServer",
"minimum_risk_required" : "Low Risk",
"questions": {
"General": [
"We are not writing SQL and are utilizing the already-existing database abstraction functions available in our development",
"We are utilizing bind variables for any SQL statements, utilizing safe db functions",
"We are not building SQL statements by using string concatenation"
]
}
}
| 147 |
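The second and third checklist items are the standard SQL-injection defenses: bind variables keep user input as data, while string concatenation lets it become SQL. A sketch of the difference (sqlite3 used purely for illustration):

```python
# Bind variables (safe) versus string concatenation (injectable).
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (name TEXT)")
conn.execute("INSERT INTO users VALUES ('alice')")

user_input = "alice' OR '1'='1"

# Unsafe: user input is concatenated into the statement text.
unsafe = "SELECT * FROM users WHERE name = '" + user_input + "'"
assert len(conn.execute(unsafe).fetchall()) == 1   # injection matched every row

# Safe: a bind variable keeps the input as data, never as SQL.
safe = conn.execute("SELECT * FROM users WHERE name = ?", (user_input,))
assert safe.fetchall() == []                       # no user is literally named that
```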
348 | {"nom":"Orus","circ":"1ère circonscription","dpt":"Ariège","inscrits":30,"abs":13,"votants":17,"blancs":1,"nuls":0,"exp":16,"res":[{"nuance":"REM","nom":"<NAME>","voix":12},{"nuance":"FI","nom":"<NAME>","voix":4}]} | 96 |
969 | <filename>src/app/musicplayer/util/Resources.java
package app.musicplayer.util;
public final class Resources {
public static final String FXML = "/app/musicplayer/view/";
public static final String IMG = "/app/musicplayer/util/img/";
public static final String CSS = "/app/musicplayer/util/css/";
public static String JAR;
public static final String APIBASE = "http://ws.audioscrobbler.com/2.0/?";
public static final String APIKEY = "57ee3318536b23ee81d6b27e36997cde";
private Resources() {}
} | 176 |
453 | <filename>ChaseWhisply/src/main/java/fr/tvbarthel/games/chasewhisply/beta/BetaUtils.java
package fr.tvbarthel.games.chasewhisply.beta;
public class BetaUtils {
public static final String KEY_SHARED_PREFERENCES = "BetaKeySharedPreferences";
public static final String KEY_SENSOR_DELAY = "BetaKeySensorDelay";
public static final String KEY_COMPATIBILITY_MODE_ACTIVATED = "BetaKeyCompatibilityModeActivated";
}
| 148 |
580 | <filename>codechef/flow006.cpp
//https://www.codechef.com/problems/FLOW006
#include <iostream>
using namespace std;
int main()
{
int t, total;
long long n;
cin >> t;
while (t--)
{
total = 0;
cin >> n;
while (n != 0)
{
total += n % 10;
n = n / 10;
}
cout << total << endl;
}
return 0;
} | 216 |
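The loop is the standard digit-sum recurrence: n % 10 peels off the last digit and n / 10 drops it. The same computation in Python, with a string-based cross-check:

```python
# Same digit-sum recurrence as the C++ loop above.
def digit_sum(n: int) -> int:
    total = 0
    while n:
        total += n % 10    # peel off the last digit
        n //= 10           # drop it
    return total

assert digit_sum(12345) == 15 == sum(int(d) for d in "12345")
```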
3,195 | from __future__ import division
from __future__ import print_function
import mxnext as X
import math
import mxnet as mx
from utils.patch_config import patch_config_as_nothrow
class RepPoints(object):
def __init__(self):
pass
@staticmethod
def get_train_symbol(backbone, neck, head):
label = X.var("gt_bbox")
feat = backbone.get_rpn_feature()
feat = neck.get_rpn_feature(feat)
loss = head.get_loss(feat, label)
return X.group(loss)
@staticmethod
def get_test_symbol(backbone, neck, head):
im_info = X.var("im_info")
im_id = X.var("im_id")
rec_id = X.var("rec_id")
feat = backbone.get_rpn_feature()
feat = neck.get_rpn_feature(feat)
cls_score, bbox_xyxy = head.get_prediction(feat, im_info)
return X.group([rec_id, im_id, im_info, cls_score, bbox_xyxy])
class RepPointsHead(object):
def __init__(self, pHead):
self.p = patch_config_as_nothrow(pHead)
num_points = self.p.point_generate.num_points
self.dcn_kernel = int(math.sqrt(num_points))
self.dcn_pad = int((self.dcn_kernel - 1) / 2)
assert self.dcn_kernel * self.dcn_kernel == num_points, \
"The points number should be square."
assert self.dcn_kernel % 2 == 1, "The dcn kernel size should be odd."
# init moment method
dtype = "float16" if self.p.fp16 else "float32"
self.moment_transfer = X.var(
name="moment_transfer", shape=(2,), init=X.zero_init(), lr_mult=0.01, dtype=dtype
)
# init bias for cls
prior_prob = 0.01
pi = -math.log((1 - prior_prob) / prior_prob)
# shared classification weight and bias
self.cls_conv1_weight = X.var("cls_conv1_weight", init=X.gauss(std=0.01))
self.cls_conv1_bias = X.var("cls_conv1_bias", init=X.zero_init())
self.cls_conv2_weight = X.var("cls_conv2_weight", init=X.gauss(std=0.01))
self.cls_conv2_bias = X.var("cls_conv2_bias", init=X.zero_init())
self.cls_conv3_weight = X.var("cls_conv3_weight", init=X.gauss(std=0.01))
self.cls_conv3_bias = X.var("cls_conv3_bias", init=X.zero_init())
self.cls_conv_weight = X.var("cls_conv_weight", init=X.gauss(std=0.01))
self.cls_conv_bias = X.var("cls_conv_bias", init=X.zero_init())
self.cls_out_weight = X.var("cls_out_weight", init=X.gauss(std=0.01))
self.cls_out_bias = X.var("cls_out_bias", init=X.constant(pi))
# shared regression weight and bias
self.reg_conv1_weight = X.var("reg_conv1_weight", init=X.gauss(std=0.01))
self.reg_conv1_bias = X.var("reg_conv1_bias", init=X.zero_init())
self.reg_conv2_weight = X.var("reg_conv2_weight", init=X.gauss(std=0.01))
self.reg_conv2_bias = X.var("reg_conv2_bias", init=X.zero_init())
self.reg_conv3_weight = X.var("reg_conv3_weight", init=X.gauss(std=0.01))
self.reg_conv3_bias = X.var("reg_conv3_bias", init=X.zero_init())
self.pts_init_conv_weight = X.var("pts_init_conv_weight", init=X.gauss(std=0.01))
self.pts_init_conv_bias = X.var("pts_init_conv_bias", init=X.zero_init())
self.pts_init_out_weight = X.var("pts_init_out_weight", init=X.gauss(std=0.01))
self.pts_init_out_bias = X.var("pts_init_out_bias", init=X.zero_init())
self.pts_refine_conv_weight = X.var("pts_refine_conv_weight", init=X.gauss(std=0.01))
self.pts_refine_conv_bias = X.var("pts_refine_conv_bias", init=X.zero_init())
self.pts_refine_out_weight = X.var("pts_refine_out_weight", init=X.gauss(std=0.01))
self.pts_refine_out_bias = X.var("pts_refine_out_bias", init=X.zero_init())
self._pts_out_inits = None
self._pts_out_refines = None
self._cls_outs = None
def _cls_subnet(self, conv_feat, stride):
p = self.p
norm = p.normalizer
conv_channel = p.head.conv_channel
# classification subset
cls_conv1 = X.conv(
data=conv_feat,
kernel=3,
filter=conv_channel,
weight=self.cls_conv1_weight,
bias=self.cls_conv1_bias,
no_bias=False,
name="cls_conv1"
)
cls_conv1 = norm(cls_conv1, name="cls_conv1_bn_s{}".format(stride))
cls_conv1_relu = X.relu(cls_conv1)
cls_conv2 = X.conv(
data=cls_conv1_relu,
kernel=3,
filter=conv_channel,
weight=self.cls_conv2_weight,
bias=self.cls_conv2_bias,
no_bias=False,
name="cls_conv2"
)
cls_conv2 = norm(cls_conv2, name="cls_conv2_bn_s{}".format(stride))
cls_conv2_relu = X.relu(cls_conv2)
cls_conv3 = X.conv(
data=cls_conv2_relu,
kernel=3,
filter=conv_channel,
weight=self.cls_conv3_weight,
bias=self.cls_conv3_bias,
no_bias=False,
name="cls_conv3"
)
cls_conv3 = norm(cls_conv3, name="cls_conv3_bn_s{}".format(stride))
cls_conv3_relu = X.relu(cls_conv3)
if p.fp16:
cls_conv3_relu = X.to_fp32(cls_conv3_relu, name="cls_conv3_fp32")
return cls_conv3_relu
def _reg_subnet(self, conv_feat, stride):
p = self.p
norm = p.normalizer
conv_channel = p.head.conv_channel
# regression subnet
reg_conv1 = X.conv(
data=conv_feat,
kernel=3,
filter=conv_channel,
weight=self.reg_conv1_weight,
bias=self.reg_conv1_bias,
no_bias=False,
name="reg_conv1"
)
reg_conv1 = norm(reg_conv1, name="reg_conv1_bn_s{}".format(stride))
reg_conv1_relu = X.relu(reg_conv1)
reg_conv2 = X.conv(
data=reg_conv1_relu,
kernel=3,
filter=conv_channel,
weight=self.reg_conv2_weight,
bias=self.reg_conv2_bias,
no_bias=False,
name="reg_conv2"
)
reg_conv2 = norm(reg_conv2, name="reg_conv2_bn_s{}".format(stride))
reg_conv2_relu = X.relu(reg_conv2)
reg_conv3 = X.conv(
data=reg_conv2_relu,
kernel=3,
filter=conv_channel,
weight=self.reg_conv3_weight,
bias=self.reg_conv3_bias,
no_bias=False,
name="reg_conv3"
)
reg_conv3 = norm(reg_conv3, name="reg_conv3_bn_s{}".format(stride))
reg_conv3_relu = X.relu(reg_conv3)
if p.fp16:
reg_conv3_relu = X.to_fp32(reg_conv3_relu, name="reg_conv3_fp32")
return reg_conv3_relu
def _init_pts(self, reg_feat):
p = self.p
point_conv_channel = p.head.point_conv_channel
pts_output_channel = p.point_generate.num_points * 2
pts_init_conv = X.conv(
data=reg_feat,
kernel=3,
filter=point_conv_channel,
weight=self.pts_init_conv_weight,
bias=self.pts_init_conv_bias,
no_bias=False,
name="pts_init_conv"
)
pts_init_conv_relu = X.relu(pts_init_conv)
pts_init_out = X.conv(
data=pts_init_conv_relu,
kernel=1,
filter=pts_output_channel,
weight=self.pts_init_out_weight,
bias=self.pts_init_out_bias,
no_bias=False,
name="pts_init_out"
)
return pts_init_out
def _refine_pts(self, cls_feat, reg_feat, dcn_offset, pts_init_out):
p = self.p
point_conv_channel = p.head.point_conv_channel
num_class = p.num_class
output_channel = num_class - 1
pts_output_channel = p.point_generate.num_points * 2
cls_conv = mx.symbol.contrib.DeformableConvolution(
data=cls_feat,
offset=dcn_offset,
kernel=(self.dcn_kernel, self.dcn_kernel),
pad=(self.dcn_pad, self.dcn_pad),
stride=(1, 1),
dilate=(1, 1),
num_filter=point_conv_channel,
weight=self.cls_conv_weight,
bias=self.cls_conv_bias,
no_bias=False,
name="cls_conv"
)
cls_conv_relu = X.relu(cls_conv)
cls_out = X.conv(
data=cls_conv_relu,
kernel=1,
filter=output_channel,
weight=self.cls_out_weight,
bias=self.cls_out_bias,
no_bias=False,
name="cls_out"
)
pts_refine_conv = mx.symbol.contrib.DeformableConvolution(
data=reg_feat,
offset=dcn_offset,
kernel=(self.dcn_kernel, self.dcn_kernel),
pad=(self.dcn_pad, self.dcn_pad),
stride=(1, 1),
dilate=(1, 1),
num_filter=point_conv_channel,
weight=self.pts_refine_conv_weight,
bias=self.pts_refine_conv_bias,
no_bias=False,
name="pts_refine_conv"
)
pts_refine_conv_relu = X.relu(pts_refine_conv)
pts_refine_out = X.conv(
data=pts_refine_conv_relu,
kernel=1,
filter=pts_output_channel,
weight=self.pts_refine_out_weight,
bias=self.pts_refine_out_bias,
no_bias=False,
name="pts_refine_out"
)
pts_refine_out = pts_refine_out + X.block_grad(pts_init_out)
return pts_refine_out, cls_out
def get_output(self, conv_feat):
if self._pts_out_inits is not None and self._pts_out_refines is not None and \
self._cls_outs is not None:
return self._pts_out_inits, self._pts_out_refines, self._cls_outs
p = self.p
stride = p.point_generate.stride
# init base offset for dcn
from models.RepPoints.point_ops import _gen_offsets
dcn_base_offset = _gen_offsets(
mx.symbol, dcn_kernel=self.dcn_kernel, dcn_pad=self.dcn_pad
)
pts_out_inits = dict()
pts_out_refines = dict()
cls_outs = dict()
for s in stride:
# cls subnet with shared params across multiple strides
cls_feat = self._cls_subnet(conv_feat=conv_feat["stride%s" % s], stride=s)
# reg subnet with shared params across multiple strides
reg_feat = self._reg_subnet(conv_feat=conv_feat["stride%s" % s], stride=s)
# predict offsets on each center points
pts_out_init = self._init_pts(reg_feat)
# gradient is scaled by 0.1 for the init offsets subnet
pts_out_init_grad_mul = 0.9 * X.block_grad(pts_out_init) + 0.1 * pts_out_init
# dcn uses offsets on grids as input,
# thus the predicted offsets substract base dcn offsets here before using dcn.
pts_out_init_offset = mx.symbol.broadcast_sub(pts_out_init_grad_mul, dcn_base_offset)
# use offsets on features to refine box and cls
pts_out_refine, cls_out = self._refine_pts(
cls_feat,
reg_feat,
pts_out_init_offset,
pts_out_init
)
pts_out_inits["stride%s" % s] = pts_out_init
pts_out_refines["stride%s" % s] = pts_out_refine
cls_outs["stride%s" % s] = cls_out
self._pts_out_inits = pts_out_inits
self._pts_out_refines = pts_out_refines
self._cls_outs = cls_outs
return self._pts_out_inits, self._pts_out_refines, self._cls_outs
def get_loss(self, conv_feat, gt_bbox):
from models.RepPoints.point_ops import (
_gen_points, _offset_to_pts, _point_target, _offset_to_boxes, _points2bbox)
p = self.p
batch_image = p.batch_image
num_points = p.point_generate.num_points
scale = p.point_generate.scale
stride = p.point_generate.stride
transform = p.point_generate.transform
target_scale = p.point_target.target_scale
num_pos = p.point_target.num_pos
pos_iou_thr = p.bbox_target.pos_iou_thr
neg_iou_thr = p.bbox_target.neg_iou_thr
min_pos_iou = p.bbox_target.min_pos_iou
pts_out_inits, pts_out_refines, cls_outs = self.get_output(conv_feat)
points = dict()
bboxes = dict()
pts_coordinate_preds_inits = dict()
pts_coordinate_preds_refines = dict()
for s in stride:
# generate points on base coordinate according to stride and size of feature map
points["stride%s" % s] = _gen_points(mx.symbol, pts_out_inits["stride%s" % s], s)
# generate bbox after init stage
bboxes["stride%s" % s] = _offset_to_boxes(
mx.symbol,
points["stride%s" % s],
X.block_grad(pts_out_inits["stride%s" % s]),
s,
transform,
moment_transfer=self.moment_transfer
)
# generate final offsets in init stage
pts_coordinate_preds_inits["stride%s" % s] = _offset_to_pts(
mx.symbol,
points["stride%s" % s],
pts_out_inits["stride%s" % s],
s,
num_points
)
# generate final offsets in refine stage
pts_coordinate_preds_refines["stride%s" % s] = _offset_to_pts(
mx.symbol,
points["stride%s" % s],
pts_out_refines["stride%s" % s],
s,
num_points
)
# for init stage, use points assignment
point_proposals = mx.symbol.tile(
X.concat([points["stride%s" % s] for s in stride], axis=1, name="point_concat"),
reps=(batch_image, 1, 1)
)
points_labels_init, points_gts_init, points_weight_init = _point_target(
mx.symbol,
point_proposals,
gt_bbox,
batch_image,
"point",
scale=target_scale,
num_pos=num_pos
)
# for refine stage, use max iou assignment
box_proposals = X.concat(
[bboxes["stride%s" % s] for s in stride], axis=1, name="box_concat"
)
points_labels_refine, points_gts_refine, points_weight_refine = _point_target(
mx.symbol,
box_proposals,
gt_bbox,
batch_image,
"box",
pos_iou_thr=pos_iou_thr,
neg_iou_thr=neg_iou_thr,
min_pos_iou=min_pos_iou
)
bboxes_out_strides = dict()
for s in stride:
cls_outs["stride%s" % s] = X.reshape(
X.transpose(data=cls_outs["stride%s" % s], axes=(0, 2, 3, 1)),
(0, -3, -2)
)
bboxes_out_strides["stride%s" % s] = mx.symbol.repeat(mx.symbol.ones_like(
mx.symbol.slice_axis(cls_outs["stride%s" % s], begin=0, end=1, axis=-1)),
repeats=4, axis=-1) * s
# cls branch
cls_outs_concat = X.concat(
[cls_outs["stride%s" % s] for s in stride], axis=1, name="cls_concat"
)
cls_loss = X.focal_loss(
data=cls_outs_concat,
label=points_labels_refine,
normalization='valid',
alpha=p.focal_loss.alpha,
gamma=p.focal_loss.gamma,
grad_scale=1.0,
workspace=1500,
name="cls_loss"
)
# init box branch
pts_inits_concat_ = X.concat(
[pts_coordinate_preds_inits["stride%s" % s] for s in stride],
axis=1,
name="pts_init_concat_"
)
pts_inits_concat = X.reshape(pts_inits_concat_, (-3, -2), name="pts_inits_concat")
bboxes_inits_concat_ = _points2bbox(
mx.symbol,
pts_inits_concat,
transform,
y_first=False,
moment_transfer=self.moment_transfer
)
bboxes_inits_concat = X.reshape(bboxes_inits_concat_, (-4, batch_image, -1, -2))
normalize_term = X.concat(
[bboxes_out_strides["stride%s" % s] for s in stride], axis=1, name="normalize_term"
) * scale
pts_init_loss = X.smooth_l1(
data=(bboxes_inits_concat - points_gts_init) / normalize_term,
scalar=3.0,
name="pts_init_l1_loss"
)
pts_init_loss = pts_init_loss * points_weight_init
pts_init_loss = X.bbox_norm(
data=pts_init_loss,
label=points_labels_init,
name="pts_init_norm_loss"
)
pts_init_loss = X.make_loss(
data=pts_init_loss,
grad_scale=0.5,
name="pts_init_loss"
)
points_init_labels = X.block_grad(points_labels_refine, name="points_init_labels")
# refine box branch
pts_refines_concat_ = X.concat(
[pts_coordinate_preds_refines["stride%s" % s] for s in stride],
axis=1,
name="pts_refines_concat_"
)
pts_refines_concat = X.reshape(pts_refines_concat_, (-3, -2), name="pts_refines_concat")
bboxes_refines_concat_ = _points2bbox(
mx.symbol,
pts_refines_concat,
transform,
y_first=False,
moment_transfer=self.moment_transfer
)
bboxes_refines_concat = X.reshape(bboxes_refines_concat_, (-4, batch_image, -1, -2))
pts_refine_loss = X.smooth_l1(
data=(bboxes_refines_concat - points_gts_refine) / normalize_term,
scalar=3.0,
name="pts_refine_l1_loss"
)
pts_refine_loss = pts_refine_loss * points_weight_refine
pts_refine_loss = X.bbox_norm(
data=pts_refine_loss,
label=points_labels_refine,
name="pts_refine_norm_loss"
)
pts_refine_loss = X.make_loss(
data=pts_refine_loss,
grad_scale=1.0,
name="pts_refine_loss"
)
points_refine_labels = X.block_grad(points_labels_refine, name="point_refine_labels")
return cls_loss, pts_init_loss, pts_refine_loss, points_init_labels, points_refine_labels
def get_prediction(self, conv_feat, im_info):
from models.RepPoints.point_ops import _gen_points, _points2bbox
p = self.p
batch_image = p.batch_image
stride = p.point_generate.stride
transform = p.point_generate.transform
pre_nms_top_n = p.proposal.pre_nms_top_n
pts_out_inits, pts_out_refines, cls_outs = self.get_output(conv_feat)
cls_score_dict = dict()
bbox_xyxy_dict = dict()
for s in stride:
            # NOTE: pre_nms_top_n_ is set to -1 (keep all proposals) for strides above 32,
            # because those low-resolution feature maps yield fewer proposals than
            # pre_nms_top_n. Also note that one should select appropriate params here
            # when using low-resolution images as input.
pre_nms_top_n_ = pre_nms_top_n if s <= 32 else -1
points_ = _gen_points(mx.symbol, pts_out_inits["stride%s" % s], s)
preds_refines_ = _points2bbox(
mx.symbol,
pts_out_refines["stride%s" % s],
transform,
moment_transfer=self.moment_transfer
)
preds_refines_ = X.reshape(
X.transpose(data=preds_refines_, axes=(0, 2, 3, 1)),
(0, -3, -2)
)
cls_ = X.reshape(
X.transpose(data=cls_outs["stride%s" % s], axes=(0, 2, 3, 1)),
(0, -3, -2)
)
scores_ = X.sigmoid(cls_)
max_scores_ = mx.symbol.max(scores_, axis=-1)
max_index_ = mx.symbol.topk(max_scores_, axis=1, k=pre_nms_top_n_)
scores_dict = dict()
bboxes_dict = dict()
for i in range(batch_image):
max_index_i = X.reshape(
mx.symbol.slice_axis(max_index_, axis=0, begin=i, end=i + 1), (-1,)
)
scores_i = X.reshape(
mx.symbol.slice_axis(scores_, axis=0, begin=i, end=i + 1), (-3, -2)
)
points_i = X.reshape(points_, (-3, -2))
preds_refines_i = X.reshape(
mx.symbol.slice_axis(preds_refines_, axis=0, begin=i, end=i + 1), (-3, -2)
)
scores_i = mx.symbol.take(scores_i, max_index_i)
points_i = mx.symbol.take(points_i, max_index_i)
preds_refines_i = mx.symbol.take(preds_refines_i, max_index_i)
points_i = mx.symbol.slice_axis(points_i, axis=-1, begin=0, end=2)
points_xyxy_i = X.concat(
[points_i, points_i], axis=-1, name="points_xyxy_b{}_s{}".format(i, s)
)
bboxes_i = preds_refines_i * s + points_xyxy_i
im_info_i = mx.symbol.slice_axis(im_info, axis=0, begin=i, end=i + 1)
h_i, w_i, _ = mx.symbol.split(im_info_i, num_outputs=3, axis=1)
l_i, t_i, r_i, b_i = mx.symbol.split(bboxes_i, num_outputs=4, axis=1)
clip_l_i = mx.symbol.maximum(mx.symbol.broadcast_minimum(l_i, w_i - 1.0), 0.0)
clip_t_i = mx.symbol.maximum(mx.symbol.broadcast_minimum(t_i, h_i - 1.0), 0.0)
clip_r_i = mx.symbol.maximum(mx.symbol.broadcast_minimum(r_i, w_i - 1.0), 0.0)
clip_b_i = mx.symbol.maximum(mx.symbol.broadcast_minimum(b_i, h_i - 1.0), 0.0)
clip_bboxes_i = X.concat(
[clip_l_i, clip_t_i, clip_r_i, clip_b_i],
axis=1,
name="clip_bboxes_b{}_s{}".format(i, s)
)
scores_dict["img%s" % i] = scores_i
bboxes_dict["img%s" % i] = clip_bboxes_i
cls_score_ = mx.symbol.stack(
*[scores_dict["img%s" % i] for i in range(batch_image)], axis=0
)
pad_zeros_ = mx.symbol.zeros_like(
mx.symbol.slice_axis(cls_score_, axis=-1, begin=0, end=1)
)
cls_score_ = X.concat([pad_zeros_, cls_score_], axis=-1, name="cls_score_s{}".format(s))
bboxes_ = mx.symbol.stack(
*[bboxes_dict["img%s" % i] for i in range(batch_image)], axis=0
)
cls_score_dict["stride%s" % s] = cls_score_
bbox_xyxy_dict["stride%s" % s] = bboxes_
cls_score = X.concat(
[cls_score_dict["stride%s" % s] for s in stride], axis=1, name="cls_score_concat"
)
bbox_xyxy = X.concat(
[bbox_xyxy_dict["stride%s" % s] for s in stride], axis=1, name="bbox_xyxy_concat"
)
return cls_score, bbox_xyxy
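# Shape sketch (illustrative; not taken from any config): with the per-stride topk
# above, get_prediction returns
#   cls_score: (batch_image, sum of K_s, num_class + 1), where column 0 is the
#              zero-padded background score added via pad_zeros_
#   bbox_xyxy: (batch_image, sum of K_s, 4), clipped to each image's im_info
# with K_s = pre_nms_top_n for strides <= 32 and the full map size otherwise.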
class RepPointsNeck(object):
def __init__(self, pNeck):
self.p = patch_config_as_nothrow(pNeck)
self.fpn_feat = None
def add_norm(self, sym):
p = self.p
if p.normalizer.__name__ == "fix_bn":
pass
elif p.normalizer.__name__ in ["sync_bn", "local_bn", "gn", "dummy"]:
sym = p.normalizer(sym)
else:
raise NotImplementedError("Unsupported normalizer: {}".format(p.normalizer.__name__))
return sym
def get_fpn_neck(self, data):
if self.fpn_feat is not None:
return self.fpn_feat
c2, c3, c4, c5 = data
xavier_init = mx.init.Xavier(factor_type="in", rnd_type="uniform", magnitude=3)
# P5
p5 = X.conv(
data=c5,
filter=256,
no_bias=False,
weight=X.var(name="P5_lateral_weight", init=xavier_init),
bias=X.var(name="P5_lateral_bias", init=X.zero_init()),
name="P5_lateral"
)
p5 = self.add_norm(p5)
p5_conv = X.conv(
data=p5,
kernel=3,
filter=256,
no_bias=False,
weight=X.var(name="P5_conv_weight", init=xavier_init),
bias=X.var(name="P5_conv_bias", init=X.zero_init()),
name="P5_conv"
)
p5_conv = self.add_norm(p5_conv)
# P4
p5_up = mx.sym.UpSampling(
p5,
scale=2,
sample_type="nearest",
name="P5_upsampling",
num_args=1
)
p4_la = X.conv(
data=c4,
filter=256,
no_bias=False,
weight=X.var(name="P4_lateral_weight", init=xavier_init),
bias=X.var(name="P4_lateral_bias", init=X.zero_init()),
name="P4_lateral"
)
p4_la = self.add_norm(p4_la)
p5_clip = mx.sym.slice_like(p5_up, p4_la, name="P4_clip")
p4 = mx.sym.add_n(p5_clip, p4_la, name="P4_sum")
p4_conv = X.conv(
data=p4,
kernel=3,
filter=256,
no_bias=False,
weight=X.var(name="P4_conv_weight", init=xavier_init),
bias=X.var(name="P4_conv_bias", init=X.zero_init()),
name="P4_conv"
)
p4_conv = self.add_norm(p4_conv)
# P3
p4_up = mx.sym.UpSampling(
p4,
scale=2,
sample_type="nearest",
name="P4_upsampling",
num_args=1
)
p3_la = X.conv(
data=c3,
filter=256,
no_bias=False,
weight=X.var(name="P3_lateral_weight", init=xavier_init),
bias=X.var(name="P3_lateral_bias", init=X.zero_init()),
name="P3_lateral"
)
p3_la = self.add_norm(p3_la)
p4_clip = mx.sym.slice_like(p4_up, p3_la, name="P3_clip")
p3 = mx.sym.add_n(p4_clip, p3_la, name="P3_sum")
p3_conv = X.conv(
data=p3,
kernel=3,
filter=256,
no_bias=False,
weight=X.var(name="P3_conv_weight", init=xavier_init),
bias=X.var(name="P3_conv_bias", init=X.zero_init()),
name="P3_conv"
)
p3_conv = self.add_norm(p3_conv)
# P6
p6 = X.conv(
data=p5_conv,
kernel=3,
stride=2,
filter=256,
no_bias=False,
weight=X.var(name="P6_conv_weight", init=xavier_init),
bias=X.var(name="P6_conv_bias", init=X.zero_init()),
name="P6_conv"
)
p6 = self.add_norm(p6)
# P7
p6_relu = X.relu(data=p6, name="P6_relu")
p7 = X.conv(
data=p6_relu,
kernel=3,
stride=2,
filter=256,
no_bias=False,
weight=X.var(name="P7_conv_weight", init=xavier_init),
bias=X.var(name="P7_conv_bias", init=X.zero_init()),
name="P7_conv"
)
p7 = self.add_norm(p7)
self.fpn_feat = dict(
stride8=p3_conv,
stride16=p4_conv,
stride32=p5_conv,
stride64=p6,
stride128=p7
)
return self.fpn_feat
def get_rpn_feature(self, rpn_feat):
return self.get_fpn_neck(rpn_feat)
def get_rcnn_feature(self, rcnn_feat):
return self.get_fpn_neck(rcnn_feat)
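# Usage sketch (pNeck is a hypothetical config node; c2..c5 come from the backbone):
#
#   neck = RepPointsNeck(p.neck)
#   fpn_feat = neck.get_rpn_feature([c2, c3, c4, c5])
#   # fpn_feat is a dict: stride8/16/32 from the FPN top-down path, stride64/128
#   # from the extra P6/P7 convolutions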
| 15,356 |
488 | <filename>src/haru/hpdf_encoder.h
/*
* << Haru Free PDF Library >> -- hpdf_encoder.h
*
* URL: http://libharu.org
*
* Copyright (c) 1999-2006 <NAME> <<EMAIL>>
* Copyright (c) 2007-2008 <NAME> <<EMAIL>>
*
* Permission to use, copy, modify, distribute and sell this software
* and its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appear in all copies and
* that both that copyright notice and this permission notice appear
* in supporting documentation.
* It is provided "as is" without express or implied warranty.
*
*/
#ifndef _HPDF_ENCODER_H
#define _HPDF_ENCODER_H
#include "hpdf_consts.h"
#include "hpdf_streams.h"
#ifdef __cplusplus
extern "C" {
#endif
/*-- HPDF_Encoder ---------------------------------------*/
#define HPDF_ENCODER_SIG_BYTES 0x454E4344L
/*----------------------------------------------------------------------------*/
/*------ predefined font encodings -------------------------------------------*/
#define HPDF_ENCODING_FONT_SPECIFIC "FontSpecific"
#define HPDF_ENCODING_STANDARD "StandardEncoding"
#define HPDF_ENCODING_MAC_ROMAN "MacRomanEncoding"
#define HPDF_ENCODING_WIN_ANSI "WinAnsiEncoding"
#define HPDF_ENCODING_ISO8859_2 "ISO8859-2"
#define HPDF_ENCODING_ISO8859_3 "ISO8859-3"
#define HPDF_ENCODING_ISO8859_4 "ISO8859-4"
#define HPDF_ENCODING_ISO8859_5 "ISO8859-5"
#define HPDF_ENCODING_ISO8859_6 "ISO8859-6"
#define HPDF_ENCODING_ISO8859_7 "ISO8859-7"
#define HPDF_ENCODING_ISO8859_8 "ISO8859-8"
#define HPDF_ENCODING_ISO8859_9 "ISO8859-9"
#define HPDF_ENCODING_ISO8859_10 "ISO8859-10"
#define HPDF_ENCODING_ISO8859_11 "ISO8859-11"
#define HPDF_ENCODING_ISO8859_13 "ISO8859-13"
#define HPDF_ENCODING_ISO8859_14 "ISO8859-14"
#define HPDF_ENCODING_ISO8859_15 "ISO8859-15"
#define HPDF_ENCODING_ISO8859_16 "ISO8859-16"
#define HPDF_ENCODING_CP1250 "CP1250"
#define HPDF_ENCODING_CP1251 "CP1251"
#define HPDF_ENCODING_CP1252 "CP1252"
#define HPDF_ENCODING_CP1253 "CP1253"
#define HPDF_ENCODING_CP1254 "CP1254"
#define HPDF_ENCODING_CP1255 "CP1255"
#define HPDF_ENCODING_CP1256 "CP1256"
#define HPDF_ENCODING_CP1257 "CP1257"
#define HPDF_ENCODING_CP1258 "CP1258"
#define HPDF_ENCODING_KOI8_R "KOI8-R"
/*----------------------------------------------------------------------------*/
/*----- definition for font encoding -----------------------------------------*/
#define char_NOTDEF ".notdef"
typedef enum _HPDF_EncodingType {
HPDF_STANDARD_ENCODING = 0,
HPDF_MAC_ROMAN_ENCODING,
HPDF_WIN_ANSI_ENCODING,
HPDF_FONT_SPECIFIC,
HPDF_ENCODING_EOF
} HPDF_EncodingType;
typedef struct _HPDF_ParseText_Rec {
const HPDF_BYTE *text;
HPDF_UINT index;
HPDF_UINT len;
HPDF_ByteType byte_type;
} HPDF_ParseText_Rec;
typedef struct _HPDF_Encoder_Rec *HPDF_Encoder;
typedef HPDF_ByteType
(*HPDF_Encoder_ByteType_Func) (HPDF_Encoder encoder,
HPDF_ParseText_Rec *state);
typedef HPDF_UNICODE
(*HPDF_Encoder_ToUnicode_Func) (HPDF_Encoder encoder,
HPDF_UINT16 code);
typedef HPDF_STATUS
(*HPDF_Encoder_Write_Func) (HPDF_Encoder encoder,
HPDF_Stream out);
typedef HPDF_STATUS
(*HPDF_Encoder_Init_Func) (HPDF_Encoder encoder);
typedef void
(*HPDF_Encoder_Free_Func) (HPDF_Encoder encoder);
typedef struct _HPDF_Encoder_Rec {
HPDF_UINT32 sig_bytes;
char name[HPDF_LIMIT_MAX_NAME_LEN + 1];
HPDF_MMgr mmgr;
HPDF_Error error;
HPDF_EncoderType type;
HPDF_Encoder_ByteType_Func byte_type_fn;
HPDF_Encoder_ToUnicode_Func to_unicode_fn;
HPDF_Encoder_Write_Func write_fn;
HPDF_Encoder_Free_Func free_fn;
HPDF_Encoder_Init_Func init_fn;
/*
char lang_code[3];
char country_code[3];
*/
void *attr;
} HPDF_Encoder_Rec;
typedef enum _HPDF_BaseEncodings {
HPDF_BASE_ENCODING_STANDARD,
HPDF_BASE_ENCODING_WIN_ANSI,
HPDF_BASE_ENCODING_MAC_ROMAN,
HPDF_BASE_ENCODING_FONT_SPECIFIC,
HPDF_BASE_ENCODING_EOF
} HPDF_BaseEncodings;
HPDF_STATUS
HPDF_Encoder_Validate (HPDF_Encoder encoder);
void
HPDF_Encoder_SetParseText (HPDF_Encoder encoder,
HPDF_ParseText_Rec *state,
const HPDF_BYTE *text,
HPDF_UINT len);
HPDF_ByteType
HPDF_Encoder_ByteType (HPDF_Encoder encoder,
HPDF_ParseText_Rec *state);
HPDF_UNICODE
HPDF_Encoder_ToUnicode (HPDF_Encoder encoder,
HPDF_UINT16 code);
void
HPDF_Encoder_Free (HPDF_Encoder encoder);
/*-- HPDF_BasicEncoder ----------------------------------*/
typedef struct _HPDF_BasicEncoderAttr_Rec *HPDF_BasicEncoderAttr;
typedef struct _HPDF_BasicEncoderAttr_Rec {
char base_encoding[HPDF_LIMIT_MAX_NAME_LEN + 1];
HPDF_BYTE first_char;
HPDF_BYTE last_char;
HPDF_UNICODE unicode_map[256];
HPDF_BOOL has_differences;
HPDF_BYTE differences[256];
} HPDF_BasicEncoderAttr_Rec;
HPDF_Encoder
HPDF_BasicEncoder_New (HPDF_MMgr mmgr,
const char *encoding_name);
void
HPDF_BasicEncoder_Free (HPDF_Encoder encoder);
HPDF_STATUS
HPDF_BasicEncoder_Write (HPDF_Encoder encoder,
HPDF_Stream out);
HPDF_UNICODE
HPDF_BasicEncoder_ToUnicode (HPDF_Encoder encoder,
HPDF_UINT16 code);
/*-- HPDF_CMapEncoder ----------------------------------*/
typedef HPDF_BOOL
(*HPDF_CMapEncoder_ByteType_Func) (HPDF_Encoder encoder,
HPDF_BYTE b);
typedef struct _HPDF_CidRange_Rec {
HPDF_UINT16 from;
HPDF_UINT16 to;
HPDF_UINT16 cid;
} HPDF_CidRange_Rec;
typedef struct _HPDF_UnicodeMap_Rec {
HPDF_UINT16 code;
HPDF_UINT16 unicode;
} HPDF_UnicodeMap_Rec;
typedef struct _HPDF_CMapEncoderAttr_Rec *HPDF_CMapEncoderAttr;
typedef struct _HPDF_CMapEncoderAttr_Rec {
HPDF_UNICODE unicode_map[256][256];
HPDF_UINT16 cid_map[256][256];
HPDF_UINT16 jww_line_head[HPDF_MAX_JWW_NUM];
HPDF_List cmap_range;
HPDF_List notdef_range;
HPDF_List code_space_range;
HPDF_WritingMode writing_mode;
char registry[HPDF_LIMIT_MAX_NAME_LEN + 1];
char ordering[HPDF_LIMIT_MAX_NAME_LEN + 1];
HPDF_INT suppliment;
HPDF_CMapEncoder_ByteType_Func is_lead_byte_fn;
HPDF_CMapEncoder_ByteType_Func is_trial_byte_fn;
HPDF_INT uid_offset;
HPDF_UINT xuid[3];
} HPDF_CMapEncoderAttr_Rec;
HPDF_Encoder
HPDF_CMapEncoder_New (HPDF_MMgr mmgr,
char *name,
HPDF_Encoder_Init_Func init_fn);
HPDF_STATUS
HPDF_CMapEncoder_InitAttr (HPDF_Encoder encoder);
void
HPDF_CMapEncoder_Free (HPDF_Encoder encoder);
HPDF_STATUS
HPDF_CMapEncoder_Write (HPDF_Encoder encoder,
HPDF_Stream out);
HPDF_UNICODE
HPDF_CMapEncoder_ToUnicode (HPDF_Encoder encoder,
HPDF_UINT16 code);
HPDF_UINT16
HPDF_CMapEncoder_ToCID (HPDF_Encoder encoder,
HPDF_UINT16 code);
HPDF_STATUS
HPDF_CMapEncoder_SetParseText (HPDF_Encoder encoder,
HPDF_ParseText_Rec *state,
const HPDF_BYTE *text,
HPDF_UINT len);
HPDF_ByteType
HPDF_CMapEncoder_ByteType (HPDF_Encoder encoder,
HPDF_ParseText_Rec *state);
HPDF_STATUS
HPDF_CMapEncoder_AddCMap (HPDF_Encoder encoder,
const HPDF_CidRange_Rec *range);
HPDF_STATUS
HPDF_CMapEncoder_AddNotDefRange (HPDF_Encoder encoder,
HPDF_CidRange_Rec range);
HPDF_STATUS
HPDF_CMapEncoder_AddCodeSpaceRange (HPDF_Encoder encoder,
HPDF_CidRange_Rec range);
void
HPDF_CMapEncoder_SetUnicodeArray (HPDF_Encoder encoder,
const HPDF_UnicodeMap_Rec *array1);
HPDF_STATUS
HPDF_CMapEncoder_AddJWWLineHead (HPDF_Encoder encoder,
const HPDF_UINT16 *code);
HPDF_BOOL
HPDF_Encoder_CheckJWWLineHead (HPDF_Encoder encoder,
const HPDF_UINT16 code);
/*-- utility functions ----------------------------------*/
const char*
HPDF_UnicodeToGryphName (HPDF_UNICODE unicode);
HPDF_UNICODE
HPDF_GryphNameToUnicode (const char *gryph_name);
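/*-- usage sketch ----------------------------------------
 * Illustrative only; applications normally obtain encoders through the
 * public hpdf.h API rather than this internal header:
 *
 *   HPDF_Doc pdf = HPDF_New (error_handler, NULL);
 *   HPDF_Encoder enc = HPDF_GetEncoder (pdf, HPDF_ENCODING_WIN_ANSI);
 *   HPDF_UNICODE u = HPDF_Encoder_ToUnicode (enc, 0x41);  /* 'A' -> U+0041 */
 */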
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* _HPDF_ENCODER_H */
| 5,059 |
2,296 | <filename>src/ir_MagiQuest.hpp
/*
* ir_MagiQuest.hpp
*
 * Contains functions for receiving and sending the MagiQuest IR protocol with a 32 bit wand ID and a 16 bit magnitude
* Based off the Magiquest fork of Arduino-IRremote by mpflaga https://github.com/mpflaga/Arduino-IRremote/
*
* This file is part of Arduino-IRremote https://github.com/Arduino-IRremote/Arduino-IRremote.
*
************************************************************************************
* MIT License
*
* Copyright (c) 2017-2021 <NAME> <<EMAIL>>, <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
************************************************************************************
*/
#ifndef _IR_MAGIQUEST_HPP
#define _IR_MAGIQUEST_HPP
#include <Arduino.h>
#include "IRremoteInt.h" // evaluates the DEBUG for IR_DEBUG_PRINT
//
//==============================================================================
//
// M A G I Q U E S T
//
//==============================================================================
// MSB first, 8 Start bits (zero), 32 wand id bits, 16 magnitude bits, one stop bit
// Not all start bits need to be received, since the protocol is MSB first and the LSB therefore always ends up at the right position.
#if !defined (DOXYGEN)
// MagiQuest packet is both Wand ID and magnitude of swish and flick
union magiquest_t {
uint64_t llword;
struct {
uint16_t magnitude;
uint32_t wand_id;
uint8_t StartBits; // first 8 MSB start bits are zero.
uint8_t HighByte; // just to pad the struct out to 64 bits so we can union with llword
} cmd;
};
#endif // !defined (DOXYGEN)
#define MAGIQUEST_MAGNITUDE_BITS 16 // magiquest_t.cmd.magnitude
#define MAGIQUEST_WAND_ID_BITS 32 // magiquest_t.cmd.wand_id
#define MAGIQUEST_START_BITS 8 // magiquest_t.cmd.StartBits
#define MAGIQUEST_PERIOD 1150 // Time for a full MagiQuest "bit" (1100 - 1200 usec)
#define MAGIQUEST_BITS (MAGIQUEST_MAGNITUDE_BITS + MAGIQUEST_WAND_ID_BITS) // 48 Size of the command without the start bits
// The maximum size of a packet in bits is the sum of all 3 expected fields
#define MAGIQUEST_PACKET_SIZE (MAGIQUEST_MAGNITUDE_BITS + MAGIQUEST_WAND_ID_BITS + MAGIQUEST_START_BITS) // 56
/*
* 0 = 25% mark & 75% space across 1 period
* 1150 * 0.25 = 288 usec mark
* 1150 - 288 = 862 usec space
* 1 = 50% mark & 50% space across 1 period
* 1150 * 0.5 = 575 usec mark
* 1150 - 575 = 575 usec space
*/
#define MAGIQUEST_UNIT (MAGIQUEST_PERIOD / 4)
#define MAGIQUEST_ONE_MARK (2 * MAGIQUEST_UNIT) // 576
#define MAGIQUEST_ONE_SPACE (2 * MAGIQUEST_UNIT) // 576
#define MAGIQUEST_ZERO_MARK MAGIQUEST_UNIT
#define MAGIQUEST_ZERO_SPACE (3 * MAGIQUEST_UNIT) // 864
//+=============================================================================
//
void IRsend::sendMagiQuest(uint32_t wand_id, uint16_t magnitude) {
// Set IR carrier frequency
enableIROut(38);
// 8 start bits
sendPulseDistanceWidthData(
MAGIQUEST_ONE_MARK, MAGIQUEST_ONE_SPACE, MAGIQUEST_ZERO_MARK, MAGIQUEST_ZERO_SPACE, 0, 8, PROTOCOL_IS_MSB_FIRST);
// Data
sendPulseDistanceWidthData(
MAGIQUEST_ONE_MARK, MAGIQUEST_ONE_SPACE, MAGIQUEST_ZERO_MARK, MAGIQUEST_ZERO_SPACE, wand_id, MAGIQUEST_WAND_ID_BITS,
PROTOCOL_IS_MSB_FIRST);
sendPulseDistanceWidthData(
MAGIQUEST_ONE_MARK, MAGIQUEST_ONE_SPACE, MAGIQUEST_ZERO_MARK, MAGIQUEST_ZERO_SPACE, magnitude, MAGIQUEST_MAGNITUDE_BITS,
PROTOCOL_IS_MSB_FIRST,
SEND_STOP_BIT);
IrReceiver.restartAfterSend();
}
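/*
 * Usage sketch (pin number and wand values are made-up examples; assumes the
 * global IrSender instance from IRremote.hpp):
 *
 *   IrSender.begin(3);                          // IR LED on pin 3
 *   IrSender.sendMagiQuest(0x12345678, 0x100);  // 32 bit wand_id, 16 bit magnitude
 */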
//+=============================================================================
//
/*
* decodes a 56 bit result, which is not really compatible with standard decoder layout
*/
bool IRrecv::decodeMagiQuest() {
magiquest_t data; // Somewhere to build our code
unsigned int tOffset = 1; // Skip the gap between packets
unsigned int tMark;
unsigned int tSpace;
#if defined(DEBUG)
char bitstring[(MAGIQUEST_PACKET_SIZE + 1)];
bitstring[MAGIQUEST_PACKET_SIZE] = '\0';
#endif
// Check we have the right amount of data, magnitude and ID bits and at least 2 start bits + 1 stop bit
if (decodedIRData.rawDataPtr->rawlen < (2 * (MAGIQUEST_BITS + 3))
|| decodedIRData.rawDataPtr->rawlen > (2 * (MAGIQUEST_PACKET_SIZE + 1))) {
IR_DEBUG_PRINT(F("MagiQuest: "));
IR_DEBUG_PRINT(F("Data length="));
IR_DEBUG_PRINT(decodedIRData.rawDataPtr->rawlen);
IR_DEBUG_PRINTLN(F(" is not between 102 and 114"));
return false;
}
// Read the bits in
data.llword = 0;
while (tOffset < (unsigned int) (decodedIRData.rawDataPtr->rawlen - 1)) {
// get one mark and space pair
tMark = decodedIRData.rawDataPtr->rawbuf[tOffset++];
tSpace = decodedIRData.rawDataPtr->rawbuf[tOffset++];
IR_TRACE_PRINT(F("MagiQuest: mark="));
IR_TRACE_PRINT(tMark * MICROS_PER_TICK);
IR_TRACE_PRINT(F(" space="));
IR_TRACE_PRINTLN(tSpace * MICROS_PER_TICK);
if (matchMark(tSpace + tMark, MAGIQUEST_PERIOD)) {
if (tSpace > tMark) {
// It's a 0
data.llword <<= 1;
#if defined(DEBUG)
bitstring[(tOffset / 2) - 1] = '0';
#endif
} else {
// It's a 1
data.llword = (data.llword << 1) | 1;
#if defined(DEBUG)
bitstring[(tOffset / 2) - 1] = '1';
#endif
}
} else {
IR_DEBUG_PRINTLN(F("Mark and space does not match the constant MagiQuest period"));
return false;
}
}
IR_DEBUG_PRINTLN(bitstring);
// Success
decodedIRData.protocol = MAGIQUEST;
decodedIRData.numberOfBits = tOffset / 2;
decodedIRData.flags = IRDATA_FLAGS_EXTRA_INFO | IRDATA_FLAGS_IS_MSB_FIRST;
decodedIRData.extra = data.cmd.magnitude;
decodedIRData.decodedRawData = data.cmd.wand_id;
return true;
}
#endif // _IR_MAGIQUEST_HPP
| 2,682 |
643 | // Generated automatically from org.apache.logging.log4j.util.StringMap for testing purposes
package org.apache.logging.log4j.util;
import org.apache.logging.log4j.util.ReadOnlyStringMap;
public interface StringMap extends ReadOnlyStringMap
{
boolean equals(Object p0);
boolean isFrozen();
int hashCode();
void clear();
void freeze();
void putAll(ReadOnlyStringMap p0);
void putValue(String p0, Object p1);
void remove(String p0);
}
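/*
 * Usage sketch (SortedArrayStringMap is one concrete log4j implementation of
 * this interface; shown for illustration only):
 *
 *   StringMap map = new SortedArrayStringMap();
 *   map.putValue("requestId", "42");
 *   map.freeze();            // isFrozen() now returns true
 *   map.putValue("x", "y");  // mutating a frozen map is expected to fail
 */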
| 160 |
558 | <filename>syncer-jedis/src/main/java/syncer/jedis/commands/BasicCommands.java
package syncer.jedis.commands;
import syncer.jedis.DebugParams;
public interface BasicCommands {
/**
* This command is often used to test if a connection is still alive, or to measure latency.
*
* @return PONG
*/
String ping();
/**
* Ask the server to close the connection. The connection is closed as soon as all pending replies have been written to the client.
* @return OK
*/
String quit();
/**
* Delete all the keys of the currently selected DB. This command never fails.
   * The time complexity for this operation is O(N), N being the number of keys in the database.
* @return OK
*/
String flushDB();
/**
* Return the number of keys in the currently-selected database.
   * @return the number of keys in the currently selected database.
*/
Long dbSize();
/**
* Select the DB with having the specified zero-based numeric index.
* @param index the index
* @return a simple string reply OK
*/
String select(int index);
/**
* This command swaps two Redis databases, so that immediately all the clients connected to a
* given database will see the data of the other database, and the other way around.
* @param index1
* @param index2
* @return Simple string reply: OK if SWAPDB was executed correctly.
*/
String swapDB(int index1, int index2);
/**
* Delete all the keys of all the existing databases, not just the currently selected one.
* @return a simple string reply (OK)
*/
String flushAll();
/**
* Request for authentication in a password-protected Redis server. Redis can be instructed to require a password before allowing clients to execute commands. This is done using the requirepass directive in the configuration file.
   * If the password matches the password in the configuration file, the server replies with the OK status code and starts accepting commands. Otherwise, an error is returned and the client needs to try a new password.
* @param password
* @return the result of the auth
*/
String auth(String password);
/**
   * The SAVE command performs a synchronous save of the dataset producing a point in time snapshot of all the data inside the Redis instance, in the form of an RDB file.
   * You almost never want to call SAVE in production environments where it will block all the other clients. Instead usually BGSAVE is used. However in case of issues preventing Redis from creating the background saving child (for instance errors in the fork(2) system call), the SAVE command can be a good last resort to perform the dump of the latest dataset.
* @return result of the save
*/
String save();
/**
* Save the DB in background. The OK code is immediately returned. Redis forks, the parent continues to serve the clients, the child saves the DB on disk then exits. A client may be able to check if the operation succeeded using the LASTSAVE command.
* @return ok
*/
String bgsave();
/**
* Instruct Redis to start an Append Only File rewrite process. The rewrite will create a small optimized version of the current Append Only File
* If BGREWRITEAOF fails, no data gets lost as the old AOF will be untouched.
   * The rewrite will only be triggered by Redis if there is not already a background process doing persistence. Specifically:
   * If a Redis child is creating a snapshot on disk, the AOF rewrite is scheduled but not started until the saving child producing the RDB file terminates. In this case the BGREWRITEAOF will still return an OK code, but with an appropriate message. You can check if an AOF rewrite is scheduled by looking at the INFO command as of Redis 2.6.
   * If an AOF rewrite is already in progress the command returns an error and no AOF rewrite will be scheduled for a later time.
   * Since Redis 2.4 the AOF rewrite is automatically triggered by Redis; however, the BGREWRITEAOF command can be used to trigger a rewrite at any time.
* @return the response of the command
*/
String bgrewriteaof();
/**
* Return the UNIX TIME of the last DB save executed with success.
   * @return the UNIX time of the latest successful save
*/
Long lastsave();
/**
   * Stop all the clients, perform a SAVE (if at least one save point is configured),
   * flush the append only file if AOF is enabled, and quit the server.
   * @return a reply only in case of error
*/
String shutdown();
/**
* The INFO command returns information and statistics about the server in a format that is simple to parse by computers and easy to read by humans.
* @return information on the server
*/
String info();
/**
* The INFO command returns information and statistics about the server in a format that is simple to parse by computers and easy to read by humans.
* @param section (all: Return all sections, default: Return only the default set of sections, server: General information about the Redis server, clients: Client connections section, memory: Memory consumption related information, persistence: RDB and AOF related information, stats: General statistics, replication: Master/slave replication information, cpu: CPU consumption statistics, commandstats: Redis command statistics, cluster: Redis Cluster section, keyspace: Database related statistics)
   * @return information on the requested section of the server
*/
String info(String section);
/**
* The SLAVEOF command can change the replication settings of a slave on the fly. In the proper form SLAVEOF hostname port will make the server a slave of another server listening at the specified hostname and port.
   * If a server is already a slave of some master, SLAVEOF hostname port will stop the replication against the old server and start the synchronization against the new one, discarding the old dataset.
* @param host listening at the specified hostname
* @param port server listening at the specified port
* @return result of the command.
*/
String slaveof(String host, int port);
/**
   * SLAVEOF NO ONE will stop replication, turning the server into a MASTER, but will not discard the dataset it has already replicated. So, if the old master stops working, it is possible to turn the slave into a master and set the application to use this new master in read/write. Later when the other Redis server is fixed, it can be reconfigured to work as a slave.
* @return result of the command
*/
String slaveofNoOne();
/**
   * Return the index of the currently selected database.
   * @return the index of the current database
*/
int getDB();
String debug(DebugParams params);
String configResetStat();
String configRewrite();
/**
* Blocks until all the previous write commands are successfully transferred and acknowledged by
* at least the specified number of replicas.
* If the timeout, specified in milliseconds, is reached, the command returns
* even if the specified number of replicas were not yet reached.
*
* @param replicas successfully transferred and acknowledged by at least the specified number of replicas
* @param timeout the time to block in milliseconds, a timeout of 0 means to block forever
* @return the number of replicas reached by all the writes performed in the context of the current connection
*/
Long waitReplicas(int replicas, long timeout);
}
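/*
 * Usage sketch (hypothetical; Jedis is the usual concrete implementation of
 * this interface, connected here to a local Redis server):
 *
 *   try (Jedis jedis = new Jedis("localhost", 6379)) {
 *       jedis.ping();                 // "PONG"
 *       jedis.select(1);              // switch to DB 1
 *       long keys = jedis.dbSize();   // number of keys in DB 1
 *       jedis.waitReplicas(1, 100);   // wait up to 100 ms for one replica ack
 *   }
 */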
| 1,868 |
563 | <gh_stars>100-1000
package com.gentics.mesh.rest.client;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.gentics.mesh.json.JsonUtil;
import java.io.IOException;
/**
* Represents an event from the Mesh eventbus.
*/
public class EventbusEvent {
private final String address;
private final JsonNode body;
/**
* Parses a websocket text frame.
* @param rawText
* @throws IOException
*/
public EventbusEvent(String rawText) throws IOException {
ObjectNode parsed = (ObjectNode) JsonUtil.getMapper().readTree(rawText);
address = parsed.get("address").textValue();
body = parsed.get("body");
}
/**
* Get the address of the event
* @return
*/
public String getAddress() {
return address;
}
/**
* Get the body of the event
* @return
*/
public Object getBody() {
return body;
}
/**
* Tries to get the body of the event as a string. Returns null if the body is not a string.
* @return
*/
public String getBodyAsString() {
if (body.isTextual()) {
return body.textValue();
} else {
return null;
}
}
/**
* Tries to get the body of the event as a Json object. Returns null if the body is not an object.
* @return
*/
public ObjectNode getBodyAsJson() {
if (body instanceof ObjectNode) {
return (ObjectNode) body;
} else {
return null;
}
}
}
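/*
 * Usage sketch (the frame content is a made-up example):
 *
 *   EventbusEvent event = new EventbusEvent(
 *       "{\"address\":\"mesh.node.created\",\"body\":{\"uuid\":\"abc123\"}}");
 *   event.getAddress();       // "mesh.node.created"
 *   event.getBodyAsJson();    // ObjectNode with field "uuid"
 *   event.getBodyAsString();  // null, because the body is not textual
 */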
| 490 |
386 | <reponame>bradleyhenke/cortex<gh_stars>100-1000
//////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2007-2010, Image Engine Design Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of Image Engine Design nor the names of any
// other contributors to this software may be used to endorse or
// promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////////////////
#ifndef IECOREPYTHON_OPBINDING_H
#define IECOREPYTHON_OPBINDING_H
#include "IECorePython/Export.h"
#include "IECorePython/RunTimeTypedBinding.h"
#include "IECore/CompoundParameter.h"
namespace IECorePython
{
/// A class to simplify the binding of Op derived classes.
template<typename T, typename TWrapper=T>
class OpClass : public IECorePython::RunTimeTypedClass<T, TWrapper>
{
public :
OpClass( const char *docString = nullptr )
: IECorePython::RunTimeTypedClass<T, TWrapper>( docString )
{
}
};
/// A class for wrapping Ops to allow overriding in Python.
template<typename T>
class OpWrapper : public IECorePython::RunTimeTypedWrapper<T>
{
public :
OpWrapper( PyObject *self, const std::string &description )
: IECorePython::RunTimeTypedWrapper<T>( self, description )
{
}
OpWrapper( PyObject *self, const std::string &description, IECore::ParameterPtr resultParameter )
: IECorePython::RunTimeTypedWrapper<T>( self, description, resultParameter )
{
}
OpWrapper( PyObject *self, const std::string &description, IECore::CompoundParameterPtr compoundParameter, IECore::ParameterPtr resultParameter )
: IECorePython::RunTimeTypedWrapper<T>( self, description, compoundParameter, resultParameter )
{
}
IECore::ObjectPtr doOperation( const IECore::CompoundObject * operands ) override
{
ScopedGILLock gilLock;
boost::python::object o = this->methodOverride( "doOperation" );
if( o )
{
IECore::ObjectPtr r = boost::python::extract<IECore::ObjectPtr>( o( IECore::CompoundObjectPtr( const_cast<IECore::CompoundObject *>( operands ) ) ) );
if( !r )
{
throw IECore::Exception( "doOperation() python method didn't return an Object." );
}
return r;
}
else
{
throw IECore::Exception( "doOperation() python method not defined" );
}
}
};
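/*
 * Binding sketch (MyOp is a hypothetical Op subclass; the exact constructor
 * arguments depend on the class being bound):
 *
 *   OpClass<MyOp, OpWrapper<MyOp> >( "Docs for MyOp" )
 *       .def( boost::python::init<const std::string &>() )
 *   ;
 *
 * Python subclasses may then override doOperation(), which OpWrapper forwards
 * to under the GIL.
 */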
IECOREPYTHON_API void bindOp();
}
#endif // IECOREPYTHON_OPBINDING_H
| 1,245 |
854 | <reponame>rakhi2001/ecom7
__________________________________________________________________________________________________
sample 2 ms submission
class Solution {
public int largest1BorderedSquare(int[][] grid) {
int[][] toright = new int[grid.length][grid[0].length];
int[][] tobottom = new int[grid.length][grid[0].length];
for (int i = 0; i < grid[0].length; i++){
int temp = 0;
for(int j = 0; j < grid.length; j++){
if (grid[j][i] == 0) temp = 0;
else {
temp += 1;
tobottom[j][i] = temp;
}
}
}
int side = 0;
for (int j = 0; j < grid.length; j++){
int temp = 0;
for(int i = 0; i < grid[0].length; i++){
if (grid[j][i] == 0) temp = 0;
else {
temp += 1;
toright[j][i] = temp;
int temp_side = temp;
while(temp_side > side){
if (tobottom[j][i] >= temp_side
&& j - temp_side +1 >= 0 && toright[j - temp_side+1][i] >= temp_side
&& i-temp_side +1 >= 0 && tobottom[j][i-temp_side+1] >= temp_side){
side = temp_side;
}
temp_side--;
}
}
//System.out.println("bottom"+tobottom[j][i]);
//System.out.println(side);
}
}
return side*side;
}
}
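// Note on the approach: tobottom[j][i] and toright[j][i] hold the lengths of the
// consecutive runs of 1s ending at (j, i) going down and right respectively; a
// candidate square of side temp_side is kept only if the two far borders also
// show runs of at least temp_side, so each cell shrinks its candidate until one fits.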
__________________________________________________________________________________________________
sample 3 ms submission
class Solution {
public int largest1BorderedSquare(int[][] grid) {
int n = grid.length, m = grid[0].length;
int[][] hor = new int[n][m];
int[][] ver = new int[n][m];
for (int i = 0; i < n; i++) {
for (int j = 0; j < m; j++) {
if (grid[i][j] == 0) {
hor[i][j] = ver[i][j] = 0;
} else {
hor[i][j] = (j == 0) ? 1 : hor[i][j - 1] + 1;
ver[i][j] = (i == 0) ? 1 : ver[i - 1][j] + 1;
}
}
}
int max = 0;
for (int i = n - 1; i >= 0; i--) {
for (int j = m - 1; j >= 0; j--) {
int cur = Math.min(hor[i][j], ver[i][j]);
while (cur > max) {
if (ver[i][j - cur + 1] >= cur && hor[i - cur + 1][j] >= cur) {
max = Math.max(max, cur);
}
cur--;
}
}
}
return max * max;
}
}
__________________________________________________________________________________________________
| 1,652 |
5,169 | <gh_stars>1000+
{
"name": "WaveTab",
"version": "1.0.0",
"summary": "A wave UITabBarController.",
"description": "A UITabBarController that moves like a wave as the user switches between tabs",
"homepage": "https://github.com/mapierce/WaveTab",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/mapierce/WaveTab",
"tag": "1.0.0"
},
"social_media_url": "https://twitter.com/PierceMatthew",
"platforms": {
"ios": "10.0"
},
"source_files": "WaveTab/Classes/**/*",
"swift_versions": "4.2",
"frameworks": "UIKit",
"swift_version": "4.2"
}
| 279 |
416 | from netaddr import valid_mac, valid_eui64
def test_valid_mac():
assert valid_mac('00-B0-D0-86-BB-F7')
assert not valid_mac('00-1B-77-49-54-FD-12-34')
def test_valid_eui64():
assert valid_eui64('00-1B-77-49-54-FD-12-34')
assert not valid_eui64('00-B0-D0-86-BB-F7')
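def test_valid_mac_colon_notation():
    # A small additional sketch: netaddr also accepts the common colon-separated
    # notation (case-insensitive) for EUI-48 addresses.
    assert valid_mac('00:b0:d0:86:bb:f7')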
| 140 |
460 | #include "../../../src/xmlpatterns/schema/qxsdschemaresolver_p.h"
| 28 |
724 | <reponame>This-50m/vega
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Manage logging states and loggers."""
import os
import time
import copy
import logging
import logging.config
from modnas.utils.config import merge_config
from logging import Logger
from typing import Optional, Dict, Any
DEFAULT_LOGGING_CONF = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '%(asctime)s - %(name)s - %(message)s',
}
},
'handlers': {
'stream': {
'class': 'logging.StreamHandler',
'formatter': 'default',
},
'file': {
'class': 'logging.FileHandler',
'formatter': 'default',
'filename': None,
}
},
'loggers': {
'modnas': {
'handlers': ['stream', 'file'],
'level': 'INFO',
'propagate': False,
},
}
}
def get_logger(name: Optional[str] = None) -> Logger:
"""Return logger of given name."""
root = 'modnas'
return logging.getLogger(root if name is None else (name if name.startswith(root) else root + '.' + name))
def configure_logging(config: Optional[Dict[str, Any]] = None, log_dir: Optional[str] = None) -> None:
"""Config loggers."""
config_fn = logging.config.dictConfig
conf: Dict[str, Any] = copy.deepcopy(DEFAULT_LOGGING_CONF)
conf['handlers']['file']['filename'] = os.path.join(log_dir or '', '%d.log' % (int(time.time())))
merge_config(conf, config or {})
config_fn(conf)
def logged(obj, name=None):
"""Return object with logger attached."""
obj.logger = get_logger(name or obj.__module__)
return obj
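# Usage sketch (the override dict and log directory are made-up examples):
#
#   configure_logging({'loggers': {'modnas': {'level': 'DEBUG'}}}, log_dir='./logs')
#   logger = get_logger('estim')   # resolves to logging.getLogger('modnas.estim')
#   logger.info('hello')           # goes to both the stream and the timestamped file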
| 834 |
3,428 | <gh_stars>1000+
{"id":"02201","group":"easy-ham-1","checksum":{"type":"MD5","value":"1eb2f7b1a998eb4e08c50f05312b48bd"},"text":"From <EMAIL> Thu Oct 3 12:24:10 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: y<EMAIL>ass<EMAIL>int.org\nReceived: from localhost (jalapeno [127.0.0.1])\n\tby jmason.org (Postfix) with ESMTP id AAC2316F20\n\tfor <jm@localhost>; Thu, 3 Oct 2002 12:23:19 +0100 (IST)\nReceived: from jalapeno [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Thu, 03 Oct 2002 12:23:19 +0100 (IST)\nReceived: from dogma.slashnull.org (localhost [127.0.0.1]) by\n dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g9381uK20003 for\n <<EMAIL>>; Thu, 3 Oct 2002 09:01:56 +0100\nMessage-Id: <<EMAIL>>\nTo: yyyy<EMAIL>ass<EMAIL>int.org\nFrom: gamasutra <<EMAIL>>\nSubject: Shader Integration: Merging Shading Technologies on the Nintendo\n Gamecube\nDate: Thu, 03 Oct 2002 08:01:56 -0000\nContent-Type: text/plain; encoding=utf-8\n\nURL: http://www.newsisfree.com/click/-4,8494551,159/\nDate: 2002-10-03T05:50:07+01:00\n\nRendering polygons is not as easy as it used to be, the reason being the vast \namount of different rendering techniques, methods and algorithms available. Not \nonly is choosing the right ones a problem, even worse, all that are selected \nneed to work together. There are some algorithms which simply do not work \ntogether, and in that case, only one may be used whilt the other one needs to \nbe replaced witha more compatible method. This feature explores how Factor 5 \napproached the problem when developing Rogue Leader for Gamecube.\n\n\n"} | 594 |
320 | <filename>MoPubSDKTests/MPVASTCompanionAdViewDelegateHandler.h
//
// MPVASTCompanionAdViewDelegateHandler.h
//
// Copyright 2018-2021 Twitter, Inc.
// Licensed under the MoPub SDK License Agreement
// http://www.mopub.com/legal/sdk-license-agreement/
//
#import <Foundation/Foundation.h>
#import "MPVASTCompanionAdView.h"
NS_ASSUME_NONNULL_BEGIN
@interface MPVASTCompanionAdViewDelegateHandler : NSObject <MPVASTCompanionAdViewDelegate>
@property (nonatomic, copy, nullable) UIViewController * (^viewControllerForPresentingModalMRAIDExpandedViewBlock)(void);
@property (nonatomic, copy, nullable) void (^companionAdViewDidTriggerEventBlock)(MPVASTCompanionAdView *companionAdView, MPVASTResourceViewEvent event);
@property (nonatomic, copy, nullable) void (^companionAdViewDidTriggerOverridingClickThroughBlock)(MPVASTCompanionAdView *companionAdView, NSURL *url);
@property (nonatomic, copy, nullable) void (^companionAdViewRequestDismissBlock)(MPVASTCompanionAdView *companionAdView);
@end
NS_ASSUME_NONNULL_END
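// Usage sketch (hypothetical test wiring; assumes the ad view exposes a delegate
// property conforming to MPVASTCompanionAdViewDelegate):
//
//   MPVASTCompanionAdViewDelegateHandler *handler = [[MPVASTCompanionAdViewDelegateHandler alloc] init];
//   handler.companionAdViewRequestDismissBlock = ^(MPVASTCompanionAdView *adView) {
//       // assert dismissal behavior here
//   };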
| 355 |
30,023 | """Provides the ezviz DataUpdateCoordinator."""
from datetime import timedelta
import logging
from async_timeout import timeout
from pyezviz.client import EzvizClient
from pyezviz.exceptions import HTTPError, InvalidURL, PyEzvizError
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
class EzvizDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching Ezviz data."""
def __init__(
self, hass: HomeAssistant, *, api: EzvizClient, api_timeout: int
) -> None:
"""Initialize global Ezviz data updater."""
self.ezviz_client = api
self._api_timeout = api_timeout
update_interval = timedelta(seconds=30)
super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=update_interval)
def _update_data(self) -> dict:
"""Fetch data from Ezviz via camera load function."""
return self.ezviz_client.load_cameras()
async def _async_update_data(self) -> dict:
"""Fetch data from Ezviz."""
try:
async with timeout(self._api_timeout):
return await self.hass.async_add_executor_job(self._update_data)
except (InvalidURL, HTTPError, PyEzvizError) as error:
raise UpdateFailed(f"Invalid response from API: {error}") from error
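# Setup sketch (hypothetical, e.g. inside async_setup_entry; client construction
# and timeout value are illustrative):
#
#   coordinator = EzvizDataUpdateCoordinator(hass, api=client, api_timeout=10)
#   await coordinator.async_refresh()
#   cameras = coordinator.data  # result of EzvizClient.load_cameras()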
| 544 |
852 | <filename>L1Trigger/L1THGCalUtilities/python/caloTruthCells_cff.py
import FWCore.ParameterSet.Config as cms
from L1Trigger.L1THGCalUtilities.caloTruthCellsProducer_cfi import caloTruthCellsProducer
caloTruthCells = cms.Sequence(caloTruthCellsProducer)
if caloTruthCellsProducer.makeCellsCollection:
## cluster and tower sequence
from L1Trigger.L1THGCal.hgcalConcentratorProducer_cfi import hgcalConcentratorProducer
from L1Trigger.L1THGCal.hgcalBackEndLayer1Producer_cfi import hgcalBackEndLayer1Producer
from L1Trigger.L1THGCal.hgcalBackEndLayer2Producer_cfi import hgcalBackEndLayer2Producer
from L1Trigger.L1THGCal.hgcalTowerMapProducer_cfi import hgcalTowerMapProducer
from L1Trigger.L1THGCal.hgcalTowerProducer_cfi import hgcalTowerProducer
hgcalTruthConcentratorProducer = hgcalConcentratorProducer.clone(
InputTriggerCells = cms.InputTag('caloTruthCellsProducer')
)
hgcalTruthBackEndLayer1Producer = hgcalBackEndLayer1Producer.clone(
InputTriggerCells = cms.InputTag('hgcalTruthConcentratorProducer:HGCalConcentratorProcessorSelection')
)
hgcalTruthBackEndLayer2Producer = hgcalBackEndLayer2Producer.clone(
InputCluster = cms.InputTag('hgcalTruthBackEndLayer1Producer:HGCalBackendLayer1Processor2DClustering')
)
hgcalTruthTowerMapProducer = hgcalTowerMapProducer.clone(
InputTriggerCells = cms.InputTag('caloTruthCellsProducer')
)
hgcalTruthTowerProducer = hgcalTowerProducer.clone(
InputTowerMaps = cms.InputTag('hgcalTruthTowerMapProducer:HGCalTowerMapProcessor')
)
caloTruthCells += cms.Sequence(
hgcalTruthConcentratorProducer *
hgcalTruthBackEndLayer1Producer *
hgcalTruthBackEndLayer2Producer *
hgcalTruthTowerMapProducer *
hgcalTruthTowerProducer
)
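# Usage sketch (hypothetical process definition; the path name is illustrative):
#
#   process.load('L1Trigger.L1THGCalUtilities.caloTruthCells_cff')
#   process.truth_path = cms.Path(process.caloTruthCells)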
| 766 |
5,079 | import unittest
from nose.plugins.base import IPluginInterface
class TestPluginInterfaces(unittest.TestCase):
def test_api_methods_present(self):
from nose.loader import TestLoader
from nose.selector import Selector
exclude = [ 'loadTestsFromGenerator',
'loadTestsFromGeneratorMethod'
]
selfuncs = [ f for f in dir(Selector)
if f.startswith('want') ]
loadfuncs = [ f for f in dir(TestLoader)
if f.startswith('load') and not f in exclude ]
others = ['addDeprecated', 'addError', 'addFailure',
'addSkip', 'addSuccess', 'startTest', 'stopTest',
'prepareTest', 'begin', 'report'
]
expect = selfuncs + loadfuncs + others
pd = dir(IPluginInterface)
for f in expect:
assert f in pd, "No %s in IPluginInterface" % f
            assert getattr(IPluginInterface, f).__doc__, \
                "No docs for %s in IPluginInterface" % f
def test_no_instantiate(self):
try:
p = IPluginInterface()
except TypeError:
pass
else:
assert False, \
"Should not be able to instantiate IPluginInterface"
if __name__ == '__main__':
unittest.main()
| 713 |
884 | <gh_stars>100-1000
{
"documentVersion": "1.0",
"jsonSchemaSemanticVersion": "1.0.0",
"manifestName": "Miscellaneous",
"entities": [
{
"type": "LocalEntity",
"entityName": "WorkflowInstantFlowEndpoints",
"entityPath": "WorkflowInstantFlowEndpoints.1.0.1.cdm.json/WorkflowInstantFlowEndpoints"
}
]
} | 138 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-g3fq-3v3g-mh32",
"modified": "2021-04-13T18:40:52Z",
"published": "2021-04-16T19:52:58Z",
"aliases": [
"CVE-2021-29438"
],
"summary": "Improper Neutralization of Script-Related HTML Tags in a Web Page (Basic XSS) in @nextcloud/dialogs",
"details": "### Impact\n\nThe Nextcloud dialogs library before 3.1.2 did insufficiently escape text input passed to a toast. If your application displays toasts with user-supplied input, this could lead to a XSS vulnerability.\n\n_Note_: Nextcloud Server employs a strict Content Security Policy that mitigates the risk of these XSS vulnerabilities.\n\n### Patches\n\nThe vulnerability has been patched in version 3.1.2. If you need to display HTML in the toast, explicitly pass the `options.isHTML` config flag.\n\n### Workarounds\n\nMake sure no user-supplied input flows into toasts.",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.1/AV:N/AC:L/PR:L/UI:R/S:U/C:L/I:L/A:N"
}
],
"affected": [
{
"package": {
"ecosystem": "npm",
"name": "@nextcloud/dialogs"
},
"ranges": [
{
"type": "ECOSYSTEM",
"events": [
{
"introduced": "0"
},
{
"fixed": "3.1.2"
}
]
}
]
}
],
"references": [
{
"type": "WEB",
"url": "https://github.com/nextcloud/nextcloud-dialogs/security/advisories/GHSA-g3fq-3v3g-mh32"
},
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2021-29438"
},
{
"type": "WEB",
"url": "https://www.npmjs.com/package/@nextcloud/dialogs"
}
],
"database_specific": {
"cwe_ids": [
"CWE-79",
"CWE-80"
],
"severity": "MODERATE",
"github_reviewed": true
}
} | 892 |
1,935 | /*
Copyright (c) 2021 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
/**
Testcase Scenarios :
1) Add multiple Memcpy nodes to graph and verify node execution is
working as expected.
*/
#include <hip_test_common.hh>
#include <hip_test_checkers.hh>
/**
* Functional Test adds memcpy nodes of types H2D, D2D and D2H to graph
* and verifies execution sequence by launching graph.
*/
TEST_CASE("Unit_hipGraphAddMemcpyNode_Functional") {
constexpr int width{10}, height{10}, depth{10};
hipArray *devArray1, *devArray2;
hipChannelFormatKind formatKind = hipChannelFormatKindSigned;
hipMemcpy3DParms myparams;
uint32_t size = width * height * depth * sizeof(int);
hipGraph_t graph;
hipGraphNode_t memcpyNode;
std::vector<hipGraphNode_t> dependencies;
hipStream_t streamForGraph;
hipGraphExec_t graphExec;
int *hData = reinterpret_cast<int*>(malloc(size));
int *hOutputData = reinterpret_cast<int *>(malloc(size));
REQUIRE(hData != nullptr);
REQUIRE(hOutputData != nullptr);
memset(hData, 0, size);
memset(hOutputData, 0, size);
HIP_CHECK(hipStreamCreate(&streamForGraph));
// Initialize host buffer
for (int i = 0; i < depth; i++) {
for (int j = 0; j < height; j++) {
for (int k = 0; k < width; k++) {
hData[i*width*height + j*width + k] = i*width*height + j*width + k;
}
}
}
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(sizeof(int)*8,
0, 0, 0, formatKind);
HIP_CHECK(hipMalloc3DArray(&devArray1, &channelDesc,
make_hipExtent(width, height, depth), hipArrayDefault));
HIP_CHECK(hipMalloc3DArray(&devArray2, &channelDesc,
make_hipExtent(width, height, depth), hipArrayDefault));
HIP_CHECK(hipGraphCreate(&graph, 0));
// Host to Device
memset(&myparams, 0x0, sizeof(hipMemcpy3DParms));
myparams.srcPos = make_hipPos(0, 0, 0);
myparams.dstPos = make_hipPos(0, 0, 0);
myparams.extent = make_hipExtent(width , height, depth);
myparams.srcPtr = make_hipPitchedPtr(hData, width * sizeof(int),
width, height);
myparams.dstArray = devArray1;
myparams.kind = hipMemcpyHostToDevice;
HIP_CHECK(hipGraphAddMemcpyNode(&memcpyNode, graph, nullptr, 0, &myparams));
dependencies.push_back(memcpyNode);
// Device to Device
memset(&myparams, 0x0, sizeof(hipMemcpy3DParms));
myparams.srcPos = make_hipPos(0, 0, 0);
myparams.dstPos = make_hipPos(0, 0, 0);
myparams.srcArray = devArray1;
myparams.dstArray = devArray2;
myparams.extent = make_hipExtent(width, height, depth);
myparams.kind = hipMemcpyDeviceToDevice;
HIP_CHECK(hipGraphAddMemcpyNode(&memcpyNode, graph, dependencies.data(),
dependencies.size(), &myparams));
dependencies.clear();
dependencies.push_back(memcpyNode);
// Device to host
memset(&myparams, 0x0, sizeof(hipMemcpy3DParms));
myparams.srcPos = make_hipPos(0, 0, 0);
myparams.dstPos = make_hipPos(0, 0, 0);
myparams.dstPtr = make_hipPitchedPtr(hOutputData, width * sizeof(int),
width, height);
myparams.srcArray = devArray2;
myparams.extent = make_hipExtent(width, height, depth);
myparams.kind = hipMemcpyDeviceToHost;
HIP_CHECK(hipGraphAddMemcpyNode(&memcpyNode, graph, dependencies.data(),
dependencies.size(), &myparams));
// Instantiate and launch the graph
HIP_CHECK(hipGraphInstantiate(&graphExec, graph, nullptr, nullptr, 0));
HIP_CHECK(hipGraphLaunch(graphExec, streamForGraph));
HIP_CHECK(hipStreamSynchronize(streamForGraph));
// Check result
HipTest::checkArray(hData, hOutputData, width, height, depth);
HIP_CHECK(hipGraphExecDestroy(graphExec));
HIP_CHECK(hipGraphDestroy(graph));
HIP_CHECK(hipStreamDestroy(streamForGraph));
hipFreeArray(devArray1);
hipFreeArray(devArray2);
free(hData);
free(hOutputData);
}
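/*
 * Build sketch (paths and flags are illustrative, not taken from this repo's
 * cmake setup):
 *
 *   hipcc Unit_hipGraphAddMemcpyNode.cc -I<hip-tests-include> -o graph_memcpy_test
 *   ./graph_memcpy_test
 */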
| 1,850 |
563 | package com.gentics.mesh.search.verticle.eventhandler;
import static com.gentics.mesh.core.rest.MeshEvent.INDEX_CHECK_REQUEST;
import static com.gentics.mesh.core.rest.MeshEvent.INDEX_CHECK_START;
import static com.gentics.mesh.core.rest.MeshEvent.INDEX_CHECK_FINISHED;
import java.util.Collection;
import java.util.Collections;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.gentics.mesh.core.data.search.request.SearchRequest;
import com.gentics.mesh.core.rest.MeshEvent;
import com.gentics.mesh.search.IndexHandlerRegistry;
import com.gentics.mesh.search.verticle.MessageEvent;
import dagger.Lazy;
import io.reactivex.Flowable;
import io.vertx.core.Vertx;
import io.vertx.core.logging.Logger;
import io.vertx.core.logging.LoggerFactory;
/**
* Event handler, that will check the currently existing indices (for existence and correctness of the mapping)
*/
@Singleton
public class CheckIndicesHandler implements EventHandler {
private static final Logger log = LoggerFactory.getLogger(CheckIndicesHandler.class);
private final Lazy<IndexHandlerRegistry> registry;
private final Vertx vertx;
/**
* Create an instance
* @param registry index handler registry
* @param vertx vertx
*/
@Inject
public CheckIndicesHandler(Lazy<IndexHandlerRegistry> registry, Vertx vertx) {
this.registry = registry;
this.vertx = vertx;
}
@Override
public Collection<MeshEvent> handledEvents() {
return Collections.singletonList(INDEX_CHECK_REQUEST);
}
@Override
public Flowable<SearchRequest> handle(MessageEvent messageEvent) {
return syncIndices().doOnSubscribe(ignore -> {
log.debug("Processing index check job.");
vertx.eventBus().publish(INDEX_CHECK_START.address, null);
}).doFinally(() -> {
log.debug("Index check job finished.");
vertx.eventBus().publish(INDEX_CHECK_FINISHED.address, null);
});
}
protected Flowable<SearchRequest> syncIndices() {
return Flowable.fromIterable(registry.get().getHandlers())
.flatMap(handler -> handler.check().toFlowable());
}
}
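/*
 * Trigger sketch: an index check is requested by publishing the corresponding
 * event on the Vert.x eventbus, which this handler answers with
 * INDEX_CHECK_START and INDEX_CHECK_FINISHED notifications:
 *
 *   vertx.eventBus().publish(MeshEvent.INDEX_CHECK_REQUEST.address, null);
 */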
| 677 |
688 | <reponame>YuiZh0u/TextGAN-PyTorch<filename>models/generator.py
# -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : generator.py
# @Time : Created at 2019-03-18
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import math
import torch
import torch.nn as nn
import config as cfg
from utils.helpers import truncated_normal_
class LSTMGenerator(nn.Module):
def __init__(self, embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx, gpu=False):
super(LSTMGenerator, self).__init__()
self.name = 'vanilla'
self.hidden_dim = hidden_dim
self.embedding_dim = embedding_dim
self.max_seq_len = max_seq_len
self.vocab_size = vocab_size
self.padding_idx = padding_idx
self.gpu = gpu
self.temperature = 1.0
self.embeddings = nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx)
self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
self.lstm2out = nn.Linear(hidden_dim, vocab_size)
self.softmax = nn.LogSoftmax(dim=-1)
self.init_params()
def forward(self, inp, hidden, need_hidden=False):
"""
Embeds input and applies LSTM
:param inp: batch_size * seq_len
:param hidden: (h, c)
        :param need_hidden: if True, also return the hidden state (used for sampling)
"""
emb = self.embeddings(inp) # batch_size * len * embedding_dim
if len(inp.size()) == 1:
emb = emb.unsqueeze(1) # batch_size * 1 * embedding_dim
out, hidden = self.lstm(emb, hidden) # out: batch_size * seq_len * hidden_dim
out = out.contiguous().view(-1, self.hidden_dim) # out: (batch_size * len) * hidden_dim
out = self.lstm2out(out) # (batch_size * seq_len) * vocab_size
# out = self.temperature * out # temperature
pred = self.softmax(out)
if need_hidden:
return pred, hidden
else:
return pred
def sample(self, num_samples, batch_size, start_letter=cfg.start_letter):
"""
Samples the network and returns num_samples samples of length max_seq_len.
:return samples: num_samples * max_seq_length (a sampled sequence in each row)
"""
num_batch = num_samples // batch_size + 1 if num_samples != batch_size else 1
samples = torch.zeros(num_batch * batch_size, self.max_seq_len).long()
# Generate sentences with multinomial sampling strategy
for b in range(num_batch):
hidden = self.init_hidden(batch_size)
inp = torch.LongTensor([start_letter] * batch_size)
if self.gpu:
inp = inp.cuda()
for i in range(self.max_seq_len):
out, hidden = self.forward(inp, hidden, need_hidden=True) # out: batch_size * vocab_size
next_token = torch.multinomial(torch.exp(out), 1) # batch_size * 1 (sampling from each row)
samples[b * batch_size:(b + 1) * batch_size, i] = next_token.view(-1)
inp = next_token.view(-1)
samples = samples[:num_samples]
return samples
def init_params(self):
for param in self.parameters():
if param.requires_grad and len(param.shape) > 0:
stddev = 1 / math.sqrt(param.shape[0])
if cfg.gen_init == 'uniform':
torch.nn.init.uniform_(param, a=-0.05, b=0.05)
elif cfg.gen_init == 'normal':
torch.nn.init.normal_(param, std=stddev)
elif cfg.gen_init == 'truncated_normal':
truncated_normal_(param, std=stddev)
def init_oracle(self):
for param in self.parameters():
if param.requires_grad:
torch.nn.init.normal_(param, mean=0, std=1)
def init_hidden(self, batch_size=cfg.batch_size):
h = torch.zeros(1, batch_size, self.hidden_dim)
c = torch.zeros(1, batch_size, self.hidden_dim)
if self.gpu:
return h.cuda(), c.cuda()
else:
return h, c
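# Usage sketch (all hyperparameters are made-up examples; cfg normally supplies them):
#
#   gen = LSTMGenerator(embedding_dim=32, hidden_dim=32, vocab_size=5000,
#                       max_seq_len=20, padding_idx=0, gpu=False)
#   samples = gen.sample(num_samples=64, batch_size=64)  # LongTensor of shape 64 x 20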
| 1,967 |
333 | <gh_stars>100-1000
#ifndef RS_GENERAL_ROW_PURGE_HPP
#define RS_GENERAL_ROW_PURGE_HPP
#include "rcConnect.h"
#include "generalRowPurge.h"
int rsGeneralRowPurge( rsComm_t *rsComm, generalRowPurgeInp_t *generalRowPurgeInp );
int _rsGeneralRowPurge( rsComm_t *rsComm, generalRowPurgeInp_t *generalRowPurgeInp );
#endif
| 136 |
938 | {
"multipart": [
{ "apply": { "model": "tconstruct:block/clear_stained_glass/pane_post" }},
{ "when": { "north": true },
"apply": { "model": "tconstruct:block/clear_stained_glass/pane_side" }
},
{ "when": { "east": true },
"apply": { "model": "tconstruct:block/clear_stained_glass/pane_side", "y": 90 }
},
{ "when": { "south": true },
"apply": { "model": "tconstruct:block/clear_stained_glass/pane_side_alt" }
},
{ "when": { "west": true },
"apply": { "model": "tconstruct:block/clear_stained_glass/pane_side_alt", "y": 90 }
},
{ "when": { "north": false },
"apply": { "model": "tconstruct:block/clear_stained_glass/pane_noside" }
},
{ "when": { "east": false },
"apply": { "model": "tconstruct:block/clear_stained_glass/pane_noside_alt" }
},
{ "when": { "south": false },
"apply": { "model": "tconstruct:block/clear_stained_glass/pane_noside_alt", "y": 90 }
},
{ "when": { "west": false },
"apply": { "model": "tconstruct:block/clear_stained_glass/pane_noside", "y": 270 }
}
]
}
| 474 |
725 | <reponame>trisadmeslek/V-Sekai-Blender-tools
# MIT License
# Copyright (c) 2018 Hotox
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Code author: Hotox
# Repo: https://github.com/michaeldegroot/cats-blender-plugin
# Edits by:
import bpy
import math
from . import common as Common
from . import armature_bones as Bones
from .register import register_wrap
from .translations import t
ignore_shapes = []
ignore_meshes = []
@register_wrap
class ScanButton(bpy.types.Operator):
bl_idname = 'cats_decimation.auto_scan'
bl_label = t('ScanButton.label')
bl_description = t('ScanButton.desc')
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
@classmethod
def poll(cls, context):
if context.scene.add_shape_key == "":
return False
return True
def execute(self, context):
shape = context.scene.add_shape_key
        shapes = [x[0] for x in Common.get_shapekeys_decimation_list(self, context)]
count = len(shapes)
if count > 1 and shapes.index(shape) == count - 1:
context.scene.add_shape_key = shapes[count - 2]
ignore_shapes.append(shape)
return {'FINISHED'}
@register_wrap
class AddShapeButton(bpy.types.Operator):
bl_idname = 'cats_decimation.add_shape'
bl_label = t('AddShapeButton.label')
bl_description = t('AddShapeButton.desc')
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
@classmethod
def poll(cls, context):
if context.scene.add_shape_key == "":
return False
return True
def execute(self, context):
shape = context.scene.add_shape_key
shapes = [x[0] for x in Common.get_shapekeys_decimation_list(self, context)]
count = len(shapes)
if count > 1 and shapes.index(shape) == count - 1:
context.scene.add_shape_key = shapes[count - 2]
ignore_shapes.append(shape)
return {'FINISHED'}
@register_wrap
class AddMeshButton(bpy.types.Operator):
bl_idname = 'cats_decimation.add_mesh'
bl_label = t('AddMeshButton.label')
bl_description = t('AddMeshButton.desc')
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
@classmethod
def poll(cls, context):
if context.scene.add_mesh == "":
return False
return True
def execute(self, context):
ignore_meshes.append(context.scene.add_mesh)
for obj in bpy.context.scene.objects:
if obj.type == 'MESH':
if obj.parent and obj.parent.type == 'ARMATURE' and obj.parent.name == bpy.context.scene.armature:
if obj.name in ignore_meshes:
continue
context.scene.add_mesh = obj.name
break
return {'FINISHED'}
@register_wrap
class RemoveShapeButton(bpy.types.Operator):
bl_idname = 'cats_decimation.remove_shape'
bl_label = t('RemoveShapeButton.label')
bl_description = t('RemoveShapeButton.desc')
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
shape_name = bpy.props.StringProperty()
def execute(self, context):
ignore_shapes.remove(self.shape_name)
return {'FINISHED'}
@register_wrap
class RemoveMeshButton(bpy.types.Operator):
bl_idname = 'cats_decimation.remove_mesh'
bl_label = t('RemoveMeshButton.label')
bl_description = t('RemoveMeshButton.desc')
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
mesh_name = bpy.props.StringProperty()
def execute(self, context):
ignore_meshes.remove(self.mesh_name)
return {'FINISHED'}
@register_wrap
class AutoDecimateButton(bpy.types.Operator):
bl_idname = 'cats_decimation.auto_decimate'
bl_label = t('AutoDecimateButton.label')
bl_description = t('AutoDecimateButton.desc')
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
armature_name = bpy.props.StringProperty(
name='armature_name',
)
preserve_seams = bpy.props.BoolProperty(
name='preserve_seams',
)
seperate_materials = bpy.props.BoolProperty(
name='seperate_materials'
)
def execute(self, context):
meshes = Common.get_meshes_objects()
if not meshes or len(meshes) == 0:
self.report({'ERROR'}, t('AutoDecimateButton.error.noMesh'))
return {'FINISHED'}
saved_data = Common.SavedData()
if context.scene.decimation_mode != 'CUSTOM':
mesh = Common.join_meshes(repair_shape_keys=False, armature_name=self.armature_name)
if self.seperate_materials:
Common.separate_by_materials(context, mesh)
self.decimate(context)
Common.join_meshes(armature_name=self.armature_name)
saved_data.load()
return {'FINISHED'}
def decimate(self, context):
print('START DECIMATION')
Common.set_default_stage()
custom_decimation = context.scene.decimation_mode == 'CUSTOM'
full_decimation = context.scene.decimation_mode == 'FULL'
half_decimation = context.scene.decimation_mode == 'HALF'
safe_decimation = context.scene.decimation_mode == 'SAFE'
smart_decimation = context.scene.decimation_mode == 'SMART'
save_fingers = context.scene.decimate_fingers
animation_weighting = context.scene.decimation_animation_weighting
animation_weighting_factor = context.scene.decimation_animation_weighting_factor
max_tris = context.scene.max_tris
meshes = []
current_tris_count = 0
tris_count = 0
meshes_obj = Common.get_meshes_objects(armature_name=self.armature_name)
for mesh in meshes_obj:
Common.set_active(mesh)
Common.switch('EDIT')
bpy.ops.mesh.quads_convert_to_tris(quad_method='BEAUTY', ngon_method='BEAUTY')
Common.switch('OBJECT')
if context.scene.decimation_remove_doubles:
Common.remove_doubles(mesh, 0.00001, save_shapes=True)
current_tris_count += Common.get_tricount(mesh.data.polygons)
if animation_weighting:
for mesh in meshes_obj:
# Weight by multiplied bone weights for every pair of bones.
# This is O(n*m^2) for n verts and m bones, generally runs relatively quickly.
weights = dict()
for vertex in mesh.data.vertices:
v_weights = [group.weight for group in vertex.groups]
v_mults = []
for idx1, w1 in enumerate(vertex.groups):
for idx2, w2 in enumerate(vertex.groups):
if idx1 != idx2:
# Weight [vgroup * vgroup] for index = <mult>
if (w1.group, w2.group) not in weights:
weights[(w1.group, w2.group)] = dict()
weights[(w1.group, w2.group)][vertex.index] = w1.weight * w2.weight
# Normalize per vertex group pair
normalizedweights = dict()
for pair, weighting in weights.items():
m_min = 1
m_max = 0
for _, weight in weighting.items():
m_min = min(m_min, weight)
m_max = max(m_max, weight)
if pair not in normalizedweights:
normalizedweights[pair] = dict()
for v_index, weight in weighting.items():
try:
normalizedweights[pair][v_index] = (weight - m_min) / (m_max - m_min)
except ZeroDivisionError:
normalizedweights[pair][v_index] = weight
newweights = dict()
for pair, weighting in normalizedweights.items():
for v_index, weight in weighting.items():
try:
newweights[v_index] = max(newweights[v_index], weight)
except KeyError:
newweights[v_index] = weight
s_weights = dict()
# Weight by relative shape key movement. This is kind of slow, but not too bad. It's O(n*m) for n verts and m shape keys,
# but shape keys contain every vert (not just the ones they impact)
# For shape key in shape keys:
if mesh.data.shape_keys is not None:
for key_block in mesh.data.shape_keys.key_blocks[1:]:
basis = mesh.data.shape_keys.key_blocks[0]
s_weights[key_block.name] = dict()
for idx, vert in enumerate(key_block.data):
s_weights[key_block.name][idx] = math.sqrt(math.pow(basis.data[idx].co[0] - vert.co[0], 2.0) +
math.pow(basis.data[idx].co[1] - vert.co[1], 2.0) +
math.pow(basis.data[idx].co[2] - vert.co[2], 2.0))
# normalize min/max vert movement
s_normalizedweights = dict()
for keyname, weighting in s_weights.items():
m_min = math.inf
m_max = 0
for _, weight in weighting.items():
m_min = min(m_min, weight)
m_max = max(m_max, weight)
if keyname not in s_normalizedweights:
s_normalizedweights[keyname] = dict()
for v_index, weight in weighting.items():
try:
s_normalizedweights[keyname][v_index] = (weight - m_min) / (m_max - m_min)
except ZeroDivisionError:
s_normalizedweights[keyname][v_index] = weight
# find max normalized movement over all shape keys
for pair, weighting in s_normalizedweights.items():
for v_index, weight in weighting.items():
try:
newweights[v_index] = max(newweights[v_index], weight)
except KeyError:
newweights[v_index] = weight
# TODO: ignore shape keys which move very little?
context.view_layer.objects.active = mesh
bpy.ops.object.vertex_group_add()
mesh.vertex_groups[-1].name = "CATS Animation"
for idx, weight in newweights.items():
mesh.vertex_groups[-1].add([idx], weight, "REPLACE")
if save_fingers:
for mesh in meshes_obj:
if len(mesh.vertex_groups) > 0:
Common.set_active(mesh)
Common.switch('EDIT')
bpy.ops.mesh.select_mode(type='VERT')
for finger in Bones.bone_finger_list:
print(finger)
vgs = [mesh.vertex_groups.get(finger + 'L'), mesh.vertex_groups.get(finger + 'R')]
for vg in vgs:
if vg:
bpy.ops.object.vertex_group_set_active(group=vg.name)
bpy.ops.object.vertex_group_select()
try:
bpy.ops.mesh.separate(type='SELECTED')
except RuntimeError:
pass
bpy.ops.object.mode_set(mode='OBJECT')
Common.unselect_all()
for mesh in meshes_obj:
Common.set_active(mesh)
tris = Common.get_tricount(mesh)
if custom_decimation and mesh.name in ignore_meshes:
Common.unselect_all()
continue
if Common.has_shapekeys(mesh):
if full_decimation:
bpy.ops.object.shape_key_remove(all=True)
meshes.append((mesh, tris))
tris_count += tris
elif smart_decimation:
if len(mesh.data.shape_keys.key_blocks) == 1:
bpy.ops.object.shape_key_remove(all=True)
else:
mesh.active_shape_key_index = 0
# Sanity check, make sure basis isn't against something weird
mesh.active_shape_key.relative_key = mesh.active_shape_key
# Add a duplicate basis key which we un-apply to fix shape keys
bpy.ops.object.shape_key_add(from_mix=False)
mesh.active_shape_key.name = "CATS Basis"
mesh.active_shape_key_index = 0
meshes.append((mesh, tris))
tris_count += tris
elif custom_decimation:
found = False
for shape in ignore_shapes:
if shape in mesh.data.shape_keys.key_blocks:
found = True
break
if found:
Common.unselect_all()
continue
bpy.ops.object.shape_key_remove(all=True)
meshes.append((mesh, tris))
tris_count += tris
elif half_decimation and len(mesh.data.shape_keys.key_blocks) < 4:
bpy.ops.object.shape_key_remove(all=True)
meshes.append((mesh, tris))
tris_count += tris
elif len(mesh.data.shape_keys.key_blocks) == 1:
bpy.ops.object.shape_key_remove(all=True)
meshes.append((mesh, tris))
tris_count += tris
else:
meshes.append((mesh, tris))
tris_count += tris
Common.unselect_all()
print(current_tris_count)
print(tris_count)
print((current_tris_count - tris_count), '>', max_tris)
if (current_tris_count - tris_count) > max_tris:
message = [t('decimate.cantDecimateWithSettings', number=str(max_tris))]
if safe_decimation:
message.append(t('decimate.safeTryOptions'))
elif half_decimation:
message.append(t('decimate.halfTryOptions'))
elif custom_decimation:
message.append(t('decimate.customTryOptions'))
if save_fingers:
if full_decimation or smart_decimation:
message.append(t('decimate.disableFingersOrIncrease'))
else:
message[1] = message[1][:-1]
message.append(t('decimate.disableFingers'))
Common.show_error(6, message)
return
try:
decimation = (max_tris - current_tris_count + tris_count) / tris_count
except ZeroDivisionError:
decimation = 1
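        # Worked example: max_tris=70000, current_tris_count=120000 and tris_count=80000
        # decimatable tris gives (70000 - 120000 + 80000) / 80000 = 0.375 as the keep ratio.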
if decimation >= 1:
Common.show_error(6, [t('decimate.noDecimationNeeded', number=str(max_tris))])
return
        elif decimation <= 0:
            Common.show_error(4.5, [t('decimate.cantDecimate1', number=str(max_tris)),
                                    t('decimate.cantDecimate2')])
            return
meshes.sort(key=lambda x: x[1])
for mesh in reversed(meshes):
mesh_obj = mesh[0]
tris = mesh[1]
Common.set_active(mesh_obj)
print(mesh_obj.name)
# Calculate new decimation ratio
try:
decimation = (max_tris - current_tris_count + tris_count) / tris_count
except ZeroDivisionError:
decimation = 1
print(decimation)
# Apply decimation mod
if not smart_decimation:
mod = mesh_obj.modifiers.new("Decimate", 'DECIMATE')
mod.ratio = decimation
mod.use_collapse_triangulate = True
if animation_weighting:
mod.vertex_group = "CATS Animation"
mod.vertex_group_factor = animation_weighting_factor
mod.invert_vertex_group = True
Common.apply_modifier(mod)
else:
Common.switch('EDIT')
bpy.ops.mesh.select_mode(type="VERT")
bpy.ops.mesh.select_all(action="SELECT")
# TODO: Fix decimation calculation when pinning seams
if self.preserve_seams:
bpy.ops.mesh.select_all(action="DESELECT")
bpy.ops.uv.seams_from_islands()
# select all seams
Common.switch('OBJECT')
me = mesh_obj.data
for edge in me.edges:
if edge.use_seam:
edge.select = True
Common.switch('EDIT')
bpy.ops.mesh.select_all(action="INVERT")
#TODO: On many meshes, un-subdividing until it's near the target verts and then decimating the rest of the way
# results in MUCH better topology. Something to figure out against 2.93
bpy.ops.mesh.decimate(ratio=decimation,
use_vertex_group=animation_weighting,
vertex_group_factor=animation_weighting_factor,
invert_vertex_group=True,
use_symmetry=True,
symmetry_axis='X')
Common.switch('OBJECT')
tris_after = len(mesh_obj.data.polygons)
print(tris)
print(tris_after)
current_tris_count = current_tris_count - tris + tris_after
tris_count = tris_count - tris
# Repair shape keys if SMART mode is enabled
if smart_decimation and Common.has_shapekeys(mesh_obj):
for idx in range(1, len(mesh_obj.data.shape_keys.key_blocks) - 1):
mesh_obj.active_shape_key_index = idx
Common.switch('EDIT')
bpy.ops.mesh.blend_from_shape(shape="CATS Basis", blend=-1.0, add=True)
Common.switch('OBJECT')
mesh_obj.shape_key_remove(key=mesh_obj.data.shape_keys.key_blocks["CATS Basis"])
mesh_obj.active_shape_key_index = 0
Common.unselect_all()
# # Check if decimated correctly
# if decimation < 0:
# print('')
# print('RECHECK!')
#
# current_tris_count = 0
# tris_count = 0
#
# for mesh in Common.get_meshes_objects():
# Common.select(mesh)
# tris = len(bpy.context.active_object.data.polygons)
# tris_count += tris
# print(tris_count)
#
# for mesh in reversed(meshes):
# mesh_obj = mesh[0]
# Common.select(mesh_obj)
#
# # Calculate new decimation ratio
# decimation = (max_tris - tris_count) / tris_count
# print(decimation)
#
# # Apply decimation mod
# mod = mesh_obj.modifiers.new("Decimate", 'DECIMATE')
# mod.ratio = decimation
# mod.use_collapse_triangulate = True
# Common.apply_modifier(mod)
#
# Common.unselect_all()
# break
@register_wrap
class AutoDecimatePresetGood(bpy.types.Operator):
bl_idname = 'cats_decimation.preset_good'
bl_label = t('DecimationPanel.preset.good.label')
bl_description = t('DecimationPanel.preset.good.description')
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
def execute(self, context):
bpy.context.scene.max_tris = 70000
return {'FINISHED'}
@register_wrap
class AutoDecimatePresetExcellent(bpy.types.Operator):
bl_idname = 'cats_decimation.preset_excellent'
bl_label = t('DecimationPanel.preset.excellent.label')
bl_description = t('DecimationPanel.preset.excellent.description')
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
def execute(self, context):
bpy.context.scene.max_tris = 32000
return {'FINISHED'}
@register_wrap
class AutoDecimatePresetQuest(bpy.types.Operator):
bl_idname = 'cats_decimation.preset_quest'
bl_label = t('DecimationPanel.preset.quest.label')
bl_description = t('DecimationPanel.preset.quest.description')
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
def execute(self, context):
bpy.context.scene.max_tris = 5000
return {'FINISHED'}
| 11,277 |
348 | {"nom":"Volpajola","circ":"2ème circonscription","dpt":"Haute-Corse","inscrits":373,"abs":193,"votants":180,"blancs":3,"nuls":4,"exp":173,"res":[{"nuance":"REG","nom":"<NAME>","voix":106},{"nuance":"REM","nom":"<NAME>","voix":67}]} | 92 |
2,959 | //
// ExtendedAttributeDataDocument.h
// HexFiend_2
//
// Created by <NAME> on 2/3/19.
// Copyright © 2019 ridiculous_fish. All rights reserved.
//
#import "BaseDataDocument.h"
@interface ExtendedAttributeDataDocument : BaseDataDocument
- (instancetype)initWithAttributeName:(NSString *)name forURL:(NSURL *)url;
@end
| 105 |
2,151 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "remoting/host/win/launch_process_with_token.h"
#include <windows.h>
#include <utility>
#include "base/logging.h"
#include "base/win/scoped_handle.h"
#include "base/win/scoped_process_information.h"
#include "base/win/startup_information.h"
using base::win::ScopedHandle;
namespace {
// Copies the process token making it a primary impersonation token.
// The returned handle will have |desired_access| rights.
bool CopyProcessToken(DWORD desired_access, ScopedHandle* token_out) {
HANDLE temp_handle;
if (!OpenProcessToken(GetCurrentProcess(),
TOKEN_DUPLICATE | desired_access,
&temp_handle)) {
PLOG(ERROR) << "Failed to open process token";
return false;
}
ScopedHandle process_token(temp_handle);
if (!DuplicateTokenEx(process_token.Get(),
desired_access,
nullptr,
SecurityImpersonation,
TokenPrimary,
&temp_handle)) {
PLOG(ERROR) << "Failed to duplicate the process token";
return false;
}
token_out->Set(temp_handle);
return true;
}
// Creates a copy of the current process with SE_TCB_NAME privilege enabled.
bool CreatePrivilegedToken(ScopedHandle* token_out) {
ScopedHandle privileged_token;
DWORD desired_access = TOKEN_ADJUST_PRIVILEGES | TOKEN_IMPERSONATE |
TOKEN_DUPLICATE | TOKEN_QUERY;
if (!CopyProcessToken(desired_access, &privileged_token)) {
return false;
}
// Get the LUID for the SE_TCB_NAME privilege.
TOKEN_PRIVILEGES state;
state.PrivilegeCount = 1;
state.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
if (!LookupPrivilegeValue(nullptr, SE_TCB_NAME, &state.Privileges[0].Luid)) {
PLOG(ERROR) << "Failed to lookup the LUID for the SE_TCB_NAME privilege";
return false;
}
// Enable the SE_TCB_NAME privilege.
if (!AdjustTokenPrivileges(privileged_token.Get(), FALSE, &state, 0, nullptr,
0)) {
PLOG(ERROR) << "Failed to enable SE_TCB_NAME privilege in a token";
return false;
}
*token_out = std::move(privileged_token);
return true;
}
} // namespace
namespace remoting {
// Creates a copy of the current process token for the given |session_id| so
// it can be used to launch a process in that session.
bool CreateSessionToken(uint32_t session_id, ScopedHandle* token_out) {
ScopedHandle session_token;
DWORD desired_access = TOKEN_ADJUST_DEFAULT | TOKEN_ADJUST_SESSIONID |
TOKEN_ASSIGN_PRIMARY | TOKEN_DUPLICATE | TOKEN_QUERY;
if (!CopyProcessToken(desired_access, &session_token)) {
return false;
}
// Temporarily enable the SE_TCB_NAME privilege as it is required by
// SetTokenInformation(TokenSessionId).
ScopedHandle privileged_token;
if (!CreatePrivilegedToken(&privileged_token)) {
return false;
}
if (!ImpersonateLoggedOnUser(privileged_token.Get())) {
PLOG(ERROR) << "Failed to impersonate the privileged token";
return false;
}
// Change the session ID of the token.
DWORD new_session_id = session_id;
if (!SetTokenInformation(session_token.Get(),
TokenSessionId,
&new_session_id,
sizeof(new_session_id))) {
PLOG(ERROR) << "Failed to change session ID of a token";
// Revert to the default token.
CHECK(RevertToSelf());
return false;
}
// Revert to the default token.
CHECK(RevertToSelf());
*token_out = std::move(session_token);
return true;
}
bool LaunchProcessWithToken(
const base::FilePath& binary,
const base::CommandLine::StringType& command_line,
HANDLE user_token,
SECURITY_ATTRIBUTES* process_attributes,
SECURITY_ATTRIBUTES* thread_attributes,
const base::HandlesToInheritVector& handles_to_inherit,
DWORD creation_flags,
const base::char16* desktop_name,
ScopedHandle* process_out,
ScopedHandle* thread_out) {
base::FilePath::StringType application_name = binary.value();
base::win::StartupInformation startup_info_wrapper;
STARTUPINFO* startup_info = startup_info_wrapper.startup_info();
if (desktop_name)
startup_info->lpDesktop = const_cast<base::char16*>(desktop_name);
bool inherit_handles = false;
if (!handles_to_inherit.empty()) {
if (handles_to_inherit.size() >
std::numeric_limits<DWORD>::max() / sizeof(HANDLE)) {
DLOG(ERROR) << "Too many handles to inherit.";
return false;
}
// Ensure the handles can be inherited.
for (HANDLE handle : handles_to_inherit) {
BOOL result = SetHandleInformation(handle, HANDLE_FLAG_INHERIT,
HANDLE_FLAG_INHERIT);
PCHECK(result);
}
if (!startup_info_wrapper.InitializeProcThreadAttributeList(
/* attribute_count= */ 1)) {
PLOG(ERROR) << "InitializeProcThreadAttributeList()";
return false;
}
if (!startup_info_wrapper.UpdateProcThreadAttribute(
PROC_THREAD_ATTRIBUTE_HANDLE_LIST,
const_cast<HANDLE*>(&handles_to_inherit.at(0)),
static_cast<DWORD>(handles_to_inherit.size() * sizeof(HANDLE)))) {
PLOG(ERROR) << "UpdateProcThreadAttribute()";
return false;
}
inherit_handles = true;
creation_flags |= EXTENDED_STARTUPINFO_PRESENT;
}
PROCESS_INFORMATION temp_process_info = {};
BOOL result = CreateProcessAsUser(user_token, application_name.c_str(),
const_cast<LPWSTR>(command_line.c_str()),
process_attributes, thread_attributes,
inherit_handles, creation_flags, nullptr,
nullptr, startup_info, &temp_process_info);
if (!result) {
PLOG(ERROR) << "Failed to launch a process with a user token";
return false;
}
base::win::ScopedProcessInformation process_info(temp_process_info);
CHECK(process_info.IsValid());
process_out->Set(process_info.TakeProcessHandle());
thread_out->Set(process_info.TakeThreadHandle());
return true;
}
} // namespace remoting
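// Illustrative usage sketch (not part of the API above; the session id, command
// line, and binary path are assumptions, and error handling is elided):
//
//   base::win::ScopedHandle token, process, thread;
//   if (remoting::CreateSessionToken(session_id, &token)) {
//     remoting::LaunchProcessWithToken(
//         base::FilePath(FILE_PATH_LITERAL("host.exe")), command_line, token.Get(),
//         nullptr, nullptr, base::HandlesToInheritVector(), /*creation_flags=*/0,
//         L"winsta0\\default", &process, &thread);
//   }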
| 2,583 |
889 | <reponame>limberc/HyperGAN
{
"description": "Example of using efficient attention (128x128)",
"discriminator":
{
"class": "class:hypergan.discriminators.configurable_discriminator.ConfigurableDiscriminator",
"layers":[
"conv 64 stride=2 padding=0", "relu",
"add self (efficient_attention)",
"conv 128 stride=2 padding=0", "relu",
"add self (efficient_attention)",
"conv 256 stride=2 padding=0", "relu",
"add self (efficient_attention)",
"conv 512 stride=2 padding=0", "relu",
"add self (efficient_attention)",
"linear 1"
]
},
"latent":
{
"class": "function:hypergan.distributions.uniform_distribution.UniformDistribution",
"min": -1,
"max": 1,
"z": 256
},
"generator": {
"class": "class:hypergan.discriminators.configurable_discriminator.ConfigurableDiscriminator",
"layers":[
"linear 10*10*128 initializer=(orthogonal)", "selu",
"upsample h=20 w=20","conv2d 128 padding=0 initializer=(orthogonal)","selu",
"add self (efficient_attention)",
"upsample h=36 w=36","conv2d 64 padding=0 initializer=(orthogonal)", "selu",
"add self (efficient_attention)",
"upsample h=68 w=68","conv2d 32 padding=0 initializer=(orthogonal)", "selu",
"add self (efficient_attention)",
"upsample h=130 w=130","conv2d 16 padding=0 initializer=(orthogonal)", "selu",
"efficient_attention 3",
"hardtanh"
]
},
"loss":
{
"class": "function:hypergan.losses.standard_loss.StandardLoss",
"reduce": "reduce_mean"
},
"trainer": {
"class": "function:hypergan.trainers.simultaneous_trainer.SimultaneousTrainer",
"hooks": [
{
"class": "function:hypergan.train_hooks.adversarial_norm_train_hook.AdversarialNormTrainHook",
"gammas": [-1e12, 1e12],
"offset": 1.0,
"loss": [
"dg"
],
"mode": "fake"
},
{
"class": "function:hypergan.train_hooks.adversarial_norm_train_hook.AdversarialNormTrainHook",
"gamma": -1e12,
"offset": 1.0,
"loss": [
"d"
],
"mode": "real"
}
],
"optimizer": {
"amsgrad": true,
"betas": [
0.1,
0.997
],
"class": "class:torch.optim.Adam",
"eps": 1e-08,
"lr": 0.0001,
"weight_decay": 0
}
},
"runtime": {
"anysize": true,
"train": "hypergan train [dataset] --sampler static_batch -b 8 --size 64x64x3"
},
"hypergan_version": "~1",
"class": "class:hypergan.gans.standard_gan.StandardGAN"
}
| 1,180 |
10,225 | package io.quarkus.it.bootstrap.config.extension;
import java.util.Collections;
import org.eclipse.microprofile.config.spi.ConfigSource;
import org.eclipse.microprofile.config.spi.ConfigSourceProvider;
import io.quarkus.runtime.RuntimeValue;
import io.quarkus.runtime.annotations.Recorder;
@Recorder
public class DummyBootstrapRecorder2 {
public RuntimeValue<ConfigSourceProvider> create() {
return new RuntimeValue<>(new ConfigSourceProvider() {
@Override
public Iterable<ConfigSource> getConfigSources(ClassLoader forClassLoader) {
return Collections.emptyList();
}
});
}
}
| 242 |
446 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ratis.netty;
import org.apache.ratis.thirdparty.io.netty.bootstrap.Bootstrap;
import org.apache.ratis.thirdparty.io.netty.channel.Channel;
import org.apache.ratis.thirdparty.io.netty.channel.ChannelFuture;
import org.apache.ratis.thirdparty.io.netty.channel.ChannelInitializer;
import org.apache.ratis.thirdparty.io.netty.channel.EventLoopGroup;
import org.apache.ratis.thirdparty.io.netty.channel.socket.SocketChannel;
import org.apache.ratis.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;
import org.apache.ratis.thirdparty.io.netty.handler.logging.LogLevel;
import org.apache.ratis.thirdparty.io.netty.handler.logging.LoggingHandler;
import org.apache.ratis.util.JavaUtils;
import org.apache.ratis.util.LifeCycle;
import org.apache.ratis.util.NetUtils;
import java.io.Closeable;
import java.net.InetSocketAddress;
public class NettyClient implements Closeable {
private final LifeCycle lifeCycle = new LifeCycle(JavaUtils.getClassSimpleName(getClass()));
private Channel channel;
/** Connects to the given server address. */
public void connect(String serverAddress, EventLoopGroup group,
ChannelInitializer<SocketChannel> initializer)
throws InterruptedException {
final InetSocketAddress address = NetUtils.createSocketAddr(serverAddress);
lifeCycle.startAndTransition(
() -> channel = new Bootstrap()
.group(group)
.channel(NioSocketChannel.class)
.handler(new LoggingHandler(LogLevel.INFO))
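                // Bootstrap keeps a single handler reference, so the initializer on
                // the next line replaces the LoggingHandler set above.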
.handler(initializer)
.connect(address)
.sync()
.channel(),
InterruptedException.class);
}
@Override
public void close() {
lifeCycle.checkStateAndClose(() -> {
channel.close().syncUninterruptibly();
});
}
public ChannelFuture writeAndFlush(Object msg) {
lifeCycle.assertCurrentState(LifeCycle.States.RUNNING);
return channel.writeAndFlush(msg);
}
}
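// Illustrative usage (the event loop group and channel initializer are assumed to
// be created by the caller):
//
//   NettyClient client = new NettyClient();
//   client.connect("localhost:6000", eventLoopGroup, channelInitializer);
//   client.writeAndFlush(request);
//   client.close();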
| 917 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.apisupport.project.ui.wizard.wizard;
import org.netbeans.junit.NbTestCase;
/**
* Tests {@link DataModel}.
*
* @author <NAME>
*/
public class DataModelTest extends NbTestCase {
public DataModelTest(String name) {
super(name);
}
public void testDataModelGenerationForCustomBranchingWizard() throws Exception {
/* XXX rewrite to use mock data
NbModuleProject project = TestBase.generateStandaloneModule(getWorkDir(), "module1");
WizardDescriptor wd = new WizardDescriptor() {};
wd.putProperty(ProjectChooserFactory.WIZARD_KEY_PROJECT, project);
DataModel data = new DataModel(wd);
// first panel data (Wizard Type)
data.setBranching(true);
data.setFileTemplateType(false);
data.setNumberOfSteps(2);
// second panel data (Name and Location)
data.setClassNamePrefix("DocBook");
data.setPackageName("org.example.module1");
CreatedModifiedFiles cmf = data.getCreatedModifiedFiles();
assertEquals("created files",
Arrays.asList(
"src/org/example/module1/DocBookVisualPanel1.form",
"src/org/example/module1/DocBookVisualPanel1.java",
"src/org/example/module1/DocBookVisualPanel2.form",
"src/org/example/module1/DocBookVisualPanel2.java",
"src/org/example/module1/DocBookWizardIterator.java",
"src/org/example/module1/DocBookWizardPanel1.java",
"src/org/example/module1/DocBookWizardPanel2.java"
),
Arrays.asList(cmf.getCreatedPaths()));
assertEquals("project.xml was modified",
Arrays.asList("nbproject/project.xml"),
Arrays.asList(cmf.getModifiedPaths()));
cmf.run();
}
public void testDataModelGenerationForFileTemplateBranchingWizard() throws Exception {
NbModuleProject project = TestBase.generateStandaloneModule(getWorkDir(), "module1");
WizardDescriptor wd = new WizardDescriptor() {};
wd.putProperty(ProjectChooserFactory.WIZARD_KEY_PROJECT, project);
DataModel data = new DataModel(wd);
// first panel data (Wizard Type)
data.setBranching(true);
data.setFileTemplateType(true);
data.setNumberOfSteps(2);
// second panel data (Name and Location)
data.setClassNamePrefix("DocBook");
data.setDisplayName("DocBook Document");
data.setCategory("Templates/XML");
data.setPackageName("org.example.module1");
CreatedModifiedFiles cmf = data.getCreatedModifiedFiles();
assertEquals("created files",
Arrays.asList(
"src/org/example/module1/DocBookVisualPanel1.form",
"src/org/example/module1/DocBookVisualPanel1.java",
"src/org/example/module1/DocBookVisualPanel2.form",
"src/org/example/module1/DocBookVisualPanel2.java",
"src/org/example/module1/DocBookWizardIterator.java",
"src/org/example/module1/DocBookWizardPanel1.java",
"src/org/example/module1/DocBookWizardPanel2.java",
"src/org/example/module1/docBook.html"
),
Arrays.asList(cmf.getCreatedPaths()));
assertEquals("modified files",
Arrays.asList(
"nbproject/project.xml",
"src/org/example/module1/resources/Bundle.properties",
"src/org/example/module1/resources/layer.xml"
),
Arrays.asList(cmf.getModifiedPaths()));
cmf.run();
}
public void testDataModelGenerationForCustomSimpleWizard() throws Exception {
NbModuleProject project = TestBase.generateStandaloneModule(getWorkDir(), "module1");
WizardDescriptor wd = new WizardDescriptor() {};
wd.putProperty(ProjectChooserFactory.WIZARD_KEY_PROJECT, project);
DataModel data = new DataModel(wd);
// first panel data (Wizard Type)
data.setBranching(false);
data.setFileTemplateType(false);
data.setNumberOfSteps(1);
// second panel data (Name and Location)
data.setClassNamePrefix("DocBook");
data.setPackageName("org.example.module1");
CreatedModifiedFiles cmf = data.getCreatedModifiedFiles();
assertEquals("created files",
Arrays.asList(
"src/org/example/module1/DocBookVisualPanel1.form",
"src/org/example/module1/DocBookVisualPanel1.java",
"src/org/example/module1/DocBookWizardAction.java",
"src/org/example/module1/DocBookWizardPanel1.java"
),
Arrays.asList(cmf.getCreatedPaths()));
assertEquals("project.xml was modified",
Arrays.asList("nbproject/project.xml"),
Arrays.asList(cmf.getModifiedPaths()));
cmf.run();
}
public void testDataModelCMFUpdated() throws Exception {
NbModuleProject project = TestBase.generateStandaloneModule(getWorkDir(), "module1");
WizardDescriptor wd = new WizardDescriptor() {};
wd.putProperty(ProjectChooserFactory.WIZARD_KEY_PROJECT, project);
DataModel data = new DataModel(wd);
data.setBranching(false);
data.setFileTemplateType(false);
data.setNumberOfSteps(1);
data.setClassNamePrefix("X");
data.setPackageName("x");
assertEquals("initial files correct",
Arrays.asList(
"src/x/XVisualPanel1.form",
"src/x/XVisualPanel1.java",
"src/x/XWizardAction.java",
"src/x/XWizardPanel1.java"
),
Arrays.asList(data.getCreatedModifiedFiles().getCreatedPaths()));
data.setClassNamePrefix("Y");
assertEquals("class name change takes effect",
Arrays.asList(
"src/x/YVisualPanel1.form",
"src/x/YVisualPanel1.java",
"src/x/YWizardAction.java",
"src/x/YWizardPanel1.java"
),
Arrays.asList(data.getCreatedModifiedFiles().getCreatedPaths()));
data.setPackageName("y");
assertEquals("package change takes effect",
Arrays.asList(
"src/y/YVisualPanel1.form",
"src/y/YVisualPanel1.java",
"src/y/YWizardAction.java",
"src/y/YWizardPanel1.java"
),
Arrays.asList(data.getCreatedModifiedFiles().getCreatedPaths()));
*/
}
}
| 3,600 |
346 | <reponame>ZJCRT/drishti<gh_stars>100-1000
// This example program shows how to use dlib's implementation of the paper:
// One Millisecond Face Alignment with an Ensemble of Regression Trees by
// <NAME> and <NAME>, CVPR 2014
//
// http://www.csc.kth.se/~vahidk/papers/KazemiCVPR14.pdf
//
// Changelog:
// - support regression in shape space for simple size reduction
// - support line indexed features (more pose invariant in some cases (per RCPR))
// - support normalized pixel differences (can improve regression in some cases)
// - support trailing ellipse parameters (embedded in point list)
// for simultaneous contour and ellipse model regression
#include "drishti/core/drishti_stdlib_string.h" // FIRST !!!
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <dlib/data_io.h>
#include <dlib/image_transforms/assign_image.h>
#include <dlib/statistics/statistics.h>
#include <dlib/image_processing/shape_predictor.h>
#include <dlib/opencv/cv_image.h>
#include <dlib/vectorstream.h>
#include <dlib/serialize.h>
#include <dlib/data_io/load_image_dataset.h>
#include "drishti/core/string_utils.h"
#include "drishti/core/Line.h"
#include "drishti/ml/shape_predictor_archive.h"
#include "drishti/ml/shape_predictor_trainer.h"
#include "drishti/geometry/Ellipse.h"
#include "drishti/core/drishti_cereal_pba.h"
#include "drishti/core/drishti_cv_cereal.h"
#include "RecipeIO.h"
#include "cxxopts.hpp"
#include <iostream>
#include <map>
#include <sstream>
//#define _SP dlib
#define _SP drishti::ml
// _cascade_depth = 10;
// _tree_depth = 4;
// _num_trees_per_cascade_level = 500;
// _nu = 0.1;
// _oversampling_amount = 20;
// _feature_pool_size = 400;
// _lambda = 0.1;
// _num_test_splits = 20;
// _feature_pool_region_padding = 0;
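// A hypothetical recipe JSON matching the fields used below (the exact key names
// depend on drishti::dlib::Recipe serialization and are inferred, not verified):
//
// {
//   "cascades": 10, "depth": 4, "trees_per_level": 500, "nu": 0.1,
//   "oversampling": 20, "features": 400, "lambda": 0.1, "splits": 20,
//   "padding": 0, "npd": false, "do_affine": true, "do_interpolate": false,
//   "width": 256, "ellipse_count": 0, "dimensions": [4, 8, 12, 16, 20, 24]
// }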
static std::vector<std::vector<double>>
get_interocular_distances(const std::vector<std::vector<dlib::full_object_detection>>& objects);
using DlibImageArray = dlib::array<dlib::array2d<uint8_t>>;
using DlibObjectSet = std::vector<std::vector<dlib::full_object_detection>>;
static void reduce_images(DlibImageArray& images_train, DlibObjectSet& faces_train, int ellipse_count, int width);
static void dump_thumbs(DlibImageArray& images_train, DlibObjectSet& faces_train, const std::string& dir, int ellipse_count);
static std::vector<int> parse_dimensions(const std::string& str);
static dlib::rectangle parse_roi(const std::string& str);
static void view_images(DlibImageArray& images_train, DlibObjectSet& faces_train)
{
for (int i = 0; i < images_train.size(); i++)
{
auto& src = images_train[i];
cv::Mat image(src.nr(), src.nc(), CV_8UC1, (void*)&src[0][0], src.width_step());
cv::Mat canvas = image.clone();
for (int j = 0; j < faces_train[i].size(); j++)
{
// reduce roi:
for (int k = 0; k < faces_train[i][j].num_parts(); k++)
{
auto& part = faces_train[i][j].part(k);
cv::Point2f p(part.x(), part.y());
cv::circle(canvas, p, 4, { 255, 255, 255 }, -1, 8);
cv::imwrite("/tmp/foo.png", canvas);
}
}
}
}
int gauze_main(int argc, char* argv[])
{
auto logger = drishti::core::Logger::create("train_shape_predictor");
const auto argumentCount = argc;
bool do_help = false;
bool do_threads = false;
bool do_thumbs = false;
bool do_verbose = false;
bool do_silent = false;
drishti::dlib::Recipe recipe;
std::string sModel;
std::string sRoi;
std::string sTest;
std::string sTrain;
std::string sRecipe;
std::string sRecipeOut;
std::string sOutput;
cxxopts::Options options("train_shape_predictor", "Command line interface for dlib shape_predictor training");
// clang-format off
options.add_options()
( "output", "Output directory for intermediate results", cxxopts::value<std::string>(sOutput))
( "train", "training file", cxxopts::value<std::string>(sTrain))
( "test", "testing file", cxxopts::value<std::string>(sTest))
( "model", "Model file", cxxopts::value<std::string>(sModel))
( "thumbs", "dump thumbnails of width n", cxxopts::value<bool>(do_thumbs))
( "roi", "exclusion roi (x,y,w,h) normalized cs", cxxopts::value<std::string>(sRoi))
// Regression parameters:
( "recipe", "Cascaded pose regression training recipe", cxxopts::value<std::string>(sRecipe))
( "boilerplate", "Output boilerplate recipe file", cxxopts::value<std::string>(sRecipeOut))
( "threads", "Use worker threads when possible", cxxopts::value<bool>(do_threads))
( "verbose", "Print verbose diagnostics", cxxopts::value<bool>(do_verbose))
( "silent", "Disable logging entirely", cxxopts::value<bool>(do_silent))
( "help", "Print the help message", cxxopts::value<bool>(do_help));
// clang-format on
auto opts = options.parse(argc, argv);
if(do_silent)
{
logger->set_level(spdlog::level::off);
}
if((argumentCount <= 1) || opts.count("help"))
{
logger->info(options.help({""}));
return 0;
}
if(!sRecipeOut.empty())
{
recipe.dimensions = { 4, 8, 12, 16, 20, 24 };
saveJSON(sRecipeOut, recipe);
return 0;
}
if(sRecipe.empty())
{
logger->error("Must specify valid training recipe.");
return 1;
}
loadJSON(sRecipe, recipe);
if(sTrain.empty())
{
logger->error("Must specify valid XML training file.");
return 1;
}
if(sModel.empty())
{
logger->error("Must specify output *.dat model file.");
return 1;
}
if(do_verbose)
{
logger->info("cascade_depth: {}", recipe.cascades);
logger->info("tree_depth: {}", recipe.depth);
logger->info("num_trees_per_cascade_level: {}", recipe.trees_per_level);
logger->info("nu: {}", recipe.nu);
logger->info("oversampling_amount: {}", recipe.oversampling);
logger->info("feature_pool_size: {}", recipe.features);
logger->info("lambda: {}", recipe.lambda);
logger->info("num_test_splits: {}", recipe.splits);
logger->info("feature_pool_region_padding: {}", recipe.padding);
logger->info("use npd: {}", recipe.npd);
logger->info("affine: {}", recipe.do_affine);
logger->info("interpolated: {}", recipe.do_interpolate);
}
dlib::array<dlib::array2d<uint8_t>> images_train, images_test;
std::vector<std::vector<dlib::full_object_detection> > faces_train, faces_test;
dlib::image_dataset_file source(sTrain);
source.skip_empty_images();
load_image_dataset(images_train, faces_train, source);
if(faces_train.empty())
{
logger->error("No shapes specified for training");
return 1;
}
if(do_thumbs)
{
dump_thumbs(images_train, faces_train, sOutput, recipe.ellipse_count);
return 0;
}
// Here we optionally downsample:
if(recipe.width > 0)
{
reduce_images(images_train, faces_train, recipe.ellipse_count, recipe.width);
}
dlib::drectangle roi;
if(!sRoi.empty())
{
roi = parse_roi(sRoi);
}
// Now make the object responsible for training the model.
_SP::shape_predictor_trainer trainer;
trainer.set_cascade_depth(recipe.cascades);
trainer.set_tree_depth(recipe.depth);
trainer.set_num_trees_per_cascade_level(recipe.trees_per_level);
trainer.set_nu(recipe.nu); // regularization (smaller nu == more regularization)
trainer.set_oversampling_amount(recipe.oversampling); // amount of oversampling for training data
trainer.set_feature_pool_size(recipe.features);
trainer.set_lambda(recipe.lambda); // feature separation (not learning rate)
trainer.set_num_test_splits(recipe.splits);
trainer.set_feature_pool_region_padding(recipe.padding);
// new parameters
if(recipe.dimensions.size())
{
trainer.set_dimensions(recipe.dimensions);
trainer.set_cascade_depth(recipe.dimensions.size());
}
trainer.set_ellipse_count(recipe.ellipse_count);
trainer.set_do_npd(recipe.npd);
trainer.set_do_affine(recipe.do_affine);
trainer.set_roi(roi);
trainer.set_do_line_indexed(recipe.do_interpolate);
trainer.set_num_threads(8);
if(do_verbose)
{
trainer.be_verbose();
}
int max_dim = faces_train[0][0].num_parts() * 2;
for(const auto &dim : recipe.dimensions)
{
CV_Assert(0 < dim && dim <= max_dim);
}
if(do_verbose)
{
logger->info("Begin training...");
}
std::map<int, float> weights;
for(const auto &w : recipe.weights)
{
weights[std::stoi(w.first)] = w.second;
}
//_SP::shape_predictor sp;
_SP::shape_predictor sp = trainer.train(images_train, faces_train, weights);
if(do_verbose)
{
logger->info("Done training...");
}
// Finally, we save the model to disk so we can use it later.
//dlib::serialize( sModel.c_str() ) << sp;
if(do_verbose)
{
logger->info("Saving to ...{}", sModel);
}
save_cpb(sModel, sp);
sp.populate_f16(); // populate half precision leaf nodes
if(do_verbose)
{
logger->info("Done saving ...{}", sModel);
}
auto train_iod = get_interocular_distances(faces_train);
double training_error = test_shape_predictor(sp, images_train, faces_train, train_iod);
if(do_verbose)
{
logger->info("Mean training error: {}", training_error);
}
if(!sTest.empty())
{
load_image_dataset(images_test, faces_test, sTest);
auto test_iod = get_interocular_distances(faces_test);
float test_error = test_shape_predictor(sp, images_test, faces_test, test_iod);
if(do_verbose)
{
logger->info("Mean testing error: {}", test_error);
}
}
return 0;
}
int main(int argc, char *argv[])
{
try
{
gauze_main(argc, argv);
}
catch (std::exception& e)
{
std::cerr << "Exception thrown:" << e.what();
}
}
// ----------------------------------------------------------------------------------------
static double interocular_distance (const dlib::full_object_detection& det)
{
double length = 0;
for(int i = 0; i < det.num_parts(); i++)
for(int j = i+1; j < det.num_parts(); j++)
{
length = std::max(double(dlib::length(det.part(i) - det.part(j))), length);
}
return length;
}
static std::vector<std::vector<double> > get_interocular_distances (const DlibObjectSet& objects)
{
std::vector<std::vector<double> > temp(objects.size());
for (unsigned long i = 0; i < objects.size(); ++i)
{
for (unsigned long j = 0; j < objects[i].size(); ++j)
{
temp[i].push_back(interocular_distance(objects[i][j]));
}
}
return temp;
}
static void dump_thumbs(DlibImageArray &images_train, DlibObjectSet &faces_train, const std::string &dir, int ellipse_count)
{
static std::vector< cv::Vec3b > rainbow // RGB (inuitive)
{
{255,0,0}, // red
{255,127,0}, // orange
{255,255,0}, // yellow
{0,255,0}, // green
{0,0,255}, // blue
{75, 0, 130}, // indigo
{139, 0, 255}, // violet
{127,127,127}, // white
};
for(auto &c : rainbow)
{
std::swap(c[0], c[2]); // RGB -> BGR
}
for(int i = 0; i < images_train.size(); i++)
{
auto &src = images_train[i];
cv::Mat image(src.nr(), src.nc(), CV_8UC1, (void *)&src[0][0], src.width_step()), small;
for(int j = 0; j < faces_train[i].size(); j++)
{
auto &roiIn = faces_train[i][j].get_rect();
cv::Rect roi(roiIn.left(), roiIn.top(), roiIn.width(), roiIn.height());
cv::Mat canvas;
try
{
if(image.channels() != 3)
{
cv::cvtColor(image(roi), canvas, cv::COLOR_GRAY2BGR);
}
else
{
canvas = image(roi).clone();
}
}
catch(...)
{
std::cout << "BAD ROI: " << i << " " << j << std::endl;
continue;
}
            int end = faces_train[i][j].num_parts() - (ellipse_count * 5);
for(int k = 0; k < end; k++)
{
auto &part = faces_train[i][j].part(k);
cv::Point p(part.x(), part.y());
cv::circle(canvas, p - roi.tl(), 3, rainbow[k%rainbow.size()], -1, 8);
}
std::stringstream ss;
ss << dir << "/" << "roi_" << i << "_" << j << ".png";
cv::imwrite(ss.str(), canvas);
}
}
}
static void reduce_int(long &value, float scale)
{
value = static_cast<long>(value * scale + 0.5f);
}
static void reduce_int(dlib::rectangle &roi, float scale)
{
reduce_int(roi.top(), scale);
reduce_int(roi.left(), scale);
reduce_int(roi.right(), scale);
reduce_int(roi.bottom(), scale);
}
static void reduce_images(DlibImageArray &images_train, DlibObjectSet &faces_train, int ellipse_count, int width)
{
for(int i = 0; i < images_train.size(); i++)
{
auto &src = images_train[i];
cv::Mat image(src.nr(), src.nc(), CV_8UC1, (void *)&src[0][0], src.width_step()), small;
const float scale = float(width) / image.cols;
cv::resize(image, small, {}, scale, scale, cv::INTER_LANCZOS4);
dlib::assign_image(src, dlib::cv_image<uint8_t>(small));
for(int j = 0; j < faces_train[i].size(); j++)
{
// reduce roi:
reduce_int(faces_train[i][j].get_rect(), scale);
const int end = faces_train[i][j].num_parts() - (ellipse_count * 5);
for(int k = 0; k < end; k++)
{
auto &part = faces_train[i][j].part(k);
reduce_int(part.x(), scale);
reduce_int(part.y(), scale);
CV_Assert(part.x() < 1000 && part.y() < 1000);
}
// Handle the ellipse data (optional):
for(int k = end; k < faces_train[i][j].num_parts(); k+= 5)
{
auto & obj = faces_train[i][j];
obj.part(k+0).x() *= scale;
obj.part(k+1).x() *= scale;
obj.part(k+2).x() *= scale;
obj.part(k+3).x() *= scale;
}
}
}
}
static dlib::rectangle parse_roi(const std::string &str)
{
std::vector<std::string> tokens;
drishti::core::tokenize(str, tokens);
float l = std::stof(tokens[0]);
float t = std::stof(tokens[1]);
float r = std::stof(tokens[2]);
float b = std::stof(tokens[3]);
return dlib::drectangle(l, t, r-l, b-t);
}
| 6,887 |
823 | from io import BytesIO
try:
from clickhouse_cityhash.cityhash import CityHash128
except ImportError:
raise RuntimeError(
'Package clickhouse-cityhash is required to use compression'
)
from .native import BlockOutputStream, BlockInputStream
from ..bufferedreader import CompressedBufferedReader
from ..bufferedwriter import CompressedBufferedWriter
from ..compression import get_decompressor_cls
from ..defines import BUFFER_SIZE
from ..reader import read_binary_uint8, read_binary_uint128
from ..writer import write_binary_uint8, write_binary_uint128
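# Wire framing shared by the two streams below:
#   [16 bytes: CityHash128 checksum][1 byte: compression method][compressed payload]
# The checksum covers the method byte plus the payload that follows it.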
class CompressedBlockOutputStream(BlockOutputStream):
def __init__(self, compressor_cls, compress_block_size, fout, context):
self.compressor_cls = compressor_cls
self.compress_block_size = compress_block_size
self.raw_fout = fout
self.compressor = self.compressor_cls()
self.fout = CompressedBufferedWriter(self.compressor, BUFFER_SIZE)
super(CompressedBlockOutputStream, self).__init__(self.fout, context)
def get_compressed_hash(self, data):
return CityHash128(data)
def finalize(self):
self.fout.flush()
compressed = self.get_compressed()
compressed_size = len(compressed)
compressed_hash = self.get_compressed_hash(compressed)
write_binary_uint128(compressed_hash, self.raw_fout)
block_size = self.compress_block_size
i = 0
while i < compressed_size:
self.raw_fout.write(compressed[i:i + block_size])
i += block_size
self.raw_fout.flush()
def get_compressed(self):
compressed = BytesIO()
if self.compressor.method_byte is not None:
write_binary_uint8(self.compressor.method_byte, compressed)
extra_header_size = 1 # method
else:
extra_header_size = 0
data = self.compressor.get_compressed_data(extra_header_size)
compressed.write(data)
return compressed.getvalue()
class CompressedBlockInputStream(BlockInputStream):
def __init__(self, fin, context):
self.raw_fin = fin
fin = CompressedBufferedReader(self.read_block, BUFFER_SIZE)
super(CompressedBlockInputStream, self).__init__(fin, context)
def get_compressed_hash(self, data):
return CityHash128(data)
def read_block(self):
compressed_hash = read_binary_uint128(self.raw_fin)
method_byte = read_binary_uint8(self.raw_fin)
decompressor_cls = get_decompressor_cls(method_byte)
decompressor = decompressor_cls(self.raw_fin)
if decompressor.method_byte is not None:
extra_header_size = 1 # method
else:
extra_header_size = 0
return decompressor.get_decompressed_data(
method_byte, compressed_hash, extra_header_size
)
| 1,141 |
796 | <reponame>ProtectorYT364/BlockLauncher<gh_stars>100-1000
package net.zhuoweizhang.mcpelauncher.texture;
import java.io.*;
import android.graphics.*;
public class PNGImageLoader implements ImageLoader {
public Bitmap load(InputStream is) {
return BitmapFactory.decodeStream(is);
}
public void save(Bitmap bmp, OutputStream os) {
bmp.compress(Bitmap.CompressFormat.PNG, 100, os);
}
}
| 141 |
1,133 | /**
*
* PixelFlow | Copyright (C) 2017 <NAME> - www.thomasdiewald.com
*
* https://github.com/diwi/PixelFlow.git
*
* A Processing/Java library for high performance GPU-Computing.
* MIT License: https://opensource.org/licenses/MIT
*
*/
package Shadertoy.Shadertoy_AbstractCorridor;
import com.jogamp.opengl.GL2;
import com.thomasdiewald.pixelflow.java.DwPixelFlow;
import com.thomasdiewald.pixelflow.java.dwgl.DwGLTexture;
import com.thomasdiewald.pixelflow.java.imageprocessing.DwShadertoy;
import com.thomasdiewald.pixelflow.java.imageprocessing.filter.DwFilter;
import processing.core.PApplet;
import processing.core.PImage;
public class Shadertoy_AbstractCorridor extends PApplet {
//
// Shadertoy Demo: https://www.shadertoy.com/view/MlXSWX
// Shadertoy Author: https://www.shadertoy.com/user/Shane
//
DwPixelFlow context;
DwShadertoy toy;
DwGLTexture tex_0 = new DwGLTexture();
DwGLTexture tex_1 = new DwGLTexture();
public void settings() {
size(1280, 720, P2D);
smooth(0);
}
public void setup() {
surface.setResizable(true);
context = new DwPixelFlow(this);
context.print();
context.printGL();
toy = new DwShadertoy(context, "data/AbstractCorridor.frag");
// load assets
PImage img0 = loadImage("../Shadertoy/Shadertoy_AbstractCorridor/data/Abstract 2.jpg");
PImage img1 = loadImage("../Shadertoy/Shadertoy_AbstractCorridor/data/Wood.jpg");
// create textures
tex_0.resize(context, GL2.GL_RGBA8, img0.width, img0.height, GL2.GL_RGBA, GL2.GL_UNSIGNED_BYTE, GL2.GL_LINEAR, GL2.GL_MIRRORED_REPEAT, 4,1);
tex_1.resize(context, GL2.GL_RGBA8, img1.width, img1.height, GL2.GL_RGBA, GL2.GL_UNSIGNED_BYTE, GL2.GL_LINEAR, GL2.GL_MIRRORED_REPEAT, 4,1);
// copy images to textures
DwFilter.get(context).copy.apply(img0, tex_0);
DwFilter.get(context).copy.apply(img1, tex_1);
// mipmap
DwShadertoy.setTextureFilter(tex_0, DwShadertoy.TexFilter.MIPMAP);
DwShadertoy.setTextureFilter(tex_1, DwShadertoy.TexFilter.MIPMAP);
frameRate(60);
}
public void draw() {
if(mousePressed){
toy.set_iMouse(mouseX, height-1-mouseY, mouseX, height-1-mouseY);
}
toy.set_iChannel(0, tex_0);
toy.set_iChannel(1, tex_1);
toy.apply(this.g);
String txt_fps = String.format(getClass().getSimpleName()+ " [size %d/%d] [frame %d] [fps %6.2f]", width, height, frameCount, frameRate);
surface.setTitle(txt_fps);
}
public static void main(String args[]) {
PApplet.main(new String[] { Shadertoy_AbstractCorridor.class.getName() });
}
} | 1,110 |
382 | <reponame>kevinvandenbreemen/SwiftJava
/// generated by: genswift.java 'java/lang|java/util|java/sql|java/awt|javax/swing' ///
/// interface javax.swing.event.MouseInputListener ///
package org.swiftjava.javax_swing;
@SuppressWarnings("JniMissingFunction")
public class MouseInputListenerProxy implements javax.swing.event.MouseInputListener {
// address of proxy object
long __swiftObject;
MouseInputListenerProxy( long __swiftObject ) {
this.__swiftObject = __swiftObject;
}
/// public abstract void java.awt.event.MouseListener.mouseClicked(java.awt.event.MouseEvent)
public native void __mouseClicked( long __swiftObject, java.awt.event.MouseEvent e );
public void mouseClicked( java.awt.event.MouseEvent e ) {
__mouseClicked( __swiftObject, e );
}
/// public abstract void java.awt.event.MouseMotionListener.mouseDragged(java.awt.event.MouseEvent)
public native void __mouseDragged( long __swiftObject, java.awt.event.MouseEvent e );
public void mouseDragged( java.awt.event.MouseEvent e ) {
__mouseDragged( __swiftObject, e );
}
/// public abstract void java.awt.event.MouseListener.mouseEntered(java.awt.event.MouseEvent)
public native void __mouseEntered( long __swiftObject, java.awt.event.MouseEvent e );
public void mouseEntered( java.awt.event.MouseEvent e ) {
__mouseEntered( __swiftObject, e );
}
/// public abstract void java.awt.event.MouseListener.mouseExited(java.awt.event.MouseEvent)
public native void __mouseExited( long __swiftObject, java.awt.event.MouseEvent e );
public void mouseExited( java.awt.event.MouseEvent e ) {
__mouseExited( __swiftObject, e );
}
/// public abstract void java.awt.event.MouseMotionListener.mouseMoved(java.awt.event.MouseEvent)
public native void __mouseMoved( long __swiftObject, java.awt.event.MouseEvent e );
public void mouseMoved( java.awt.event.MouseEvent e ) {
__mouseMoved( __swiftObject, e );
}
/// public abstract void java.awt.event.MouseListener.mousePressed(java.awt.event.MouseEvent)
public native void __mousePressed( long __swiftObject, java.awt.event.MouseEvent e );
public void mousePressed( java.awt.event.MouseEvent e ) {
__mousePressed( __swiftObject, e );
}
/// public abstract void java.awt.event.MouseListener.mouseReleased(java.awt.event.MouseEvent)
public native void __mouseReleased( long __swiftObject, java.awt.event.MouseEvent e );
public void mouseReleased( java.awt.event.MouseEvent e ) {
__mouseReleased( __swiftObject, e );
}
public native void __finalize( long __swiftObject );
public void finalize() {
__finalize( __swiftObject );
}
}
| 985 |
1,788 | package com.fingerchar.service;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.conditions.update.UpdateWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.fingerchar.base.entity.BaseEntity;
import com.fingerchar.base.service.IBaseService;
import com.fingerchar.dao.ext.FcContractNftExtMapper;
import com.fingerchar.domain.FcContractNft;
import com.fingerchar.domain.FcNftItems;
import com.fingerchar.vo.FcContractNftVo;
@Service
public class FcContractNftService {
@Autowired
private IBaseService baseService;
@Autowired
private FcContractNftExtMapper contractNftExtMapper;
@Autowired
private FcCategoryService categoryService;
/**
*
* @param fcContractNft
* @param page
* @param isASC
* @param sortType
* @return
*/
public IPage<FcContractNft> querySelective(FcContractNft fcContractNft, IPage<FcContractNft> page,boolean isASC, String sortType) {
QueryWrapper<FcContractNft> wrapper = new QueryWrapper<>();
if (!StringUtils.isEmpty(fcContractNft.getName())) {
wrapper.like(FcContractNft.NAME,fcContractNft.getName());
}
if (!StringUtils.isEmpty(fcContractNft.getAddress())) {
wrapper.eq(FcContractNft.ADDRESS,fcContractNft.getAddress());
}
if (!StringUtils.isEmpty(fcContractNft.getCreator())) {
wrapper.eq(FcContractNft.CREATOR,fcContractNft.getCreator());
}
wrapper.eq(BaseEntity.DELETED,false);
if (!StringUtils.isEmpty(isASC) && !StringUtils.isEmpty(sortType)) {
wrapper.orderBy(true,isASC,sortType);
}
return baseService.findByPage(FcContractNft.class,wrapper,page);
}
/**
*
* @param fcContractNft
* @param page
* @param isASC
* @param sortType
* @return
*/
public IPage<FcContractNft> querySelective(FcContractNft fcContractNft, IPage<FcContractNft> page,boolean isASC, String sortType, Boolean nftVerify) {
QueryWrapper<FcContractNft> wrapper = new QueryWrapper<>();
if (!StringUtils.isEmpty(fcContractNft.getName())) {
wrapper.like(FcContractNft.NAME,fcContractNft.getName());
}
if (!StringUtils.isEmpty(fcContractNft.getAddress())) {
wrapper.eq(FcContractNft.ADDRESS,fcContractNft.getAddress());
}
if (!StringUtils.isEmpty(fcContractNft.getCreator())) {
wrapper.eq(FcContractNft.CREATOR,fcContractNft.getCreator());
}
        // filter by whether the NFT has passed verification
wrapper.eq(FcContractNft.NFT_VERIFY,nftVerify);
wrapper.eq(BaseEntity.DELETED,false);
if (!StringUtils.isEmpty(isASC) && !StringUtils.isEmpty(sortType)) {
wrapper.orderBy(true,isASC,sortType);
}
return baseService.findByPage(FcContractNft.class,wrapper,page);
}
    /**
     * Fetches a page of NFTs.
     * @param categoryId category id
     * @param creator creator address
     * @param address contract address
     * @param tokenId token id
     * @param page paging parameters
     * @param isASC whether to sort ascending
     * @param sortType field to sort by
     * @param nftVerify NFT verification status
     * @return
     */
public IPage<FcContractNftVo> queryList(String categoryId, String creator, String address, String tokenId, IPage<FcContractNftVo> page, boolean isASC, String sortType, Integer nftVerify) {
QueryWrapper<FcContractNft> wrapper = new QueryWrapper<>();
if (!StringUtils.isEmpty(categoryId)) {
wrapper.eq("nft.category_id",categoryId);
}
if (!StringUtils.isEmpty(creator)) {
wrapper.eq("nft.creator",creator);
}
if (!StringUtils.isEmpty(address)) {
wrapper.eq("nft.address",address);
}
if (!StringUtils.isEmpty(tokenId)) {
wrapper.eq("nft.token_id",tokenId);
}
if (nftVerify != null) {
//only keep records matching the given verification status
wrapper.eq("nft.nft_verify",nftVerify);
}
wrapper.eq("nft.deleted",false);
if (!StringUtils.isEmpty(sortType)) {
wrapper.orderBy(true,isASC,sortType);
}
IPage<FcContractNftVo> iPage = contractNftExtMapper.getList(page, wrapper);
List<FcContractNftVo> list = iPage.getRecords();
//collect the ids displayed on this page
List<Long> ids = list.stream().map(vo->vo.getId()).collect(Collectors.toList());
if(ids.isEmpty()) {
return page;
}
QueryWrapper<FcNftItems> itemsWrapper = new QueryWrapper<>();
itemsWrapper.eq(FcNftItems.DELETED, false).in(FcNftItems.NFT_ID, ids);
List<FcNftItems> itemsList = this.baseService.findByCondition(FcNftItems.class, itemsWrapper);
Map<Long, List<FcNftItems>> itemsMap = new HashMap<>();
itemsList.forEach(item -> itemsMap.computeIfAbsent(item.getNftId(), k -> new ArrayList<>()).add(item));
Map<Long, String> categoryMap = categoryService.idAndNameMap();
List<FcNftItems> tempList;
FcNftItems item;
int itemLen = 0;
for (FcContractNftVo fcContractNftVo : list) {
tempList = itemsMap.get(fcContractNftVo.getId());
//guard against NFTs that have no live items; itemsMap only covers ids with matches
itemLen = (null == tempList) ? 0 : tempList.size();
for(int i=0; i<itemLen; i++) {
item = tempList.get(i);
if (null != item){
fcContractNftVo.setPrice(item.getPrice());
if(null != item.getOnsell() && item.getOnsell()) {
if(null == fcContractNftVo.getSellQuantity()) {
fcContractNftVo.setSellQuantity(item.getSellQuantity());
} else {
fcContractNftVo.setSellQuantity(fcContractNftVo.getSellQuantity() + item.getSellQuantity());
}
}
}
}
if (null != fcContractNftVo.getCategoryId()) {
fcContractNftVo.setCategoryName(categoryMap.get(fcContractNftVo.getCategoryId()));
}
}
iPage.setRecords(list);
return iPage;
}
/**
* Updates the verification status by ID.
*
* @param contract the record to verify
* @param verify the new verification status
* @return true if the update succeeded, false otherwise
*/
public boolean verifyContract(FcContractNft contract,Integer verify) {
contract.setNftVerify(verify);
contract.setUpdateTime(System.currentTimeMillis()/1000);
boolean ok = baseService.update(contract) > 0;
if(ok){
QueryWrapper<FcNftItems> wrapper = new QueryWrapper<>();
wrapper.eq(FcNftItems.NFT_ID,contract.getId());
//query by nft id via the wrapper; the contract id is not the item's primary key
FcNftItems fcNftItem = baseService.getByCondition(FcNftItems.class, wrapper);
if(null != fcNftItem){
FcNftItems fcNftItems = new FcNftItems();
fcNftItems.setNftId(fcNftItem.getNftId());
fcNftItems.setUpdateTime(System.currentTimeMillis()/1000);
return baseService.update(fcNftItems) > 0;
}
}
return ok;
}
/**
* Disables (soft-deletes) a contract by ID.
*
* @param contract the record to disable
* @return true if the update succeeded, false otherwise
*/
public boolean disableContract(FcContractNft contract) {
contract.setDeleted(true);
contract.setUpdateTime(System.currentTimeMillis()/1000);
return baseService.update(contract) > 0;
}
/**
* Enables a previously disabled contract by ID.
*
* @param contract the record to enable
* @return true if the update succeeded, false otherwise
*/
public boolean enableContract(FcContractNft contract) {
contract.setDeleted(false);
contract.setUpdateTime(System.currentTimeMillis()/1000);
return baseService.update(contract) > 0;
}
/**
* Updates the verification status by ID.
*
* @param contract the record to update
* @param verify the new verification status
* @return true if the update succeeded, false otherwise
*/
public boolean editVerify(FcContractNft contract,Integer verify) {
contract.setNftVerify(verify);
contract.setUpdateTime(System.currentTimeMillis()/1000);
return baseService.update(contract) > 0;
}
/**
* Finds a contract by ID.
* @param id the contract id
* @return the matching contract, or null if none exists
*/
public FcContractNft findById(Long id) {
return baseService.getById(FcContractNft.class,id);
}
/**
* Marks every unverified contract as verified.
*/
public void setNftVerify() {
UpdateWrapper<FcContractNft> wrapper = new UpdateWrapper<>();
wrapper.eq(FcContractNft.NFT_VERIFY, false);
wrapper.set(FcContractNft.NFT_VERIFY, true);
this.baseService.updateByCondition(FcContractNft.class, wrapper);
}
/**
* Queries all contract addresses updated within one day of the given start time.
*/
public List<String> getAllToken(Long staTime){
QueryWrapper<FcContractNft> wrapper = new QueryWrapper<>();
wrapper.ge(FcContractNft.UPDATE_TIME,staTime).lt(FcContractNft.UPDATE_TIME,staTime + 24 * 60 * 60);
return contractNftExtMapper.getAllAddress(wrapper);
}
public FcContractNft getByAddressAndTokenId(String address, String tokenId) {
QueryWrapper<FcContractNft> wrapper = new QueryWrapper<>();
wrapper.eq(FcContractNft.ADDRESS, address)
.eq(FcContractNft.TOKEN_ID, tokenId)
.eq(FcContractNft.IS_SYNC, true)
.eq(BaseEntity.DELETED, false);
return this.baseService.getByCondition(FcContractNft.class, wrapper);
}
}
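//Usage sketch (illustrative; the controller wiring and the Page class come
//from Spring MVC and MyBatis-Plus and are assumptions, not part of this file):
//
// @Autowired FcContractNftService service;
// IPage<FcContractNftVo> page = new Page<>(1, 20);
// IPage<FcContractNftVo> verified =
// service.queryList(null, null, null, null, page, false, "nft.update_time", 1);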
| 4,628 |
533 | <gh_stars>100-1000
#include "framework/operators/fusion_ops/dense_dense.h"
namespace anakin {
namespace ops {
#define INSTANCE_DENSEDENSE(Ttype, Ptype) \
template<> \
void DenseDense<Ttype, Ptype>::operator()(OpContext<Ttype>& ctx, \
const std::vector<Tensor4dPtr<Ttype> >& ins, \
std::vector<Tensor4dPtr<Ttype> >& outs) { \
auto* impl = static_cast<DenseDenseHelper<Ttype, Ptype>*>(this->_helper); \
auto& param = static_cast<DenseDenseHelper<Ttype, Ptype>*>(this->_helper)->_param_dense_dense; \
SABER_CHECK(impl->_funcs_dense_dense(ins, outs, param, ctx)); \
}
template<typename Ttype, Precision Ptype>
Status DenseDenseHelper<Ttype, Ptype>::InitParam() {
DLOG(WARNING) << "Parsing DenseDense op parameter.";
auto axis_0 = GET_PARAMETER(int, axis);
auto out_dim_0 = GET_PARAMETER_WITH_DEFAULT(int, out_dim,0);
auto bias_term_0 = GET_PARAMETER(bool, bias_term);
//for now only fusion of two fc layers is supported
auto axis_1 = GET_PARAMETER(int, dense_1_axis);
auto out_dim_1 = GET_PARAMETER_WITH_DEFAULT(int, dense_1_out_dim,0);
auto bias_term_1 = GET_PARAMETER(bool, dense_1_bias_term);
using pblock_type = PBlock<Ttype>;
auto weights_0 = GET_PARAMETER(pblock_type, weight_1);
auto weights_1 = GET_PARAMETER(pblock_type, dense_1_weight_1);
auto weights_dtype = weights_0.h_tensor().get_dtype();
bool is_transed_0 = CHECK_PARAMETER(is_weights_transposed);
bool is_transed_1 = CHECK_PARAMETER(dense_0_is_weights_transposed);
pblock_type bias_0;
pblock_type bias_1;
if (bias_term_0){
bias_0 = GET_PARAMETER(pblock_type, weight_2);
if (bias_term_1) {
bias_1 = GET_PARAMETER(pblock_type, dense_1_weight_2);
}
}
if (weights_dtype == AK_FLOAT) {
graph::GraphGlobalMem<Ttype>::Global().template apply<Level_0>(
WeightsFusion<float, Ttype>::update_dense_weights,
weights_0, bias_0, bias_term_0, out_dim_0, is_transed_0,
weights_1, bias_1, bias_term_1, out_dim_1, is_transed_1);
} else {
graph::GraphGlobalMem<Ttype>::Global().template apply<Level_0>(
WeightsFusion<char, Ttype>::update_dense_weights,
weights_0, bias_0, bias_term_0, out_dim_0, is_transed_0,
weights_1, bias_1, bias_term_1, out_dim_1, is_transed_1);
}
if (bias_term_0 || bias_term_1) {
saber::FcParam<Ttype> fc_param(&(weights_0.d_tensor()), &(bias_1.d_tensor()), out_dim_1,
axis_1);
_param_dense_dense = fc_param;
} else {
Tensor4d<Ttype>* bias = nullptr;
saber::FcParam<Ttype> fc_param(&(weights_0.d_tensor()), bias, out_dim_1, axis_1);
_param_dense_dense = fc_param;
}
return Status::OK();
}
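// Fusion sketch (illustrative): stacking two dense layers
//   y = W1 * (W0 * x + b0) + b1
// collapses into the single dense layer
//   y = (W1 * W0) * x + (W1 * b0 + b1)
// which is what update_dense_weights above is expected to fold into the
// weight/bias blocks; the exact storage layout is framework-specific.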
template<typename Ttype, Precision Ptype>
Status DenseDenseHelper<Ttype, Ptype>::Init(OpContext<Ttype>& ctx,
const std::vector<Tensor4dPtr<Ttype> >& ins,
std::vector<Tensor4dPtr<Ttype> >& outs) {
SABER_CHECK(_funcs_dense_dense.init(ins, outs, _param_dense_dense, STATIC, SABER_IMPL, ctx));
return Status::OK();
}
#ifdef USE_CUDA
template<>
Status DenseDenseHelper<NV, Precision::INT8>::Init(OpContext<NV>& ctx,
const std::vector<Tensor4dPtr<NV> >& ins,
std::vector<Tensor4dPtr<NV> >& outs) {
SABER_CHECK(_funcs_dense_dense.init(ins, outs, _param_dense_dense, SPECIFY, SABER_IMPL, ctx));
return Status::OK();
}
#endif
template<>
Status DenseDenseHelper<X86, Precision::FP32>::Init(OpContext<X86>& ctx,
const std::vector<Tensor4dPtr<X86> >& ins,
std::vector<Tensor4dPtr<X86> >& outs) {
SABER_CHECK(_funcs_dense_dense.init(ins, outs, _param_dense_dense, SPECIFY, VENDER_IMPL, ctx));
return Status::OK();
}
template<>
Status DenseDenseHelper<X86, Precision::FP16>::Init(OpContext<X86>& ctx,
const std::vector<Tensor4dPtr<X86> >& ins,
std::vector<Tensor4dPtr<X86> >& outs) {
SABER_CHECK(_funcs_dense_dense.init(ins, outs, _param_dense_dense, SPECIFY, VENDER_IMPL, ctx));
return Status::OK();
}
#ifndef USE_SGX
template<>
Status DenseDenseHelper<X86, Precision::INT8>::Init(OpContext<X86>& ctx,
const std::vector<Tensor4dPtr<X86> >& ins,
std::vector<Tensor4dPtr<X86> >& outs) {
SABER_CHECK(_funcs_dense_dense.init(ins, outs, _param_dense_dense, SPECIFY, VENDER_IMPL, ctx));
return Status::OK();
}
#endif
template<typename Ttype, Precision Ptype>
Status DenseDenseHelper<Ttype, Ptype>::InferShape(const std::vector<Tensor4dPtr<Ttype> >&
ins,
std::vector<Tensor4dPtr<Ttype> >& outs) {
SABER_CHECK(_funcs_dense_dense.compute_output_shape(ins, outs, _param_dense_dense));
return Status::OK();
}
#ifdef USE_CUDA
INSTANCE_DENSEDENSE(NV, Precision::FP32);
INSTANCE_DENSEDENSE(NV, Precision::INT8);
template class DenseDenseHelper<NV, Precision::FP32>;
ANAKIN_REGISTER_OP_HELPER(DenseDense, DenseDenseHelper, NV, Precision::FP32);
ANAKIN_REGISTER_OP_HELPER(DenseDense, DenseDenseHelper, NV, Precision::INT8);
template class DenseDenseHelper<NV, Precision::FP16>;
template class DenseDenseHelper<NV, Precision::INT8>;
#endif
#ifdef USE_ARM_PLACE
INSTANCE_DENSEDENSE(ARM, Precision::FP32);
INSTANCE_DENSEDENSE(ARM, Precision::INT8);
template<>
Status DenseDenseHelper<ARM, Precision::FP32>::Init(OpContext<ARM> &ctx,\
const std::vector<Tensor4dPtr<ARM> >& ins, \
std::vector<Tensor4dPtr<ARM> >& outs) {
SABER_CHECK(_funcs_dense_dense.init(ins, outs, _param_dense_dense, SPECIFY, SABER_IMPL, ctx));
return Status::OK();
}
template<>
Status DenseDenseHelper<ARM, Precision::INT8>::Init(OpContext<ARM> &ctx,\
const std::vector<Tensor4dPtr<ARM> >& ins, \
std::vector<Tensor4dPtr<ARM> >& outs) {
SABER_CHECK(_funcs_dense_dense.init(ins, outs, _param_dense_dense, SPECIFY, SABER_IMPL, ctx));
return Status::OK();
}
ANAKIN_REGISTER_OP_HELPER(DenseDense, DenseDenseHelper, ARM, Precision::FP32);
ANAKIN_REGISTER_OP_HELPER(DenseDense, DenseDenseHelper, ARM, Precision::INT8);
#endif
#if defined USE_X86_PLACE || defined BUILD_LITE
INSTANCE_DENSEDENSE(X86, Precision::FP32);
template class DenseDenseHelper<X86, Precision::FP32>;
ANAKIN_REGISTER_OP_HELPER(DenseDense, DenseDenseHelper, X86, Precision::FP32);
#ifndef USE_SGX
INSTANCE_DENSEDENSE(X86, Precision::INT8);
template class DenseDenseHelper<X86, Precision::INT8>;
ANAKIN_REGISTER_OP_HELPER(DenseDense, DenseDenseHelper, X86, Precision::INT8);
#endif
#endif
#ifdef AMD_GPU
INSTANCE_DENSEDENSE(AMD, Precision::FP32);
template<>
Status DenseDenseHelper<AMD, Precision::FP32>::Init(OpContext<AMD> &ctx,\
const std::vector<Tensor4dPtr<AMD> >& ins, \
std::vector<Tensor4dPtr<AMD> >& outs) {
SABER_CHECK(_funcs_dense_dense.init(ins, outs, _param_dense_dense, SPECIFY, VENDER_IMPL, ctx));
return Status::OK();
}
ANAKIN_REGISTER_OP_HELPER(DenseDense, DenseDenseHelper, AMD, Precision::FP32);
#endif
//! register op
ANAKIN_REGISTER_OP(DenseDense)
.Doc("DenseDense operator")
#ifdef USE_CUDA
.__alias__<NV, Precision::FP32>("fullconnect")
.__alias__<NV, Precision::FP32>("fc")
.__alias__<NV, Precision::INT8>("fc")
#endif
#ifdef USE_ARM_PLACE
.__alias__<ARM, Precision::FP32>("fullconnect")
.__alias__<ARM, Precision::FP32>("fc")
.__alias__<ARM, Precision::INT8>("fc")
#endif
#if defined USE_X86_PLACE || defined BUILD_LITE
.__alias__<X86, Precision::FP32>("fullconnect")
.__alias__<X86, Precision::FP32>("fc")
#endif
#ifdef AMD_GPU
.__alias__<AMD, Precision::FP32>("fullconnect")
.__alias__<AMD, Precision::FP32>("fc")
#endif
.num_in(1)
.num_out(1)
.Args<int>("axis", " axis to compute ")
.Args<int>("out_dim", " out dim ")
.Args<bool>("bias_term", " whether fc weights have bias");
} /* namespace ops */
} /* namespace anakin */
| 3,746 |
49,742 | package com.thealgorithms.datastructures.queues;
/**
 * A [deque](https://en.wikipedia.org/wiki/Double-ended_queue), short for
 * "double-ended queue" and pronounced "deck", is sometimes referred to as a
 * head-tail linked list. A deque is a data structure based on a doubly linked
 * list, but it only supports adding and removing nodes at the beginning and
 * the end of the list.
 *
 * @author [<NAME>](https://github.com/iccowan)
 */
public class Deques<T> {
/**
* Node for the deque
*/
class DequeNode<S> {
/**
* Value of the node
*/
S val;
/**
* Next node in the deque from this node
*/
DequeNode<S> next = null;
/**
* Previous node in the deque from this node
*/
DequeNode<S> prev = null;
/**
* Constructor
*/
DequeNode(S val) {
this.val = val;
}
}
/**
* Head of the deque
*/
DequeNode<T> head = null;
/**
* Tail of the deque
*/
DequeNode<T> tail = null;
/**
* Size of the deque
*/
int size = 0;
/**
* Adds the specified value to the head of the deque
*
* @param val Value to add to the deque
*/
public void addFirst(T val) {
// Create a new node with the given value
DequeNode<T> newNode = new DequeNode<T>(val);
// Add the node
if (head == null) {
// If the deque is empty, add the node as the head and tail
head = newNode;
tail = newNode;
} else {
// If the deque is not empty, insert the node as the new head
newNode.next = head;
head.prev = newNode;
head = newNode;
}
size++;
}
/**
* Adds the specified value to the tail of the deque
*
* @param val Value to add to the deque
*/
public void addLast(T val) {
// Create a new node with the given value
DequeNode<T> newNode = new DequeNode<T>(val);
// Add the node
if (tail == null) {
// If the deque is empty, add the node as the head and tail
head = newNode;
tail = newNode;
} else {
// If the deque is not empty, insert the node as the new tail
newNode.prev = tail;
tail.next = newNode;
tail = newNode;
}
size++;
}
/**
* Removes and returns the first (head) value in the deque
*
* @return the value of the head of the deque
*/
public T pollFirst() {
// If the head is null, return null
if (head == null) {
return null;
}
// First, let's get the value of the old head
T oldHeadVal = head.val;
// Now, let's remove the head
if (head == tail) {
// If there is only one node, remove it
head = null;
tail = null;
} else {
// If there is more than one node, fix the references
head.next.prev = null;
DequeNode<T> oldHead = head;
head = head.next;
// Can be considered unnecessary...
// Unlinking the old head to make sure there are no random
// references possibly affecting garbage collection
oldHead.next = null;
}
size--;
return oldHeadVal;
}
/**
* Removes and returns the last (tail) value in the deque
*
* @return the value of the tail of the deque
*/
public T pollLast() {
// If the tail is null, return null
if (tail == null) {
return null;
}
// Let's get the value of the old tail
T oldTailVal = tail.val;
// Now, remove the tail
if (head == tail) {
// If there is only one node, remove it
head = null;
tail = null;
} else {
// If there is more than one node, fix the references
tail.prev.next = null;
DequeNode<T> oldTail = tail;
tail = tail.prev;
// Similarly to above, can be considered unnecessary
// See `pollFirst()` for explanation
oldTail.prev = null;
}
size--;
return oldTailVal;
}
/**
* Returns the first (head) value of the deque WITHOUT removing
*
* @return the value of the head of the deque
*/
public T peekFirst() {
return head.val;
}
/**
* Returns the last (tail) value of the deque WITHOUT removing
*
* @return the value of the tail of the deque
*/
public T peekLast() {
return tail.val;
}
/**
* Returns the size of the deque
*
* @return the size of the deque
*/
public int size() {
return size;
}
/**
* Returns whether or not the deque is empty
*
* @return whether or not the deque is empty
*/
public boolean isEmpty() {
return head == null;
}
/**
* Returns a stringified deque in a pretty form:
*
* <p>
* Head -> 1 <-> 2 <-> 3 <- Tail
*
* @return the stringified deque
*/
@Override
public String toString() {
// Build with StringBuilder to avoid repeated string copies
StringBuilder dequeString = new StringBuilder("Head -> ");
DequeNode<T> currNode = head;
while (currNode != null) {
dequeString.append(currNode.val);
if (currNode.next != null) {
dequeString.append(" <-> ");
}
currNode = currNode.next;
}
dequeString.append(" <- Tail");
return dequeString.toString();
}
public static void main(String[] args) {
Deques<Integer> myDeque = new Deques<Integer>();
for (int i = 0; i < 42; i++) {
if (i / 42.0 < 0.5) {
myDeque.addFirst(i);
} else {
myDeque.addLast(i);
}
}
System.out.println(myDeque);
System.out.println("Size: " + myDeque.size());
System.out.println();
myDeque.pollFirst();
myDeque.pollFirst();
myDeque.pollLast();
System.out.println(myDeque);
System.out.println("Size: " + myDeque.size());
System.out.println();
int dequeSize = myDeque.size();
for (int i = 0; i < dequeSize; i++) {
int removing = -1;
if (i / 39.0 < 0.5) {
removing = myDeque.pollFirst();
} else {
removing = myDeque.pollLast();
}
System.out.println("Removing: " + removing);
}
System.out.println(myDeque);
System.out.println(myDeque.size());
}
}
| 3,208 |
332 | package com.alibaba.datax.core.job.scheduler.processinner;
import com.alibaba.datax.common.exception.DataXException;
import com.alibaba.datax.common.util.Configuration;
import com.alibaba.datax.core.job.scheduler.AbstractScheduler;
import com.alibaba.datax.core.statistics.container.communicator.AbstractContainerCommunicator;
import com.alibaba.datax.core.taskgroup.TaskGroupContainer;
import com.alibaba.datax.core.taskgroup.runner.TaskGroupContainerRunner;
import com.alibaba.datax.core.util.FrameworkErrorCode;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
public abstract class ProcessInnerScheduler extends AbstractScheduler {
private ExecutorService taskGroupContainerExecutorService;
private ThreadPoolExecutor threadPoolExecutor;
private List<TaskGroupContainer> taskGroupContainers = new ArrayList<>();
ProcessInnerScheduler(AbstractContainerCommunicator containerCommunicator) {
super(containerCommunicator);
}
@Override
public void startAllTaskGroup(List<Configuration> configurations) {
this.taskGroupContainerExecutorService = Executors
.newFixedThreadPool(configurations.size());
for (Configuration taskGroupConfiguration : configurations) {
TaskGroupContainerRunner taskGroupContainerRunner = newTaskGroupContainerRunner(taskGroupConfiguration);
//store task group containers
taskGroupContainers.add(taskGroupContainerRunner.getTaskGroupContainer());
this.taskGroupContainerExecutorService.execute(taskGroupContainerRunner);
}
this.taskGroupContainerExecutorService.shutdown();
}
@Override
public void dealFailedStat(AbstractContainerCommunicator frameworkCollector, Throwable throwable) {
this.taskGroupContainerExecutorService.shutdownNow();
throw DataXException.asDataXException(
FrameworkErrorCode.PLUGIN_RUNTIME_ERROR, throwable);
}
@Override
public void dealKillingStat(AbstractContainerCommunicator frameworkCollector, int totalTasks) {
this.taskGroupContainerExecutorService.shutdownNow();
}
@Override
protected void cancelAllTaskGroup() {
//shutdown each task group container
taskGroupContainers.forEach(TaskGroupContainer::shutdown);
//then to close the thread pool
this.taskGroupContainerExecutorService.shutdownNow();
}
@Override
protected void adjustTaskGroupSpeed(long byteSpeed, long recordSpeed) {
taskGroupContainers.forEach(taskGroupContainer -> taskGroupContainer.adjustSpeed(byteSpeed, recordSpeed));
}
private TaskGroupContainerRunner newTaskGroupContainerRunner(
Configuration configuration) {
TaskGroupContainer taskGroupContainer = new TaskGroupContainer(configuration);
return new TaskGroupContainerRunner(taskGroupContainer);
}
}
| 966 |
13,846 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
package com.microsoft.signalr;
/**
* A callback that takes no parameters.
*/
public interface Action {
// We can't use the @FunctionalInterface annotation because it's only
// available on Android API Level 24 and above.
void invoke();
}
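// Usage sketch (illustrative): even without @FunctionalInterface the
// single-method shape makes Action lambda-friendly wherever lambdas are
// available:
//
//   Action onClosed = () -> System.out.println("connection closed");
//   onClosed.invoke();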
| 102 |
1,848 |
struct S2 {
unsigned : 11;
};
| 15 |
1,355 | // Copyright (c) 2018 <NAME>
//
// I am making my contributions/submissions to this project solely in my
// personal capacity and am not conveying any rights to any intellectual
// property of any third parties.
// Jet public headers
#include <jet/logging.h>
#include <jet/macros.h>
#ifdef JET_WINDOWS
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# ifndef NOMINMAX
# define NOMINMAX
# endif
# include <objbase.h>
#endif
// Jet private headers
#include <private_helpers.h>
| 210 |
711 | package com.java110.community.listener.inspectionPlanStaff;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.java110.community.dao.IInspectionPlanStaffServiceDao;
import com.java110.core.annotation.Java110Listener;
import com.java110.core.context.DataFlowContext;
import com.java110.core.factory.GenerateCodeFactory;
import com.java110.entity.center.Business;
import com.java110.po.inspection.InspectionPlanStaffPo;
import com.java110.utils.constant.BusinessTypeConstant;
import com.java110.utils.constant.StatusConstant;
import com.java110.utils.util.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.transaction.annotation.Transactional;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Listener that saves inspection plan staff records.
 * Created by wuxw on 2018/5/18.
 */
@Java110Listener("saveInspectionPlanStaffInfoListener")
@Transactional
public class SaveInspectionPlanStaffInfoListener extends AbstractInspectionPlanStaffBusinessServiceDataFlowListener {
private static Logger logger = LoggerFactory.getLogger(SaveInspectionPlanStaffInfoListener.class);
@Autowired
private IInspectionPlanStaffServiceDao inspectionPlanStaffServiceDaoImpl;
@Override
public int getOrder() {
return 0;
}
@Override
public String getBusinessTypeCd() {
return BusinessTypeConstant.BUSINESS_TYPE_SAVE_PLAN_STAFF;
}
/**
 * Saves inspection plan staff data into the business table
 *
 * @param dataFlowContext data context
 * @param business current business object
 */
@Override
protected void doSaveBusiness(DataFlowContext dataFlowContext, Business business) {
JSONObject data = business.getDatas();
Assert.notEmpty(data, "no datas node, or no child nodes to process");
//handle the businessInspectionPlanStaff node
if (data.containsKey(InspectionPlanStaffPo.class.getSimpleName())) {
Object bObj = data.get(InspectionPlanStaffPo.class.getSimpleName());
JSONArray businessInspectionPlanStaffs = null;
if (bObj instanceof JSONObject) {
businessInspectionPlanStaffs = new JSONArray();
businessInspectionPlanStaffs.add(bObj);
} else {
businessInspectionPlanStaffs = (JSONArray) bObj;
}
//JSONObject businessInspectionPlanStaff = data.getJSONObject("businessInspectionPlanStaff");
for (int bInspectionPlanStaffIndex = 0; bInspectionPlanStaffIndex < businessInspectionPlanStaffs.size(); bInspectionPlanStaffIndex++) {
JSONObject businessInspectionPlanStaff = businessInspectionPlanStaffs.getJSONObject(bInspectionPlanStaffIndex);
doBusinessInspectionPlanStaff(business, businessInspectionPlanStaff);
if (bObj instanceof JSONObject) {
dataFlowContext.addParamOut("ipStaffId", businessInspectionPlanStaff.getString("ipStaffId"));
}
}
}
}
/**
 * Moves business data into the instance table
 *
 * @param dataFlowContext data context
 * @param business current business object
 */
@Override
protected void doBusinessToInstance(DataFlowContext dataFlowContext, Business business) {
JSONObject data = business.getDatas();
Map info = new HashMap();
info.put("bId", business.getbId());
info.put("operate", StatusConstant.OPERATE_ADD);
//inspection plan staff data
List<Map> businessInspectionPlanStaffInfo = inspectionPlanStaffServiceDaoImpl.getBusinessInspectionPlanStaffInfo(info);
if (businessInspectionPlanStaffInfo != null && businessInspectionPlanStaffInfo.size() > 0) {
reFreshShareColumn(info, businessInspectionPlanStaffInfo.get(0));
inspectionPlanStaffServiceDaoImpl.saveInspectionPlanStaffInfoInstance(info);
if (businessInspectionPlanStaffInfo.size() == 1) {
dataFlowContext.addParamOut("ipStaffId", businessInspectionPlanStaffInfo.get(0).get("ip_staff_id"));
}
}
}
/**
 * Refreshes the sharding column
 *
 * @param info query object
 * @param businessInfo record holding the community id
 */
private void reFreshShareColumn(Map info, Map businessInfo) {
if (info.containsKey("communityId")) {
return;
}
if (!businessInfo.containsKey("community_id")) {
return;
}
info.put("communityId", businessInfo.get("community_id"));
}
/**
 * Rolls back the order
 *
 * @param dataFlowContext data context
 * @param business current business object
 */
@Override
protected void doRecover(DataFlowContext dataFlowContext, Business business) {
String bId = business.getbId();
//Assert.hasLength(bId,"request payload does not contain bId");
Map info = new HashMap();
info.put("bId", bId);
info.put("statusCd", StatusConstant.STATUS_CD_VALID);
Map paramIn = new HashMap();
paramIn.put("bId", bId);
paramIn.put("statusCd", StatusConstant.STATUS_CD_INVALID);
//inspection plan staff data
List<Map> inspectionPlanStaffInfo = inspectionPlanStaffServiceDaoImpl.getInspectionPlanStaffInfo(info);
if (inspectionPlanStaffInfo != null && inspectionPlanStaffInfo.size() > 0) {
reFreshShareColumn(paramIn, inspectionPlanStaffInfo.get(0));
inspectionPlanStaffServiceDaoImpl.updateInspectionPlanStaffInfoInstance(paramIn);
}
}
/**
 * Handles the businessInspectionPlanStaff node
 *
 * @param business the full data node
 * @param businessInspectionPlanStaff the inspection plan staff node
 */
private void doBusinessInspectionPlanStaff(Business business, JSONObject businessInspectionPlanStaff) {
Assert.jsonObjectHaveKey(businessInspectionPlanStaff, "ipStaffId", "businessInspectionPlanStaff node does not contain an ipStaffId node");
if (businessInspectionPlanStaff.getString("ipStaffId").startsWith("-")) {
//refresh the cache
//flushInspectionPlanStaffId(business.getDatas());
businessInspectionPlanStaff.put("ipStaffId", GenerateCodeFactory.getGeneratorId(GenerateCodeFactory.CODE_PREFIX_ipStaffId));
}
businessInspectionPlanStaff.put("bId", business.getbId());
businessInspectionPlanStaff.put("operate", StatusConstant.OPERATE_ADD);
//save the inspection plan staff data
inspectionPlanStaffServiceDaoImpl.saveBusinessInspectionPlanStaffInfo(businessInspectionPlanStaff);
}
public IInspectionPlanStaffServiceDao getInspectionPlanStaffServiceDaoImpl() {
return inspectionPlanStaffServiceDaoImpl;
}
public void setInspectionPlanStaffServiceDaoImpl(IInspectionPlanStaffServiceDao inspectionPlanStaffServiceDaoImpl) {
this.inspectionPlanStaffServiceDaoImpl = inspectionPlanStaffServiceDaoImpl;
}
}
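//Payload sketch (illustrative field values, not taken from this project):
//doSaveBusiness expects business.getDatas() to carry an InspectionPlanStaffPo
//node, either as a single object or an array, e.g.
//
//  { "InspectionPlanStaffPo": { "ipStaffId": "-1" } }
//
//where a leading "-" in ipStaffId asks the listener to generate a fresh id.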
| 2,910 |
4,262 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.processor;
import org.apache.camel.AsyncProcessor;
import org.apache.camel.AsyncProducer;
import org.apache.camel.CamelContext;
import org.apache.camel.Channel;
import org.apache.camel.Endpoint;
import org.apache.camel.Processor;
import org.apache.camel.Producer;
import org.apache.camel.Route;
import org.apache.camel.impl.engine.CamelInternalProcessor;
import org.apache.camel.impl.engine.DefaultChannel;
import org.apache.camel.impl.engine.SharedCamelInternalProcessor;
import org.apache.camel.spi.InterceptSendToEndpoint;
import org.apache.camel.spi.InternalProcessor;
import org.apache.camel.spi.InternalProcessorFactory;
import org.apache.camel.spi.SharedInternalProcessor;
import org.apache.camel.spi.UnitOfWork;
import org.apache.camel.spi.annotations.JdkService;
@JdkService(InternalProcessorFactory.FACTORY)
public class DefaultInternalProcessorFactory implements InternalProcessorFactory {
public InternalProcessor addUnitOfWorkProcessorAdvice(CamelContext camelContext, Processor processor, Route route) {
CamelInternalProcessor internal = new CamelInternalProcessor(camelContext, processor);
internal.addAdvice(new CamelInternalProcessor.UnitOfWorkProcessorAdvice(route, camelContext));
return internal;
}
public InternalProcessor addChildUnitOfWorkProcessorAdvice(
CamelContext camelContext, Processor processor, Route route, UnitOfWork parent) {
CamelInternalProcessor internal = new CamelInternalProcessor(camelContext, processor);
internal.addAdvice(new CamelInternalProcessor.ChildUnitOfWorkProcessorAdvice(route, camelContext, parent));
return internal;
}
public SharedInternalProcessor createSharedCamelInternalProcessor(CamelContext camelContext) {
return new SharedCamelInternalProcessor(
camelContext, new CamelInternalProcessor.UnitOfWorkProcessorAdvice(null, camelContext));
}
public Channel createChannel(CamelContext camelContext) {
return new DefaultChannel(camelContext);
}
public AsyncProducer createInterceptSendToEndpointProcessor(
InterceptSendToEndpoint endpoint, Endpoint delegate, AsyncProducer producer, boolean skip) {
return new InterceptSendToEndpointProcessor(endpoint, delegate, producer, skip);
}
public AsyncProcessor createWrapProcessor(Processor processor, Processor wrapped) {
return new WrapProcessor(processor, wrapped);
}
public AsyncProducer createUnitOfWorkProducer(Producer producer) {
return new UnitOfWorkProducer(producer);
}
}
| 1,024 |
428 | <reponame>cping/LGame<gh_stars>100-1000
/**
* Copyright 2008 - 2019 The Loon Game Engine Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
* @project loon
* @author cping
* @email:<EMAIL>
* @version 0.5
*/
package org.test;
import loon.Screen;
import loon.Stage;
import loon.action.sprite.effect.TextEffect;
import loon.canvas.LColor;
import loon.events.FrameLoopEvent;
import loon.utils.MathUtils;
public class TextEffectTest extends Stage {
@Override
public void create() {
// Build a text effect
final TextEffect text = new TextEffect();
String[] messages = { "学英语", "好大的邪恶", "这屏幕又大又圆", "坚强", "我变色了", "我是什么颜色?", "绿了,绿了", "兄弟挺住", "有人看着看着就开了浏览器",
"敬你是条汉子", "爱是一道绿光", "力量", "一回生,二回熟,慢慢就习惯了", "头上长着青青草原", "人才", "是个狼灭", "这谁顶的住啊?", "楼歪了", "我整个人都歪了",
"要想生活过得去,就得头上……", "看热闹不嫌事大", "哈哈哈哈", "赐予你力量", "你们还有没有人性", "爱的力量", "共同开发", "舔狗没出路" };
for (int i = 0; i < messages.length; i++) {
String message = messages[i];
LColor color = i % 2 == 0 ? LColor.white : LColor.red;
if (message.indexOf("变色") != -1 || message.indexOf("力量") != -1 || message.indexOf('绿') != -1
|| message.indexOf("头上") != -1) {
color = LColor.green;
}
// Add the text entry
text.addText(messages[i], color, MathUtils.random(getHalfWidth(), getWidth() - 20),
MathUtils.random(20, getHeight() - 40), 60f,
message.indexOf('歪') != -1 ? MathUtils.random(0, 45) : 0, message.indexOf('大') != -1 ? 1.2f : 1f,
MathUtils.random(-1f, -5f), 0f);
}
// Add the effect to the Screen
add(text);
// Give the Screen a built-in loop event that fires every frame (zero interval)
loop(0, new FrameLoopEvent() {
@Override
public void invoke(long elapsedTime, Screen e) {
if (text.isCompleted()) {
text.reset();
}
}
@Override
public void completed() {
}
});
add(MultiScreenTest.getBackButton(this, 2));
}
}
| 1,186 |
6,181 | //=====================================================================
// Copyright 2019 (c), Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files(the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions :
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
//=====================================================================
#ifndef CMP_MATH_VEC4_H
#define CMP_MATH_VEC4_H
//====================================================
// Vector Class definitions for CPU & Intrinsics
//====================================================
#if defined(_LINUX) || defined(_WIN32)
//============================================= VEC2
//==================================================
template <class T>
class Vec2
{
public:
T x;
T y;
// *****************************************
// Constructors
// *****************************************
/// Default constructor
Vec2() : x((T)0), y((T)0){};
/// Value constructor
Vec2(const T &vx, const T &vy) : x(vx), y(vy){};
/// Copy constructor
Vec2(const Vec2<T> &val) : x(val.x), y(val.y){};
/// Single value constructor. Sets all components to the given value
Vec2(const T &v) : x(v), y(v){};
// *****************************************
// Conversions/Assignment/Indexing
// *****************************************
/// cast to T*
operator const T *() const { return (const T *)this; };
/// cast to T*
operator T *() { return (T *)this; };
/// Indexing
const T &operator[](int i) const { return ((const T *)this)[i]; };
T &operator[](int i) { return ((T *)this)[i]; };
/// Assignment
const Vec2<T> &operator=(const Vec2<T> &rhs)
{
x = rhs.x;
y = rhs.y;
return *this;
};
// *****************************************
// Comparison
// *****************************************
/// Equality comparison
bool operator==(const Vec2<T> &rhs) const { return (x == rhs.x && y == rhs.y); };
/// Inequality comparision
bool operator!=(const Vec2<T> &rhs) const { return (x != rhs.x || y != rhs.y); };
// *****************************************
// Arithmetic
// *****************************************
/// Addition
const Vec2<T> operator+(const Vec2<T> &rhs) const { return Vec2<T>(x + rhs.x, y + rhs.y); };
/// Subtraction
const Vec2<T> operator-(const Vec2<T> &rhs) const { return Vec2<T>(x - rhs.x, y - rhs.y); };
/// Multiply by scalar
const Vec2<T> operator*(const T &v) const { return Vec2<T>(x * v, y * v); };
/// Divide by scalar
const Vec2<T> operator/(const T &v) const { return Vec2<T>(x / v, y / v); };
/// Addition in-place
Vec2<T> &operator+=(const Vec2<T> &rhs)
{
x += rhs.x;
y += rhs.y;
return *this;
};
/// Subtract in-place
Vec2<T> &operator-=(const Vec2<T> &rhs)
{
x -= rhs.x;
y -= rhs.y;
return *this;
};
/// Scalar multiply in-place
Vec2<T> &operator*=(const T &v)
{
x *= v;
y *= v;
return *this;
};
/// Scalar divide in-place
Vec2<T> &operator/=(const T &v)
{
x /= v;
y /= v;
return *this;
};
};
typedef Vec2<float> CMP_Vec2f;
typedef Vec2<float> CGU_Vec2f;
typedef Vec2<float> CGV_Vec2f;
typedef Vec2<double> CMP_Vec2d;
typedef Vec2<int> CMP_Vec2i;
//============================================= VEC3
//==================================================
template <class T>
class Vec3
{
public:
T x;
T y;
T z;
// *****************************************
// Constructors
// *****************************************
/// Default constructor
Vec3() : x((T)0), y((T)0), z((T)0){};
/// Value constructor
Vec3(const T &vx, const T &vy, const T &vz) : x(vx), y(vy), z(vz){};
/// Copy constructor
Vec3(const Vec3<T> &val) : x(val.x), y(val.y), z(val.z){};
/// Single value constructor. Sets all components to the given value
Vec3(const T &v) : x(v), y(v), z(v){};
/// Array constructor. Assumes a 3-component array
Vec3(const T *v) : x(v[0]), y(v[1]), z(v[2]){};
// *****************************************
// Conversions/Assignment/Indexing
// *****************************************
/// cast to T*
operator const T *() const { return (const T *)this; };
/// cast to T*
operator T *() { return (T *)this; };
/// Assignment
const Vec3<T> &operator=(const Vec3<T> &rhs)
{
x = rhs.x;
y = rhs.y;
z = rhs.z;
return *this;
};
// *****************************************
// Comparison
// *****************************************
/// Equality comparison
bool operator==(const Vec3<T> &rhs) const { return (x == rhs.x && y == rhs.y && z == rhs.z); };
/// Inequality comparision
bool operator!=(const Vec3<T> &rhs) const { return (x != rhs.x || y != rhs.y || z != rhs.z); };
// *****************************************
// Arithmetic
// *****************************************
/// Addition
const Vec3<T> operator+(const Vec3<T> &rhs) const
{
return Vec3<T>(x + rhs.x, y + rhs.y, z + rhs.z);
};
/// Subtraction
const Vec3<T> operator-(const Vec3<T> &rhs) const
{
return Vec3<T>(x - rhs.x, y - rhs.y, z - rhs.z);
};
/// Multiply by scalar
const Vec3<T> operator*(const T &v) const { return Vec3<T>(x * v, y * v, z * v); };
/// Divide by scalar
const Vec3<T> operator/(const T &v) const { return Vec3<T>(x / v, y / v, z / v); };
/// Divide by vector
const Vec3<T> operator/(const Vec3<T> &rhs) const
{
return Vec3<T>(x / rhs.x, y / rhs.y, z / rhs.z);
};
/// Addition in-place
Vec3<T> &operator+=(const Vec3<T> &rhs)
{
x += rhs.x;
y += rhs.y;
z += rhs.z;
return *this;
};
/// Subtract in-place
Vec3<T> &operator-=(const Vec3<T> &rhs)
{
x -= rhs.x;
y -= rhs.y;
z -= rhs.z;
return *this;
};
/// Scalar multiply in-place
Vec3<T> &operator*=(const T &v)
{
x *= v;
y *= v;
z *= v;
return *this;
};
/// Scalar divide in-place
Vec3<T> &operator/=(const T &v)
{
x /= v;
y /= v;
z /= v;
return *this;
};
};
typedef Vec3<float> CGU_Vec3f;
typedef Vec3<float> CGV_Vec3f;
typedef Vec3<unsigned char> CGU_Vec3uc;
typedef Vec3<unsigned char> CGV_Vec3uc;
typedef Vec3<float> CMP_Vec3f;
typedef Vec3<double> CMP_Vec3d;
typedef Vec3<int> CMP_Vec3i;
typedef Vec3<unsigned char> CMP_Vec3uc;
//============================================= VEC4
//==================================================
template <class T>
class Vec4
{
public:
T x;
T y;
T z;
T w;
// *****************************************
// Constructors
// *****************************************
/// Default constructor
Vec4() : x((T)0), y((T)0), z((T)0), w((T)0){};
/// Value constructor
Vec4(const T &vx, const T &vy, const T &vz, const T &vw) : x(vx), y(vy), z(vz), w(vw){};
/// Copy constructor
Vec4(const Vec4<T> &val) : x(val.x), y(val.y), z(val.z), w(val.w){};
/// Single value constructor. Sets all components to the given value
Vec4(const T &v) : x(v), y(v), z(v), w(v){};
/// Array constructor. Assumes a 4-component array
Vec4(const T *v) : x(v[0]), y(v[1]), z(v[2]), w(v[3]){};
// *****************************************
// Conversions/Assignment/Indexing
// *****************************************
/// cast to T*
operator const T *() const { return (const T *)this; };
/// cast to T*
operator T *() { return (T *)this; };
/// Assignment
const Vec4<T> &operator=(const Vec4<T> &rhs)
{
x = rhs.x;
y = rhs.y;
z = rhs.z;
w = rhs.w;
return *this;
};
// *****************************************
// Comparison
// *****************************************
/// Equality comparison
bool operator==(const Vec4<T> &rhs) const
{
return (x == rhs.x && y == rhs.y && z == rhs.z && w == rhs.w);
};
/// Inequality comparision
bool operator!=(const Vec4<T> &rhs) const
{
return (x != rhs.x || y != rhs.y || z != rhs.z || w != rhs.w);
};
// *****************************************
// Arithmetic
// *****************************************
/// Addition
const Vec4<T> operator+(const Vec4<T> &rhs) const
{
return Vec4<T>(x + rhs.x, y + rhs.y, z + rhs.z, w + rhs.w);
};
/// Subtraction
const Vec4<T> operator-(const Vec4<T> &rhs) const
{
return Vec4<T>(x - rhs.x, y - rhs.y, z - rhs.z, w - rhs.w);
};
/// Multiply by scalar
const Vec4<T> operator*(const T &v) const { return Vec4<T>(x * v, y * v, z * v, w * v); };
/// Divide by scalar
const Vec4<T> operator/(const T &v) const { return Vec4<T>(x / v, y / v, z / v, w / v); };
/// Divide by vector
const Vec4<T> operator/(const Vec4<T> &rhs) const
{
return Vec4<T>(x / rhs.x, y / rhs.y, z / rhs.z, w / rhs.w);
};
/// Addition in-place
Vec4<T> &operator+=(const Vec4<T> &rhs)
{
x += rhs.x;
y += rhs.y;
z += rhs.z;
w += rhs.w;
return *this;
};
/// Subtract in-place
Vec4<T> &operator-=(const Vec4<T> &rhs)
{
x -= rhs.x;
y -= rhs.y;
z -= rhs.z;
w -= rhs.w;
return *this;
};
/// Scalar multiply in-place
Vec4<T> &operator*=(const T &v)
{
x *= v;
y *= v;
z *= v;
w *= v;
return *this;
};
/// Scalar divide in-place
Vec4<T> &operator/=(const T &v)
{
x /= v;
y /= v;
z /= v;
w /= v;
return *this;
};
};
#include <float.h>
#include <math.h>
#include <stdio.h>
typedef Vec4<float> CMP_Vec4f;
typedef Vec4<double> CMP_Vec4d;
typedef Vec4<int> CMP_Vec4i;
typedef Vec4<unsigned int> CMP_Vec4ui; // unsigned 16 bit x,y,x,w
typedef Vec4<unsigned char> CMP_Vec4uc; // unsigned 8 bit x,y,x,w
typedef Vec4<unsigned char> CGU_Vec4uc; // unsigned 8 bit x,y,x,w
typedef Vec4<unsigned char> CGV_Vec4uc; // unsigned 8 bit x,y,x,w
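// Usage sketch (illustrative, not part of the library): exercises the
// component-wise operators defined above.
inline CMP_Vec4f CMP_Vec4f_LerpSketch(const CMP_Vec4f &a, const CMP_Vec4f &b, float t)
{
    // a + (b - a) * t, built entirely from the overloads above
    return a + (b - a) * t;
}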
#endif // not ASPM_GPU
#endif // Header Guard
| 4,107 |
5,813 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.segment.column;
import org.apache.druid.query.filter.ColumnIndexSelector;
/**
* Sort of like {@link ColumnCapabilities}, except for indexes supplied by {@link ColumnIndexSelector}, provides
* information for how query processing may use indexes.
*/
public interface ColumnIndexCapabilities
{
/**
* Indicates if an index can be inverted for use with a 'NOT' filter. Some types of indexes may not be invertible,
* such as those which provide false positive matches.
*/
boolean isInvertible();
/**
* Indicates if an index is an exact match, or should also be post-filtered with a value matcher. Filters which
* are not an exact match must always use a value matcher as a post-filter, even if they have an index.
*/
boolean isExact();
ColumnIndexCapabilities merge(ColumnIndexCapabilities other);
}
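// Implementing sketch (illustrative, not part of Druid): a plain value type
// where merge() ANDs the flags, the natural combination when several indexes
// back a single filter.
//
//   class SimpleColumnIndexCapabilities implements ColumnIndexCapabilities {
//     private final boolean invertible;
//     private final boolean exact;
//     SimpleColumnIndexCapabilities(boolean invertible, boolean exact) {
//       this.invertible = invertible;
//       this.exact = exact;
//     }
//     public boolean isInvertible() { return invertible; }
//     public boolean isExact() { return exact; }
//     public ColumnIndexCapabilities merge(ColumnIndexCapabilities o) {
//       return new SimpleColumnIndexCapabilities(
//           invertible && o.isInvertible(), exact && o.isExact());
//     }
//   }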
| 433 |
3,897 | <reponame>mcheah-bose/mbed-os<filename>connectivity/FEATURE_BLE/libraries/TARGET_CORDIO_LL/stack/controller/sources/ble/lhci/lhci_evt_cis_slave.c<gh_stars>1000+
/*************************************************************************************************/
/*!
* \file
*
* \brief LL HCI event module implementation file.
*
* Copyright (c) 2016-2018 Arm Ltd. All Rights Reserved.
*
* Copyright (c) 2019 Packetcraft, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*************************************************************************************************/
#include "lhci_int.h"
#include "lctr_api_cis_slave.h"
#include "hci_defs.h"
#include "ll_api.h"
#include "wsf_msg.h"
#include "wsf_os.h"
#include "util/bstream.h"
/**************************************************************************************************
Local Functions
**************************************************************************************************/
/*************************************************************************************************/
/*!
* \brief Pack a CIS request event packet.
*
* \param pBuf Packed packet buffer.
* \param pEvt Generate CIS request data.
*
* \return Packet length.
*/
/*************************************************************************************************/
static uint8_t lhciPackCisReqEvt(uint8_t *pBuf, const LlCisReqInd_t *pEvt)
{
const uint8_t len = HCI_LEN_LE_CIS_REQ;
UINT8_TO_BSTREAM (pBuf, HCI_LE_CIS_REQ_EVT);
UINT16_TO_BSTREAM (pBuf, pEvt->aclHandle);
UINT16_TO_BSTREAM(pBuf, pEvt->cisHandle);
UINT8_TO_BSTREAM (pBuf, pEvt->cigId);
UINT8_TO_BSTREAM (pBuf, pEvt->cisId);
return len;
}
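/* Packed layout sketch, read off the BSTREAM calls above:
 *   byte 0   : HCI_LE_CIS_REQ_EVT subevent code
 *   bytes 1-2: ACL connection handle (little-endian)
 *   bytes 3-4: CIS connection handle (little-endian)
 *   byte 5   : CIG ID
 *   byte 6   : CIS ID
 * so HCI_LEN_LE_CIS_REQ is expected to be 7 octets. */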
/**************************************************************************************************
External Functions
**************************************************************************************************/
/*************************************************************************************************/
/*!
* \brief LL CIS slave event handler.
*
* \param pEvt Buffer containing LL event.
*
* \return TRUE if event handled, FALSE otherwise.
*/
/*************************************************************************************************/
bool_t lhciSlvCisEncodeEvtPkt(LlEvt_t *pEvt)
{
uint8_t *pEvtBuf = NULL;
switch (pEvt->hdr.event)
{
case LL_CIS_REQ_IND:
if ((lhciCb.leEvtMsk & ((uint64_t)(HCI_EVT_MASK_LE_CIS_REQ_EVT) << LHCI_BYTE_TO_BITS(3))) &&
(lhciCb.evtMsk & ((uint64_t)(HCI_EVT_MASK_LE_META) << LHCI_BYTE_TO_BITS(7))))
{
if ((pEvtBuf = lhciAllocEvt(HCI_LE_META_EVT, HCI_LEN_LE_CIS_REQ)) != NULL)
{
lhciPackCisReqEvt(pEvtBuf, &pEvt->cisReqInd);
}
}
break;
default:
break;
}
if (pEvtBuf)
{
lhciSendEvt(pEvtBuf);
return TRUE;
}
return FALSE;
}
| 1,160 |
1,073 | <filename>tools/clang/test/Analysis/Inputs/system-header-simulator-for-objc-dealloc.h
#pragma clang system_header
#define nil ((id)0)
typedef signed char BOOL;
@protocol NSObject
- (BOOL)isEqual:(id)object;
- (Class)class;
@end
@interface NSObject <NSObject> {}
+ (instancetype)alloc;
- (void)dealloc;
- (id)init;
- (id)retain;
- (oneway void)release;
@end
@interface NSRunLoop : NSObject
+ (NSRunLoop *)currentRunLoop;
- (void)cancelPerformSelectorsWithTarget:(id)target;
@end
@interface NSNotificationCenter : NSObject
+ (NSNotificationCenter *)defaultCenter;
- (void)removeObserver:(id)observer;
@end
typedef struct objc_selector *SEL;
void _Block_release(const void *aBlock);
#define Block_release(...) _Block_release((const void *)(__VA_ARGS__))
@interface CIFilter : NSObject
@end
| 296 |
1,127 | // Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "kernel_base_opencl.h"
namespace kernel_selector {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// tile_params
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
struct tile_params : public base_params {
tile_params() : base_params(KernelType::TILE) {}
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// tile_optional_params
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
struct tile_optional_params : optional_params {
tile_optional_params() : optional_params(KernelType::TILE) {}
};
class TileKernelRef : public KernelBaseOpenCL {
public:
TileKernelRef() : KernelBaseOpenCL("tile_ref") {}
virtual ~TileKernelRef() {}
virtual JitConstants GetJitConstants(const tile_params& params) const;
virtual CommonDispatchData SetDefault(const tile_params& params, const optional_params&) const;
KernelsData GetKernelsData(const Params& params, const optional_params& options) const override;
KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override;
ParamsKey GetSupportedKey() const override;
};
} // namespace kernel_selector
| 352 |
1,192 | <reponame>clayne/DirectXShaderCompiler<gh_stars>1000+
// RUN: %clang_cc1 -fsyntax-only -verify -pedantic %s
#pragma clang diagnostic pop // expected-warning{{pragma diagnostic pop could not pop, no matching push}}
#pragma clang diagnostic puhs // expected-warning {{pragma diagnostic expected 'error', 'warning', 'ignored', 'fatal', 'push', or 'pop'}}
int a = 'df'; // expected-warning{{multi-character character constant}}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmultichar"
int b = 'df'; // no warning.
#pragma clang diagnostic pop
int c = 'df'; // expected-warning{{multi-character character constant}}
#pragma clang diagnostic pop // expected-warning{{pragma diagnostic pop could not pop, no matching push}}
| 231 |
2,151 | // Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_VR_SEQUENCE_H_
#define CHROME_BROWSER_VR_SEQUENCE_H_
#include <list>
#include "base/bind.h"
#include "base/callback.h"
namespace vr {
// This is much like an animation. It is a series of callbacks associated with
// time deltas, relative to when the sequence was queued.
class Sequence {
public:
Sequence();
~Sequence();
void Tick(base::TimeTicks now);
void Add(base::OnceCallback<void()> task, base::TimeDelta delta);
bool empty() const { return tasks_.empty(); }
private:
struct SequencedTask {
SequencedTask(base::OnceCallback<void()> task, base::TimeDelta delta);
SequencedTask(SequencedTask&& other);
~SequencedTask();
base::OnceCallback<void()> task;
base::TimeDelta delta;
};
std::list<SequencedTask> tasks_;
base::TimeTicks start_time_;
bool started_ = false;
DISALLOW_COPY_AND_ASSIGN(Sequence);
};
} // namespace vr
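// Usage sketch (illustrative; the binding and timing calls are assumptions
// about the surrounding base/ APIs):
//
//   vr::Sequence seq;
//   seq.Add(base::BindOnce(&StartFade), base::TimeDelta());
//   seq.Add(base::BindOnce(&FinishFade), base::TimeDelta::FromMilliseconds(250));
//   // Each frame: runs tasks whose delta has elapsed since the first Tick().
//   seq.Tick(base::TimeTicks::Now());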
#endif // CHROME_BROWSER_VR_SEQUENCE_H_
| 378 |
2,151 | <gh_stars>1000+
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package android.net;
import android.annotation.SystemApi;
import android.os.Parcel;
import android.os.Parcelable;
import java.util.Objects;
/**
* A network identifier along with a score for the quality of that network.
*
* @hide
*/
@SystemApi
public class ScoredNetwork implements Parcelable {
/** A {@link NetworkKey} uniquely identifying this network. */
public final NetworkKey networkKey;
/**
* The {@link RssiCurve} representing the scores for this network based on the RSSI.
*
* <p>This field is optional and may be set to null to indicate that no score is available for
* this network at this time. Such networks, along with networks for which the scorer has not
* responded, are always prioritized below scored networks, regardless of the score.
*/
public final RssiCurve rssiCurve;
/**
* A boolean value that indicates whether or not the network is believed to be metered.
*
* <p>A network can be classified as metered if the user would be
* sensitive to heavy data usage on that connection due to monetary costs,
* data limitations or battery/performance issues. A typical example would
* be a wifi connection where the user would be charged for usage.
*/
public final boolean meteredHint;
/**
* Construct a new {@link ScoredNetwork}.
*
* @param networkKey the {@link NetworkKey} uniquely identifying this network.
* @param rssiCurve the {@link RssiCurve} representing the scores for this network based on the
* RSSI. This field is optional, and may be skipped to represent a network which the scorer
* has opted not to score at this time. Passing a null value here is strongly preferred to
* not returning any {@link ScoredNetwork} for a given {@link NetworkKey} because it
* indicates to the system not to request scores for this network in the future, although
* the scorer may choose to issue an out-of-band update at any time.
*/
public ScoredNetwork(NetworkKey networkKey, RssiCurve rssiCurve) {
this(networkKey, rssiCurve, false /* meteredHint */);
}
/**
* Construct a new {@link ScoredNetwork}.
*
* @param networkKey the {@link NetworkKey} uniquely identifying this network.
* @param rssiCurve the {@link RssiCurve} representing the scores for this network based on the
* RSSI. This field is optional, and may be skipped to represent a network which the scorer
* has opted not to score at this time. Passing a null value here is strongly preferred to
* not returning any {@link ScoredNetwork} for a given {@link NetworkKey} because it
* indicates to the system not to request scores for this network in the future, although
* the scorer may choose to issue an out-of-band update at any time.
* @param meteredHint A boolean value indicating whether or not the network is believed to be
* metered.
*/
public ScoredNetwork(NetworkKey networkKey, RssiCurve rssiCurve, boolean meteredHint) {
this.networkKey = networkKey;
this.rssiCurve = rssiCurve;
this.meteredHint = meteredHint;
}
private ScoredNetwork(Parcel in) {
networkKey = NetworkKey.CREATOR.createFromParcel(in);
if (in.readByte() == 1) {
rssiCurve = RssiCurve.CREATOR.createFromParcel(in);
} else {
rssiCurve = null;
}
meteredHint = in.readByte() != 0;
}
@Override
public int describeContents() {
return 0;
}
@Override
public void writeToParcel(Parcel out, int flags) {
networkKey.writeToParcel(out, flags);
if (rssiCurve != null) {
out.writeByte((byte) 1);
rssiCurve.writeToParcel(out, flags);
} else {
out.writeByte((byte) 0);
}
out.writeByte((byte) (meteredHint ? 1 : 0));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ScoredNetwork that = (ScoredNetwork) o;
return Objects.equals(networkKey, that.networkKey)
&& Objects.equals(rssiCurve, that.rssiCurve)
&& Objects.equals(meteredHint, that.meteredHint);
}
@Override
public int hashCode() {
return Objects.hash(networkKey, rssiCurve, meteredHint);
}
@Override
public String toString() {
return "ScoredNetwork[key=" + networkKey + ",score=" + rssiCurve
+ ",meteredHint=" + meteredHint + "]";
}
public static final Parcelable.Creator<ScoredNetwork> CREATOR =
new Parcelable.Creator<ScoredNetwork>() {
@Override
public ScoredNetwork createFromParcel(Parcel in) {
return new ScoredNetwork(in);
}
@Override
public ScoredNetwork[] newArray(int size) {
return new ScoredNetwork[size];
}
};
}
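// Usage sketch (illustrative; how the key and curve are built is an
// assumption, both come from sibling classes in this package):
//
//   NetworkKey key = ...;   // identifies, e.g., an SSID/BSSID pair
//   RssiCurve curve = ...;  // scorer-supplied, may be null for "unscored"
//   ScoredNetwork scored = new ScoredNetwork(key, curve, true /* metered */);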
| 2,107 |
563 | <gh_stars>100-1000
from .ner.module import NER
__all__ = ['NER']
| 26 |
2,322 | /*
* Copyright 2016 <NAME> for CodeWeavers
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
#ifndef __WINE_FONTSUB_H
#define __WINE_FONTSUB_H
#ifdef __cplusplus
extern "C" {
#endif
typedef void *(__cdecl *CFP_ALLOCPROC)(size_t);
typedef void *(__cdecl *CFP_REALLOCPROC)(void *, size_t);
typedef void (__cdecl *CFP_FREEPROC)(void *);
#define TTFCFP_SUBSET 0
#define TTFCFP_SUBSET1 1
#define TTFCFP_DELTA 2
#define TTFCFP_UNICODE_PLATFORMID 0
#define TTFCFP_APPLE_PLATFORMID 1
#define TTFCFP_ISO_PLATFORMID 2
#define TTFCFP_MS_PLATFORMID 3
#define TTFCFP_STD_MAC_CHAR_SET 0
#define TTFCFP_SYMBOL_CHAR_SET 0
#define TTFCFP_UNICODE_CHAR_SET 1
#define TTFCFP_DONT_CARE 0xffff
#define TTFCFP_LANG_KEEP_ALL 0
#define TTFCFP_FLAGS_SUBSET 0x0001
#define TTFCFP_FLAGS_COMPRESS 0x0002
#define TTFCFP_FLAGS_TTC 0x0004
#define TTFCFP_FLAGS_GLYPHLIST 0x0008
#define ERR_GENERIC 1000
#define ERR_MEM 1005
ULONG __cdecl CreateFontPackage(const unsigned char *src, const ULONG src_len, unsigned char **dest,
ULONG *dest_len, ULONG *written, const unsigned short flags, const unsigned short face_index,
const unsigned short format, const unsigned short lang, const unsigned short platform,
const unsigned short encoding, const unsigned short *keep_list, const unsigned short keep_len,
CFP_ALLOCPROC allocproc, CFP_REALLOCPROC reallocproc, CFP_FREEPROC freeproc, void *reserved);
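/* Illustrative call sketch (not part of the original header; the buffer,
 * keep-list and allocator names are hypothetical):
 *
 *   unsigned char *dest = NULL;
 *   ULONG dest_len = 0, written = 0;
 *   ULONG rc = CreateFontPackage(src, src_len, &dest, &dest_len, &written,
 *       TTFCFP_FLAGS_SUBSET, 0, TTFCFP_SUBSET, TTFCFP_LANG_KEEP_ALL,
 *       TTFCFP_MS_PLATFORMID, TTFCFP_UNICODE_CHAR_SET, keep_list, keep_len,
 *       my_alloc, my_realloc, my_free, NULL);
 */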
#ifdef __cplusplus
}
#endif
#endif
| 798 |
483 | #include "stdinc.h"
#include "byte_pattern.h"
namespace MapJustify {
/*-----------------------------------------------*/
uintptr_t map1_v125_end;
uintptr_t map1_v125_end2;
__declspec(naked) void map1_v125_start()
{
__asm {
cmp al, ESCAPE_SEQ_1;
jz r_2;
cmp al, ESCAPE_SEQ_2;
jz r_2;
cmp al, ESCAPE_SEQ_3;
jz r_2;
cmp al, ESCAPE_SEQ_4;
jz r_2;
lea ecx, [ebp - 0x10];
lea edx, [ecx + 1];
push map1_v125_end;
ret;
r_2:
mov ecx, 3;
cmp byte ptr[ebp - 0xAC], 0x10;
lea eax, [ebp - 0xC0];
cmovnb eax, [ebp - 0xC0];
mov ax, [eax + esi + 1];
mov word ptr[ebp - 0x10 + 1], ax;
add esi, 2;
push map1_v125_end2;
ret;
}
}
/*-----------------------------------------------*/
uintptr_t map2_v125_end;
uintptr_t map2_v125_end2;
__declspec(naked) void map2_v125_start()
{
__asm {
cmp esi, edi;
jz s_4_jmp;
push 0xFFFFFFFF;
push 0;
lea eax, [ebp - 0x128];
push map2_v125_end;
ret;
s_4_jmp:
push map2_v125_end2;
ret;
}
}
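/* Note on the idiom above (descriptive comment, not original commentary):
each naked stub is installed with injector::MakeJMP over the matched bytes,
runs the patched logic, and resumes the game with `push <resume-address>; ret`,
where the resume addresses (map*_end / map*_end2) are filled in by the hook
installers below. */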
/*-----------------------------------------------*/
errno_t map1_2_hook(EU4Version version) {
std::string desc = "map justify 1";
switch (version) {
case v1_25_X:
case v1_26_X:
case v1_27_X:
case v1_28_X:
case v1_28_3:
byte_pattern::temp_instance().find_pattern("8D 4D F0 8D 51 01 8A 01 41");
if (byte_pattern::temp_instance().has_size(1, desc) || byte_pattern::temp_instance().has_size(2, desc)) {
// lea ecx,[ebp-0x10]
injector::MakeJMP(byte_pattern::temp_instance().get_first().address(), map1_v125_start);
// mov al,[ecx]
map1_v125_end = byte_pattern::temp_instance().get_first().address(0x6);
// push ecx
map1_v125_end2 = byte_pattern::temp_instance().get_first().address(0xF);
// push 0FFFFFFFFh
injector::MakeJMP(byte_pattern::temp_instance().get_first().address(0x1F), map2_v125_start);
// push eax
map2_v125_end = byte_pattern::temp_instance().get_first().address(0x29);
}
else return EU4_ERROR1;
return NOERROR;
}
return EU4_ERROR1;
}
errno_t map2_end2_hook(EU4Version version) {
std::string desc = "map justify 2";
switch (version) {
case v1_25_X:
case v1_26_X:
case v1_27_X:
case v1_28_X:
case v1_28_3:
byte_pattern::temp_instance().find_pattern("8B 45 AC 8D 55 BC 6A 01");
if (byte_pattern::temp_instance().has_size(1, desc)) {
// mov eax,[ebp-0x54]
map2_v125_end2 = byte_pattern::temp_instance().get_first().address();
}
else return EU4_ERROR1;
return NOERROR;
}
return EU4_ERROR1;
}
/*-----------------------------------------------*/
uintptr_t map3_v125_end;
uintptr_t map3_v125_end2;
__declspec(naked) void map3_v125_start()
{
__asm {
cmp byte ptr[ecx + eax], ESCAPE_SEQ_1;
jz t_10;
cmp byte ptr[ecx + eax], ESCAPE_SEQ_2;
jz t_11;
cmp byte ptr[ecx + eax], ESCAPE_SEQ_3;
jz t_12;
cmp byte ptr[ecx + eax], ESCAPE_SEQ_4;
jz t_13;
movzx eax, byte ptr[ecx + eax];
jmp t_3;
t_10:
movzx eax, word ptr[ecx + eax + 1];
jmp t_1x;
t_11:
movzx eax, word ptr[ecx + eax + 1];
sub eax, SHIFT_2;
jmp t_1x;
t_12:
movzx eax, word ptr[ecx + eax + 1];
add eax, SHIFT_3;
jmp t_1x;
t_13:
movzx eax, word ptr[ecx + eax + 1];
add eax, SHIFT_4;
t_1x:
add ecx, 2;
cmp eax, NO_FONT;
ja t_3;
mov eax, NOT_DEF;
t_3:
movzx eax, ax;
mov eax, [edx + eax * 4];
test eax, eax;
jz loc_15D8053_jmp;
push map3_v125_end;
ret;
loc_15D8053_jmp:
push map3_v125_end2;
ret;
}
}
/*-----------------------------------------------*/
errno_t map3_hook(EU4Version version) {
std::string desc = "map justify 3";
switch (version) {
case v1_25_X:
case v1_26_X:
case v1_27_X:
case v1_28_X:
case v1_28_3:
byte_pattern::temp_instance().find_pattern("0F B6 04 08 8B 04 82 85 C0 74");
if (byte_pattern::temp_instance().has_size(1, desc)) {
// movzx eax, byte ptr [eax+ecx]
injector::MakeJMP(byte_pattern::temp_instance().get_first().address(), map3_v125_start);
// cmp word ptr [eax+6], 0
map3_v125_end = byte_pattern::temp_instance().get_first().address(0xB);
// inc ecx
map3_v125_end2 = byte_pattern::temp_instance().get_first().address(0x13);
}
else return EU4_ERROR1;
return NOERROR;
}
return EU4_ERROR1;
}
/*-----------------------------------------------*/
errno_t init(EU4Version version) {
errno_t result = 0;
byte_pattern::temp_instance().debug_output2("map font justify");
result |= map1_2_hook(version);
result |= map2_end2_hook(version);
result |= map3_hook(version);
return result;
}
} | 2,266 |
1,210 | <reponame>randolphwong/mcsema
// Boost random_generator.hpp header file ----------------------------------------------//
// Copyright 2010 <NAME>.
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_UUID_RANDOM_GENERATOR_HPP
#define BOOST_UUID_RANDOM_GENERATOR_HPP
#include <boost/uuid/uuid.hpp>
#include <boost/uuid/seed_rng.hpp>
#include <boost/random/uniform_int.hpp>
#include <boost/random/variate_generator.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/assert.hpp>
#include <boost/shared_ptr.hpp>
#include <limits>
namespace boost {
namespace uuids {
// generate a random-based uuid
template <typename UniformRandomNumberGenerator>
class basic_random_generator {
private:
typedef uniform_int<unsigned long> distribution_type;
typedef variate_generator<UniformRandomNumberGenerator*, distribution_type> generator_type;
struct null_deleter
{
void operator()(void const *) const {}
};
public:
typedef uuid result_type;
// default constructor creates the random number generator
basic_random_generator()
: pURNG(new UniformRandomNumberGenerator)
, generator
( pURNG.get()
, distribution_type
( (std::numeric_limits<unsigned long>::min)()
, (std::numeric_limits<unsigned long>::max)()
)
)
{
// seed the random number generator
detail::seed(*pURNG);
}
// keep a reference to a random number generator
// don't seed a given random number generator
explicit basic_random_generator(UniformRandomNumberGenerator& gen)
: pURNG(&gen, null_deleter())
, generator
( pURNG.get()
, distribution_type
( (std::numeric_limits<unsigned long>::min)()
, (std::numeric_limits<unsigned long>::max)()
)
)
{}
// keep a pointer to a random number generator
// don't seed a given random number generator
explicit basic_random_generator(UniformRandomNumberGenerator* pGen)
: pURNG(pGen, null_deleter())
, generator
( pURNG.get()
, distribution_type
( (std::numeric_limits<unsigned long>::min)()
, (std::numeric_limits<unsigned long>::max)()
)
)
{
BOOST_ASSERT(pURNG);
}
uuid operator()()
{
uuid u;
int i=0;
unsigned long random_value = generator();
for (uuid::iterator it=u.begin(); it!=u.end(); ++it, ++i) {
if (i==sizeof(unsigned long)) {
random_value = generator();
i = 0;
}
// static_cast gets rid of warnings of converting unsigned long to boost::uint8_t
*it = static_cast<uuid::value_type>((random_value >> (i*8)) & 0xFF);
}
// set variant
// must be 0b10xxxxxx
*(u.begin()+8) &= 0xBF;
*(u.begin()+8) |= 0x80;
// set version
// must be 0b0100xxxx
*(u.begin()+6) &= 0x4F; //0b01001111
*(u.begin()+6) |= 0x40; //0b01000000
return u;
}
private:
shared_ptr<UniformRandomNumberGenerator> pURNG;
generator_type generator;
};
typedef basic_random_generator<mt19937> random_generator;
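// Usage sketch (illustrative):
//   boost::uuids::random_generator gen; // seeded on construction
//   boost::uuids::uuid u = gen(); // random-based (version 4) uuid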
}} // namespace boost::uuids
#endif //BOOST_UUID_RANDOM_GENERATOR_HPP
| 1,535 |
524 | """openid.py: an openid library for web.py
Notes:
- This will create a file called .openid_secret_key in the
current directory with your secret key in it. If someone
has access to this file they can log in as any user. And
if the app can't find this file for any reason (e.g. you
moved the app somewhere else) then each currently logged
in user will get logged out.
- State must be maintained through the entire auth process
-- this means that if you have multiple web.py processes
serving one set of URLs or if you restart your app often
then logins will fail. You have to replace sessions and
store for things to work.
- We set cookies starting with "openid_".
"""
import os
import random
import hmac
import __init__ as web
import openid.consumer.consumer
import openid.store.memstore
sessions = {}
store = openid.store.memstore.MemoryStore()
def _secret():
try:
secret = file('.openid_secret_key').read()
except IOError:
# file doesn't exist
secret = os.urandom(20)
file('.openid_secret_key', 'w').write(secret)
return secret
def _hmac(identity_url):
return hmac.new(_secret(), identity_url).hexdigest()
def _random_session():
n = random.random()
while n in sessions:
n = random.random()
n = str(n)
return n
def status():
oid_hash = web.cookies().get('openid_identity_hash', '').split(',', 1)
if len(oid_hash) > 1:
oid_hash, identity_url = oid_hash
if oid_hash == _hmac(identity_url):
return identity_url
return None
def form(openid_loc):
oid = status()
if oid:
return '''
<form method="post" action="%s">
<img src="http://openid.net/login-bg.gif" alt="OpenID" />
<strong>%s</strong>
<input type="hidden" name="action" value="logout" />
<input type="hidden" name="return_to" value="%s" />
<button type="submit">log out</button>
</form>''' % (openid_loc, oid, web.ctx.fullpath)
else:
return '''
<form method="post" action="%s">
<input type="text" name="openid" value=""
style="background: url(http://openid.net/login-bg.gif) no-repeat; padding-left: 18px; background-position: 0 50%%;" />
<input type="hidden" name="return_to" value="%s" />
<button type="submit">log in</button>
</form>''' % (openid_loc, web.ctx.fullpath)
def logout():
web.setcookie('openid_identity_hash', '', expires=-1)
class host:
def POST(self):
# unlike the usual scheme of things, the POST is actually called
# first here
i = web.input(return_to='/')
if i.get('action') == 'logout':
logout()
return web.redirect(i.return_to)
i = web.input('openid', return_to='/')
n = _random_session()
sessions[n] = {'webpy_return_to': i.return_to}
c = openid.consumer.consumer.Consumer(sessions[n], store)
a = c.begin(i.openid)
f = a.redirectURL(web.ctx.home, web.ctx.home + web.ctx.fullpath)
web.setcookie('openid_session_id', n)
return web.redirect(f)
def GET(self):
n = web.cookies('openid_session_id').openid_session_id
web.setcookie('openid_session_id', '', expires=-1)
return_to = sessions[n]['webpy_return_to']
c = openid.consumer.consumer.Consumer(sessions[n], store)
a = c.complete(web.input(), web.ctx.home + web.ctx.fullpath)
if a.status.lower() == 'success':
web.setcookie('openid_identity_hash', _hmac(a.identity_url) + ',' + a.identity_url)
del sessions[n]
return web.redirect(return_to)
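# Usage sketch (illustrative; the URL mapping and module path are hypothetical):
#   urls = ('/openid', 'openid.host')
# then embed form('/openid') in any page to render the log in / log out UI.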
| 1,551 |
510 | <reponame>xhuohai/nncase<gh_stars>100-1000
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "evaluate_stack.h"
using namespace nncase;
using namespace nncase::runtime;
using namespace nncase::runtime::stackvm;
evaluate_stack::evaluate_stack() noexcept
: top_(0)
{
entries_.resize(64);
}
bool evaluate_stack::empty() const noexcept
{
return top_ == 0;
}
bool evaluate_stack::full() const noexcept
{
return top_ == entries_.size();
}
result<stack_entry> evaluate_stack::peek() noexcept
{
if (!empty())
return ok(entries_[top_ - 1]);
return err(nncase_errc::stackvm_stack_underflow);
}
result<stack_entry> evaluate_stack::pop() noexcept
{
if (!empty())
return ok(entries_[--top_]);
return err(nncase_errc::stackvm_stack_underflow);
}
result<void> evaluate_stack::push(stack_entry entry) noexcept
{
if (full())
entries_.resize(entries_.size() + 1);
entries_[top_++] = entry;
return ok();
}
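// Usage sketch (illustrative, assuming a stack_entry value `entry`):
//   evaluate_stack s;
//   (void)s.push(entry); // grows the entry storage on demand
//   result<stack_entry> top = s.pop(); // underflow yields an error result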
| 515 |
4,212 | <reponame>beiMingSoft/coobjc
//
// KMGillSansLabel.h
//
//
// Created by <NAME> on 24/06/2013.
// Copyright (c) 2013 iKode Ltd. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface KMGillSansLabel : UILabel
@end
@interface KMGillSansBoldLabel : KMGillSansLabel
- (void)setFontSize:(CGFloat)size;
@end
@interface KMGillSansMediumLabel : KMGillSansLabel
- (void)setFontSize:(CGFloat)size;
@end
@interface KMGillSansRegularLabel : KMGillSansLabel
- (void)setFontSize:(CGFloat)size;
@end
@interface KMGillSansLightLabel : KMGillSansLabel
- (void)setFontSize:(CGFloat)size;
@end
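// Usage sketch (illustrative; `frame` is a hypothetical CGRect):
//   KMGillSansBoldLabel *label = [[KMGillSansBoldLabel alloc] initWithFrame:frame];
//   [label setFontSize:16.0f];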
| 250 |
775 | <gh_stars>100-1000
/*
* tkCursor.c --
*
* This file maintains a database of read-only cursors for the Tk
* toolkit. This allows cursors to be shared between widgets and
* also avoids round-trips to the X server.
*
* Copyright 1990 Regents of the University of California
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies. The University of California
* makes no representations about the suitability of this
* software for any purpose. It is provided "as is" without
* express or implied warranty.
*/
#ifndef lint
static char rcsid[] = "$Header: /user6/ouster/wish/RCS/tkCursor.c,v 1.12 91/10/31 11:40:41 ouster Exp $ SPRITE (Berkeley)";
#endif /* not lint */
#include "tkconfig.h"
#include "tkint.h"
/*
* One of the following data structures exists for each cursor that is
* currently active. Each structure is indexed with two hash tables
* defined below. One of the tables is idTable, and the other is either
* nameTable or dataTable, also defined below.
*/
typedef struct {
Cursor cursor; /* X identifier for cursor. */
Display *display; /* Display for which cursor is valid. */
int refCount; /* Number of active uses of cursor. */
Tcl_HashTable *otherTable; /* Second table (other than idTable) used
* to index this entry. */
Tcl_HashEntry *hashPtr; /* Entry in otherTable for this structure
* (needed when deleting). */
} TkCursor;
/*
* Hash table to map from a textual description of a cursor to the
* TkCursor record for the cursor, and key structure used in that
* hash table:
*/
static Tcl_HashTable nameTable;
typedef struct {
Tk_Uid name; /* Textual name for desired cursor. */
Display *display; /* Display for which cursor will be used. */
} NameKey;
/*
* Hash table to map from a collection of in-core data about a
* cursor (bitmap contents, etc.) to a TkCursor structure:
*/
static Tcl_HashTable dataTable;
typedef struct {
char *source; /* Cursor bits. */
char *mask; /* Mask bits. */
unsigned int width, height; /* Dimensions of cursor (and data
* and mask). */
int xHot, yHot; /* Location of cursor hot-spot. */
Tk_Uid fg, bg; /* Colors for cursor. */
Display *display; /* Display on which cursor will be used. */
} DataKey;
/*
* Hash table that maps from Cursor identifiers to the TkCursor structure
* for the cursor. This table is indexed by Cursor ids, and is used by
* Tk_FreeCursor.
*/
static Tcl_HashTable idTable;
static int initialized = 0; /* 0 means static structures haven't been
* initialized yet. */
/*
* The table below is used to map from the name of a cursor to its
* index in the official cursor font:
*/
static struct CursorName {
char *name;
unsigned int shape;
} cursorNames[] = {
{"X_cursor", XC_X_cursor},
{"arrow", XC_arrow},
{"based_arrow_down", XC_based_arrow_down},
{"based_arrow_up", XC_based_arrow_up},
{"boat", XC_boat},
{"bogosity", XC_bogosity},
{"bottom_left_corner", XC_bottom_left_corner},
{"bottom_right_corner", XC_bottom_right_corner},
{"bottom_side", XC_bottom_side},
{"bottom_tee", XC_bottom_tee},
{"box_spiral", XC_box_spiral},
{"center_ptr", XC_center_ptr},
{"circle", XC_circle},
{"clock", XC_clock},
{"coffee_mug", XC_coffee_mug},
{"cross", XC_cross},
{"cross_reverse", XC_cross_reverse},
{"crosshair", XC_crosshair},
{"diamond_cross", XC_diamond_cross},
{"dot", XC_dot},
{"dotbox", XC_dotbox},
{"double_arrow", XC_double_arrow},
{"draft_large", XC_draft_large},
{"draft_small", XC_draft_small},
{"draped_box", XC_draped_box},
{"exchange", XC_exchange},
{"fleur", XC_fleur},
{"gobbler", XC_gobbler},
{"gumby", XC_gumby},
{"hand1", XC_hand1},
{"hand2", XC_hand2},
{"heart", XC_heart},
{"icon", XC_icon},
{"iron_cross", XC_iron_cross},
{"left_ptr", XC_left_ptr},
{"left_side", XC_left_side},
{"left_tee", XC_left_tee},
{"leftbutton", XC_leftbutton},
{"ll_angle", XC_ll_angle},
{"lr_angle", XC_lr_angle},
{"man", XC_man},
{"middlebutton", XC_middlebutton},
{"mouse", XC_mouse},
{"pencil", XC_pencil},
{"pirate", XC_pirate},
{"plus", XC_plus},
{"question_arrow", XC_question_arrow},
{"right_ptr", XC_right_ptr},
{"right_side", XC_right_side},
{"right_tee", XC_right_tee},
{"rightbutton", XC_rightbutton},
{"rtl_logo", XC_rtl_logo},
{"sailboat", XC_sailboat},
{"sb_down_arrow", XC_sb_down_arrow},
{"sb_h_double_arrow", XC_sb_h_double_arrow},
{"sb_left_arrow", XC_sb_left_arrow},
{"sb_right_arrow", XC_sb_right_arrow},
{"sb_up_arrow", XC_sb_up_arrow},
{"sb_v_double_arrow", XC_sb_v_double_arrow},
{"shuttle", XC_shuttle},
{"sizing", XC_sizing},
{"spider", XC_spider},
{"spraycan", XC_spraycan},
{"star", XC_star},
{"target", XC_target},
{"tcross", XC_tcross},
{"top_left_arrow", XC_top_left_arrow},
{"top_left_corner", XC_top_left_corner},
{"top_right_corner", XC_top_right_corner},
{"top_side", XC_top_side},
{"top_tee", XC_top_tee},
{"trek", XC_trek},
{"ul_angle", XC_ul_angle},
{"umbrella", XC_umbrella},
{"ur_angle", XC_ur_angle},
{"watch", XC_watch},
{"xterm", XC_xterm},
{NULL, 0}
};
/*
* Font to use for cursors:
*/
#ifndef CURSORFONT
#define CURSORFONT "cursor"
#endif
/*
* Forward declarations for procedures defined in this file:
*/
static void CursorInit _ANSI_ARGS_((void));
/*
*----------------------------------------------------------------------
*
* Tk_GetCursor --
*
* Given a string describing a cursor, locate (or create if necessary)
* a cursor that fits the description.
*
* Results:
* The return value is the X identifer for the desired cursor,
* unless string couldn't be parsed correctly. In this case,
* None is returned and an error message is left in interp->result.
* The caller should never modify the cursor that is returned, and
* should eventually call Tk_FreeCursor when the cursor is no longer
* needed.
*
* Side effects:
* The cursor is added to an internal database with a reference count.
* For each call to this procedure, there should eventually be a call
* to Tk_FreeCursor, so that the database can be cleaned up when cursors
* aren't needed anymore.
*
*----------------------------------------------------------------------
*/
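/*
 * Example cursor specs accepted below (illustrative): "watch", "dot red",
 * "dot red white", "@source.xbm red", "@source.xbm mask.xbm black white".
 */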
Cursor
Tk_GetCursor(interp, tkwin, string)
Tcl_Interp *interp; /* Interpreter to use for error reporting. */
Tk_Window tkwin; /* Window in which cursor will be used. */
Tk_Uid string; /* Description of cursor. See manual entry
* for details on legal syntax. */
{
NameKey key;
Tcl_HashEntry *nameHashPtr, *idHashPtr;
register TkCursor *cursorPtr;
int new;
Cursor cursor;
int argc;
char **argv = NULL;
Pixmap source = None;
Pixmap mask = None;
if (!initialized) {
CursorInit();
}
key.name = string;
key.display = Tk_Display(tkwin);
nameHashPtr = Tcl_CreateHashEntry(&nameTable, (char *) &key, &new);
if (!new) {
cursorPtr = (TkCursor *) Tcl_GetHashValue(nameHashPtr);
cursorPtr->refCount++;
return cursorPtr->cursor;
}
/*
* No suitable cursor exists. Parse the cursor name into fields
* and create a cursor, either from the standard cursor font or
* from bitmap files.
*/
if (Tcl_SplitList(interp, string, &argc, &argv) != TCL_OK) {
goto error;
}
if (argc == 0) {
badString:
Tcl_AppendResult(interp, "bad cursor spec \"", string, "\"",
(char *) NULL);
goto error;
}
if (argv[0][0] != '@') {
XColor fg, bg;
int maskIndex;
register struct CursorName *namePtr;
TkDisplay *dispPtr;
/*
* The cursor is to come from the standard cursor font. If one
* arg, it is cursor name (use black and white for fg and bg).
* If two args, they are name and fg color (ignore mask). If
* three args, they are name, fg, bg. Some of the code below
* is stolen from the XCreateFontCursor Xlib procedure.
*/
if (argc > 3) {
goto badString;
}
for (namePtr = cursorNames; ; namePtr++) {
if (namePtr->name == NULL) {
goto badString;
}
if ((namePtr->name[0] == argv[0][0])
&& (strcmp(namePtr->name, argv[0]) == 0)) {
break;
}
}
maskIndex = namePtr->shape + 1;
if (argc == 1) {
fg.red = fg.green = fg.blue = 0;
bg.red = bg.green = bg.blue = 65535;
} else {
if (XParseColor(key.display,
Tk_DefaultColormap(Tk_Screen(tkwin)),
argv[1], &fg) == 0) {
Tcl_AppendResult(interp, "invalid color name \"", argv[1],
"\"", (char *) NULL);
goto error;
}
if (argc == 2) {
bg.red = bg.green = bg.blue = 0;
maskIndex = namePtr->shape;
} else {
if (XParseColor(key.display,
Tk_DefaultColormap(Tk_Screen(tkwin)),
argv[2], &bg) == 0) {
Tcl_AppendResult(interp, "invalid color name \"", argv[2],
"\"", (char *) NULL);
goto error;
}
}
}
dispPtr = ((TkWindow *) tkwin)->dispPtr;
if (dispPtr->cursorFont == None) {
dispPtr->cursorFont = XLoadFont(key.display, CURSORFONT);
if (dispPtr->cursorFont == None) {
interp->result = "couldn't load cursor font";
goto error;
}
}
cursor = XCreateGlyphCursor(key.display, dispPtr->cursorFont,
dispPtr->cursorFont, namePtr->shape, maskIndex,
&fg, &bg);
} else {
unsigned int width, height, maskWidth, maskHeight;
int xHot, yHot, dummy1, dummy2;
XColor fg, bg;
/*
* The cursor is to be created by reading bitmap files. There
* should be either two elements in the list (source, color) or
* four (source mask fg bg).
*/
if ((argc != 2) && (argc != 4)) {
goto badString;
}
if (XReadBitmapFile(key.display, RootWindowOfScreen(Tk_Screen(tkwin)),
&argv[0][1], &width, &height, &source, &xHot, &yHot)
!= BitmapSuccess) {
Tcl_AppendResult(interp, "error reading bitmap file \"",
&argv[0][1], "\"", (char *) NULL);
goto error;
}
if ((xHot < 0) || (yHot < 0) || (xHot >= width) || (yHot >= height)) {
Tcl_AppendResult(interp, "bad hot spot in bitmap file \"",
&argv[0][1], "\"", (char *) NULL);
goto error;
}
if (argc == 2) {
if (XParseColor(key.display,
Tk_DefaultColormap(Tk_Screen(tkwin)),
argv[1], &fg) == 0) {
Tcl_AppendResult(interp, "invalid color name \"",
argv[1], "\"", (char *) NULL);
goto error;
}
cursor = XCreatePixmapCursor(key.display, source, source,
&fg, &fg, xHot, yHot);
} else {
if (XReadBitmapFile(key.display,
RootWindowOfScreen(Tk_Screen(tkwin)), argv[1],
&maskWidth, &maskHeight, &mask, &dummy1,
&dummy2) != BitmapSuccess) {
Tcl_AppendResult(interp, "error reading bitmap file \"",
argv[1], "\"", (char *) NULL);
goto error;
}
	if ((maskWidth != width) || (maskHeight != height)) {
interp->result =
"source and mask bitmaps have different sizes";
goto error;
}
if (XParseColor(key.display,
Tk_DefaultColormap(Tk_Screen(tkwin)),
argv[2], &fg) == 0) {
Tcl_AppendResult(interp, "invalid color name \"", argv[2],
"\"", (char *) NULL);
goto error;
}
if (XParseColor(key.display,
Tk_DefaultColormap(Tk_Screen(tkwin)),
argv[3], &bg) == 0) {
Tcl_AppendResult(interp, "invalid color name \"", argv[3],
"\"", (char *) NULL);
goto error;
}
cursor = XCreatePixmapCursor(key.display, source, mask,
&fg, &bg, xHot, yHot);
}
}
ckfree((char *) argv);
/*
* Add information about this cursor to our database.
*/
cursorPtr = (TkCursor *) ckalloc(sizeof(TkCursor));
cursorPtr->cursor = cursor;
cursorPtr->display = key.display;
cursorPtr->refCount = 1;
cursorPtr->otherTable = &nameTable;
cursorPtr->hashPtr = nameHashPtr;
idHashPtr = Tcl_CreateHashEntry(&idTable, (char *) cursorPtr->cursor,
&new);
if (!new) {
/* deh patched to support multiple displays */
/* panic("cursor already registered in Tk_GetCursor"); */
cursorPtr->refCount = 1000;
}
Tcl_SetHashValue(nameHashPtr, cursorPtr);
Tcl_SetHashValue(idHashPtr, cursorPtr);
return cursorPtr->cursor;
error:
Tcl_DeleteHashEntry(nameHashPtr);
if (argv != NULL) {
ckfree((char *) argv);
}
if (source != None) {
XFreePixmap(key.display, source);
}
if (mask != None) {
XFreePixmap(key.display, mask);
}
return None;
}
/*
*----------------------------------------------------------------------
*
* Tk_GetCursorFromData --
*
* Given a description of the bits and colors for a cursor,
* make a cursor that has the given properties.
*
* Results:
* The return value is the X identifer for the desired cursor,
* unless it couldn't be created properly. In this case, None is
* returned and an error message is left in interp->result. The
* caller should never modify the cursor that is returned, and
* should eventually call Tk_FreeCursor when the cursor is no
* longer needed.
*
* Side effects:
* The cursor is added to an internal database with a reference count.
* For each call to this procedure, there should eventually be a call
* to Tk_FreeCursor, so that the database can be cleaned up when cursors
* aren't needed anymore.
*
*----------------------------------------------------------------------
*/
Cursor
Tk_GetCursorFromData(interp, tkwin, source, mask, width, height,
xHot, yHot, fg, bg)
Tcl_Interp *interp; /* Interpreter to use for error reporting. */
Tk_Window tkwin; /* Window in which cursor will be used. */
char *source; /* Bitmap data for cursor shape. */
char *mask; /* Bitmap data for cursor mask. */
unsigned int width, height; /* Dimensions of cursor. */
int xHot, yHot; /* Location of hot-spot in cursor. */
Tk_Uid fg; /* Foreground color for cursor. */
Tk_Uid bg; /* Background color for cursor. */
{
DataKey key;
Tcl_HashEntry *dataHashPtr, *idHashPtr;
register TkCursor *cursorPtr;
int new;
XColor fgColor, bgColor;
Pixmap sourcePixmap, maskPixmap;
if (!initialized) {
CursorInit();
}
key.source = source;
key.mask = mask;
key.width = width;
key.height = height;
key.xHot = xHot;
key.yHot = yHot;
key.fg = fg;
key.bg = bg;
key.display = Tk_Display(tkwin);
dataHashPtr = Tcl_CreateHashEntry(&dataTable, (char *) &key, &new);
if (!new) {
cursorPtr = (TkCursor *) Tcl_GetHashValue(dataHashPtr);
cursorPtr->refCount++;
return cursorPtr->cursor;
}
/*
* No suitable cursor exists yet. Make one using the data
* available and add it to the database.
*/
if (XParseColor(key.display, Tk_DefaultColormap(Tk_Screen(tkwin)),
fg, &fgColor) == 0) {
Tcl_AppendResult(interp, "invalid color name \"", fg, "\"",
(char *) NULL);
goto error;
}
if (XParseColor(key.display, Tk_DefaultColormap(Tk_Screen(tkwin)),
bg, &bgColor) == 0) {
Tcl_AppendResult(interp, "invalid color name \"", bg, "\"",
(char *) NULL);
goto error;
}
cursorPtr = (TkCursor *) ckalloc(sizeof(TkCursor));
sourcePixmap = XCreateBitmapFromData(key.display,
RootWindowOfScreen(Tk_Screen(tkwin)), source, width, height);
maskPixmap = XCreateBitmapFromData(key.display,
RootWindowOfScreen(Tk_Screen(tkwin)), mask, width, height);
cursorPtr->cursor = XCreatePixmapCursor(key.display, sourcePixmap,
maskPixmap, &fgColor, &bgColor, xHot, yHot);
XFreePixmap(key.display, sourcePixmap);
XFreePixmap(key.display, maskPixmap);
cursorPtr->display = key.display;
cursorPtr->refCount = 1;
cursorPtr->otherTable = &dataTable;
cursorPtr->hashPtr = dataHashPtr;
idHashPtr = Tcl_CreateHashEntry(&idTable, (char *) cursorPtr->cursor, &new);
if (!new) {
/* deh patched to support multiple displays */
/* panic("cursor already registered in Tk_GetCursorFromData"); */
cursorPtr->refCount = 1000;
}
Tcl_SetHashValue(dataHashPtr, cursorPtr);
Tcl_SetHashValue(idHashPtr, cursorPtr);
return cursorPtr->cursor;
error:
Tcl_DeleteHashEntry(dataHashPtr);
return None;
}
/*
*--------------------------------------------------------------
*
* Tk_NameOfCursor --
*
* Given a cursor, return a textual string identifying it.
*
* Results:
* If cursor was created by Tk_GetCursor, then the return
* value is the "string" that was used to create it.
* Otherwise the return value is a string giving the X
* identifier for the cursor. The storage for the returned
* string is only guaranteed to persist up until the next
* call to this procedure.
*
* Side effects:
* None.
*
*--------------------------------------------------------------
*/
char *
Tk_NameOfCursor(cursor)
Cursor cursor; /* Cursor to be released. */
{
Tcl_HashEntry *idHashPtr;
TkCursor *cursorPtr;
static char string[32];
if (!initialized) {
printid:
sprintf(string, "cursor id 0x%lx", (unsigned long) cursor);
return string;
}
idHashPtr = Tcl_FindHashEntry(&idTable, (char *) cursor);
if (idHashPtr == NULL) {
goto printid;
}
cursorPtr = (TkCursor *) Tcl_GetHashValue(idHashPtr);
if (cursorPtr->otherTable != &nameTable) {
goto printid;
}
return ((NameKey *) cursorPtr->hashPtr->key.words)->name;
}
/*
*----------------------------------------------------------------------
*
* Tk_FreeCursor --
*
* This procedure is called to release a cursor allocated by
* Tk_GetCursor or TkGetCursorFromData.
*
* Results:
* None.
*
* Side effects:
* The reference count associated with cursor is decremented, and
* it is officially deallocated if no-one is using it anymore.
*
*----------------------------------------------------------------------
*/
void
Tk_FreeCursor(cursor)
Cursor cursor; /* Cursor to be released. */
{
Tcl_HashEntry *idHashPtr;
register TkCursor *cursorPtr;
if (!initialized) {
panic("Tk_FreeCursor called before Tk_GetCursor");
}
idHashPtr = Tcl_FindHashEntry(&idTable, (char *) cursor);
if (idHashPtr == NULL) {
panic("Tk_FreeCursor received unknown cursor argument");
}
cursorPtr = (TkCursor *) Tcl_GetHashValue(idHashPtr);
cursorPtr->refCount--;
if (cursorPtr->refCount == 0) {
XFreeCursor(cursorPtr->display, cursorPtr->cursor);
Tcl_DeleteHashEntry(cursorPtr->hashPtr);
Tcl_DeleteHashEntry(idHashPtr);
ckfree((char *) cursorPtr);
}
}
/*
*----------------------------------------------------------------------
*
* CursorInit --
*
* Initialize the structures used for cursor management.
*
* Results:
* None.
*
* Side effects:
* Read the code.
*
*----------------------------------------------------------------------
*/
static void
CursorInit()
{
initialized = 1;
Tcl_InitHashTable(&nameTable, sizeof(NameKey)/sizeof(long));
Tcl_InitHashTable(&dataTable, sizeof(DataKey)/sizeof(long));
Tcl_InitHashTable(&idTable, TCL_ONE_WORD_KEYS);
}
| 7,545 |
945 | {
"csla": {
"propertyChangedMode": "Windows",
"propertyInfoFactory": "test1,test2",
"reader": "testReader",
"serializationFormatter": "testSerializationFormatter",
"mobileFactoryLoader": "csla.netcore.test.Configuration.TestMobileFactoryLoader, csla.netcore.test, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null",
"PrincipalCacheSize": 10,
"defaultTransactionIsolationLevel": "RepeatableRead",
"mobileWriter": "csla.netcore.test.Configuration.TestCslaWriter, csla.netcore.test, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null",
"defaultTransactionTimeoutInSeconds": 60,
"CslaDbProvider": "testDbProvider"
}
} | 234 |
1,467 | <filename>.changes/2.4.12.json
{
"date": "2019-02-20",
"version": "2.4.12",
"entries": [
{
"category": "AWS Direct Connect",
"type": "feature",
"description": "Documentation updates for AWS Direct Connect"
},
{
"category": "AWS CodeCommit",
"type": "feature",
"description": "This release adds an API for adding / updating / deleting / copying / moving / setting file modes for one or more files directly to an AWS CodeCommit repository without requiring a Git client."
},
{
"category": "AWS SDK for Java v2",
"type": "bugfix",
"description": "Fixed an issue where the SDK could be over-retrying on signature errors."
},
{
"category": "AWS Elemental MediaLive",
"type": "feature",
"description": "This release adds support for VPC inputs, allowing you to push content from your Amazon VPC directly to MediaLive."
},
{
"category": "AWS SDK for Java v2",
"type": "bugfix",
"description": "Fixed an issue where the SDK could fail to adjust the local clock under skewed-clock conditions."
}
]
} | 538 |
595 | <gh_stars>100-1000
//
// PBBudgetMonth.h
// Predicitve Budget
//
// Created by <NAME> on 2015. 02. 12..
// Copyright (c) 2015. DroidZONE. All rights reserved.
//
#import <Foundation/Foundation.h>
#import "PBBudget.h"
#import "PBMonthlyCategoryBudget.h"
@interface PBBudgetMonth : NSObject
@property (nonatomic, weak) PBBudget* budget;
@property (nonatomic,strong) NSNumber* year;
@property (nonatomic,strong) NSNumber* month;
@property (nonatomic, strong, readonly) NSNumber* budgetedAmount;//calculated
@property (nonatomic, strong, readonly) NSNumber* availableAmount;//calculated
@property (nonatomic, strong, readonly) NSNumber* spentAmount;//calculated
@property (nonatomic, strong) NSMutableArray* monthlyCategoryBudgets;
- (instancetype) initWithBudget:(PBBudget*)b year:(NSInteger)y month:(NSInteger)m NS_DESIGNATED_INITIALIZER;
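// Usage sketch (illustrative; `budget` is an existing PBBudget instance):
//   PBBudgetMonth *bm = [[PBBudgetMonth alloc] initWithBudget:budget year:2015 month:2];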
@end
| 285 |
1,178 | <reponame>leozz37/makani
/*
* Copyright 2020 Makani Technologies LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// CVT abstraction to switch between motors and motor dynos.
#ifndef AVIONICS_MOTOR_FIRMWARE_IO_CVT_H_
#define AVIONICS_MOTOR_FIRMWARE_IO_CVT_H_
#include <stdbool.h>
#include <stdint.h>
#include "avionics/common/avionics_messages.h"
#include "avionics/common/controller_arbitration.h"
#include "avionics/network/aio_node.h"
typedef struct {
int16_t command; // See MotorCommandFlag.
float speed_upper_limit; // [rad/s]
float speed_lower_limit; // [rad/s]
float torque; // [N-m]
} MotorCommand;
void CvtInit(int64_t now, int32_t motor_index);
bool CvtGetGetParamMessage(AioNode source, MotorGetParamMessage *msg,
uint16_t *sequence, int64_t *timestamp);
bool CvtGetSetParamMessage(AioNode source, MotorSetParamMessage *msg,
uint16_t *sequence, int64_t *timestamp);
bool CvtGetSetStateMessage(AioNode source, MotorSetStateMessage *msg,
uint16_t *sequence, int64_t *timestamp);
bool CvtGetStackingMessage(AioNode source, MotorStackingMessage *msg,
uint16_t *sequence, int64_t *timestamp);
bool CvtGetMotorCommand(int64_t now, MotorCommand* cmd,
ControllerLabel *source);
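// Usage sketch (illustrative; the timestamp source and motor index are elided):
//   CvtInit(now, motor_index);
//   MotorCommand cmd;
//   ControllerLabel source;
//   if (CvtGetMotorCommand(now, &cmd, &source)) { /* apply cmd */ }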
#endif // AVIONICS_MOTOR_FIRMWARE_IO_CVT_H_
| 738 |
777 | <filename>components/favicon_base/large_icon_url_parser.cc
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/favicon_base/large_icon_url_parser.h"
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "third_party/skia/include/utils/SkParse.h"
#include "ui/gfx/favicon_size.h"
LargeIconUrlParser::LargeIconUrlParser() : size_in_pixels_(48) {
}
LargeIconUrlParser::~LargeIconUrlParser() {
}
bool LargeIconUrlParser::Parse(base::StringPiece path) {
if (path.empty())
return false;
size_t slash = path.find("/", 0); // |path| does not start with '/'.
if (slash == base::StringPiece::npos)
return false;
base::StringPiece size_str = path.substr(0, slash);
// Disallow empty, non-numeric, or non-positive sizes.
if (size_str.empty() ||
!base::StringToInt(size_str, &size_in_pixels_) ||
size_in_pixels_ <= 0)
return false;
// Need to store the index of the URL field, so Instant Extended can translate
// large icon URLs using advanced parameters.
// Example:
// "chrome-search://large-icon/48/<renderer-id>/<most-visited-id>"
// would be translated to:
// "chrome-search://large-icon/48/<most-visited-item-with-given-id>".
path_index_ = slash + 1;
url_string_ = path.substr(path_index_).as_string();
return true;
}
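// Usage sketch (illustrative; accessors for the parsed fields are assumed to
// be declared in the header):
//   LargeIconUrlParser parser;
//   if (parser.Parse("48/https://www.example.com/")) {
//     // parser now holds a 48-pixel size and the trailing URL string.
//   }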
| 547 |
454 | <reponame>agave233/PaddleHelix
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Graph-based models for compounds.
"""
import numpy as np
import paddle
import paddle.nn as nn
import pgl
from pahelix.networks.compound_encoder import AtomEmbedding, BondEmbedding
from pahelix.networks.gnn_block import MeanPool
from src.utils import get_positive_expectation, get_negative_expectation
class GINEncoder(nn.Layer):
"""
| GIN Encoder for unsupervised InfoGraph.
Public Functions:
- ``forward``: forward to create the GIN compound encoder.
- ``get_embeddings``: compute all the embeddings given dataset.
- ``embedding_dim``: get dimension of the embedding.
"""
def __init__(self, config):
super(GINEncoder, self).__init__()
self.hidden_size = config['hidden_size']
self.num_layers = config['num_layers']
self.embed_dim = config['embed_dim']
self.atom_type_num = config['atom_type_num']
self.chirality_tag_num = config['chirality_tag_num']
self.bond_type_num = config['bond_type_num']
self.bond_direction_num = config['bond_direction_num']
self.readout = config['readout']
self.activation = config['activation']
self.atom_names = config['atom_names']
self.bond_names = config['bond_names']
self.atom_embedding = AtomEmbedding(self.atom_names, self.embed_dim)
self.gin_list = nn.LayerList()
self.norm_list = nn.LayerList()
for layer_id in range(self.num_layers):
self.gin_list.append(
pgl.nn.GINConv(self.embed_dim, self.embed_dim, activation=self.activation))
self.norm_list.append(nn.BatchNorm1D(self.embed_dim))
if self.readout == 'mean':
self.graph_pool = MeanPool()
else:
self.graph_pool = pgl.nn.GraphPool(pool_type=self.readout)
def forward(self, graph):
"""
Build the network.
"""
x = self.atom_embedding(graph.node_feat)
x = paddle.squeeze(x, axis=1)
patch_repr = []
for i in range(self.num_layers):
x = self.gin_list[i](graph, x)
x = self.norm_list[i](x)
patch_repr.append(x) # $h_i^{(k)}$
patch_summary = paddle.concat(patch_repr, axis=1) # $h_{\phi}^i$
patch_pool = [self.graph_pool(graph, x) for x in patch_repr]
global_repr = paddle.concat(patch_pool, axis=1)
return global_repr, patch_summary
@property
def embedding_dim(self):
return self.num_layers * self.hidden_size
class FF(nn.Layer):
"""Feedforward network with linear shortcut for InfoGraph"""
def __init__(self, in_size, hidden_size, num_layers=3):
super(FF, self).__init__()
layers = []
for layer_id in range(num_layers):
if layer_id == 0:
layers.append(nn.Linear(in_size, hidden_size))
else:
layers.append(nn.Linear(hidden_size, hidden_size))
layers.append(nn.ReLU())
self.block = nn.Sequential(*layers)
self.linear_shortcut = nn.Linear(in_size, hidden_size)
def forward(self, x):
return self.block(x) + self.linear_shortcut(x)
class PriorDiscriminator(nn.Layer):
"""Prior discriminator for InfoGraph"""
def __init__(self, in_size, hidden_size, num_layers=3):
super(PriorDiscriminator, self).__init__()
assert num_layers > 1
layers = []
for layer_id in range(num_layers):
if layer_id == 0:
layers.append(nn.Linear(in_size, hidden_size))
layers.append(nn.ReLU())
elif layer_id < num_layers - 1:
layers.append(nn.Linear(hidden_size, hidden_size))
layers.append(nn.ReLU())
else:
layers.append(nn.Linear(hidden_size, 1))
self.mlp = nn.Sequential(*layers)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.mlp(x)
return self.sigmoid(x)
class InfoGraph(nn.Layer):
"""InfoGraph model.
Args:
config (dict): config dictionary of the GIN encoder.
Returns:
global_repr (Tensor): global-level representation of graph
enc (Tensor): path-level representation of nodes
Reference: InfoGraph: Unsupervised and Semi-supervised Graph-Level
Representation Learning via Mutual Information Maximization
"""
def __init__(self, config):
super(InfoGraph, self).__init__()
self.encoder = GINEncoder(config)
dim = self.encoder.embedding_dim
self.feedforward = FF(dim, dim)
def forward(self, graph):
global_repr, patch_summary = self.encoder(graph)
g_enc = self.feedforward(global_repr)
l_enc = self.feedforward(patch_summary)
enc = paddle.matmul(l_enc, g_enc, transpose_y=True)
return global_repr, enc
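# Usage sketch (illustrative; `graph` is a batched pgl.Graph and `config`
# carries the keys read by GINEncoder.__init__):
#   model = InfoGraph(config)
#   global_repr, enc = model(graph)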
class InfoGraphCriterion(nn.Layer):
""" Criterion of InfoGraph unspervised learning model
via maximization of mutual information.
"""
def __init__(self, config):
super(InfoGraphCriterion, self).__init__()
self.dim = config['hidden_size'] * config['num_layers']
self.measure = config['measure']
self.prior = config['prior']
self.gamma = config['gamma']
if self.prior:
self.prior_discriminator = PriorDiscriminator(self.dim, self.dim)
def forward(self, graph, global_repr, enc, pos_mask, neg_mask, prior=None):
E_pos = get_positive_expectation(
enc * pos_mask, self.measure, average=False)
E_pos = paddle.sum(E_pos) / graph.num_nodes
E_neg = get_negative_expectation(
enc * neg_mask, self.measure, average=False)
E_neg = paddle.sum(E_neg) / (graph.num_nodes * (graph.num_graph - 1))
local_global_loss = E_neg - E_pos
if self.prior:
term_1 = paddle.mean(paddle.log(self.prior_discriminator(prior)))
term_2 = paddle.mean(
paddle.log(1.0 - self.prior_discriminator(global_repr)))
prior_loss = - (term_1 + term_2) * self.gamma
else:
prior_loss = 0
return local_global_loss + prior_loss
| 2,954 |
4,829 | <filename>td/telegram/FullMessageId.h<gh_stars>1000+
//
// Copyright <NAME> (<EMAIL>), <NAME> (<EMAIL>) 2014-2021
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#pragma once
#include "td/telegram/DialogId.h"
#include "td/telegram/MessageId.h"
#include "td/utils/common.h"
#include "td/utils/StringBuilder.h"
namespace td {
struct FullMessageId {
private:
DialogId dialog_id;
MessageId message_id;
public:
FullMessageId() : dialog_id(), message_id() {
}
FullMessageId(DialogId dialog_id, MessageId message_id) : dialog_id(dialog_id), message_id(message_id) {
}
bool operator==(const FullMessageId &other) const {
return dialog_id == other.dialog_id && message_id == other.message_id;
}
bool operator!=(const FullMessageId &other) const {
return !(*this == other);
}
DialogId get_dialog_id() const {
return dialog_id;
}
MessageId get_message_id() const {
return message_id;
}
template <class StorerT>
void store(StorerT &storer) const {
dialog_id.store(storer);
message_id.store(storer);
}
template <class ParserT>
void parse(ParserT &parser) {
dialog_id.parse(parser);
message_id.parse(parser);
}
};
struct FullMessageIdHash {
std::size_t operator()(FullMessageId full_message_id) const {
return DialogIdHash()(full_message_id.get_dialog_id()) * 2023654985u +
MessageIdHash()(full_message_id.get_message_id());
}
};
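// Usage sketch (illustrative):
//   std::unordered_set<FullMessageId, FullMessageIdHash> seen;
//   seen.insert(FullMessageId(dialog_id, message_id));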
inline StringBuilder &operator<<(StringBuilder &string_builder, FullMessageId full_message_id) {
return string_builder << full_message_id.get_message_id() << " in " << full_message_id.get_dialog_id();
}
} // namespace td
| 645 |
372 | /*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.sheets.v4.model;
/**
* Additional properties of a DATA_SOURCE sheet.
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Google Sheets API. For a detailed explanation see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class DataSourceSheetProperties extends com.google.api.client.json.GenericJson {
/**
* The columns displayed on the sheet, corresponding to the values in RowData.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<DataSourceColumn> columns;
static {
// hack to force ProGuard to consider DataSourceColumn used, since otherwise it would be stripped out
// see https://github.com/google/google-api-java-client/issues/543
com.google.api.client.util.Data.nullOf(DataSourceColumn.class);
}
/**
* The data execution status.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private DataExecutionStatus dataExecutionStatus;
/**
* ID of the DataSource the sheet is connected to.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String dataSourceId;
/**
* The columns displayed on the sheet, corresponding to the values in RowData.
* @return value or {@code null} for none
*/
public java.util.List<DataSourceColumn> getColumns() {
return columns;
}
/**
* The columns displayed on the sheet, corresponding to the values in RowData.
* @param columns columns or {@code null} for none
*/
public DataSourceSheetProperties setColumns(java.util.List<DataSourceColumn> columns) {
this.columns = columns;
return this;
}
/**
* The data execution status.
* @return value or {@code null} for none
*/
public DataExecutionStatus getDataExecutionStatus() {
return dataExecutionStatus;
}
/**
* The data execution status.
* @param dataExecutionStatus dataExecutionStatus or {@code null} for none
*/
public DataSourceSheetProperties setDataExecutionStatus(DataExecutionStatus dataExecutionStatus) {
this.dataExecutionStatus = dataExecutionStatus;
return this;
}
/**
* ID of the DataSource the sheet is connected to.
* @return value or {@code null} for none
*/
public java.lang.String getDataSourceId() {
return dataSourceId;
}
/**
* ID of the DataSource the sheet is connected to.
* @param dataSourceId dataSourceId or {@code null} for none
*/
public DataSourceSheetProperties setDataSourceId(java.lang.String dataSourceId) {
this.dataSourceId = dataSourceId;
return this;
}
@Override
public DataSourceSheetProperties set(String fieldName, Object value) {
return (DataSourceSheetProperties) super.set(fieldName, value);
}
@Override
public DataSourceSheetProperties clone() {
return (DataSourceSheetProperties) super.clone();
}
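// Usage sketch (illustrative; the id value is hypothetical):
//   DataSourceSheetProperties props = new DataSourceSheetProperties()
//       .setDataSourceId("my-data-source-id");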
}
| 1,183 |
30,023 | <reponame>MrDelik/core
"""Support for LightwaveRF switches."""
from __future__ import annotations
from homeassistant.components.switch import SwitchEntity
from homeassistant.const import CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import LIGHTWAVE_LINK
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Find and return LightWave switches."""
if not discovery_info:
return
switches = []
lwlink = hass.data[LIGHTWAVE_LINK]
for device_id, device_config in discovery_info.items():
name = device_config[CONF_NAME]
switches.append(LWRFSwitch(name, device_id, lwlink))
async_add_entities(switches)
class LWRFSwitch(SwitchEntity):
"""Representation of a LightWaveRF switch."""
_attr_should_poll = False
def __init__(self, name, device_id, lwlink):
"""Initialize LWRFSwitch entity."""
self._attr_name = name
self._device_id = device_id
self._lwlink = lwlink
async def async_turn_on(self, **kwargs):
"""Turn the LightWave switch on."""
self._attr_is_on = True
self._lwlink.turn_on_switch(self._device_id, self._attr_name)
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the LightWave switch off."""
self._attr_is_on = False
self._lwlink.turn_off(self._device_id, self._attr_name)
self.async_write_ha_state()
| 647 |
381 | <gh_stars>100-1000
from rpython.flowspace.model import Constant
from rpython.rtyper.lltypesystem import lltype, llmemory, rdict
from rpython.rtyper.lltypesystem.llmemory import weakref_create, weakref_deref
from rpython.rtyper import rclass
from rpython.rtyper.error import TyperError
from rpython.rtyper.rclass import getinstancerepr
from rpython.rtyper.rmodel import Repr
from rpython.rlib.rweakref import RWeakKeyDictionary
from rpython.rlib import jit
from rpython.rlib.objectmodel import compute_identity_hash
# Warning: this implementation of RWeakKeyDictionary is not exactly
# leaking, but can keep around some values for a long time, even after
# the corresponding keys were freed. They will be eventually freed if
# you continue to manipulate the dictionary. Avoid to use this if the
# values are objects that might keep alive tons of memory.
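# Usage sketch at the RPython level (illustrative; KeyClass/ValueClass are
# hypothetical instance classes):
#   d = RWeakKeyDictionary(KeyClass, ValueClass)
#   d.set(key, value) # the key is referenced weakly
#   v = d.get(key) # returns None once the key has been collected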
class WeakKeyDictRepr(Repr):
def __init__(self, rtyper):
self.rtyper = rtyper
self.lowleveltype = lltype.Ptr(WEAKDICT)
self.dict_cache = {}
def convert_const(self, weakdict):
if not isinstance(weakdict, RWeakKeyDictionary):
raise TyperError("expected an RWeakKeyDictionary: %r" % (
weakdict,))
try:
key = Constant(weakdict)
return self.dict_cache[key]
except KeyError:
self.setup()
if weakdict.length() != 0:
raise TyperError("got a non-empty prebuilt RWeakKeyDictionary")
l_dict = ll_new_weakdict()
self.dict_cache[key] = l_dict
return l_dict
def rtype_method_get(self, hop):
r_object = getinstancerepr(self.rtyper, None)
v_d, v_key = hop.inputargs(self, r_object)
hop.exception_cannot_occur()
v_result = hop.gendirectcall(ll_get, v_d, v_key)
v_result = hop.genop("cast_pointer", [v_result],
resulttype=hop.r_result.lowleveltype)
return v_result
def rtype_method_set(self, hop):
r_object = getinstancerepr(self.rtyper, None)
v_d, v_key, v_value = hop.inputargs(self, r_object, r_object)
hop.exception_cannot_occur()
if hop.args_s[2].is_constant() and hop.args_s[2].const is None:
hop.gendirectcall(ll_set_null, v_d, v_key)
else:
hop.gendirectcall(ll_set, v_d, v_key, v_value)
def rtype_method_length(self, hop):
v_d, = hop.inputargs(self)
hop.exception_cannot_occur()
return hop.gendirectcall(ll_length, v_d)
def specialize_make_weakdict(hop):
hop.exception_cannot_occur()
v_d = hop.gendirectcall(ll_new_weakdict)
return v_d
# ____________________________________________________________
NULLVALUE = lltype.nullptr(rclass.OBJECTPTR.TO)
WEAKDICTENTRY = lltype.Struct("weakdictentry",
("key", llmemory.WeakRefPtr),
("value", rclass.OBJECTPTR),
("f_hash", lltype.Signed))
def ll_debugrepr(x):
if x:
h = compute_identity_hash(x)
else:
h = 0
return '<%x>' % (h,)
def ll_valid(entries, i):
key = entries[i].key
if not key:
return False
elif weakref_deref(rclass.OBJECTPTR, key):
return True
else:
# The entry might be a dead weakref still holding a strong
# reference to the value; for this case, we clear the old
# value from the entry, if any.
entries[i].value = NULLVALUE
return False
def ll_everused(entries, i):
return bool(entries[i].key)
entrymeths = {
'allocate': lltype.typeMethod(rdict._ll_malloc_entries),
'delete': rdict._ll_free_entries,
'valid': ll_valid,
'everused': ll_everused,
'hash': rdict.ll_hash_from_cache,
'no_direct_compare': True,
}
WEAKDICTENTRYARRAY = lltype.GcArray(WEAKDICTENTRY,
adtmeths=entrymeths,
hints={'weakarray': 'key'})
# NB. the 'hints' is not used so far ^^^
@jit.dont_look_inside
def ll_new_weakdict():
d = lltype.malloc(WEAKDICT)
d.entries = WEAKDICT.entries.TO.allocate(rdict.DICT_INITSIZE)
d.num_items = 0
d.resize_counter = rdict.DICT_INITSIZE * 2
return d
@jit.dont_look_inside
def ll_get(d, llkey):
hash = compute_identity_hash(llkey)
i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK
#llop.debug_print(lltype.Void, i, 'get', hex(hash),
# ll_debugrepr(d.entries[i].key),
# ll_debugrepr(d.entries[i].value))
# NB. ll_valid() above was just called at least on entry i, so if
# it is an invalid entry with a dead weakref, the value was reset
# to NULLVALUE.
return d.entries[i].value
@jit.dont_look_inside
def ll_set(d, llkey, llvalue):
if llvalue:
ll_set_nonnull(d, llkey, llvalue)
else:
ll_set_null(d, llkey)
@jit.dont_look_inside
def ll_set_nonnull(d, llkey, llvalue):
hash = compute_identity_hash(llkey)
keyref = weakref_create(llkey) # GC effects here, before the rest
i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK
everused = d.entries.everused(i)
d.entries[i].key = keyref
d.entries[i].value = llvalue
d.entries[i].f_hash = hash
#llop.debug_print(lltype.Void, i, 'stored', hex(hash),
# ll_debugrepr(llkey),
# ll_debugrepr(llvalue))
if not everused:
d.resize_counter -= 3
if d.resize_counter <= 0:
#llop.debug_print(lltype.Void, 'RESIZE')
ll_weakdict_resize(d)
@jit.dont_look_inside
def ll_set_null(d, llkey):
hash = compute_identity_hash(llkey)
i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK
if d.entries.everused(i):
# If the entry was ever used, clean up its key and value.
# We don't store a NULL value, but a dead weakref, because
# the entry must still be marked as everused().
d.entries[i].key = llmemory.dead_wref
d.entries[i].value = NULLVALUE
#llop.debug_print(lltype.Void, i, 'zero')
def ll_update_num_items(d):
entries = d.entries
num_items = 0
for i in range(len(entries)):
if entries.valid(i):
num_items += 1
d.num_items = num_items
def ll_weakdict_resize(d):
# first set num_items to its correct, up-to-date value
ll_update_num_items(d)
rdict.ll_dict_resize(d)
def ll_keyeq(d, weakkey1, realkey2):
# only called by ll_dict_lookup() with the first arg coming from an
# entry.key, and the 2nd arg being the argument to ll_dict_lookup().
if not weakkey1:
assert bool(realkey2)
return False
return weakref_deref(rclass.OBJECTPTR, weakkey1) == realkey2
@jit.dont_look_inside
def ll_length(d):
# xxx slow, but it's only for debugging
ll_update_num_items(d)
#llop.debug_print(lltype.Void, 'length:', d.num_items)
return d.num_items
dictmeths = {
'll_get': ll_get,
'll_set': ll_set,
'keyeq': ll_keyeq,
'paranoia': False,
}
WEAKDICT = lltype.GcStruct("weakkeydict",
("num_items", lltype.Signed),
("resize_counter", lltype.Signed),
("entries", lltype.Ptr(WEAKDICTENTRYARRAY)),
adtmeths=dictmeths)
| 3,407 |
575 | // Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/ash/scanning/scan_service.h"
#include <cstdint>
#include <map>
#include <string>
#include <vector>
#include "ash/content/scanning/mojom/scanning.mojom-test-utils.h"
#include "ash/content/scanning/mojom/scanning.mojom.h"
#include "base/containers/flat_set.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/scoped_temp_dir.h"
#include "base/optional.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/test/metrics/histogram_tester.h"
#include "base/test/task_environment.h"
#include "base/time/time.h"
#include "base/unguessable_token.h"
#include "chrome/browser/ash/scanning/fake_lorgnette_scanner_manager.h"
#include "chromeos/dbus/lorgnette/lorgnette_service.pb.h"
#include "mojo/public/cpp/bindings/pending_remote.h"
#include "mojo/public/cpp/bindings/receiver.h"
#include "mojo/public/cpp/bindings/remote.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/skia/include/core/SkBitmap.h"
#include "ui/gfx/codec/png_codec.h"
namespace ash {
namespace {
namespace mojo_ipc = scanning::mojom;
// Path to the user's "My files" folder.
constexpr char kMyFilesPath[] = "/home/chronos/user/MyFiles";
// Scanner names used for tests.
constexpr char kFirstTestScannerName[] = "Test Scanner 1";
constexpr char kSecondTestScannerName[] = "Test Scanner 2";
constexpr char kEpsonTestName[] = "Epson";
// Document source name used for tests.
constexpr char kDocumentSourceName[] = "Flatbed";
constexpr char kAdfSourceName[] = "ADF Duplex";
// Resolutions used for tests.
constexpr uint32_t kFirstResolution = 75;
constexpr uint32_t kSecondResolution = 300;
// Returns a DocumentSource object.
lorgnette::DocumentSource CreateLorgnetteDocumentSource() {
lorgnette::DocumentSource source;
source.set_type(lorgnette::SOURCE_PLATEN);
source.set_name(kDocumentSourceName);
return source;
}
// Returns an ADF Duplex DocumentSource object.
lorgnette::DocumentSource CreateAdfDuplexDocumentSource() {
lorgnette::DocumentSource source;
source.set_type(lorgnette::SOURCE_ADF_DUPLEX);
source.set_name(kAdfSourceName);
return source;
}
// Returns a ScannerCapabilities object.
lorgnette::ScannerCapabilities CreateLorgnetteScannerCapabilities() {
lorgnette::ScannerCapabilities caps;
*caps.add_sources() = CreateLorgnetteDocumentSource();
caps.add_color_modes(lorgnette::MODE_COLOR);
caps.add_resolutions(kFirstResolution);
caps.add_resolutions(kSecondResolution);
return caps;
}
// Returns a ScannerCapabilities object used for testing a scanner
// that flips alternate pages.
lorgnette::ScannerCapabilities CreateEpsonScannerCapabilities() {
lorgnette::ScannerCapabilities caps;
*caps.add_sources() = CreateAdfDuplexDocumentSource();
caps.add_color_modes(lorgnette::MODE_COLOR);
caps.add_resolutions(kFirstResolution);
caps.add_resolutions(kSecondResolution);
return caps;
}
// Returns a vector of FilePaths to mimic saved scans.
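// The generated names follow the pattern scan_YYYYMMDD-HHMMSS_N.type; e.g.,
// for a hypothetical scan taken 2021-03-15 14:30:00, the first PNG page would
// be scan_20210315-143000_1.png.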
std::vector<base::FilePath> CreateSavedScanPaths(
const base::FilePath& dir,
const base::Time::Exploded& scan_time,
const std::string& type,
int num_pages_to_scan) {
std::vector<base::FilePath> file_paths;
file_paths.reserve(num_pages_to_scan);
for (int i = 1; i <= num_pages_to_scan; i++) {
file_paths.push_back(dir.Append(base::StringPrintf(
"scan_%02d%02d%02d-%02d%02d%02d_%d.%s", scan_time.year, scan_time.month,
scan_time.day_of_month, scan_time.hour, scan_time.minute,
scan_time.second, i, type.c_str())));
}
return file_paths;
}
// Returns a single FilePath to mimic a saved PDF format scan.
base::FilePath CreateSavedPdfScanPath(const base::FilePath& dir,
const base::Time::Exploded& scan_time) {
return dir.Append(base::StringPrintf("scan_%02d%02d%02d-%02d%02d%02d.pdf",
scan_time.year, scan_time.month,
scan_time.day_of_month, scan_time.hour,
scan_time.minute, scan_time.second));
}
// Returns a manually generated PNG image.
std::string CreatePng() {
SkBitmap bitmap;
bitmap.allocN32Pixels(100, 100);
bitmap.eraseARGB(255, 0, 255, 0);
std::vector<unsigned char> bytes;
gfx::PNGCodec::EncodeBGRASkBitmap(bitmap, false, &bytes);
return std::string(bytes.begin(), bytes.end());
}
// Returns scan settings with the given path and file type.
mojo_ipc::ScanSettings CreateScanSettings(const base::FilePath& scan_to_path,
const mojo_ipc::FileType& file_type) {
mojo_ipc::ScanSettings settings;
settings.scan_to_path = scan_to_path;
settings.file_type = file_type;
return settings;
}
} // namespace
class FakeScanJobObserver : public mojo_ipc::ScanJobObserver {
public:
FakeScanJobObserver() = default;
~FakeScanJobObserver() override = default;
FakeScanJobObserver(const FakeScanJobObserver&) = delete;
FakeScanJobObserver& operator=(const FakeScanJobObserver&) = delete;
// mojo_ipc::ScanJobObserver:
void OnPageProgress(uint32_t page_number,
uint32_t progress_percent) override {
progress_ = progress_percent;
}
void OnPageComplete(const std::vector<uint8_t>& page_data) override {
page_complete_ = true;
}
void OnScanComplete(
mojo_ipc::ScanResult result,
const std::vector<base::FilePath>& scanned_file_paths) override {
scan_result_ = result;
scanned_file_paths_ = scanned_file_paths;
}
void OnCancelComplete(bool success) override {
cancel_scan_success_ = success;
}
// Creates a pending remote that can be passed in calls to
// ScanService::StartScan().
mojo::PendingRemote<mojo_ipc::ScanJobObserver> GenerateRemote() {
if (receiver_.is_bound())
receiver_.reset();
mojo::PendingRemote<mojo_ipc::ScanJobObserver> remote;
receiver_.Bind(remote.InitWithNewPipeAndPassReceiver());
return remote;
}
// Returns true if the scan completed successfully.
bool scan_success() const {
return progress_ == 100 && page_complete_ &&
scan_result_ == mojo_ipc::ScanResult::kSuccess;
}
// Returns true if the cancel scan request completed successfully.
bool cancel_scan_success() const { return cancel_scan_success_; }
// Returns the result of the scan job.
mojo_ipc::ScanResult scan_result() const { return scan_result_; }
// Returns file paths of the saved scan files.
std::vector<base::FilePath> scanned_file_paths() const {
return scanned_file_paths_;
}
private:
uint32_t progress_ = 0;
bool page_complete_ = false;
mojo_ipc::ScanResult scan_result_ = mojo_ipc::ScanResult::kUnknownError;
bool cancel_scan_success_ = false;
std::vector<base::FilePath> scanned_file_paths_;
mojo::Receiver<mojo_ipc::ScanJobObserver> receiver_{this};
};
class ScanServiceTest : public testing::Test {
public:
ScanServiceTest() = default;
void SetUp() override {
ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
scan_service_.BindInterface(
scan_service_remote_.BindNewPipeAndPassReceiver());
}
// Gets scanners by calling ScanService::GetScanners() via the mojo::Remote.
std::vector<mojo_ipc::ScannerPtr> GetScanners() {
std::vector<mojo_ipc::ScannerPtr> scanners;
mojo_ipc::ScanServiceAsyncWaiter(scan_service_remote_.get())
.GetScanners(&scanners);
return scanners;
}
// Gets scanner capabilities for the scanner identified by |scanner_id| by
// calling ScanService::GetScannerCapabilities() via the mojo::Remote.
mojo_ipc::ScannerCapabilitiesPtr GetScannerCapabilities(
const base::UnguessableToken& scanner_id) {
mojo_ipc::ScannerCapabilitiesPtr caps =
mojo_ipc::ScannerCapabilities::New();
mojo_ipc::ScanServiceAsyncWaiter(scan_service_remote_.get())
.GetScannerCapabilities(scanner_id, &caps);
return caps;
}
// Starts a scan with the scanner identified by |scanner_id| with the given
// |settings| by calling ScanService::StartScan() via the mojo::Remote.
bool StartScan(const base::UnguessableToken& scanner_id,
mojo_ipc::ScanSettingsPtr settings) {
bool success;
mojo_ipc::ScanServiceAsyncWaiter(scan_service_remote_.get())
.StartScan(scanner_id, std::move(settings),
fake_scan_job_observer_.GenerateRemote(), &success);
task_environment_.RunUntilIdle();
return success;
}
// Performs a cancel scan request.
void CancelScan() {
scan_service_remote_->CancelScan();
task_environment_.RunUntilIdle();
}
protected:
base::test::TaskEnvironment task_environment_{
base::test::TaskEnvironment::TimeSource::MOCK_TIME};
base::ScopedTempDir temp_dir_;
FakeLorgnetteScannerManager fake_lorgnette_scanner_manager_;
FakeScanJobObserver fake_scan_job_observer_;
ScanService scan_service_{&fake_lorgnette_scanner_manager_, base::FilePath(),
base::FilePath()};
private:
mojo::Remote<mojo_ipc::ScanService> scan_service_remote_;
};
// Test that no scanners are returned when there are no scanner names.
TEST_F(ScanServiceTest, NoScannerNames) {
fake_lorgnette_scanner_manager_.SetGetScannerNamesResponse({});
auto scanners = GetScanners();
EXPECT_TRUE(scanners.empty());
}
// Test that a scanner is returned with the correct display name.
TEST_F(ScanServiceTest, GetScanners) {
fake_lorgnette_scanner_manager_.SetGetScannerNamesResponse(
{kFirstTestScannerName});
auto scanners = GetScanners();
ASSERT_EQ(scanners.size(), 1u);
EXPECT_EQ(scanners[0]->display_name,
base::UTF8ToUTF16(kFirstTestScannerName));
}
// Test that two returned scanners have unique IDs.
TEST_F(ScanServiceTest, UniqueScannerIds) {
fake_lorgnette_scanner_manager_.SetGetScannerNamesResponse(
{kFirstTestScannerName, kSecondTestScannerName});
auto scanners = GetScanners();
ASSERT_EQ(scanners.size(), 2u);
EXPECT_EQ(scanners[0]->display_name,
base::UTF8ToUTF16(kFirstTestScannerName));
EXPECT_EQ(scanners[1]->display_name,
base::UTF8ToUTF16(kSecondTestScannerName));
EXPECT_NE(scanners[0]->id, scanners[1]->id);
}
// Test that the number of detected scanners is recorded.
TEST_F(ScanServiceTest, RecordNumDetectedScanners) {
base::HistogramTester histogram_tester;
histogram_tester.ExpectTotalCount("Scanning.NumDetectedScanners", 0);
fake_lorgnette_scanner_manager_.SetGetScannerNamesResponse(
{kFirstTestScannerName, kSecondTestScannerName});
auto scanners = GetScanners();
ASSERT_EQ(scanners.size(), 2u);
histogram_tester.ExpectUniqueSample("Scanning.NumDetectedScanners", 2, 1);
}
// Test that attempting to get capabilities with a scanner ID that doesn't
// correspond to a scanner results in obtaining no capabilities.
TEST_F(ScanServiceTest, BadScannerId) {
auto caps = GetScannerCapabilities(base::UnguessableToken::Create());
EXPECT_TRUE(caps->sources.empty());
EXPECT_TRUE(caps->color_modes.empty());
EXPECT_TRUE(caps->resolutions.empty());
}
// Test that failing to obtain capabilities from the LorgnetteScannerManager
// results in obtaining no capabilities.
TEST_F(ScanServiceTest, NoCapabilities) {
fake_lorgnette_scanner_manager_.SetGetScannerNamesResponse(
{kFirstTestScannerName});
fake_lorgnette_scanner_manager_.SetGetScannerCapabilitiesResponse(
base::nullopt);
auto scanners = GetScanners();
ASSERT_EQ(scanners.size(), 1u);
auto caps = GetScannerCapabilities(scanners[0]->id);
EXPECT_TRUE(caps->sources.empty());
EXPECT_TRUE(caps->color_modes.empty());
EXPECT_TRUE(caps->resolutions.empty());
}
// Test that scanner capabilities can be obtained successfully.
TEST_F(ScanServiceTest, GetScannerCapabilities) {
fake_lorgnette_scanner_manager_.SetGetScannerNamesResponse(
{kFirstTestScannerName});
fake_lorgnette_scanner_manager_.SetGetScannerCapabilitiesResponse(
CreateLorgnetteScannerCapabilities());
auto scanners = GetScanners();
ASSERT_EQ(scanners.size(), 1u);
auto caps = GetScannerCapabilities(scanners[0]->id);
ASSERT_EQ(caps->sources.size(), 1u);
EXPECT_EQ(caps->sources[0]->type, mojo_ipc::SourceType::kFlatbed);
EXPECT_EQ(caps->sources[0]->name, kDocumentSourceName);
ASSERT_EQ(caps->color_modes.size(), 1u);
EXPECT_EQ(caps->color_modes[0], mojo_ipc::ColorMode::kColor);
ASSERT_EQ(caps->resolutions.size(), 2u);
EXPECT_EQ(caps->resolutions[0], kFirstResolution);
EXPECT_EQ(caps->resolutions[1], kSecondResolution);
}
// Test that attempting to scan with a scanner ID that doesn't correspond to a
// scanner results in a failed scan.
TEST_F(ScanServiceTest, ScanWithBadScannerId) {
EXPECT_FALSE(StartScan(base::UnguessableToken::Create(),
mojo_ipc::ScanSettings::New()));
}
// Test that attempting to scan with an unsupported file path fails.
// Specifically, use a file path with directory navigation (e.g. "..") to verify
// it can't be used to save scanned images to an unsupported path.
TEST_F(ScanServiceTest, ScanWithUnsupportedFilePath) {
fake_lorgnette_scanner_manager_.SetGetScannerNamesResponse(
{kFirstTestScannerName});
const std::vector<std::string> scan_data = {"TestData"};
fake_lorgnette_scanner_manager_.SetScanResponse(scan_data);
auto scanners = GetScanners();
ASSERT_EQ(scanners.size(), 1u);
const base::FilePath my_files_path(kMyFilesPath);
scan_service_.SetMyFilesPathForTesting(my_files_path);
const mojo_ipc::ScanSettings settings = CreateScanSettings(
my_files_path.Append("../../../var/log"), mojo_ipc::FileType::kPng);
EXPECT_FALSE(StartScan(scanners[0]->id, settings.Clone()));
}
// Test that a scan can be performed successfully.
TEST_F(ScanServiceTest, Scan) {
fake_lorgnette_scanner_manager_.SetGetScannerNamesResponse(
{kFirstTestScannerName});
const std::vector<std::string> scan_data = {CreatePng(), CreatePng(),
CreatePng()};
fake_lorgnette_scanner_manager_.SetScanResponse(scan_data);
auto scanners = GetScanners();
ASSERT_EQ(scanners.size(), 1u);
base::Time::Exploded scan_time;
// Since we're using mock time, this is deterministic.
base::Time::Now().LocalExplode(&scan_time);
scan_service_.SetMyFilesPathForTesting(temp_dir_.GetPath());
std::map<std::string, mojo_ipc::FileType> file_types = {
{"png", mojo_ipc::FileType::kPng}, {"jpg", mojo_ipc::FileType::kJpg}};
for (const auto& type : file_types) {
const std::vector<base::FilePath> saved_scan_paths = CreateSavedScanPaths(
temp_dir_.GetPath(), scan_time, type.first, scan_data.size());
for (const auto& saved_scan_path : saved_scan_paths)
EXPECT_FALSE(base::PathExists(saved_scan_path));
mojo_ipc::ScanSettings settings =
CreateScanSettings(temp_dir_.GetPath(), type.second);
EXPECT_TRUE(StartScan(scanners[0]->id, settings.Clone()));
for (const auto& saved_scan_path : saved_scan_paths)
EXPECT_TRUE(base::PathExists(saved_scan_path));
EXPECT_TRUE(fake_scan_job_observer_.scan_success());
EXPECT_EQ(mojo_ipc::ScanResult::kSuccess,
fake_scan_job_observer_.scan_result());
EXPECT_EQ(saved_scan_paths, fake_scan_job_observer_.scanned_file_paths());
}
}
// Test that a scan with PDF file format can be performed successfully.
TEST_F(ScanServiceTest, PdfScan) {
fake_lorgnette_scanner_manager_.SetGetScannerNamesResponse(
{kFirstTestScannerName});
const std::vector<std::string> scan_data = {CreatePng(), CreatePng(),
CreatePng()};
fake_lorgnette_scanner_manager_.SetScanResponse(scan_data);
auto scanners = GetScanners();
ASSERT_EQ(scanners.size(), 1u);
base::Time::Exploded scan_time;
// Since we're using mock time, this is deterministic.
base::Time::Now().LocalExplode(&scan_time);
scan_service_.SetMyFilesPathForTesting(temp_dir_.GetPath());
mojo_ipc::ScanSettings settings =
CreateScanSettings(temp_dir_.GetPath(), mojo_ipc::FileType::kPdf);
const base::FilePath saved_scan_path =
CreateSavedPdfScanPath(temp_dir_.GetPath(), scan_time);
EXPECT_FALSE(base::PathExists(saved_scan_path));
EXPECT_TRUE(StartScan(scanners[0]->id, settings.Clone()));
EXPECT_TRUE(base::PathExists(saved_scan_path));
EXPECT_TRUE(fake_scan_job_observer_.scan_success());
EXPECT_EQ(mojo_ipc::ScanResult::kSuccess,
fake_scan_job_observer_.scan_result());
const std::vector<base::FilePath> scanned_file_paths =
fake_scan_job_observer_.scanned_file_paths();
EXPECT_EQ(1u, scanned_file_paths.size());
EXPECT_EQ(saved_scan_path, scanned_file_paths.front());
}
// Test that an Epson ADF Duplex scan, which produces flipped pages, completes
// successfully.
TEST_F(ScanServiceTest, RotateEpsonADF) {
fake_lorgnette_scanner_manager_.SetGetScannerNamesResponse({kEpsonTestName});
fake_lorgnette_scanner_manager_.SetGetScannerCapabilitiesResponse(
CreateEpsonScannerCapabilities());
const std::vector<std::string> scan_data = {CreatePng(), CreatePng(),
CreatePng()};
fake_lorgnette_scanner_manager_.SetScanResponse(scan_data);
auto scanners = GetScanners();
ASSERT_EQ(scanners.size(), 1u);
base::Time::Exploded scan_time;
// Since we're using mock time, this is deterministic.
base::Time::Now().LocalExplode(&scan_time);
scan_service_.SetMyFilesPathForTesting(temp_dir_.GetPath());
mojo_ipc::ScanSettings settings =
CreateScanSettings(temp_dir_.GetPath(), mojo_ipc::FileType::kPdf);
const base::FilePath saved_scan_path =
CreateSavedPdfScanPath(temp_dir_.GetPath(), scan_time);
EXPECT_FALSE(base::PathExists(saved_scan_path));
EXPECT_TRUE(StartScan(scanners[0]->id, settings.Clone()));
EXPECT_TRUE(base::PathExists(saved_scan_path));
EXPECT_TRUE(fake_scan_job_observer_.scan_success());
const std::vector<base::FilePath> scanned_file_paths =
fake_scan_job_observer_.scanned_file_paths();
EXPECT_EQ(1u, scanned_file_paths.size());
EXPECT_EQ(saved_scan_path, scanned_file_paths.front());
}
// Test that when a scan fails, the scan job is marked as failed.
TEST_F(ScanServiceTest, ScanFails) {
// Skip setting the scan data in FakeLorgnetteScannerManager so the scan will
// fail.
fake_lorgnette_scanner_manager_.SetGetScannerNamesResponse(
{kFirstTestScannerName});
auto scanners = GetScanners();
ASSERT_EQ(scanners.size(), 1u);
scan_service_.SetMyFilesPathForTesting(temp_dir_.GetPath());
const mojo_ipc::ScanSettings settings =
CreateScanSettings(temp_dir_.GetPath(), mojo_ipc::FileType::kPng);
EXPECT_TRUE(StartScan(scanners[0]->id, settings.Clone()));
EXPECT_FALSE(fake_scan_job_observer_.scan_success());
EXPECT_EQ(mojo_ipc::ScanResult::kDeviceBusy,
fake_scan_job_observer_.scan_result());
EXPECT_TRUE(fake_scan_job_observer_.scanned_file_paths().empty());
}
// Test that when a page fails to save during the scan, the scan job is marked
// as failed.
TEST_F(ScanServiceTest, PageSaveFails) {
fake_lorgnette_scanner_manager_.SetGetScannerNamesResponse(
{kFirstTestScannerName});
// Sending an empty string in the test data simulates a page failing to save.
const std::vector<std::string> scan_data = {"TestData1", "", "TestData3"};
fake_lorgnette_scanner_manager_.SetScanResponse(scan_data);
auto scanners = GetScanners();
ASSERT_EQ(scanners.size(), 1u);
scan_service_.SetMyFilesPathForTesting(temp_dir_.GetPath());
const mojo_ipc::ScanSettings settings =
CreateScanSettings(temp_dir_.GetPath(), mojo_ipc::FileType::kJpg);
EXPECT_TRUE(StartScan(scanners[0]->id, settings.Clone()));
EXPECT_FALSE(fake_scan_job_observer_.scan_success());
EXPECT_EQ(mojo_ipc::ScanResult::kUnknownError,
fake_scan_job_observer_.scan_result());
EXPECT_TRUE(fake_scan_job_observer_.scanned_file_paths().empty());
}
// Tests that a new scan job can succeed after the previous scan failed.
TEST_F(ScanServiceTest, ScanAfterFailedScan) {
// Skip setting the scan data in FakeLorgnetteScannerManager so the scan will
// fail.
fake_lorgnette_scanner_manager_.SetGetScannerNamesResponse(
{kFirstTestScannerName});
auto scanners = GetScanners();
ASSERT_EQ(scanners.size(), 1u);
scan_service_.SetMyFilesPathForTesting(temp_dir_.GetPath());
const mojo_ipc::ScanSettings settings =
CreateScanSettings(temp_dir_.GetPath(), mojo_ipc::FileType::kPng);
EXPECT_TRUE(StartScan(scanners[0]->id, settings.Clone()));
EXPECT_FALSE(fake_scan_job_observer_.scan_success());
EXPECT_EQ(mojo_ipc::ScanResult::kDeviceBusy,
fake_scan_job_observer_.scan_result());
EXPECT_TRUE(fake_scan_job_observer_.scanned_file_paths().empty());
// Set scan data so next scan is successful.
const std::vector<std::string> scan_data = {"TestData1", "TestData2",
"TestData3"};
fake_lorgnette_scanner_manager_.SetScanResponse(scan_data);
base::Time::Exploded scan_time;
// Since we're using mock time, this is deterministic.
base::Time::Now().LocalExplode(&scan_time);
const std::vector<base::FilePath> saved_scan_paths = CreateSavedScanPaths(
temp_dir_.GetPath(), scan_time, "png", scan_data.size());
for (const auto& saved_scan_path : saved_scan_paths)
EXPECT_FALSE(base::PathExists(saved_scan_path));
EXPECT_TRUE(StartScan(scanners[0]->id, settings.Clone()));
for (const auto& saved_scan_path : saved_scan_paths)
EXPECT_TRUE(base::PathExists(saved_scan_path));
EXPECT_TRUE(fake_scan_job_observer_.scan_success());
EXPECT_EQ(mojo_ipc::ScanResult::kSuccess,
fake_scan_job_observer_.scan_result());
EXPECT_EQ(saved_scan_paths, fake_scan_job_observer_.scanned_file_paths());
}
// Tests that a failed scan does not retain values from the previous successful
// scan.
TEST_F(ScanServiceTest, FailedScanAfterSuccessfulScan) {
fake_lorgnette_scanner_manager_.SetGetScannerNamesResponse(
{kFirstTestScannerName});
const std::vector<std::string> scan_data = {"TestData1", "TestData2",
"TestData3"};
fake_lorgnette_scanner_manager_.SetScanResponse(scan_data);
auto scanners = GetScanners();
ASSERT_EQ(scanners.size(), 1u);
base::Time::Exploded scan_time;
// Since we're using mock time, this is deterministic.
base::Time::Now().LocalExplode(&scan_time);
scan_service_.SetMyFilesPathForTesting(temp_dir_.GetPath());
const mojo_ipc::ScanSettings settings =
CreateScanSettings(temp_dir_.GetPath(), mojo_ipc::FileType::kPng);
const std::vector<base::FilePath> saved_scan_paths = CreateSavedScanPaths(
temp_dir_.GetPath(), scan_time, "png", scan_data.size());
for (const auto& saved_scan_path : saved_scan_paths)
EXPECT_FALSE(base::PathExists(saved_scan_path));
EXPECT_TRUE(StartScan(scanners[0]->id, settings.Clone()));
for (const auto& saved_scan_path : saved_scan_paths)
EXPECT_TRUE(base::PathExists(saved_scan_path));
EXPECT_TRUE(fake_scan_job_observer_.scan_success());
EXPECT_EQ(mojo_ipc::ScanResult::kSuccess,
fake_scan_job_observer_.scan_result());
EXPECT_EQ(saved_scan_paths, fake_scan_job_observer_.scanned_file_paths());
// Remove the scan data from FakeLorgnetteScannerManager so the scan will
// fail.
fake_lorgnette_scanner_manager_.SetScanResponse({});
EXPECT_TRUE(StartScan(scanners[0]->id, settings.Clone()));
EXPECT_FALSE(fake_scan_job_observer_.scan_success());
EXPECT_EQ(mojo_ipc::ScanResult::kDeviceBusy,
fake_scan_job_observer_.scan_result());
EXPECT_TRUE(fake_scan_job_observer_.scanned_file_paths().empty());
}
// Test that canceling sends an update to the observer via OnCancelComplete().
TEST_F(ScanServiceTest, CancelScanBeforeScanCompletes) {
fake_lorgnette_scanner_manager_.SetGetScannerNamesResponse(
{kFirstTestScannerName});
const std::vector<std::string> scan_data = {"TestData"};
fake_lorgnette_scanner_manager_.SetScanResponse(scan_data);
auto scanners = GetScanners();
ASSERT_EQ(scanners.size(), 1u);
scan_service_.SetMyFilesPathForTesting(temp_dir_.GetPath());
const mojo_ipc::ScanSettings settings =
CreateScanSettings(temp_dir_.GetPath(), mojo_ipc::FileType::kPng);
StartScan(scanners[0]->id, settings.Clone());
CancelScan();
EXPECT_TRUE(fake_scan_job_observer_.cancel_scan_success());
}
} // namespace ash
import xmltool
from xmltool import *
import codetools
from codetools import *
import copy
'''
About this script:
This script was developed to be a magic bullet for taking LAPACK fortran
and LAPACKE C code and documentation and turning it into the
ChaLAPACK and LAPACK interface modules.
It is not intended to be 'general purpose' and may break with other (maybe newer)
versions of LAPACK.
The idea here was to adopt a static-pass pattern that would be applied to an XML tree.
This is the pattern to be adopted by all Pass classes
class GenericPass ( Pass ):
dependencies = [] # list of Pass inheriting classes that must
# be completed before this pass is run
complete = False # static variable signifying that the pass
# had been successfully completed
@staticmethod
def apply( xml_tree ):
selfname = GenericPass
Pass.resolve( selfname, xml_tree ) # Resolve all of this passes dependencies
# potentially resolving their dependencies
print "[",selfname,"]"
# Work to be done in this pass
selfname.complete = True # Signify that this pass was completed successfully.
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
Pass is the parent class of all pass classes, and contains the parsed
input.xml file (input_xml), where pass specific inputs are found
'''
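# --- Illustrative sketch (not part of this tool): the static-pass pattern ---
# A minimal, self-contained example of the dependency-resolution idiom the
# docstring above describes. All names here (_ExampleBasePass, _ExampleA,
# _ExampleB) are hypothetical and unused by the passes below.
class _ExampleBasePass:
    complete = False
    dependencies = []
    @staticmethod
    def resolve( staticclass, tree ):
        # apply every unfinished dependency before the pass itself runs
        for dep in staticclass.dependencies:
            if not dep.complete:
                dep.apply( tree )

class _ExampleA ( _ExampleBasePass ):
    dependencies = []
    @staticmethod
    def apply( tree ):
        _ExampleBasePass.resolve( _ExampleA, tree )
        tree.append( "A" )
        _ExampleA.complete = True

class _ExampleB ( _ExampleBasePass ):
    dependencies = [_ExampleA]
    @staticmethod
    def apply( tree ):
        _ExampleBasePass.resolve( _ExampleB, tree )
        tree.append( "B" )
        _ExampleB.complete = True
# Applying _ExampleB to an empty list resolves _ExampleA first:
#   work = []; _ExampleB.apply( work )  # work == ["A", "B"]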
# Relative paths to LAPACK and its subdirectories.
lapack_root = "../LAPACK"
lapack_src = lapack_root + "/SRC"
lapack_matgen_src = lapack_root + "/TESTING/MATGEN"
lapack_install_src = lapack_root + "/INSTALL"
lapacke_include = lapack_root + "/lapacke/include"
blas_src = lapack_root + "/BLAS/SRC"
# Parses the documentation out from the text, stripping the comment tokens
# Captures: [1,text] raw documentation text, stripped of comment tokens
f_comment_regex = re.compile( r"(?:^|\n)\*>?(?P<text>.*)" )
# Parses the source out from the text
# Captures: [1,text] raw source code text
f_source_regex = re.compile( r"(?:^|\n)(?!\*>?)(?P<text>.*)" )
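# Illustrative example of the two regexes above: given the fragment
#   "*> ZGETRF computes an LU factorization.\n      CALL DSCAL( N )"
# f_comment_regex captures " ZGETRF computes an LU factorization." and
# f_source_regex captures "      CALL DSCAL( N )".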
# Parses function declaration from the c header file
# Captures: [1,returns] return type, [2,name] function name, [3,arguments] full unparsed param list
c_func_decl_regex = re.compile( r"(?P<returns>\w+)\s+(?P<name>\w+)\s*\(\s*(?P<arguments>(?:\w+\s+)?\w+(?:\s*\*\s*|\s+)\w+(?:\s*,\s*(?:\w+\s+)?\w+(?:\s*\*\s*|\s+)\w+)*)?\s*\)\s*;" )
# Parses arguments to a function
# Captures: [1,modifier]? const modifier, [2,type] type, [3,refdepth] string containing whitespace and/or asterisk(s), [4,name] param name
c_args_regex = re.compile( r"(?:(?P<modifier>const)\s+)?(?P<type>\w+)(?P<refdepth>(?:\s*\*+\s*)|\s+)(?P<name>\w+)" )
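# Illustrative example: on a lapacke.h-style declaration such as
#   "lapack_int LAPACKE_dgetrf( int matrix_layout, lapack_int m, lapack_int n, double* a, lapack_int lda, lapack_int* ipiv );"
# c_func_decl_regex yields returns="lapack_int", name="LAPACKE_dgetrf", and
# c_args_regex then iterates the arguments, e.g. type="double",
# refdepth="* " (one '*', so a refdepth of 1), name="a".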
# Parses function declaration and argument documentation from the fortran code (Does not yet work with source; requires $ delimiting as well)
# Captures: [1,type]? return type, [2,name] function name, [3,arguments] full unparsed argument names,
doc_func_regex = re.compile( r"(?:(?P<type>(?:[\w\*]+)(?: +\w+)*)\s+)?(?:(?:SUBROUTINE)|(?:FUNCTION))\s+(?P<name>\w+)\(\s*(?P<arguments>(?:\w+\s*(?:,\s*\w+\s*)*)?)\s*\)" )
# Parses the scalar arguments from the documentation (TODO determine if source works too.)
# Captures: [1,body] full unparsed text of scalar arguments documentation
doc_scalarargs_regex = re.compile( r"Scalar Arguments\s+\.\.\s+(?P<body>(?:.|\n)*?)\s+\.\." )
doc_scalarargs_decls_regex = re.compile( r"(?P<type>(?:[\w\*]+)(?: +\w+)*) +(?P<names>\w+(?:\s*,(?:\s*\$\s*)?\s*\w+)*)" )
# Parses the array arguments from the documentation (TODO determine if source works too.)
# Captures: [1,body] full unparsed text of array arguments documentation
doc_arrayargs_regex = re.compile( r"Array Arguments\s+\.\.\s+(?P<body>(?:.|\n)*?)\s+\.\." )
doc_arrayargs_decls_regex = re.compile( r"(?P<type>(?:[\w\*]+)(?: +\w+)*) +(?P<names>\w+(?:\([\s\S]*?\))?(?:\s*,(?:\s*\$\s*)?\s*\w+(?:\([\s\S]*?\))?)*)" )
doc_arrayargs_decls_names_dims_regex = re.compile( r"(?P<name>\w+)(?:\((?P<dimensions>.*?)\))?," )
doc_functionargs_regex = re.compile( r"Function Arguments\s+\.\.\s+(?P<body>(?:.|\n)*?)\s+\.\." )
doc_functionargs_decls_regex = doc_scalarargs_decls_regex
# Parses the argument information from the documentation
# Captures: [1,name] name, [2,intent] intent, [3,body] full unparsed body of argument document
doc_args_regex = re.compile( r"\s+\\param\[(?P<intent>\w+(?:,\w+)*)\]\s+(?P<name>\w+)\s+\\verbatim\s+(?P<body>(?:[\s\S])+?)\s*\\endverbatim" )
# Parses the typeinfo group of doc_args_regex
# Captures: [1,name] argument name, [2,type] type, [3,array]? captures array keyword if exists, [4,dimension] captures text describing dimensions
doc_args_typeinfo_regex = re.compile( r"(?P<name>\w+)\s+is\s+(?P<type>\w+)(?: (?P<array>array), dimension\s+(?P<dimensions>.*))?" )
# Parses the argument documentation and provides the matrix size of an array (if there is one)
# Captures: [1] 1st dimension, [2] 2nd dimension
doc_args_dimensions_regex = re.compile( r"(\w+)[- ]by[- ](\w+)(?: coefficient)? matrix" )
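# e.g. "the M-by-N coefficient matrix" yields ("M", "N")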
# Parses the human documentation of scalar integer arguments in the base fortran routines to determine how (if at all) they relate to matrix arrays
# Captures: [1,what] the semantic information of relation (order, rows, columns, rank) [2,who] an unbroken sentence of names referring to matrices/arrays
scalar_matrix_relation_regex = re.compile( r"(?:number\s+of\s+)?(?P<what>\w+)\s+(?:(?:of)|(?:in))\s+(?:the\s+)?(?:input\s+)?(?:(?:matrix)|(?:matrices)|(?:submatrix))?(?:\s+)?(?P<who>(?:(?:(?:\w+\( \w+ \))|(?:\w+))\s*)+)" );
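# Illustrative example: on "The number of rows of the matrix A." this regex
# yields what="rows", who="A"; AssociateArgsToArrayPass below keeps only
# matches whose 'what' is rows/columns/order/rank.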
# Parses the function purpose documentation from the documentation
# Captures: [1,body] the human readable text documenting the purpose of the function
doc_purpose_regex = re.compile( r"\\par\s+Purpose:\s+=+\s+\\verbatim\s+(?P<body>(?:[\s\S]+?))\s+\\endverbatim" )
# Parses function names
# Captures: [1,type] literal type of matrix, [2,config] configuration type of matrix, [3,function] function group
#func_name_group_regex = re.compile( r"^(?P<type>[dszc]|(?:ds)|(?:zc))(?P<config>(?:bd)|(?:di)|(?:gb)|(?:ge)|(?:gg)|(?:gt)|(?:hb)|(?:he)|(?:hg)|(?:hp)|(?:hs)|(?:op)|(?:or)|(?:pb)|(?:po)|(?:pp)|(?:pt)|(?:sb)|(?:sp)|(?:st)|(?:sy)|(?:tb)|(?:tg)|(?:tp)|(?:tr)|(?:tz)|(?:un)|(?:up))(?P<function>.+)" )
func_name_group_regex = re.compile( r"^(?P<type>(?:(?:ds)|(?:zc)|[dszc]))(?P<config>\w\w)(?P<function>\w\w\w*)" )
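# Illustrative examples: "dgetrf" -> type "d", config "ge", function "trf";
# "zcgesv" -> type "zc", config "ge", function "sv".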
'''
class ResolutionFailure ( Exception )
Purpose:
Exception for errors encountered during Pass.resolve( ) calls.
Member Functions:
__init__( self, value ):
constructor. Value is the Pass class that errored during resolution
__str__( self ):
returns string stating which Pass class had an error.
Member Variables:
value:
the Pass class that errored during resolution
'''
class ResolutionFailure ( Exception ):
def __init__(self, value):
self.value = value
def __str__(self):
return "Error applying " + repr(self.value) + " to tree"
'''
class GeneralPassFailure ( Exception )
Purpose:
Generic exception class that is thrown when passes encounter critical errors
Member Functions:
__init__( self, message ):
constructor. Message is the message from the Pass to the user
__str__( self ):
returns the message to from the Pass to the user
Member Variables:
message:
the message to from the Pass to the user
'''
class GeneralPassFailure ( Exception ):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
'''
class Pass
Purpose:
Parent of all other pass classes.
Container of input xml file.
Dependency resolver.
Member Functions:
resolve( staticclass, xml_tree ):
recursively resolves dependencies of pass staticclass onto xml_tree
apply( xml_tree ):
abstract static method(?).
raises NotImplementedError exception
Member Variables:
complete:
boolean flag: has this pass been completed successfully somewhere once.
if false, the pass has never been performed or failed to perform
if true, the pass has been performed once.
dependencies:
list of dependencies that must be completed before pass can be applied
input_xml:
the user input xml that gives input from the user to the passes,
especially for passes that could not do automated finding in the source text
'''
class Pass:
complete = False
dependencies = []
input_xml = loadxml( "./input.xml" )
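# input.xml is expected to contain <pass name="..."> elements holding
# per-pass inputs -- e.g. <substitution find=".." replace=".."/> entries
# read by TypeSubstitutionPass below.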
@staticmethod
def resolve( staticclass, xml_tree ):
print "Resolving", staticclass, "Dependencies"
for dep in staticclass.dependencies:
if not dep.complete:
dep.apply( xml_tree )
if not dep.complete:
raise ResolutionFailure( dep )
print "Resolved", staticclass, "Dependencies"
@staticmethod
def apply( xml_tree ):
raise NotImplementedError
'''
class CreateTreePass ( Pass )
Purpose:
takes in xmlelement with a root node, creates <LAPACK> and <LAPACKE> root nodes
'''
class CreateTreePass ( Pass ):
dependencies = []
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CreateTreePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
SubElement( xml_tree, "LAPACK" )
SubElement( xml_tree, "LAPACKE" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DocumentSplitPass ( Pass )
Purpose:
Strip documentation (fortran comments) from code into separate nodes
under common file node under LAPACK node.
'''
class DocumentSplitPass ( Pass ):
dependencies = [CreateTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DocumentSplitPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
text_node = SubElement( xml_tree.find( "./LAPACK" ), "text" )
src_files = []
source_dirs = [lapack_src, lapack_matgen_src, blas_src]
for dir in source_dirs:
for file in os.listdir( dir ):
if fnmatch.fnmatch( file, '*.f' ):
src_files.append( dir + "/" + file )
file_count = 1
for file in src_files:
sys.stdout.write("%s ( %d : %d ) \r" % (file, file_count, len(src_files) ) )
sys.stdout.flush()
file_node = SubElement( text_node, "file" )
file_node.set( "name", file )
src_node = SubElement( file_node, "source" )
doc_node = SubElement( file_node, "documentation" )
src_node.text = str()
doc_node.text = str()
file_read = open( file ).read()
for doc_match in f_comment_regex.finditer( file_read ):
doc_node.text += doc_match.group( "text" ) + "\n"
# Source splitting was previously disabled ("works, no use, unnecessary load on tree") but is re-enabled here.
for src_match in f_source_regex.finditer( file_read ):
src_node.text += src_match.group( "text" ) + "\n"
file_count += 1
sys.stdout.write(" \r")
sys.stdout.flush()
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class LAPACKFunctionDefinePass ( Pass )
Purpose:
find fortran functions in the documentation and put them in the
<procedures> node under <LAPACK>
'''
class LAPACKFunctionDefinePass ( Pass ):
dependencies = [DocumentSplitPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = LAPACKFunctionDefinePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = SubElement( lapack_node, "procedures" )
file_nodes = text_node.findall( "./file" )
file_count = 1
for file_node in file_nodes:
sys.stdout.write("%s ( %d : %d ) \r" % (file_node.get("name"), file_count, len(file_nodes) ) )
sys.stdout.flush()
file_doc = file_node.find( "./documentation" )
for proc_decl in doc_func_regex.finditer( file_doc.text ):
proc_node = SubElement( procs_node, "procedure" )
proc_node.set( "name", proc_decl.group( "name" ) )
proc_node.set( "file-name", file_node.get( "name" ) )
if proc_decl.group( "type" ) != None:
proc_node.set( "return-type", proc_decl.group( "type" ) )
#print "\t", proc_decl.group("name")
arguments = proc_decl.group( "arguments" ).split( "," );
if len( arguments ) >= 1 and arguments[0] != "":
args_node = SubElement( proc_node, "arguments-list" )
arg_counter = 0
for arg in arguments:
#print "\t\t",arg.strip()
arg_node = SubElement( args_node, "argument" )
arg_node.set( "name", arg.strip() )
arg_node.set( "position", str(arg_counter) )
arg_counter += 1
#SubElement( proc_node, "documentation" )
file_count += 1
sys.stdout.write(" \r")
sys.stdout.flush()
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FuncPurposeDocPass ( Pass )
Purpose:
collect function purpose documentation from fortran text
'''
class FuncPurposeDocPass ( Pass ):
dependencies = [LAPACKFunctionDefinePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FuncPurposeDocPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = lapack_node.find( "./procedures" )
for proc_node in procs_node.findall( "./procedure" ):
proc_file_name = proc_node.get( "file-name" )
doc_node = text_node.find( "./file/[@name='" + proc_file_name + "']/documentation" )
purpose_match = doc_purpose_regex.search( doc_node.text )
purpose = purpose_match.group( "body" ) if purpose_match != None else "Unspecified"
SubElement( proc_node, "purpose" ).text = purpose
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FuncArgsDocPass ( Pass ):
Purpose:
collect argument documentation from fortran text
'''
class FuncArgsDocPass ( Pass ):
dependencies = [LAPACKFunctionDefinePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FuncArgsDocPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = lapack_node.find( "./procedures" )
for proc_node in procs_node.findall( "./procedure" ):
proc_file_name = proc_node.get( "file-name" )
doc_node = text_node.find( "./file/[@name='" + proc_file_name + "']/documentation" )
for arg_match in doc_args_regex.finditer( doc_node.text ):
#print "\"",proc_file_name,"\"", arg_match.group()
arg_name = arg_match.group( "name" ).strip()
arg_node = proc_node.find( "./arguments-list/argument/[@name='" + arg_name + "']" )
arg_node.set( "intent", arg_match.group( "intent" ) )
dim_match = doc_args_dimensions_regex.search( arg_match.group( "body" ) )
if dim_match != None:
arg_node.set( "matrix-size", dim_match.group(1) +"," + dim_match.group(2) )
SubElement( arg_node, "documentation" ).text = arg_match.group( "body" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FuncArgsTypePass ( Pass )
NON FUNCTIONAL
Purpose:
collect argument names and types under the Scalar Arguments
and Array Arguments header and include in tree for semantic understanding
'''
class FuncArgsTypePass ( Pass ):
dependencies = [LAPACKFunctionDefinePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FuncArgsTypePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = lapack_node.find( "./procedures" )
for proc_node in procs_node.findall( "./procedure" ):
proc_file_name = proc_node.get( "file-name" )
doc_node = text_node.find( "./file/[@name='" + proc_file_name + "']/documentation" )
# attribute scalar arguments
scalars = doc_scalarargs_regex.search( doc_node.text )
if scalars != None:
for line in doc_scalarargs_decls_regex.finditer( scalars.group( "body" ) ):
names_list = re.sub( r"[\s$]", "", line.group("names") ).split( "," )
#print line.group( "type" ), ":", names_list
type = line.group( "type" )
#skip any "IMPLICIT" 'typed' arguments
if type.lower() == "implicit":
continue
for name in names_list:
arg_node = proc_node.find( "./arguments-list/argument/[@name='" + name + "']" )
if arg_node == None:
#print "Non-match: argument", name, "of", proc_node.get( "name" ), "in", proc_file_name
#prettyprintxml( proc_node.find("./arguments-list") )
continue
arg_node.set( "type", type )
arg_node.set( "semantic", "scalar" )
# attribute array arguments
arrays = doc_arrayargs_regex.search( doc_node.text )
if arrays != None:
for line in doc_arrayargs_decls_regex.finditer( arrays.group( "body" ) ):
name_list = re.sub( r"[\s$]", "", line.group("names") ) + ","
type = line.group( "type" )
for name_match in doc_arrayargs_decls_names_dims_regex.finditer( name_list ):
name = name_match.group( "name" )
arg_node = proc_node.find( "./arguments-list/argument/[@name='" + name + "']" )
if arg_node == None:
#print "Non-match: argument", name, "of", proc_node.get( "name" ), "in", proc_file_name
continue
dimensions = name_match.group( "dimensions") if name_match.group( "dimensions") != None else ""
arg_node.set( "type", type )
arg_node.set( "semantic", "array" )
arg_node.set( "dimensions", dimensions )
# attribute function arguments
functions = doc_functionargs_regex.search( doc_node.text )
if functions != None:
for line in doc_functionargs_decls_regex.finditer( functions.group( "body" ) ):
names_list = re.sub( r"[\s$]", "", line.group("names") ).split( "," )
#print line.group( "type" ), ":", names_list
type = line.group( "type" )
#skip any "IMPLICIT" 'typed' arguments
if type.lower() == "external":
continue
for name in names_list:
arg_node = proc_node.find( "./arguments-list/argument/[@name='" + name + "']" )
if arg_node == None:
#print "Non-match: argument", name, "of", proc_node.get( "name" ), "in", proc_file_name
#prettyprintxml( proc_node.find("./arguments-list") )
continue
arg_node.set( "type", type )
arg_node.set( "semantic", "function" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class AssociateArgsToArrayPass ( Pass ):
Purpose:
Threshes out scalar-argument pairing for array concepts.
'''
class AssociateArgsToArrayPass ( Pass ):
dependencies = [FuncArgsTypePass, FuncArgsDocPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = AssociateArgsToArrayPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = lapack_node.find( "./procedures" )
#proc_info = {} # {func_name} => { arg name } => [ what, what, who ]
for proc_node in procs_node.findall( "./procedure" ):
proc_name = proc_node.get( "name" )
'''
if not proc_name in proc_info:
proc_info[ proc_name ] = {}
'''
base_name = proc_name.lower()
match = func_name_group_regex.search( base_name );
if match == None:
#print proc_name, "(", base_name, ") does not match regex"
continue
func = match.group( "function" )
config = match.group( "config" )
type = match.group( "type" )
if not config.startswith( "ge" ):
pass
arg_names = [ arg.get("name") for arg in proc_node.findall( "./arguments-list/argument" ) ]
for arg_node in proc_node.findall( "./arguments-list/argument" ):
doc_node = arg_node.find( "documentation" )
if doc_node == None or arg_node.get("semantic") != "scalar" or arg_node.get("type").lower() != "integer":
continue
what = []
who = []
string = []
for m in scalar_matrix_relation_regex.finditer( doc_node.text ):
if not m.group( "what" ) in ["rows", "columns", "order", "rank"] :
continue
names = m.group( "who" ).strip()
names_list = []
if " and " in names:
names_list = [ name.strip() for name in names.split( "and" ) ]
else:
names_list = [ names ]
nameHasSpace = False
for name in names_list:
if " " in name:
nameHasSpace = True
break
if nameHasSpace:
#print names, " contains non names. Skipping."
continue
removes = []
for name in names_list:
if not name in arg_names:
removes.append( name )
for rm in removes:
names_list.remove( rm )
if len( names_list ) == 0:
#print "Names list had no argument names. Skipping"
continue
what.append( m.group( "what" ) )
who.append( names_list )
string.append( re.sub( "\s+", " ", m.group(0) ) )
if len( what ) == 0 and len( who ) == 0:
continue
#proc_info[ proc_name ][ arg_node.get( "name" ) ] = [ what, who, string]
associate_array = str()
associate_field = str()
first = True
for i in range( len( who ) ):
for array in who[i]:
associate_array += ( "," if not first else "" ) + array
associate_field += ( "," if not first else "" ) + what[i]
first = False
arg_node.set( "associate-array", associate_array )
arg_node.set( "associate-field", associate_field )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseLAPACKPass ( Pass )
Purpose:
Tie together all passes over the LAPACK fortran source code and
resulting semantic analysis
'''
class BaseLAPACKPass ( Pass ):
dependencies = [FuncArgsTypePass, FuncArgsDocPass, AssociateArgsToArrayPass, FuncPurposeDocPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseLAPACKPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class LAPACKEFunctionDefinePass ( Pass ):
Purpose:
from lapacke.h text define all C function decls
under the <LAPACKE> tree.
'''
class LAPACKEFunctionDefinePass ( Pass ):
dependencies = [CreateTreePass] # TODO include BaseLAPACKPass when the two need to meet
complete = False
@staticmethod
def apply( xml_tree ):
selfname = LAPACKEFunctionDefinePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapacke_root = xml_tree.find( "./LAPACKE" )
procs_node = SubElement( lapacke_root, "procedures" )
lapacke_header = open( lapacke_include + "/lapacke.h" ).read()
for func_decl in c_func_decl_regex.finditer( lapacke_header ):
#if func_decl.group( "name" ).lower().startswith( "lapacke_" ): continue
if procs_node.find( "./procedure/[@name='" + func_decl.group( "name" ) + "']" ) != None:
#print "proc", func_decl.group( "name" ), "redefined. Skipping"
continue
proc_node = SubElement( procs_node, "procedure" )
proc_node.set( "name", func_decl.group( "name" ) )
proc_node.set( "return-type", func_decl.group( "returns" ) )
args_node = SubElement( proc_node, "arguments-list" )
arg_count = 0
for arg in c_args_regex.finditer( func_decl.group( "arguments" ) ):
arg_node = SubElement( args_node, "argument" )
arg_node.set( "name", arg.group( "name" ) )
arg_node.set( "type", arg.group( "type" ) )
arg_node.set( "refdepth", str( arg.group( "refdepth" ).count("*") ) )
if arg.group( "modifier" ) != None:
arg_node.set( "modifier", arg.group( "modifier" ) )
arg_node.set( "position", str(arg_count) )
arg_count += 1
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseLAPACKEPass ( Pass )
Purpose:
Ties together all passes over the lapacke.h text
and any basic analysis
'''
class BaseLAPACKEPass ( Pass ):
dependencies = [LAPACKEFunctionDefinePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseLAPACKEPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class CharacterArraySemanticsCorrectionPass ( Pass )
Purpose:
lapack fortran documentation defines character*1 (single characters) under
array semantics. This corrects that to be a scalar.
'''
class CharacterArraySemanticsCorrectionPass ( Pass ):
dependencies = [BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CharacterArraySemanticsCorrectionPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACK/procedures" )
for f_proc in lapack_f_procs.findall( "./procedure" ):
for f_arg in f_proc.findall( "./arguments-list/argument" ):
if f_arg.get( "type" ) == None:
continue
#if f_arg.get( "name" ) == "JOBA":
# print f_proc.get( "name" ), f_arg.get( "name" ), f_arg.get( "type" ).lower()
if f_arg.get( "type" ).lower() == "character*1":
# print f_proc.get( "name" ), f_arg.get( "name" ), f_arg.get( "type" ), f_arg.get( "semantic" ), f_arg.get( "intent" ), f_arg.get( "dimensions" )
if f_arg.get( "semantic" ) == "array":
f_arg.set( "semantic", "scalar" )
if f_arg.get( "dimensions" ) != None:
f_arg.unset( "dimensions" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ArgumentSemanticsBucketingPass ( Pass )
Purpose:
Heuristically assign argument semantics by bucketing all arguments.
Any argument that only has a None bucket and one other bucket can
'safely' have the entries in the None bucket assigned the semantics of the other.
'''
class ArgumentSemanticsBucketingPass ( Pass ):
dependencies = [CharacterArraySemanticsCorrectionPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ArgumentSemanticsBucketingPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACK/procedures" )
variables = {}
for proc in lapack_f_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
for arg in proc.findall( "./arguments-list/argument" ):
arg_name = arg.get( "name" )
semantic = arg.get( "semantic" ) if arg.get( "semantic" ) != None else "none"
if not arg_name in variables:
variables[ arg_name ] = {}
if not semantic in variables[ arg_name ]:
variables[ arg_name ][ semantic ] = []
variables[ arg_name ][ semantic ].append( proc_name )
for arg in variables:
if len( variables[ arg ] ) > 2:
print arg
for semantic in variables[ arg ]:
print " \"" + semantic + "\"", ":",variables[ arg ][ semantic ]
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class AssociateFunctionsLAPACKtoLAPACKEPass ( Pass )
Purpose:
link functions and args from both the C and Fortran world
together with paths from root.
'''
class AssociateFunctionsLAPACKtoLAPACKEPass ( Pass ):
dependencies = [BaseLAPACKEPass, BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = AssociateFunctionsLAPACKtoLAPACKEPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
lapack_f_root = xml_tree.find( "./LAPACK" )
lapack_f_procs = lapack_f_root.find( "./procedures" )
for lapack_c_proc in lapack_c_procs.findall( "./procedure" ):
proc_name = lapack_c_proc.get( "name" ).lower()
base_name = str()
if proc_name.startswith( "lapack_" ):
base_name = proc_name.replace( "lapack_", "" )
elif proc_name.startswith( "lapacke_" ):
base_name = proc_name.replace( "lapacke_", "" )
else:
print "Unknown root of name:", lapack_c_proc.get( "name" )
continue
base_name = base_name.replace( "_work", "" )
base_name = base_name.upper()
#print lapack_c_proc.get("name"), proc_name, base_name
lapack_f_proc = lapack_f_procs.find( "./procedure/[@name='" + base_name + "']" )
if lapack_f_proc == None:
#print "Could not find the fortran analogue of C function", lapack_c_proc.get( "name" ), "from base-name", base_name
continue
SubElement( SubElementUnique( lapack_c_proc, "analogues" ), "analogue" ).text = "./LAPACK/procedures/procedure/[@name='" + lapack_f_proc.get( "name" ) + "']"
SubElement( SubElementUnique( lapack_f_proc, "analogues" ), "analogue" ).text = "./LAPACKE/procedures/procedure/[@name='" + lapack_c_proc.get( "name" ) + "']"
'''
misses = []
for f_arg in lapack_f_proc.findall( "./arguments-list/argument" ):
f_arg_name = f_arg.get( "name" );
c_arg = lapack_c_proc.find( "./arguments-list/argument/[@name='" + f_arg_name.lower() + "']" )
# skip non-analogous args.
# TODO solve/mention matching failure somewhere? Maybe...
if c_arg == None:
#misses.append( f_arg_name )
continue
# ?_ana_node is the analogue record under ? language (ie c_ana_node notes the argument in the fortran tree, but lives in the C tree)
# Note that it is totally possible to create the path string from the two atributes of the tag.
# easier to create once here, instead of manytimes everywhere else.
c_ana_node = SubElement( SubElementUnique( c_arg, "analogues" ), "analogue" )
c_ana_node.text = "./LAPACK/procedures/procedure/[@name='" + lapack_f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']"
c_ana_node.set( "function", lapack_f_proc.get( "name" ) )
c_ana_node.set( "name", f_arg.get( "name" ) )
f_ana_node = SubElement( SubElementUnique( f_arg, "analogues" ), "analogue" )
f_ana_node.text = "./LAPACKE/procedures/procedure/[@name='" + lapack_c_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']"
f_ana_node.set( "function", lapack_c_proc.get( "name" ) )
f_ana_node.set( "name", c_arg.get( "name" ) )
'''
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DestroyUnassociatedCFunctionsTreePass ( Pass )
Purpose:
Remove procedures from LAPACKE subtree that do not have Fortran analogues
UNUSED
'''
class DestroyUnassociatedCFunctionsTreePass ( Pass ):
dependencies = [AssociateFunctionsLAPACKtoLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DestroyUnassociatedCFunctionsTreePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
for c_proc in lapack_c_procs.findall( "./procedure" ):
if c_proc.find( "./analogues" ) == None:
lapack_c_procs.remove( c_proc )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DestroyUnassociatedFortranFunctionsTreePass ( Pass )
Purpose:
Remove procedures from LAPACK subtree that do not have C analogues
'''
class DestroyUnassociatedFortranFunctionsTreePass ( Pass ):
dependencies = [AssociateFunctionsLAPACKtoLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DestroyUnassociatedFortranFunctionsTreePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_root = xml_tree.find( "./LAPACK" )
lapack_f_procs = lapack_f_root.find( "./procedures" )
for f_proc in lapack_f_procs.findall( "./procedure" ):
if f_proc.find( "./analogues" ) == None:
lapack_f_procs.remove( f_proc )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class EasyAssociateArgsPass ( Pass )
Purpose:
Create association between C and Fortran analogue function arguments
when that association is easy (ie they have the same name)
'''
class EasyAssociateArgsPass ( Pass ):
dependencies = [DestroyUnassociatedFortranFunctionsTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = EasyAssociateArgsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
c_procs = xml_tree.find( "./LAPACKE/procedures" )
for c_proc in c_procs.findall( "./procedure" ):
proc_name = c_proc.get( "name" ).lower()
supposed_f_ana_node = c_proc.find( "./analogues/analogue" )
if supposed_f_ana_node == None:
#print "Proc", c_proc.get( "name" ), "has no Fortran analogues. Skipping"
continue
f_proc = xml_tree.find( supposed_f_ana_node.text )
if f_proc == None:
print "BAD! No analogue where analogue should exist"
return
#continue
for f_arg in f_proc.findall( "./arguments-list/argument" ):
f_arg_name = f_arg.get( "name" );
c_arg = c_proc.find( "./arguments-list/argument/[@name='" + f_arg_name.lower() + "']" )
# skip non-analogous args.
if c_arg == None:
continue
# ?_ana_node is the analogue record under ? language (ie c_ana_node notes the argument in the fortran tree, but lives in the C tree)
# Note that it is totally possible to create the path string from the two attributes of the tag.
# It is easier to create it once here, instead of many times everywhere else.
c_ana_node = SubElement( SubElementUnique( c_arg, "analogues" ), "analogue" )
c_ana_node.text = "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']"
c_ana_node.set( "function", f_proc.get( "name" ) )
c_ana_node.set( "name", f_arg.get( "name" ) )
f_ana_node = SubElement( SubElementUnique( f_arg, "analogues" ), "analogue" )
f_ana_node.text = "./LAPACKE/procedures/procedure/[@name='" + c_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']"
f_ana_node.set( "function", c_proc.get( "name" ) )
f_ana_node.set( "name", c_arg.get( "name" ) )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportArgumentAnaloguesPass ( Pass )
Purpose:
Create argument associations of name per the input.xml
using the function association created automatically during runtime
'''
class ImportArgumentAssociationsPass ( Pass ):
dependencies = [EasyAssociateArgsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportArgumentAssociationsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
c_procs = xml_tree.find( "./LAPACKE/procedures" )
pass_input = Pass.input_xml.find( "./pass/[@name='ImportArgumentAnaloguesPass']" )
for in_proc in pass_input.findall( "./procedure" ):
c_proc = c_procs.find( "./procedure/[@name='" + in_proc.get( "name" ) + "']" )
f_proc = xml_tree.find( c_proc.find( "./analogues/analogue" ).text )
for in_arg in in_proc.findall( "./argument" ):
c_arg = c_proc.find( "./arguments-list/argument/[@name='" + in_arg.get( "name" ) + "']" )
f_arg = f_proc.find( "./arguments-list/argument/[@name='" + in_arg.get( "substitution" ) + "']" )
#prettyprintxml( c_arg )
if c_arg == None or f_arg == None:
raise GeneralPassFailure( "Argument specified in input not found in tree: " + c_proc.get("name") + ":" + in_arg.get("name") )
c_ana_node = SubElement( SubElementUnique( c_arg, "analogues" ), "analogue" )
c_ana_node.text = "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']"
c_ana_node.set( "function", f_proc.get( "name" ) )
c_ana_node.set( "name", f_arg.get( "name" ) )
f_ana_node = SubElement( SubElementUnique( f_arg, "analogues" ), "analogue" )
f_ana_node.text = "./LAPACKE/procedures/procedure/[@name='" + c_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']"
f_ana_node.set( "function", c_proc.get( "name" ) )
f_ana_node.set( "name", c_arg.get( "name" ) )
#prettyprintxml( c_proc )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseAssociatePass ( Pass )
Purpose:
Ties together all association of analogues pass
'''
class BaseAssociatePass ( Pass ):
dependencies = [EasyAssociateArgsPass, ImportArgumentAssociationsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseAssociatePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FoldLAPACKSemanticsIntentsToLAPACKEPass ( Pass ):
Purpose:
take the semantics derived from FuncArgsTypePass and
FuncArgsDocPass over the LAPACK information
and apply them to functions found in the lapacke.h code.
Especially important for the LAPACK_* C functions.
Also important for any LAPACKE_* C functions that take pointers
to scalars.
'''
class FoldLAPACKSemanticsIntentsToLAPACKEPass ( Pass ):
dependencies = [CharacterArraySemanticsCorrectionPass, BaseAssociatePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FoldLAPACKSemanticsIntentsToLAPACKEPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
for c_proc in lapack_c_procs.findall( "./procedure" ):
analogues = c_proc.findall( "./analogues/analogue" )
if len( analogues ) > 1:
#print "proc", c_proc.get( "name" ), "has", len( analogues ), "analogues. skipping"
continue
elif len( analogues ) == 0:
#print "skipping", c_proc.get( "name" )
continue
f_proc = xml_tree.find( analogues[0].text )
for c_arg in c_proc.findall( "./arguments-list/argument" ):
analogues = c_arg.findall( "./analogues/analogue" )
if len( analogues ) > 1:
#print "arg", c_arg.get( "name" ), "has", len( analogues ), "analogues. skipping"
#prettyprintxml( c_proc )
continue
elif len( analogues ) == 0:
continue
f_arg = xml_tree.find( analogues[0].text )
semantic = f_arg.get( "semantic" )
if semantic != None:
c_arg.set( "semantic", semantic )
if semantic == "array":
c_arg.set( "dimensions", f_arg.get( "dimensions" ) )
intent = f_arg.get( "intent" )
if intent != None:
c_arg.set( "intent", intent )
dimensions = f_arg.get( "dimensions" )
if dimensions != None:
c_arg.set( "dimensions", dimensions )
matrix_size = f_arg.get( "matrix-size" )
if matrix_size != None:
c_arg.set( "matrix-size", matrix_size )
associate_array = f_arg.get( "associate-array" )
if associate_array != None:
c_arg.set( "associate-array", associate_array )
associate_field = f_arg.get( "associate-field" )
if associate_field != None:
c_arg.set( "associate-field", associate_field )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportLAPACKESemanticsIntentsPass ( Pass )
Purpose:
Import semantics and intents for LAPACKE arguments
that may still be unspecified after folding through
associations.
Will overwrite semantics and intents issued by
FoldLAPACKSemanticsIntentsToLAPACKEPass
'''
class ImportLAPACKESemanticsIntentsPass ( Pass ):
dependencies = [FoldLAPACKSemanticsIntentsToLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportLAPACKESemanticsIntentsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='ImportLAPACKESemanticsIntentsPass']" )
for assign in pass_input.findall( "./assign" ):
for arg in xml_tree.findall( assign.get( "path" ) ):
semantic = assign.get( "semantic" )
intent = assign.get( "intent" )
if semantic == None and intent == None:
raise GeneralPassFailure( "assignment contains no semantic or intent attributes" + assign.get( "path" ) )
if semantic != None:
arg.set( "semantic", semantic )
if intent != None:
arg.set( "intent", intent )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class TypeSubstitutionPass ( Pass )
Purpose:
Token replacement pass of type tokens.
find-replace pairs are directly taken from input_xml file,
not inferred, detected, found, what-have-you.
No defining of types, purely text replacement.
applied to argument types and return-types of functions.
if replacement occurs, creates original-type and original-return-type
attributes that take the original value of the type and return-type attributes
Developer Note:
May move placement of TypeSubstitutionPass
since it is more related to Chapelizing than semantic
transformations like folding.
'''
class TypeSubstitutionPass ( Pass ):
dependencies = [BaseLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = TypeSubstitutionPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='TypeSubstitutionPass']" )
procs = xml_tree.find( "./LAPACKE/procedures" )
subs = {}
for sub in pass_input.findall( "./substitution" ):
subs[ sub.get( "find" ) ] = sub.get( "replace" )
for proc in procs.findall( "./procedure" ):
proc_type = proc.get( "return-type" )
if proc_type in subs:
proc.set( "original-return-type", proc_type )
proc.set( "return-type", subs[ proc_type ] )
for arg in proc.findall( "./arguments-list/argument" ):
arg_type = arg.get( "type" )
if arg_type in subs:
arg.set( "original-type", arg_type )
arg.set( "type", subs[ arg_type ] )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportArgumentTypePass ( Pass )
Purpose:
take arguments from the input file and retype
all arguments of the same name within the LAPACKE
tree to be of the type specified.
'''
class ImportArgumentTypePass ( Pass ):
dependencies = [TypeSubstitutionPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportArgumentTypePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='ImportArgumentTypePass']" )
for proc in xml_tree.findall( "./LAPACKE/procedures/procedure" ):
for arg in proc.findall( "./arguments-list/argument" ):
find = pass_input.find( "./argument/[@name='" + arg.get("name") + "']" )
if find == None:
continue
arg.set("type", find.get("type" ) )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseTransformLAPACKEPass ( Pass )
Purpose:
Ties together any transformation passes on the LAPACKE tree
that are unrelated to Chapelizing
Developer Note:
May move placement of TypeSubstitutionPass
since it is more related to Chapelizing than semantic
transformations like folding.
'''
class BaseTransformLAPACKEPass( Pass ):
dependencies = [BaseLAPACKEPass, TypeSubstitutionPass, FoldLAPACKSemanticsIntentsToLAPACKEPass, ImportLAPACKESemanticsIntentsPass, ImportArgumentTypePass ]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseTransformLAPACKEPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class CreateChapelModuleTreePass ( Pass )
Purpose:
Create chapel-module root, procedures, type-defines, const-defines subtrees
general setup for Chapelization and code generation
'''
class CreateChapelModuleTreePass ( Pass ):
dependencies = [ CreateTreePass ]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CreateChapelModuleTreePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
chpl_module = SubElement( xml_tree, "chapel-module" )
procedures = SubElement( chpl_module, "procedures" )
types = SubElement( chpl_module, "type-defines" )
defines = SubElement( chpl_module, "const-defines" )
enums = SubElement( chpl_module, "enum-defines")
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelizeLAPACKE_FunctionsPass ( Pass )
Purpose:
take all LAPACKE_* functions defined in <LAPACKE> tree,
bust them apart to provide most information for later passes on
Chapelizing the LAPACKE_* functions
'''
class ChapelizeLAPACKEFunctionsPass ( Pass ):
dependencies = [BaseTransformLAPACKEPass, CreateChapelModuleTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelizeLAPACKEFunctionsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapacke_root = xml_tree.find( "./LAPACKE" )
lapacke_procs = lapacke_root.find( "./procedures" )
module_root = xml_tree.find( "./chapel-module" )
module_procs = module_root.find( "./procedures" )
proc_count = 0
for proc in lapacke_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
module_proc = SubElement( module_procs, "procedure" )
module_proc.set("name", proc_name)
module_proc.set("return-type", proc.get( "return-type" ) )
module_proc_args = SubElement( module_proc, "arguments-list" )
for arg in proc.findall( "./arguments-list/argument" ):
#prettyprintxml( arg )
module_arg = SubElement( module_proc_args, "argument" )
module_arg.set( "name", arg.get("name") )
module_arg.set( "position", arg.get( "position" ) )
module_arg.set( "type", arg.get( "type" ) )
arg_semantic = arg.get( "semantic" ) if arg.get( "semantic" ) != None else ""
arg_intent = arg.get( "intent" ) if arg.get( "intent" ) != None else ""
arg_refs = int( arg.get( "refdepth" ) )
dimensions = arg.get( "dimensions" )
if dimensions != None:
module_arg.set( "dimensions", dimensions )
matrix_size = arg.get( "matrix-size" )
if matrix_size != None:
module_arg.set( "matrix-size", matrix_size )
associate_array = arg.get( "associate-array" )
if associate_array != None:
module_arg.set( "associate-array", associate_array )
associate_field = arg.get( "associate-field" )
if associate_field != None:
module_arg.set( "associate-field", associate_field )
intent = None #"BADSTATE " + arg_semantic + " " + arg_intent + " " + arg_refs
semantic = None #"BADSTATE " + arg_semantic + " " + arg_intent + " " + arg_refs
if arg_refs == 0:
if arg_semantic == "array":
raise GeneralPassFailure( "Attempted array semantic with 0 refdepth " + proc_name + " " +arg.get("name") )
semantic = "scalar"
intent = "none"
if arg_refs == 1:
if arg_semantic == "array":
semantic = "array"
intent = "none"
else:
semantic = "scalar"
intent = "ref"
module_arg.set( "intent", intent )
module_arg.set( "semantic", semantic )
#module_proc.set( "category", "direct" )
proc_count += 1
print "Chapelized", proc_count, "LAPACKE functions"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class TranslateChapelKeywordsPass ( Pass ):
Purpose:
taking from the input xml file a list of chapel keywords
changes the text of argument names
'''
class TranslateChapelKeywordsPass ( Pass ):
dependencies = [ChapelizeLAPACKEFunctionsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = TranslateChapelKeywordsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='TranslateChapelKeywordsPass']" )
global_info = pass_input.find( "./global" )
chpl_module_procs = xml_tree.find( "./chapel-module/procedures" )
global_pre = "" if global_info == None \
or global_info.get( "prefix" ) == None \
else global_info.get( "prefix" )
global_suf = "" if global_info == None \
or global_info.get( "suffix" ) == None \
else global_info.get( "suffix" )
keywords = {}
for keyword in pass_input.findall( "./keyword" ):
symbol = keyword.get( "symbol" )
replacement = "" if keyword.get( "replacement" ) == None \
else keyword.get( "replacement" )
if replacement == "" and global_pre == "" and global_suf == "":
raise GeneralPassFailure( "If no global prefix or suffix is defined, a replacement for a symbol must be defined. (" + symbol + ")" )
keywords[ symbol ] = replacement
for proc in chpl_module_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
#print proc_name
# Note: This will break includes if we go
# about replacing their names.
# arguments are fine because nobody cares about
# their names at the late stage of linking
'''
if proc_name in keywords:
if keywords[ proc_name ] == "":
proc_name = global_pre + proc_name + global_suf
else:
proc_name = keywords[ proc_name ]
proc.set( "name", proc_name )
'''
for arg in proc.findall( "./arguments-list/argument" ):
arg_name = arg.get( "name" )
#print "\t",arg_name
if arg_name in keywords:
if keywords[ arg_name ] == "":
arg_name = global_pre + arg_name + global_suf
else:
arg_name = keywords[ arg_name ]
#print "\t\t=>",arg_name
arg.set( "name", arg_name )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelerrificLAPACKEFunctionsPass ( Pass ):
Purpose:
Create Chapel-errific LAPACKE functions that take Chapel arrays and abstract
away the dimensions of the arrays and matrices that are stored within.
'''
class ChapelerrificLAPACKEFunctionsPass ( Pass ):
dependencies = [ChapelizeLAPACKEFunctionsPass, TranslateChapelKeywordsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelerrificLAPACKEFunctionsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
chapel_module = xml_tree.find( "./chapel-module" )
chapel_procedures = chapel_module.find( "./procedures" )
for proc in chapel_procedures.findall( "./procedure" ):
proc_name = proc.get( "name" )
if proc_name.startswith( "LAPACK_" ) or proc_name.endswith( "_work" ):
continue
base_name = proc_name.replace( "LAPACKE_", "" )
match = func_name_group_regex.search( base_name )
if match == None:
#print proc_name, "(", base_name, ") does not match regex"
continue
func = match.group( "function" )
config = match.group( "config" )
type = match.group( "type" )
if not config.startswith( "ge" ):
pass
#proc = copy.deepcopy( chpl_proc )
args_node = proc.find( "./arguments-list" )
args_list = [ ]
args_names = []
remove_list = set()
pass_through = {}
for arg in args_node.findall( "./argument" ):
args_list.append( arg )
args_names.append( arg.get("name") )
pass_through[ arg.get("name") ] = arg.get( "name" )
for arg in args_list:
if arg.get( "semantic" ) != "array" :
continue
if arg.get( "dimensions" ) != None:
dimensions = arg.get( "dimensions" ).lower().split(",")
for i in range( len(dimensions) ):
dimension = dimensions[i]
if dimension == "*":
continue
removeVar = None
for find in args_list:
if find.get( "name" ) == dimension:
removeVar = find
break
if removeVar != None:
remove_list.add( removeVar.get("name") )
pass_through[ dimension ] = "(" + arg.get("name") + ".domain.dim("+str(2-i)+").size) : c_int"
'''
else:
print ( dimension + " is not described in the arguments of "+proc.get( "name" ) + " for argument " + arg.get("name") )
'''
if arg.get( "matrix-size" ) != None:
matrix_size = arg.get( "matrix-size" ).lower()
rows = matrix_size.split(",")[0].strip()
cols = matrix_size.split(",")[1].strip()
removeRows = None
removeCols = None
for find in args_list:
if find.get( "name" ) == rows:
removeRows = find
if find.get( "name" ) == cols:
removeCols = find
if removeRows != None and removeCols != None:
pass_through[ rows ] = "(if matrix_order == lapack_memory_order.row_major then " + arg.get("name") + ".domain.dim(1).size else " + arg.get("name") + ".domain.dim(2).size) : c_int"
pass_through[ cols ] = "(if matrix_order == lapack_memory_order.row_major then " + arg.get("name") + ".domain.dim(2).size else " + arg.get("name") + ".domain.dim(1).size) : c_int"
remove_list.add( removeRows.get("name") )
remove_list.add( removeCols.get("name") )
'''
else:
print ( rows + " and " + cols + " are not described in the arguments of "+proc.get( "name" ) )
'''
for arg in args_list:
if arg.get( "semantic" ) != "scalar" :
continue
if arg.get( "type" ) == "c_char":
pass_through[ arg.get("name") ] = arg.get( "name" ) + ".byte(1) : c_char"
associate_array_str = arg.get( "associate-array" )
associate_field_str = arg.get( "associate-field" )
if associate_array_str != None:
array_field_map = {}
arrays = associate_array_str.split(",")
fields = associate_field_str.split(",")
array = ""
field = ""
for i in range( len( arrays ) ) :
arrays[i] = arrays[i].lower()
fields[i] = fields[i].lower()
array_field_map[ arrays[i] ] = fields[i]
for associate_array in arrays:
if associate_array in args_names:
array = associate_array
field = fields[ arrays.index( array ) ]
break
if field == "rows":
pass_through[ arg.get("name") ] = "(if matrix_order == lapack_memory_order.row_major then " + array + ".domain.dim(1).size else " + array + ".domain.dim(2).size) : c_int"
elif field == "columns":
pass_through[ arg.get("name") ] = "(if matrix_order == lapack_memory_order.row_major then " + array + ".domain.dim(2).size else " + array + ".domain.dim(1).size) : c_int"
elif field == "order" or field == "rank":
pass_through[ arg.get("name") ] = "(" + array + ".domain.dim(1).size) : c_int"
else:
raise GeneralPassFailure( field + " is not a recognized array association field" )
remove_list.add( arg.get("name") )
pass_through_node = SubElement( proc, "pass-through-arguments-list" )
for arg in args_node.findall( "./argument" ):
passing = copy.deepcopy( arg )
passing.text = pass_through[ arg.get( "name" ) ]
pass_through_node.append( passing )
for arg in args_node:
if arg.get("name") in remove_list:
arg.set( "pass-up", "false" )
else:
arg.set( "pass-up", "true" )
'''
for arg in args_node:
if arg.get( "name" ) == "matrix_order":
arg.text = "LAPACK_ROW_MAJOR"
'''
#proc.set( "category", "chapelerrific" )
#proc.set( "call", proc.get("name") )
#proc.set( "name", proc.get("name").replace( "LAPACKE_", "" ) )
#chapel_procedures.append( proc )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseChapelizePass ( Pass )
Purpose:
Tie together all chapelization passes.
After this point, no more transformations on the
code should occur.
'''
class BaseChapelizePass ( Pass ):
dependencies = [ CreateChapelModuleTreePass, ChapelizeLAPACKEFunctionsPass, TranslateChapelKeywordsPass, ChapelerrificLAPACKEFunctionsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseChapelizePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportTypeDefinesPass ( Pass )
Purpose:
from input xml sets up tags that will be used
to generate typedefs in the module
'''
class ImportTypeDefinesPass ( Pass ):
dependencies = [CreateChapelModuleTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportTypeDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='ImportTypeDefinesPass']" )
module_types = xml_tree.find( "./chapel-module/type-defines" )
for define in pass_input.findall( "./define" ):
module_types.append( define )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportExternConstDefinesPass ( Pass )
Purpose:
from input xml set up tags that will be used
to generate extern const definitions
'''
class ImportExternConstDefinesPass ( Pass ):
dependencies = [CreateChapelModuleTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportExternConstDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='ImportExternConstDefinesPass']" )
module_defs = xml_tree.find( "./chapel-module/const-defines" )
for define in pass_input.findall( "./define" ):
module_defs.append( define )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportEnumeratedTypeDefinesPass ( Pass )
Purpose:
from input xml set up tags that will be used
to generate local enum definitions
'''
class ImportEnumeratedTypeDefinesPass ( Pass ):
dependencies = [CreateChapelModuleTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportEnumeratedTypeDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='ImportEnumeratedTypeDefinesPass']" )
module_defs = xml_tree.find( "./chapel-module/enum-defines" )
for enumeration in pass_input.findall( "./enumeration" ):
module_defs.append( enumeration )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseImportPass ( Pass )
Purpose:
Ties together all passes that import from input xml
'''
class BaseImportPass ( Pass ):
dependencies = [ImportTypeDefinesPass, ImportExternConstDefinesPass, ImportEnumeratedTypeDefinesPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseImportPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseCodegenReadyPass ( Pass )
Purpose:
Ties together all passes that must be completed before all codegen could be done
'''
class BaseCodegenReadyPass ( Pass ):
dependencies = [BaseImportPass, BaseChapelizePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseCodegenReadyPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleExternProcPass ( Pass )
Purpose:
generate chapel code at each procedure in the <chapel-module>
from the details of each procedure.
these are raw, basic extern procs of these functions.
'''
class ChapelModuleExternProcPass ( Pass ):
dependencies = [BaseChapelizePass, BaseImportPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelModuleExternProcPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapacke_root = xml_tree.find( "./LAPACKE" )
lapacke_procs = lapacke_root.find( "./procedures" )
module_root = xml_tree.find( "./chapel-module" )
module_procs = module_root.find( "./procedures" )
proc_count = 0
for proc in module_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
basename = proc_name.replace( "LAPACK_", "" ).replace( "LAPACKE_", "" ).upper()
lapack_node = xml_tree.find( "./LAPACK/procedures/procedure/[@name='" + basename + "']" )
purpose = "" #"For more information, see the documentation for :proc:`" + proc_name + "`, or consult the Netlibs or Intel documentation.\n"
''' #TODO get legal approval for Documentation inclusion.
if lapack_node == None or lapack_node.find( "./purpose" ) == None or lapack_node.find( "./purpose" ).text == None:
purpose = ""
else:
purpose = re.sub( r"[ \t]+", " ", lapack_node.find( "./purpose" ).text )
'''
proc_args = proc.findall( "./arguments-list/argument" )
ordered_args = [None] * len( proc_args )
for arg in proc_args:
ordered_args[ int( arg.get( "position" ) ) ] = arg
def_code = SegmentProducer( "extern proc " + proc_name )
args_code = ListProducer( ", ", "(", ")" )
for arg in ordered_args:
args_code.append( SegmentProducer(
("" if arg.get("intent") == "none" else arg.get("intent") + " ") + \
arg.get("name") + " : " + \
("[] " if arg.get("semantic") == "array" else "") + \
arg.get("type")
)
)
return_code = LineProducer( " : " + proc.get( "return-type" ) + ";" )
#doc_comment = CommentProducer( "\nExternal Procedure to " + proc_name + "\n" + ("\nOriginal Fortran LAPACK documentation for " + basename + "::\n\n " + purpose + "\n\n" if purpose != "" else "") )
#doc_comment = CommentProducer( "\nExternal Procedure to " + proc_name + "\n" + purpose + "\n" )
code = SequenceOfProducers()
#code.append( doc_comment )
code.append( def_code )
code.append( args_code )
code.append( return_code )
code_node = SubElement( proc, "code" )
code_node.set( "category", "extern proc" )
code_node.text = code.generate()
proc_count += 1
print "Generated code for", proc_count, "functions"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleStringToCharWrapperProcPass ( Pass )
Purpose:
Create string wrappers for all of the generated external procs from
ChapelModuleExternProcPass
'''
class ChapelModuleStringToCharWrapperProcPass ( Pass ):
dependencies = [ChapelModuleExternProcPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelModuleStringToCharWrapperProcPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
module_root = xml_tree.find( "./chapel-module" )
module_procs = module_root.find( "./procedures" )
proc_count = 0
for proc in module_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
if proc_name.startswith( "LAPACK_" ):
continue
proc_args = proc.findall( "./arguments-list/argument" )
ordered_args = [None] * len( proc_args )
char_flag = False
for arg in proc_args:
ordered_args[ int( arg.get( "position" ) ) ] = arg
char_flag = arg.get( "type" ) == "c_char" or char_flag
# skip procedures that dont have char arguments
if not char_flag:
continue
code = SequenceOfProducers()
code.append( SegmentProducer( "inline proc " + proc_name ) )
args_code = ListProducer( ", ", "(", ")" )
for arg in ordered_args:
args_code.append( SegmentProducer(
("" if arg.get("intent") == "none" else arg.get("intent") + " ") + \
arg.get("name") + " : " + \
("[] " if arg.get("semantic") == "array" else "") + \
( arg.get("type") if arg.get("type") != "c_char" else "string" )
)
)
code.append( args_code )
code.append( SegmentProducer( " : " + proc.get( "return-type" ) ) )
func_body = ScopeProducer()
call_args_producer = ListProducer( ", ", "(", ")" )
for pass_arg in ordered_args:
call_args_producer.append( SegmentProducer( ( pass_arg.get("name" ) if pass_arg.get("type") != "c_char" else pass_arg.get( "name" ) + ".byte(1) : c_char" ) ) )
func_body.append( SegmentProducer( ( "return " if proc.get("return-type") != "void" else "" ) + proc.get("name") ) + call_args_producer + LineProducer( ";" ) )
code.append( func_body )
#code.prepend( CommentProducer( "\nString wrapped procedure of " + proc_name + "\n" ) )
code_node = SubElement( proc, "code" )
code_node.set( "category", "string wrapped" )
code_node.text = code.generate()
proc_count += 1
print "Generated code for", proc_count, "functions"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleChapelerrificProcPass ( Pass )
Purpose:
Generate code for Chapel-errific upward facing procedures
'''
class ChapelModuleChapelerrificProcPass ( Pass ):
dependencies = [ChapelModuleExternProcPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelModuleChapelerrificProcPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
module_root = xml_tree.find( "./chapel-module" )
module_procs = module_root.find( "./procedures" )
pass_info = Pass.input_xml.find( "./pass/[@name='ChapelModuleChapelerrificProcPass']" )
helper_use = pass_info.find("./use").text
proc_count = 0
no_repeat = set()
iterative_functions = set()
for case in pass_info.findall("./cases/case" ):
iterative_functions.add( case.get("name") )
for proc in module_procs.findall( "./procedure" ):
if proc.find( "./pass-through-arguments-list" ) == None:
continue
base_name = proc.get("name").replace( "LAPACKE_", "" )
match = func_name_group_regex.search( base_name )
if match == None:
#print proc.get("name"), "(", base_name, ") does not match regex"
continue
func = match.group( "function" )
config = match.group( "config" )
type = match.group( "type" )
typeToTypeString = { "s" : "real(32)",
"d" : "real(64)",
"c" : "complex(64)",
"z" : "complex(128)",
"ds" : "real(64)",
"zc" : "complex(128)"
}
typeMap = {
"c_float" : "real(32)",
"c_double" : "real(64)",
"c_char" : "string"
}
if (type == "ds" or type == "zc") and not config+func in iterative_functions:
temp_type = type[0]
temp_config = type[1]+config[0]
temp_func = config[1] + func
type = temp_type
config = temp_config
func = temp_func
for name_category in [ (config+func, "untyped chapelerrific") ]: # (type+config+func, "chapelerrific")
[proc_name, category_name] = name_category
code = SequenceOfProducers()
purpose = ""
lapack_node = xml_tree.find( "./LAPACK/procedures/procedure/[@name='" + base_name.upper() + "']" )
purpose = "" #"For more information, see the documentation for :proc:`" + proc_name + "`, or consult the Netlibs or Intel documentation.\n"
''' #TODO get legal approval for Documentation inclusion.
if proc_name in no_repeat:
purpose = "For more information, see the documentation for :proc:`" + proc_name + "`, or consult the Netlibs or Intel documentation.\n"
elif lapack_node == None or lapack_node.find( "./purpose" ) == None or lapack_node.find( "./purpose" ).text == None:
prupose = ""
else:
purpose = ("Original Fortran LAPACK purpose documentation for " + base_name.upper() + "::\n\n " + re.sub( r"[ \t]+", " ", lapack_node.find( "./purpose" ).text ) + "\n\n" )
'''
proc_args = proc.findall( "./arguments-list/argument" )
ordered_args = [None] * len( proc_args )
for arg in proc_args:
ordered_args[ int( arg.get( "position" ) ) ] = arg
code.append( SegmentProducer( "inline proc " + proc_name ) )
args_doc = str()
args_producer = ListProducer(", ", "(", ")")
for arg in ordered_args:
if arg.get("pass-up") == "true":
args_producer.append( SegmentProducer(
("" if arg.get("intent") == "none" else arg.get("intent") + " ") + \
arg.get("name") + " : " + \
("[] " if arg.get("semantic") == "array" else "") + \
( arg.get("type") if not arg.get("type") in typeMap else typeMap[ arg.get("type") ] ) + \
( " = " + arg.text if arg.text != None and arg.text.strip() != "" else "" )
)
)
if lapack_node == None or arg.get("name") == "matrix_order":
continue
#print "./arguments-list/argument/[@name='" + arg.get("name").upper() + "']"
lapack_arg_node = lapack_node.find( "./arguments-list/argument/[@name='" + arg.get("name").upper() + "']" )
if lapack_arg_node == None:
continue
#prettyprintxml( lapack_arg_node )
''' #TODO get legal approval for Documentation inclusion.
if (not proc_name in no_repeat) and lapack_arg_node.find( "./documentation" ) != None:
#arg_doc = " " + arg.get(arg.get("name").upper() + " : " + arg.get("type") + ( "" if arg.get("intent") == "none" else arg.get("intent").strip() ) + "\n"
text = re.sub( r"\n", "\n ", re.sub( r"[ \t]+", " ", lapack_node.find( "./arguments-list/argument/[@name='" + arg.get("name").upper() + "']/documentation" ).text ) )
arg_doc = " " + text + "\n\n"
if args_doc == "":
args_doc = "Original Fortran LAPACK argument documentation for " + base_name.upper() + "::\n\n"
args_doc += arg_doc
'''
#args_doc += "\n\n"
#code.prepend( CommentProducer( "\n" + ("Polymorphic " if category_name == "untyped chapelerrific" else "" ) + "Chapel idiomatic procedure of " + proc.get("name") + " for the type " + typeToTypeString[type] + ".\n\n" + purpose + args_doc ) )
code.prepend( CommentProducer( "\n" + "Wrapped procedure of " + proc.get("name") + " for the type " + typeToTypeString[type] + ".\n") )
code.append( args_producer )
code.append( SegmentProducer( ": " + proc.get( "return-type" ) ) )
func_body = ScopeProducer()
call_args_producer = ListProducer( ", ", "(", ")" )
for pass_arg in proc.findall( "./pass-through-arguments-list/argument" ):
call_args_producer.append( SegmentProducer( pass_arg.text ) )
func_body.append( SegmentProducer( ( "return " if proc.get("return-type") != "void" else "" ) + helper_use + "." + proc.get("name") ) + call_args_producer + LineProducer( ";" ) )
code.append( func_body )
code_node = SubElement( proc, "code" )
code_node.set( "category", category_name )
code_node.text = code.generate()
no_repeat.add( proc_name )
proc_count += 1
print "Generated code for", proc_count, "functions"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleExternTypeDefinesPass ( Pass )
Purpose:
from the imported external type defines generate
external type code at each define tag
'''
class ChapelModuleExternTypeDefinesPass ( Pass ):
dependencies = [BaseChapelizePass, BaseImportPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelModuleExternTypeDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
module_defs = xml_tree.find( "./chapel-module/type-defines" )
for define in module_defs.findall( "./define" ):
def_str = ("/*"+ define.find("./description").text + "*/\n" if define.find("./description") != None else "")
if define.get( "external" ) != None and define.get("external").lower() == "yes":
def_str += "extern "
def_str += "type " + define.get( "alias" ) + " "
if define.get( "base-type" ) != None:
def_str += "= " + define.get( "base-type" )
def_str += ";"
SubElement( define, "code" ).text = def_str
#prettyprintxml( module_defs )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleExternConstDefinesPass ( Pass ):
Purpose:
from the imported external const defines generate
extern const code at each define tag
'''
class ChapelModuleExternConstDefinesPass ( Pass ):
dependencies = [BaseChapelizePass, BaseImportPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelModuleExternConstDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
module_defs = xml_tree.find( "./chapel-module/const-defines" )
for define in module_defs.findall( "./define" ):
def_str = ("/*"+ define.find("./description").text + "*/\n" if define.find("./description") != None else "")
if define.get( "external" ) != None and define.get( "external" ).lower() == "yes":
def_str += "extern "
def_str += "const " + define.get( "symbol" ) + " : " + define.get( "type" ) + " "
if define.get( "value" ) != None:
def_str += " = " + define.get( "value" )
def_str += ";"
SubElement( define, "code" ).text = def_str
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleEnumDefinesPass ( Pass ):
Purpose:
from the imported enumeration defines generate
enum code at each enumeration tag
'''
class ChapelModuleEnumDefinesPass ( Pass ):
dependencies = [BaseChapelizePass, BaseImportPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelModuleEnumDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
module_defs = xml_tree.find( "./chapel-module/enum-defines" )
for define in module_defs.findall( "./enumeration" ):
values = ListProducer(", ", "{", "}")
for value in define.findall( "./value" ):
values.append( SegmentProducer( value.get("name") + ( " = " + value.text if value.text != None and value.text.strip() != "" else "" ) ) )
description_node = define.find("./description")
if description_node != None:
SubElement( define, "code" ).text = CommentProducer( description_node.text ).generate() + ( SegmentProducer( "enum " + define.get("name") ) + values + LineProducer(";") ).generate()
else:
SubElement( define, "code" ).text = ( SegmentProducer( "enum " + define.get("name") ) + values + LineProducer(";") ).generate()
#prettyprintxml( module_defs )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseCodeGenerationPass ( Pass )
Purpose:
Ties together all code generation passes before
any text dumping into a file.
'''
class BaseCodeGenerationPass ( Pass ):
dependencies = [ChapelModuleExternProcPass, ChapelModuleStringToCharWrapperProcPass, ChapelModuleChapelerrificProcPass, ChapelModuleExternTypeDefinesPass, ChapelModuleExternConstDefinesPass, ChapelModuleEnumDefinesPass ]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseCodeGenerationPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DumpCodePass ( Pass )
Purpose:
traverses the <chapel-module> tree, collecting generated code text,
and gently places it into the file defined in the input xml
pass information
'''
class DumpCodePass ( Pass ):
dependencies = [BaseCodeGenerationPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DumpCodePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='DumpCodePass']" )
module_root = xml_tree.find( "./chapel-module" )
ChaLAPACK_info = pass_input.find( "./main-module" )
helper_info = pass_input.find( "./helper-module" )
module_name = ChaLAPACK_info.get( "name" )
module_file = open( ChaLAPACK_info.get( "file-name" ), "w" )
module_file.write( pass_input.find("copyright").text )
module_file.write( "/*\n" + ChaLAPACK_info.find("./description").text + "\n*/\n" )
module_file.write( "module " + module_name + " {\n" )
for use in ChaLAPACK_info.findall( "./use" ):
module_file.write( "use " + use.text + ";\n" )
module_file.write( "\n" )
# inject types, consts, enums
for defn in module_root.findall( "./type-defines/define" ):
module_file.write( defn.find("./code").text + "\n" )
module_file.write( "\n\n" )
for defn in module_root.findall( "./const-defines/define" ):
module_file.write( defn.find("./code").text + "\n" )
module_file.write( "\n\n" )
for defn in module_root.findall( "./enum-defines/enumeration" ):
module_file.write( defn.find("./code").text + "\n" )
module_file.write( "\n" )
# inject helper module
if helper_info.get("no-doc") == "all":
module_file.write( "pragma \"no doc\"\n" )
module_file.write( "/*\n" + helper_info.find( "./description" ).text + "\n*/\n" )
module_file.write( "module " + helper_info.get("name") + " {\n" )
for use in helper_info.findall( "./use" ):
module_file.write( "use " + use.text + ";\n" )
module_file.write( "\n" )
nodoc_helper_procs = helper_info.get("no-doc") == "internals" or helper_info.get("no-doc") == "procedures" or helper_info.get("no-doc") == "all"
for proc in module_root.findall( "./procedures/procedure" ):
code = proc.find( "./code/[@category='extern proc']")
if code != None:
if nodoc_helper_procs:
module_file.write( "pragma \"no doc\"\n" )
module_file.write( code.text + "\n" )
code = proc.find( "./code/[@category='string wrapped']")
if code != None:
if nodoc_helper_procs:
module_file.write( "pragma \"no doc\"\n" )
module_file.write( code.text + "\n" )
module_file.write( "} // " + helper_info.get("name") + "\n" )
for proc in module_root.findall( "./procedures/procedure" ):
code = proc.find( "./code/[@category='untyped chapelerrific']" )
if code != None:
module_file.write( code.text + "\n" )
module_file.write("} // " + module_name + "\n")
module_file.close()
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
Below are passes that were used to explore the LAPACK source, or that were removed from the main set.
EXTREME caution is advised if using them. They may not (and probably do not) work with the current set of main passes.
'''
class CountFunctions( Pass ):
dependencies = [BaseLAPACKPass, BaseLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CountFunctions
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack = 0
for proc in xml_tree.findall( "./LAPACK/procedures/procedure" ):
lapack += 1
lapacke = 0
for proc in xml_tree.findall( "./LAPACKE/procedures/procedure" ):
lapacke += 1
print "LAPACK", lapack, "LAPACKE", lapacke
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class CreateAbstractLAPACKTreePass ( Pass )
Purpose:
Create Abstract-LAPACK tree
'''
class CreateAbstractLAPACKTreePass ( Pass ):
dependencies = [BaseChapelizePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CreateAbstractLAPACKTreePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = SubElement( xml_tree, "Abstract-LAPACK" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BucketLAPACKFunctionGroups ( Pass )
Purpose:
bucket LAPACK functions by their base function, type, and matrix type
'''
class BucketLAPACKFunctionGroupsPass ( Pass ):
dependencies = [CreateAbstractLAPACKTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BucketLAPACKFunctionGroupsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
module_root = xml_tree.find( "./chapel-module" )
module_procs = module_root.find( "./procedures" )
funcs = set()
for proc in module_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
#print proc_name
if proc_name in funcs:
raise GeneralPassFailure( "DOUBLE HIT " + proc_name )
else:
funcs.add( proc_name )
# we only care about LAPACKE_ functions
if proc_name.startswith( "LAPACK_" ) or proc_name.endswith( "_work" ):
continue
base_name = proc_name.replace( "LAPACKE_", "" ) #.replace( "_work", "" )
match = func_name_group_regex.search( base_name );
if match == None:
print proc_name, "(", base_name, ") does not match regex"
continue
func = match.group( "function" )
config = match.group( "config" )
type = match.group( "type" )
if config != "ge":
continue
group_node = abstract_lapack.find( "./group/[@name='" + func + "']" )
if group_node == None:
group_node = SubElement( abstract_lapack, "group" )
group_node.set("name", func )
config_node = group_node.find( "./matrix-configuration/[@name='" + config + "']" )
if config_node == None:
config_node = SubElement( group_node, "matrix-configuration" )
config_node.set( "name", config )
if config_node.find( "./types/type/[@name='" + type + "']" ) != None:
print "Double declaration of abstract LAPACK function", type, config, func, base_name, proc_name
continue
#prettyprintxml( config_node.find( "./type/[@name='" + type + "']" ) )
types_node = SubElementUnique( config_node, "types" )
type_node = SubElement( types_node, "type" )
type_node.set( "name", type )
type_node.set( "analogue", "./chapel-module/procedures/procedure/[@name='" + proc_name + "']" )
#prettyprintxml( abstract_lapack )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class ImportAbstractLAPACKFunctionsPass ( Pass ):
dependencies = [BaseCodegenReadyPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportAbstractLAPACKFunctionsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
module_defs = xml_tree.find( "./chapel-module/procedures" )
group_input = Pass.input_xml.find( "./pass/[@name='ImportAbstractLAPACKFunctionsPass']" )
proc_count = 0
for group in group_input.findall( "./group" ):
for config in group.findall( "./matrix-configuration" ):
code = SequenceOfProducers()
proc_name = config.get( "name" ) + group.get( "name" )
code.append( SegmentProducer( "proc " + proc_name ) )
args_producer = ListProducer(", ", "(", ")")
for arg in config.findall( "./method-arguments/argument" ):
args_producer.append( SegmentProducer(
arg.get("intent") + " " + \
arg.get("name") + " : " + \
("[] " if arg.get("semantic") == "array" else "") + \
arg.get("type") + \
( " = " + arg.text if arg.text != None and arg.text.strip() != "" else "" )
)
)
code.append( args_producer )
code.append( LineProducer( ": lapack_int" ) )
code.append( SegmentProducer( "where " ) )
where_producer = ListProducer( " || ", "", "" )
for type in config.findall("./types/type"):
where_producer.append( SegmentProducer( "T == " + type.get( "type" ) ) )
code.append( where_producer )
info_var = config.get( "name" ) + group.get( "name" ) + "_return_info"
func_body = ScopeProducer()
func_body.append( LineProducer( "var " + info_var + " : lapack_int;" ) )
#if_bodies = SequenceOfProducers()
arg_relates = {}
ana_args = []
for arg in config.findall( "./analogue-arguments-list/argument" ):
arg_name = arg.get("name")
arg_relates[ arg_name ] = config.find( "./arguments-relationships/argument/[@name='" + arg_name + "']" )
ana_args.append( arg )
for type in config.findall("./types/type"):
chpl_ana = xml_tree.find( type.get( "analogue" ) )
if_condition = LineProducer( "if ( T == " + type.get("type") + " )" )
func_body.append( if_condition )
if_body = ScopeProducer()
call_equals = SegmentProducer( info_var + " = " + chpl_ana.get( "name" ) )
call_seq = ListProducer( ", ", "(", ")" )
for ana_arg in ana_args:
call_seq.append( SegmentProducer(
"(" + arg_relates[ana_arg.get("name")].text.strip() + ")" + \
(" : " + ana_arg.get("type") if ana_arg.get("semantic") != "array" else "")
)
)
if_body.append( call_equals + call_seq + LineProducer( ";" ) )
func_body.append( if_body )
func_body.append( LineProducer( "return " + info_var + ";" ) )
code.append( func_body )
module_proc = SubElement( module_defs, "procedure" )
module_proc.set( "name", proc_name )
code_node = SubElement( module_proc, "code" )
code_node.set( "category", "upward facing" )
code_node.text = code.generate()
proc_count += 1
print "Generated", proc_count, "procedures"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class GroupsArgumentCollectionPass ( Pass )
Purpose:
collect common arguments into the function groups
'''
class CommonArgumentCollectionPass ( Pass ):
dependencies = [BucketLAPACKFunctionGroupsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CommonArgumentCollectionPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group" ):
for config_node in group_node.findall( "./matrix-configuration" ):
args_tree = ET.Element( "analogue-arguments-list" )
'''
prefix_type_map = {
"s" : "float",
"d" : "double",
"c" : "complex_float",
"z" : "complex_double",
"float" : "s",
"double" : "d",
"complex_float" : "c",
"complex_double" : "z"
}
'''
all_args = {} # dictionary to set [arg_name]=>set( function_names )
all_funcs = set() # set of all functions encountered
func_args_type = {} # {func_name} => {arg_name} => type_name
for type_func in config_node.findall( "./types/type" ):
type_name = type_func.get( "name" )
all_funcs.add( type_name )
func_args_type[ type_name ] = {}
chapel_func = xml_tree.find( type_func.get( "analogue" ) )
if chapel_func == None:
raise GeneralPassFailure( type_name + config_node.get( "name" ) + group_node.get( "name" ) + " does not have chapel analogue" )
for arg in chapel_func.findall( "./arguments-list/argument" ):
func_args_type[ type_name ][ arg.get("name") ] = arg.get("type")
find = args_tree.find( "./argument/[@name='" + arg.get( "name" ) + "']" )
if find == None:
args_tree.append( arg )
elif arg.get( "type" ) != find.get( "type" ):
find.set( "type", "?T" )
abstract_arg = ET.Element( "argument" )
arg_name = arg.get( "name" )
if not arg_name in all_args:
all_args[arg_name] = set()
all_args[arg_name].add(type_name)
for arg_name in all_args:
if all_args[ arg_name ] != all_funcs:
arg = args_tree.find( "./argument/[@name='" + arg_name + "']" )
args_tree.remove( arg )
for type_func_name in all_args[ arg_name ]:
#print "find", type_func_name
#prettyprintxml( config_node )
type_func = config_node.find( "./types/type/[@name='" + type_func_name + "']" )
args_list = SubElementUnique( type_func, "arguments-list" )
args_list.append( arg )
config_node.append( args_tree )
#prettyprintxml( abstract_lapack.find( "./group/[@name='sv']" ) )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseLAPACKAbstractPass ( Pass )
Purpose:
Ties together all passes that populate the Abstract-LAPACK classes
for upward facing LAPACK chapel procedures
'''
class BaseAbstractLAPACKPass ( Pass ):
dependencies = [BucketLAPACKFunctionGroupsPass, CommonArgumentCollectionPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseAbstractLAPACKPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class DropAttemptedAssociations ( Pass ):
dependencies = [BaseChapelizePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DropAttemptedAssociations
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
output_xml = ET.Element( "pass" )
output_xml.set( "name", "DropAttemptedAssociations" )
output_procs = SubElement( output_xml, "procedures" )
for chpl_proc in xml_tree.findall( "./chapel-module/procedures/procedure" ):
proc_name = chpl_proc.get( "name" )
if proc_name.startswith( "LAPACK_" ) or proc_name.endswith( "_work" ):
continue
base_name = proc_name.replace( "LAPACKE_", "" )
match = func_name_group_regex.search( base_name )
if match == None:
print proc_name, "(", base_name, ") does not match regex"
continue
func = match.group( "function" )
config = match.group( "config" )
type = match.group( "type" )
if not config.startswith( "ge" ):
continue
proc = copy.deepcopy( chpl_proc )
print proc.get( "name" )
#prettyprintxml( proc )
args_node = proc.find( "./arguments-list" )
args_list = [ ]
args_names = []
remove_list = set()
pass_through = {}
for arg in args_node.findall( "./argument" ):
args_list.append( arg )
args_names.append( arg.get("name") )
pass_through[ arg.get("name") ] = arg.get( "name" )
for arg in args_list:
if arg.get( "semantic" ) != "array" :
continue
if arg.get( "dimensions" ) != None:
dimensions = arg.get( "dimensions" ).lower().split(",")
for i in range( len(dimensions) ):
dimension = dimensions[i]
if dimension == "*":
continue
removeVar = None
for find in args_list:
if find.get( "name" ) == dimension:
removeVar = find
break
if removeVar != None:
remove_list.add( removeVar )
pass_through[ dimension ] = arg.get("name") + ".domain.dim("+str(i+1)+").size"
else:
print ( dimension + " is not described in the arguments of "+proc.get( "name" ) )
if arg.get( "matrix-size" ) != None:
matrix_size = arg.get( "matrix-size" ).lower()
rows = matrix_size.split(",")[0].strip()
cols = matrix_size.split(",")[1].strip()
removeRows = None
removeCols = None
for find in args_list:
if find.get( "name" ) == rows:
removeRows = find
if find.get( "name" ) == cols:
removeCols = find
if removeRows != None and removeCols != None:
pass_through[ rows ] = "if matrix_order == LAPACK_ROW_MAJOR then " + arg.get("name") + ".domain.dim(1).size else " + arg.get("name") + ".domain.dim(2).size "
pass_through[ cols ] = "if matrix_order == LAPACK_ROW_MAJOR then " + arg.get("name") + ".domain.dim(2).size else " + arg.get("name") + ".domain.dim(1).size "
remove_list.add( removeRows )
remove_list.add( removeCols )
else:
print ( rows + " and " + cols + " are not described in the arguments of "+proc.get( "name" ) )
for arg in args_list:
if arg.get( "semantic" ) != "scalar" :
continue
if arg.get( "type" ) == "c_char":
arg.set( "type", "string" )
pass_through[ arg.get("name") ] = arg.get( "name" ) + ".byte(1) : c_char"
associate_array_str = arg.get( "associate-array" )
associate_field_str = arg.get( "associate-field" )
if associate_array_str != None:
array_field_map = {}
arrays = associate_array_str.split(",")
fields = associate_field_str.split(",")
array = ""
field = ""
for i in range( len( arrays ) ) :
arrays[i] = arrays[i].lower()
fields[i] = fields[i].lower()
array_field_map[ arrays[i] ] = fields[i]
for associate_array in arrays:
if associate_array in args_names:
array = associate_array
field = fields[ arrays.index( array ) ]
break
if field == "rows":
pass_through[ arg.get("name") ] = "if matrix_order == LAPACK_ROW_MAJOR then " + array + ".domain.dim(1).size else " + array + ".domain.dim(2).size "
elif field == "columns":
pass_through[ arg.get("name") ] = "if matrix_order == LAPACK_ROW_MAJOR then " + array + ".domain.dim(2).size else " + array + ".domain.dim(1).size "
elif field == "order" or field == "rank":
pass_through[ arg.get("name") ] = array + ".domain.dim(1).size"
else:
raise GeneralPassFailure( field + " is not a recognized array association field" )
remove_list.add( arg )
pass_through_node = SubElement( proc, "pass-through" )
for arg in args_node.findall( "./argument" ):
passing = copy.deepcopy( arg )
passing.text = pass_through[ arg.get( "name" ) ]
pass_through_node.append( passing )
for rm_arg in remove_list:
args_node.remove( args_node.find( "./argument/[@name='" + rm_arg.get( "name" ) + "']" ) )
count = 0
for arg in args_node:
arg.set("position", str( count ) )
count += 1
if arg.get( "name" ) == "matrix_order":
arg.text = "LAPACK_ROW_MAJOR"
#print pass_through
#prettyprintxml( proc )
#print pass_through, "\n", "==="*20, "\n"
output_procs.append( proc )
prettywritexml( output_xml, "DropAttemptedAssociations_output.xml" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class AbstractDropAttemptedAssociations ( Pass ):
dependencies = [BaseAbstractLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = AbstractDropAttemptedAssociations
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group" ):
for config_node in group_node.findall( "./matrix-configuration" ):
if config_node.findall( "./types/type/arguments-list" ) != [] :
print config_node.get("name") + group_node.get("name"), " has typed functions with non common arguments. Skipping."
continue
full_func_name = config_node.get("name") + group_node.get("name")
all_args = []
array_args = set()
method_args = []
pass_through = {} # str => str
removed = {} # str => bool
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
all_args.append( arg.get("name") )
if arg.get( "semantic" ) == "array" :
array_args.add( arg.get("name" ) )
removed[ arg.get("name") ] = False
method_args.append( arg )
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
if removed[ arg.get("name") ] or arg.get( "semantic" ) != "array":
continue
pass_through[ arg.get("name") ] = arg.get( "name" )
if arg.get( "dimensions" ) != None:
dimensions = arg.get( "dimensions" ).split(",")
for i in range( len(dimensions) ):
dimension = dimensions[i].lower()
if dimension == "*":
continue
pass_through[ dimension ] = arg.get("name") + ".domain.dim("+str(i+1)+").size"
removed[ dimension ] = True
matrix_size = arg.get( "matrix-size" )
if matrix_size != None:
rows = matrix_size.split(",")[0].strip().lower()
cols = matrix_size.split(",")[1].strip().lower()
pass_through[ rows ] = "if matrix_order == LAPACK_ROW_MAJOR then " + arg.get("name") + ".domain.dim(1).size else " + arg.get("name") + ".domain.dim(2).size "
pass_through[ cols ] = "if matrix_order == LAPACK_ROW_MAJOR then " + arg.get("name") + ".domain.dim(2).size else " + arg.get("name") + ".domain.dim(1).size "
removed[ rows ] = True
removed[ cols ] = True
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
if removed[ arg.get("name") ] or arg.get( "semantic" ) != "scalar":
continue
pass_through[ arg.get("name") ] = arg.get("name")
for rm in removed:
if not removed[rm]:
continue
for i in range( len( method_args ) ):
if method_args[i].get("name") == rm:
method_args.remove( method_args[i] )
break
interface_node = SubElement( config_node, "method-arguments" )
for arg in method_args :
argument = SubElement( interface_node, "argument" )
argument.set( "name", arg.get("name") )
argument.set( "intent" , arg.get("intent") )
argument.set( "semantic", arg.get("semantic") )
argument.set( "type", arg.get("type") )
argument.text = " " if arg.get("name") != "matrix_order" else "LAPACK_ROW_MAJOR"
pass_through_node = SubElement( config_node, "arguments-relationships" )
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
arg_name = arg.get( "name" )
arg_relate = SubElement( pass_through_node, "argument" )
arg_relate.set( "name", arg_name )
arg_relate.text = pass_through[arg_name]
prettywritexml( abstract_lapack, "AbstractDropAttemptedAssociations_output.xml" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindNeedsArgsDocPatchPass ( Pass )
Purpose:
Was used to find fortran files with incorrect argument documentation
( \param[intent] blah blah )
'''
class FindNeedsArgsDocPatchPass ( Pass ):
dependencies = [BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindNeedsArgsDocPatchPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_root_procs = xml_tree.find( "./LAPACK/procedures" )
patch = []
for proc in lapack_root_procs.findall( "./procedure" ):
printed = False
#prettyprintxml( proc )
for arg in proc.findall( "./arguments-list/argument" ):
if arg.find( "./documentation" ) == None:
if not printed:
print proc.get( "name" ), proc.get( "file-name" )
printed = True
print arg.get( "name" ), "MISSING"
patch.append( (proc.get("name"), proc.get("file-name"), arg.get("name") ) )
print patch
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindNeedsFuncArgsTypePatchPass ( Pass )
Purpose:
Was used to find fortran files with incorrect argument type documentation
( ie ..Scalar Arguments.. blah blah )
'''
class FindNeedsFuncArgsTypePatchPass ( Pass ):
dependencies = [LAPACKFunctionDefinePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindNeedsFuncArgsTypePatchPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = lapack_node.find( "./procedures" )
none_scalar = []
none_array = []
for proc_node in procs_node.findall( "./procedure" ):
proc_file_name = proc_node.get( "file-name" )
doc_node = text_node.find( "./file/[@name='" + proc_file_name + "']/documentation" )
scalars = doc_scalarargs_regex.search( doc_node.text )
arrays = doc_arrayargs_regex.search( doc_node.text )
if scalars == None:
none_scalar.append( (proc_node.get( "name"), proc_file_name) )
if arrays == None:
none_array.append( (proc_node.get( "name"), proc_file_name) )
print "none_scalars", none_scalar,"\n\nnone_arrays", none_array
print "="*100
for i in none_scalar:
sys.stdout.write( i[1] + "," )
print "\n"*2
for i in none_array:
sys.stdout.write( i[1] + "," )
print "\n"*2
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindAllLAPACKETypesPass ( Pass )
Purpose:
Was used to collect all the types named in LAPACKE.h
to put into input xml type defines etc.
'''
class FindAllLAPACKETypesPass ( Pass ):
dependencies = [BaseLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindAllLAPACKETypesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapacke_procs_root = xml_tree.find( "./LAPACKE/procedures" )
types = set()
for proc in lapacke_procs_root.findall( "./procedure" ):
types.add( proc.get( "return-type" ) )
for arg in proc.findall( "./arguments-list/argument" ):
types.add( arg.get( "type" ) )
print types
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindScalarOutIntentsPass ( Pass )
Purpose:
Find scalars in the fortran code with 'out' intents that
are also not INFO arguments.
Explore whether there are LAPACKE scalars with out intents.
'''
class FindScalarOutIntentsPass ( Pass ):
dependencies = [BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindScalarOutIntentsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_procs_root = xml_tree.find( "./LAPACK/procedures" )
print lapack_procs_root
outs = []
for proc in lapack_procs_root.findall( "./procedure" ):
for arg in proc.findall( "./arguments-list/argument" ):
if arg.get( "semantic" ) == "scalar" \
and "out" in arg.get( "intent" ) \
and arg.get("name").lower() != "info":
outs.append( (proc.get( "name" ), arg.get( "name" ), proc.get( "file-name" ) ) )
print outs
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindIntentSetPass ( Pass ):
Purpose:
        find the set of all intents that exist in the LAPACKE tree after the fold pass
'''
class FindIntentSetPass ( Pass ):
dependencies = [FoldLAPACKSemanticsIntentsToLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindIntentSetPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_procs_root = xml_tree.find( "./LAPACKE/procedures" )
intents = set()
for proc in lapack_procs_root.findall( "./procedure" ):
#print proc.get( "name" )
#prettyprintxml( proc )
for arg in proc.findall( "./arguments-list/argument" ):
#print arg.get( "name" )
if arg.get( "intent" ) != None:
intents.add( arg.get( "intent" ) )
print intents
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindIntentSemanticsRefsSetPass ( Pass ):
Purpose:
        find the set of all (intent, semantic, refdepth) combinations that exist in the LAPACKE tree after the fold pass
'''
class FindIntentSemanticsRefsSetPass ( Pass ):
dependencies = [FoldLAPACKSemanticsIntentsToLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindIntentSemanticsRefsSetPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_procs_root = xml_tree.find( "./LAPACKE/procedures" )
combos = {}
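        # maps each (intent, semantic, refdepth) triple to one exemplar (proc, arg) pair that exhibits it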
for proc in lapack_procs_root.findall( "./procedure" ):
for arg in proc.findall( "./arguments-list/argument" ):
intent = arg.get( "intent" )
semantic = arg.get( "semantic" )
refdepth = arg.get( "refdepth" )
combos[ (intent, semantic, refdepth ) ] = (proc, arg)
for key in combos:
print key, "(", combos[ key ][0].get( "name" ), combos[key][1].get( "name" ), ")"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindBadIntentSemanticsCodePass ( Pass ):
Purpose:
        find Fortran files that produced LAPACKE arguments where exactly one of intent or semantic is None
'''
class FindBadIntentSemanticsCodePass ( Pass ):
dependencies = [FoldLAPACKSemanticsIntentsToLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindBadIntentSemanticsCodePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_procs_root = xml_tree.find( "./LAPACKE/procedures" )
list = {}
for proc in lapack_procs_root.findall( "./procedure" ):
proc_name = proc.get("name")
for arg in proc.findall( "./arguments-list/argument" ):
arg_name = arg.get( "name" )
intent = arg.get( "intent" )
semantic = arg.get( "semantic" )
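                # XOR: flag arguments where exactly one of intent/semantic is missing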
if arg_name != "matrix_order" and ( (intent == None) ^ (semantic == None) ):
if not proc_name in list:
list[proc_name] = []
list[proc_name].append( (arg_name, intent, semantic, proc) )
files_str = str()
for key in list:
            proc = list[key][0][3] # first entry; all entries under a key share the same proc
analogue_txt = proc.find( "./analogues/analogue" ).text
analogue = xml_tree.find( analogue_txt )
files_str += analogue.get( "file-name" ) + ","
print key, analogue_txt, analogue.get( "file-name" )
#prettyprintxml( proc )
#prettyprintxml( analogue )
for elem in list[key]:
print "\t",elem
print ""
print files_str
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindPassByRefNonAnalouges ( Pass )
    Purpose:
        find all C LAPACK procedures, and pass-by-reference arguments, with no Fortran analogues
'''
class FindPassByRefNonAnalouges ( Pass ):
dependencies = [BaseAssociatePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindPassByRefNonAnalouges
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_procs_root = xml_tree.find( "./LAPACKE/procedures" )
list = []
for proc in lapack_procs_root.findall( "./procedure" ):
proc_name = proc.get( "name" )
if proc.find( "./analogues" ) == None:
#list.add( proc.get( "name" ) )
print "Function", proc_name, "has no fortran analogue"
continue
printed = False
for arg in proc.findall( "./arguments-list/argument" ):
if arg.find( "./analogues" ) == None and \
int(arg.get( "refdepth" )) > 0 :
if not printed:
printed = True
print "In function", proc_name, ":"
print "\tArgument", arg.get( "name" ), "of refdepth", int(arg.get( "refdepth" )), "has no fortran analogue"
if printed:
print ""
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindFortranNoTypes ( Pass )
Purpose:
find any fortran arguments with no associated type
'''
class FindFortranNoTypes ( Pass ):
dependencies = [BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindFortranNoTypes
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACK/procedures" )
print lapack_f_procs
for f_proc in lapack_f_procs.findall( "./procedure" ):
for f_arg in f_proc.findall( "./arguments-list/argument" ):
if f_arg.get( "type" ) == None:
print f_proc.get( "name" ), f_proc.get( "file-name" ), f_arg.get( "name" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BucketArgumentsSemanticsPass ( Pass )
Purpose:
Find and bucket arguments by semantic
'''
class BucketArgumentsSemanticsPass ( Pass ):
dependencies = [FoldLAPACKSemanticsIntentsToLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BucketArgumentsSemanticsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACKE/procedures" )
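        # buckets as: arg_name => { semantic => [ proc names ] }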
variables = {}
for proc in lapack_f_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
for arg in proc.findall( "./arguments-list/argument" ):
arg_name = arg.get( "name" )
semantic = arg.get( "semantic" ) if arg.get( "semantic" ) != None else "none"
if not arg_name in variables:
variables[ arg_name ] = {}
if not semantic in variables[ arg_name ]:
variables[ arg_name ][ semantic ] = []
variables[ arg_name ][ semantic ].append( proc_name )
for arg in variables:
if len( variables[ arg ] ) > 2:
print arg
for semantic in variables[ arg ]:
print " \"" + semantic + "\"", ":", len( variables[ arg ][ semantic ] )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BucketFortranTypes ( Pass )
Purpose:
find all fortran types
'''
class BucketFortranTypes ( Pass ):
dependencies = [DestroyUnassociatedFortranFunctionsTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BucketFortranTypes
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACK/procedures" )
lapack_c_procs = xml_tree.find( "./LAPACKE/procedures" )
f_types = set()
c_types = set()
for f_proc in lapack_f_procs.findall( "./procedure" ):
#if f_proc.get( "return-type" ) != None:
# f_types.add( f_proc.get( "return-type" ) )
for f_arg in f_proc.findall( "./arguments-list/argument" ):
if f_arg.get( "type" ) != None:
f_types.add( f_arg.get( "type" ) )
else:
f_types.add( "~BAD. None for type~" )
for c_proc in lapack_c_procs.findall( "./procedure" ):
#if c_proc.get( "return-type" ) != None:
# c_types.add( c_proc.get( "return-type" ) )
for c_arg in c_proc.findall( "./arguments-list/argument" ):
if c_arg.get( "type" ) != None:
c_types.add( c_arg.get( "type" ) )
else:
c_types.add( "~BAD. None for type~" )
print "C types", c_types
print "Fortran types", f_types,"\n"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindAFortranType ( Pass )
Purpose:
find a fortran type
'''
class FindAFortranType ( Pass ):
dependencies = [ BaseAssociatePass ]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindAFortranType
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACK/procedures" )
#lapack_c_procs = xml_tree.find( "./LAPACKE/procedures" )
f_types = set()
#c_types = set()
find = "RECURSIVE"
for f_proc in lapack_f_procs.findall( "./procedure" ):
if f_proc.get( "return-type" ) == find:
print f_proc.get( "name" )
#return
for f_arg in f_proc.findall( "./arguments-list/argument" ):
if f_arg.get( "type" ) == find:
print f_proc.get( "name" ), f_arg.get( "name" )
#return
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindUnmatchedArgsPass ( Pass )
Purpose:
Find unresolved matches arising from misnames
'''
class FindUnmatchedArgsPass ( Pass ):
dependencies = [BaseAssociatePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindUnmatchedArgsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
lapack_f_root = xml_tree.find( "./LAPACK" )
lapack_f_procs = lapack_f_root.find( "./procedures" )
for c_proc in lapack_c_procs.findall( "./procedure" ):
# Find the fortran analogue.
# Note: there should only be one path from C -> Fortran.
# though there may be many paths from Fortran -> C
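            # an <analogue> element carries an XPath to its counterpart in its text,
            # e.g. (hypothetical): <analogue>./LAPACK/procedures/procedure/[@name='dgesv']</analogue>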
f_proc_ana = c_proc.find( "./analogues/analogue" )
if f_proc_ana == None:
continue
f_proc = xml_tree.find( f_proc_ana.text )
c_no_match = []
f_no_match = []
for c_arg in c_proc.findall( "./arguments-list/argument" ):
#print c_arg.get( "name" )
if c_arg.find( "./analogues/" ) == None \
and c_arg.get( "name" ) != "matrix_order" :
#print "has none"
c_no_match.append( c_arg )
#prettyprintxml( c_arg )
for f_arg in f_proc.findall( "./arguments-list/argument" ):
f_ana_node = f_arg.find( "./analogues" )
# if zero analogues add no_match
if f_ana_node == None:
f_no_match.append( f_arg )
continue
# if no analogues to this function add no_match
if f_ana_node.find( "./analogue/[@function='" + c_proc.get( "name" ) + "']" ) == None \
and f_arg.get( "name" ) != "INFO":
f_no_match.append( f_arg )
if c_no_match == []: continue
print c_proc.get( "name" ), ":", f_proc.get( "name" )
print "+",c_proc.get( "name" )
for m in c_no_match:
#prettyprintxml( m )
print "\t-", m.get( "name" )
print "+",f_proc.get( "name" )
for m in f_no_match:
#prettyprintxml( m )
print "\t-", m.get( "name" )
print "\n"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindNoneLAPACKESementicsPass ( Pass )
Purpose:
        Find LAPACKE procedure arguments with no semantic.
        This arises when the function or its arguments have no
        analogues, or the analogues have not been imported
'''
class FindNoneLAPACKESementicsPass ( Pass ):
dependencies = [BaseTransformLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindNoneLAPACKESementicsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
for c_proc in lapack_c_procs.findall( "./procedure" ):
#if c_proc.find( "./analogues/analogue" ) == None: continue
printed = False
for c_arg in c_proc.findall( "./arguments-list/argument" ):
if c_arg.get( "semantic" ) == None:
if not printed:
print c_proc.get( "name" )
printed = True
print "Missing sementic on", c_arg.get( "name" )
if printed:
print ""
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindHasNoFortranAnaloguePass ( Pass ):
dependencies = [BaseAssociatePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindHasNoFortranAnaloguePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
list = set()
for c_proc in lapack_c_procs.findall( "./procedure" ):
if c_proc.find( "./analogues/analogue" ) == None:
#list.add( c_proc.get("name").replace( "LAPACKE_", "" ).replace( "LAPACK_", "" ).replace( "_work", "" ) )
print c_proc.get( "name" )
for i in list:
print i
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DropFileOfCTreeUnmatchedArgsPass ( Pass )
Purpose:
        export an XML file containing most of the skeleton of the XML tree that
        could be installed into the input XML for the ImportArgumentAnaloguesPass
'''
class DropFileOfCTreeUnmatchedArgsPass ( Pass ):
dependencies = [BaseAssociatePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DropFileOfCTreeUnmatchedArgsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
output_tree = ET.Element( "pass-output" )
pass_output = SubElement( output_tree, "pass" )
pass_output.set( "name", "DropFileOfCTreeUnmatchedArgsPass" )
for c_proc in lapack_c_procs.findall( "./procedure" ):
# Find the fortran analogue.
# Note: there should only be one path from C -> Fortran.
# though there may be many paths from Fortran -> C
f_proc_ana = c_proc.find( "./analogues/analogue" )
if f_proc_ana == None:
continue
f_proc = xml_tree.find( f_proc_ana.text )
c_no_match = []
f_no_match = []
for c_arg in c_proc.findall( "./arguments-list/argument" ):
if c_arg.find( "./analogues/" ) == None \
and c_arg.get( "name" ) != "matrix_order" :
c_no_match.append( c_arg )
for f_arg in f_proc.findall( "./arguments-list/argument" ):
f_ana_node = f_arg.find( "./analogues" )
# if zero analogues add no_match
if f_ana_node == None:
f_no_match.append( f_arg )
continue
# if no analogues to this function add no_match
if f_ana_node.find( "./analogue/[@function='" + c_proc.get( "name" ) + "']" ) == None \
and f_arg.get( "name" ) != "INFO":
f_no_match.append( f_arg )
if c_no_match == []: #and f_no_match == []:
continue
proc_node = SubElement( pass_output, "procedure" )
proc_node.set("name", c_proc.get( "name" ) )
proc_node.set( "analogue-path", f_proc_ana.text )
for c_arg in c_no_match:
arg_node = SubElement( proc_node, "argument" )
arg_node.set( "name", c_arg.get("name") )
possible = SubElement( arg_node, "possible_substitutions" )
for f_arg in f_no_match :
f_arg_node = SubElement( possible, "option" )
f_arg_node.set( "name", f_arg.get( "name" ) )
f_arg_node.set( "semantic", f_arg.get( "semantic" ) )
f_arg_node.set( "intent", f_arg.get( "intent" ) )
f_arg_node.set( "type", f_arg.get( "type" ) )
f_arg_node.set( "substitution", f_arg.get( "name" ) )
prettywritexml( output_tree, "DropFileOfCTreeUnmatchedArgsPass_output.xml" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DropFileOfCTreeUnmatchedArgsWithSuggestionsPass ( Pass )
Purpose:
        export an XML file containing most of the skeleton of the XML tree that
        could be installed into the input XML for the ImportArgumentAnaloguesPass,
        and also include suggestions based on the name-score and type-union heuristics
        that were used as an attempt to solve the issue automatically but were
        found to over-match
'''
class DropFileOfCTreeUnmatchedArgsWithSuggestionsPass ( Pass ):
dependencies = [BaseAssociatePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DropFileOfCTreeUnmatchedArgsWithSuggestionsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
output_tree = ET.Element( "pass-output" )
pass_output = SubElement( output_tree, "pass" )
pass_output.set( "name", "DropFileOfCTreeUnmatchedArgsPass" )
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
lapack_f_root = xml_tree.find( "./LAPACK" )
lapack_f_procs = lapack_f_root.find( "./procedures" )
for c_proc in lapack_c_procs.findall( "./procedure" ):
# Find the fortran analogue.
# Note: there should only be one path from C -> Fortran.
# though there may be many paths from Fortran -> C
f_proc_ana = c_proc.find( "./analogues/analogue" )
if f_proc_ana == None:
continue
f_proc = xml_tree.find( f_proc_ana.text )
c_no_match = []
f_no_match = []
for c_arg in c_proc.findall( "./arguments-list/argument" ):
if c_arg.find( "./analogues/" ) == None \
and c_arg.get( "name" ) != "matrix_order" :
c_no_match.append( c_arg )
for f_arg in f_proc.findall( "./arguments-list/argument" ):
f_ana_node = f_arg.find( "./analogues" )
# if zero analogues add no_match
if f_ana_node == None:
f_no_match.append( f_arg )
continue
# if no analogues to this function add no_match
if f_ana_node.find( "./analogue/[@function='" + c_proc.get( "name" ) + "']" ) == None \
and f_arg.get( "name" ) != "INFO" :
f_no_match.append( f_arg )
if c_no_match == [] :
continue
proc_node = SubElement( output_tree, "procedure" )
proc_node.set( "name", c_proc.get( "name" ) )
proc_node.set( "path", "./LAPACKE/procedures/procedure/[@name='" + c_proc.get( "name" ) + "']")
for c_arg in c_no_match:
arg_node = SubElement( proc_node, "argument" )
arg_info = SubElement( arg_node, "argument-info" )
arg_node.set( "name", c_arg.get( "name" ) )
arg_node.set( "substitution", "????" )
arg_node.set( "substitution-path", "????")
arg_info.set( "path", proc_node.get( "path" ) + "/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']" )
arg_info.set( "type", c_arg.get( "type" ) )
arg_info.set( "refdepth", c_arg.get("refdepth") )
if f_no_match != None:
possibles = SubElement( arg_node, "possible-analogues" )
for f_arg in f_no_match:
possible = SubElement( possibles, "possible" )
possible.set( "name", f_arg.get( "name" ) )
possible.set( "path", "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']" )
possible.set( "semantic", f_arg.get( "semantic" ) )
possible.set( "intent", f_arg.get( "intent" ) )
possible.set( "type", f_arg.get( "type" ) )
print c_proc.get( "name" ), f_proc.get( "name" )
type_map = {
"LOGICAL" : "booelan",
"lapack_int" : "int32",
"INTEGER" : "int32",
"lapack_complex_double" : "complex128",
"lapack_complex_float" : "complex64",
"COMPLEX*16" : "complex128",
"COMPLEX" : "complex64",
"DOUBLE" : "real32",
"DOUBLE PRECISION" : "real64",
"REAL" : "real32",
"SINGLE PRECISION" : "real32",
"double" : "real64",
"float" : "real32",
"char" : "char",
"CHARACTER" : "char",
"CHARACTER*1" : "char"
}
t_sets = {}
for arg in c_no_match:
type = type_map[ arg.get( "type" ) ]
if not type in t_sets:
t_sets[ type ] = set()
t_sets[ type ].add( arg )
for arg in f_no_match:
#print f_proc.get("name"), arg.get("name")
type = type_map[ arg.get( "type" ) ]
if not type in t_sets:
t_sets[ type ] = set()
t_sets[ type ].add( arg )
for type in t_sets:
# when there only exists a pair of arguments in a type,
# and those arguments are each in opposite code trees (fortran/C)
# it can heuristically be assumed that those arguments can be associated
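                # e.g. (hypothetical) if the only two unmatched "int32" arguments are
                # C "itype" and Fortran "ITYPE", they are paired with each other by elimination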
if len( t_sets[ type ] ) == 2:
arg_1 = t_sets[ type ].pop()
arg_2 = t_sets[ type ].pop()
if (arg_1 in c_no_match and arg_2 in f_no_match ) ^ \
(arg_2 in c_no_match and arg_1 in f_no_match ):
c_arg = arg_1 if arg_1 in c_no_match else arg_2
f_arg = arg_2 if arg_2 in f_no_match else arg_1
print "match", c_arg.get("name"), "to", f_arg.get("name"),"unique type union"
# ?_ana_node is the analogue record under ? language (ie c_ana_node notes the argument in the fortran tree, but lives in the C tree)
                        # Note that it is totally possible to create the path string from the two attributes of the tag.
                        # easier to create it once here, instead of many times everywhere else.
arg_node = proc_node.find( "./argument/[@name='" + c_arg.get( "name" ) + "']" )
arg_node.set( "substitution", f_arg.get( "name" ) )
arg_node.set( "substitution-path", "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']" )
'''
c_ana_node = SubElement( SubElementUnique( c_arg, "analogues" ), "analogue" )
c_ana_node.text = "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']"
c_ana_node.set( "function", f_proc.get( "name" ) )
c_ana_node.set( "name", f_arg.get( "name" ) )
f_ana_node = SubElement( SubElementUnique( f_arg, "analogues" ), "analogue" )
f_ana_node.text = "./LAPACKE/procedures/procedure/[@name='" + c_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']"
f_ana_node.set( "function", c_proc.get( "name" ) )
f_ana_node.set( "name", c_arg.get( "name" ) )
'''
c_no_match.remove( c_arg )
f_no_match.remove( f_arg )
                # if there are more than two arguments in a type
                # we can try to match the argument names by string-distance score
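                # e.g. (hypothetical) C "uplo" against Fortran candidates {"UPLO", "VECT"}:
                # score_string("uplo", "uplo") yields the unique minimum score, so the pair is matched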
elif len( t_sets[ type ] ) > 2 :
change = True # True to emulate do-while
iter = 1
while change:
print "Iteration:", iter
change = False
c_removes = []
f_removes = []
for c_arg in c_no_match:
min_list = []
min_score = 10**1000
for f_arg in f_no_match:
score = score_string( c_arg.get("name").lower(), f_arg.get("name" ).lower() )
if score < min_score:
min_score = score
min_list = [ f_arg ]
elif score == min_score:
min_list.append( f_arg )
                            if len( min_list ) > 1:
print "BOTCHED matching for", c_arg.get("name"),": args",
for arg in min_list:
print arg.get("name"),",",
print "have same score", min_score
continue
min = min_list[0]
if min_score > 2:
print "FAILED to match", c_arg.get("name"), "to", min.get("name"), "score", min_score, "was too bad"
continue
change = True
print "match", c_arg.get("name"), "to", min.get("name"), "score", min_score
f_arg = min
# ?_ana_node is the analogue record under ? language (ie c_ana_node notes the argument in the fortran tree, but lives in the C tree)
                            # Note that it is totally possible to create the path string from the two attributes of the tag.
                            # easier to create it once here, instead of many times everywhere else.
arg_node = proc_node.find( "./argument/[@name='" + c_arg.get( "name" ) + "']" )
arg_node.set( "substitution", f_arg.get( "name" ) )
arg_node.set( "substitution-path", "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']" )
'''
c_ana_node = SubElement( SubElementUnique( c_arg, "analogues" ), "analogue" )
c_ana_node.text = "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']"
c_ana_node.set( "function", f_proc.get( "name" ) )
c_ana_node.set( "name", f_arg.get( "name" ) )
f_ana_node = SubElement( SubElementUnique( f_arg, "analogues" ), "analogue" )
f_ana_node.text = "./LAPACKE/procedures/procedure/[@name='" + c_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']"
f_ana_node.set( "function", c_proc.get( "name" ) )
f_ana_node.set( "name", c_arg.get( "name" ) )
'''
c_removes.append( c_arg )
f_removes.append( f_arg )
for r in c_removes:
c_no_match.remove( r )
for r in f_removes:
f_no_match.remove( r )
iter += 1
print "No changes"
for c_arg in c_no_match:
print "Could not match", c_arg.get( "name" )
for f_arg in f_no_match:
print "Could not match", f_arg.get( "name" )
print ""
prettywritexml( output_tree, "DropFileOfCTreeUnmatchedArgsPass_output.xml" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindLAPACKFunctionGroups ( Pass ):
dependencies = [ChapelizeLAPACKEFunctionsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindLAPACKFunctionGroups
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapacke_root = xml_tree.find( "./LAPACKE" )
lapacke_procs = lapacke_root.find( "./procedures" )
module_root = xml_tree.find( "./chapel-module" )
module_procs = module_root.find( "./procedures" )
names = set()
groups = {}
for proc in lapacke_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
base_name = proc_name.replace( "LAPACK_", "" ).replace( "LAPACKE_", "" ).replace( "_work", "" )
            match = func_name_group_regex.search( base_name )
if match == None:
print proc_name, "ie", base_name, "does not match regex"
continue
#names.add( base_name );
func = match.group( "function" )
config = match.group( "mtype" )
if not func in groups:
groups[ func ] = {}
if not config in groups[func] :
groups[func][config] = []
groups[func][config].append( proc_name )
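        # e.g. (assumed) "dgesv" would split into function "sv" and mtype "dge",
        # bucketing all variants of a routine under one function, keyed by matrix-type configuration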
group_counts = 0
        config_counts = 0
type_counts = 0
for func in groups:
print func
group_counts += 1
for config in groups[func]:
print "\t", config
config_counts += 1
for name in groups[func][config]:
print "\t\t", name
type_counts += 1
print group_counts, config_counts, type_counts
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindDuplicateLAPACKEFunctions ( Pass ):
dependencies = [BaseLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindDuplicateLAPACKEFunctions
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
root_procs = xml_tree.find( "./LAPACKE/procedures" )
proc_names = set()
for proc in root_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
#print proc_name
if proc_name in proc_names:
raise GeneralPassFailure( proc_name )
else:
proc_names.add( proc_name )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class PrintArgDoc ( Pass ):
dependencies = [FuncArgsDocPass, FuncArgsTypePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = PrintArgDoc
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = lapack_node.find( "./procedures" )
        doc_set = set()
total = 0
docked = 0
arg="A"
for proc_node in procs_node.findall( "./procedure" ):
for arg_node in proc_node.findall( "./arguments-list/argument/[@name='"+ arg +"']" ):
doc_node = arg_node.find( "documentation" )
total += 1
if doc_node == None:
print proc_node.get( "name" ), "/", arg, "has no documentation"
continue
doc = doc_node.text.lower()
doc = doc.replace( "-", "" )
doc = doc.replace( "the", "" )
doc = re.sub( "lda\s+is\s+integer", "", doc )
doc = re.sub( "\s+", "", doc )
doc_set.add( doc )
docked += 1
doc_list = sorted( list( doc_set ), key=len )
for i in doc_list:
print i
print len( doc_list ), "/", docked, "/", total
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindDifferenArgumentsPass ( Pass ):
dependencies = [BucketLAPACKFunctionGroupsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindDifferenArgumentsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group/[@name='sv']" ):
for config_node in group_node.findall( "./matrix-configuration" ):
name_to_args = {} # function_name => { order => args_node }
name_to_counts = {} # function_name => number
for type_node in config_node.findall( "./type" ):
chap_func = xml_tree.find( type_node.get( "analogue" ) )
name_to_args[ type_node.get( "name" ) ] = {}
for arg in chap_func.findall( "./arguments-list/argument" ):
name_to_args[ type_node.get( "name" ) ][ arg.get("position") ] = arg
name_to_counts[ type_node.get( "name" ) ] = len( name_to_args[ type_node.get( "name" ) ] )
all_same = True
all_count = 0
names = name_to_counts.keys()
for i in range( len( names ) - 1 ):
all_same = all_same and ( name_to_counts[ names[i] ] == name_to_counts[ names[i+1] ] )
print all_same
                all_count = name_to_counts[ names[0] ] # grab arbitrary count if all the same
                for pos in range( all_count ):
                    is_same = True
                    for i in range( len(names)-1):
                        # NOTE: position attributes are strings; assuming 0-based positions here
                        arg_a = name_to_args[names[i]].get( str(pos) )
                        arg_b = name_to_args[names[i+1]].get( str(pos) )
                        is_same = is_same and arg_a != None and arg_b != None \
                                  and ( arg_a.get("name") == arg_b.get("name") ) \
                                  and ( arg_a.get("semantic") == arg_b.get("semantic") ) \
                                  and ( arg_a.get("intent") == arg_b.get("intent") )
print pos, is_same
if not is_same:
return
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class CountGroups ( Pass ):
dependencies = [BucketLAPACKFunctionGroupsPass]
complete = False
@staticmethod
def apply( xml_tree ):
        selfname = CountGroups
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
groups = 0
configs = 0
types = 0
for group_node in abstract_lapack.findall( "./group" ):
groups += 1
for config_node in group_node.findall( "./matrix-configuration" ):
configs += 1
for type_node in config_node.findall( "./type" ):
types += 1
print groups, configs, types
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class DropFileOfGroups ( Pass ):
dependencies = [BaseAbstractLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DropFileOfGroups
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group/[@name='sv']" ):
for config_node in group_node.findall( "./matrix-configuration" ):
interface_tree = SubElement( config_node, "method-arguments" )
argument = SubElement( interface_tree, "argument" )
argument.set( "name", "?" )
argument.set( "intent" , "?" )
argument.set( "semantic", "?" )
argument.set( "type", "?" )
relation_tree = SubElement( config_node, "arguments-relationships" )
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
arg_relate = SubElement( relation_tree, "argument" )
arg_relate.set( "name", arg.get("name") )
arg_relate.text = "RELATIONSHIP"
prettyprintxml( abstract_lapack)
prettywritexml( abstract_lapack, "DropFilesOfGroups.xml" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class TryMatrixArgsUnion ( Pass ):
dependencies = [BaseAbstractLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = TryMatrixArgsUnion
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group" ):
for config_node in group_node.findall( "./matrix-configuration" ):
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
pass
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class SolveArgsUnionFor ( Pass ):
dependencies = [BaseAbstractLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = SolveArgsUnionFor
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
unique = set()
co = set()
non = set()
        unset = True
for group_node in abstract_lapack.findall( "./group" ):
for config_node in group_node.findall( "./matrix-configuration" ):
print config_node.get( "name" ) + group_node.get( "name" )
config_args = set()
array_args = set()
array_dims = {}
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
config_args.add( arg.get( "name" ).lower() )
if arg.get( "semantic" ) == "array":
array_args.add( arg.get("name") )
#prettyprintxml( arg )
if "m" in config_args:
print array_args
for elem in array_args:
print elem
co |= array_args
if unset:
unique |= array_args
                        unset = False
else:
unique &= array_args
print unique, "\n"
print "="*10
print unique, "\n"
print co, "\n"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindDifferentLengthCalls ( Pass ):
dependencies = [BaseLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindDifferentLengthCalls
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
procs_dict = {}
fams = set()
for proc in xml_tree.findall( "./LAPACKE/procedures/procedure" ):
if proc.get("name").startswith("LAPACK_"):
continue
base_name = proc.get("name").replace( "LAPACKE_", "" )
            match = func_name_group_regex.search( base_name )
if match == None:
#print proc.get("name"), "(", base_name, ") does not match regex"
continue
func = match.group( "function" )
config = match.group( "config" )
type = match.group( "type" )
name = config + func
if not name in procs_dict:
procs_dict[ name ] = set()
procs_dict[name].add( len( proc.findall( "./arguments-list/argument" ) ) )
if len( procs_dict[name] ) > 1 :
fams.add( name )
#return
#print procs_dict
for fam in fams:
print fam
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class IsNOrMEverTheSame ( Pass ):
dependencies = [BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
        selfname = IsNOrMEverTheSame
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = lapack_node.find( "./procedures" )
for proc_node in procs_node.findall( "./procedure" ):
proc_name = proc_node.get( "name" )
'''
base_name = proc_name.lower()
match = func_name_group_regex.search( base_name );
if match == None:
#print proc_name, "(", base_name, ") does not match regex"
continue
func = match.group( "function" )
config = match.group( "config" )
type = match.group( "type" )
if not config.startswith( "ge" ):
continue
'''
arg_names = [ arg.get("name") for arg in proc_node.findall( "./arguments-list/argument" ) ]
for arg_node in proc_node.findall( "./arguments-list/argument" ):
doc_node = arg_node.find( "documentation" )
if doc_node == None or arg_node.get("semantic") != "scalar" or arg_node.get("type").lower() != "integer":
continue
what = []
who = []
string = []
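                # the regex (assumed) captures clauses like "the number of rows of the matrix A",
                # yielding what="rows" and who="A"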
for m in scalar_matrix_relation_regex.finditer( doc_node.text ):
if not m.group( "what" ) in ["rows", "columns", "order", "rank"] :
continue
names = m.group( "who" ).strip()
names_list = []
if " and " in names:
names_list = [ name.strip() for name in names.split( "and" ) ]
else:
names_list = [ names ]
nameHasSpace = False
for name in names_list:
if " " in name:
nameHasSpace = True
break
if nameHasSpace:
print names, " contains non names. Skipping."
continue
removes = []
for name in names_list:
if not name in arg_names:
removes.append( name )
for rm in removes:
names_list.remove( rm )
if len( names_list ) == 0:
print "Names list had no argument names. Skipping"
continue
what.append( m.group( "what" ) )
who.append( names_list )
string.append( re.sub( "\s+", " ", m.group(0) ) )
if len( what ) == 0 and len( who ) == 0:
continue
#proc_info[ proc_name ][ arg_node.get( "name" ) ] = [ what, who, string]
associate_array = str()
associate_field = str()
first = True
for i in range( len( who ) ):
for array in who[i]:
associate_array += ( "," if not first else "" ) + array
associate_field += ( "," if not first else "" ) + what[i]
first = False
arg_node.set( "associate-array", associate_array )
arg_node.set( "associate-field", associate_field )
prettyprintxml( proc_node )
'''
for func in proc_info:
if proc_info[func] == {}:
continue
print func
for arg in proc_info[func]:
print "\t", arg
for elem in proc_info[func][arg]:
print "\t\t", elem
'''
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindGroupsWithUncommon ( Pass ):
dependencies = [BaseAbstractLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
        selfname = FindGroupsWithUncommon
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group" ):
for config_node in group_node.findall( "./matrix-configuration" ):
printed = False
for type in config_node.findall( "./types/type" ):
if type.find( "./arguments-list" ) != None:
if not printed:
print config_node.get("name") + group_node.get("name")
printed = True
print "\t", type.get("name")
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class TestInputGroupsGen ( Pass ):
dependencies = [BaseCodegenReadyPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = TestInputGroupsGen
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
#abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
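        # For each matrix-configuration this prints a Chapel wrapper shaped roughly like
        # the following (hypothetical sketch; names and argument lists depend on the input XML):
        #   proc gesv(a : [] T, ipiv : [] lapack_int) : lapack_int
        #     where T == real(32) || T == real(64)
        #   {
        #     var gesv_return_info : lapack_int;
        #     if ( T == real(32) )
        #     {
        #       gesv_return_info = LAPACKE_sgesv( (a), (ipiv) );
        #     }
        #     return gesv_return_info;
        #   }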
group_input = loadxml( "DropAttemptedAssociations_input.xml" )
for group in group_input.findall( "./group" ):
for config in group.findall( "./matrix-configuration" ):
code = SequenceOfProducers()
print config.get( "name" ) + group.get( "name" )
code.append( SegmentProducer( "proc " + config.get( "name" ) + group.get( "name" ) ) )
args_producer = ListProducer(", ", "(", ")")
for arg in config.findall( "./method-arguments/argument" ):
args_producer.append( SegmentProducer(
arg.get("intent") + " " + \
arg.get("name") + " : " + \
("[] " if arg.get("semantic") == "array" else "") + \
arg.get("type") + \
( " = " + arg.text if arg.text != None and arg.text.strip() != "" else "" )
)
)
code.append( args_producer )
code.append( LineProducer( ": lapack_int" ) )
code.append( SegmentProducer( "where " ) )
where_producer = ListProducer( " || ", "", "" )
for type in config.findall("./types/type"):
where_producer.append( SegmentProducer( "T == " + type.get( "type" ) ) )
code.append( where_producer )
info_var = config.get( "name" ) + group.get( "name" ) + "_return_info"
func_body = ScopeProducer()
func_body.append( LineProducer( "var " + info_var + " : lapack_int;" ) )
#if_bodies = SequenceOfProducers()
arg_relates = {}
ana_args = []
for arg in config.findall( "./analogue-arguments-list/argument" ):
arg_name = arg.get("name")
arg_relates[ arg_name ] = config.find( "./arguments-relationships/argument/[@name='" + arg_name + "']" )
                ana_args.append( arg )
for type in config.findall("./types/type"):
chpl_ana = xml_tree.find( type.get( "analogue" ) )
if_condition = LineProducer( "if ( T == " + type.get("type") + " )" )
func_body.append( if_condition )
if_body = ScopeProducer()
call_equals = SegmentProducer( info_var + " = " + chpl_ana.get( "name" ) )
call_seq = ListProducer( ", ", "(", ")" )
for ana_arg in ana_args:
call_seq.append( SegmentProducer(
"(" + arg_relates[ana_arg.get("name")].text.strip() + ")" + \
(" : " + ana_arg.get("type") if ana_arg.get("semantic") != "array" else "")
)
)
if_body.append( call_equals + call_seq + LineProducer( ";" ) )
func_body.append( if_body )
func_body.append( LineProducer( "return " + info_var + ";" ) )
code.append( func_body )
print code.generate()
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
| 70,171 |
1,874 | {
"keypair": {
"fingerprint": "44:fe:29:6e:23:14:b9:53:5b:65:82:58:1c:fe:5a:c3",
"name": "keypair-<KEY>",
"public_key": "<KEY>-Nova\n",
"user_id": "fake",
"deleted": false,
"created_at": "2014-05-07T12:06:13.681238",
"updated_at": null,
"deleted_at": null,
"id": 1
}
}
| 213 |
348 | <reponame>chamberone/Leaflet.PixiOverlay
{"nom":"Bestiac","circ":"1ère circonscription","dpt":"Ariège","inscrits":31,"abs":9,"votants":22,"blancs":2,"nuls":0,"exp":20,"res":[{"nuance":"FI","nom":"<NAME>","voix":15},{"nuance":"REM","nom":"<NAME>","voix":5}]} | 106 |
1,144 | package de.metas.handlingunits.storage.impl;
/*
* #%L
* de.metas.handlingunits.base
* %%
* Copyright (C) 2015 metas GmbH
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/gpl-2.0.html>.
* #L%
*/
import java.math.BigDecimal;
import org.compiere.model.I_C_UOM;
import de.metas.product.ProductId;
import de.metas.quantity.Capacity;
import de.metas.quantity.Quantity;
import de.metas.util.Check;
import lombok.NonNull;
public class PlainProductStorage extends AbstractProductStorage
{
private final Capacity capacityTotal;
private BigDecimal qtyInitial;
public PlainProductStorage(
final ProductId productId,
@NonNull final Quantity qtyTotal)
{
this(productId,
qtyTotal.getUOM(),
qtyTotal.toBigDecimal(),
BigDecimal.ZERO // qtyInitial=0 => empty by default
);
}
public PlainProductStorage(final ProductId productId,
final I_C_UOM uom,
final BigDecimal qtyTotal)
{
this(productId,
uom,
qtyTotal,
BigDecimal.ZERO // qtyInitial=0 => empty by default
);
}
public PlainProductStorage(final ProductId productId,
final I_C_UOM uom,
final BigDecimal qtyTotal,
final BigDecimal qtyInitial)
{
capacityTotal = Capacity.createCapacity(qtyTotal,
productId, uom,
false // allowNegativeCapacity
);
Check.assumeNotNull(qtyInitial, "qtyInitial not null");
this.qtyInitial = qtyInitial;
}
public PlainProductStorage(final Capacity capacity, final BigDecimal qtyInitial)
{
capacityTotal = capacity;
Check.assumeNotNull(qtyInitial, "qtyInitial not null");
this.qtyInitial = qtyInitial;
}
@Override
protected Capacity retrieveTotalCapacity()
{
return capacityTotal;
}
@Override
protected BigDecimal retrieveQtyInitial()
{
return qtyInitial;
}
@Override
protected void beforeMarkingStalled()
{
        // we are just saving the current qty as the next qtyInitial
qtyInitial = getQty().toBigDecimal();
}
}
| 851 |
794 | //
// CAScanQRcode.cpp
// CrossApp
//
// Created by mac on 2021/3/12.
// Copyright © 2021 cocos2d-x. All rights reserved.
//
#include "platform/CAScanQRcode.h"
#include "basics/CAApplication.h"
#include "platform/android/jni/JniHelper.h"
#include <jni.h>
static std::function<void(const std::string&)> s_ResultScanning_callback = nullptr;
extern "C"
{
    // result of scanning the QR code
void Java_org_CrossApp_lib_CameraView_resultScanning(JNIEnv *env, jobject thiz, jstring value)
{
const char* str = env->GetStringUTFChars(value, NULL);
if (s_ResultScanning_callback) s_ResultScanning_callback(str);
s_ResultScanning_callback = nullptr;
env->ReleaseStringUTFChars(value, str);
}
    // camera closed
void Java_org_CrossApp_lib_CameraView_cameraClosed(JNIEnv *env, jobject thiz)
{
CrossApp::CAApplication::getApplication()->resume();
}
}
NS_CC_BEGIN
void CAScanQRcode::showScanQRcode(const std::function<void(const std::string&)>& callback)
{
JniMethodInfo info;
bool ret = JniHelper::getStaticMethodInfo(info, "org/CrossApp/lib/CrossAppActivity", "showQRCodeView", "()V");
if (ret)
{
info.env->CallStaticVoidMethod(info.classID, info.methodID);
s_ResultScanning_callback = callback;
}
CrossApp::CAApplication::getApplication()->pause();
}
NS_CC_END
| 576 |
835 | # -*- coding: utf-8 -*-
import pytest
import requests
from verta.environment import Python
pytestmark = pytest.mark.not_oss # skip if run in oss setup. Applied to entire module
class TestFromRun:
def test_from_run(self, experiment_run, model_for_deployment, registered_model):
np = pytest.importorskip("numpy")
experiment_run.log_model(model_for_deployment["model"], custom_modules=[])
experiment_run.log_environment(Python(["scikit-learn"]))
artifact = np.random.random((36, 12))
experiment_run.log_artifact("some-artifact", artifact)
for i, run_id_arg in enumerate(
[experiment_run.id, experiment_run]
): # also accept run obj
model_version = registered_model.create_version_from_run(
run_id=run_id_arg,
name="From Run {} {}".format(experiment_run.id, i),
)
env_str = str(model_version.get_environment())
assert "scikit-learn" in env_str
assert "Python" in env_str
assert (
model_for_deployment["model"].get_params()
== model_version.get_model().get_params()
)
assert np.array_equal(model_version.get_artifact("some-artifact"), artifact)
def test_from_run_diff_workspaces(
self, client, experiment_run, organization, created_entities
):
registered_model = client.create_registered_model(workspace=organization.name)
created_entities.append(registered_model)
model_version = registered_model.create_version_from_run(
run_id=experiment_run.id, name="From Run {}".format(experiment_run.id)
)
assert model_version.workspace != experiment_run.workspace
def test_from_run_diff_workspaces_no_access_error(
self, experiment_run, client_2, created_entities
):
registered_model = client_2.create_registered_model()
created_entities.append(registered_model)
with pytest.raises(requests.HTTPError) as excinfo:
registered_model.create_version_from_run(
run_id=experiment_run.id, name="From Run {}".format(experiment_run.id)
)
exc_msg = str(excinfo.value).strip()
assert exc_msg.startswith("404")
assert "not found" in exc_msg
| 1,001 |
534 | /*
* Copyright (C) 2016 ceabie (https://github.com/ceabie/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ceabie.dexknife;
import org.gradle.api.file.FileTreeElement;
import org.gradle.api.file.RelativePath;
import org.gradle.internal.nativeintegration.filesystem.FileSystem;
import java.io.File;
import java.io.InputStream;
import java.io.OutputStream;
/**
 * Mock file tree element used for filtering.
*
* @author ceabie
*/
public class ClassFileTreeElement implements FileTreeElement {
private RelativePath mRelativePath;
private File mFile;
public void setClassPath(String name) {
mFile = new File(name);
mRelativePath = RelativePath.parse(!isDirectory(), name);
}
@Override
public File getFile() {
return mFile;
}
@Override
public boolean isDirectory() {
return false;
}
@Override
public long getLastModified() {
return 0;
}
@Override
public long getSize() {
return 0;
}
@Override
public InputStream open() {
// try {
// return mZipFile.getInputStream(mZipEntry);
// } catch (IOException e) {
// e.printStackTrace();
// }
return null;
}
@Override
public void copyTo(OutputStream outputStream) {
}
@Override
public boolean copyTo(File file) {
return true;
}
@Override
public String getName() {
return mFile.getName();
}
@Override
public String getPath() {
return mFile.getPath();
}
@Override
public RelativePath getRelativePath() {
return mRelativePath;
}
@Override
public int getMode() {
return isDirectory()
? FileSystem.DEFAULT_DIR_MODE
: FileSystem.DEFAULT_FILE_MODE;
}
}
| 879 |